Diffstat (limited to 'host/lib/convert')
-rw-r--r--  host/lib/convert/CMakeLists.txt                      120
-rw-r--r--  host/lib/convert/convert_common.hpp                  241
-rw-r--r--  host/lib/convert/convert_fc32_to_sc8_with_sse2.cpp   150
-rw-r--r--  host/lib/convert/convert_fc32_with_sse2.cpp          196
-rw-r--r--  host/lib/convert/convert_fc64_to_sc8_with_sse2.cpp   156
-rw-r--r--  host/lib/convert/convert_fc64_with_sse2.cpp          212
-rw-r--r--  host/lib/convert/convert_impl.cpp                    143
-rw-r--r--  host/lib/convert/convert_orc.orc                      80
-rw-r--r--  host/lib/convert/convert_with_neon.cpp                61
-rw-r--r--  host/lib/convert/convert_with_orc.cpp                 65
-rw-r--r--  host/lib/convert/convert_with_tables.cpp             282
-rw-r--r--  host/lib/convert/gen_convert_general.py              206
12 files changed, 1912 insertions, 0 deletions
diff --git a/host/lib/convert/CMakeLists.txt b/host/lib/convert/CMakeLists.txt
new file mode 100644
index 000000000..c42a0a434
--- /dev/null
+++ b/host/lib/convert/CMakeLists.txt
@@ -0,0 +1,120 @@
+#
+# Copyright 2011-2012 Ettus Research LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+########################################################################
+# This file is included, use CMake directory variables
+########################################################################
+INCLUDE(CheckIncludeFileCXX)
+MESSAGE(STATUS "")
+
+########################################################################
+# Look for Orc support
+########################################################################
+FIND_PACKAGE(PkgConfig)
+IF(PKG_CONFIG_FOUND)
+PKG_CHECK_MODULES(ORC "orc-0.4 > 0.4.11")
+ENDIF(PKG_CONFIG_FOUND)
+
+FIND_PROGRAM(ORCC_EXECUTABLE orcc)
+
+LIBUHD_REGISTER_COMPONENT("ORC" ENABLE_ORC ON "ENABLE_LIBUHD;ORC_FOUND;ORCC_EXECUTABLE" OFF)
+
+IF(ENABLE_ORC)
+ INCLUDE_DIRECTORIES(${ORC_INCLUDE_DIRS})
+ LINK_DIRECTORIES(${ORC_LIBRARY_DIRS})
+ ENABLE_LANGUAGE(C)
+
+ SET(orcc_src ${CMAKE_CURRENT_SOURCE_DIR}/convert_orc.orc)
+
+ GET_FILENAME_COMPONENT(orc_file_name_we ${orcc_src} NAME_WE)
+ SET(orcc_gen ${CMAKE_CURRENT_BINARY_DIR}/${orc_file_name_we}.c)
+ MESSAGE(STATUS "Orc found, enabling Orc support.")
+ ADD_CUSTOM_COMMAND(
+ COMMAND ${ORCC_EXECUTABLE} --implementation -o ${orcc_gen} ${orcc_src}
+ DEPENDS ${orcc_src} OUTPUT ${orcc_gen}
+ )
+ LIBUHD_APPEND_SOURCES(${orcc_gen})
+ LIBUHD_APPEND_SOURCES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_orc.cpp
+ )
+ LIBUHD_APPEND_LIBS(${ORC_LIBRARIES})
+ELSE(ENABLE_ORC)
+ MESSAGE(STATUS "Orc not found, disabling orc support.")
+ENDIF(ENABLE_ORC)
+
+########################################################################
+# Check for SSE2 SIMD headers
+########################################################################
+IF(CMAKE_COMPILER_IS_GNUCXX)
+ SET(EMMINTRIN_FLAGS -msse2)
+ELSEIF(MSVC)
+ SET(EMMINTRIN_FLAGS /arch:SSE2)
+ENDIF()
+
+SET(CMAKE_REQUIRED_FLAGS ${EMMINTRIN_FLAGS})
+CHECK_INCLUDE_FILE_CXX(emmintrin.h HAVE_EMMINTRIN_H)
+UNSET(CMAKE_REQUIRED_FLAGS)
+
+IF(HAVE_EMMINTRIN_H)
+ SET(convert_with_sse2_sources
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_fc32_with_sse2.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_fc64_with_sse2.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_fc32_to_sc8_with_sse2.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_fc64_to_sc8_with_sse2.cpp
+ )
+ SET_SOURCE_FILES_PROPERTIES(
+ ${convert_with_sse2_sources}
+ PROPERTIES COMPILE_FLAGS "${EMMINTRIN_FLAGS}"
+ )
+ LIBUHD_APPEND_SOURCES(${convert_with_sse2_sources})
+ENDIF(HAVE_EMMINTRIN_H)
+
+########################################################################
+# Check for NEON SIMD headers
+########################################################################
+IF(CMAKE_COMPILER_IS_GNUCXX)
+ SET(NEON_FLAGS "-mfloat-abi=softfp -mfpu=neon")
+ SET(CMAKE_REQUIRED_FLAGS ${NEON_FLAGS})
+ CHECK_INCLUDE_FILE_CXX(arm_neon.h HAVE_ARM_NEON_H)
+ UNSET(CMAKE_REQUIRED_FLAGS)
+ENDIF(CMAKE_COMPILER_IS_GNUCXX)
+
+IF(HAVE_ARM_NEON_H)
+ SET_SOURCE_FILES_PROPERTIES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_neon.cpp
+ PROPERTIES COMPILE_FLAGS "${NEON_FLAGS}"
+ )
+ LIBUHD_APPEND_SOURCES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_neon.cpp
+ )
+ENDIF()
+
+########################################################################
+# Convert types generation
+########################################################################
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR})
+
+LIBUHD_PYTHON_GEN_SOURCE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/gen_convert_general.py
+ ${CMAKE_CURRENT_BINARY_DIR}/convert_general.cpp
+)
+
+LIBUHD_APPEND_SOURCES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_tables.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_impl.cpp
+)
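
The SSE2 block above only adds the intrinsic sources when a translation unit that includes emmintrin.h compiles with the chosen flags. A minimal sketch of such a probe (the file name and the intrinsic calls are illustrative; the actual CHECK_INCLUDE_FILE_CXX probe merely includes the header), built with g++ -msse2 or cl /arch:SSE2:

    // probe_sse2.cpp -- illustrative compile probe, not part of the patch
    #include <emmintrin.h>

    int main(void){
        //multiply then convert to int32, the primitive the converters below lean on
        const __m128  scaled = _mm_mul_ps(_mm_set_ps1(2.0f), _mm_set_ps1(3.0f));
        const __m128i ints   = _mm_cvtps_epi32(scaled);
        return (_mm_cvtsi128_si32(ints) == 6) ? 0 : 1;
    }
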
diff --git a/host/lib/convert/convert_common.hpp b/host/lib/convert/convert_common.hpp
new file mode 100644
index 000000000..55bc2e99d
--- /dev/null
+++ b/host/lib/convert/convert_common.hpp
@@ -0,0 +1,241 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#ifndef INCLUDED_LIBUHD_CONVERT_COMMON_HPP
+#define INCLUDED_LIBUHD_CONVERT_COMMON_HPP
+
+#include <uhd/convert.hpp>
+#include <uhd/utils/static.hpp>
+#include <boost/cstdint.hpp>
+#include <complex>
+
+#define _DECLARE_CONVERTER(name, in_form, num_in, out_form, num_out, prio) \
+ struct name : public uhd::convert::converter{ \
+ static sptr make(void){return sptr(new name());} \
+ double scale_factor; \
+ void set_scalar(const double s){scale_factor = s;} \
+ void operator()(const input_type&, const output_type&, const size_t); \
+ }; \
+ UHD_STATIC_BLOCK(__register_##name##_##prio){ \
+ uhd::convert::id_type id; \
+ id.input_format = #in_form; \
+ id.num_inputs = num_in; \
+ id.output_format = #out_form; \
+ id.num_outputs = num_out; \
+ uhd::convert::register_converter(id, &name::make, prio); \
+ } \
+ void name::operator()( \
+ const input_type &inputs, const output_type &outputs, const size_t nsamps \
+ )
+
+#define DECLARE_CONVERTER(in_form, num_in, out_form, num_out, prio) \
+ _DECLARE_CONVERTER(__convert_##in_form##_##num_in##_##out_form##_##num_out##_##prio, in_form, num_in, out_form, num_out, prio)
+
+/***********************************************************************
+ * Setup priorities
+ **********************************************************************/
+static const int PRIORITY_GENERAL = 0;
+static const int PRIORITY_EMPTY = -1;
+
+#ifdef __ARM_NEON__
+static const int PRIORITY_LIBORC = 3;
+static const int PRIORITY_SIMD = 1; //the NEON conversions could be implemented better, so Orc wins
+static const int PRIORITY_TABLE = 2; //the tables need a large cache, so they are slower on ARM
+#else
+static const int PRIORITY_LIBORC = 1;
+static const int PRIORITY_SIMD = 2;
+static const int PRIORITY_TABLE = 3;
+#endif
+
+/***********************************************************************
+ * Typedefs
+ **********************************************************************/
+typedef std::complex<double> fc64_t;
+typedef std::complex<float> fc32_t;
+typedef std::complex<boost::int32_t> sc32_t;
+typedef std::complex<boost::int16_t> sc16_t;
+typedef std::complex<boost::int8_t> sc8_t;
+typedef double f64_t;
+typedef float f32_t;
+typedef boost::int32_t s32_t;
+typedef boost::int16_t s16_t;
+typedef boost::int8_t s8_t;
+
+typedef boost::uint32_t item32_t;
+
+/***********************************************************************
+ * Convert complex short buffer to items32 sc16
+ **********************************************************************/
+static UHD_INLINE item32_t sc16_to_item32_sc16(sc16_t num, double){
+ boost::uint16_t real = num.real();
+ boost::uint16_t imag = num.imag();
+ return (item32_t(real) << 16) | (item32_t(imag) << 0);
+}
+
+/***********************************************************************
+ * Convert items32 sc16 buffer to complex short
+ **********************************************************************/
+static UHD_INLINE sc16_t item32_sc16_to_sc16(item32_t item, double){
+ return sc16_t(
+ boost::int16_t(item >> 16),
+ boost::int16_t(item >> 0)
+ );
+}
+
+/***********************************************************************
+ * Convert complex float buffer to items32 sc16
+ **********************************************************************/
+static UHD_INLINE item32_t fc32_to_item32_sc16(fc32_t num, double scale_factor){
+ boost::uint16_t real = boost::int16_t(num.real()*float(scale_factor));
+ boost::uint16_t imag = boost::int16_t(num.imag()*float(scale_factor));
+ return (item32_t(real) << 16) | (item32_t(imag) << 0);
+}
+
+/***********************************************************************
+ * Convert items32 sc16 buffer to complex float
+ **********************************************************************/
+static UHD_INLINE fc32_t item32_sc16_to_fc32(item32_t item, double scale_factor){
+ return fc32_t(
+ float(boost::int16_t(item >> 16)*float(scale_factor)),
+ float(boost::int16_t(item >> 0)*float(scale_factor))
+ );
+}
+
+/***********************************************************************
+ * Convert complex double buffer to items32 sc16
+ **********************************************************************/
+static UHD_INLINE item32_t fc64_to_item32_sc16(fc64_t num, double scale_factor){
+ boost::uint16_t real = boost::int16_t(num.real()*scale_factor);
+ boost::uint16_t imag = boost::int16_t(num.imag()*scale_factor);
+ return (item32_t(real) << 16) | (item32_t(imag) << 0);
+}
+
+/***********************************************************************
+ * Convert items32 sc16 buffer to complex double
+ **********************************************************************/
+static UHD_INLINE fc64_t item32_sc16_to_fc64(item32_t item, double scale_factor){
+ return fc64_t(
+ float(boost::int16_t(item >> 16)*scale_factor),
+ float(boost::int16_t(item >> 0)*scale_factor)
+ );
+}
+
+/***********************************************************************
+ * Convert items32 sc8 buffer to complex char
+ **********************************************************************/
+static UHD_INLINE void item32_sc8_to_sc8(item32_t item, sc8_t &out0, sc8_t &out1, double){
+ out0 = sc8_t(
+ boost::int8_t(item >> 8),
+ boost::int8_t(item >> 0)
+ );
+ out1 = sc8_t(
+ boost::int8_t(item >> 24),
+ boost::int8_t(item >> 16)
+ );
+}
+
+/***********************************************************************
+ * Convert items32 sc8 buffer to complex short
+ **********************************************************************/
+static UHD_INLINE void item32_sc8_to_sc16(item32_t item, sc16_t &out0, sc16_t &out1, double){
+ out0 = sc16_t(
+ boost::int8_t(item >> 8),
+ boost::int8_t(item >> 0)
+ );
+ out1 = sc16_t(
+ boost::int8_t(item >> 24),
+ boost::int8_t(item >> 16)
+ );
+}
+
+/***********************************************************************
+ * Convert items32 sc8 buffer to complex float
+ **********************************************************************/
+static UHD_INLINE void item32_sc8_to_fc32(item32_t item, fc32_t &out0, fc32_t &out1, double scale_factor){
+ out0 = fc32_t(
+ float(boost::int8_t(item >> 8)*float(scale_factor)),
+ float(boost::int8_t(item >> 0)*float(scale_factor))
+ );
+ out1 = fc32_t(
+ float(boost::int8_t(item >> 24)*float(scale_factor)),
+ float(boost::int8_t(item >> 16)*float(scale_factor))
+ );
+}
+
+/***********************************************************************
+ * Convert items32 sc8 buffer to complex double
+ **********************************************************************/
+static UHD_INLINE void item32_sc8_to_fc64(item32_t item, fc64_t &out0, fc64_t &out1, double scale_factor){
+ out0 = fc64_t(
+ float(boost::int8_t(item >> 8)*scale_factor),
+ float(boost::int8_t(item >> 0)*scale_factor)
+ );
+ out1 = fc64_t(
+ float(boost::int8_t(item >> 24)*scale_factor),
+ float(boost::int8_t(item >> 16)*scale_factor)
+ );
+}
+
+/***********************************************************************
+ * Convert complex char to items32 sc8 buffer
+ **********************************************************************/
+static UHD_INLINE item32_t sc8_to_item32_sc8(sc8_t in0, sc8_t in1, double){
+ return
+ (item32_t(boost::uint8_t(in0.real())) << 8) |
+ (item32_t(boost::uint8_t(in0.imag())) << 0) |
+ (item32_t(boost::uint8_t(in1.real())) << 24) |
+ (item32_t(boost::uint8_t(in1.imag())) << 16)
+ ;
+}
+
+/***********************************************************************
+ * Convert complex short to items32 sc8 buffer
+ **********************************************************************/
+static UHD_INLINE item32_t sc16_to_item32_sc8(sc16_t in0, sc16_t in1, double){
+ return
+ (item32_t(boost::uint8_t(in0.real())) << 8) |
+ (item32_t(boost::uint8_t(in0.imag())) << 0) |
+ (item32_t(boost::uint8_t(in1.real())) << 24) |
+ (item32_t(boost::uint8_t(in1.imag())) << 16)
+ ;
+}
+
+/***********************************************************************
+ * Convert complex float to items32 sc8 buffer
+ **********************************************************************/
+static UHD_INLINE item32_t fc32_to_item32_sc8(fc32_t in0, fc32_t in1, double scale_factor){
+ return
+ (item32_t(boost::uint8_t(in0.real()*float(scale_factor))) << 8) |
+ (item32_t(boost::uint8_t(in0.imag()*float(scale_factor))) << 0) |
+ (item32_t(boost::uint8_t(in1.real()*float(scale_factor))) << 24) |
+ (item32_t(boost::uint8_t(in1.imag()*float(scale_factor))) << 16)
+ ;
+}
+
+/***********************************************************************
+ * Convert complex double to items32 sc8 buffer
+ **********************************************************************/
+static UHD_INLINE item32_t fc64_to_item32_sc8(fc64_t in0, fc64_t in1, double scale_factor){
+ return
+ (item32_t(boost::uint8_t(in0.real()*(scale_factor))) << 8) |
+ (item32_t(boost::uint8_t(in0.imag()*(scale_factor))) << 0) |
+ (item32_t(boost::uint8_t(in1.real()*(scale_factor))) << 24) |
+ (item32_t(boost::uint8_t(in1.imag()*(scale_factor))) << 16)
+ ;
+}
+
+#endif /* INCLUDED_LIBUHD_CONVERT_COMMON_HPP */
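
A small usage sketch of the helpers above, assuming it is compiled inside the libuhd source tree so that this header and its UHD dependencies resolve (the file name and variables are illustrative): one complex float is packed into the item32 wire word with the real part in the upper 16 bits, then unpacked again.

    // round_trip_sketch.cpp -- illustrative use of the convert_common.hpp helpers
    #include "convert_common.hpp"
    #include <cstdio>

    int main(void){
        const fc32_t in(0.5f, -0.25f);

        //host float -> 16-bit I/Q packed into one item32 (real in the upper half)
        const item32_t item = fc32_to_item32_sc16(in, 32767.0);

        //item32 -> host float, undoing the scale factor
        const fc32_t out = item32_sc16_to_fc32(item, 1.0/32767.0);

        std::printf("%f %f\n", out.real(), out.imag()); //~0.5 -0.25, up to quantization
        return 0;
    }
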
diff --git a/host/lib/convert/convert_fc32_to_sc8_with_sse2.cpp b/host/lib/convert/convert_fc32_to_sc8_with_sse2.cpp
new file mode 100644
index 000000000..b633f487c
--- /dev/null
+++ b/host/lib/convert/convert_fc32_to_sc8_with_sse2.cpp
@@ -0,0 +1,150 @@
+//
+// Copyright 2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+UHD_INLINE __m128i pack_sc32_4x_be(
+ const __m128 &in0, const __m128 &in1,
+ const __m128 &in2, const __m128 &in3,
+ const __m128 &scalar
+){
+ __m128i tmpi0 = _mm_cvtps_epi32(_mm_mul_ps(in0, scalar));
+ tmpi0 = _mm_shuffle_epi32(tmpi0, _MM_SHUFFLE(1, 0, 3, 2));
+ __m128i tmpi1 = _mm_cvtps_epi32(_mm_mul_ps(in1, scalar));
+ tmpi1 = _mm_shuffle_epi32(tmpi1, _MM_SHUFFLE(1, 0, 3, 2));
+ const __m128i lo = _mm_packs_epi32(tmpi0, tmpi1);
+
+ __m128i tmpi2 = _mm_cvtps_epi32(_mm_mul_ps(in2, scalar));
+ tmpi2 = _mm_shuffle_epi32(tmpi2, _MM_SHUFFLE(1, 0, 3, 2));
+ __m128i tmpi3 = _mm_cvtps_epi32(_mm_mul_ps(in3, scalar));
+ tmpi3 = _mm_shuffle_epi32(tmpi3, _MM_SHUFFLE(1, 0, 3, 2));
+ const __m128i hi = _mm_packs_epi32(tmpi2, tmpi3);
+
+ return _mm_packs_epi16(lo, hi);
+}
+
+UHD_INLINE __m128i pack_sc32_4x_le(
+ const __m128 &in0, const __m128 &in1,
+ const __m128 &in2, const __m128 &in3,
+ const __m128 &scalar
+){
+ __m128i tmpi0 = _mm_cvtps_epi32(_mm_mul_ps(in0, scalar));
+ tmpi0 = _mm_shuffle_epi32(tmpi0, _MM_SHUFFLE(2, 3, 0, 1));
+ __m128i tmpi1 = _mm_cvtps_epi32(_mm_mul_ps(in1, scalar));
+ tmpi1 = _mm_shuffle_epi32(tmpi1, _MM_SHUFFLE(2, 3, 0, 1));
+ const __m128i lo = _mm_packs_epi32(tmpi0, tmpi1);
+
+ __m128i tmpi2 = _mm_cvtps_epi32(_mm_mul_ps(in2, scalar));
+ tmpi2 = _mm_shuffle_epi32(tmpi2, _MM_SHUFFLE(2, 3, 0, 1));
+ __m128i tmpi3 = _mm_cvtps_epi32(_mm_mul_ps(in3, scalar));
+ tmpi3 = _mm_shuffle_epi32(tmpi3, _MM_SHUFFLE(2, 3, 0, 1));
+ const __m128i hi = _mm_packs_epi32(tmpi2, tmpi3);
+
+ return _mm_packs_epi16(lo, hi);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_be, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+
+ #define convert_fc32_1_to_sc8_item32_1_bswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128 tmp0 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmp1 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ __m128 tmp2 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+4)); \
+ __m128 tmp3 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+6)); \
+ \
+ /* convert */ \
+ const __m128i tmpi = pack_sc32_4x_be(tmp0, tmp1, tmp2, tmp3, scalar); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc32_1_to_sc8_item32_1_bswap_guts(_)
+ }
+ else{
+ convert_fc32_1_to_sc8_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ const size_t num_pairs = nsamps/2;
+ for (size_t j = i/2; j < num_pairs; j++, i+=2){
+ const item32_t item = fc32_to_item32_sc8(input[i], input[i+1], scale_factor);
+ output[j] = uhd::byteswap(item);
+ }
+
+ if (nsamps != num_pairs*2){
+ const item32_t item = fc32_to_item32_sc8(input[nsamps-1], 0, scale_factor);
+ output[num_pairs] = uhd::byteswap(item);
+ }
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_le, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+
+ #define convert_fc32_1_to_sc8_item32_1_nswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128 tmp0 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmp1 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ __m128 tmp2 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+4)); \
+ __m128 tmp3 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+6)); \
+ \
+ /* convert */ \
+ const __m128i tmpi = pack_sc32_4x_le(tmp0, tmp1, tmp2, tmp3, scalar); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc32_1_to_sc8_item32_1_nswap_guts(_)
+ }
+ else{
+ convert_fc32_1_to_sc8_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ const size_t num_pairs = nsamps/2;
+ for (size_t j = i/2; j < num_pairs; j++, i+=2){
+ const item32_t item = fc32_to_item32_sc8(input[i], input[i+1], scale_factor);
+ output[j] = (item);
+ }
+
+ if (nsamps != num_pairs*2){
+ const item32_t item = fc32_to_item32_sc8(input[nsamps-1], 0, scale_factor);
+ output[num_pairs] = (item);
+ }
+}
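
The vector loops above consume eight complex samples per iteration and emit four item32 words because each sc8 item32 carries two complex samples; the scalar tail already spells this out. For reference, the whole conversion collapses to the following plain loop (a sketch built only from the convert_common.hpp helpers; the function name is illustrative and the big-endian variant is shown):

    #include "convert_common.hpp"
    #include <uhd/utils/byteswap.hpp>

    //scalar reference for fc32 -> sc8_item32_be: two complex samples per output word
    static void fc32_to_sc8_item32_be_ref(
        const fc32_t *in, item32_t *out, const size_t nsamps, const double scale_factor
    ){
        const size_t num_pairs = nsamps/2;
        for (size_t i = 0, j = 0; j < num_pairs; j++, i += 2){
            out[j] = uhd::byteswap(fc32_to_item32_sc8(in[i], in[i+1], scale_factor));
        }
        if (nsamps != num_pairs*2){ //odd tail: the second slot of the last word is zero
            out[num_pairs] = uhd::byteswap(fc32_to_item32_sc8(in[nsamps-1], 0, scale_factor));
        }
    }
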
diff --git a/host/lib/convert/convert_fc32_with_sse2.cpp b/host/lib/convert/convert_fc32_with_sse2.cpp
new file mode 100644
index 000000000..97a3e8cdc
--- /dev/null
+++ b/host/lib/convert/convert_fc32_with_sse2.cpp
@@ -0,0 +1,196 @@
+//
+// Copyright 2011 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_le, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+
+ #define convert_fc32_1_to_item32_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128 tmplo = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmphi = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpilo = _mm_cvtps_epi32(_mm_mul_ps(tmplo, scalar)); \
+ __m128i tmpihi = _mm_cvtps_epi32(_mm_mul_ps(tmphi, scalar)); \
+ \
+ /* pack + swap 16-bit pairs */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ switch (size_t(input) & 0xf){
+ case 0x8:
+ output[i] = fc32_to_item32_sc16(input[i], float(scale_factor)); i++;
+ case 0x0:
+ convert_fc32_1_to_item32_1_nswap_guts(_)
+ break;
+ default: convert_fc32_1_to_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = fc32_to_item32_sc16(input[i], float(scale_factor));
+ }
+}
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_be, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+
+ #define convert_fc32_1_to_item32_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128 tmplo = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmphi = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpilo = _mm_cvtps_epi32(_mm_mul_ps(tmplo, scalar)); \
+ __m128i tmpihi = _mm_cvtps_epi32(_mm_mul_ps(tmphi, scalar)); \
+ \
+ /* pack + byteswap -> byteswap 16 bit words */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ switch (size_t(input) & 0xf){
+ case 0x8:
+ output[i] = uhd::byteswap(fc32_to_item32_sc16(input[i], float(scale_factor))); i++;
+ case 0x0:
+ convert_fc32_1_to_item32_1_bswap_guts(_)
+ break;
+ default: convert_fc32_1_to_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = uhd::byteswap(fc32_to_item32_sc16(input[i], float(scale_factor)));
+ }
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor)/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc32_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack + swap 16-bit pairs */ \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128 tmplo = _mm_mul_ps(_mm_cvtepi32_ps(tmpilo), scalar); \
+ __m128 tmphi = _mm_mul_ps(_mm_cvtepi32_ps(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+0), tmplo); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+2), tmphi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ switch (size_t(output) & 0xf){
+ case 0x8:
+ output[i] = item32_sc16_to_fc32(input[i], float(scale_factor)); i++;
+ case 0x0:
+ convert_item32_1_to_fc32_1_nswap_guts(_)
+ break;
+ default: convert_item32_1_to_fc32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = item32_sc16_to_fc32(input[i], float(scale_factor));
+ }
+}
+
+DECLARE_CONVERTER(sc16_item32_be, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor)/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc32_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* byteswap + unpack -> byteswap 16 bit words */ \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128 tmplo = _mm_mul_ps(_mm_cvtepi32_ps(tmpilo), scalar); \
+ __m128 tmphi = _mm_mul_ps(_mm_cvtepi32_ps(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+0), tmplo); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+2), tmphi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ switch (size_t(output) & 0xf){
+ case 0x8:
+ output[i] = item32_sc16_to_fc32(uhd::byteswap(input[i]), float(scale_factor)); i++;
+ case 0x0:
+ convert_item32_1_to_fc32_1_bswap_guts(_)
+ break;
+ default: convert_item32_1_to_fc32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = item32_sc16_to_fc32(uhd::byteswap(input[i]), float(scale_factor));
+ }
+}
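
The switch on size_t(input) & 0xf in the converters above peels one sample off the front when the buffer is 8-byte but not 16-byte aligned, so the main loop can run with aligned loads; anything else falls back to unaligned loads. The idea in isolation (a sketch with an illustrative function name, independent of the converter framework):

    #include <emmintrin.h>
    #include <complex>
    #include <stddef.h>

    //complex<float> buffers are at least 8-byte aligned, so the pointer is either
    //16-byte aligned or exactly one sample short of it
    void dispatch_sketch(const std::complex<float> *input, const size_t nsamps){
        size_t i = 0;
        switch (reinterpret_cast<size_t>(input) & 0xf){
        case 0x8:
            i++; //convert input[0] with the scalar helper, then...
            //...fall through: input+i is now 16-byte aligned
        case 0x0:
            for (; i+3 < nsamps; i+=4){
                const __m128 v = _mm_load_ps(reinterpret_cast<const float *>(input+i)); //aligned
                (void)v; //convert, pack, and store as in the real loop body
            }
            break;
        default: //offset is neither 0 nor 8: use unaligned loads throughout
            for (; i+3 < nsamps; i+=4){
                const __m128 v = _mm_loadu_ps(reinterpret_cast<const float *>(input+i));
                (void)v;
            }
            break;
        }
        //samples i..nsamps-1 are left for the scalar tail loop
    }
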
diff --git a/host/lib/convert/convert_fc64_to_sc8_with_sse2.cpp b/host/lib/convert/convert_fc64_to_sc8_with_sse2.cpp
new file mode 100644
index 000000000..405850601
--- /dev/null
+++ b/host/lib/convert/convert_fc64_to_sc8_with_sse2.cpp
@@ -0,0 +1,156 @@
+//
+// Copyright 2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+UHD_INLINE __m128i pack_sc8_item32_4x(
+ const __m128i &in0, const __m128i &in1,
+ const __m128i &in2, const __m128i &in3
+){
+ const __m128i lo = _mm_packs_epi32(in0, in1);
+ const __m128i hi = _mm_packs_epi32(in2, in3);
+ return _mm_packs_epi16(lo, hi);
+}
+
+UHD_INLINE __m128i pack_sc32_4x_be(
+ const __m128d &lo, const __m128d &hi,
+ const __m128d &scalar
+){
+ const __m128i tmpi_lo = _mm_cvttpd_epi32(_mm_mul_pd(hi, scalar));
+ const __m128i tmpi_hi = _mm_cvttpd_epi32(_mm_mul_pd(lo, scalar));
+ return _mm_unpacklo_epi64(tmpi_lo, tmpi_hi);
+}
+
+UHD_INLINE __m128i pack_sc32_4x_le(
+ const __m128d &lo, const __m128d &hi,
+ const __m128d &scalar
+){
+ const __m128i tmpi_lo = _mm_cvttpd_epi32(_mm_mul_pd(lo, scalar));
+ const __m128i tmpi_hi = _mm_cvttpd_epi32(_mm_mul_pd(hi, scalar));
+ const __m128i tmpi = _mm_unpacklo_epi64(tmpi_lo, tmpi_hi);
+ return _mm_shuffle_epi32(tmpi, _MM_SHUFFLE(2, 3, 0, 1));
+}
+
+DECLARE_CONVERTER(fc64, 1, sc8_item32_be, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_sc8_item32_1_bswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ __m128d tmp4 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+4)); \
+ __m128d tmp5 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+5)); \
+ __m128d tmp6 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+6)); \
+ __m128d tmp7 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+7)); \
+ \
+ /* interleave */ \
+ const __m128i tmpi = pack_sc8_item32_4x( \
+ pack_sc32_4x_be(tmp0, tmp1, scalar), \
+ pack_sc32_4x_be(tmp2, tmp3, scalar), \
+ pack_sc32_4x_be(tmp4, tmp5, scalar), \
+ pack_sc32_4x_be(tmp6, tmp7, scalar) \
+ ); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_sc8_item32_1_bswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_sc8_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ const size_t num_pairs = nsamps/2;
+ for (size_t j = i/2; j < num_pairs; j++, i+=2){
+ const item32_t item = fc64_to_item32_sc8(input[i], input[i+1], scale_factor);
+ output[j] = uhd::byteswap(item);
+ }
+
+ if (nsamps != num_pairs*2){
+ const item32_t item = fc64_to_item32_sc8(input[nsamps-1], 0, scale_factor);
+ output[num_pairs] = uhd::byteswap(item);
+ }
+}
+
+DECLARE_CONVERTER(fc64, 1, sc8_item32_le, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_sc8_item32_1_nswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ __m128d tmp4 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+4)); \
+ __m128d tmp5 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+5)); \
+ __m128d tmp6 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+6)); \
+ __m128d tmp7 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+7)); \
+ \
+ /* interleave */ \
+ const __m128i tmpi = pack_sc8_item32_4x( \
+ pack_sc32_4x_le(tmp0, tmp1, scalar), \
+ pack_sc32_4x_le(tmp2, tmp3, scalar), \
+ pack_sc32_4x_le(tmp4, tmp5, scalar), \
+ pack_sc32_4x_le(tmp6, tmp7, scalar) \
+ ); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_sc8_item32_1_nswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_sc8_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ const size_t num_pairs = nsamps/2;
+ for (size_t j = i/2; j < num_pairs; j++, i+=2){
+ const item32_t item = fc64_to_item32_sc8(input[i], input[i+1], scale_factor);
+ output[j] = (item);
+ }
+
+ if (nsamps != num_pairs*2){
+ const item32_t item = fc64_to_item32_sc8(input[nsamps-1], 0, scale_factor);
+ output[num_pairs] = (item);
+ }
+}
diff --git a/host/lib/convert/convert_fc64_with_sse2.cpp b/host/lib/convert/convert_fc64_with_sse2.cpp
new file mode 100644
index 000000000..6e097e380
--- /dev/null
+++ b/host/lib/convert/convert_fc64_with_sse2.cpp
@@ -0,0 +1,212 @@
+//
+// Copyright 2011 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(fc64, 1, sc16_item32_le, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_item32_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpi0 = _mm_cvttpd_epi32(_mm_mul_pd(tmp0, scalar)); \
+ __m128i tmpi1 = _mm_cvttpd_epi32(_mm_mul_pd(tmp1, scalar)); \
+ __m128i tmpilo = _mm_unpacklo_epi64(tmpi0, tmpi1); \
+ __m128i tmpi2 = _mm_cvttpd_epi32(_mm_mul_pd(tmp2, scalar)); \
+ __m128i tmpi3 = _mm_cvttpd_epi32(_mm_mul_pd(tmp3, scalar)); \
+ __m128i tmpihi = _mm_unpacklo_epi64(tmpi2, tmpi3); \
+ \
+ /* pack + swap 16-bit pairs */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_item32_1_nswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = fc64_to_item32_sc16(input[i], scale_factor);
+ }
+}
+
+DECLARE_CONVERTER(fc64, 1, sc16_item32_be, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_item32_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpi0 = _mm_cvttpd_epi32(_mm_mul_pd(tmp0, scalar)); \
+ __m128i tmpi1 = _mm_cvttpd_epi32(_mm_mul_pd(tmp1, scalar)); \
+ __m128i tmpilo = _mm_unpacklo_epi64(tmpi0, tmpi1); \
+ __m128i tmpi2 = _mm_cvttpd_epi32(_mm_mul_pd(tmp2, scalar)); \
+ __m128i tmpi3 = _mm_cvttpd_epi32(_mm_mul_pd(tmp3, scalar)); \
+ __m128i tmpihi = _mm_unpacklo_epi64(tmpi2, tmpi3); \
+ \
+ /* pack + byteswap -> byteswap 16 bit words */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_item32_1_bswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = uhd::byteswap(fc64_to_item32_sc16(input[i], scale_factor));
+ }
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc64, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc64_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack + swap 16-bit pairs */ \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128d tmp0 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ tmpilo = _mm_unpackhi_epi64(tmpilo, zeroi); \
+ __m128d tmp1 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ __m128d tmp2 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ tmpihi = _mm_unpackhi_epi64(tmpihi, zeroi); \
+ __m128d tmp3 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+0), tmp0); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+1), tmp1); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+2), tmp2); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+3), tmp3); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_item32_1_to_fc64_1_nswap_guts(_)
+ }
+ else{
+ convert_item32_1_to_fc64_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = item32_sc16_to_fc64(input[i], scale_factor);
+ }
+}
+
+DECLARE_CONVERTER(sc16_item32_be, 1, fc64, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc64_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* byteswap + unpack -> byteswap 16 bit words */ \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128d tmp0 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ tmpilo = _mm_unpackhi_epi64(tmpilo, zeroi); \
+ __m128d tmp1 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ __m128d tmp2 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ tmpihi = _mm_unpackhi_epi64(tmpihi, zeroi); \
+ __m128d tmp3 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+0), tmp0); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+1), tmp1); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+2), tmp2); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+3), tmp3); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_item32_1_to_fc64_1_bswap_guts(_)
+ }
+ else{
+ convert_item32_1_to_fc64_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ for (; i < nsamps; i++){
+ output[i] = item32_sc16_to_fc64(uhd::byteswap(input[i]), scale_factor);
+ }
+}
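
In the item32-to-float directions above, _mm_unpacklo_epi16(zeroi, tmpi) leaves each 16-bit sample in the upper half of its 32-bit lane, i.e. multiplied by 2^16, which is why the scalar constant is built as scale_factor/(1 << 16). A tiny arithmetic sketch of that identity (standalone, illustrative names):

    #include <stdint.h>
    #include <cstdio>

    int main(void){
        const int16_t sample = -1234;
        const double scale_factor = 1.0/32767.0;

        //what unpacking against zero produces: the sample sitting in the upper 16 bits
        const int32_t widened = sample * 65536;

        const double direct = sample * scale_factor;                 //plain scaling
        const double folded = widened * (scale_factor / (1 << 16));  //scaling with the 2^16 folded in

        std::printf("%f %f\n", direct, folded); //identical
        return 0;
    }
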
diff --git a/host/lib/convert/convert_impl.cpp b/host/lib/convert/convert_impl.cpp
new file mode 100644
index 000000000..12ad54486
--- /dev/null
+++ b/host/lib/convert/convert_impl.cpp
@@ -0,0 +1,143 @@
+//
+// Copyright 2011 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include <uhd/convert.hpp>
+#include <uhd/utils/log.hpp>
+#include <uhd/utils/static.hpp>
+#include <uhd/types/dict.hpp>
+#include <uhd/exception.hpp>
+#include <boost/cstdint.hpp>
+#include <boost/format.hpp>
+#include <complex>
+
+using namespace uhd;
+
+bool convert::operator==(const convert::id_type &lhs, const convert::id_type &rhs){
+ return true
+ and (lhs.input_format == rhs.input_format)
+ and (lhs.num_inputs == rhs.num_inputs)
+ and (lhs.output_format == rhs.output_format)
+ and (lhs.num_outputs == rhs.num_outputs)
+ ;
+}
+
+std::string convert::id_type::to_pp_string(void) const{
+ return str(boost::format(
+ "conversion ID\n"
+ " Input format: %s\n"
+ " Num inputs: %d\n"
+ " Output format: %s\n"
+ " Num outputs: %d\n"
+ )
+ % this->input_format
+ % this->num_inputs
+ % this->output_format
+ % this->num_outputs
+ );
+}
+
+/***********************************************************************
+ * Define types for the function tables
+ **********************************************************************/
+struct fcn_table_entry_type{
+ convert::priority_type prio;
+ convert::function_type fcn;
+};
+
+/***********************************************************************
+ * Setup the table registry
+ **********************************************************************/
+typedef uhd::dict<convert::id_type, fcn_table_entry_type> fcn_table_type;
+UHD_SINGLETON_FCN(fcn_table_type, get_table);
+
+/***********************************************************************
+ * The registry functions
+ **********************************************************************/
+void uhd::convert::register_converter(
+ const id_type &id,
+ const function_type &fcn,
+ const priority_type prio
+){
+ //get a reference to the function table
+ fcn_table_type &table = get_table();
+
+ //register the function if higher priority
+ if (not table.has_key(id) or table[id].prio < prio){
+ table[id].fcn = fcn;
+ table[id].prio = prio;
+ }
+
+ //----------------------------------------------------------------//
+ UHD_LOGV(always) << "register_converter: " << id.to_pp_string() << std::endl
+ << " prio: " << prio << std::endl
+ << std::endl
+ ;
+ //----------------------------------------------------------------//
+}
+
+/***********************************************************************
+ * The converter functions
+ **********************************************************************/
+convert::function_type convert::get_converter(const id_type &id){
+ if (get_table().has_key(id)) return get_table()[id].fcn;
+ throw uhd::key_error("Cannot find a conversion routine for " + id.to_pp_string());
+}
+
+/***********************************************************************
+ * Mappings for item format to byte size for all items we can
+ **********************************************************************/
+typedef uhd::dict<std::string, size_t> item_size_type;
+UHD_SINGLETON_FCN(item_size_type, get_item_size_table);
+
+void convert::register_bytes_per_item(
+ const std::string &format, const size_t size
+){
+ get_item_size_table()[format] = size;
+}
+
+size_t convert::get_bytes_per_item(const std::string &format){
+ if (get_item_size_table().has_key(format)) return get_item_size_table()[format];
+
+ //OK. I am sorry about this.
+    //We didn't find a match, so let's find a match for the first term.
+ //This is partially a hack because of the way I append strings.
+ //But as long as life is kind, we can keep this.
+ const size_t pos = format.find("_");
+ if (pos != std::string::npos){
+ return get_bytes_per_item(format.substr(0, pos));
+ }
+
+ throw uhd::key_error("Cannot find an item size:\n" + format);
+}
+
+UHD_STATIC_BLOCK(convert_register_item_sizes){
+ //register standard complex types
+ convert::register_bytes_per_item("fc64", sizeof(std::complex<double>));
+ convert::register_bytes_per_item("fc32", sizeof(std::complex<float>));
+ convert::register_bytes_per_item("sc64", sizeof(std::complex<boost::int64_t>));
+ convert::register_bytes_per_item("sc32", sizeof(std::complex<boost::int32_t>));
+ convert::register_bytes_per_item("sc16", sizeof(std::complex<boost::int16_t>));
+ convert::register_bytes_per_item("sc8", sizeof(std::complex<boost::int8_t>));
+
+ //register standard real types
+ convert::register_bytes_per_item("f64", sizeof(double));
+ convert::register_bytes_per_item("f32", sizeof(float));
+ convert::register_bytes_per_item("s64", sizeof(boost::int64_t));
+ convert::register_bytes_per_item("s32", sizeof(boost::int32_t));
+ convert::register_bytes_per_item("s16", sizeof(boost::int16_t));
+ convert::register_bytes_per_item("s8", sizeof(boost::int8_t));
+}
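
The underscore fallback in get_bytes_per_item means a full wire-format name such as "sc16_item32_le" resolves to the size registered for its first term. A usage sketch, assuming a program linked against libuhd so the static registration block above has already run:

    #include <uhd/convert.hpp>
    #include <iostream>

    int main(void){
        //registered directly by the static block: sizeof(std::complex<int16_t>) == 4
        std::cout << uhd::convert::get_bytes_per_item("sc16") << std::endl;           //4

        //not registered as-is: falls back to the substring before the first '_'
        std::cout << uhd::convert::get_bytes_per_item("sc16_item32_le") << std::endl; //4
        return 0;
    }
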
diff --git a/host/lib/convert/convert_orc.orc b/host/lib/convert/convert_orc.orc
new file mode 100644
index 000000000..f7075606e
--- /dev/null
+++ b/host/lib/convert/convert_orc.orc
@@ -0,0 +1,80 @@
+.function _convert_fc32_1_to_item32_1_nswap_orc
+.source 8 src
+.dest 4 dst
+.floatparam 4 scalar
+.temp 8 scaled
+.temp 8 converted
+.temp 4 short
+x2 mulf scaled, src, scalar
+x2 convfl converted, scaled
+x2 convlw short, converted
+swapl short, short
+x2 swapw dst, short
+
+.function _convert_fc32_1_to_item32_1_bswap_orc
+.source 8 src
+.dest 4 dst
+.floatparam 4 scalar
+.temp 8 scaled
+.temp 8 converted
+.temp 4 short
+x2 mulf scaled, src, scalar
+x2 convfl converted, scaled
+x2 convlw short, converted
+x2 swapw dst, short
+
+.function _convert_item32_1_to_fc32_1_nswap_orc
+.source 4 src
+.dest 8 dst
+.floatparam 4 scalar
+.temp 4 tmp1
+.temp 8 tmp2
+x2 swapw tmp1, src
+swapl tmp1, tmp1
+x2 convswl tmp2, tmp1
+x2 convlf tmp2, tmp2
+x2 mulf dst, tmp2, scalar
+
+.function _convert_item32_1_to_fc32_1_bswap_orc
+.source 4 src
+.dest 8 dst
+.floatparam 4 scalar
+.temp 4 tmp1
+.temp 8 tmp2
+x2 swapw tmp1, src
+x2 convswl tmp2, tmp1
+x2 convlf tmp2, tmp2
+x2 mulf dst, tmp2, scalar
+
+.function _convert_sc16_1_to_item32_1_nswap_orc
+.source 4 src
+.dest 4 dst
+.temp 4 tmp
+.floatparam 4 scalar
+swapl tmp, src
+x2 swapw dst, tmp
+
+.function _convert_item32_1_to_sc16_1_nswap_orc
+.source 4 src
+.dest 4 dst
+.floatparam 4 scalar
+.temp 4 tmp
+x2 swapw tmp, src
+swapl dst, tmp
+
+.function _convert_swap_byte_pairs_orc
+.source 4 src
+.dest 4 dst
+swapl dst, src
+
+.function _convert_fc32_1_to_sc8_1_nswap_orc
+.source 8 src
+.dest 2 dst
+.temp 8 tmp
+.temp 4 tmp2
+.floatparam 4 scalar
+x2 mulf tmp, src, scalar
+x2 convfl tmp, tmp
+swaplq tmp, tmp
+x2 convlw tmp2, tmp
+x2 convwb dst, tmp2
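
On a little-endian host the swapl followed by x2 swapw in _convert_fc32_1_to_item32_1_nswap_orc amounts to exchanging the two 16-bit halves of the word, matching fc32_to_item32_sc16 in convert_common.hpp. A scalar model of one iteration of that kernel (illustrative function name; the exact rounding and saturation behaviour of the Orc opcodes is glossed over):

    #include <stdint.h>
    #include <complex>

    //what one src/dst pair of _convert_fc32_1_to_item32_1_nswap_orc computes
    uint32_t fc32_to_item32_model(const std::complex<float> &src, const float scalar){
        const int16_t i16 = int16_t(src.real() * scalar); //mulf + convfl + convlw
        const int16_t q16 = int16_t(src.imag() * scalar);
        //swapl then x2 swapw == exchange the 16-bit halves on a little-endian host
        return (uint32_t(uint16_t(i16)) << 16) | uint32_t(uint16_t(q16));
    }
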
diff --git a/host/lib/convert/convert_with_neon.cpp b/host/lib/convert/convert_with_neon.cpp
new file mode 100644
index 000000000..c7ad62104
--- /dev/null
+++ b/host/lib/convert/convert_with_neon.cpp
@@ -0,0 +1,61 @@
+//
+// Copyright 2011 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <arm_neon.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_le, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ size_t i;
+
+ float32x4_t Q0 = vdupq_n_f32(float(scale_factor));
+ for (i=0; i < (nsamps & ~0x03); i+=2) {
+ float32x4_t Q1 = vld1q_f32(reinterpret_cast<const float *>(&input[i]));
+ float32x4_t Q2 = vmulq_f32(Q1, Q0);
+ int32x4_t Q3 = vcvtq_s32_f32(Q2);
+ int16x4_t D8 = vmovn_s32(Q3);
+ int16x4_t D9 = vrev32_s16(D8);
+ vst1_s16((reinterpret_cast<int16_t *>(&output[i])), D9);
+ }
+
+ for (; i < nsamps; i++)
+ output[i] = fc32_to_item32_sc16(input[i], scale_factor);
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ size_t i;
+
+ float32x4_t Q1 = vdupq_n_f32(float(scale_factor));
+ for (i=0; i < (nsamps & ~0x03); i+=2) {
+ int16x4_t D0 = vld1_s16(reinterpret_cast<const int16_t *>(&input[i]));
+ int16x4_t D1 = vrev32_s16(D0);
+ int32x4_t Q2 = vmovl_s16(D1);
+ float32x4_t Q3 = vcvtq_f32_s32(Q2);
+ float32x4_t Q4 = vmulq_f32(Q3, Q1);
+ vst1q_f32((reinterpret_cast<float *>(&output[i])), Q4);
+ }
+
+ for (; i < nsamps; i++)
+ output[i] = item32_sc16_to_fc32(input[i], scale_factor);
+}
diff --git a/host/lib/convert/convert_with_orc.cpp b/host/lib/convert/convert_with_orc.cpp
new file mode 100644
index 000000000..e44c8ca73
--- /dev/null
+++ b/host/lib/convert/convert_with_orc.cpp
@@ -0,0 +1,65 @@
+//
+// Copyright 2011 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+
+using namespace uhd::convert;
+
+extern "C" {
+extern void _convert_fc32_1_to_item32_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_fc32_1_to_item32_1_bswap_orc(void *, const void *, float, int);
+extern void _convert_item32_1_to_fc32_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_item32_1_to_fc32_1_bswap_orc(void *, const void *, float, int);
+extern void _convert_sc16_1_to_item32_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_item32_1_to_sc16_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_fc32_1_to_sc8_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_swap_byte_pairs_orc(void *, const void *, int);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_le, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_item32_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_be, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_item32_1_bswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc32, 1, PRIORITY_LIBORC){
+ _convert_item32_1_to_fc32_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16_item32_be, 1, fc32, 1, PRIORITY_LIBORC){
+ _convert_item32_1_to_fc32_1_bswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16, 1, sc16_item32_le, 1, PRIORITY_LIBORC){
+ _convert_sc16_1_to_item32_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, sc16, 1, PRIORITY_LIBORC){
+ _convert_item32_1_to_sc16_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_be, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_sc8_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+ _convert_swap_byte_pairs_orc(outputs[0], outputs[0], (nsamps + 1)/2);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_le, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_sc8_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
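
Several of the IDs declared here (for example fc32 to sc16_item32_le) are also registered by the SSE2/NEON and table converters; register_converter in convert_impl.cpp keeps whichever registration carries the highest priority, so the per-platform constants in convert_common.hpp decide which implementation ends up in the dispatch table. A minimal sketch of that keep-the-highest rule (plain std::map and string keys stand in for uhd::dict and convert::id_type; all names here are illustrative):

    #include <map>
    #include <string>
    #include <iostream>

    struct entry{ int prio; std::string impl; };
    static std::map<std::string, entry> table;

    static void register_sketch(const std::string &id, const std::string &impl, const int prio){
        if (table.count(id) == 0 or table[id].prio < prio){ //register only if higher priority
            table[id].impl = impl;
            table[id].prio = prio;
        }
    }

    int main(void){
        register_sketch("fc32->sc16_item32_le", "general", 0); //PRIORITY_GENERAL
        register_sketch("fc32->sc16_item32_le", "liborc",  1); //PRIORITY_LIBORC on x86
        register_sketch("fc32->sc16_item32_le", "sse2",    2); //PRIORITY_SIMD on x86
        std::cout << table["fc32->sc16_item32_le"].impl << std::endl; //prints "sse2"
        return 0;
    }
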
diff --git a/host/lib/convert/convert_with_tables.cpp b/host/lib/convert/convert_with_tables.cpp
new file mode 100644
index 000000000..cd7773d4b
--- /dev/null
+++ b/host/lib/convert/convert_with_tables.cpp
@@ -0,0 +1,282 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <boost/math/special_functions/round.hpp>
+#include <vector>
+
+using namespace uhd::convert;
+
+static const size_t sc16_table_len = size_t(1 << 16);
+
+typedef boost::uint16_t (*tohost16_type)(boost::uint16_t);
+
+/***********************************************************************
+ * Implementation for sc16 to sc8 lookup table
+ * - Lookup the real and imaginary parts individually
+ **********************************************************************/
+template <bool swap>
+class convert_sc16_1_to_sc8_item32_1 : public converter{
+public:
+ convert_sc16_1_to_sc8_item32_1(void): _table(sc16_table_len){}
+
+ void set_scalar(const double scalar){
+ for (size_t i = 0; i < sc16_table_len; i++){
+ const boost::int16_t val = boost::uint16_t(i);
+ _table[i] = boost::int8_t(boost::math::iround(val * scalar / 32767.));
+ }
+ }
+
+ void operator()(const input_type &inputs, const output_type &outputs, const size_t nsamps){
+ const sc16_t *input = reinterpret_cast<const sc16_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const size_t num_pairs = nsamps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ output[i] = this->lookup(input[j], input[j+1]);
+ }
+
+ if (nsamps != num_pairs*2){
+ output[num_pairs] = this->lookup(input[nsamps-1], 0);
+ }
+ }
+
+ item32_t lookup(const sc16_t &in0, const sc16_t &in1){
+ if (swap){ //this branch should compile out: swap is a template constant
+ return
+ (item32_t(_table[boost::uint16_t(in0.real())]) << 16) |
+ (item32_t(_table[boost::uint16_t(in0.imag())]) << 24) |
+ (item32_t(_table[boost::uint16_t(in1.real())]) << 0) |
+ (item32_t(_table[boost::uint16_t(in1.imag())]) << 8) ;
+ }
+ return
+ (item32_t(_table[boost::uint16_t(in0.real())]) << 8) |
+ (item32_t(_table[boost::uint16_t(in0.imag())]) << 0) |
+ (item32_t(_table[boost::uint16_t(in1.real())]) << 24) |
+ (item32_t(_table[boost::uint16_t(in1.imag())]) << 16) ;
+ }
+
+private:
+ std::vector<boost::uint8_t> _table;
+};
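+
+//Worked example (illustrative, with a hypothetical scalar of 127.0): the
+//table entry for the full-scale input 0x7fff is
+//int8_t(iround(32767*127.0/32767.)) == 127 and the entry for 0x8000 (-32768)
+//is int8_t(iround(-32768*127.0/32767.)) == -127, so operator() reduces each
+//16-bit component to a signed 8-bit value with a single table read.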
+
+/***********************************************************************
+ * Implementation for sc16 lookup table
+ * - Lookup the real and imaginary parts individually
+ **********************************************************************/
+template <typename type, tohost16_type tohost, size_t re_shift, size_t im_shift>
+class convert_sc16_item32_1_to_fcxx_1 : public converter{
+public:
+ convert_sc16_item32_1_to_fcxx_1(void): _table(sc16_table_len){}
+
+ void set_scalar(const double scalar){
+ for (size_t i = 0; i < sc16_table_len; i++){
+ const boost::uint16_t val = tohost(boost::uint16_t(i & 0xffff));
+ _table[i] = type(boost::int16_t(val)*scalar);
+ }
+ }
+
+ void operator()(const input_type &inputs, const output_type &outputs, const size_t nsamps){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ std::complex<type> *output = reinterpret_cast<std::complex<type> *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ const item32_t item = input[i];
+ output[i] = std::complex<type>(
+ _table[boost::uint16_t(item >> re_shift)],
+ _table[boost::uint16_t(item >> im_shift)]
+ );
+ }
+ }
+
+private:
+ std::vector<type> _table;
+};
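+
+//The table above memoizes the per-component conversion sketched below
+//(illustrative, never called; the real table also folds the tohost()
+//byteswap into the index). The cost is 65536*sizeof(type) of storage per
+//instance (256 KiB for float, 512 KiB for double), refilled on every
+//set_scalar() call.
+template <typename type>
+static type item16_to_fcxx_sketch(const boost::uint16_t host_bits, const double scalar){
+ return type(boost::int16_t(host_bits)*scalar); //same math as set_scalar() above
+}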
+
+/***********************************************************************
+ * Implementation for sc8 lookup table
+ * - Lookup the real and imaginary parts together
+ **********************************************************************/
+template <typename type, tohost16_type tohost, size_t lo_shift, size_t hi_shift>
+class convert_sc8_item32_1_to_fcxx_1 : public converter{
+public:
+ convert_sc8_item32_1_to_fcxx_1(void): _table(sc16_table_len){}
+
+ //special case for the sc16 type: scaling by 32767 undoes the float normalization
+ static type conv(const boost::int8_t &num, const double scalar){
+ if (sizeof(type) == sizeof(s16_t)){
+ return type(boost::math::iround(num*scalar*32767));
+ }
+ return type(num*scalar);
+ }
+
+ void set_scalar(const double scalar){
+ for (size_t i = 0; i < sc16_table_len; i++){
+ const boost::uint16_t val = tohost(boost::uint16_t(i & 0xffff));
+ const type real = conv(boost::int8_t(val >> 8), scalar);
+ const type imag = conv(boost::int8_t(val >> 0), scalar);
+ _table[i] = std::complex<type>(real, imag);
+ }
+ }
+
+ void operator()(const input_type &inputs, const output_type &outputs, const size_t nsamps){
+ const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
+ std::complex<type> *output = reinterpret_cast<std::complex<type> *>(outputs[0]);
+
+ size_t num_samps = nsamps;
+
+ if ((size_t(inputs[0]) & 0x3) != 0){
+ const item32_t item0 = *input++;
+ *output++ = _table[boost::uint16_t(item0 >> hi_shift)];
+ num_samps--;
+ }
+
+ const size_t num_pairs = num_samps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ const item32_t item_i = (input[i]);
+ output[j] = _table[boost::uint16_t(item_i >> lo_shift)];
+ output[j + 1] = _table[boost::uint16_t(item_i >> hi_shift)];
+ }
+
+ if (num_samps != num_pairs*2){
+ const item32_t item_n = input[num_pairs];
+ output[num_samps-1] = _table[boost::uint16_t(item_n >> lo_shift)];
+ }
+ }
+
+private:
+ std::vector<std::complex<type> > _table;
+};
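+
+//Layout note for the converter above: each item32 word carries two complex
+//sc8 samples, and each 16-bit half (selected by lo_shift/hi_shift) indexes a
+//table entry precomputed in set_scalar() with the real part taken from the
+//high byte and the imaginary part from the low byte of the host-order
+//half-word. The (size_t(inputs[0]) & 0x3) test lets a buffer start in the
+//second half of a word: the pointer is rounded down to the word boundary and
+//the hi_shift half of that first word is emitted on its own before the pair
+//loop.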
+
+/***********************************************************************
+ * Factory functions and registration
+ **********************************************************************/
+
+#ifdef BOOST_BIG_ENDIAN
+# define SHIFT_PAIR0 16, 0
+# define SHIFT_PAIR1 0, 16
+# define BE_SWAP false
+# define LE_SWAP true
+#else
+# define SHIFT_PAIR0 0, 16
+# define SHIFT_PAIR1 16, 0
+# define BE_SWAP true
+# define LE_SWAP false
+#endif
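+
+//The per-component byteswap is folded into the tables through the tohost
+//template parameter, so the macros above only select, per host endianness,
+//which 16-bit half of the raw item32 word is read first (real vs. imaginary
+//for the sc16 table, first vs. second sample for the sc8 table) and whether
+//the sc16 -> sc8 packer must emit the byte-pair-swapped layout
+//(BE_SWAP/LE_SWAP).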
+
+static converter::sptr make_convert_sc16_item32_be_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<float, uhd::ntohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc16_item32_be_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<double, uhd::ntohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc16_item32_le_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<float, uhd::wtohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc16_item32_le_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<double, uhd::wtohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_be_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<float, uhd::ntohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_be_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<double, uhd::ntohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_le_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<float, uhd::wtohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc8_item32_le_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<double, uhd::wtohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc8_item32_be_1_to_sc16_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<s16_t, uhd::ntohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_le_1_to_sc16_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<s16_t, uhd::wtohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc16_1_to_sc8_item32_be_1(void){
+ return converter::sptr(new convert_sc16_1_to_sc8_item32_1<BE_SWAP>());
+}
+
+static converter::sptr make_convert_sc16_1_to_sc8_item32_le_1(void){
+ return converter::sptr(new convert_sc16_1_to_sc8_item32_1<LE_SWAP>());
+}
+
+UHD_STATIC_BLOCK(register_convert_sc16_item32_1_to_fcxx_1){
+ uhd::convert::id_type id;
+ id.num_inputs = 1;
+ id.num_outputs = 1;
+
+ id.output_format = "fc32";
+ id.input_format = "sc16_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_be_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc16_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_be_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "fc32";
+ id.input_format = "sc16_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_le_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc16_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_le_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "fc32";
+ id.input_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_be_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_be_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "fc32";
+ id.input_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_le_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_le_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "sc16";
+ id.input_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_be_1_to_sc16_1, PRIORITY_TABLE);
+
+ id.output_format = "sc16";
+ id.input_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_le_1_to_sc16_1, PRIORITY_TABLE);
+
+ id.input_format = "sc16";
+ id.output_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc16_1_to_sc8_item32_be_1, PRIORITY_TABLE);
+
+ id.input_format = "sc16";
+ id.output_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc16_1_to_sc8_item32_le_1, PRIORITY_TABLE);
+}
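+
+//(Illustrative note) extending the table-based set follows the same recipe:
+//one factory returning converter::sptr plus one register_converter() call
+//per direction; e.g. a hypothetical new pairing, with make_my_new_converter
+//standing in for the factory, would add:
+//
+//  id.input_format = "..."; //wire-side format
+//  id.output_format = "..."; //CPU-side format
+//  uhd::convert::register_converter(id, &make_my_new_converter, PRIORITY_TABLE);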
diff --git a/host/lib/convert/gen_convert_general.py b/host/lib/convert/gen_convert_general.py
new file mode 100644
index 000000000..364c4bd1a
--- /dev/null
+++ b/host/lib/convert/gen_convert_general.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+#
+# Copyright 2011-2012 Ettus Research LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+TMPL_HEADER = """
+#import time
+/***********************************************************************
+ * This file was generated by $file on $time.strftime("%c")
+ **********************************************************************/
+
+\#include "convert_common.hpp"
+\#include <uhd/utils/byteswap.hpp>
+
+using namespace uhd::convert;
+"""
+
+TMPL_CONV_GEN2_ITEM32 = """
+DECLARE_CONVERTER(item32, 1, sc16_item32_$(end), 1, PRIORITY_GENERAL){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ output[i] = $(to_wire)(input[i]);
+ }
+}
+
+DECLARE_CONVERTER(sc16_item32_$(end), 1, item32, 1, PRIORITY_GENERAL){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ output[i] = $(to_host)(input[i]);
+ }
+}
+"""
+
+TMPL_CONV_GEN2_SC16 = """
+DECLARE_CONVERTER($(cpu_type), 1, sc16_item32_$(end), 1, PRIORITY_GENERAL){
+ const $(cpu_type)_t *input = reinterpret_cast<const $(cpu_type)_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ output[i] = $(to_wire)($(cpu_type)_to_item32_sc16(input[i], scale_factor));
+ }
+}
+
+DECLARE_CONVERTER(sc16_item32_$(end), 1, $(cpu_type), 1, PRIORITY_GENERAL){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ $(cpu_type)_t *output = reinterpret_cast<$(cpu_type)_t *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ output[i] = item32_sc16_to_$(cpu_type)($(to_host)(input[i]), scale_factor);
+ }
+}
+"""
+
+TMPL_CONV_GEN2_SC8 = """
+DECLARE_CONVERTER(sc8_item32_$(end), 1, $(cpu_type), 1, PRIORITY_GENERAL){
+ const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
+ $(cpu_type)_t *output = reinterpret_cast<$(cpu_type)_t *>(outputs[0]);
+ $(cpu_type)_t dummy;
+ size_t num_samps = nsamps;
+
+ if ((size_t(inputs[0]) & 0x3) != 0){
+ const item32_t item0 = $(to_host)(*input++);
+ item32_sc8_to_$(cpu_type)(item0, dummy, *output++, scale_factor);
+ num_samps--;
+ }
+
+ const size_t num_pairs = num_samps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ const item32_t item_i = $(to_host)(input[i]);
+ item32_sc8_to_$(cpu_type)(item_i, output[j], output[j+1], scale_factor);
+ }
+
+ if (num_samps != num_pairs*2){
+ const item32_t item_n = $(to_host)(input[num_pairs]);
+ item32_sc8_to_$(cpu_type)(item_n, output[num_samps-1], dummy, scale_factor);
+ }
+}
+
+DECLARE_CONVERTER($(cpu_type), 1, sc8_item32_$(end), 1, PRIORITY_GENERAL){
+ const $(cpu_type)_t *input = reinterpret_cast<const $(cpu_type)_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const size_t num_pairs = nsamps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ const item32_t item = $(cpu_type)_to_item32_sc8(input[j], input[j+1], scale_factor);
+ output[i] = $(to_wire)(item);
+ }
+
+ if (nsamps != num_pairs*2){
+ const item32_t item = $(cpu_type)_to_item32_sc8(input[nsamps-1], 0, scale_factor);
+ output[num_pairs] = $(to_wire)(item);
+ }
+}
+"""
+
+TMPL_CONV_USRP1_COMPLEX = """
+DECLARE_CONVERTER($(cpu_type), $(width), sc16_item16_usrp1, 1, PRIORITY_GENERAL){
+ #for $w in range($width)
+ const $(cpu_type)_t *input$(w) = reinterpret_cast<const $(cpu_type)_t *>(inputs[$(w)]);
+ #end for
+ boost::uint16_t *output = reinterpret_cast<boost::uint16_t *>(outputs[0]);
+
+ for (size_t i = 0, j = 0; i < nsamps; i++){
+ #for $w in range($width)
+ output[j++] = $(to_wire)(boost::uint16_t(boost::int16_t(input$(w)[i].real()$(do_scale))));
+ output[j++] = $(to_wire)(boost::uint16_t(boost::int16_t(input$(w)[i].imag()$(do_scale))));
+ #end for
+ }
+}
+
+DECLARE_CONVERTER(sc16_item16_usrp1, 1, $(cpu_type), $(width), PRIORITY_GENERAL){
+ const boost::uint16_t *input = reinterpret_cast<const boost::uint16_t *>(inputs[0]);
+ #for $w in range($width)
+ $(cpu_type)_t *output$(w) = reinterpret_cast<$(cpu_type)_t *>(outputs[$(w)]);
+ #end for
+
+ for (size_t i = 0, j = 0; i < nsamps; i++){
+ #for $w in range($width)
+ output$(w)[i] = $(cpu_type)_t(
+ boost::int16_t($(to_host)(input[j+0]))$(do_scale),
+ boost::int16_t($(to_host)(input[j+1]))$(do_scale)
+ );
+ j += 2;
+ #end for
+ }
+}
+
+DECLARE_CONVERTER(sc8_item16_usrp1, 1, $(cpu_type), $(width), PRIORITY_GENERAL){
+ const boost::uint16_t *input = reinterpret_cast<const boost::uint16_t *>(inputs[0]);
+ #for $w in range($width)
+ $(cpu_type)_t *output$(w) = reinterpret_cast<$(cpu_type)_t *>(outputs[$(w)]);
+ #end for
+
+ for (size_t i = 0, j = 0; i < nsamps; i++){
+ #for $w in range($width)
+ {
+ const boost::uint16_t num = $(to_host)(input[j++]);
+ output$(w)[i] = $(cpu_type)_t(
+ boost::int8_t(num)$(do_scale),
+ boost::int8_t(num >> 8)$(do_scale)
+ );
+ }
+ #end for
+ }
+}
+"""
+
+def parse_tmpl(_tmpl_text, **kwargs):
+ from Cheetah.Template import Template
+ return str(Template(_tmpl_text, kwargs))
+
+if __name__ == '__main__':
+ import sys, os
+ file = os.path.basename(__file__)
+ output = parse_tmpl(TMPL_HEADER, file=file)
+
+ #generate complex converters for all gen2 platforms
+ for end, to_host, to_wire in (
+ ('be', 'uhd::ntohx', 'uhd::htonx'),
+ ('le', 'uhd::wtohx', 'uhd::htowx'),
+ ):
+ for cpu_type in 'fc64', 'fc32', 'sc16':
+ output += parse_tmpl(
+ TMPL_CONV_GEN2_SC16,
+ end=end, to_host=to_host, to_wire=to_wire, cpu_type=cpu_type
+ )
+ for cpu_type in 'fc64', 'fc32', 'sc16', 'sc8':
+ output += parse_tmpl(
+ TMPL_CONV_GEN2_SC8,
+ end=end, to_host=to_host, to_wire=to_wire, cpu_type=cpu_type
+ )
+ output += parse_tmpl(
+ TMPL_CONV_GEN2_ITEM32,
+ end=end, to_host=to_host, to_wire=to_wire
+ )
+
+ #generate complex converters for usrp1 format
+ for width in 1, 2, 4:
+ for cpu_type, do_scale in (
+ ('fc64', '*scale_factor'),
+ ('fc32', '*float(scale_factor)'),
+ ('sc16', ''),
+ ):
+ output += parse_tmpl(
+ TMPL_CONV_USRP1_COMPLEX,
+ width=width, to_host='uhd::wtohx', to_wire='uhd::htowx',
+ cpu_type=cpu_type, do_scale=do_scale
+ )
+ open(sys.argv[1], 'w').write(output)
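+
+# Usage sketch: the generated C++ is written to the path passed as the first
+# command line argument, e.g. (hypothetical invocation, normally driven by
+# the build system):
+#
+#   python gen_convert_general.py convert_general.cpp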