Diffstat (limited to 'host/lib/convert')
-rw-r--r--   host/lib/convert/CMakeLists.txt            125
-rw-r--r--   host/lib/convert/convert_common.hpp        279
-rw-r--r--   host/lib/convert/convert_impl.cpp          146
-rw-r--r--   host/lib/convert/convert_item32.cpp         44
-rw-r--r--   host/lib/convert/convert_orc.orc            80
-rw-r--r--   host/lib/convert/convert_with_neon.cpp      60
-rw-r--r--   host/lib/convert/convert_with_orc.cpp       65
-rw-r--r--   host/lib/convert/convert_with_tables.cpp   282
-rw-r--r--   host/lib/convert/gen_convert_general.py    134
-rw-r--r--   host/lib/convert/sse2_fc32_to_sc16.cpp     103
-rw-r--r--   host/lib/convert/sse2_fc32_to_sc8.cpp      115
-rw-r--r--   host/lib/convert/sse2_fc64_to_sc16.cpp     111
-rw-r--r--   host/lib/convert/sse2_fc64_to_sc8.cpp      129
-rw-r--r--   host/lib/convert/sse2_sc16_to_fc32.cpp     107
-rw-r--r--   host/lib/convert/sse2_sc16_to_fc64.cpp     115
-rw-r--r--   host/lib/convert/sse2_sc8_to_fc32.cpp      132
-rw-r--r--   host/lib/convert/sse2_sc8_to_fc64.cpp      151
17 files changed, 2178 insertions, 0 deletions
diff --git a/host/lib/convert/CMakeLists.txt b/host/lib/convert/CMakeLists.txt
new file mode 100644
index 000000000..0d9d0983f
--- /dev/null
+++ b/host/lib/convert/CMakeLists.txt
@@ -0,0 +1,125 @@
+#
+# Copyright 2011-2012 Ettus Research LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+########################################################################
+# This file is included; use CMake directory variables
+########################################################################
+INCLUDE(CheckIncludeFileCXX)
+MESSAGE(STATUS "")
+
+########################################################################
+# Look for Orc support
+########################################################################
+FIND_PACKAGE(PkgConfig)
+IF(PKG_CONFIG_FOUND)
+PKG_CHECK_MODULES(ORC "orc-0.4 > 0.4.11")
+ENDIF(PKG_CONFIG_FOUND)
+
+FIND_PROGRAM(ORCC_EXECUTABLE orcc)
+
+LIBUHD_REGISTER_COMPONENT("ORC" ENABLE_ORC ON "ENABLE_LIBUHD;ORC_FOUND;ORCC_EXECUTABLE" OFF)
+
+IF(ENABLE_ORC)
+ INCLUDE_DIRECTORIES(${ORC_INCLUDE_DIRS})
+ LINK_DIRECTORIES(${ORC_LIBRARY_DIRS})
+ ENABLE_LANGUAGE(C)
+
+ SET(orcc_src ${CMAKE_CURRENT_SOURCE_DIR}/convert_orc.orc)
+
+ GET_FILENAME_COMPONENT(orc_file_name_we ${orcc_src} NAME_WE)
+ SET(orcc_gen ${CMAKE_CURRENT_BINARY_DIR}/${orc_file_name_we}.c)
+ MESSAGE(STATUS "Orc found, enabling Orc support.")
+ ADD_CUSTOM_COMMAND(
+ COMMAND ${ORCC_EXECUTABLE} --implementation -o ${orcc_gen} ${orcc_src}
+ DEPENDS ${orcc_src} OUTPUT ${orcc_gen}
+ )
+ LIBUHD_APPEND_SOURCES(${orcc_gen})
+ LIBUHD_APPEND_SOURCES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_orc.cpp
+ )
+ LIBUHD_APPEND_LIBS(${ORC_LIBRARIES})
+ELSE(ENABLE_ORC)
+ MESSAGE(STATUS "Orc not found, disabling orc support.")
+ENDIF(ENABLE_ORC)
+
+########################################################################
+# Check for SSE2 SIMD headers
+########################################################################
+IF(CMAKE_COMPILER_IS_GNUCXX)
+ SET(EMMINTRIN_FLAGS -msse2)
+ELSEIF(MSVC)
+ SET(EMMINTRIN_FLAGS /arch:SSE2)
+ENDIF()
+
+SET(CMAKE_REQUIRED_FLAGS ${EMMINTRIN_FLAGS})
+CHECK_INCLUDE_FILE_CXX(emmintrin.h HAVE_EMMINTRIN_H)
+UNSET(CMAKE_REQUIRED_FLAGS)
+
+IF(HAVE_EMMINTRIN_H)
+ SET(convert_with_sse2_sources
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_sc16_to_fc64.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_sc16_to_fc32.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_sc8_to_fc64.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_sc8_to_fc32.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_fc64_to_sc16.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_fc32_to_sc16.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_fc64_to_sc8.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/sse2_fc32_to_sc8.cpp
+ )
+ SET_SOURCE_FILES_PROPERTIES(
+ ${convert_with_sse2_sources}
+ PROPERTIES COMPILE_FLAGS "${EMMINTRIN_FLAGS}"
+ )
+ LIBUHD_APPEND_SOURCES(${convert_with_sse2_sources})
+ENDIF(HAVE_EMMINTRIN_H)
+
+########################################################################
+# Check for NEON SIMD headers
+########################################################################
+IF(CMAKE_COMPILER_IS_GNUCXX)
+ SET(NEON_FLAGS "-mfloat-abi=softfp -mfpu=neon")
+ SET(CMAKE_REQUIRED_FLAGS ${NEON_FLAGS})
+ CHECK_INCLUDE_FILE_CXX(arm_neon.h HAVE_ARM_NEON_H)
+ UNSET(CMAKE_REQUIRED_FLAGS)
+ENDIF(CMAKE_COMPILER_IS_GNUCXX)
+
+IF(HAVE_ARM_NEON_H)
+ SET_SOURCE_FILES_PROPERTIES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_neon.cpp
+ PROPERTIES COMPILE_FLAGS "${NEON_FLAGS}"
+ )
+ LIBUHD_APPEND_SOURCES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_neon.cpp
+ )
+ENDIF()
+
+########################################################################
+# Convert types generation
+########################################################################
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
+INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR})
+
+LIBUHD_PYTHON_GEN_SOURCE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/gen_convert_general.py
+ ${CMAKE_CURRENT_BINARY_DIR}/convert_general.cpp
+)
+
+LIBUHD_APPEND_SOURCES(
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_with_tables.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_impl.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/convert_item32.cpp
+)
diff --git a/host/lib/convert/convert_common.hpp b/host/lib/convert/convert_common.hpp
new file mode 100644
index 000000000..933978a8f
--- /dev/null
+++ b/host/lib/convert/convert_common.hpp
@@ -0,0 +1,279 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#ifndef INCLUDED_LIBUHD_CONVERT_COMMON_HPP
+#define INCLUDED_LIBUHD_CONVERT_COMMON_HPP
+
+#include <uhd/convert.hpp>
+#include <uhd/utils/static.hpp>
+#include <boost/cstdint.hpp>
+#include <complex>
+
+#define _DECLARE_CONVERTER(name, in_form, num_in, out_form, num_out, prio) \
+ struct name : public uhd::convert::converter{ \
+ static sptr make(void){return sptr(new name());} \
+ double scale_factor; \
+ void set_scalar(const double s){scale_factor = s;} \
+ void operator()(const input_type&, const output_type&, const size_t); \
+ }; \
+ UHD_STATIC_BLOCK(__register_##name##_##prio){ \
+ uhd::convert::id_type id; \
+ id.input_format = #in_form; \
+ id.num_inputs = num_in; \
+ id.output_format = #out_form; \
+ id.num_outputs = num_out; \
+ uhd::convert::register_converter(id, &name::make, prio); \
+ } \
+ void name::operator()( \
+ const input_type &inputs, const output_type &outputs, const size_t nsamps \
+ )
+
+#define DECLARE_CONVERTER(in_form, num_in, out_form, num_out, prio) \
+ _DECLARE_CONVERTER(__convert_##in_form##_##num_in##_##out_form##_##num_out##_##prio, in_form, num_in, out_form, num_out, prio)
+
+/***********************************************************************
+ * Setup priorities
+ **********************************************************************/
+static const int PRIORITY_GENERAL = 0;
+static const int PRIORITY_EMPTY = -1;
+
+#ifdef __ARM_NEON__
+static const int PRIORITY_LIBORC = 3;
+static const int PRIORITY_SIMD = 2; //neon conversions could be implemented better, orc wins
+static const int PRIORITY_TABLE = 1; //tables require large cache, so they are slower on arm
+#else
+static const int PRIORITY_LIBORC = 2;
+static const int PRIORITY_SIMD = 3;
+static const int PRIORITY_TABLE = 1;
+#endif
+
+/***********************************************************************
+ * Typedefs
+ **********************************************************************/
+typedef std::complex<double> fc64_t;
+typedef std::complex<float> fc32_t;
+typedef std::complex<boost::int32_t> sc32_t;
+typedef std::complex<boost::int16_t> sc16_t;
+typedef std::complex<boost::int8_t> sc8_t;
+typedef double f64_t;
+typedef float f32_t;
+typedef boost::int32_t s32_t;
+typedef boost::int16_t s16_t;
+typedef boost::int8_t s8_t;
+
+typedef boost::uint32_t item32_t;
+
+typedef item32_t (*xtox_t)(item32_t);
+
+/***********************************************************************
+ * Convert xx to items32 sc16 buffer
+ **********************************************************************/
+template <typename T> UHD_INLINE item32_t xx_to_item32_sc16_x1(
+ const std::complex<T> &num, const double scale_factor
+){
+ boost::uint16_t real = boost::int16_t(num.real()*float(scale_factor));
+ boost::uint16_t imag = boost::int16_t(num.imag()*float(scale_factor));
+ return (item32_t(real) << 16) | (item32_t(imag) << 0);
+}
+
+template <> UHD_INLINE item32_t xx_to_item32_sc16_x1(
+ const sc16_t &num, const double
+){
+ boost::uint16_t real = boost::int16_t(num.real());
+ boost::uint16_t imag = boost::int16_t(num.imag());
+ return (item32_t(real) << 16) | (item32_t(imag) << 0);
+}
+
+template <xtox_t to_wire, typename T>
+UHD_INLINE void xx_to_item32_sc16(
+ const std::complex<T> *input,
+ item32_t *output,
+ const size_t nsamps,
+ const double scale_factor
+){
+ for (size_t i = 0; i < nsamps; i++){
+ const item32_t item = xx_to_item32_sc16_x1(input[i], scale_factor);
+ output[i] = to_wire(item);
+ }
+}
+
+/***********************************************************************
+ * Convert items32 sc16 buffer to xx
+ **********************************************************************/
+template <typename T> UHD_INLINE std::complex<T> item32_sc16_x1_to_xx(
+ const item32_t item, const double scale_factor
+){
+ return std::complex<T>(
+ T(boost::int16_t(item >> 16)*float(scale_factor)),
+ T(boost::int16_t(item >> 0)*float(scale_factor))
+ );
+}
+
+template <> UHD_INLINE sc16_t item32_sc16_x1_to_xx(
+ const item32_t item, const double
+){
+ return sc16_t(
+ boost::int16_t(item >> 16), boost::int16_t(item >> 0)
+ );
+}
+
+template <xtox_t to_host, typename T>
+UHD_INLINE void item32_sc16_to_xx(
+ const item32_t *input,
+ std::complex<T> *output,
+ const size_t nsamps,
+ const double scale_factor
+){
+ for (size_t i = 0; i < nsamps; i++){
+ const item32_t item_i = to_host(input[i]);
+ output[i] = item32_sc16_x1_to_xx<T>(item_i, scale_factor);
+ }
+}
+
+/***********************************************************************
+ * Convert xx to items32 sc8 buffer
+ **********************************************************************/
+template <typename T> UHD_INLINE item32_t xx_to_item32_sc8_x1(
+ const std::complex<T> &in0, const std::complex<T> &in1, const double scale_factor
+){
+ boost::uint8_t real0 = boost::int8_t(in0.real()*float(scale_factor));
+ boost::uint8_t imag0 = boost::int8_t(in0.imag()*float(scale_factor));
+ boost::uint8_t real1 = boost::int8_t(in1.real()*float(scale_factor));
+ boost::uint8_t imag1 = boost::int8_t(in1.imag()*float(scale_factor));
+ return
+ (item32_t(real0) << 8) | (item32_t(imag0) << 0) |
+ (item32_t(real1) << 24) | (item32_t(imag1) << 16)
+ ;
+}
+
+template <> UHD_INLINE item32_t xx_to_item32_sc8_x1(
+ const sc16_t &in0, const sc16_t &in1, const double
+){
+ boost::uint8_t real0 = boost::int8_t(in0.real());
+ boost::uint8_t imag0 = boost::int8_t(in0.imag());
+ boost::uint8_t real1 = boost::int8_t(in1.real());
+ boost::uint8_t imag1 = boost::int8_t(in1.imag());
+ return
+ (item32_t(real0) << 8) | (item32_t(imag0) << 0) |
+ (item32_t(real1) << 24) | (item32_t(imag1) << 16)
+ ;
+}
+
+template <> UHD_INLINE item32_t xx_to_item32_sc8_x1(
+ const sc8_t &in0, const sc8_t &in1, const double
+){
+ boost::uint8_t real0 = boost::int8_t(in0.real());
+ boost::uint8_t imag0 = boost::int8_t(in0.imag());
+ boost::uint8_t real1 = boost::int8_t(in1.real());
+ boost::uint8_t imag1 = boost::int8_t(in1.imag());
+ return
+ (item32_t(real0) << 8) | (item32_t(imag0) << 0) |
+ (item32_t(real1) << 24) | (item32_t(imag1) << 16)
+ ;
+}
+
+template <xtox_t to_wire, typename T>
+UHD_INLINE void xx_to_item32_sc8(
+ const std::complex<T> *input,
+ item32_t *output,
+ const size_t nsamps,
+ const double scale_factor
+){
+ const size_t num_pairs = nsamps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ const item32_t item = xx_to_item32_sc8_x1(input[j], input[j+1], scale_factor);
+ output[i] = to_wire(item);
+ }
+
+ if (nsamps != num_pairs*2){
+ const item32_t item = xx_to_item32_sc8_x1(input[nsamps-1], std::complex<T>(0), scale_factor);
+ output[num_pairs] = to_wire(item);
+ }
+}
+
+/***********************************************************************
+ * Convert items32 sc8 buffer to xx
+ **********************************************************************/
+template <typename T> UHD_INLINE void item32_sc8_x1_to_xx(
+ const item32_t item, std::complex<T> &out0, std::complex<T> &out1, const double scale_factor
+){
+ out0 = std::complex<T>(
+ T(boost::int8_t(item >> 8)*float(scale_factor)),
+ T(boost::int8_t(item >> 0)*float(scale_factor))
+ );
+ out1 = std::complex<T>(
+ T(boost::int8_t(item >> 24)*float(scale_factor)),
+ T(boost::int8_t(item >> 16)*float(scale_factor))
+ );
+}
+
+template <> UHD_INLINE void item32_sc8_x1_to_xx(
+ const item32_t item, sc16_t &out0, sc16_t &out1, const double
+){
+ out0 = sc16_t(
+ boost::int16_t(boost::int8_t(item >> 8)),
+ boost::int16_t(boost::int8_t(item >> 0))
+ );
+ out1 = sc16_t(
+ boost::int16_t(boost::int8_t(item >> 24)),
+ boost::int16_t(boost::int8_t(item >> 16))
+ );
+}
+
+template <> UHD_INLINE void item32_sc8_x1_to_xx(
+ const item32_t item, sc8_t &out0, sc8_t &out1, const double
+){
+ out0 = sc8_t(
+ boost::int8_t(boost::int8_t(item >> 8)),
+ boost::int8_t(boost::int8_t(item >> 0))
+ );
+ out1 = sc8_t(
+ boost::int8_t(boost::int8_t(item >> 24)),
+ boost::int8_t(boost::int8_t(item >> 16))
+ );
+}
+
+template <xtox_t to_host, typename T>
+UHD_INLINE void item32_sc8_to_xx(
+ const item32_t *input,
+ std::complex<T> *output,
+ const size_t nsamps,
+ const double scale_factor
+){
+    //check the original pointer for a half-item offset before aligning it down
+    const size_t head_offset = size_t(input) & 0x3;
+    input = reinterpret_cast<const item32_t *>(size_t(input) & ~0x3);
+    std::complex<T> dummy;
+    size_t num_samps = nsamps;
+
+    if (head_offset != 0){
+ const item32_t item0 = to_host(*input++);
+ item32_sc8_x1_to_xx(item0, dummy, *output++, scale_factor);
+ num_samps--;
+ }
+
+ const size_t num_pairs = num_samps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ const item32_t item_i = to_host(input[i]);
+ item32_sc8_x1_to_xx(item_i, output[j], output[j+1], scale_factor);
+ }
+
+ if (num_samps != num_pairs*2){
+ const item32_t item_n = to_host(input[num_pairs]);
+ item32_sc8_x1_to_xx(item_n, output[num_samps-1], dummy, scale_factor);
+ }
+}
+
+#endif /* INCLUDED_LIBUHD_CONVERT_COMMON_HPP */
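
The helpers above fix the item32 layout: for sc16 the real part occupies bits 31..16 and the imaginary part bits 15..0 of each 32-bit item, while the sc8 path packs two complex samples per item. A minimal standalone round-trip sketch of the sc16 scheme, using plain <cstdint> types instead of the Boost typedefs (the names here are illustrative, not part of convert_common.hpp):

    #include <cassert>
    #include <cmath>
    #include <complex>
    #include <cstdint>

    //pack one complex<float> into a host-order item32: real in bits 31..16, imag in bits 15..0
    static uint32_t pack_sc16(const std::complex<float> &num, const double scale){
        const uint16_t real = uint16_t(int16_t(num.real()*float(scale)));
        const uint16_t imag = uint16_t(int16_t(num.imag()*float(scale)));
        return (uint32_t(real) << 16) | (uint32_t(imag) << 0);
    }

    //reverse operation: sign-extend each 16-bit half and undo the scaling
    static std::complex<float> unpack_sc16(const uint32_t item, const double scale){
        return std::complex<float>(
            float(int16_t(item >> 16))*float(scale),
            float(int16_t(item >> 0))*float(scale)
        );
    }

    int main(void){
        const std::complex<float> in(0.5f, -0.25f);
        const uint32_t item = pack_sc16(in, 32767.);
        const std::complex<float> out = unpack_sc16(item, 1./32767);
        assert(std::abs(out.real() - in.real()) < 1e-3f);
        assert(std::abs(out.imag() - in.imag()) < 1e-3f);
        return 0;
    }

The wire byte order is handled separately: the templated loops build each item in host order and then apply the to_wire/to_host function pointer (uhd::htonx and friends), which keeps the packing logic endian-neutral.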
diff --git a/host/lib/convert/convert_impl.cpp b/host/lib/convert/convert_impl.cpp
new file mode 100644
index 000000000..dc7f8f9dc
--- /dev/null
+++ b/host/lib/convert/convert_impl.cpp
@@ -0,0 +1,146 @@
+//
+// Copyright 2011 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include <uhd/convert.hpp>
+#include <uhd/utils/log.hpp>
+#include <uhd/utils/static.hpp>
+#include <uhd/types/dict.hpp>
+#include <uhd/exception.hpp>
+#include <boost/cstdint.hpp>
+#include <boost/format.hpp>
+#include <boost/foreach.hpp>
+#include <complex>
+
+using namespace uhd;
+
+bool convert::operator==(const convert::id_type &lhs, const convert::id_type &rhs){
+ return true
+ and (lhs.input_format == rhs.input_format)
+ and (lhs.num_inputs == rhs.num_inputs)
+ and (lhs.output_format == rhs.output_format)
+ and (lhs.num_outputs == rhs.num_outputs)
+ ;
+}
+
+std::string convert::id_type::to_pp_string(void) const{
+ return str(boost::format(
+ "conversion ID\n"
+ " Input format: %s\n"
+ " Num inputs: %d\n"
+ " Output format: %s\n"
+ " Num outputs: %d\n"
+ )
+ % this->input_format
+ % this->num_inputs
+ % this->output_format
+ % this->num_outputs
+ );
+}
+
+/***********************************************************************
+ * Setup the table registry
+ **********************************************************************/
+typedef uhd::dict<convert::id_type, uhd::dict<convert::priority_type, convert::function_type> > fcn_table_type;
+UHD_SINGLETON_FCN(fcn_table_type, get_table);
+
+/***********************************************************************
+ * The registry functions
+ **********************************************************************/
+void uhd::convert::register_converter(
+ const id_type &id,
+ const function_type &fcn,
+ const priority_type prio
+){
+ get_table()[id][prio] = fcn;
+
+ //----------------------------------------------------------------//
+ UHD_LOGV(always) << "register_converter: " << id.to_pp_string() << std::endl
+ << " prio: " << prio << std::endl
+ << std::endl
+ ;
+ //----------------------------------------------------------------//
+}
+
+/***********************************************************************
+ * The converter functions
+ **********************************************************************/
+convert::function_type convert::get_converter(
+ const id_type &id,
+ const priority_type prio
+){
+ if (not get_table().has_key(id)) throw uhd::key_error(
+ "Cannot find a conversion routine for " + id.to_pp_string());
+
+ //find a matching priority
+ priority_type best_prio = -1;
+ BOOST_FOREACH(priority_type prio_i, get_table()[id].keys()){
+ if (prio_i == prio) return get_table()[id][prio];
+ best_prio = std::max(best_prio, prio_i);
+ }
+
+    //wanted a specific prio, didn't find it
+ if (prio != -1) throw uhd::key_error(
+ "Cannot find a conversion routine [with prio] for " + id.to_pp_string());
+
+ //otherwise, return best prio
+ return get_table()[id][best_prio];
+}
+
+/***********************************************************************
+ * Mappings for item format to byte size for all items we can
+ **********************************************************************/
+typedef uhd::dict<std::string, size_t> item_size_type;
+UHD_SINGLETON_FCN(item_size_type, get_item_size_table);
+
+void convert::register_bytes_per_item(
+ const std::string &format, const size_t size
+){
+ get_item_size_table()[format] = size;
+}
+
+size_t convert::get_bytes_per_item(const std::string &format){
+ if (get_item_size_table().has_key(format)) return get_item_size_table()[format];
+
+ //OK. I am sorry about this.
+    //We didn't find a match, so let's find a match for the first term.
+ //This is partially a hack because of the way I append strings.
+ //But as long as life is kind, we can keep this.
+ const size_t pos = format.find("_");
+ if (pos != std::string::npos){
+ return get_bytes_per_item(format.substr(0, pos));
+ }
+
+ throw uhd::key_error("Cannot find an item size:\n" + format);
+}
+
+UHD_STATIC_BLOCK(convert_register_item_sizes){
+ //register standard complex types
+ convert::register_bytes_per_item("fc64", sizeof(std::complex<double>));
+ convert::register_bytes_per_item("fc32", sizeof(std::complex<float>));
+ convert::register_bytes_per_item("sc64", sizeof(std::complex<boost::int64_t>));
+ convert::register_bytes_per_item("sc32", sizeof(std::complex<boost::int32_t>));
+ convert::register_bytes_per_item("sc16", sizeof(std::complex<boost::int16_t>));
+ convert::register_bytes_per_item("sc8", sizeof(std::complex<boost::int8_t>));
+
+ //register standard real types
+ convert::register_bytes_per_item("f64", sizeof(double));
+ convert::register_bytes_per_item("f32", sizeof(float));
+ convert::register_bytes_per_item("s64", sizeof(boost::int64_t));
+ convert::register_bytes_per_item("s32", sizeof(boost::int32_t));
+ convert::register_bytes_per_item("s16", sizeof(boost::int16_t));
+ convert::register_bytes_per_item("s8", sizeof(boost::int8_t));
+}
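
get_converter() above implements a simple two-level lookup: the id selects a priority-to-function table, an exact priority match returns immediately, and prio == -1 falls back to the highest registered priority. A standalone sketch of just that selection logic, with std::map standing in for uhd::dict and illustrative names:

    #include <algorithm>
    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>

    typedef std::map<int, std::string> prio_table_type; //priority -> converter name

    //mirrors the selection rules in convert::get_converter()
    static std::string select_converter(const prio_table_type &table, const int prio){
        if (table.empty()) throw std::runtime_error("no conversion routine registered");
        int best_prio = -1;
        for (prio_table_type::const_iterator it = table.begin(); it != table.end(); ++it){
            if (it->first == prio) return it->second; //exact priority requested and found
            best_prio = std::max(best_prio, it->first);
        }
        if (prio != -1) throw std::runtime_error("no conversion routine with that priority");
        return table.find(best_prio)->second; //otherwise take the highest priority
    }

    int main(void){
        prio_table_type table;
        table[0] = "general";
        table[1] = "table";
        table[3] = "simd";
        std::cout << select_converter(table, -1) << std::endl; //prints "simd"
        return 0;
    }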
diff --git a/host/lib/convert/convert_item32.cpp b/host/lib/convert/convert_item32.cpp
new file mode 100644
index 000000000..57bd64860
--- /dev/null
+++ b/host/lib/convert/convert_item32.cpp
@@ -0,0 +1,44 @@
+//
+// Copyright 2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+
+#define __DECLARE_ITEM32_CONVERTER(cpu_type, wire_type, xe, htoxx, xxtoh) \
+ DECLARE_CONVERTER(cpu_type, 1, wire_type ## _item32_ ## xe, 1, PRIORITY_GENERAL){ \
+ const cpu_type ## _t *input = reinterpret_cast<const cpu_type ## _t *>(inputs[0]); \
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]); \
+ xx_to_item32_ ## wire_type<htoxx>(input, output, nsamps, scale_factor); \
+ } \
+ DECLARE_CONVERTER(wire_type ## _item32_ ## xe, 1, cpu_type, 1, PRIORITY_GENERAL){ \
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]); \
+ cpu_type ## _t *output = reinterpret_cast<cpu_type ## _t *>(outputs[0]); \
+ item32_ ## wire_type ## _to_xx<xxtoh>(input, output, nsamps, scale_factor); \
+ }
+
+#define _DECLARE_ITEM32_CONVERTER(cpu_type, wire_type) \
+ __DECLARE_ITEM32_CONVERTER(cpu_type, wire_type, be, uhd::htonx, uhd::ntohx) \
+ __DECLARE_ITEM32_CONVERTER(cpu_type, wire_type, le, uhd::htowx, uhd::wtohx)
+
+#define DECLARE_ITEM32_CONVERTER(cpu_type) \
+ _DECLARE_ITEM32_CONVERTER(cpu_type, sc8) \
+ _DECLARE_ITEM32_CONVERTER(cpu_type, sc16)
+
+DECLARE_ITEM32_CONVERTER(sc16)
+DECLARE_ITEM32_CONVERTER(fc32)
+DECLARE_ITEM32_CONVERTER(fc64)
+_DECLARE_ITEM32_CONVERTER(sc8, sc8)
diff --git a/host/lib/convert/convert_orc.orc b/host/lib/convert/convert_orc.orc
new file mode 100644
index 000000000..f7075606e
--- /dev/null
+++ b/host/lib/convert/convert_orc.orc
@@ -0,0 +1,80 @@
+.function _convert_fc32_1_to_item32_1_nswap_orc
+.source 8 src
+.dest 4 dst
+.floatparam 4 scalar
+.temp 8 scaled
+.temp 8 converted
+.temp 4 short
+x2 mulf scaled, src, scalar
+x2 convfl converted, scaled
+x2 convlw short, converted
+swapl short, short
+x2 swapw dst, short
+
+.function _convert_fc32_1_to_item32_1_bswap_orc
+.source 8 src
+.dest 4 dst
+.floatparam 4 scalar
+.temp 8 scaled
+.temp 8 converted
+.temp 4 short
+x2 mulf scaled, src, scalar
+x2 convfl converted, scaled
+x2 convlw short, converted
+x2 swapw dst, short
+
+.function _convert_item32_1_to_fc32_1_nswap_orc
+.source 4 src
+.dest 8 dst
+.floatparam 4 scalar
+.temp 4 tmp1
+.temp 8 tmp2
+x2 swapw tmp1, src
+swapl tmp1, tmp1
+x2 convswl tmp2, tmp1
+x2 convlf tmp2, tmp2
+x2 mulf dst, tmp2, scalar
+
+.function _convert_item32_1_to_fc32_1_bswap_orc
+.source 4 src
+.dest 8 dst
+.floatparam 4 scalar
+.temp 4 tmp1
+.temp 8 tmp2
+x2 swapw tmp1, src
+x2 convswl tmp2, tmp1
+x2 convlf tmp2, tmp2
+x2 mulf dst, tmp2, scalar
+
+.function _convert_sc16_1_to_item32_1_nswap_orc
+.source 4 src
+.dest 4 dst
+.temp 4 tmp
+.floatparam 4 scalar
+swapl tmp, src
+x2 swapw dst, tmp
+
+.function _convert_item32_1_to_sc16_1_nswap_orc
+.source 4 src
+.dest 4 dst
+.floatparam 4 scalar
+.temp 4 tmp
+x2 swapw tmp, src
+swapl dst, tmp
+
+.function _convert_swap_byte_pairs_orc
+.source 4 src
+.dest 4 dst
+swapl dst, src
+
+.function _convert_fc32_1_to_sc8_1_nswap_orc
+.source 8 src
+.dest 2 dst
+.temp 8 tmp
+.temp 4 tmp2
+.floatparam 4 scalar
+x2 mulf tmp, src, scalar
+x2 convfl tmp, tmp
+swaplq tmp, tmp
+x2 convlw tmp2, tmp
+x2 convwb dst, tmp2
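
Each .function above is an Orc kernel that the runtime applies across the whole buffer; the x2 prefix operates on both halves of a complex sample at once. As a rough scalar translation of the first kernel (fc32 to sc16_item32, no byteswap), assuming convlw narrows by truncation and noting that swapl followed by x2 swapw amounts to exchanging the two 16-bit halves on a little-endian host:

    #include <complex>
    #include <cstdint>

    //illustrative scalar equivalent of _convert_fc32_1_to_item32_1_nswap_orc, one sample per call
    static uint32_t fc32_to_item32_nswap(const std::complex<float> &src, const float scalar){
        //x2 mulf, x2 convfl: scale both components and convert to int32
        const int32_t re = int32_t(src.real()*scalar);
        const int32_t im = int32_t(src.imag()*scalar);
        //x2 convlw: narrow each component to int16 (assumed truncating, not saturating)
        const uint16_t re16 = uint16_t(int16_t(re));
        const uint16_t im16 = uint16_t(int16_t(im));
        //swapl + x2 swapw: exchange the 16-bit halves so the real part lands in bits 31..16
        return (uint32_t(re16) << 16) | uint32_t(im16);
    }

    int main(void){
        const uint32_t item = fc32_to_item32_nswap(std::complex<float>(0.5f, -0.5f), 32767.0f);
        (void)item; //0x3FFFC001 for this input
        return 0;
    }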
diff --git a/host/lib/convert/convert_with_neon.cpp b/host/lib/convert/convert_with_neon.cpp
new file mode 100644
index 000000000..e994d97a6
--- /dev/null
+++ b/host/lib/convert/convert_with_neon.cpp
@@ -0,0 +1,60 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <arm_neon.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_le, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ size_t i;
+
+ float32x4_t Q0 = vdupq_n_f32(float(scale_factor));
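+    //each iteration loads four floats (two complex samples), scales them, narrows
+    //to int16, and uses vrev32_s16 to swap each real/imag pair into item32 order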
+ for (i=0; i < (nsamps & ~0x03); i+=2) {
+ float32x4_t Q1 = vld1q_f32(reinterpret_cast<const float *>(&input[i]));
+ float32x4_t Q2 = vmulq_f32(Q1, Q0);
+ int32x4_t Q3 = vcvtq_s32_f32(Q2);
+ int16x4_t D8 = vmovn_s32(Q3);
+ int16x4_t D9 = vrev32_s16(D8);
+ vst1_s16((reinterpret_cast<int16_t *>(&output[i])), D9);
+ }
+
+ xx_to_item32_sc16<uhd::htowx>(input+i, output+i, nsamps-i, scale_factor);
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ size_t i;
+
+ float32x4_t Q1 = vdupq_n_f32(float(scale_factor));
+ for (i=0; i < (nsamps & ~0x03); i+=2) {
+ int16x4_t D0 = vld1_s16(reinterpret_cast<const int16_t *>(&input[i]));
+ int16x4_t D1 = vrev32_s16(D0);
+ int32x4_t Q2 = vmovl_s16(D1);
+ float32x4_t Q3 = vcvtq_f32_s32(Q2);
+ float32x4_t Q4 = vmulq_f32(Q3, Q1);
+ vst1q_f32((reinterpret_cast<float *>(&output[i])), Q4);
+ }
+
+ item32_sc16_to_xx<uhd::htowx>(input+i, output+i, nsamps-i, scale_factor);
+}
diff --git a/host/lib/convert/convert_with_orc.cpp b/host/lib/convert/convert_with_orc.cpp
new file mode 100644
index 000000000..e44c8ca73
--- /dev/null
+++ b/host/lib/convert/convert_with_orc.cpp
@@ -0,0 +1,65 @@
+//
+// Copyright 2011 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+
+using namespace uhd::convert;
+
+extern "C" {
+extern void _convert_fc32_1_to_item32_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_fc32_1_to_item32_1_bswap_orc(void *, const void *, float, int);
+extern void _convert_item32_1_to_fc32_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_item32_1_to_fc32_1_bswap_orc(void *, const void *, float, int);
+extern void _convert_sc16_1_to_item32_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_item32_1_to_sc16_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_fc32_1_to_sc8_1_nswap_orc(void *, const void *, float, int);
+extern void _convert_swap_byte_pairs_orc(void *, const void *, int);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_le, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_item32_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_be, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_item32_1_bswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc32, 1, PRIORITY_LIBORC){
+ _convert_item32_1_to_fc32_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16_item32_be, 1, fc32, 1, PRIORITY_LIBORC){
+ _convert_item32_1_to_fc32_1_bswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16, 1, sc16_item32_le, 1, PRIORITY_LIBORC){
+ _convert_sc16_1_to_item32_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(sc16_item32_le, 1, sc16, 1, PRIORITY_LIBORC){
+ _convert_item32_1_to_sc16_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_be, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_sc8_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+ _convert_swap_byte_pairs_orc(outputs[0], outputs[0], (nsamps + 1)/2);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_le, 1, PRIORITY_LIBORC){
+ _convert_fc32_1_to_sc8_1_nswap_orc(outputs[0], inputs[0], scale_factor, nsamps);
+}
diff --git a/host/lib/convert/convert_with_tables.cpp b/host/lib/convert/convert_with_tables.cpp
new file mode 100644
index 000000000..cd7773d4b
--- /dev/null
+++ b/host/lib/convert/convert_with_tables.cpp
@@ -0,0 +1,282 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <boost/math/special_functions/round.hpp>
+#include <vector>
+
+using namespace uhd::convert;
+
+static const size_t sc16_table_len = size_t(1 << 16);
+
+typedef boost::uint16_t (*tohost16_type)(boost::uint16_t);
+
+/***********************************************************************
+ * Implementation for sc16 to sc8 lookup table
+ * - Lookup the real and imaginary parts individually
+ **********************************************************************/
+template <bool swap>
+class convert_sc16_1_to_sc8_item32_1 : public converter{
+public:
+ convert_sc16_1_to_sc8_item32_1(void): _table(sc16_table_len){}
+
+ void set_scalar(const double scalar){
+ for (size_t i = 0; i < sc16_table_len; i++){
+ const boost::int16_t val = boost::uint16_t(i);
+ _table[i] = boost::int8_t(boost::math::iround(val * scalar / 32767.));
+ }
+ }
+
+ void operator()(const input_type &inputs, const output_type &outputs, const size_t nsamps){
+ const sc16_t *input = reinterpret_cast<const sc16_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const size_t num_pairs = nsamps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ output[i] = this->lookup(input[j], input[j+1]);
+ }
+
+ if (nsamps != num_pairs*2){
+            output[num_pairs] = this->lookup(input[nsamps-1], 0);
+ }
+ }
+
+ item32_t lookup(const sc16_t &in0, const sc16_t &in1){
+        if (swap){ //hope this compiles out, it's a template constant
+ return
+ (item32_t(_table[boost::uint16_t(in0.real())]) << 16) |
+ (item32_t(_table[boost::uint16_t(in0.imag())]) << 24) |
+ (item32_t(_table[boost::uint16_t(in1.real())]) << 0) |
+ (item32_t(_table[boost::uint16_t(in1.imag())]) << 8) ;
+ }
+ return
+ (item32_t(_table[boost::uint16_t(in0.real())]) << 8) |
+ (item32_t(_table[boost::uint16_t(in0.imag())]) << 0) |
+ (item32_t(_table[boost::uint16_t(in1.real())]) << 24) |
+ (item32_t(_table[boost::uint16_t(in1.imag())]) << 16) ;
+ }
+
+private:
+ std::vector<boost::uint8_t> _table;
+};
+
+/***********************************************************************
+ * Implementation for sc16 lookup table
+ * - Lookup the real and imaginary parts individually
+ **********************************************************************/
+template <typename type, tohost16_type tohost, size_t re_shift, size_t im_shift>
+class convert_sc16_item32_1_to_fcxx_1 : public converter{
+public:
+ convert_sc16_item32_1_to_fcxx_1(void): _table(sc16_table_len){}
+
+ void set_scalar(const double scalar){
+ for (size_t i = 0; i < sc16_table_len; i++){
+ const boost::uint16_t val = tohost(boost::uint16_t(i & 0xffff));
+ _table[i] = type(boost::int16_t(val)*scalar);
+ }
+ }
+
+ void operator()(const input_type &inputs, const output_type &outputs, const size_t nsamps){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ std::complex<type> *output = reinterpret_cast<std::complex<type> *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ const item32_t item = input[i];
+ output[i] = std::complex<type>(
+ _table[boost::uint16_t(item >> re_shift)],
+ _table[boost::uint16_t(item >> im_shift)]
+ );
+ }
+ }
+
+private:
+ std::vector<type> _table;
+};
+
+/***********************************************************************
+ * Implementation for sc8 lookup table
+ * - Lookup the real and imaginary parts together
+ **********************************************************************/
+template <typename type, tohost16_type tohost, size_t lo_shift, size_t hi_shift>
+class convert_sc8_item32_1_to_fcxx_1 : public converter{
+public:
+ convert_sc8_item32_1_to_fcxx_1(void): _table(sc16_table_len){}
+
+ //special case for sc16 type, 32767 undoes float normalization
+ static type conv(const boost::int8_t &num, const double scalar){
+ if (sizeof(type) == sizeof(s16_t)){
+ return type(boost::math::iround(num*scalar*32767));
+ }
+ return type(num*scalar);
+ }
+
+ void set_scalar(const double scalar){
+ for (size_t i = 0; i < sc16_table_len; i++){
+ const boost::uint16_t val = tohost(boost::uint16_t(i & 0xffff));
+ const type real = conv(boost::int8_t(val >> 8), scalar);
+ const type imag = conv(boost::int8_t(val >> 0), scalar);
+ _table[i] = std::complex<type>(real, imag);
+ }
+ }
+
+ void operator()(const input_type &inputs, const output_type &outputs, const size_t nsamps){
+ const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
+ std::complex<type> *output = reinterpret_cast<std::complex<type> *>(outputs[0]);
+
+ size_t num_samps = nsamps;
+
+ if ((size_t(inputs[0]) & 0x3) != 0){
+ const item32_t item0 = *input++;
+ *output++ = _table[boost::uint16_t(item0 >> hi_shift)];
+ num_samps--;
+ }
+
+ const size_t num_pairs = num_samps/2;
+ for (size_t i = 0, j = 0; i < num_pairs; i++, j+=2){
+ const item32_t item_i = (input[i]);
+ output[j] = _table[boost::uint16_t(item_i >> lo_shift)];
+ output[j + 1] = _table[boost::uint16_t(item_i >> hi_shift)];
+ }
+
+ if (num_samps != num_pairs*2){
+ const item32_t item_n = input[num_pairs];
+ output[num_samps-1] = _table[boost::uint16_t(item_n >> lo_shift)];
+ }
+ }
+
+private:
+ std::vector<std::complex<type> > _table;
+};
+
+/***********************************************************************
+ * Factory functions and registration
+ **********************************************************************/
+
+#ifdef BOOST_BIG_ENDIAN
+# define SHIFT_PAIR0 16, 0
+# define SHIFT_PAIR1 0, 16
+# define BE_SWAP false
+# define LE_SWAP true
+#else
+# define SHIFT_PAIR0 0, 16
+# define SHIFT_PAIR1 16, 0
+# define BE_SWAP true
+# define LE_SWAP false
+#endif
+
+static converter::sptr make_convert_sc16_item32_be_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<float, uhd::ntohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc16_item32_be_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<double, uhd::ntohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc16_item32_le_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<float, uhd::wtohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc16_item32_le_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc16_item32_1_to_fcxx_1<double, uhd::wtohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_be_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<float, uhd::ntohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_be_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<double, uhd::ntohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_le_1_to_fc32_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<float, uhd::wtohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc8_item32_le_1_to_fc64_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<double, uhd::wtohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc8_item32_be_1_to_sc16_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<s16_t, uhd::ntohx, SHIFT_PAIR1>());
+}
+
+static converter::sptr make_convert_sc8_item32_le_1_to_sc16_1(void){
+ return converter::sptr(new convert_sc8_item32_1_to_fcxx_1<s16_t, uhd::wtohx, SHIFT_PAIR0>());
+}
+
+static converter::sptr make_convert_sc16_1_to_sc8_item32_be_1(void){
+ return converter::sptr(new convert_sc16_1_to_sc8_item32_1<BE_SWAP>());
+}
+
+static converter::sptr make_convert_sc16_1_to_sc8_item32_le_1(void){
+ return converter::sptr(new convert_sc16_1_to_sc8_item32_1<LE_SWAP>());
+}
+
+UHD_STATIC_BLOCK(register_convert_sc16_item32_1_to_fcxx_1){
+ uhd::convert::id_type id;
+ id.num_inputs = 1;
+ id.num_outputs = 1;
+
+ id.output_format = "fc32";
+ id.input_format = "sc16_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_be_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc16_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_be_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "fc32";
+ id.input_format = "sc16_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_le_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc16_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc16_item32_le_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "fc32";
+ id.input_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_be_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_be_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "fc32";
+ id.input_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_le_1_to_fc32_1, PRIORITY_TABLE);
+
+ id.output_format = "fc64";
+ id.input_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_le_1_to_fc64_1, PRIORITY_TABLE);
+
+ id.output_format = "sc16";
+ id.input_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_be_1_to_sc16_1, PRIORITY_TABLE);
+
+ id.output_format = "sc16";
+ id.input_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc8_item32_le_1_to_sc16_1, PRIORITY_TABLE);
+
+ id.input_format = "sc16";
+ id.output_format = "sc8_item32_be";
+ uhd::convert::register_converter(id, &make_convert_sc16_1_to_sc8_item32_be_1, PRIORITY_TABLE);
+
+ id.input_format = "sc16";
+ id.output_format = "sc8_item32_le";
+ uhd::convert::register_converter(id, &make_convert_sc16_1_to_sc8_item32_le_1, PRIORITY_TABLE);
+}
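
The table converters above spend one pass in set_scalar() precomputing every possible 16-bit input, after which each sample costs only table reads. A cut-down standalone sketch of the sc16_item32 to fc32 case (the real class template also folds the wire byte order into the table via tohost() and picks the shift pair based on host endianness, which is why those are template parameters):

    #include <complex>
    #include <cstdint>
    #include <vector>

    //illustrative sc16 -> fc32 lookup table: every 16-bit half-word is pre-scaled once
    class sc16_to_fc32_table{
    public:
        explicit sc16_to_fc32_table(const double scalar): _table(1 << 16){
            for (size_t i = 0; i < _table.size(); i++){
                _table[i] = float(int16_t(uint16_t(i))*scalar);
            }
        }
        std::complex<float> operator()(const uint32_t item) const{
            return std::complex<float>(
                _table[uint16_t(item >> 16)], //real from the upper half
                _table[uint16_t(item >> 0)]   //imag from the lower half
            );
        }
    private:
        std::vector<float> _table;
    };

    int main(void){
        const sc16_to_fc32_table convert(1./32767);
        const std::complex<float> samp = convert((uint32_t(16384) << 16) | uint16_t(16384));
        (void)samp; //approximately (0.5, 0.5)
        return 0;
    }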
diff --git a/host/lib/convert/gen_convert_general.py b/host/lib/convert/gen_convert_general.py
new file mode 100644
index 000000000..b0790755a
--- /dev/null
+++ b/host/lib/convert/gen_convert_general.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+#
+# Copyright 2011-2012 Ettus Research LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+TMPL_HEADER = """
+#import time
+/***********************************************************************
+ * This file was generated by $file on $time.strftime("%c")
+ **********************************************************************/
+
+\#include "convert_common.hpp"
+\#include <uhd/utils/byteswap.hpp>
+
+using namespace uhd::convert;
+"""
+
+TMPL_CONV_GEN2_ITEM32 = """
+DECLARE_CONVERTER(item32, 1, sc16_item32_$(end), 1, PRIORITY_GENERAL){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ output[i] = $(to_wire)(input[i]);
+ }
+}
+
+DECLARE_CONVERTER(sc16_item32_$(end), 1, item32, 1, PRIORITY_GENERAL){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ for (size_t i = 0; i < nsamps; i++){
+ output[i] = $(to_host)(input[i]);
+ }
+}
+"""
+
+TMPL_CONV_USRP1_COMPLEX = """
+DECLARE_CONVERTER($(cpu_type), $(width), sc16_item16_usrp1, 1, PRIORITY_GENERAL){
+ #for $w in range($width)
+ const $(cpu_type)_t *input$(w) = reinterpret_cast<const $(cpu_type)_t *>(inputs[$(w)]);
+ #end for
+ boost::uint16_t *output = reinterpret_cast<boost::uint16_t *>(outputs[0]);
+
+ for (size_t i = 0, j = 0; i < nsamps; i++){
+ #for $w in range($width)
+ output[j++] = $(to_wire)(boost::uint16_t(boost::int16_t(input$(w)[i].real()$(do_scale))));
+ output[j++] = $(to_wire)(boost::uint16_t(boost::int16_t(input$(w)[i].imag()$(do_scale))));
+ #end for
+ }
+}
+
+DECLARE_CONVERTER(sc16_item16_usrp1, 1, $(cpu_type), $(width), PRIORITY_GENERAL){
+ const boost::uint16_t *input = reinterpret_cast<const boost::uint16_t *>(inputs[0]);
+ #for $w in range($width)
+ $(cpu_type)_t *output$(w) = reinterpret_cast<$(cpu_type)_t *>(outputs[$(w)]);
+ #end for
+
+ for (size_t i = 0, j = 0; i < nsamps; i++){
+ #for $w in range($width)
+ output$(w)[i] = $(cpu_type)_t(
+ boost::int16_t($(to_host)(input[j+0]))$(do_scale),
+ boost::int16_t($(to_host)(input[j+1]))$(do_scale)
+ );
+ j += 2;
+ #end for
+ }
+}
+
+DECLARE_CONVERTER(sc8_item16_usrp1, 1, $(cpu_type), $(width), PRIORITY_GENERAL){
+ const boost::uint16_t *input = reinterpret_cast<const boost::uint16_t *>(inputs[0]);
+ #for $w in range($width)
+ $(cpu_type)_t *output$(w) = reinterpret_cast<$(cpu_type)_t *>(outputs[$(w)]);
+ #end for
+
+ for (size_t i = 0, j = 0; i < nsamps; i++){
+ #for $w in range($width)
+ {
+ const boost::uint16_t num = $(to_host)(input[j++]);
+ output$(w)[i] = $(cpu_type)_t(
+ boost::int8_t(num)$(do_scale),
+ boost::int8_t(num >> 8)$(do_scale)
+ );
+ }
+ #end for
+ }
+}
+"""
+
+def parse_tmpl(_tmpl_text, **kwargs):
+ from Cheetah.Template import Template
+ return str(Template(_tmpl_text, kwargs))
+
+if __name__ == '__main__':
+ import sys, os
+ file = os.path.basename(__file__)
+ output = parse_tmpl(TMPL_HEADER, file=file)
+
+ #generate complex converters for all gen2 platforms
+ for end, to_host, to_wire in (
+ ('be', 'uhd::ntohx', 'uhd::htonx'),
+ ('le', 'uhd::wtohx', 'uhd::htowx'),
+ ):
+ output += parse_tmpl(
+ TMPL_CONV_GEN2_ITEM32,
+ end=end, to_host=to_host, to_wire=to_wire
+ )
+
+ #generate complex converters for usrp1 format
+ for width in 1, 2, 4:
+ for cpu_type, do_scale in (
+ ('fc64', '*scale_factor'),
+ ('fc32', '*float(scale_factor)'),
+ ('sc16', ''),
+ ):
+ output += parse_tmpl(
+ TMPL_CONV_USRP1_COMPLEX,
+ width=width, to_host='uhd::wtohx', to_wire='uhd::htowx',
+ cpu_type=cpu_type, do_scale=do_scale
+ )
+ open(sys.argv[1], 'w').write(output)
diff --git a/host/lib/convert/sse2_fc32_to_sc16.cpp b/host/lib/convert/sse2_fc32_to_sc16.cpp
new file mode 100644
index 000000000..90bf0ed04
--- /dev/null
+++ b/host/lib/convert/sse2_fc32_to_sc16.cpp
@@ -0,0 +1,103 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_le, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+
+ #define convert_fc32_1_to_item32_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128 tmplo = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmphi = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpilo = _mm_cvtps_epi32(_mm_mul_ps(tmplo, scalar)); \
+ __m128i tmpihi = _mm_cvtps_epi32(_mm_mul_ps(tmphi, scalar)); \
+ \
+ /* pack + swap 16-bit pairs */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
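+    //note: fc32 samples are 8 bytes, so the pointer is typically 16-byte aligned (0x0) or
+    //half aligned (0x8); converting one sample scalar-style reaches alignment, then the
+    //case falls through to the aligned loop; any other offset uses the unaligned loop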
+ switch (size_t(input) & 0xf){
+ case 0x8:
+ xx_to_item32_sc16<uhd::htowx>(input, output, 1, scale_factor); i++;
+ case 0x0:
+ convert_fc32_1_to_item32_1_nswap_guts(_)
+ break;
+ default: convert_fc32_1_to_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc16<uhd::htowx>(input+i, output+i, nsamps-i, scale_factor);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc16_item32_be, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+
+ #define convert_fc32_1_to_item32_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128 tmplo = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmphi = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpilo = _mm_cvtps_epi32(_mm_mul_ps(tmplo, scalar)); \
+ __m128i tmpihi = _mm_cvtps_epi32(_mm_mul_ps(tmphi, scalar)); \
+ \
+ /* pack + byteswap -> byteswap 16 bit words */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ switch (size_t(input) & 0xf){
+ case 0x8:
+ xx_to_item32_sc16<uhd::htonx>(input, output, 1, scale_factor); i++;
+ case 0x0:
+ convert_fc32_1_to_item32_1_bswap_guts(_)
+ break;
+ default: convert_fc32_1_to_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc16<uhd::htonx>(input+i, output+i, nsamps-i, scale_factor);
+}
diff --git a/host/lib/convert/sse2_fc32_to_sc8.cpp b/host/lib/convert/sse2_fc32_to_sc8.cpp
new file mode 100644
index 000000000..dd884640d
--- /dev/null
+++ b/host/lib/convert/sse2_fc32_to_sc8.cpp
@@ -0,0 +1,115 @@
+//
+// Copyright 2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+template <const int shuf>
+UHD_INLINE __m128i pack_sc32_4x(
+ const __m128 &in0, const __m128 &in1,
+ const __m128 &in2, const __m128 &in3,
+ const __m128 &scalar
+){
+ __m128i tmpi0 = _mm_cvtps_epi32(_mm_mul_ps(in0, scalar));
+ tmpi0 = _mm_shuffle_epi32(tmpi0, shuf);
+ __m128i tmpi1 = _mm_cvtps_epi32(_mm_mul_ps(in1, scalar));
+ tmpi1 = _mm_shuffle_epi32(tmpi1, shuf);
+ const __m128i lo = _mm_packs_epi32(tmpi0, tmpi1);
+
+ __m128i tmpi2 = _mm_cvtps_epi32(_mm_mul_ps(in2, scalar));
+ tmpi2 = _mm_shuffle_epi32(tmpi2, shuf);
+ __m128i tmpi3 = _mm_cvtps_epi32(_mm_mul_ps(in3, scalar));
+ tmpi3 = _mm_shuffle_epi32(tmpi3, shuf);
+ const __m128i hi = _mm_packs_epi32(tmpi2, tmpi3);
+
+ return _mm_packs_epi16(lo, hi);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_be, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+ const int shuf = _MM_SHUFFLE(1, 0, 3, 2);
+
+ #define convert_fc32_1_to_sc8_item32_1_bswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128 tmp0 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmp1 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ __m128 tmp2 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+4)); \
+ __m128 tmp3 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+6)); \
+ \
+ /* convert */ \
+ const __m128i tmpi = pack_sc32_4x<shuf>(tmp0, tmp1, tmp2, tmp3, scalar); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc32_1_to_sc8_item32_1_bswap_guts(_)
+ }
+ else{
+ convert_fc32_1_to_sc8_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc8<uhd::htonx>(input+i, output+(i/2), nsamps-i, scale_factor);
+}
+
+DECLARE_CONVERTER(fc32, 1, sc8_item32_le, 1, PRIORITY_SIMD){
+ const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor));
+ const int shuf = _MM_SHUFFLE(2, 3, 0, 1);
+
+ #define convert_fc32_1_to_sc8_item32_1_nswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128 tmp0 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
+ __m128 tmp1 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
+ __m128 tmp2 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+4)); \
+ __m128 tmp3 = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+6)); \
+ \
+ /* convert */ \
+ const __m128i tmpi = pack_sc32_4x<shuf>(tmp0, tmp1, tmp2, tmp3, scalar); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc32_1_to_sc8_item32_1_nswap_guts(_)
+ }
+ else{
+ convert_fc32_1_to_sc8_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc8<uhd::htowx>(input+i, output+(i/2), nsamps-i, scale_factor);
+}
diff --git a/host/lib/convert/sse2_fc64_to_sc16.cpp b/host/lib/convert/sse2_fc64_to_sc16.cpp
new file mode 100644
index 000000000..f030e9168
--- /dev/null
+++ b/host/lib/convert/sse2_fc64_to_sc16.cpp
@@ -0,0 +1,111 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(fc64, 1, sc16_item32_le, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_item32_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpi0 = _mm_cvttpd_epi32(_mm_mul_pd(tmp0, scalar)); \
+ __m128i tmpi1 = _mm_cvttpd_epi32(_mm_mul_pd(tmp1, scalar)); \
+ __m128i tmpilo = _mm_unpacklo_epi64(tmpi0, tmpi1); \
+ __m128i tmpi2 = _mm_cvttpd_epi32(_mm_mul_pd(tmp2, scalar)); \
+ __m128i tmpi3 = _mm_cvttpd_epi32(_mm_mul_pd(tmp3, scalar)); \
+ __m128i tmpihi = _mm_unpacklo_epi64(tmpi2, tmpi3); \
+ \
+ /* pack + swap 16-bit pairs */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_item32_1_nswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc16<uhd::htowx>(input+i, output+i, nsamps-i, scale_factor);
+}
+
+DECLARE_CONVERTER(fc64, 1, sc16_item32_be, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_item32_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ \
+ /* convert and scale */ \
+ __m128i tmpi0 = _mm_cvttpd_epi32(_mm_mul_pd(tmp0, scalar)); \
+ __m128i tmpi1 = _mm_cvttpd_epi32(_mm_mul_pd(tmp1, scalar)); \
+ __m128i tmpilo = _mm_unpacklo_epi64(tmpi0, tmpi1); \
+ __m128i tmpi2 = _mm_cvttpd_epi32(_mm_mul_pd(tmp2, scalar)); \
+ __m128i tmpi3 = _mm_cvttpd_epi32(_mm_mul_pd(tmp3, scalar)); \
+ __m128i tmpihi = _mm_unpacklo_epi64(tmpi2, tmpi3); \
+ \
+ /* pack, then byteswap each 16-bit word for the wire */ \
+ __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi); \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_item32_1_bswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc16<uhd::htonx>(input+i, output+i, nsamps-i, scale_factor);
+}
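+
+ #if 0 // editor's illustrative sketch, not compiled: the shift/or byteswap trick.
+ // _mm_or_si128(_mm_srli_epi16(x, 8), _mm_slli_epi16(x, 8)) exchanges the two
+ // bytes of every 16-bit lane, i.e. it is a 16-bit byteswap applied to eight
+ // lanes at once. The per-lane scalar equivalent is simply:
+ static unsigned short bswap16_scalar(const unsigned short x)
+ {
+ return (unsigned short)((x >> 8) | (x << 8));
+ }
+ #endif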
diff --git a/host/lib/convert/sse2_fc64_to_sc8.cpp b/host/lib/convert/sse2_fc64_to_sc8.cpp
new file mode 100644
index 000000000..bf3719e13
--- /dev/null
+++ b/host/lib/convert/sse2_fc64_to_sc8.cpp
@@ -0,0 +1,129 @@
+//
+// Copyright 2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+UHD_INLINE __m128i pack_sc8_item32_4x(
+ const __m128i &in0, const __m128i &in1,
+ const __m128i &in2, const __m128i &in3
+){
+ const __m128i lo = _mm_packs_epi32(in0, in1);
+ const __m128i hi = _mm_packs_epi32(in2, in3);
+ return _mm_packs_epi16(lo, hi);
+}
+
+UHD_INLINE __m128i pack_sc32_4x(
+ const __m128d &lo, const __m128d &hi,
+ const __m128d &scalar
+){
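+ //note: the low 64 bits of the result come from 'hi' and the high 64 bits
+ //from 'lo' -- see the argument order used by the callers below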
+ const __m128i tmpi_lo = _mm_cvttpd_epi32(_mm_mul_pd(hi, scalar));
+ const __m128i tmpi_hi = _mm_cvttpd_epi32(_mm_mul_pd(lo, scalar));
+ return _mm_unpacklo_epi64(tmpi_lo, tmpi_hi);
+}
+
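+ #if 0 // editor's illustrative sketch, not compiled: the saturating narrow step.
+ // pack_sc32_4x converts and scales two complex doubles into four int32 lanes;
+ // pack_sc8_item32_4x then narrows 16 int32 lanes down to 16 sc8 bytes. Because
+ // _mm_packs_epi32/_mm_packs_epi16 narrow with signed saturation, a scaled value
+ // outside the sc8 range clamps rather than wraps. saturate_to_sc8 is a
+ // hypothetical per-value equivalent:
+ static signed char saturate_to_sc8(const double x, const double scale)
+ {
+ const double v = x*scale;
+ if (v > 127.0) return 127;
+ if (v < -128.0) return -128;
+ return (signed char)(v); // truncation toward zero, as with _mm_cvttpd_epi32
+ }
+ #endif
+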
+DECLARE_CONVERTER(fc64, 1, sc8_item32_be, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_sc8_item32_1_bswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ __m128d tmp4 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+4)); \
+ __m128d tmp5 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+5)); \
+ __m128d tmp6 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+6)); \
+ __m128d tmp7 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+7)); \
+ \
+ /* convert, scale, and pack to sc8 */ \
+ const __m128i tmpi = pack_sc8_item32_4x( \
+ pack_sc32_4x(tmp0, tmp1, scalar), \
+ pack_sc32_4x(tmp2, tmp3, scalar), \
+ pack_sc32_4x(tmp4, tmp5, scalar), \
+ pack_sc32_4x(tmp6, tmp7, scalar) \
+ ); \
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_sc8_item32_1_bswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_sc8_item32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc8<uhd::htonx>(input+i, output+(i/2), nsamps-i, scale_factor);
+}
+
+DECLARE_CONVERTER(fc64, 1, sc8_item32_le, 1, PRIORITY_SIMD){
+ const fc64_t *input = reinterpret_cast<const fc64_t *>(inputs[0]);
+ item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor);
+
+ #define convert_fc64_1_to_sc8_item32_1_nswap_guts(_al_) \
+ for (size_t j = 0; i+7 < nsamps; i+=8, j+=4){ \
+ /* load from input */ \
+ __m128d tmp0 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+0)); \
+ __m128d tmp1 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+1)); \
+ __m128d tmp2 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+2)); \
+ __m128d tmp3 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+3)); \
+ __m128d tmp4 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+4)); \
+ __m128d tmp5 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+5)); \
+ __m128d tmp6 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+6)); \
+ __m128d tmp7 = _mm_load ## _al_ ## pd(reinterpret_cast<const double *>(input+i+7)); \
+ \
+ /* convert, scale, and pack to sc8 */ \
+ __m128i tmpi = pack_sc8_item32_4x( \
+ pack_sc32_4x(tmp1, tmp0, scalar), \
+ pack_sc32_4x(tmp3, tmp2, scalar), \
+ pack_sc32_4x(tmp5, tmp4, scalar), \
+ pack_sc32_4x(tmp7, tmp6, scalar) \
+ ); \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); /*byteswap*/\
+ \
+ /* store to output */ \
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(output+j), tmpi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(input) & 0xf) == 0){
+ convert_fc64_1_to_sc8_item32_1_nswap_guts(_)
+ }
+ else{
+ convert_fc64_1_to_sc8_item32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ xx_to_item32_sc8<uhd::htowx>(input+i, output+(i/2), nsamps-i, scale_factor);
+}
diff --git a/host/lib/convert/sse2_sc16_to_fc32.cpp b/host/lib/convert/sse2_sc16_to_fc32.cpp
new file mode 100644
index 000000000..c03e41585
--- /dev/null
+++ b/host/lib/convert/sse2_sc16_to_fc32.cpp
@@ -0,0 +1,107 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor)/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc32_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack + swap 16-bit pairs */ \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128 tmplo = _mm_mul_ps(_mm_cvtepi32_ps(tmpilo), scalar); \
+ __m128 tmphi = _mm_mul_ps(_mm_cvtepi32_ps(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+0), tmplo); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+2), tmphi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ switch (size_t(output) & 0xf){
+ case 0x8:
+ item32_sc16_to_xx<uhd::htowx>(input, output, 1, scale_factor); i++;
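+ //fall through: converting one sample makes the output 16-byte aligned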
+ case 0x0:
+ convert_item32_1_to_fc32_1_nswap_guts(_)
+ break;
+ default: convert_item32_1_to_fc32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc16_to_xx<uhd::htowx>(input+i, output+i, nsamps-i, scale_factor);
+}
+
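+ #if 0 // editor's illustrative sketch, not compiled: the case 0x8 fix-up above.
+ // fc32_t is a complex<float> (8 bytes). When the output buffer is 8-byte but
+ // not 16-byte aligned, converting a single sample with the scalar helper
+ // advances the output pointer by 8 bytes, so the remaining vector stores can
+ // use the aligned path. The helper name below is hypothetical.
+ static bool is_16_byte_aligned(const fc32_t *p){ return (size_t(p) & 0xf) == 0; }
+ // e.g. if (size_t(output) & 0xf) == 0x8, then is_16_byte_aligned(output + 1) holds.
+ #endif
+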
+DECLARE_CONVERTER(sc16_item32_be, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor)/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc32_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* byteswap each 16-bit word, then unpack */ \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128 tmplo = _mm_mul_ps(_mm_cvtepi32_ps(tmpilo), scalar); \
+ __m128 tmphi = _mm_mul_ps(_mm_cvtepi32_ps(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+0), tmplo); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+i+2), tmphi); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ switch (size_t(output) & 0xf){
+ case 0x8:
+ item32_sc16_to_xx<uhd::htonx>(input, output, 1, scale_factor); i++;
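+ //fall through: converting one sample makes the output 16-byte aligned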
+ case 0x0:
+ convert_item32_1_to_fc32_1_bswap_guts(_)
+ break;
+ default: convert_item32_1_to_fc32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc16_to_xx<uhd::htonx>(input+i, output+i, nsamps-i, scale_factor);
+}
diff --git a/host/lib/convert/sse2_sc16_to_fc64.cpp b/host/lib/convert/sse2_sc16_to_fc64.cpp
new file mode 100644
index 000000000..66068cad9
--- /dev/null
+++ b/host/lib/convert/sse2_sc16_to_fc64.cpp
@@ -0,0 +1,115 @@
+//
+// Copyright 2011-2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+DECLARE_CONVERTER(sc16_item32_le, 1, fc64, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc64_1_nswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack + swap 16-bit pairs */ \
+ tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128d tmp0 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ tmpilo = _mm_unpackhi_epi64(tmpilo, zeroi); \
+ __m128d tmp1 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ __m128d tmp2 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ tmpihi = _mm_unpackhi_epi64(tmpihi, zeroi); \
+ __m128d tmp3 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+0), tmp0); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+1), tmp1); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+2), tmp2); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+3), tmp3); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_item32_1_to_fc64_1_nswap_guts(_)
+ }
+ else{
+ convert_item32_1_to_fc64_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc16_to_xx<uhd::htowx>(input+i, output+i, nsamps-i, scale_factor);
+}
+
+DECLARE_CONVERTER(sc16_item32_be, 1, fc64, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(inputs[0]);
+ fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 16));
+ const __m128i zeroi = _mm_setzero_si128();
+
+ #define convert_item32_1_to_fc64_1_bswap_guts(_al_) \
+ for (; i+3 < nsamps; i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* byteswap each 16-bit word, then unpack */ \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
+ __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+ __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi); \
+ \
+ /* convert and scale */ \
+ __m128d tmp0 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ tmpilo = _mm_unpackhi_epi64(tmpilo, zeroi); \
+ __m128d tmp1 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar); \
+ __m128d tmp2 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ tmpihi = _mm_unpackhi_epi64(tmpihi, zeroi); \
+ __m128d tmp3 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+0), tmp0); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+1), tmp1); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+2), tmp2); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+i+3), tmp3); \
+ } \
+
+ size_t i = 0;
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_item32_1_to_fc64_1_bswap_guts(_)
+ }
+ else{
+ convert_item32_1_to_fc64_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc16_to_xx<uhd::htonx>(input+i, output+i, nsamps-i, scale_factor);
+}
diff --git a/host/lib/convert/sse2_sc8_to_fc32.cpp b/host/lib/convert/sse2_sc8_to_fc32.cpp
new file mode 100644
index 000000000..c0e561814
--- /dev/null
+++ b/host/lib/convert/sse2_sc8_to_fc32.cpp
@@ -0,0 +1,132 @@
+//
+// Copyright 2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+static const __m128i zeroi = _mm_setzero_si128();
+
+template <const int shuf>
+UHD_INLINE void unpack_sc32_4x(
+ const __m128i &in,
+ __m128 &out0, __m128 &out1,
+ __m128 &out2, __m128 &out3,
+ const __m128 &scalar
+){
+ const __m128i tmplo = _mm_unpacklo_epi8(zeroi, in); /* value in upper 8 bits */
+ __m128i tmp0 = _mm_shuffle_epi32(_mm_unpacklo_epi16(zeroi, tmplo), shuf); /* value in upper 16 bits */
+ __m128i tmp1 = _mm_shuffle_epi32(_mm_unpackhi_epi16(zeroi, tmplo), shuf);
+ out0 = _mm_mul_ps(_mm_cvtepi32_ps(tmp0), scalar);
+ out1 = _mm_mul_ps(_mm_cvtepi32_ps(tmp1), scalar);
+
+ const __m128i tmphi = _mm_unpackhi_epi8(zeroi, in);
+ __m128i tmp2 = _mm_shuffle_epi32(_mm_unpacklo_epi16(zeroi, tmphi), shuf);
+ __m128i tmp3 = _mm_shuffle_epi32(_mm_unpackhi_epi16(zeroi, tmphi), shuf);
+ out2 = _mm_mul_ps(_mm_cvtepi32_ps(tmp2), scalar);
+ out3 = _mm_mul_ps(_mm_cvtepi32_ps(tmp3), scalar);
+}
+
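+ #if 0 // editor's illustrative sketch, not compiled: why the scale is divided by 2^24.
+ // Unpacking against zero (unpacklo/hi_epi8, then _epi16) lands each signed
+ // 8-bit sample in the top byte of a 32-bit lane, i.e. the lane holds the
+ // sample value times 2^24 with its sign preserved; dividing the scale factor
+ // by (1 << 24) undoes that after the int-to-float conversion.
+ // sc8_to_float_scalar is a hypothetical per-value equivalent:
+ static float sc8_to_float_scalar(const signed char s, const float scale_factor)
+ {
+ const int widened = int(s)*(1 << 24); // what the zero-unpack produces
+ return float(widened)*(scale_factor/(1 << 24)); // ~= float(s)*scale_factor
+ }
+ #endif
+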
+DECLARE_CONVERTER(sc8_item32_be, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor)/(1 << 24));
+ const int shuf = _MM_SHUFFLE(1, 0, 3, 2);
+
+ size_t i = 0, j = 0;
+ size_t num_samps = nsamps;
+
+ if ((size_t(inputs[0]) & 0x3) != 0){
+ item32_sc8_to_xx<uhd::ntohx>(input++, output++, 1, scale_factor);
+ num_samps--;
+ }
+
+ #define convert_sc8_item32_1_to_fc32_1_bswap_guts(_al_) \
+ for (; j+7 < num_samps; j+=8, i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack + swap 8-bit pairs */ \
+ __m128 tmp0, tmp1, tmp2, tmp3; \
+ unpack_sc32_4x<shuf>(tmpi, tmp0, tmp1, tmp2, tmp3, scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+0), tmp0); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+2), tmp1); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+4), tmp2); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+6), tmp3); \
+ }
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_sc8_item32_1_to_fc32_1_bswap_guts(_)
+ }
+ else{
+ convert_sc8_item32_1_to_fc32_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc8_to_xx<uhd::ntohx>(input+i, output+j, num_samps-j, scale_factor);
+}
+
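+ #if 0 // editor's illustrative sketch, not compiled: the ~0x3 fix-up above.
+ // Two sc8 samples share one item32, so a buffer that starts mid-item arrives
+ // as a pointer whose low address bits are set. Masking with ~0x3 backs the
+ // pointer up to the item boundary, and the scalar helper then converts the
+ // single leading sample before the 16-byte-wide loop takes over. The helper
+ // name below is hypothetical.
+ static const item32_t *align_down_to_item(const void *p)
+ {
+ return reinterpret_cast<const item32_t *>(size_t(p) & ~size_t(0x3));
+ }
+ #endif
+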
+DECLARE_CONVERTER(sc8_item32_le, 1, fc32, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
+ fc32_t *output = reinterpret_cast<fc32_t *>(outputs[0]);
+
+ const __m128 scalar = _mm_set_ps1(float(scale_factor)/(1 << 24));
+ const int shuf = _MM_SHUFFLE(2, 3, 0, 1);
+
+ size_t i = 0, j = 0;
+ size_t num_samps = nsamps;
+
+ if ((size_t(inputs[0]) & 0x3) != 0){
+ item32_sc8_to_xx<uhd::wtohx>(input++, output++, 1, scale_factor);
+ num_samps--;
+ }
+
+ #define convert_sc8_item32_1_to_fc32_1_nswap_guts(_al_) \
+ for (; j+7 < num_samps; j+=8, i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack + swap 8-bit pairs */ \
+ __m128 tmp0, tmp1, tmp2, tmp3; \
+ unpack_sc32_4x<shuf>(tmpi, tmp0, tmp1, tmp2, tmp3, scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+0), tmp0); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+2), tmp1); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+4), tmp2); \
+ _mm_store ## _al_ ## ps(reinterpret_cast<float *>(output+j+6), tmp3); \
+ }
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_sc8_item32_1_to_fc32_1_nswap_guts(_)
+ }
+ else{
+ convert_sc8_item32_1_to_fc32_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc8_to_xx<uhd::wtohx>(input+i, output+j, num_samps-j, scale_factor);
+}
diff --git a/host/lib/convert/sse2_sc8_to_fc64.cpp b/host/lib/convert/sse2_sc8_to_fc64.cpp
new file mode 100644
index 000000000..ef9c0fdb4
--- /dev/null
+++ b/host/lib/convert/sse2_sc8_to_fc64.cpp
@@ -0,0 +1,151 @@
+//
+// Copyright 2012 Ettus Research LLC
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+//
+
+#include "convert_common.hpp"
+#include <uhd/utils/byteswap.hpp>
+#include <emmintrin.h>
+
+using namespace uhd::convert;
+
+static const __m128i zeroi = _mm_setzero_si128();
+
+UHD_INLINE void unpack_sc32_8x(
+ const __m128i &in,
+ __m128d &out0, __m128d &out1,
+ __m128d &out2, __m128d &out3,
+ __m128d &out4, __m128d &out5,
+ __m128d &out6, __m128d &out7,
+ const __m128d &scalar
+){
+ const int shuf = _MM_SHUFFLE(1, 0, 3, 2);
+ __m128i tmp;
+
+ const __m128i tmplo = _mm_unpacklo_epi8(zeroi, in); /* value in upper 8 bits */
+ tmp = _mm_unpacklo_epi16(zeroi, tmplo); /* value in upper 16 bits */
+ out0 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out1 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_unpackhi_epi16(zeroi, tmplo);
+ out2 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out3 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+
+ const __m128i tmphi = _mm_unpackhi_epi8(zeroi, in);
+ tmp = _mm_unpacklo_epi16(zeroi, tmphi);
+ out4 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out5 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_unpackhi_epi16(zeroi, tmphi);
+ out6 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+ tmp = _mm_shuffle_epi32(tmp, shuf);
+ out7 = _mm_mul_pd(_mm_cvtepi32_pd(tmp), scalar);
+}
+
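+ #if 0 // editor's illustrative sketch, not compiled: why unpack_sc32_8x keeps shuffling.
+ // _mm_cvtepi32_pd only converts the two low 32-bit lanes of its source, so
+ // after each pair the helper applies _MM_SHUFFLE(1, 0, 3, 2) to swap the
+ // 64-bit halves and bring the next pair of lanes down. Condensed, with a
+ // hypothetical helper name:
+ static void cvt_low_then_high(const __m128i in, __m128d &lo_pair, __m128d &hi_pair)
+ {
+ lo_pair = _mm_cvtepi32_pd(in); // lanes 0 and 1
+ hi_pair = _mm_cvtepi32_pd(_mm_shuffle_epi32(in, _MM_SHUFFLE(1, 0, 3, 2))); // lanes 2 and 3
+ }
+ #endif
+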
+DECLARE_CONVERTER(sc8_item32_be, 1, fc64, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
+ fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 24));
+
+ size_t i = 0, j = 0;
+ size_t num_samps = nsamps;
+
+ if ((size_t(inputs[0]) & 0x3) != 0){
+ item32_sc8_to_xx<uhd::ntohx>(input++, output++, 1, scale_factor);
+ num_samps--;
+ }
+
+ #define convert_sc8_item32_1_to_fc64_1_bswap_guts(_al_) \
+ for (; j+7 < num_samps; j+=8, i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack -- note the outputs are passed pair-swapped below */ \
+ __m128d tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; \
+ unpack_sc32_8x(tmpi, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4, tmp7, tmp6, scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+0), tmp0); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+1), tmp1); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+2), tmp2); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+3), tmp3); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+4), tmp4); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+5), tmp5); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+6), tmp6); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+7), tmp7); \
+ }
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_sc8_item32_1_to_fc64_1_bswap_guts(_)
+ }
+ else{
+ convert_sc8_item32_1_to_fc64_1_bswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc8_to_xx<uhd::ntohx>(input+i, output+j, num_samps-j, scale_factor);
+}
+
+DECLARE_CONVERTER(sc8_item32_le, 1, fc64, 1, PRIORITY_SIMD){
+ const item32_t *input = reinterpret_cast<const item32_t *>(size_t(inputs[0]) & ~0x3);
+ fc64_t *output = reinterpret_cast<fc64_t *>(outputs[0]);
+
+ const __m128d scalar = _mm_set1_pd(scale_factor/(1 << 24));
+
+ size_t i = 0, j = 0;
+ size_t num_samps = nsamps;
+
+ if ((size_t(inputs[0]) & 0x3) != 0){
+ item32_sc8_to_xx<uhd::wtohx>(input++, output++, 1, scale_factor);
+ num_samps--;
+ }
+
+ #define convert_sc8_item32_1_to_fc64_1_nswap_guts(_al_) \
+ for (; j+7 < num_samps; j+=8, i+=4){ \
+ /* load from input */ \
+ __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i *>(input+i)); \
+ \
+ /* unpack */ \
+ __m128d tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; \
+ tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); /*byteswap*/\
+ unpack_sc32_8x(tmpi, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, scalar); \
+ \
+ /* store to output */ \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+0), tmp0); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+1), tmp1); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+2), tmp2); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+3), tmp3); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+4), tmp4); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+5), tmp5); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+6), tmp6); \
+ _mm_store ## _al_ ## pd(reinterpret_cast<double *>(output+j+7), tmp7); \
+ }
+
+ //dispatch according to alignment
+ if ((size_t(output) & 0xf) == 0){
+ convert_sc8_item32_1_to_fc64_1_nswap_guts(_)
+ }
+ else{
+ convert_sc8_item32_1_to_fc64_1_nswap_guts(u_)
+ }
+
+ //convert remainder
+ item32_sc8_to_xx<uhd::wtohx>(input+i, output+j, num_samps-j, scale_factor);
+}