path: root/host/lib/convert/sse2_fc32_to_sc16.cpp
//
// Copyright 2011-2012 Ettus Research LLC
// Copyright 2018 Ettus Research, a National Instruments Company
//
// SPDX-License-Identifier: GPL-3.0-or-later
//

#include "convert_common.hpp"
#include <uhd/utils/byteswap.hpp>
#include <emmintrin.h>

using namespace uhd::convert;
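
// Overview (summary of what follows): this file provides SSE2-accelerated
// converters from fc32 (complex float) host samples to sc16 samples packed
// into 32-bit items, in both the little-endian (item32_le) and big-endian
// (item32_be) wire formats. Each SIMD loop iteration consumes four complex
// samples (eight floats, two 128-bit loads) and produces four item32 words
// (one 128-bit store). The scalar xx_to_item32_sc16 path from
// convert_common.hpp handles the unaligned head sample and whatever tail
// samples the SIMD loop leaves over.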

DECLARE_CONVERTER(fc32, 1, sc16_item32_le, 1, PRIORITY_SIMD){
    const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
    item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);

    const __m128 scalar = _mm_set_ps1(float(scale_factor));

    // this macro converts four complex samples per iteration using SSE2 intrinsics
    #define convert_fc32_1_to_item32_1_nswap_guts(_al_)                 \
    for (; i+3 < nsamps; i+=4){                                         \
        /* load from input */                                           \
        __m128 tmplo = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
        __m128 tmphi = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
                                                                        \
        /* convert and scale */                                         \
        __m128i tmpilo = _mm_cvtps_epi32(_mm_mul_ps(tmplo, scalar));    \
        __m128i tmpihi = _mm_cvtps_epi32(_mm_mul_ps(tmphi, scalar));    \
                                                                        \
        /* pack to int16 with saturation, then swap each I/Q pair of */ \
        /* 16-bit words to match the little-endian item32 layout */    \
        __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi);                 \
        tmpi = _mm_shufflelo_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1));      \
        tmpi = _mm_shufflehi_epi16(tmpi, _MM_SHUFFLE(2, 3, 0, 1));      \
                                                                        \
        /* store to output (unaligned store: output alignment not guaranteed) */ \
        _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi);  \
    }

    size_t i = 0;

    // need to dispatch according to alignment for fastest conversion
    switch (size_t(input) & 0xf){
    case 0x0:
        // the data is 16-byte aligned, so do the fast processing of the bulk of the samples
        convert_fc32_1_to_item32_1_nswap_guts(_)
        break;
    case 0x8:
        // the input is only 8-byte aligned: an fc32 sample is 8 bytes, so convert
        // one sample with the scalar path to 16-byte align the remaining samples
        xx_to_item32_sc16<uhd::htowx>(input, output, 1, scale_factor);
        i++;
        // do faster processing of the bulk of the samples now that we are 16-byte aligned
        convert_fc32_1_to_item32_1_nswap_guts(_)
        break;
    default:
        // we are not 8 or 16-byte aligned, so do fast processing with the unaligned load
        convert_fc32_1_to_item32_1_nswap_guts(u_)
    }

    // convert any remaining samples
    xx_to_item32_sc16<uhd::htowx>(input+i, output+i, nsamps-i, scale_factor);
}
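
// For reference, one iteration of the nswap SIMD loop above is equivalent to
// the following scalar sketch (hypothetical code, not compiled here; clip()
// stands for the saturation to int16 that _mm_packs_epi32 performs, and the
// item32 packing with I in the upper 16 bits matches the scalar
// xx_to_item32_sc16 helper):
//
//   for (size_t j = i; j < i + 4; j++) {
//       const int16_t re = int16_t(clip(input[j].real() * scale_factor));
//       const int16_t im = int16_t(clip(input[j].imag() * scale_factor));
//       output[j] = uhd::htowx<item32_t>(
//           (item32_t(uint16_t(re)) << 16) | item32_t(uint16_t(im)));
//   }
//
// The shufflelo/shufflehi pair with _MM_SHUFFLE(2, 3, 0, 1) swaps the two
// 16-bit words of every 32-bit lane, which on a little-endian host realizes
// exactly that I-high/Q-low item32 byte order without a full byteswap.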

DECLARE_CONVERTER(fc32, 1, sc16_item32_be, 1, PRIORITY_SIMD){
    const fc32_t *input = reinterpret_cast<const fc32_t *>(inputs[0]);
    item32_t *output = reinterpret_cast<item32_t *>(outputs[0]);

    const __m128 scalar = _mm_set_ps1(float(scale_factor));

    // this macro converts four complex samples per iteration using SSE2 intrinsics
    #define convert_fc32_1_to_item32_1_bswap_guts(_al_)                 \
    for (; i+3 < nsamps; i+=4){                                         \
        /* load from input */                                           \
        __m128 tmplo = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+0)); \
        __m128 tmphi = _mm_load ## _al_ ## ps(reinterpret_cast<const float *>(input+i+2)); \
                                                                        \
        /* convert and scale */                                         \
        __m128i tmpilo = _mm_cvtps_epi32(_mm_mul_ps(tmplo, scalar));    \
        __m128i tmpihi = _mm_cvtps_epi32(_mm_mul_ps(tmphi, scalar));    \
                                                                        \
        /* pack to int16 with saturation, then byteswap each 16-bit */  \
        /* word to match the big-endian (network order) item32 layout */ \
        __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi);                 \
        tmpi = _mm_or_si128(_mm_srli_epi16(tmpi, 8), _mm_slli_epi16(tmpi, 8)); \
                                                                        \
        /* store to output (unaligned store: output alignment not guaranteed) */ \
        _mm_storeu_si128(reinterpret_cast<__m128i *>(output+i), tmpi);  \
    }

    size_t i = 0;

    // need to dispatch according to alignment for fastest conversion
    switch (size_t(input) & 0xf){
    case 0x0:
        // the data is 16-byte aligned, so do the fast processing of the bulk of the samples
        convert_fc32_1_to_item32_1_bswap_guts(_)
        break;
    case 0x8:
        // the input is only 8-byte aligned: an fc32 sample is 8 bytes, so convert
        // one sample with the scalar path to 16-byte align the remaining samples
        xx_to_item32_sc16<uhd::htonx>(input, output, 1, scale_factor);
        i++;
        // do faster processing of the remaining samples now that we are 16-byte aligned
        convert_fc32_1_to_item32_1_bswap_guts(_)
        break;
    default:
        // we are not 8 or 16-byte aligned, so do fast processing with the unaligned load
        convert_fc32_1_to_item32_1_bswap_guts(u_)
    }

    // convert any remaining samples
    xx_to_item32_sc16<uhd::htonx>(input+i, output+i, nsamps-i, scale_factor);
}
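
// For reference, the srli/slli/or sequence in the bswap macro above performs
// a 16-bit byteswap on all eight lanes at once; per lane it is equivalent to
// this scalar sketch (hypothetical code, not compiled here):
//
//   uint16_t bswap16(uint16_t x) { return uint16_t((x >> 8) | (x << 8)); }
//
// Applied to the (I, Q) lane order left by _mm_packs_epi32, this produces
// the big-endian (network order) byte layout that uhd::htonx yields in the
// scalar xx_to_item32_sc16 path.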