//
// Copyright 2018 Ettus Research, a National Instruments Company
//
// SPDX-License-Identifier: GPL-3.0-or-later
//
#include "uhd_dpdk_ctx.h"
#include "uhd_dpdk_wait.h"
#include "uhd_dpdk_udp.h"
#include "uhd_dpdk_driver.h"
#include <stdlib.h>
#include <sched.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_log.h>
/* FIXME: Descriptor ring size: Replace with configurable values */
#define DEFAULT_RING_SIZE 512
/* FIXME: Access to this global context is currently unprotected */
struct uhd_dpdk_ctx *ctx = NULL;
/**
 * TODO: Should probably provide a way to get at the thread that services a
 * given port. UHD's first calling thread becomes the master thread. In UHD,
 * consider checking the calling thread and, if it differs, passing the work
 * to the servicing thread and optionally wait()ing on it (e.g. via a
 * condition variable).
 */
/* TODO: For nice scheduling options later, make sure to separate RX and TX activity */
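/**
 * Get the number of detected Ethernet ports.
 * Returns -ENODEV if the global context has not been initialized.
 */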
int uhd_dpdk_port_count(void)
{
if (!ctx)
return -ENODEV;
return ctx->num_ports;
}
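/**
 * Get the link status of a port (nonzero if the link is up).
 * Returns -ENODEV if the context or the port does not exist.
 */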
int uhd_dpdk_port_link_status(unsigned int portid)
{
if (!ctx)
return -ENODEV;
struct uhd_dpdk_port *p = find_port(portid);
if (p) {
struct rte_eth_link link;
rte_eth_link_get_nowait(p->id, &link);
return link.link_status;
}
return -ENODEV;
}
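/**
 * Get the MAC address of a port.
 * Returns the broadcast address (all 0xFF) if the port is not found.
 */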
struct eth_addr uhd_dpdk_get_eth_addr(unsigned int portid)
{
struct eth_addr retval;
memset(retval.addr, 0xff, ETHER_ADDR_LEN);
struct uhd_dpdk_port *p = find_port(portid);
if (p) {
memcpy(retval.addr, p->mac_addr.addr_bytes, ETHER_ADDR_LEN);
}
return retval;
}
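/**
 * Get a port's IPv4 address and (optionally) its netmask.
 * Returns 0 on success, -EINVAL for a NULL ipv4_addr, or -ENODEV if the
 * port is not found.
 */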
int uhd_dpdk_get_ipv4_addr(unsigned int portid, uint32_t *ipv4_addr, uint32_t *netmask)
{
if (!ipv4_addr)
return -EINVAL;
struct uhd_dpdk_port *p = find_port(portid);
if (p) {
*ipv4_addr = p->ipv4_addr;
if (netmask) {
*netmask = p->netmask;
}
return 0;
}
return -ENODEV;
}
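/**
 * Set a port's IPv4 address and netmask.
 * Returns 0 on success or -ENODEV if the port is not found.
 */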
int uhd_dpdk_set_ipv4_addr(unsigned int portid, uint32_t ipv4_addr, uint32_t netmask)
{
struct uhd_dpdk_port *p = find_port(portid);
if (p) {
p->ipv4_addr = ipv4_addr;
p->netmask = netmask;
return 0;
}
return -ENODEV;
}
/*
 * Initialize a given port using default settings, with the RX buffers
 * coming from the rx_mbuf_pool passed as a parameter.
 * FIXME: Assumes one thread/core per port for now
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
struct rte_mempool *rx_mbuf_pool,
unsigned int mtu)
{
int retval;
/* Check for a valid port */
if (port->id >= rte_eth_dev_count())
return -ENODEV;
    /* Set the MTU; on failure, fall back to the device's current MTU */
    retval = rte_eth_dev_set_mtu(port->id, mtu);
    if (retval) {
        uint16_t actual_mtu;
        RTE_LOG(WARNING, EAL, "Port %u: Could not set MTU to %u (error %d)\n",
            port->id, mtu, retval);
        rte_eth_dev_get_mtu(port->id, &actual_mtu);
        RTE_LOG(WARNING, EAL, "Port %u: Current MTU=%u\n", port->id, actual_mtu);
        mtu = actual_mtu;
    }
// Require checksum offloads
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(port->id, &dev_info);
uint64_t rx_offloads = DEV_RX_OFFLOAD_IPV4_CKSUM;
uint64_t tx_offloads = DEV_TX_OFFLOAD_IPV4_CKSUM;
    if ((dev_info.rx_offload_capa & rx_offloads) != rx_offloads) {
        RTE_LOG(WARNING, EAL, "Port %u: Only supports RX offloads 0x%" PRIx64 "\n",
            port->id, dev_info.rx_offload_capa);
        rte_exit(EXIT_FAILURE, "Missing required RX offloads\n");
    }
    if ((dev_info.tx_offload_capa & tx_offloads) != tx_offloads) {
        RTE_LOG(WARNING, EAL, "Port %u: Only supports TX offloads 0x%" PRIx64 "\n",
            port->id, dev_info.tx_offload_capa);
        rte_exit(EXIT_FAILURE, "Missing required TX offloads\n");
    }
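    /* Note: both the legacy rxmode flag bits (jumbo_frame, hw_ip_checksum)
     * and the newer offloads bitfield are set below. With
     * ignore_offload_bitfield left at 0, DPDK releases on the 17.x
     * transitional API take the legacy flags as authoritative. */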
struct rte_eth_conf port_conf = {
.rxmode = {
.offloads = rx_offloads | DEV_RX_OFFLOAD_JUMBO_FRAME,
.max_rx_pkt_len = mtu,
.jumbo_frame = 1,
.hw_ip_checksum = 1,
.ignore_offload_bitfield = 0,
},
.txmode = {
.offloads = tx_offloads,
}
};
    /* Configure the Ethernet device with defaults: 1 RX ring, 1 TX ring */
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;
uint16_t rx_desc = DEFAULT_RING_SIZE;
uint16_t tx_desc = DEFAULT_RING_SIZE;
retval = rte_eth_dev_adjust_nb_rx_tx_desc(port->id, &rx_desc, &tx_desc);
if (retval != 0)
return retval;
if (rx_desc != DEFAULT_RING_SIZE)
RTE_LOG(WARNING, EAL, "RX descriptors changed to %d\n", rx_desc);
if (tx_desc != DEFAULT_RING_SIZE)
RTE_LOG(WARNING, EAL, "TX descriptors changed to %d\n", tx_desc);
retval = rte_eth_rx_queue_setup(port->id, 0, rx_desc,
rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool);
if (retval < 0)
return retval;
struct rte_eth_txconf txconf = {
.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM
};
retval = rte_eth_tx_queue_setup(port->id, 0, tx_desc,
rte_eth_dev_socket_id(port->id), &txconf);
if (retval < 0)
goto port_init_fail;
/* Create the hash table for the RX sockets */
char name[32];
snprintf(name, sizeof(name), "rx_table_%u", port->id);
struct rte_hash_parameters hash_params = {
.name = name,
.entries = UHD_DPDK_MAX_SOCKET_CNT,
.key_len = sizeof(struct uhd_dpdk_ipv4_5tuple),
.hash_func = NULL,
.hash_func_init_val = 0,
};
port->rx_table = rte_hash_create(&hash_params);
if (port->rx_table == NULL) {
retval = rte_errno;
goto port_init_fail;
}
/* Create ARP table */
snprintf(name, sizeof(name), "arp_table_%u", port->id);
hash_params.name = name;
hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT;
hash_params.key_len = sizeof(uint32_t);
hash_params.hash_func = NULL;
hash_params.hash_func_init_val = 0;
port->arp_table = rte_hash_create(&hash_params);
if (port->arp_table == NULL) {
retval = rte_errno;
goto free_rx_table;
}
/* Set up list for TX queues */
LIST_INIT(&port->txq_list);
/* Start the Ethernet port. */
retval = rte_eth_dev_start(port->id);
if (retval < 0) {
goto free_arp_table;
}
/* Display the port MAC address. */
rte_eth_macaddr_get(port->id, &port->mac_addr);
RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n",
(unsigned)port->id,
port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1],
port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3],
port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]);
return 0;
free_arp_table:
    rte_hash_free(port->arp_table);
free_rx_table:
    rte_hash_free(port->rx_table);
port_init_fail:
    /* Return the captured error code; rte_errno may be stale by this point */
    return retval;
}
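/**
 * Initialize a thread's per-lcore data: the mbuf pools for its CPU socket,
 * its port list, and the rings used to pass socket requests and waiters
 * to it.
 */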
static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int lcore)
{
if (!ctx || !thread)
return -EINVAL;
unsigned int socket_id = rte_lcore_to_socket_id(lcore);
thread->lcore = lcore;
thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id];
thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id];
LIST_INIT(&thread->port_list);
char name[32];
snprintf(name, sizeof(name), "sockreq_ring_%u", lcore);
thread->sock_req_ring = rte_ring_create(
name,
UHD_DPDK_MAX_PENDING_SOCK_REQS,
socket_id,
RING_F_SC_DEQ
);
if (!thread->sock_req_ring)
return -ENOMEM;
snprintf(name, sizeof(name), "waiter_ring_%u", lcore);
thread->waiter_ring = rte_ring_create(
name,
UHD_DPDK_MAX_WAITERS,
socket_id,
RING_F_SC_DEQ
);
if (!thread->waiter_ring)
return -ENOMEM;
return 0;
}
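/**
 * Initialize DPDK's EAL (consuming the EAL arguments in argv) and allocate
 * the global uhd_dpdk_ctx. Returns 1 if the context already exists, 0 on
 * success, or a negative errno on failure.
 *
 * Typical call sequence (a sketch only; the EAL arguments and mapping
 * values below are illustrative placeholders, not recommendations):
 *
 *   const char *args[] = { "uhd", "-l", "0,1" };
 *   uhd_dpdk_init(3, args);
 *   int port_map[] = { 1 };  // port 0 serviced by lcore 1
 *   uhd_dpdk_start(1, port_map, 4096, 64, 1500);
 */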
int uhd_dpdk_init(int argc, const char **argv)
{
/* Init context only once */
if (ctx)
return 1;
    /* Grab the arguments intended for DPDK's EAL */
    int ret = rte_eal_init(argc, (char **) argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id());
if (!ctx)
return -ENOMEM;
ctx->num_threads = rte_lcore_count();
if (ctx->num_threads <= 1)
rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n");
/* Check that we have ports to send/receive on */
ctx->num_ports = rte_eth_dev_count();
if (ctx->num_ports < 1)
rte_exit(EXIT_FAILURE, "Error: Found no ports\n");
/* Get memory for thread and port data structures */
ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0);
if (!ctx->threads)
rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n");
ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0);
if (!ctx->ports)
rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n");
for (size_t i = 0; i < ctx->num_ports; i++) {
struct uhd_dpdk_port *port = &ctx->ports[i];
port->id = i;
rte_eth_macaddr_get(port->id, &port->mac_addr);
}
return 0;
}
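/**
 * Create the per-socket mbuf pools, initialize the worker thread
 * structures, assign ports to lcores per port_thread_mapping (an entry < 0
 * skips that port), bring the ports up, and launch the I/O threads.
 */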
int uhd_dpdk_start(unsigned int num_ports, int *port_thread_mapping,
int num_mbufs, int mbuf_cache_size, int mtu)
{
if (!ctx)
return -EIO;
if ((num_ports == 0) || (port_thread_mapping == NULL)) {
return -EINVAL;
}
if (ctx->num_ports < num_ports)
rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n");
/* Initialize the thread data structures */
for (int i = rte_get_next_lcore(-1, 1, 0);
(i < RTE_MAX_LCORE);
i = rte_get_next_lcore(i, 1, 0))
{
        /* Create one RX mempool and one TX mempool per CPU socket */
        unsigned int socket_id = rte_lcore_to_socket_id(i);
        /* FIXME: Probably want to take the actual number of ports per socket into account */
        if (ctx->tx_pktbuf_pools[socket_id] == NULL) {
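            /* mbuf data room: the MTU plus 2048 bytes of slack (room for
             * packet headers and alignment) plus the standard mbuf headroom */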
const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM;
char name[32];
snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id);
ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
name,
ctx->num_ports*num_mbufs,
mbuf_cache_size,
0,
mbuf_size,
socket_id
);
snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id);
ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
name,
ctx->num_ports*num_mbufs,
mbuf_cache_size,
0,
mbuf_size,
socket_id
);
if ((ctx->rx_pktbuf_pools[socket_id]== NULL) ||
(ctx->tx_pktbuf_pools[socket_id]== NULL))
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0)
rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i);
}
unsigned master_lcore = rte_get_master_lcore();
/* Assign ports to threads and initialize the port data structures */
for (unsigned int i = 0; i < num_ports; i++) {
int thread_id = port_thread_mapping[i];
if (thread_id < 0)
continue;
if (((unsigned int) thread_id) == master_lcore)
RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i);
if (ctx->threads[thread_id].lcore != (unsigned int) thread_id)
rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i);
struct uhd_dpdk_port *port = &ctx->ports[i];
port->parent = &ctx->threads[thread_id];
ctx->threads[thread_id].num_ports++;
LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry);
        /* Initialize the port */
        if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %u\n", i);
}
RTE_LOG(INFO, EAL, "Waiting for links to come up...\n");
rte_delay_ms(1000);
for (unsigned int i = 0; i < num_ports; i++) {
struct uhd_dpdk_port *port = &ctx->ports[i];
if (!port->parent)
continue;
struct rte_eth_link link;
rte_eth_link_get(i, &link);
RTE_LOG(INFO, EAL, "Port %u UP: %d, %u Mbps\n", i,
link.link_status, link.link_speed);
}
RTE_LOG(INFO, EAL, "Init DONE!\n");
/* FIXME: Create functions to do this */
RTE_LOG(INFO, EAL, "Starting I/O threads!\n");
cpu_set_t io_cpuset;
CPU_ZERO(&io_cpuset);
for (int i = rte_get_next_lcore(-1, 1, 0);
(i < RTE_MAX_LCORE);
i = rte_get_next_lcore(i, 1, 0))
{
struct uhd_dpdk_thread *t = &ctx->threads[i];
if (!LIST_EMPTY(&t->port_list)) {
rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].lcore);
struct uhd_dpdk_wait_req *waiter = uhd_dpdk_waiter_alloc(UHD_DPDK_WAIT_SIMPLE);
if (!waiter) {
rte_exit(EXIT_FAILURE, "%s: Failed to get wait request\n", __func__);
}
uhd_dpdk_waiter_get(waiter);
uhd_dpdk_waiter_wait(waiter, -1, &ctx->threads[i]);
uhd_dpdk_waiter_put(waiter);
CPU_OR(&io_cpuset, &io_cpuset, &t->cpu_affinity);
}
}
cpu_set_t user_cpuset;
CPU_ZERO(&user_cpuset);
for (int i = 0; i < CPU_SETSIZE; i++) {
CPU_SET(i, &user_cpuset);
}
CPU_XOR(&user_cpuset, &user_cpuset, &io_cpuset);
if (pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &user_cpuset)) {
rte_exit(EXIT_FAILURE, "%s: Failed to set CPU affinity\n", __func__);
}
return 0;
}
/* FIXME: This will be changed once we have functions to handle the threads */
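/**
 * Submit a termination request to every active I/O thread, then free the
 * request. Returns 0 on success or a negative errno on failure.
 */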
int uhd_dpdk_destroy(void)
{
if (!ctx)
return -ENODEV;
struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0);
if (!req)
return -ENOMEM;
req->waiter = uhd_dpdk_waiter_alloc(UHD_DPDK_WAIT_SIMPLE);
if (!req->waiter) {
rte_free(req);
return -ENOMEM;
}
req->req_type = UHD_DPDK_LCORE_TERM;
for (int i = rte_get_next_lcore(-1, 1, 0);
(i < RTE_MAX_LCORE);
i = rte_get_next_lcore(i, 1, 0))
{
struct uhd_dpdk_thread *t = &ctx->threads[i];
if (LIST_EMPTY(&t->port_list))
continue;
if (rte_eal_get_lcore_state(t->lcore) == FINISHED)
continue;
if (rte_ring_enqueue(t->sock_req_ring, req)) {
RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i);
rte_free(req->waiter);
rte_free(req);
return -ENOSPC;
}
uhd_dpdk_config_req_submit(req, 1, t);
}
uhd_dpdk_waiter_put(req->waiter);
rte_free(req);
return 0;
}