Data Accelerator Offload — dao_virtio_netdev.h (source listing extracted from generated documentation)
1/* SPDX-License-Identifier: Marvell-Proprietary
2 * Copyright (c) 2023 Marvell
3 */
4
11#ifndef __INCLUDE_DAO_VIRTIO_NET_H__
12#define __INCLUDE_DAO_VIRTIO_NET_H__
13
14#include <rte_prefetch.h>
15
16#include <dao_util.h>
17#include <dao_virtio.h>
18
19#include <spec/virtio_net.h>
20
24 uint16_t status;
31 uint32_t speed;
39 uint8_t duplex;
40};
41
45 uint16_t pem_devid;
47#define DAO_VIRTIO_NETDEV_EXTBUF DAO_BIT_ULL(0)
48 uint16_t flags;
49 union {
50 struct {
52 struct rte_mempool *pool;
53 };
55 struct {
56 uint16_t dataroom_size;
57 };
58 };
60 uint16_t dma_vchan;
66 uint16_t reta_size;
68 uint16_t hash_key_size;
70 uint16_t mtu;
72 uint8_t mac[VIRTIO_NET_ETHER_ADDR_LEN];
76 bool csum_en;
77};
78
79/* End of structure dao_virtio_netdev_conf. */
80
86 uint16_t deq_fn_id;
88 uint16_t enq_fn_id;
90 uint16_t mgmt_fn_id;
92#define DAO_VIRTIO_NETDEV_MEM_SZ 8192
93 uint8_t reserved[DAO_VIRTIO_NETDEV_MEM_SZ];
94};
95
97struct __rte_packed_begin dao_virtio_net_hdr {
99 uint64_t desc_data[2];
101 struct virtio_net_hdr hdr;
102} __rte_packed_end;
103
106
107/* Fast path data */
109typedef uint16_t (*dao_virtio_net_deq_fn_t)(void *q, struct rte_mbuf **mbufs, uint16_t nb_mbufs);
111typedef uint16_t (*dao_virtio_net_enq_fn_t)(void *q, struct rte_mbuf **mbufs, uint16_t nb_mbufs);
113typedef int (*dao_net_desc_manage_fn_t)(uint16_t devid, uint16_t qp_count);
114
127
129typedef uint16_t (*dao_virtio_net_deq_ext_fn_t)(void *q, void **vbufs, uint16_t nb_bufs);
131typedef uint16_t (*dao_virtio_net_enq_ext_fn_t)(void *q, void **vbufs, uint16_t nb_bufs);
136
138typedef int (*dao_virtio_netdev_rss_cb_t)(uint16_t devid, struct virtio_net_ctrl_rss *rss);
140typedef int (*dao_virtio_netdev_status_cb_t)(uint16_t devid, uint8_t status);
142typedef int (*dao_virtio_netdev_promisc_cb_t)(uint16_t devid, uint8_t enable);
144typedef int (*dao_virtio_netdev_allmulti_cb_t)(uint16_t devid, uint8_t enable);
146typedef int (*dao_virtio_netdev_mac_set_cb_t)(uint16_t devid, uint8_t *mac);
148typedef int (*dao_virtio_netdev_mac_add_cb_t)(uint16_t devid, struct virtio_net_ctrl_mac *mac_tbl,
149 uint8_t type);
151typedef int (*dao_virtio_netdev_mq_cfg_t)(uint16_t devid, bool qmap_set);
153typedef int (*dao_virtio_netdev_vlan_t)(uint16_t devid, uint16_t vlan_tci);
154typedef int (*dao_virtio_netdev_extbuf_get)(uint16_t devid, void *buffs[], uint16_t nb_buffs);
155typedef int (*dao_virtio_netdev_extbuf_put)(uint16_t devid, void *buffs[], uint16_t nb_buffs);
156
182
183/* End of structure dao_virtio_netdev_cbs. */
184
195int dao_virtio_netdev_init(uint16_t devid, struct dao_virtio_netdev_conf *conf);
196
206int dao_virtio_netdev_fini(uint16_t devid);
207
215
220
230
239uint64_t dao_virtio_netdev_feature_bits_get(uint16_t devid);
240
253int dao_virtio_netdev_queue_count_max(uint16_t pem_devid, uint16_t devid);
254
266
275uint8_t dao_virtio_netdev_hdrlen_get(uint16_t devid);
276
277/* Fast path routines */
278
292static __rte_always_inline int
293dao_virtio_net_desc_manage(uint16_t devid, uint16_t qp_count)
294{
295 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
297 mgmt_fn = dao_net_desc_manage_fns[netdev->mgmt_fn_id];
298
299 return (*mgmt_fn)(devid, qp_count);
300}
301
302static __rte_always_inline int
303dao_virtio_net_desc_manage_ops(uint16_t devid, uint16_t qp_count)
304{
305 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
307
308 mgmt_fn = dao_net_desc_manage_ops_fns[netdev->mgmt_fn_id];
309
310 return (*mgmt_fn)(devid, qp_count);
311}
312
327static __rte_always_inline uint16_t
328dao_virtio_net_dequeue_burst(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs,
329 uint16_t nb_mbufs)
330{
331 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
333 void *q = netdev->qs[qid];
334
335 if (unlikely(!q))
336 return 0;
337
338 deq_fn = dao_virtio_net_deq_fns[netdev->deq_fn_id];
339
340 return (*deq_fn)(q, mbufs, nb_mbufs);
341}
342
359static __rte_always_inline uint16_t
360dao_virtio_net_dequeue_burst_ops(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs,
361 uint16_t nb_mbufs)
362{
363 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
365 void *q = netdev->qs[qid];
366
367 if (unlikely(!q))
368 return 0;
369
370 rte_prefetch0(q);
371 rte_prefetch0(RTE_PTR_ADD(q, RTE_CACHE_LINE_SIZE * 2));
372 rte_prefetch0(RTE_PTR_ADD(q, RTE_CACHE_LINE_SIZE * 6));
373
374 deq_fn = dao_virtio_net_deq_ops_fns[netdev->deq_fn_id];
375
376 return (*deq_fn)(q, mbufs, nb_mbufs);
377}
378
393static __rte_always_inline uint16_t
394dao_virtio_net_enqueue_burst(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs,
395 uint16_t nb_mbufs)
396{
397 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
399 void *q = netdev->qs[qid];
400
401 if (unlikely(!q))
402 return 0;
403
404 enq_fn = dao_virtio_net_enq_fns[netdev->enq_fn_id];
405
406 return (*enq_fn)(q, mbufs, nb_mbufs);
407}
408
425static __rte_always_inline uint16_t
426dao_virtio_net_enqueue_burst_ops(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs,
427 uint16_t nb_mbufs)
428{
429 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
431 void *q = netdev->qs[qid];
432
433 if (unlikely(!q))
434 return 0;
435
436 rte_prefetch0(q);
437 rte_prefetch0(RTE_PTR_ADD(q, RTE_CACHE_LINE_SIZE * 2));
438 rte_prefetch0(RTE_PTR_ADD(q, RTE_CACHE_LINE_SIZE * 6));
439
440 enq_fn = dao_virtio_net_enq_ops_fns[netdev->enq_fn_id];
441
442 return (*enq_fn)(q, mbufs, nb_mbufs);
443}
444
459static __rte_always_inline uint16_t
460dao_virtio_net_dequeue_burst_ext(uint16_t devid, uint16_t qid, void **vbufs, uint16_t nb_bufs)
461{
462 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
464 void *q = netdev->qs[qid];
465
466 if (unlikely(!q))
467 return 0;
468
469 deq_fn = dao_virtio_net_deq_ext_fns[netdev->deq_fn_id];
470
471 return (*deq_fn)(q, vbufs, nb_bufs);
472}
473
488static __rte_always_inline uint16_t
489dao_virtio_net_enqueue_burst_ext(uint16_t devid, uint16_t qid, void **vbufs, uint16_t nb_bufs)
490{
491 struct dao_virtio_netdev *netdev = &dao_virtio_netdevs[devid];
493 void *q = netdev->qs[qid];
494
495 if (unlikely(!q))
496 return 0;
497
498 enq_fn = dao_virtio_net_enq_ext_fns[netdev->enq_fn_id];
499
500 return (*enq_fn)(q, vbufs, nb_bufs);
501}
502
503#endif /* __INCLUDE_DAO_VIRTIO_NET_H__ */
#define DAO_VIRTIO_MAX_QUEUES
Definition dao_virtio.h:21
dao_net_desc_manage_fn_t dao_net_desc_manage_ops_fns[]
static __rte_always_inline uint16_t dao_virtio_net_enqueue_burst_ext(uint16_t devid, uint16_t qid, void **vbufs, uint16_t nb_bufs)
static __rte_always_inline uint16_t dao_virtio_net_enqueue_burst(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs, uint16_t nb_mbufs)
uint8_t dao_virtio_netdev_hdrlen_get(uint16_t devid)
int dao_virtio_netdev_init(uint16_t devid, struct dao_virtio_netdev_conf *conf)
int(* dao_virtio_netdev_allmulti_cb_t)(uint16_t devid, uint8_t enable)
void dao_virtio_netdev_cb_register(struct dao_virtio_netdev_cbs *cbs)
dao_virtio_net_enq_fn_t dao_virtio_net_enq_fns[]
uint16_t(* dao_virtio_net_enq_ext_fn_t)(void *q, void **vbufs, uint16_t nb_bufs)
int(* dao_virtio_netdev_mac_add_cb_t)(uint16_t devid, struct virtio_net_ctrl_mac *mac_tbl, uint8_t type)
dao_virtio_net_enq_ext_fn_t dao_virtio_net_enq_ext_fns[]
static __rte_always_inline int dao_virtio_net_desc_manage(uint16_t devid, uint16_t qp_count)
static __rte_always_inline uint16_t dao_virtio_net_enqueue_burst_ops(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs, uint16_t nb_mbufs)
static __rte_always_inline uint16_t dao_virtio_net_dequeue_burst(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs, uint16_t nb_mbufs)
uint16_t(* dao_virtio_net_deq_ext_fn_t)(void *q, void **vbufs, uint16_t nb_bufs)
int dao_virtio_netdev_link_sts_update(uint16_t devid, struct dao_virtio_netdev_link_info *info)
dao_virtio_net_deq_fn_t dao_virtio_net_deq_ops_fns[]
dao_virtio_net_deq_fn_t dao_virtio_net_deq_fns[]
int(* dao_virtio_netdev_mq_cfg_t)(uint16_t devid, bool qmap_set)
void dao_virtio_netdev_cb_unregister(void)
dao_net_desc_manage_fn_t dao_net_desc_manage_fns[]
struct dao_virtio_netdev dao_virtio_netdevs[]
int(* dao_virtio_netdev_vlan_t)(uint16_t devid, uint16_t vlan_tci)
uint16_t(* dao_virtio_net_enq_fn_t)(void *q, struct rte_mbuf **mbufs, uint16_t nb_mbufs)
int(* dao_virtio_netdev_status_cb_t)(uint16_t devid, uint8_t status)
static __rte_always_inline uint16_t dao_virtio_net_dequeue_burst_ext(uint16_t devid, uint16_t qid, void **vbufs, uint16_t nb_bufs)
dao_virtio_net_deq_ext_fn_t dao_virtio_net_deq_ext_fns[]
int dao_virtio_netdev_fini(uint16_t devid)
#define DAO_VIRTIO_NETDEV_MEM_SZ
int(* dao_virtio_netdev_promisc_cb_t)(uint16_t devid, uint8_t enable)
static __rte_always_inline uint16_t dao_virtio_net_dequeue_burst_ops(uint16_t devid, uint16_t qid, struct rte_mbuf **mbufs, uint16_t nb_mbufs)
uint16_t(* dao_virtio_net_deq_fn_t)(void *q, struct rte_mbuf **mbufs, uint16_t nb_mbufs)
int(* dao_virtio_netdev_mac_set_cb_t)(uint16_t devid, uint8_t *mac)
int(* dao_virtio_netdev_rss_cb_t)(uint16_t devid, struct virtio_net_ctrl_rss *rss)
dao_virtio_net_enq_fn_t dao_virtio_net_enq_ops_fns[]
int dao_virtio_netdev_queue_count_max(uint16_t pem_devid, uint16_t devid)
int dao_virtio_netdev_queue_count(uint16_t devid)
int(* dao_net_desc_manage_fn_t)(uint16_t devid, uint16_t qp_count)
uint64_t dao_virtio_netdev_feature_bits_get(uint16_t devid)
dao_virtio_netdev_rss_cb_t rss_cb
dao_virtio_netdev_status_cb_t status_cb
dao_virtio_netdev_promisc_cb_t promisc_cb
dao_virtio_netdev_mac_set_cb_t mac_set
dao_virtio_netdev_extbuf_get extbuf_get
dao_virtio_netdev_vlan_t vlan_add
dao_virtio_netdev_extbuf_put extbuf_put
dao_virtio_netdev_vlan_t vlan_del
dao_virtio_netdev_allmulti_cb_t allmulti_cb
dao_virtio_netdev_mac_add_cb_t mac_add
dao_virtio_netdev_mq_cfg_t mq_configure
uint8_t mac[VIRTIO_NET_ETHER_ADDR_LEN]
struct dao_virtio_netdev_link_info link_info
struct rte_mempool * pool
void *qs[DAO_VIRTIO_MAX_QUEUES] __rte_cache_aligned