Data Accelerator Offload
dao_virtio_blkdev.h
/* SPDX-License-Identifier: Marvell-Proprietary
 * Copyright (c) 2025 Marvell
 */

#ifndef __INCLUDE_DAO_VIRTIO_BLK_H__
#define __INCLUDE_DAO_VIRTIO_BLK_H__

#include <dao_virtio.h>
#include <dao_util.h>

#include <spec/virtio_blk.h>

/** Virtio block device configuration (see the bring-up sketch after this listing) */
struct dao_virtio_blkdev_conf {
	/** PEM device identifier */
	uint16_t pem_devid;
	/** Device capacity */
	uint64_t capacity;
	/** Block size */
	uint32_t blk_size;
	/** Maximum size of a single segment */
	uint32_t seg_size_max;
	/** Maximum number of segments per request */
	uint32_t seg_max;
	/** DMA vchan used by the device */
	uint16_t dma_vchan;
/** Flag to enable use of external buffers */
#define DAO_VIRTIO_BLKDEV_EXTBUF DAO_BIT_ULL(0)
	/* Config flags */
	uint16_t flags;
	union {
		struct {
			/** Mempool to allocate buffers from */
			struct rte_mempool *pool;
		};
		struct {
			/** Data room size of external buffers */
			uint16_t dataroom_size;
		};
	};
	/** Device feature bits */
	uint64_t feat_bits;
};

/* End of structure dao_virtio_blkdev_conf. */

/** Virtio block request status */
typedef enum {
	DAO_VIRTIO_BLK_REQ_IN_PROGRESS,
	DAO_VIRTIO_BLK_REQ_COMPLETE,
} dao_virtio_blk_req_status_t;

	/* Descriptor data used internally by the library */
	uint64_t desc_data[2];
	/* Pointer to the request status */
	uint8_t *status;
	/* Total data length of the request */
	uint32_t tot_len;
	/* Total number of segments */
	uint32_t tot_segs;
	/* Total number of buffers */
	uint32_t tot_bufs;
	/* Request header data */
	uint8_t hdr_data[];
} __rte_packed;

/** Virtio block device fast-path context */
typedef struct dao_virtio_blkdev {
	/** Index into the dequeue function table */
	uint16_t deq_fn_id;
	/** Index into the completion processing function table */
	uint16_t compl_fn_id;
	/** Index into the descriptor management function table */
	uint16_t mgmt_fn_id;

#define DAO_VIRTIO_BLKDEV_MEM_SZ 8192
	/** Memory reserved for internal use */
	uint8_t reserved[DAO_VIRTIO_BLKDEV_MEM_SZ];
	/** Per-queue pointers used by the fast-path helpers */
	void *qs[DAO_VIRTIO_MAX_QUEUES] __rte_cache_aligned;
} dao_virtio_blkdev_t;

/** Virtio block devices */
extern struct dao_virtio_blkdev dao_virtio_blkdevs[];

/* Fast path data */

/** Request dequeue function */
typedef uint16_t (*dao_virtio_blk_deq_fn_t)(void *q, void **mbufs, uint16_t nb_mbufs);
/** Request dequeue function (external buffer mode) */
typedef uint16_t (*dao_virtio_blk_deq_ext_fn_t)(void *q, void **vbufs, uint16_t nb_bufs);
/** Completion processing function */
typedef uint16_t (*dao_virtio_blk_process_compl_fn_t)(void *q, void **mbufs, uint16_t nb_compl);
/** Completion processing function (external buffer mode) */
typedef uint16_t (*dao_virtio_blk_process_compl_ext_fn_t)(void *q, void **vbufs, uint16_t nb_compl);
/** Descriptor management function */
typedef int (*dao_virtio_blk_desc_manage_fn_t)(uint16_t devid, uint16_t qp_count);
/** Descriptor management function (external buffer mode) */
typedef int (*dao_virtio_blk_desc_manage_ext_fn_t)(uint16_t devid, uint16_t qp_count);

/* Function tables indexed by the fn_id fields of struct dao_virtio_blkdev */
extern dao_virtio_blk_deq_fn_t dao_virtio_blk_deq_fns[];
extern dao_virtio_blk_deq_ext_fn_t dao_virtio_blk_deq_ext_fns[];
extern dao_virtio_blk_process_compl_fn_t dao_virtio_blk_process_compl_fns[];
extern dao_virtio_blk_process_compl_ext_fn_t dao_virtio_blk_process_compl_ext_fns[];
extern dao_virtio_blk_desc_manage_fn_t dao_blk_desc_manage_fns[];
extern dao_virtio_blk_desc_manage_ext_fn_t dao_blk_desc_manage_ext_fns[];

/** Device status change callback */
typedef int (*dao_virtio_blkdev_status_cb_t)(uint16_t devid, uint8_t status);
/** Multi-queue configuration callback */
typedef int (*dao_virtio_blkdev_mq_cfg_t)(uint16_t devid, bool qmap_set);
/** External buffer allocation callback */
typedef int (*dao_virtio_blkdev_extbuf_get)(uint16_t devid, void *buffs[], uint16_t nb_buffs);
/** External buffer free callback */
typedef int (*dao_virtio_blkdev_extbuf_put)(uint16_t devid, void *buffs[], uint16_t nb_buffs);

/** Virtio block device callbacks (see the registration sketch after this listing) */
struct dao_virtio_blkdev_cbs {
	dao_virtio_blkdev_status_cb_t status_cb;
	dao_virtio_blkdev_extbuf_get extbuf_get;
	dao_virtio_blkdev_extbuf_put extbuf_put;
};

/* End of structure dao_virtio_blkdev_cbs. */

/** Initialize a virtio block device */
int dao_virtio_blkdev_init(uint16_t devid, struct dao_virtio_blkdev_conf *conf);

/** Tear down a virtio block device */
int dao_virtio_blkdev_fini(uint16_t devid);

/** Register virtio block device callbacks */
void dao_virtio_blkdev_cb_register(struct dao_virtio_blkdev_cbs *cbs);

/** Unregister virtio block device callbacks */
void dao_virtio_blkdev_cb_unregister(void);

/** Get the current queue count of a virtio block device */
int dao_virtio_blkdev_queue_count(uint16_t devid);

/** Get the feature bits of a virtio block device */
uint64_t dao_virtio_blkdev_feature_bits_get(uint16_t devid);

/** Get the maximum queue count supported for a virtio block device */
int dao_virtio_blkdev_queue_count_max(uint16_t pem_devid, uint16_t devid);

/** Perform descriptor management for a virtio block device's queues */
static __rte_always_inline int
dao_virtio_blk_io_desc_manage(uint16_t devid, uint16_t q_count)
{
	struct dao_virtio_blkdev *blkdev = &dao_virtio_blkdevs[devid];
	dao_virtio_blk_desc_manage_fn_t mgmt_fn;

	mgmt_fn = dao_blk_desc_manage_fns[blkdev->mgmt_fn_id];

	return (*mgmt_fn)(devid, q_count);
}

/** Process request completions on a virtio block queue */
static __rte_always_inline uint16_t
dao_virtio_blk_process_compl(uint16_t devid, uint16_t qid,
			     void **mbufs, uint16_t nb_compl)
{
	struct dao_virtio_blkdev *blkdev = &dao_virtio_blkdevs[devid];
	dao_virtio_blk_process_compl_fn_t compl_fn;
	void *q = blkdev->qs[qid];

	if (unlikely(!q))
		return 0;

	compl_fn = dao_virtio_blk_process_compl_fns[blkdev->compl_fn_id];

	return (*compl_fn)(q, mbufs, nb_compl);
}

/** Process request completions on a virtio block queue (external buffer mode) */
static __rte_always_inline uint16_t
dao_virtio_blk_process_compl_ext(uint16_t devid, uint16_t qid, void **vbufs,
				 uint16_t nb_compl)
{
	struct dao_virtio_blkdev *blkdev = &dao_virtio_blkdevs[devid];
	dao_virtio_blk_process_compl_ext_fn_t compl_fn;
	void *q = blkdev->qs[qid];

	if (unlikely(!q))
		return 0;

	compl_fn = dao_virtio_blk_process_compl_ext_fns[blkdev->compl_fn_id];

	return (*compl_fn)(q, vbufs, nb_compl);
}

/** Dequeue a burst of virtio block requests from a queue */
static __rte_always_inline uint16_t
dao_virtio_blk_dequeue_burst(uint16_t devid, uint16_t qid,
			     void **mbufs, uint16_t nb_mbufs)
{
	struct dao_virtio_blkdev *blkdev = &dao_virtio_blkdevs[devid];
	dao_virtio_blk_deq_fn_t deq_fn;
	void *q = blkdev->qs[qid];

	if (unlikely(!q))
		return 0;

	deq_fn = dao_virtio_blk_deq_fns[blkdev->deq_fn_id];

	return (*deq_fn)(q, mbufs, nb_mbufs);
}

/** Dequeue a burst of virtio block requests into external buffers */
static __rte_always_inline uint16_t
dao_virtio_blk_dequeue_burst_ext(uint16_t devid, uint16_t qid, void **vbufs,
				 uint16_t nb_bufs)
{
	struct dao_virtio_blkdev *blkdev = &dao_virtio_blkdevs[devid];
	dao_virtio_blk_deq_ext_fn_t deq_fn;
	void *q = blkdev->qs[qid];

	if (unlikely(!q))
		return 0;

	deq_fn = dao_virtio_blk_deq_ext_fns[blkdev->deq_fn_id];

	return (*deq_fn)(q, vbufs, nb_bufs);
}

#endif /* __INCLUDE_DAO_VIRTIO_BLK_H__ */
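
Usage sketch: one way an application might fill struct dao_virtio_blkdev_conf and bring a device up with dao_virtio_blkdev_init(). This is a minimal sketch, not taken from this header; the helper name, the device ids, the mempool and all numeric values are illustrative assumptions.

/* Minimal bring-up sketch; all values below are illustrative assumptions. */
#include <rte_mempool.h>
#include <dao_virtio_blkdev.h>

static int
app_blkdev_setup(uint16_t devid, struct rte_mempool *data_pool)
{
	struct dao_virtio_blkdev_conf conf = {0};

	conf.pem_devid = 0;              /* assumed PEM device */
	conf.capacity = 1ULL << 21;      /* assumed capacity */
	conf.blk_size = 512;             /* assumed block size */
	conf.seg_size_max = 64 * 1024;   /* assumed max segment size */
	conf.seg_max = 16;               /* assumed max segments per request */
	conf.dma_vchan = 0;              /* assumed DMA vchan */
	conf.pool = data_pool;           /* buffers come from this mempool */
	conf.feat_bits = 0;              /* assumed: accept library defaults */

	return dao_virtio_blkdev_init(devid, &conf);
}

Because DAO_VIRTIO_BLKDEV_EXTBUF is left clear in conf.flags, the pool member of the union is the one filled here; presumably an external-buffer setup would set that flag and fill dataroom_size instead.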
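
The callback typedefs are wired through dao_virtio_blkdev_cb_register(). The sketch below registers only a status callback; the handling inside the callback and the choice to keep the cbs structure static (in case the library retains the pointer) are assumptions, not behaviour stated by this header.

/* Callback registration sketch; the handling inside the callback is an assumption. */
#include <dao_virtio_blkdev.h>

static int
app_blkdev_status_cb(uint16_t devid, uint8_t status)
{
	/* React to virtio device status changes reported for devid,
	 * e.g. start or stop per-queue processing.
	 */
	(void)devid;
	(void)status;
	return 0;
}

static void
app_blkdev_register_cbs(void)
{
	static struct dao_virtio_blkdev_cbs cbs;

	cbs.status_cb = app_blkdev_status_cb;
	dao_virtio_blkdev_cb_register(&cbs);
}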
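
The inline helpers form the per-queue fast path: descriptor management, request dequeue, and completion processing. Below is a sketch of a polling routine built from them; the burst size, the loop over queue ids 0..q_count-1, and the placeholder I/O step (which simply treats every dequeued request as completed) are assumptions for illustration.

/* Fast-path polling sketch; burst size and the I/O handling step are assumptions. */
#include <dao_virtio_blkdev.h>

#define APP_BURST_SZ 64 /* assumed burst size */

static void
app_blkdev_poll(uint16_t devid, uint16_t q_count)
{
	void *bufs[APP_BURST_SZ];
	uint16_t qid, nb;

	/* Let the library fetch/flush descriptors for the device's queues. */
	dao_virtio_blk_io_desc_manage(devid, q_count);

	for (qid = 0; qid < q_count; qid++) {
		/* Fetch a burst of pending block requests from this queue. */
		nb = dao_virtio_blk_dequeue_burst(devid, qid, bufs, APP_BURST_SZ);
		if (!nb)
			continue;

		/* Application-specific step: issue the requests to the backing
		 * storage. As a placeholder, treat all of them as completed.
		 */

		/* Hand the completed requests back to the library. */
		dao_virtio_blk_process_compl(devid, qid, bufs, nb);
	}
}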