dao_virtio_cryptodev.h — Data Accelerator Offload (DAO) virtio crypto device API.
(Doxygen source view; navigation chrome removed.)
1/* SPDX-License-Identifier: Marvell-MIT
2 * Copyright (c) 2025 Marvell
3 */
4
11#ifndef __INCLUDE_DAO_VIRTIO_CRYPTO_H__
12#define __INCLUDE_DAO_VIRTIO_CRYPTO_H__
13
14#include <rte_common.h>
15#include <rte_crypto.h>
16#include <rte_crypto_asym.h>
17
18#include <dao_virtio.h>
19
20#define DAO_VIRTIO_CRYPTO_DEV_MAX 1
21/* TODO - should be 64 */
22#define DAO_VIRTIO_CRYPTO_QP_MAX 128
23
24#define DAO_VIRTIO_INVALID_ID 0xFFFF
25
26#define DAO_VIRTIO_CRYPTO_RX_BUF_CACHE_SZ 128
27#define DAO_VIRTIO_CRYPTO_TX_BUF_CACHE_SZ 512
28
29#define DAO_VIRTIO_CRYPTO_MAX_CHAIN_READ_DESC 4
30#define DAO_VIRTIO_CRYPTO_MAX_CHAIN_WRITE_DESC 4
31
/**
 * Virtio crypto device configuration, passed to dao_virtio_cryptodev_init().
 *
 * NOTE(review): the struct opening line was hidden in the scraped view and is
 * reconstructed from the end-of-structure marker below and the init()
 * prototype; confirm against the original header.
 */
struct dao_virtio_cryptodev_conf {
	/** PEM device id backing this virtio device. */
	uint16_t pem_devid;
	/** DMA virtual channel to use for this device. */
	uint16_t dma_vchan;
	/** Mempool for buffers — exact usage not visible here; confirm. */
	struct rte_mempool *pool;
	/** Crypto device (cdev) id associated with this virtio device. */
	uint16_t cdev_id;
};

/* End of structure dao_virtio_cryptodev_conf. */
45
51 uint16_t deq_fn_id;
53 uint16_t enq_fn_id;
55 uint16_t mgmt_fn_id;
57 uint16_t cdev_id;
59 uint8_t cdev_qp_id_map[DAO_VIRTIO_CRYPTO_QP_MAX];
60#define DAO_VIRTIO_CRYPTODEV_MEM_SZ 8192
61 uint8_t reserved[DAO_VIRTIO_CRYPTODEV_MEM_SZ];
62};
63
67 struct {
68 union {
70 struct {
72 uint16_t id;
74 uint16_t qp_id;
77 struct {
79 uint16_t dev_id;
81 uint16_t q_id;
83 };
84 /* Count of packets from same queue */
85 uint16_t cnt;
87
88 uint32_t output_len;
89 rte_iova_t output_addr;
90 struct rte_crypto_op cop;
91 struct rte_crypto_asym_op asym;
92 uint8_t reserved[];
93};
94
97 uint16_t virtio_dev_id;
98 uint16_t virtio_queue_id;
99};
100
103
104/* Fast path data */
106typedef uint16_t (*dao_virtio_crypto_deq_fn_t)(void *q, struct rte_crypto_op **cops,
107 uint16_t nb_cops);
109typedef uint16_t (*dao_virtio_crypto_enq_fn_t)(void *q, struct rte_crypto_op **cops,
110 uint16_t nb_cops);
112typedef int (*dao_crypto_desc_manage_fn_t)(uint16_t devid, uint16_t qp_count);
113
120
122typedef int (*dao_virtio_cryptodev_status_cb_t)(uint16_t devid, uint8_t status);
123
125typedef uint64_t (*dao_virtio_cryptodev_sym_sess_create_cb_t)(uint16_t dev_id,
126 struct rte_crypto_sym_xform *x);
128typedef uint64_t (*dao_virtio_cryptodev_asym_sess_create_cb_t)(uint16_t dev_id,
129 struct rte_crypto_asym_xform *x);
131typedef void (*dao_virtio_cryptodev_session_destroy_cb_t)(uint16_t dev_id, uint64_t session_id);
132
146
147/* End of structure dao_virtio_cryptodev_cbs. */
148
/**
 * Initialize a virtio crypto device.
 *
 * @param devid Virtio crypto device id.
 * @param conf  Device configuration.
 * @return 0 on success, negative error code otherwise.
 */
int dao_virtio_cryptodev_init(uint16_t devid, struct dao_virtio_cryptodev_conf *conf);

/**
 * Release a virtio crypto device.
 *
 * @param devid Virtio crypto device id.
 * @return 0 on success, negative error code otherwise.
 */
int dao_virtio_cryptodev_fini(uint16_t devid);

/* NOTE(review): the following prototypes were hidden in the scraped view and
 * are reconstructed from the page's member index; confirm ordering and exact
 * doc semantics against the original header.
 */

/** Initialize common virtio configuration for all devices. */
void dao_virtio_cryptodev_common_cfg_init(void);

/** Register application callbacks. */
void dao_virtio_cryptodev_cb_register(struct dao_virtio_cryptodev_cbs *cbs);

/** Unregister application callbacks. */
void dao_virtio_cryptodev_cb_unregister(void);

/** Get the active data queue count of a device. */
uint16_t dao_virtio_cryptodev_data_queue_cnt_get(uint16_t dev_id);

/** Get the maximum supported data queue count of a device. */
uint16_t dao_virtio_cryptodev_max_dataqueue_cnt_get(uint16_t dev_id);

/**
 * Attach a crypto device (cdev) with the given number of queue pairs.
 *
 * @param dev_id   Crypto device id.
 * @param qp_count Number of queue pairs to use.
 * @param mempool  Per-queue-pair mempools.
 * @return 0 on success, negative error code otherwise.
 */
int dao_virtio_cryptodev_cdev_add(uint16_t dev_id, uint16_t qp_count,
				  struct rte_mempool *mempool[]);

/** Detach a previously added crypto device. */
int dao_virtio_cryptodev_cdev_remove(uint16_t dev_id);

/** Assign a cdev queue pair to the given virtio device queue. */
int dao_virtio_cryptodev_cdev_queue_assign(uint16_t virt_dev_id, uint16_t virt_queue_id);

/** Release the cdev queue pair mapped to the given virtio device queue. */
int dao_virtio_cryptodev_cdev_queue_release(uint16_t virt_dev_id, uint16_t virt_queue_id);

/**
 * Look up the cdev id, queue pair id and mempool mapped to a virtio
 * device queue.
 *
 * @return 0 on success, negative error code otherwise.
 */
int dao_virtio_cryptodev_cdev_map_queue_get(uint16_t virt_dev_id, uint16_t virt_queue_id,
					    uint16_t *cdev_id, uint16_t *cdev_qp_id,
					    struct rte_mempool **mempool);

/**
 * Reverse lookup: virtio device/queue mapped to a cdev queue pair.
 *
 * @return 0 on success, negative error code otherwise.
 */
int dao_virtio_cryptodev_virt_dev_map_queue_get(uint16_t cdev_id, uint16_t cdev_qp_id,
						uint16_t *virt_dev_id, uint16_t *virt_queue_id);

/** Get the virtio queue map of all queue pairs of a crypto device. */
const struct dao_virtio_cryptodev_vdev_q *
dao_virtio_cryptodev_cdev_map_all_queues_get(uint16_t cdev_id);
321static __rte_always_inline int
322dao_virtio_crypto_desc_manage(uint16_t devid, uint16_t qp_count)
323{
324 struct dao_virtio_cryptodev *cryptodev = &dao_virtio_cryptodevs[devid];
326
327 mgmt_fn = dao_crypto_desc_manage_fns[cryptodev->mgmt_fn_id];
328
329 return (*mgmt_fn)(devid, qp_count);
330}
331
346static __rte_always_inline uint16_t
347dao_virtio_crypto_host_rx(uint16_t devid, uint16_t qid, struct rte_crypto_op **cops,
348 uint16_t nb_cops)
349{
350 struct dao_virtio_cryptodev *cryptodev = &dao_virtio_cryptodevs[devid];
352 void *q = cryptodev->qs[qid];
353
354 if (unlikely(!q))
355 return 0;
356
357 deq_fn = dao_virtio_crypto_deq_fns[cryptodev->deq_fn_id];
358
359 return (*deq_fn)(q, cops, nb_cops);
360}
361
376static __rte_always_inline uint16_t
377dao_virtio_crypto_host_tx(uint16_t devid, uint16_t qid, struct rte_crypto_op **cops,
378 uint16_t nb_cops)
379{
380 struct dao_virtio_cryptodev *cryptodev = &dao_virtio_cryptodevs[devid];
382 void *q = cryptodev->qs[qid];
383
384 if (unlikely(q == NULL))
385 return 0;
386
387 enq_fn = dao_virtio_crypto_enq_fns[cryptodev->enq_fn_id];
388
389 return (*enq_fn)(q, cops, nb_cops);
390}
391
402static __rte_always_inline uint16_t
403dao_virtio_cdev_id_get(uint16_t virt_dev_id)
404{
405 struct dao_virtio_cryptodev *cryptodev = &dao_virtio_cryptodevs[virt_dev_id];
406
407 return cryptodev->cdev_id;
408}
409
422static __rte_always_inline uint16_t
423dao_virtio_cdev_qp_id_get(uint16_t virt_dev_id, uint16_t virt_q_id)
424{
425 struct dao_virtio_cryptodev *cryptodev = &dao_virtio_cryptodevs[virt_dev_id];
426
427 return cryptodev->cdev_qp_id_map[virt_q_id];
428}
429
439void dao_virtio_crypto_tx_desc_dma_completion(uint16_t devid, uint16_t qid);
440
441#endif /* __INCLUDE_DAO_VIRTIO_CRYPTO_H__ */
#define DAO_VIRTIO_MAX_QUEUES
Definition dao_virtio.h:21
uint64_t(* dao_virtio_cryptodev_sym_sess_create_cb_t)(uint16_t dev_id, struct rte_crypto_sym_xform *x)
int dao_virtio_cryptodev_virt_dev_map_queue_get(uint16_t cdev_id, uint16_t cdev_qp_id, uint16_t *virt_dev_id, uint16_t *virt_queue_id)
void dao_virtio_cryptodev_common_cfg_init(void)
void dao_virtio_cryptodev_cb_register(struct dao_virtio_cryptodev_cbs *cbs)
int(* dao_virtio_cryptodev_status_cb_t)(uint16_t devid, uint8_t status)
void dao_virtio_crypto_tx_desc_dma_completion(uint16_t devid, uint16_t qid)
uint16_t dao_virtio_cryptodev_data_queue_cnt_get(uint16_t dev_id)
int dao_virtio_cryptodev_cdev_queue_release(uint16_t virt_dev_id, uint16_t virt_queue_id)
const struct dao_virtio_cryptodev_vdev_q * dao_virtio_cryptodev_cdev_map_all_queues_get(uint16_t cdev_id)
static __rte_always_inline uint16_t dao_virtio_crypto_host_rx(uint16_t devid, uint16_t qid, struct rte_crypto_op **cops, uint16_t nb_cops)
uint16_t dao_virtio_cryptodev_max_dataqueue_cnt_get(uint16_t dev_id)
int dao_virtio_cryptodev_cdev_remove(uint16_t dev_id)
dao_virtio_crypto_enq_fn_t dao_virtio_crypto_enq_fns[]
static __rte_always_inline uint16_t dao_virtio_cdev_qp_id_get(uint16_t virt_dev_id, uint16_t virt_q_id)
void(* dao_virtio_cryptodev_session_destroy_cb_t)(uint16_t dev_id, uint64_t session_id)
int(* dao_crypto_desc_manage_fn_t)(uint16_t devid, uint16_t qp_count)
int dao_virtio_cryptodev_cdev_queue_assign(uint16_t virt_dev_id, uint16_t virt_queue_id)
int dao_virtio_cryptodev_init(uint16_t devid, struct dao_virtio_cryptodev_conf *conf)
uint16_t(* dao_virtio_crypto_enq_fn_t)(void *q, struct rte_crypto_op **cops, uint16_t nb_cops)
uint16_t(* dao_virtio_crypto_deq_fn_t)(void *q, struct rte_crypto_op **cops, uint16_t nb_cops)
dao_virtio_crypto_deq_fn_t dao_virtio_crypto_deq_fns[]
int dao_virtio_cryptodev_cdev_add(uint16_t dev_id, uint16_t qp_count, struct rte_mempool *mempool[])
int dao_virtio_cryptodev_cdev_map_queue_get(uint16_t virt_dev_id, uint16_t virt_queue_id, uint16_t *cdev_id, uint16_t *cdev_qp_id, struct rte_mempool **mempool)
dao_crypto_desc_manage_fn_t dao_crypto_desc_manage_fns[]
static __rte_always_inline uint16_t dao_virtio_cdev_id_get(uint16_t virt_dev_id)
struct dao_virtio_cryptodev dao_virtio_cryptodevs[]
static __rte_always_inline uint16_t dao_virtio_crypto_host_tx(uint16_t devid, uint16_t qid, struct rte_crypto_op **cops, uint16_t nb_cops)
static __rte_always_inline int dao_virtio_crypto_desc_manage(uint16_t devid, uint16_t qp_count)
void dao_virtio_cryptodev_cb_unregister(void)
int dao_virtio_cryptodev_fini(uint16_t devid)
uint64_t(* dao_virtio_cryptodev_asym_sess_create_cb_t)(uint16_t dev_id, struct rte_crypto_asym_xform *x)
struct dao_virtio_crypto_buffer::@17 metadata
struct dao_virtio_crypto_buffer::@17::@18::@20 cdev
struct dao_virtio_crypto_buffer::@17::@18::@21 virt
dao_virtio_cryptodev_session_destroy_cb_t sym_sess_destroy_cb
dao_virtio_cryptodev_status_cb_t status_cb
dao_virtio_cryptodev_session_destroy_cb_t asym_sess_destroy_cb
dao_virtio_cryptodev_sym_sess_create_cb_t sym_sess_create_cb
dao_virtio_cryptodev_asym_sess_create_cb_t asym_sess_create_cb
uint8_t cdev_qp_id_map[DAO_VIRTIO_CRYPTO_QP_MAX]
void *qs[DAO_VIRTIO_MAX_QUEUES] __rte_cache_aligned