5#ifndef _DAO_LIB_WORKERS_H_ 
    6#define _DAO_LIB_WORKERS_H_ 
   58#include <rte_common.h> 
   59#include <rte_compat.h> 
   62#include <rte_bitops.h> 
   63#include <rte_atomic.h> 
   74#define DAO_WORKER_INVALID_INDEX UINT32_C(~0) 
/* Logging hook for this module; aliases the project's dao_dbg macro
 * (presumably a debug-level logger — NOTE(review): confirm dao_dbg's
 * severity and format-string conventions against its definition). */
#define DAO_WORKERS_LOG dao_dbg
  110    size_t app_private_size;
 
  113    uint8_t app_private[];
 
 
  116typedef struct dao_workers_main {
 
  123    uint16_t num_workers;
 
  128    uint32_t control_core_index;
 
  131    size_t per_worker_sz;
 
  134    size_t workers_main_sz;
 
  137    int barrier_recursion_level;
 
  141    uint64_t parked_at_barrier;
 
  145    uint64_t barrier_count;
 
  149    dao_worker_t workers[];
 
  165static inline dao_worker_t *
 
  169    RTE_VERIFY(worker_index <= dwm->num_workers);
 
  171    return ((dao_worker_t *)((uint8_t *)dwm->workers + (worker_index * dwm->per_worker_sz)));
 
 
  190            *app_data = wrkr->app_private;
 
  193            *size = wrkr->app_private_size;
 
 
  213    return wrkr->core_index;
 
 
  229    return wrkr->worker_index;
 
 
  241    return wrkr->dpdk_numa_id;
 
 
  253    return worker->is_main;
 
 
  265static inline dao_worker_t *
 
  269        return (dwm->workers + dwm->control_core_index);
 
 
  286__dao_workers_num_workers_get(dao_workers_main_t *dwm)
 
  288    return dwm->num_workers;
 
  312    return dwm->num_cores;
 
 
  323static inline dao_worker_t *
 
  327    dao_worker_t *wrkr = NULL;
 
  328    unsigned int lcore_id;
 
  331    lcore_id = rte_lcore_id();
 
  333    for (i = 0; i < wm->num_cores; i++) {
 
  337        assert(wrkr->core_index == i);
 
  338        assert(wm == wrkr->dao_workers);
 
  340        if (wrkr->dpdk_lcore_id != lcore_id)
 
  343        if (LCORE_ID_ANY == lcore_id)
 
  348    if (i < wm->num_cores)
 
 
/**
 * Initialize the workers subsystem.
 *
 * @param core_mask
 *   Bitmask of cores to bring up as workers (bit N selects core N —
 *   NOTE(review): presumably matches the DPDK lcore numbering; confirm
 *   against the implementation).
 * @param control_core_index
 *   Index of the core to designate as the control core.
 * @param per_core_app_data_sz
 *   Size in bytes of the per-worker application-private area
 *   (exposed later via each worker's app_private[] flexible member).
 * @return
 *   0 on success, negative on failure — TODO confirm exact error
 *   convention from the .c implementation.
 */
int dao_workers_init(uint64_t core_mask, uint32_t control_core_index, 
size_t per_core_app_data_sz);
 
  429    dao_workers_main_t *dwm = (dao_workers_main_t *)worker->dao_workers;
 
  431    if (unlikely(__atomic_load_n(&dwm->parked_at_barrier, __ATOMIC_ACQUIRE))) {
 
  432        DAO_WORKERS_LOG(
"Worker%d: going to barrier 0x%lx ",
 
  434                __atomic_load_n(&dwm->barrier_count, __ATOMIC_RELAXED));
 
  436        __atomic_add_fetch(&dwm->barrier_count, 1, __ATOMIC_RELEASE);
 
  439        while (__atomic_load_n(&dwm->parked_at_barrier, __ATOMIC_RELAXED))
 
  442        DAO_WORKERS_LOG(
"Worker: %d released from barrier",
 
 
static int dao_workers_core_index_get(dao_worker_t *wrkr)
 
static int dao_workers_numa_get(dao_worker_t *wrkr)
 
dao_workers_main_t * __dao_workers
 
static dao_workers_main_t * dao_workers_get(void)
 
static void dao_workers_barrier_check(dao_worker_t *worker)
 
static dao_worker_t * dao_workers_control_worker_get(dao_workers_main_t *dwm)
 
static int dao_workers_is_control_worker(dao_worker_t *worker)
 
static int dao_workers_app_data_get(dao_worker_t *wrkr, void **app_data, size_t *size)
 
static dao_worker_t * dao_workers_worker_get(dao_workers_main_t *dwm, uint8_t worker_index)
 
int dao_workers_init(uint64_t core_mask, uint32_t control_core_index, size_t per_core_app_data_sz)
 
static int dao_workers_num_cores_get(void)
 
int dao_workers_barrier_release(dao_worker_t *worker)
 
int dao_workers_fini(void)
 
#define DAO_WORKER_INVALID_INDEX
 
struct dao_worker __rte_cache_aligned
 
static int dao_workers_num_workers_get(void)
 
static dao_worker_t * dao_workers_self_worker_get(void)
 
int dao_workers_barrier_sync(dao_worker_t *worker)
 
static int dao_workers_worker_index_get(dao_worker_t *wrkr)
 
unsigned int dpdk_lcore_id