diff --git a/app/src/app_base.h b/app/src/app_base.h index be8e0b08d..e3973c647 100644 --- a/app/src/app_base.h +++ b/app/src/app_base.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -41,6 +42,10 @@ #define ST_APP_MAX_TX_ANC_SESSIONS (180) #define ST_APP_MAX_RX_ANC_SESSIONS (180) +/* FMD = Fast Metadata (ST2110-41) */ +#define ST_APP_MAX_TX_FMD_SESSIONS (180) +#define ST_APP_MAX_RX_FMD_SESSIONS (180) + #define ST_APP_MAX_LCORES (32) #define ST_APP_DEFAULT_FB_CNT (3) @@ -226,6 +231,37 @@ struct st_app_tx_anc_session { uint32_t st40_seq_id; }; +struct st_app_tx_fmd_session { + int idx; + st41_tx_handle handle; + + uint16_t framebuff_cnt; + + uint16_t framebuff_producer_idx; + uint16_t framebuff_consumer_idx; + struct st_tx_frame* framebuffs; + + uint32_t st41_frame_done_cnt; + uint32_t st41_packet_done_cnt; + + char st41_source_url[ST_APP_URL_MAX_LEN + 1]; + int st41_source_fd; + pcap_t* st41_pcap; + bool st41_pcap_input; + uint32_t st41_dit; + uint32_t st41_k_bit; + bool st41_rtp_input; + uint8_t* st41_source_begin; + uint8_t* st41_source_end; + uint8_t* st41_frame_cursor; /* cursor to current frame */ + pthread_t st41_app_thread; + bool st41_app_thread_stop; + pthread_cond_t st41_wake_cond; + pthread_mutex_t st41_wake_mutex; + uint32_t st41_rtp_tmstamp; + uint32_t st41_seq_id; +}; + struct st_app_rx_video_session { int idx; mtl_handle st; @@ -327,6 +363,19 @@ struct st_app_rx_anc_session { uint64_t stat_frame_first_rx_time; }; +struct st_app_rx_fmd_session { + int idx; + st41_rx_handle handle; + pthread_t st41_app_thread; + pthread_cond_t st41_wake_cond; + pthread_mutex_t st41_wake_mutex; + bool st41_app_thread_stop; + + /* stat */ + int stat_frame_total_received; + uint64_t stat_frame_first_rx_time; +}; + struct st22_app_tx_session { int idx; st22_tx_handle handle; @@ -610,6 +659,12 @@ struct st_app_context { int tx_anc_rtp_ring_size; /* the ring size for tx anc rtp type */ bool tx_anc_dedicate_queue; + struct st_app_tx_fmd_session* tx_fmd_sessions; + char tx_fmd_url[ST_APP_URL_MAX_LEN]; + int tx_fmd_session_cnt; + int tx_fmd_rtp_ring_size; /* the ring size for tx fmd rtp type */ + bool tx_fmd_dedicate_queue; + char tx_st22p_url[ST_APP_URL_MAX_LEN]; /* send st22p content url*/ struct st_app_tx_st22p_session* tx_st22p_sessions; int tx_st22p_session_cnt; @@ -640,6 +695,9 @@ struct st_app_context { struct st_app_rx_anc_session* rx_anc_sessions; int rx_anc_session_cnt; + struct st_app_rx_fmd_session* rx_fmd_sessions; + int rx_fmd_session_cnt; + struct st_app_rx_st22p_session* rx_st22p_sessions; int rx_st22p_session_cnt; diff --git a/app/src/args.c b/app/src/args.c index d89eaf387..cd5eef182 100644 --- a/app/src/args.c +++ b/app/src/args.c @@ -37,6 +37,9 @@ enum st_args_cmd { ST_ARG_TX_ANC_URL, ST_ARG_TX_ANC_SESSIONS_CNT, ST_ARG_TX_ANC_RTP_RING_SIZE, + ST_ARG_TX_FMD_URL, + ST_ARG_TX_FMD_SESSIONS_CNT, + ST_ARG_TX_FMD_RTP_RING_SIZE, ST22_ARG_TX_SESSIONS_CNT, ST22_ARG_TX_URL, ST_ARG_RX_VIDEO_SESSIONS_CNT, @@ -48,6 +51,7 @@ enum st_args_cmd { ST_ARG_RX_AUDIO_RTP_RING_SIZE, ST_ARG_RX_AUDIO_DUMP_TIME_S, ST_ARG_RX_ANC_SESSIONS_CNT, + ST_ARG_RX_FMD_SESSIONS_CNT, ST22_ARG_RX_SESSIONS_CNT, ST_ARG_HDR_SPLIT, ST_ARG_PACING_WAY, @@ -134,6 +138,7 @@ enum st_args_cmd { ST_ARG_AUDIO_RL_OFFSET_US, ST_ARG_AUDIO_FIFO_SIZE, ST_ARG_ANC_DEDICATE_QUEUE, + ST_ARG_FMD_DEDICATE_QUEUE, ST_ARG_TX_NO_BURST_CHECK, ST_ARG_DHCP, ST_ARG_IOVA_MODE, @@ -183,6 +188,9 @@ static struct option st_app_args_options[] = { {"tx_anc_url", required_argument, 0, ST_ARG_TX_ANC_URL}, 
{"tx_anc_sessions_count", required_argument, 0, ST_ARG_TX_ANC_SESSIONS_CNT}, {"tx_anc_rtp_ring_size", required_argument, 0, ST_ARG_TX_ANC_RTP_RING_SIZE}, + {"tx_fmd_url", required_argument, 0, ST_ARG_TX_FMD_URL}, + {"tx_fmd_sessions_count", required_argument, 0, ST_ARG_TX_FMD_SESSIONS_CNT}, + {"tx_fmd_rtp_ring_size", required_argument, 0, ST_ARG_TX_FMD_RTP_RING_SIZE}, {"tx_st22_sessions_count", required_argument, 0, ST22_ARG_TX_SESSIONS_CNT}, {"tx_st22_url", required_argument, 0, ST22_ARG_TX_URL}, @@ -195,6 +203,7 @@ static struct option st_app_args_options[] = { {"rx_audio_rtp_ring_size", required_argument, 0, ST_ARG_RX_AUDIO_RTP_RING_SIZE}, {"rx_audio_dump_time_s", required_argument, 0, ST_ARG_RX_AUDIO_DUMP_TIME_S}, {"rx_anc_sessions_count", required_argument, 0, ST_ARG_RX_ANC_SESSIONS_CNT}, + {"rx_fmd_sessions_count", required_argument, 0, ST_ARG_RX_FMD_SESSIONS_CNT}, {"rx_st22_sessions_count", required_argument, 0, ST22_ARG_RX_SESSIONS_CNT}, {"hdr_split", no_argument, 0, ST_ARG_HDR_SPLIT}, {"pacing_way", required_argument, 0, ST_ARG_PACING_WAY}, @@ -277,6 +286,7 @@ static struct option st_app_args_options[] = { {"audio_rl_offset", required_argument, 0, ST_ARG_AUDIO_RL_OFFSET_US}, {"audio_fifo_size", required_argument, 0, ST_ARG_AUDIO_FIFO_SIZE}, {"anc_dedicate_queue", no_argument, 0, ST_ARG_ANC_DEDICATE_QUEUE}, + {"fmd_dedicate_queue", no_argument, 0, ST_ARG_FMD_DEDICATE_QUEUE}, {"tx_no_burst_check", no_argument, 0, ST_ARG_TX_NO_BURST_CHECK}, {"dhcp", no_argument, 0, ST_ARG_DHCP}, {"iova_mode", required_argument, 0, ST_ARG_IOVA_MODE}, @@ -351,12 +361,14 @@ static int app_args_json(struct st_app_context* ctx, struct mtl_init_params* p, ctx->tx_video_session_cnt = ctx->json_ctx->tx_video_session_cnt; ctx->tx_audio_session_cnt = ctx->json_ctx->tx_audio_session_cnt; ctx->tx_anc_session_cnt = ctx->json_ctx->tx_anc_session_cnt; + ctx->tx_fmd_session_cnt = ctx->json_ctx->tx_fmd_session_cnt; ctx->tx_st22p_session_cnt = ctx->json_ctx->tx_st22p_session_cnt; ctx->tx_st20p_session_cnt = ctx->json_ctx->tx_st20p_session_cnt; ctx->tx_st30p_session_cnt = ctx->json_ctx->tx_st30p_session_cnt; ctx->rx_video_session_cnt = ctx->json_ctx->rx_video_session_cnt; ctx->rx_audio_session_cnt = ctx->json_ctx->rx_audio_session_cnt; ctx->rx_anc_session_cnt = ctx->json_ctx->rx_anc_session_cnt; + ctx->rx_fmd_session_cnt = ctx->json_ctx->rx_fmd_session_cnt; ctx->rx_st22p_session_cnt = ctx->json_ctx->rx_st22p_session_cnt; ctx->rx_st20p_session_cnt = ctx->json_ctx->rx_st20p_session_cnt; ctx->rx_st30p_session_cnt = ctx->json_ctx->rx_st30p_session_cnt; @@ -513,6 +525,15 @@ int st_app_parse_args(struct st_app_context* ctx, struct mtl_init_params* p, int case ST_ARG_TX_ANC_SESSIONS_CNT: ctx->tx_anc_session_cnt = atoi(optarg); break; + case ST_ARG_TX_FMD_URL: + snprintf(ctx->tx_fmd_url, sizeof(ctx->tx_fmd_url), "%s", optarg); + break; + case ST_ARG_TX_FMD_RTP_RING_SIZE: + ctx->tx_fmd_rtp_ring_size = atoi(optarg); + break; + case ST_ARG_TX_FMD_SESSIONS_CNT: + ctx->tx_fmd_session_cnt = atoi(optarg); + break; case ST_ARG_RX_VIDEO_SESSIONS_CNT: ctx->rx_video_session_cnt = atoi(optarg); break; @@ -540,6 +561,9 @@ int st_app_parse_args(struct st_app_context* ctx, struct mtl_init_params* p, int case ST_ARG_RX_ANC_SESSIONS_CNT: ctx->rx_anc_session_cnt = atoi(optarg); break; + case ST_ARG_RX_FMD_SESSIONS_CNT: + ctx->rx_fmd_session_cnt = atoi(optarg); + break; case ST22_ARG_TX_SESSIONS_CNT: ctx->tx_st22_session_cnt = atoi(optarg); break; @@ -856,6 +880,9 @@ int st_app_parse_args(struct st_app_context* ctx, struct mtl_init_params* p, 
int case ST_ARG_ANC_DEDICATE_QUEUE: ctx->tx_anc_dedicate_queue = true; break; + case ST_ARG_FMD_DEDICATE_QUEUE: + ctx->tx_fmd_dedicate_queue = true; + break; case ST_ARG_DHCP: for (int port = 0; port < MTL_PORT_MAX; ++port) p->net_proto[port] = MTL_PROTO_DHCP; diff --git a/app/src/fmt.h b/app/src/fmt.h index 057dafb87..35f6b2fc5 100644 --- a/app/src/fmt.h +++ b/app/src/fmt.h @@ -13,6 +13,7 @@ #define ST_APP_PAYLOAD_TYPE_AUDIO (111) #define ST_APP_PAYLOAD_TYPE_ANCILLARY (113) #define ST_APP_PAYLOAD_TYPE_ST22 (114) +#define ST_APP_PAYLOAD_TYPE_FASTMETADATA (115) #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) diff --git a/app/src/meson.build b/app/src/meson.build index c8748de30..b55c3c145 100644 --- a/app/src/meson.build +++ b/app/src/meson.build @@ -5,6 +5,7 @@ sources = files('rxtx_app.c', 'args.c', 'parse_json.c', 'fmt.c', 'tx_st20p_app.c', 'rx_st20p_app.c', 'tx_st22p_app.c', 'rx_st22p_app.c', 'tx_ancillary_app.c', 'rx_ancillary_app.c', + 'tx_fastmetadata_app.c', 'rx_fastmetadata_app.c', 'tx_st30p_app.c', 'rx_st30p_app.c', ) diff --git a/app/src/parse_json.c b/app/src/parse_json.c index 44dc0e4a8..e9f3708bc 100644 --- a/app/src/parse_json.c +++ b/app/src/parse_json.c @@ -1219,6 +1219,134 @@ static int st_json_parse_rx_anc(int idx, json_object* anc_obj, return ST_JSON_SUCCESS; } +static int st_json_parse_tx_fmd(int idx, json_object* fmd_obj, + st_json_fastmetadata_session_t* fmd) { + if (fmd_obj == NULL || fmd == NULL) { + err("%s, can not parse tx fmd session\n", __func__); + return -ST_JSON_NULL; + } + int ret; + + /* parse udp port */ + ret = parse_base_udp_port(fmd_obj, &fmd->base, idx); + if (ret < 0) return ret; + + /* parse payload type */ + ret = parse_base_payload_type(fmd_obj, &fmd->base); + if (ret < 0) { + err("%s, use default pt %u\n", __func__, ST_APP_PAYLOAD_TYPE_FASTMETADATA); + fmd->base.payload_type = ST_APP_PAYLOAD_TYPE_FASTMETADATA; + } + + /* parse fmd type */ + const char* type = json_object_get_string(st_json_object_object_get(fmd_obj, "type")); + REQUIRED_ITEM(type); + if (strcmp(type, "frame") == 0) { + fmd->info.type = ST41_TYPE_FRAME_LEVEL; + } else if (strcmp(type, "rtp") == 0) { + fmd->info.type = ST41_TYPE_RTP_LEVEL; + } else { + err("%s, invalid fmd type %s\n", __func__, type); + return -ST_JSON_NOT_VALID; + } + + /* parse fmd data item type */ + json_object* fmd_dit_obj = + st_json_object_object_get(fmd_obj, "fastmetadata_data_item_type"); + if (fmd_dit_obj) { + uint32_t fmd_dit = json_object_get_int(fmd_dit_obj); + if (fmd_dit < 0 || fmd_dit > 0x3fffff) { + err("%s, invalid fastmetadata_data_item_type 0x%x\n", __func__, fmd_dit); + return -ST_JSON_NOT_VALID; + } + fmd->info.fmd_dit = fmd_dit; + info("%s, fastmetadata_data_item_type = 0x%x\n", __func__, fmd_dit); + } else { + err("%s, No fastmetadata_data_item_type !\n", __func__); + return -ST_JSON_NULL; + } + + /* parse fmd data item K-bit */ + json_object* fmd_k_bit_obj = st_json_object_object_get(fmd_obj, "fastmetadata_k_bit"); + if (fmd_k_bit_obj) { + uint8_t fmd_k_bit = json_object_get_int(fmd_k_bit_obj); + if (fmd_k_bit < 0 || fmd_k_bit > 1) { + err("%s, invalid fastmetadata_k_bit 0x%x\n", __func__, fmd_k_bit); + return -ST_JSON_NOT_VALID; + } + fmd->info.fmd_k_bit = fmd_k_bit; + info("%s, fastmetadata_k_bit = 0x%x\n", __func__, fmd_k_bit); + } else { + err("%s, No fastmetadata_k_bit !\n", __func__); + return -ST_JSON_NULL; + } + + /* parse fmd fps */ + const char* fmd_fps = + json_object_get_string(st_json_object_object_get(fmd_obj, "fastmetadata_fps")); + REQUIRED_ITEM(fmd_fps); + if 
(strcmp(fmd_fps, "p59") == 0) { + fmd->info.fmd_fps = ST_FPS_P59_94; + } else if (strcmp(fmd_fps, "p50") == 0) { + fmd->info.fmd_fps = ST_FPS_P50; + } else if (strcmp(fmd_fps, "p25") == 0) { + fmd->info.fmd_fps = ST_FPS_P25; + } else if (strcmp(fmd_fps, "p29") == 0) { + fmd->info.fmd_fps = ST_FPS_P29_97; + } else { + err("%s, invalid fmd fps %s\n", __func__, fmd_fps); + return -ST_JSON_NOT_VALID; + } + + /* parse fmd interlaced */ + json_object* fmd_interlaced = st_json_object_object_get(fmd_obj, "interlaced"); + if (fmd_interlaced) { + fmd->info.interlaced = json_object_get_boolean(fmd_interlaced); + } + + /* parse fmd url */ + ret = parse_url(fmd_obj, "fastmetadata_url", fmd->info.fmd_url); + if (ret < 0) return ret; + + /* parse enable rtcp */ + fmd->enable_rtcp = + json_object_get_boolean(st_json_object_object_get(fmd_obj, "enable_rtcp")); + + return ST_JSON_SUCCESS; +} + +static int st_json_parse_rx_fmd(int idx, json_object* fmd_obj, + st_json_fastmetadata_session_t* fmd) { + if (fmd_obj == NULL || fmd == NULL) { + err("%s, can not parse rx fmd session\n", __func__); + return -ST_JSON_NULL; + } + int ret; + + /* parse udp port */ + ret = parse_base_udp_port(fmd_obj, &fmd->base, idx); + if (ret < 0) return ret; + + /* parse payload type */ + ret = parse_base_payload_type(fmd_obj, &fmd->base); + if (ret < 0) { + err("%s, use default pt %u\n", __func__, ST_APP_PAYLOAD_TYPE_FASTMETADATA); + fmd->base.payload_type = ST_APP_PAYLOAD_TYPE_FASTMETADATA; + } + + /* parse fmd interlaced */ + json_object* fmd_interlaced = st_json_object_object_get(fmd_obj, "interlaced"); + if (fmd_interlaced) { + fmd->info.interlaced = json_object_get_boolean(fmd_interlaced); + } + + /* parse enable rtcp */ + fmd->enable_rtcp = + json_object_get_boolean(st_json_object_object_get(fmd_obj, "enable_rtcp")); + + return ST_JSON_SUCCESS; +} + static int parse_st22p_width(json_object* st22p_obj, st_json_st22p_session_t* st22p) { int width = json_object_get_int(st_json_object_object_get(st22p_obj, "width")); if (width <= 0) { @@ -1265,7 +1393,7 @@ static int parse_st22p_fps(json_object* st22p_obj, st_json_st22p_session_t* st22 } else if (strcmp(fps, "p23") == 0) { st22p->info.fps = ST_FPS_P23_98; } else { - err("%s, invalid anc fps %s\n", __func__, fps); + err("%s, invalid st22 fps %s\n", __func__, fps); return -ST_JSON_NOT_VALID; } return ST_JSON_SUCCESS; @@ -1611,7 +1739,7 @@ static int parse_st20p_fps(json_object* st20p_obj, st_json_st20p_session_t* st20 } else if (strcmp(fps, "p23") == 0) { st20p->info.fps = ST_FPS_P23_98; } else { - err("%s, invalid anc fps %s\n", __func__, fps); + err("%s, invalid st20 fps %s\n", __func__, fps); return -ST_JSON_NOT_VALID; } return ST_JSON_SUCCESS; @@ -2001,6 +2129,10 @@ void st_app_free_json(st_json_context_t* ctx) { st_app_free(ctx->tx_anc_sessions); ctx->tx_anc_sessions = NULL; } + if (ctx->tx_fmd_sessions) { + st_app_free(ctx->tx_fmd_sessions); + ctx->tx_fmd_sessions = NULL; + } if (ctx->tx_st22p_sessions) { st_app_free(ctx->tx_st22p_sessions); ctx->tx_st22p_sessions = NULL; @@ -2025,6 +2157,10 @@ void st_app_free_json(st_json_context_t* ctx) { st_app_free(ctx->rx_anc_sessions); ctx->rx_anc_sessions = NULL; } + if (ctx->rx_fmd_sessions) { + st_app_free(ctx->rx_fmd_sessions); + ctx->rx_fmd_sessions = NULL; + } if (ctx->rx_st22p_sessions) { st_app_free(ctx->rx_st22p_sessions); ctx->rx_st22p_sessions = NULL; @@ -2176,6 +2312,10 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { num = parse_session_num(tx_group, "ancillary"); if (num < 0) goto error; 
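Taken together, the tx/rx parsers above accept a new "fastmetadata" session group alongside the existing "ancillary" one. Below is a minimal configuration sketch of one such entry, assuming the base keys (replicas, start_port, payload_type) follow the same convention as the other session types; the fastmetadata-specific keys are exactly the ones read by st_json_parse_tx_fmd():

    "fastmetadata": [
      {
        "replicas": 1,
        "start_port": 40000,
        "payload_type": 115,
        "type": "frame",
        "fastmetadata_data_item_type": 1234567,
        "fastmetadata_k_bit": 0,
        "fastmetadata_fps": "p59",
        "interlaced": false,
        "fastmetadata_url": "./test.txt",
        "enable_rtcp": false
      }
    ]

The matching command-line options added in args.c are --tx_fmd_url, --tx_fmd_sessions_count, --tx_fmd_rtp_ring_size, --rx_fmd_sessions_count and --fmd_dedicate_queue.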
ctx->tx_anc_session_cnt += num; + /* parse tx fastmetadata sessions */ + num = parse_session_num(tx_group, "fastmetadata"); + if (num < 0) goto error; + ctx->tx_fmd_session_cnt += num; /* parse tx st22p sessions */ num = parse_session_num(tx_group, "st22p"); if (num < 0) goto error; @@ -2212,6 +2352,13 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { ret = -ST_JSON_NULL; goto error; } + ctx->tx_fmd_sessions = (st_json_fastmetadata_session_t*)st_app_zmalloc( + ctx->tx_fmd_session_cnt * sizeof(st_json_fastmetadata_session_t)); + if (!ctx->tx_fmd_sessions) { + err("%s, failed to allocate tx_fmd_sessions\n", __func__); + ret = -ST_JSON_NULL; + goto error; + } ctx->tx_st22p_sessions = (st_json_st22p_session_t*)st_app_zmalloc( ctx->tx_st22p_session_cnt * sizeof(st_json_st22p_session_t)); if (!ctx->tx_st22p_sessions) { @@ -2238,6 +2385,7 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { int num_video = 0; int num_audio = 0; int num_anc = 0; + int num_fmd = 0; int num_st22p = 0; int num_st20p = 0; int num_st30p = 0; @@ -2401,6 +2549,37 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { } } + /* parse tx fastmetadata sessions */ + json_object* fmd_array = st_json_object_object_get(tx_group, "fastmetadata"); + if (fmd_array != NULL && json_object_get_type(fmd_array) == json_type_array) { + for (int j = 0; j < json_object_array_length(fmd_array); ++j) { + json_object* fmd_session = json_object_array_get_idx(fmd_array, j); + int replicas = + json_object_get_int(st_json_object_object_get(fmd_session, "replicas")); + if (replicas < 0) { + err("%s, invalid replicas number: %d\n", __func__, replicas); + ret = -ST_JSON_NOT_VALID; + goto error; + } + for (int k = 0; k < replicas; ++k) { + parse_session_ip(json_object_get_string(dip_p), + &ctx->tx_fmd_sessions[num_fmd].base, MTL_SESSION_PORT_P); + ctx->tx_fmd_sessions[num_fmd].base.inf[0] = &ctx->interfaces[inf_p]; + ctx->interfaces[inf_p].tx_fmd_sessions_cnt++; + if (num_inf == 2) { + parse_session_ip(json_object_get_string(dip_r), + &ctx->tx_fmd_sessions[num_fmd].base, MTL_SESSION_PORT_R); + ctx->tx_fmd_sessions[num_fmd].base.inf[1] = &ctx->interfaces[inf_r]; + ctx->interfaces[inf_r].tx_fmd_sessions_cnt++; + } + ctx->tx_fmd_sessions[num_fmd].base.num_inf = num_inf; + ret = st_json_parse_tx_fmd(k, fmd_session, &ctx->tx_fmd_sessions[num_fmd]); + if (ret) goto error; + num_fmd++; + } + } + } + /* parse tx st22p sessions */ json_object* st22p_array = st_json_object_object_get(tx_group, "st22p"); if (st22p_array != NULL && json_object_get_type(st22p_array) == json_type_array) { @@ -2528,6 +2707,10 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { num = parse_session_num(rx_group, "ancillary"); if (num < 0) goto error; ctx->rx_anc_session_cnt += num; + /* parse rx fastmetadata sessions */ + num = parse_session_num(rx_group, "fastmetadata"); + if (num < 0) goto error; + ctx->rx_fmd_session_cnt += num; /* parse rx st22p sessions */ num = parse_session_num(rx_group, "st22p"); if (num < 0) goto error; @@ -2568,6 +2751,13 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { ret = -ST_JSON_NULL; goto error; } + ctx->rx_fmd_sessions = (st_json_fastmetadata_session_t*)st_app_zmalloc( + ctx->rx_fmd_session_cnt * sizeof(st_json_fastmetadata_session_t)); + if (!ctx->rx_fmd_sessions) { + err("%s, failed to allocate rx_fmd_sessions\n", __func__); + ret = -ST_JSON_NULL; + goto error; + } ctx->rx_st22p_sessions = (st_json_st22p_session_t*)st_app_zmalloc( 
ctx->rx_st22p_session_cnt * sizeof(st_json_st22p_session_t)); if (!ctx->rx_st22p_sessions) { @@ -2601,6 +2791,7 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { int num_video = 0; int num_audio = 0; int num_anc = 0; + int num_fmd = 0; int num_st22p = 0; int num_st20p = 0; int num_st20r = 0; @@ -2805,6 +2996,44 @@ int st_app_parse_json(st_json_context_t* ctx, const char* filename) { } } + /* parse rx fastmetadata sessions */ + json_object* fmd_array = st_json_object_object_get(rx_group, "fastmetadata"); + if (fmd_array != NULL && json_object_get_type(fmd_array) == json_type_array) { + for (int j = 0; j < json_object_array_length(fmd_array); ++j) { + json_object* fmd_session = json_object_array_get_idx(fmd_array, j); + int replicas = + json_object_get_int(st_json_object_object_get(fmd_session, "replicas")); + if (replicas < 0) { + err("%s, invalid replicas number: %d\n", __func__, replicas); + ret = -ST_JSON_NOT_VALID; + goto error; + } + for (int k = 0; k < replicas; ++k) { + parse_session_ip(json_object_get_string(ip_p), + &ctx->rx_fmd_sessions[num_fmd].base, MTL_SESSION_PORT_P); + if (mcast_src_ip_p) + parse_mcast_src_ip(json_object_get_string(mcast_src_ip_p), + &ctx->rx_fmd_sessions[num_fmd].base, MTL_SESSION_PORT_P); + ctx->rx_fmd_sessions[num_fmd].base.inf[0] = &ctx->interfaces[inf_p]; + ctx->interfaces[inf_p].rx_fmd_sessions_cnt++; + if (num_inf == 2) { + parse_session_ip(json_object_get_string(ip_r), + &ctx->rx_fmd_sessions[num_fmd].base, MTL_SESSION_PORT_R); + if (mcast_src_ip_r) + parse_mcast_src_ip(json_object_get_string(mcast_src_ip_r), + &ctx->rx_fmd_sessions[num_fmd].base, + MTL_SESSION_PORT_R); + ctx->rx_fmd_sessions[num_fmd].base.inf[1] = &ctx->interfaces[inf_r]; + ctx->interfaces[inf_r].rx_fmd_sessions_cnt++; + } + ctx->rx_fmd_sessions[num_fmd].base.num_inf = num_inf; + ret = st_json_parse_rx_fmd(k, fmd_session, &ctx->rx_fmd_sessions[num_fmd]); + if (ret) goto error; + num_fmd++; + } + } + } + /* parse rx st22p sessions */ json_object* st22p_array = st_json_object_object_get(rx_group, "st22p"); if (st22p_array != NULL && json_object_get_type(st22p_array) == json_type_array) { diff --git a/app/src/parse_json.h b/app/src/parse_json.h index 15c139e4a..e6283ce29 100644 --- a/app/src/parse_json.h +++ b/app/src/parse_json.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -126,6 +127,8 @@ typedef struct st_json_interface { int rx_audio_sessions_cnt; /* st30 on interface level */ int tx_anc_sessions_cnt; /* st40 on interface level */ int rx_anc_sessions_cnt; /* st40 on interface level */ + int tx_fmd_sessions_cnt; /* st41 on interface level */ + int rx_fmd_sessions_cnt; /* st41 on interface level */ } st_json_interface_t; enum st_json_ip_type { @@ -177,6 +180,16 @@ typedef struct st_json_ancillary_info { char anc_url[ST_APP_URL_MAX_LEN]; } st_json_ancillary_info_t; +typedef struct st_json_fastmetadata_info { + enum st41_type type; + uint32_t fmd_dit; + uint8_t fmd_k_bit; + enum st_fps fmd_fps; + bool interlaced; + + char fmd_url[ST_APP_URL_MAX_LEN]; +} st_json_fastmetadata_info_t; + typedef struct st_json_st22p_info { enum st_frame_fmt format; enum st21_pacing pacing; @@ -238,6 +251,13 @@ typedef struct st_json_ancillary_session { bool enable_rtcp; } st_json_ancillary_session_t; +typedef struct st_json_fastmetadata_session { + st_json_session_base_t base; + st_json_fastmetadata_info_t info; + + bool enable_rtcp; +} st_json_fastmetadata_session_t; + typedef struct st_json_st22p_session { st_json_session_base_t base; 
st_json_st22p_info_t info; @@ -275,6 +295,8 @@ typedef struct st_json_context { int tx_audio_session_cnt; st_json_ancillary_session_t* tx_anc_sessions; int tx_anc_session_cnt; + st_json_fastmetadata_session_t* tx_fmd_sessions; + int tx_fmd_session_cnt; st_json_st22p_session_t* tx_st22p_sessions; int tx_st22p_session_cnt; st_json_st20p_session_t* tx_st20p_sessions; @@ -288,6 +310,8 @@ typedef struct st_json_context { int rx_audio_session_cnt; st_json_ancillary_session_t* rx_anc_sessions; int rx_anc_session_cnt; + st_json_fastmetadata_session_t* rx_fmd_sessions; + int rx_fmd_session_cnt; st_json_st22p_session_t* rx_st22p_sessions; int rx_st22p_session_cnt; st_json_st20p_session_t* rx_st20p_sessions; diff --git a/app/src/rx_fastmetadata_app.c b/app/src/rx_fastmetadata_app.c new file mode 100644 index 000000000..ec6e27bb0 --- /dev/null +++ b/app/src/rx_fastmetadata_app.c @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#include "rx_fastmetadata_app.h" + +static void app_rx_fmd_handle_rtp(struct st_app_rx_fmd_session* s) { + dbg("%s(%d).\n", __func__, s->idx); + + s->stat_frame_total_received++; + if (!s->stat_frame_first_rx_time) + s->stat_frame_first_rx_time = st_app_get_monotonic_time(); +} + +static void* app_rx_fmd_read_thread(void* arg) { + struct st_app_rx_fmd_session* s = arg; + int idx = s->idx; + void* usrptr; + uint16_t len; + void* mbuf; + + info("%s(%d), start\n", __func__, idx); + while (!s->st41_app_thread_stop) { + mbuf = st41_rx_get_mbuf(s->handle, &usrptr, &len); + if (!mbuf) { + /* no buffer */ + st_pthread_mutex_lock(&s->st41_wake_mutex); + if (!s->st41_app_thread_stop) + st_pthread_cond_wait(&s->st41_wake_cond, &s->st41_wake_mutex); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + continue; + } + /* parse the packet */ + app_rx_fmd_handle_rtp(s); + st41_rx_put_mbuf(s->handle, mbuf); + } + info("%s(%d), stop\n", __func__, idx); + + return NULL; +} + +static int app_rx_fmd_rtp_ready(void* priv) { + struct st_app_rx_fmd_session* s = priv; + + st_pthread_mutex_lock(&s->st41_wake_mutex); + st_pthread_cond_signal(&s->st41_wake_cond); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + return 0; +} + +static int app_rx_fmd_uinit(struct st_app_rx_fmd_session* s) { + int ret, idx = s->idx; + s->st41_app_thread_stop = true; + if (s->st41_app_thread) { + /* wake up the thread */ + st_pthread_mutex_lock(&s->st41_wake_mutex); + st_pthread_cond_signal(&s->st41_wake_cond); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + info("%s(%d), wait app thread stop\n", __func__, idx); + pthread_join(s->st41_app_thread, NULL); + } + if (s->handle) { + ret = st41_rx_free(s->handle); + if (ret < 0) err("%s(%d), st30_rx_free fail %d\n", __func__, idx, ret); + s->handle = NULL; + } + st_pthread_mutex_destroy(&s->st41_wake_mutex); + st_pthread_cond_destroy(&s->st41_wake_cond); + + return 0; +} + +static int app_rx_fmd_init(struct st_app_context* ctx, + st_json_fastmetadata_session_t* fmd, + struct st_app_rx_fmd_session* s) { + int idx = s->idx, ret; + struct st41_rx_ops ops; + char name[32]; + st41_rx_handle handle; + memset(&ops, 0, sizeof(ops)); + + snprintf(name, 32, "app_rx_fmd%d", idx); + ops.name = name; + ops.priv = s; + ops.num_port = fmd ? fmd->base.num_inf : ctx->para.num_ports; + memcpy( + ops.ip_addr[MTL_SESSION_PORT_P], + fmd ? st_json_ip(ctx, &fmd->base, MTL_SESSION_PORT_P) : ctx->rx_ip_addr[MTL_PORT_P], + MTL_IP_ADDR_LEN); + memcpy(ops.mcast_sip_addr[MTL_SESSION_PORT_P], + fmd ? 
fmd->base.mcast_src_ip[MTL_PORT_P] : ctx->rx_mcast_sip_addr[MTL_PORT_P], + MTL_IP_ADDR_LEN); + snprintf(ops.port[MTL_SESSION_PORT_P], MTL_PORT_MAX_LEN, "%s", + fmd ? fmd->base.inf[MTL_SESSION_PORT_P]->name : ctx->para.port[MTL_PORT_P]); + ops.udp_port[MTL_SESSION_PORT_P] = fmd ? fmd->base.udp_port : (10200 + s->idx); + if (ops.num_port > 1) { + memcpy(ops.ip_addr[MTL_SESSION_PORT_R], + fmd ? st_json_ip(ctx, &fmd->base, MTL_SESSION_PORT_R) + : ctx->rx_ip_addr[MTL_PORT_R], + MTL_IP_ADDR_LEN); + memcpy(ops.mcast_sip_addr[MTL_SESSION_PORT_R], + fmd ? fmd->base.mcast_src_ip[MTL_PORT_R] : ctx->rx_mcast_sip_addr[MTL_PORT_R], + MTL_IP_ADDR_LEN); + snprintf(ops.port[MTL_SESSION_PORT_R], MTL_PORT_MAX_LEN, "%s", + fmd ? fmd->base.inf[MTL_SESSION_PORT_R]->name : ctx->para.port[MTL_PORT_R]); + ops.udp_port[MTL_SESSION_PORT_R] = fmd ? fmd->base.udp_port : (10200 + s->idx); + } + ops.rtp_ring_size = 1024; + ops.payload_type = fmd ? fmd->base.payload_type : ST_APP_PAYLOAD_TYPE_FASTMETADATA; + ops.interlaced = fmd ? fmd->info.interlaced : false; + ops.notify_rtp_ready = app_rx_fmd_rtp_ready; + if (fmd && fmd->enable_rtcp) ops.flags |= ST41_RX_FLAG_ENABLE_RTCP; + st_pthread_mutex_init(&s->st41_wake_mutex, NULL); + st_pthread_cond_init(&s->st41_wake_cond, NULL); + + handle = st41_rx_create(ctx->st, &ops); + if (!handle) { + err("%s(%d), st41_rx_create fail\n", __func__, idx); + return -EIO; + } + s->handle = handle; + + ret = pthread_create(&s->st41_app_thread, NULL, app_rx_fmd_read_thread, s); + if (ret < 0) { + err("%s, st41_app_thread create fail %d\n", __func__, ret); + return -EIO; + } + + char thread_name[32]; + snprintf(thread_name, sizeof(thread_name), "rx_fmd_%d", idx); + mtl_thread_setname(s->st41_app_thread, thread_name); + + return 0; +} + +static bool app_rx_fmd_fps_check(double framerate) { + double expect; + + for (enum st_fps fps = 0; fps < ST_FPS_MAX; fps++) { + expect = st_frame_rate(fps); + if (ST_APP_EXPECT_NEAR(framerate, expect, expect * 0.05)) return true; + } + + return false; +} + +static int app_rx_fmd_result(struct st_app_rx_fmd_session* s) { + int idx = s->idx; + uint64_t cur_time_ns = st_app_get_monotonic_time(); + double time_sec = (double)(cur_time_ns - s->stat_frame_first_rx_time) / NS_PER_S; + double framerate = s->stat_frame_total_received / time_sec; + + if (!s->stat_frame_total_received) return -EINVAL; + + critical("%s(%d), %s, fps %f, %d frame received\n", __func__, idx, + app_rx_fmd_fps_check(framerate) ? "OK" : "FAILED", framerate, + s->stat_frame_total_received); + return 0; +} + +int st_app_rx_fmd_sessions_init(struct st_app_context* ctx) { + int ret, i; + struct st_app_rx_fmd_session* s; + ctx->rx_fmd_sessions = (struct st_app_rx_fmd_session*)st_app_zmalloc( + sizeof(struct st_app_rx_fmd_session) * ctx->rx_fmd_session_cnt); + if (!ctx->rx_fmd_sessions) return -ENOMEM; + for (i = 0; i < ctx->rx_fmd_session_cnt; i++) { + s = &ctx->rx_fmd_sessions[i]; + s->idx = i; + + ret = app_rx_fmd_init(ctx, ctx->json_ctx ? 
&ctx->json_ctx->rx_fmd_sessions[i] : NULL, + s); + if (ret < 0) { + err("%s(%d), app_rx_fmd_session_init fail %d\n", __func__, i, ret); + return ret; + } + } + + return 0; +} + +int st_app_rx_fmd_sessions_uinit(struct st_app_context* ctx) { + int i; + struct st_app_rx_fmd_session* s; + if (!ctx->rx_fmd_sessions) return 0; + for (i = 0; i < ctx->rx_fmd_session_cnt; i++) { + s = &ctx->rx_fmd_sessions[i]; + app_rx_fmd_uinit(s); + } + st_app_free(ctx->rx_fmd_sessions); + return 0; +} + +int st_app_rx_fmd_sessions_result(struct st_app_context* ctx) { + int i, ret = 0; + struct st_app_rx_fmd_session* s; + if (!ctx->rx_fmd_sessions) return 0; + + for (i = 0; i < ctx->rx_fmd_session_cnt; i++) { + s = &ctx->rx_fmd_sessions[i]; + ret += app_rx_fmd_result(s); + } + + return ret; +} diff --git a/app/src/rx_fastmetadata_app.h b/app/src/rx_fastmetadata_app.h new file mode 100644 index 000000000..e4b50f923 --- /dev/null +++ b/app/src/rx_fastmetadata_app.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#include +#include +#include +#include + +#include "app_base.h" +#include "log.h" + +#ifndef _RX_APP_FMD_HEAD_H_ +#define _RX_APP_FMD_HEAD_H_ + +int st_app_rx_fmd_sessions_init(struct st_app_context* ctx); + +int st_app_rx_fmd_sessions_uinit(struct st_app_context* ctx); + +int st_app_rx_fmd_sessions_result(struct st_app_context* ctx); + +#endif /* _RX_APP_FMD_HEAD_H_ */ \ No newline at end of file diff --git a/app/src/rxtx_app.c b/app/src/rxtx_app.c index 5c17be47f..e8f211f66 100644 --- a/app/src/rxtx_app.c +++ b/app/src/rxtx_app.c @@ -19,10 +19,12 @@ #include "log.h" #include "player.h" #include "rx_ancillary_app.h" +#include "rx_fastmetadata_app.h" #include "rx_st20p_app.h" #include "rx_st22p_app.h" #include "rx_st30p_app.h" #include "tx_ancillary_app.h" +#include "tx_fastmetadata_app.h" #include "tx_st20p_app.h" #include "tx_st22p_app.h" #include "tx_st30p_app.h" @@ -182,6 +184,8 @@ static void st_app_ctx_init(struct st_app_context* ctx) { ctx->tx_audio_session_cnt = 0; snprintf(ctx->tx_anc_url, sizeof(ctx->tx_anc_url), "%s", "test.txt"); ctx->tx_anc_session_cnt = 0; + snprintf(ctx->tx_fmd_url, sizeof(ctx->tx_fmd_url), "%s", "test.txt"); + ctx->tx_fmd_session_cnt = 0; snprintf(ctx->tx_st22_url, sizeof(ctx->tx_st22_url), "%s", "test.raw"); ctx->tx_st22_session_cnt = 0; snprintf(ctx->tx_st22p_url, sizeof(ctx->tx_st22p_url), "%s", "test_rfc4175.yuv"); @@ -193,6 +197,7 @@ static void st_app_ctx_init(struct st_app_context* ctx) { ctx->rx_video_session_cnt = 0; ctx->rx_audio_session_cnt = 0; ctx->rx_anc_session_cnt = 0; + ctx->rx_fmd_session_cnt = 0; ctx->rx_st22_session_cnt = 0; ctx->rx_st22p_session_cnt = 0; ctx->rx_st20p_session_cnt = 0; @@ -266,6 +271,7 @@ static void st_app_ctx_free(struct st_app_context* ctx) { st_app_tx_video_sessions_uinit(ctx); st_app_tx_audio_sessions_uinit(ctx); st_app_tx_anc_sessions_uinit(ctx); + st_app_tx_fmd_sessions_uinit(ctx); st_app_tx_st22p_sessions_uinit(ctx); st_app_tx_st20p_sessions_uinit(ctx); st_app_tx_st30p_sessions_uinit(ctx); @@ -274,6 +280,7 @@ static void st_app_ctx_free(struct st_app_context* ctx) { st_app_rx_video_sessions_uinit(ctx); st_app_rx_audio_sessions_uinit(ctx); st_app_rx_anc_sessions_uinit(ctx); + st_app_rx_fmd_sessions_uinit(ctx); st_app_rx_st22p_sessions_uinit(ctx); st_app_rx_st20p_sessions_uinit(ctx); st_app_rx_st30p_sessions_uinit(ctx); @@ -316,6 +323,7 @@ static int st_app_result(struct st_app_context* ctx) { result += st_app_rx_video_sessions_result(ctx); result 
+= st_app_rx_audio_sessions_result(ctx); result += st_app_rx_anc_sessions_result(ctx); + result += st_app_rx_fmd_sessions_result(ctx); result += st_app_rx_st22p_sessions_result(ctx); result += st_app_rx_st20p_sessions_result(ctx); result += st_app_rx_st30p_sessions_result(ctx); @@ -371,12 +379,14 @@ int main(int argc, char** argv) { ctx->tx_st20p_session_cnt > ST_APP_MAX_TX_VIDEO_SESSIONS || ctx->tx_audio_session_cnt > ST_APP_MAX_TX_AUDIO_SESSIONS || ctx->tx_anc_session_cnt > ST_APP_MAX_TX_ANC_SESSIONS || + ctx->tx_fmd_session_cnt > ST_APP_MAX_TX_FMD_SESSIONS || ctx->rx_video_session_cnt > ST_APP_MAX_RX_VIDEO_SESSIONS || ctx->rx_st22_session_cnt > ST_APP_MAX_RX_VIDEO_SESSIONS || ctx->rx_st22p_session_cnt > ST_APP_MAX_RX_VIDEO_SESSIONS || ctx->rx_st20p_session_cnt > ST_APP_MAX_RX_VIDEO_SESSIONS || ctx->rx_audio_session_cnt > ST_APP_MAX_RX_AUDIO_SESSIONS || - ctx->rx_anc_session_cnt > ST_APP_MAX_RX_ANC_SESSIONS) { + ctx->rx_anc_session_cnt > ST_APP_MAX_RX_ANC_SESSIONS || + ctx->rx_fmd_session_cnt > ST_APP_MAX_RX_FMD_SESSIONS) { err("%s, session cnt invalid, pass the restriction\n", __func__); return -EINVAL; } @@ -394,10 +404,12 @@ int main(int argc, char** argv) { ctx->para.tx_queues_cnt[i] = st_tx_sessions_queue_cnt(ctx->json_ctx->interfaces[i].tx_video_sessions_cnt, ctx->json_ctx->interfaces[i].tx_audio_sessions_cnt, - ctx->json_ctx->interfaces[i].tx_anc_sessions_cnt); + ctx->json_ctx->interfaces[i].tx_anc_sessions_cnt, + ctx->json_ctx->interfaces[i].tx_fmd_sessions_cnt); } else { - ctx->para.tx_queues_cnt[i] = st_tx_sessions_queue_cnt( - tx_st20_sessions, ctx->tx_audio_session_cnt, ctx->tx_anc_session_cnt); + ctx->para.tx_queues_cnt[i] = + st_tx_sessions_queue_cnt(tx_st20_sessions, ctx->tx_audio_session_cnt, + ctx->tx_anc_session_cnt, ctx->tx_fmd_session_cnt); } if (ctx->para.tx_queues_cnt[i] && (ctx->para.pmd[i] == MTL_PMD_DPDK_USER)) { ctx->para.tx_queues_cnt[i] += 4; /* add extra 4 queues for recovery */ @@ -409,10 +421,12 @@ int main(int argc, char** argv) { ctx->para.rx_queues_cnt[i] = st_rx_sessions_queue_cnt(ctx->json_ctx->interfaces[i].rx_video_sessions_cnt, ctx->json_ctx->interfaces[i].rx_audio_sessions_cnt, - ctx->json_ctx->interfaces[i].rx_anc_sessions_cnt); + ctx->json_ctx->interfaces[i].rx_anc_sessions_cnt, + ctx->json_ctx->interfaces[i].rx_fmd_sessions_cnt); } else { - ctx->para.rx_queues_cnt[i] = st_rx_sessions_queue_cnt( - rx_st20_sessions, ctx->rx_audio_session_cnt, ctx->rx_anc_session_cnt); + ctx->para.rx_queues_cnt[i] = + st_rx_sessions_queue_cnt(rx_st20_sessions, ctx->rx_audio_session_cnt, + ctx->rx_anc_session_cnt, ctx->rx_fmd_session_cnt); } } } @@ -482,6 +496,13 @@ int main(int argc, char** argv) { return -EIO; } + ret = st_app_tx_fmd_sessions_init(ctx); + if (ret < 0) { + err("%s, st_app_tx_fmd_sessions_init fail %d\n", __func__, ret); + st_app_ctx_free(ctx); + return -EIO; + } + ret = st_app_tx_st22p_sessions_init(ctx); if (ret < 0) { err("%s, st_app_tx_st22p_sessions_init fail %d\n", __func__, ret); @@ -531,6 +552,13 @@ int main(int argc, char** argv) { return -EIO; } + ret = st_app_rx_fmd_sessions_init(ctx); + if (ret < 0) { + err("%s, st_app_rx_fmd_sessions_init fail %d\n", __func__, ret); + st_app_ctx_free(ctx); + return -EIO; + } + ret = st22_app_rx_sessions_init(ctx); if (ret < 0) { err("%s, st22_app_rx_sessions_init fail %d\n", __func__, ret); diff --git a/app/src/tx_ancillary_app.c b/app/src/tx_ancillary_app.c index 9e57ae072..6c329bf84 100644 --- a/app/src/tx_ancillary_app.c +++ b/app/src/tx_ancillary_app.c @@ -481,7 +481,7 @@ static int 
app_tx_anc_init(struct st_app_context* ctx, st_json_ancillary_session handle = st40_tx_create(ctx->st, &ops); if (!handle) { - err("%s(%d), st30_tx_create fail\n", __func__, idx); + err("%s(%d), st40_tx_create fail\n", __func__, idx); app_tx_anc_uinit(s); return -EIO; } @@ -492,14 +492,14 @@ static int app_tx_anc_init(struct st_app_context* ctx, st_json_ancillary_session ret = app_tx_anc_open_source(s); if (ret < 0) { - err("%s(%d), app_tx_audio_session_open_source fail\n", __func__, idx); + err("%s(%d), app_tx_anc_session_open_source fail\n", __func__, idx); app_tx_anc_uinit(s); return ret; } ret = app_tx_anc_start_source(s); if (ret < 0) { - err("%s(%d), app_tx_audio_session_start_source fail %d\n", __func__, idx, ret); + err("%s(%d), app_tx_anc_session_start_source fail %d\n", __func__, idx, ret); app_tx_anc_uinit(s); return ret; } diff --git a/app/src/tx_fastmetadata_app.c b/app/src/tx_fastmetadata_app.c new file mode 100644 index 000000000..b3c422ecf --- /dev/null +++ b/app/src/tx_fastmetadata_app.c @@ -0,0 +1,542 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#include "tx_fastmetadata_app.h" + +#define ST_PKT_ST41_PAYLOAD_MAX_BYTES (1460 - sizeof(struct st41_rtp_hdr) - 8) + +static int app_tx_fmd_next_frame(void* priv, uint16_t* next_frame_idx, + struct st41_tx_frame_meta* meta) { + struct st_app_tx_fmd_session* s = priv; + int ret; + uint16_t consumer_idx = s->framebuff_consumer_idx; + struct st_tx_frame* framebuff = &s->framebuffs[consumer_idx]; + MTL_MAY_UNUSED(meta); + + st_pthread_mutex_lock(&s->st41_wake_mutex); + if (ST_TX_FRAME_READY == framebuff->stat) { + dbg("%s(%d), next frame idx %u, epoch %" PRIu64 ", tai %" PRIu64 "\n", __func__, + s->idx, consumer_idx, meta->epoch, + st10_get_tai(meta->tfmt, meta->timestamp, ST10_VIDEO_SAMPLING_RATE_90K)); + ret = 0; + framebuff->stat = ST_TX_FRAME_IN_TRANSMITTING; + *next_frame_idx = consumer_idx; + /* point to next */ + consumer_idx++; + if (consumer_idx >= s->framebuff_cnt) consumer_idx = 0; + s->framebuff_consumer_idx = consumer_idx; + } else { + /* not ready */ + dbg("%s(%d), idx %u err stat %d\n", __func__, s->idx, consumer_idx, framebuff->stat); + ret = -EIO; + } + st_pthread_cond_signal(&s->st41_wake_cond); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + return ret; +} + +static int app_tx_fmd_frame_done(void* priv, uint16_t frame_idx, + struct st41_tx_frame_meta* meta) { + struct st_app_tx_fmd_session* s = priv; + int ret; + struct st_tx_frame* framebuff = &s->framebuffs[frame_idx]; + MTL_MAY_UNUSED(meta); + + st_pthread_mutex_lock(&s->st41_wake_mutex); + if (ST_TX_FRAME_IN_TRANSMITTING == framebuff->stat) { + ret = 0; + framebuff->stat = ST_TX_FRAME_FREE; + dbg("%s(%d), done frame idx %u, epoch %" PRIu64 ", tai %" PRIu64 "\n", __func__, + s->idx, frame_idx, meta->epoch, + st10_get_tai(meta->tfmt, meta->timestamp, ST10_VIDEO_SAMPLING_RATE_90K)); + } else { + ret = -EIO; + err("%s(%d), err status %d for frame %u\n", __func__, s->idx, framebuff->stat, + frame_idx); + } + st_pthread_cond_signal(&s->st41_wake_cond); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + + s->st41_frame_done_cnt++; + dbg("%s(%d), framebuffer index %d\n", __func__, s->idx, frame_idx); + return ret; +} + +static int app_tx_fmd_rtp_done(void* priv) { + struct st_app_tx_fmd_session* s = priv; + st_pthread_mutex_lock(&s->st41_wake_mutex); + st_pthread_cond_signal(&s->st41_wake_cond); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + s->st41_packet_done_cnt++; + return 0; +} + +static 
void app_tx_fmd_build_frame(struct st_app_tx_fmd_session* s, + struct st41_frame* dst) { + uint16_t data_item_length_bytes = + s->st41_source_end - s->st41_frame_cursor > ST_PKT_ST41_PAYLOAD_MAX_BYTES + ? ST_PKT_ST41_PAYLOAD_MAX_BYTES + : s->st41_source_end - s->st41_frame_cursor; + dst->data_item_length_bytes = data_item_length_bytes; + dst->data = s->st41_frame_cursor; + s->st41_frame_cursor += data_item_length_bytes; + if (s->st41_frame_cursor == s->st41_source_end) + s->st41_frame_cursor = s->st41_source_begin; +} + +static void* app_tx_fmd_frame_thread(void* arg) { + struct st_app_tx_fmd_session* s = arg; + int idx = s->idx; + uint16_t producer_idx; + struct st_tx_frame* framebuff; + + info("%s(%d), start\n", __func__, idx); + while (!s->st41_app_thread_stop) { + st_pthread_mutex_lock(&s->st41_wake_mutex); + producer_idx = s->framebuff_producer_idx; + framebuff = &s->framebuffs[producer_idx]; + if (ST_TX_FRAME_FREE != framebuff->stat) { + /* not in free */ + if (!s->st41_app_thread_stop) + st_pthread_cond_wait(&s->st41_wake_cond, &s->st41_wake_mutex); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + continue; + } + st_pthread_mutex_unlock(&s->st41_wake_mutex); + + struct st41_frame* frame_addr = st41_tx_get_framebuffer(s->handle, producer_idx); + app_tx_fmd_build_frame(s, frame_addr); + + st_pthread_mutex_lock(&s->st41_wake_mutex); + framebuff->size = sizeof(*frame_addr); + framebuff->stat = ST_TX_FRAME_READY; + /* point to next */ + producer_idx++; + if (producer_idx >= s->framebuff_cnt) producer_idx = 0; + s->framebuff_producer_idx = producer_idx; + st_pthread_mutex_unlock(&s->st41_wake_mutex); + } + info("%s(%d), stop\n", __func__, idx); + + return NULL; +} + +static void* app_tx_fmd_pcap_thread(void* arg) { + struct st_app_tx_fmd_session* s = arg; + int idx = s->idx; + void* mbuf; + void* usrptr = NULL; + struct pcap_pkthdr hdr; + uint8_t* packet; + struct ether_header* eth_hdr; + struct ip* ip_hdr; + struct udphdr* udp_hdr; + uint16_t udp_data_len; + + info("%s(%d), start\n", __func__, idx); + while (!s->st41_app_thread_stop) { + /* get available buffer*/ + mbuf = st41_tx_get_mbuf(s->handle, &usrptr); + if (!mbuf) { + st_pthread_mutex_lock(&s->st41_wake_mutex); + /* try again */ + mbuf = st41_tx_get_mbuf(s->handle, &usrptr); + if (mbuf) { + st_pthread_mutex_unlock(&s->st41_wake_mutex); + } else { + if (!s->st41_app_thread_stop) + st_pthread_cond_wait(&s->st41_wake_cond, &s->st41_wake_mutex); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + continue; + } + } + udp_data_len = 0; + packet = (uint8_t*)pcap_next(s->st41_pcap, &hdr); + if (packet) { + eth_hdr = (struct ether_header*)packet; + if (ntohs(eth_hdr->ether_type) == ETHERTYPE_IP) { + ip_hdr = (struct ip*)(packet + sizeof(struct ether_header)); + if (ip_hdr->ip_p == IPPROTO_UDP) { + udp_hdr = + (struct udphdr*)(packet + sizeof(struct ether_header) + sizeof(struct ip)); + udp_data_len = ntohs(udp_hdr->len) - sizeof(struct udphdr); + mtl_memcpy(usrptr, + packet + sizeof(struct ether_header) + sizeof(struct ip) + + sizeof(struct udphdr), + udp_data_len); + } + } + } else { + char err_buf[PCAP_ERRBUF_SIZE]; + pcap_close(s->st41_pcap); + /* open capture file for offline processing */ + s->st41_pcap = pcap_open_offline(s->st41_source_url, err_buf); + if (s->st41_pcap == NULL) { + err("pcap_open_offline %s() failed: %s\n:", s->st41_source_url, err_buf); + return NULL; + } + } + + st41_tx_put_mbuf(s->handle, mbuf, udp_data_len); + } + info("%s(%d), stop\n", __func__, idx); + + return NULL; +} + +static void 
app_tx_fmd_build_rtp(struct st_app_tx_fmd_session* s, void* usrptr, + uint16_t* mbuf_len) { + /* generate one fmd rtp for test purpose */ + struct st41_rtp_hdr* hdr = (struct st41_rtp_hdr*)usrptr; + uint8_t* payload_hdr = (uint8_t*)(&hdr[1]); + uint16_t data_item_length_bytes = + s->st41_source_end - s->st41_frame_cursor > (MTL_PKT_MAX_RTP_BYTES - 16) + ? (MTL_PKT_MAX_RTP_BYTES - 16) + : s->st41_source_end - s->st41_frame_cursor; + uint16_t data_item_length; + data_item_length = + (data_item_length_bytes + 3) / 4; /* expressed in number of 4-byte words */ + hdr->base.marker = 1; + hdr->base.payload_type = ST_APP_PAYLOAD_TYPE_FASTMETADATA; + hdr->base.version = 2; + hdr->base.extension = 0; + hdr->base.padding = 0; + hdr->base.csrc_count = 0; + hdr->base.tmstamp = s->st41_rtp_tmstamp; + hdr->base.ssrc = htonl(0x88888888 + s->idx); + /* update rtp seq*/ + hdr->base.seq_number = htons((uint16_t)s->st41_seq_id); + s->st41_seq_id++; + s->st41_rtp_tmstamp++; + + for (int i = 0; i < data_item_length_bytes; i++) { + payload_hdr[i] = s->st41_frame_cursor[i]; + } + /* filling with 0's the remianing bytes of last 4-byte word */ + for (int i = data_item_length_bytes; i < data_item_length * 4; i++) { + payload_hdr[i] = 0; + } + + *mbuf_len = sizeof(struct st41_rtp_hdr) + data_item_length * 4; + hdr->st41_hdr_chunk.data_item_length = data_item_length; + hdr->st41_hdr_chunk.data_item_type = s->st41_dit; + hdr->st41_hdr_chunk.data_item_k_bit = s->st41_k_bit; + hdr->swaped_st41_hdr_chunk = htonl(hdr->swaped_st41_hdr_chunk); + + s->st41_frame_cursor += data_item_length_bytes; + if (s->st41_frame_cursor == s->st41_source_end) + s->st41_frame_cursor = s->st41_source_begin; +} + +static void* app_tx_fmd_rtp_thread(void* arg) { + struct st_app_tx_fmd_session* s = arg; + int idx = s->idx; + void* mbuf; + void* usrptr = NULL; + uint16_t mbuf_len = 0; + + info("%s(%d), start\n", __func__, idx); + while (!s->st41_app_thread_stop) { + /* get available buffer*/ + mbuf = st41_tx_get_mbuf(s->handle, &usrptr); + if (!mbuf) { + st_pthread_mutex_lock(&s->st41_wake_mutex); + /* try again */ + mbuf = st41_tx_get_mbuf(s->handle, &usrptr); + if (mbuf) { + st_pthread_mutex_unlock(&s->st41_wake_mutex); + } else { + if (!s->st41_app_thread_stop) + st_pthread_cond_wait(&s->st41_wake_cond, &s->st41_wake_mutex); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + continue; + } + } + + /* build the rtp pkt */ + app_tx_fmd_build_rtp(s, usrptr, &mbuf_len); + + st41_tx_put_mbuf(s->handle, mbuf, mbuf_len); + } + info("%s(%d), stop\n", __func__, idx); + + return NULL; +} + +static int app_tx_fmd_open_source(struct st_app_tx_fmd_session* s) { + if (!s->st41_pcap_input) { + struct stat i; + + s->st41_source_fd = st_open(s->st41_source_url, O_RDONLY); + if (s->st41_source_fd >= 0) { + if (fstat(s->st41_source_fd, &i) < 0) { + err("%s, fstat %s fail\n", __func__, s->st41_source_url); + close(s->st41_source_fd); + s->st41_source_fd = -1; + return -EIO; + } + + uint8_t* m = mmap(NULL, i.st_size, PROT_READ, MAP_SHARED, s->st41_source_fd, 0); + + if (MAP_FAILED != m) { + s->st41_source_begin = m; + s->st41_frame_cursor = m; + s->st41_source_end = m + i.st_size; + } else { + err("%s, mmap fail '%s'\n", __func__, s->st41_source_url); + close(s->st41_source_fd); + s->st41_source_fd = -1; + return -EIO; + } + } else { + err("%s, open fail '%s'\n", __func__, s->st41_source_url); + return -EIO; + } + } else { + char err_buf[PCAP_ERRBUF_SIZE]; + + /* open capture file for offline processing */ + s->st41_pcap = pcap_open_offline(s->st41_source_url, 
err_buf); + if (!s->st41_pcap) { + err("pcap_open_offline %s() failed: %s\n:", s->st41_source_url, err_buf); + return -EIO; + } + } + + return 0; +} + +static int app_tx_fmd_close_source(struct st_app_tx_fmd_session* s) { + if (s->st41_source_fd >= 0) { + munmap(s->st41_source_begin, s->st41_source_end - s->st41_source_begin); + close(s->st41_source_fd); + s->st41_source_fd = -1; + } + if (s->st41_pcap) { + pcap_close(s->st41_pcap); + s->st41_pcap = NULL; + } + + return 0; +} + +static int app_tx_fmd_start_source(struct st_app_tx_fmd_session* s) { + int ret = -EINVAL; + int idx = s->idx; + + s->st41_app_thread_stop = false; + if (s->st41_pcap_input) + ret = pthread_create(&s->st41_app_thread, NULL, app_tx_fmd_pcap_thread, (void*)s); + else if (s->st41_rtp_input) + ret = pthread_create(&s->st41_app_thread, NULL, app_tx_fmd_rtp_thread, (void*)s); + else + ret = pthread_create(&s->st41_app_thread, NULL, app_tx_fmd_frame_thread, (void*)s); + if (ret < 0) { + err("%s(%d), thread create fail err = %d\n", __func__, idx, ret); + return ret; + } + + char thread_name[32]; + snprintf(thread_name, sizeof(thread_name), "tx_fmd_%d", idx); + mtl_thread_setname(s->st41_app_thread, thread_name); + + return 0; +} + +static void app_tx_fmd_stop_source(struct st_app_tx_fmd_session* s) { + if (s->st41_source_fd >= 0) { + s->st41_app_thread_stop = true; + /* wake up the thread */ + st_pthread_mutex_lock(&s->st41_wake_mutex); + st_pthread_cond_signal(&s->st41_wake_cond); + st_pthread_mutex_unlock(&s->st41_wake_mutex); + if (s->st41_app_thread) (void)pthread_join(s->st41_app_thread, NULL); + } +} + +int app_tx_fmd_uinit(struct st_app_tx_fmd_session* s) { + int ret; + + app_tx_fmd_stop_source(s); + + if (s->handle) { + ret = st41_tx_free(s->handle); + if (ret < 0) err("%s(%d), st_tx_fmd_session_free fail %d\n", __func__, s->idx, ret); + s->handle = NULL; + } + + app_tx_fmd_close_source(s); + + if (s->framebuffs) { + st_app_free(s->framebuffs); + s->framebuffs = NULL; + } + + st_pthread_mutex_destroy(&s->st41_wake_mutex); + st_pthread_cond_destroy(&s->st41_wake_cond); + + return 0; +} + +static int app_tx_fmd_init(struct st_app_context* ctx, + st_json_fastmetadata_session_t* fmd, + struct st_app_tx_fmd_session* s) { + int idx = s->idx, ret; + struct st41_tx_ops ops; + char name[32]; + st41_tx_handle handle; + memset(&ops, 0, sizeof(ops)); + + s->framebuff_cnt = 2; + s->st41_seq_id = 1; + + s->framebuffs = + (struct st_tx_frame*)st_app_zmalloc(sizeof(*s->framebuffs) * s->framebuff_cnt); + if (!s->framebuffs) { + return -ENOMEM; + } + for (uint16_t j = 0; j < s->framebuff_cnt; j++) { + s->framebuffs[j].stat = ST_TX_FRAME_FREE; + s->framebuffs[j].lines_ready = 0; + } + + s->st41_source_fd = -1; + st_pthread_mutex_init(&s->st41_wake_mutex, NULL); + st_pthread_cond_init(&s->st41_wake_cond, NULL); + + snprintf(name, 32, "app_tx_fastmetadata%d", idx); + ops.name = name; + ops.priv = s; + ops.num_port = fmd ? fmd->base.num_inf : ctx->para.num_ports; + memcpy(ops.dip_addr[MTL_SESSION_PORT_P], + fmd ? st_json_ip(ctx, &fmd->base, MTL_SESSION_PORT_P) + : ctx->tx_dip_addr[MTL_PORT_P], + MTL_IP_ADDR_LEN); + snprintf(ops.port[MTL_SESSION_PORT_P], MTL_PORT_MAX_LEN, "%s", + fmd ? fmd->base.inf[MTL_SESSION_PORT_P]->name : ctx->para.port[MTL_PORT_P]); + ops.udp_port[MTL_SESSION_PORT_P] = fmd ? 
fmd->base.udp_port : (10200 + s->idx); + if (ctx->has_tx_dst_mac[MTL_PORT_P]) { + memcpy(&ops.tx_dst_mac[MTL_SESSION_PORT_P][0], ctx->tx_dst_mac[MTL_PORT_P], + MTL_MAC_ADDR_LEN); + ops.flags |= ST41_TX_FLAG_USER_P_MAC; + } + if (ops.num_port > 1) { + memcpy(ops.dip_addr[MTL_SESSION_PORT_R], + fmd ? st_json_ip(ctx, &fmd->base, MTL_SESSION_PORT_R) + : ctx->tx_dip_addr[MTL_PORT_R], + MTL_IP_ADDR_LEN); + snprintf(ops.port[MTL_SESSION_PORT_R], MTL_PORT_MAX_LEN, "%s", + fmd ? fmd->base.inf[MTL_SESSION_PORT_R]->name : ctx->para.port[MTL_PORT_R]); + ops.udp_port[MTL_SESSION_PORT_R] = fmd ? fmd->base.udp_port : (10200 + s->idx); + if (ctx->has_tx_dst_mac[MTL_PORT_R]) { + memcpy(&ops.tx_dst_mac[MTL_SESSION_PORT_R][0], ctx->tx_dst_mac[MTL_PORT_R], + MTL_MAC_ADDR_LEN); + ops.flags |= ST41_TX_FLAG_USER_R_MAC; + } + } + ops.get_next_frame = app_tx_fmd_next_frame; + ops.notify_frame_done = app_tx_fmd_frame_done; + ops.notify_rtp_done = app_tx_fmd_rtp_done; + ops.framebuff_cnt = s->framebuff_cnt; + ops.fps = fmd ? fmd->info.fmd_fps : ST_FPS_P59_94; + ops.fmd_dit = fmd->info.fmd_dit; + ops.fmd_k_bit = fmd->info.fmd_k_bit; + s->st41_pcap_input = false; + ops.type = fmd ? fmd->info.type : ST41_TYPE_FRAME_LEVEL; + ops.interlaced = fmd ? fmd->info.interlaced : false; + ops.payload_type = fmd ? fmd->base.payload_type : ST_APP_PAYLOAD_TYPE_FASTMETADATA; + /* select rtp type for pcap file or tx_video_rtp_ring_size */ + if (strstr(s->st41_source_url, ".pcap")) { + ops.type = ST41_TYPE_RTP_LEVEL; + s->st41_pcap_input = true; + } else if (ctx->tx_fmd_rtp_ring_size > 0) { + ops.type = ST41_TYPE_RTP_LEVEL; + s->st41_rtp_input = true; + } + if (ops.type == ST41_TYPE_RTP_LEVEL) { + s->st41_rtp_input = true; + if (ctx->tx_fmd_rtp_ring_size > 0) + ops.rtp_ring_size = ctx->tx_fmd_rtp_ring_size; + else + ops.rtp_ring_size = 16; + } + if (fmd && fmd->enable_rtcp) ops.flags |= ST41_TX_FLAG_ENABLE_RTCP; + if (ctx->tx_fmd_dedicate_queue) ops.flags |= ST41_TX_FLAG_DEDICATE_QUEUE; + + handle = st41_tx_create(ctx->st, &ops); + if (!handle) { + err("%s(%d), st41_tx_create fail\n", __func__, idx); + app_tx_fmd_uinit(s); + return -EIO; + } + + /* copying frame fields for RTP mode to function*/ + s->st41_dit = fmd->info.fmd_dit; + s->st41_k_bit = fmd->info.fmd_k_bit; + + s->handle = handle; + snprintf(s->st41_source_url, sizeof(s->st41_source_url), "%s", + fmd ? fmd->info.fmd_url : ctx->tx_fmd_url); + + ret = app_tx_fmd_open_source(s); + if (ret < 0) { + err("%s(%d), app_tx_fmd_session_open_source fail\n", __func__, idx); + app_tx_fmd_uinit(s); + return ret; + } + + ret = app_tx_fmd_start_source(s); + if (ret < 0) { + err("%s(%d), app_tx_fmd_session_start_source fail %d\n", __func__, idx, ret); + app_tx_fmd_uinit(s); + return ret; + } + + return 0; +} + +int st_app_tx_fmd_sessions_stop(struct st_app_context* ctx) { + struct st_app_tx_fmd_session* s; + if (!ctx->tx_fmd_sessions) return 0; + for (int i = 0; i < ctx->tx_fmd_session_cnt; i++) { + s = &ctx->tx_fmd_sessions[i]; + app_tx_fmd_stop_source(s); + } + + return 0; +} + +int st_app_tx_fmd_sessions_init(struct st_app_context* ctx) { + int ret; + struct st_app_tx_fmd_session* s; + ctx->tx_fmd_sessions = (struct st_app_tx_fmd_session*)st_app_zmalloc( + sizeof(struct st_app_tx_fmd_session) * ctx->tx_fmd_session_cnt); + if (!ctx->tx_fmd_sessions) return -ENOMEM; + + for (int i = 0; i < ctx->tx_fmd_session_cnt; i++) { + s = &ctx->tx_fmd_sessions[i]; + s->idx = i; + ret = app_tx_fmd_init(ctx, ctx->json_ctx ? 
&ctx->json_ctx->tx_fmd_sessions[i] : NULL, + s); + if (ret < 0) { + err("%s(%d), app_tx_fmd_session_init fail %d\n", __func__, i, ret); + return ret; + } + } + + return 0; +} + +int st_app_tx_fmd_sessions_uinit(struct st_app_context* ctx) { + struct st_app_tx_fmd_session* s; + if (!ctx->tx_fmd_sessions) return 0; + + for (int i = 0; i < ctx->tx_fmd_session_cnt; i++) { + s = &ctx->tx_fmd_sessions[i]; + app_tx_fmd_uinit(s); + } + st_app_free(ctx->tx_fmd_sessions); + + return 0; +} diff --git a/app/src/tx_fastmetadata_app.h b/app/src/tx_fastmetadata_app.h new file mode 100644 index 000000000..06f79b0fd --- /dev/null +++ b/app/src/tx_fastmetadata_app.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#include +#include +#include +#include + +#include "app_base.h" +#include "log.h" + +#ifndef _TX_APP_FMD_HEAD_H_ +#define _TX_APP_FMD_HEAD_H_ +int st_app_tx_fmd_sessions_init(struct st_app_context* ctx); + +int st_app_tx_fmd_sessions_uinit(struct st_app_context* ctx); +int st_app_tx_fmd_sessions_stop(struct st_app_context* ctx); +#endif /* _TX_APP_FMD_HEAD_H_ */ \ No newline at end of file diff --git a/include/meson.build b/include/meson.build index 5ad34c321..c8fba4895 100644 --- a/include/meson.build +++ b/include/meson.build @@ -2,7 +2,7 @@ # Copyright 2022 Intel Corporation mtl_header_files = files('mtl_api.h', 'st_api.h', 'st_convert_api.h', 'st_convert_internal.h', - 'st_pipeline_api.h', 'st20_api.h', 'st30_api.h', 'st40_api.h', + 'st_pipeline_api.h', 'st20_api.h', 'st30_api.h', 'st40_api.h', 'st41_api.h', 'mudp_api.h', 'mudp_sockfd_api.h', 'mudp_sockfd_internal.h', 'mtl_lcore_shm_api.h', 'mtl_sch_api.h', 'st30_pipeline_api.h') diff --git a/include/st41_api.h b/include/st41_api.h new file mode 100644 index 000000000..acd4c583a --- /dev/null +++ b/include/st41_api.h @@ -0,0 +1,440 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +/** + * @file st41_api.h + * + * Interfaces for st2110-41 transport. + * + */ + +#include "st_api.h" + +#ifndef _ST41_API_HEAD_H_ +#define _ST41_API_HEAD_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +/** + * Handle to tx st2110-41(fast metadata) session + */ +typedef struct st_tx_fastmetadata_session_handle_impl* st41_tx_handle; +/** + * Handle to rx st2110-41(fast metadata) session + */ +typedef struct st_rx_fastmetadata_session_handle_impl* st41_rx_handle; + +/** + * Flag bit in flags of struct st41_tx_ops. + * P TX destination mac assigned by user + */ +#define ST41_TX_FLAG_USER_P_MAC (MTL_BIT32(0)) +/** + * Flag bit in flags of struct st41_tx_ops. + * R TX destination mac assigned by user + */ +#define ST41_TX_FLAG_USER_R_MAC (MTL_BIT32(1)) +/** + * Flag bit in flags of struct st41_tx_ops. + * User control the frame pacing by pass a timestamp in st41_tx_frame_meta, + * lib will wait until timestamp is reached for each frame. + */ +#define ST41_TX_FLAG_USER_PACING (MTL_BIT32(3)) +/** + * Flag bit in flags of struct st41_tx_ops. + * If enabled, lib will assign the rtp timestamp to the value in + * st41_tx_frame_meta(ST10_TIMESTAMP_FMT_MEDIA_CLK is used) + */ +#define ST41_TX_FLAG_USER_TIMESTAMP (MTL_BIT32(4)) +/** + * Flag bit in flags of struct st41_tx_ops. + * If enable the rtcp. + */ +#define ST41_TX_FLAG_ENABLE_RTCP (MTL_BIT32(5)) +/** + * Flag bit in flags of struct st41_tx_ops. + * If use dedicated queue for TX. 
+ */ +#define ST41_TX_FLAG_DEDICATE_QUEUE (MTL_BIT32(6)) + +/** + * Flag bit in flags of struct st30_rx_ops, for non MTL_PMD_DPDK_USER. + * If set, it's application duty to set the rx flow(queue) and multicast join/drop. + * Use st41_rx_get_queue_meta to get the queue meta(queue number etc) info. + */ +#define ST41_RX_FLAG_DATA_PATH_ONLY (MTL_BIT32(0)) +/** + * Flag bit in flags of struct st41_rx_ops. + * If enable the rtcp. + */ +#define ST41_RX_FLAG_ENABLE_RTCP (MTL_BIT32(1)) + +/** + * Session type of st2110-41(fast metadata) streaming + */ +enum st41_type { + ST41_TYPE_FRAME_LEVEL = 0, /**< app interface lib based on frame level */ + ST41_TYPE_RTP_LEVEL, /**< app interface lib based on RTP level */ + ST41_TYPE_MAX, /**< max value of this enum */ +}; + +/** + * A structure describing a st2110-41(fast metadata) rtp header + */ +#ifdef MTL_LITTLE_ENDIAN +MTL_PACK(struct st41_rtp_hdr { + /** Rtp rfc3550 base hdr */ + struct st_rfc3550_rtp_hdr base; + union { + struct { + /** Data Item Contents - Number of 32-bit data elements that follow */ + uint32_t data_item_length : 9; + /** Data Item K-bit */ + uint32_t data_item_k_bit : 1; + /** Data Item Type */ + uint32_t data_item_type : 22; + } st41_hdr_chunk; + /** Handle to make operating on st41_hdr_chunk buffer easier */ + uint32_t swaped_st41_hdr_chunk; + }; +}); +#else +MTL_PACK(struct st41_rtp_hdr { + /** Rtp rfc3550 base hdr */ + struct st_rfc3550_rtp_hdr base; + union { + struct { + /** Data Item Type */ + uint32_t data_item_type : 22; + /** Data Item K-bit */ + uint32_t data_item_k_bit : 1; + /** Data Item Contents - Number of 32-bit data elements that follow */ + uint32_t data_item_length : 9; + } st41_hdr_chunk; + /** Handle to make operating on st41_hdr_chunk buffer easier */ + uint32_t swaped_st41_hdr_chunk; + }; +}); +#endif + +/** + * Structure for ST2110-41(fast metadata) frame + */ +struct st41_frame { + uint16_t data_item_length_bytes; /** Size of the User Data Words */ + uint8_t* data; /**< Handle to data buffer */ +}; + +/** + * Frame meta data of st2110-41(fast metadata) tx streaming + */ +struct st41_tx_frame_meta { + /** Frame fps */ + enum st_fps fps; + /** Frame timestamp format */ + enum st10_timestamp_fmt tfmt; + /** Frame timestamp value */ + uint64_t timestamp; + /** epoch */ + uint64_t epoch; + /** Second field type indicate for interlaced mode, set by user */ + bool second_field; + /** Timestamp value in the rtp header */ + uint32_t rtp_timestamp; +}; + +/** + * The structure describing how to create a tx st2110-41(fast metadata) session. + * Include the PCIE port and other required info. + */ +struct st41_tx_ops { + /** Mandatory. destination IP address */ + uint8_t dip_addr[MTL_SESSION_PORT_MAX][MTL_IP_ADDR_LEN]; + /** Mandatory. Pcie BDF path like 0000:af:00.0, should align to BDF of mtl_init */ + char port[MTL_SESSION_PORT_MAX][MTL_PORT_MAX_LEN]; + /** Mandatory. 1 or 2, num of ports this session attached to */ + uint8_t num_port; + /** Mandatory. UDP destination port number */ + uint16_t udp_port[MTL_SESSION_PORT_MAX]; + + /** Mandatory. Session streaming type, frame or RTP */ + enum st41_type type; + /** Mandatory. Session fps */ + enum st_fps fps; + /** Mandatory. 7 bits payload type define in RFC3550 */ + uint8_t payload_type; + /** Mandatory. 22 bits data item type */ + uint32_t fmd_dit; + /** Mandatory. 1 bit data item K-bit */ + uint8_t fmd_k_bit; + + /** Mandatory. interlaced or not */ + bool interlaced; + + /** Optional. 
Synchronization source defined in RFC3550, if zero the session will assign + * a random value */ + uint32_t ssrc; + /** Optional. name */ + const char* name; + /** Optional. private data to the callback function */ + void* priv; + /** Optional. see ST41_TX_FLAG_* for possible flags */ + uint32_t flags; + + /** + * Mandatory for ST41_TYPE_FRAME_LEVEL. + * the frame buffer count requested for one st41 tx session, + */ + uint16_t framebuff_cnt; + /** + * Mandatory for ST41_TYPE_FRAME_LEVEL. callback when lib require a new frame for + * sending. User should provide the next available frame index to next_frame_idx. It + * implicit means the frame ownership will be transferred to lib, And only non-block + * method can be used in this callback as it run from lcore tasklet routine. + */ + int (*get_next_frame)(void* priv, uint16_t* next_frame_idx, + struct st41_tx_frame_meta* meta); + /** + * Optional for ST41_TYPE_FRAME_LEVEL. callback when lib finish sending one frame. + * frame_idx indicate the frame which finish the transmit. + * It implicit means the frame ownership is transferred to app. + * And only non-block method can be used in this callback as it run from lcore tasklet + * routine. + */ + int (*notify_frame_done)(void* priv, uint16_t frame_idx, + struct st41_tx_frame_meta* meta); + + /** Optional. UDP source port number, leave as 0 to use same port as dst */ + uint16_t udp_src_port[MTL_SESSION_PORT_MAX]; + /** + * Optional. tx destination mac address. + * Valid if ST41_TX_FLAG_USER_P(R)_MAC is enabled + */ + uint8_t tx_dst_mac[MTL_SESSION_PORT_MAX][MTL_MAC_ADDR_LEN]; + + /** Mandatory for ST41_TYPE_RTP_LEVEL. rtp ring queue size, must be power of 2 */ + uint32_t rtp_ring_size; + /** + * Optional for ST41_TYPE_RTP_LEVEL. callback when lib finish the sending of one rtp + * packet, And only non-block method can be used in this callback as it run from lcore + * tasklet routine. + */ + int (*notify_rtp_done)(void* priv); +}; + +/** + * The structure describing how to create a rx st2110-41(fast metadata) session. + * Include the PCIE port and other required info + */ +struct st41_rx_ops { + union { + /** Mandatory. multicast IP address or sender IP for unicast */ + uint8_t ip_addr[MTL_SESSION_PORT_MAX][MTL_IP_ADDR_LEN]; + /** deprecated, use ip_addr instead, sip_addr is confused */ + uint8_t sip_addr[MTL_SESSION_PORT_MAX][MTL_IP_ADDR_LEN] __mtl_deprecated_msg( + "Use ip_addr instead"); + }; + /** Mandatory. 1 or 2, num of ports this session attached to */ + uint8_t num_port; + /** Mandatory. Pcie BDF path like 0000:af:00.0, should align to BDF of mtl_init */ + char port[MTL_SESSION_PORT_MAX][MTL_PORT_MAX_LEN]; + /** Mandatory. UDP dest port number */ + uint16_t udp_port[MTL_SESSION_PORT_MAX]; + + /** Mandatory. 7 bits payload type define in RFC3550. Zero means disable the + * payload_type check on the RX pkt path */ + uint8_t payload_type; + /** Mandatory. interlaced or not */ + bool interlaced; + + /** Optional. source filter IP address of multicast */ + uint8_t mcast_sip_addr[MTL_SESSION_PORT_MAX][MTL_IP_ADDR_LEN]; + /** Optional. Synchronization source defined in RFC3550, RX session will check the + * incoming RTP packets match the ssrc. Leave to zero to disable the ssrc check */ + uint32_t ssrc; + /** Optional. name */ + const char* name; + /** Optional. private data to the callback function */ + void* priv; + /** Optional. see ST41_RX_FLAG_* for possible flags */ + uint32_t flags; + + /** Mandatory. rtp ring queue size, must be power of 2 */ + uint32_t rtp_ring_size; + /** + * Optional. 
the callback when lib finish the sending of one rtp packet. And only + * non-block method can be used in this callback as it run from lcore tasklet routine. + */ + int (*notify_rtp_ready)(void* priv); +}; + +/** + * Create one tx st2110-41(fast metadata) session. + * + * @param mt + * The handle to the media transport device context. + * @param ops + * The pointer to the structure describing how to create a tx st2110-41(fast metadata) + * session. + * @return + * - NULL on error. + * - Otherwise, the handle to the tx st2110-41(fast metadata) session. + */ +st41_tx_handle st41_tx_create(mtl_handle mt, struct st41_tx_ops* ops); + +/** + * Free the tx st2110-41(fast metadata) session. + * + * @param handle + * The handle to the tx st2110-41(fast metadata) session. + * @return + * - 0: Success, tx st2110-41(fast metadata) session freed. + * - <0: Error code of the tx st2110-41(fast metadata) session free. + */ +int st41_tx_free(st41_tx_handle handle); + +/** + * Online update the destination info for the tx st2110-41(fast metadata) session. + * + * @param handle + * The handle to the tx st2110-41(fast metadata) session. + * @param dst + * The pointer to the tx st2110-41(fast metadata) destination info. + * @return + * - 0: Success, tx st2110-41(fast metadata) session destination update succ. + * - <0: Error code of the rx st2110-41(fast metadata) session destination update. + */ +int st41_tx_update_destination(st41_tx_handle handle, struct st_tx_dest_info* dst); + +/** + * Get the framebuffer pointer from the tx st2110-41(fast metadata) session. + * For ST41_TYPE_FRAME_LEVEL. + * + * @param handle + * The handle to the tx st2110-41(fast metadata) session. + * @param idx + * The framebuffer index, should be in range [0, framebuff_cnt of st41_tx_ops]. + * @return + * - NULL on error. + * - Otherwise, the framebuffer pointer. + */ +void* st41_tx_get_framebuffer(st41_tx_handle handle, uint16_t idx); + +/** + * Get the mbuf pointer and usrptr of the mbuf from the tx st2110-41(fast metadata) + * session. For ST41_TYPE_RTP_LEVEL. Must call st41_tx_put_mbuf to return the mbuf after + * rtp pack done. + * + * @param handle + * The handle to the tx st2110-41(fast metadata) session. + * @param usrptr + * *usrptr will be point to the user data(rtp) area inside the mbuf. + * @return + * - NULL if no available mbuf in the ring. + * - Otherwise, the dpdk mbuf pointer. + */ +void* st41_tx_get_mbuf(st41_tx_handle handle, void** usrptr); + +/** + * Put back the mbuf which get by st41_tx_get_mbuf to the tx st2110-41(fast metadata) + * session. For ST41_TYPE_RTP_LEVEL. + * + * @param handle + * The handle to the tx st2110-41(fast metadata) session. + * @param mbuf + * the dpdk mbuf pointer by st41_tx_get_mbuf. + * @param len + * the rtp package length, include both header and payload. + * @return + * - 0 if successful. + * - <0: Error code if put fail. + */ +int st41_tx_put_mbuf(st41_tx_handle handle, void* mbuf, uint16_t len); + +/** + * Create one rx st2110-41(fast metadata) session. + * + * @param mt + * The handle to the media transport device context. + * @param ops + * The pointer to the structure describing how to create a rx + * st2110-41(fast metadata) session. + * @return + * - NULL on error. + * - Otherwise, the handle to the rx st2110-41(fast metadata) session. + */ +st41_rx_handle st41_rx_create(mtl_handle mt, struct st41_rx_ops* ops); + +/** + * Online update the source info for the rx st2110-41(fast metadata) session. + * + * @param handle + * The handle to the rx st2110-41(fast metadata) session. 
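To put the TX ops and callbacks above in context, a minimal frame-level setup might look like the sketch below. It is illustrative only: the PCIe address, destination IP, fps value and the app_fmd_* names are placeholders, the mtl_handle is assumed to come from mtl_init(), and error handling is trimmed.

#include <stdio.h>
#include <string.h>

#include "st41_api.h"

static int app_fmd_next_frame(void* priv, uint16_t* next_frame_idx,
                              struct st41_tx_frame_meta* meta) {
  uint16_t* producer_idx = priv; /* placeholder app state */
  (void)meta;                    /* unused in this sketch */
  *next_frame_idx = *producer_idx; /* ownership of this framebuffer passes to the lib */
  return 0;
}

static st41_tx_handle app_fmd_tx_create(mtl_handle mt, uint16_t* producer_idx) {
  struct st41_tx_ops ops;
  memset(&ops, 0, sizeof(ops));
  ops.name = "fmd_tx_sample";
  ops.priv = producer_idx;
  ops.num_port = 1;
  snprintf(ops.port[MTL_SESSION_PORT_P], MTL_PORT_MAX_LEN, "%s", "0000:af:00.0");
  uint8_t dip[MTL_IP_ADDR_LEN] = {239, 168, 85, 20}; /* placeholder destination */
  memcpy(ops.dip_addr[MTL_SESSION_PORT_P], dip, MTL_IP_ADDR_LEN);
  ops.udp_port[MTL_SESSION_PORT_P] = 30000;
  ops.type = ST41_TYPE_FRAME_LEVEL;
  ops.fps = ST_FPS_P59_94; /* assuming this st_fps value, as other st2110 sessions use */
  ops.payload_type = 115;  /* matches ST_RFMDRTP_PAYLOAD_TYPE_FASTMETADATA below */
  ops.fmd_dit = 0x12345;   /* placeholder 22-bit data item type */
  ops.fmd_k_bit = 0;
  ops.interlaced = false;
  ops.framebuff_cnt = 3;
  ops.get_next_frame = app_fmd_next_frame;
  return st41_tx_create(mt, &ops);
}

The producer would then fill each frame obtained via st41_tx_get_framebuffer() (presumably a struct st41_frame with data and data_item_length_bytes, as defined above) before advancing its index, and release resources with st41_tx_free().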
+ * @param src + * The pointer to the rx st2110-41(fast metadata) source info. + * @return + * - 0: Success, rx st2110-41(fast metadata) session source update succ. + * - <0: Error code of the rx st2110-41(fast metadata) session source update. + */ +int st41_rx_update_source(st41_rx_handle handle, struct st_rx_source_info* src); + +/** + * Free the rx st2110-41(fast metadata) session. + * + * @param handle + * The handle to the rx st2110-41(fast metadata) session. + * @return + * - 0: Success, rx st2110-41(fast metadata) session freed. + * - <0: Error code of the rx st2110-41(fast metadata) session free. + */ +int st41_rx_free(st41_rx_handle handle); + +/** + * Get the mbuf pointer and usrptr of the mbuf from the rx st2110-41(fast metadata) + * session. For ST41_TYPE_RTP_LEVEL. Must call st41_rx_put_mbuf to return the mbuf after + * consume it. + * + * @param handle + * The handle to the tx st2110-41(fast metadata) session. + * @param usrptr + * *usrptr will be point to the user data(rtp) area inside the mbuf. + * @param len + * The length of the rtp packet, include both the header and payload. + * @return + * - NULL if no available mbuf in the ring. + * - Otherwise, the dpdk mbuf pointer. + */ +void* st41_rx_get_mbuf(st41_rx_handle handle, void** usrptr, uint16_t* len); + +/** + * Put back the mbuf which get by st41_rx_get_mbuf to the rx st2110-41(fast metadata) + * session. For ST41_TYPE_RTP_LEVEL. + * + * @param handle + * The handle to the rx st2110-41(fast metadata) session. + * @param mbuf + * the dpdk mbuf pointer by st41_rx_get_mbuf. + */ +void st41_rx_put_mbuf(st41_rx_handle handle, void* mbuf); + +/** + * Get the queue meta attached to rx st2110-41(fast metadata) session. + * + * @param handle + * The handle to the rx st2110-41(fast metadata) session. + * @param meta + * the rx queue meta info. + * @return + * - 0: Success. + * - <0: Error code. + */ +int st41_rx_get_queue_meta(st41_rx_handle handle, struct st_queue_meta* meta); + +#if defined(__cplusplus) +} +#endif + +#endif /* _ST41_API_HEAD_H_ */ diff --git a/include/st_api.h b/include/st_api.h index 0b93d6d64..b1e10203d 100644 --- a/include/st_api.h +++ b/include/st_api.h @@ -229,6 +229,8 @@ struct st_var_info { uint16_t st30_tx_sessions_cnt; /** st40 tx session count */ uint16_t st40_tx_sessions_cnt; + /** st41 tx session count */ + uint16_t st41_tx_sessions_cnt; /** st20 rx session count */ uint16_t st20_rx_sessions_cnt; /** st22 rx session count */ @@ -237,6 +239,8 @@ struct st_var_info { uint16_t st30_rx_sessions_cnt; /** st40 rx session count */ uint16_t st40_rx_sessions_cnt; + /** st41 rx session count */ + uint16_t st41_rx_sessions_cnt; }; /** @@ -364,17 +368,21 @@ static inline uint32_t st10_get_media_clk(enum st10_timestamp_fmt tfmt, * st20 tx sessions count. * @param st30_sessions * st30 tx sessions count. - * @param st30_sessions - * st30 sessions count. + * @param st40_sessions + * st40 sessions count. + * @param st41_sessions + * st41 sessions count. * @return * queues count. */ static inline uint16_t st_tx_sessions_queue_cnt(uint16_t st20_sessions, uint16_t st30_sessions, - uint16_t st40_sessions) { + uint16_t st40_sessions, + uint16_t st41_sessions) { uint16_t queues = st20_sessions; if (st30_sessions) queues++; if (st40_sessions) queues++; + if (st41_sessions) queues++; return queues; } @@ -385,15 +393,18 @@ static inline uint16_t st_tx_sessions_queue_cnt(uint16_t st20_sessions, * st20 tx sessions count. * @param st30_sessions * st30 tx sessions count. - * @param st30_sessions - * st30 sessions count. 
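On the RX side the application configures st41_rx_create() analogously (ip_addr instead of dip_addr, a power-of-two rtp_ring_size, and a mandatory notify_rtp_ready callback) and then drains packets with st41_rx_get_mbuf()/st41_rx_put_mbuf(). A hedged sketch of the consumer half follows; the app_fmd_* names are placeholders, the callback only signals a consumer thread because it runs from the lcore tasklet, and usrptr is treated as the start of the RTP header as the description above states.

#include <arpa/inet.h>

#include "st41_api.h"

static int app_fmd_rtp_ready(void* priv) {
  (void)priv;
  /* runs in the tasklet context: wake a consumer thread here, never block */
  return 0;
}

static void app_fmd_consume(st41_rx_handle rx) {
  void* usrptr = NULL;
  uint16_t len = 0;
  void* mbuf = st41_rx_get_mbuf(rx, &usrptr, &len);
  while (mbuf) {
    struct st41_rtp_hdr* rtp = usrptr;
    uint32_t chunk = ntohl(rtp->swaped_st41_hdr_chunk);
    uint32_t dit = chunk >> 10;     /* 22-bit data item type */
    uint32_t k_bit = (chunk >> 9) & 0x1;
    uint32_t words = chunk & 0x1FF; /* number of 32-bit data elements that follow */
    uint8_t* udw = (uint8_t*)&rtp[1];
    /* ... hand (dit, k_bit, udw, words * 4 bytes) to the application here ... */
    (void)dit; (void)k_bit; (void)udw; (void)words; (void)len;
    st41_rx_put_mbuf(rx, mbuf);
    mbuf = st41_rx_get_mbuf(rx, &usrptr, &len);
  }
}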
+ * @param st40_sessions + * st40 sessions count. + * @param st41_sessions + * st41 sessions count. * @return * queues count. */ static inline uint16_t st_rx_sessions_queue_cnt(uint16_t st20_sessions, uint16_t st30_sessions, - uint16_t st40_sessions) { - return st20_sessions + st30_sessions + st40_sessions; + uint16_t st40_sessions, + uint16_t st41_sessions) { + return st20_sessions + st30_sessions + st40_sessions + st41_sessions; } #if defined(__cplusplus) diff --git a/lib/src/mt_header.h b/lib/src/mt_header.h index 7030a526e..1b5605d2d 100644 --- a/lib/src/mt_header.h +++ b/lib/src/mt_header.h @@ -30,6 +30,8 @@ enum mt_handle_type { MT_ST20_HANDLE_DEV_CONVERT = 29, MT_ST30_HANDLE_PIPELINE_TX = 30, MT_ST30_HANDLE_PIPELINE_RX = 31, + MT_HANDLE_TX_FMD = 32, + MT_HANDLE_RX_FMD = 33, MT_HANDLE_UDMA = 40, MT_HANDLE_UDP = 41, diff --git a/lib/src/mt_main.c b/lib/src/mt_main.c index f09160323..989b2f127 100644 --- a/lib/src/mt_main.c +++ b/lib/src/mt_main.c @@ -1029,10 +1029,12 @@ int st_get_var_info(mtl_handle mt, struct st_var_info* info) { info->st22_tx_sessions_cnt = rte_atomic32_read(&impl->st22_tx_sessions_cnt); info->st30_tx_sessions_cnt = rte_atomic32_read(&impl->st30_tx_sessions_cnt); info->st40_tx_sessions_cnt = rte_atomic32_read(&impl->st40_tx_sessions_cnt); + info->st41_tx_sessions_cnt = rte_atomic32_read(&impl->st41_tx_sessions_cnt); info->st20_rx_sessions_cnt = rte_atomic32_read(&impl->st20_rx_sessions_cnt); info->st22_rx_sessions_cnt = rte_atomic32_read(&impl->st22_rx_sessions_cnt); info->st30_rx_sessions_cnt = rte_atomic32_read(&impl->st30_rx_sessions_cnt); info->st40_rx_sessions_cnt = rte_atomic32_read(&impl->st40_rx_sessions_cnt); + info->st41_rx_sessions_cnt = rte_atomic32_read(&impl->st41_rx_sessions_cnt); return 0; } diff --git a/lib/src/mt_main.h b/lib/src/mt_main.h index 5be810e89..b17cb41f4 100644 --- a/lib/src/mt_main.h +++ b/lib/src/mt_main.h @@ -556,6 +556,17 @@ struct mtl_sch_impl { bool rx_anc_init; pthread_mutex_t rx_anc_mgr_mutex; /* protect rx_anc_mgr */ + /* one tx fast metadata sessions mgr/transmitter for one sch */ + struct st_tx_fastmetadata_sessions_mgr tx_fmd_mgr; + struct st_fastmetadata_transmitter_impl fmd_trs; + bool tx_fmd_init; + pthread_mutex_t tx_fmd_mgr_mutex; /* protect tx_fmd_mgr */ + + /* one rx fast metadata sessions mgr for one sch */ + struct st_rx_fastmetadata_sessions_mgr rx_fmd_mgr; + bool rx_fmd_init; + pthread_mutex_t rx_fmd_mgr_mutex; /* protect rx_fmd_mgr */ + /* sch sleep info */ bool allow_sleep; pthread_cond_t sleep_wake_cond; @@ -1248,10 +1259,12 @@ struct mtl_main_impl { rte_atomic32_t st22_tx_sessions_cnt; rte_atomic32_t st30_tx_sessions_cnt; rte_atomic32_t st40_tx_sessions_cnt; + rte_atomic32_t st41_tx_sessions_cnt; rte_atomic32_t st20_rx_sessions_cnt; rte_atomic32_t st22_rx_sessions_cnt; rte_atomic32_t st30_rx_sessions_cnt; rte_atomic32_t st40_rx_sessions_cnt; + rte_atomic32_t st41_rx_sessions_cnt; /* active lcore cnt */ rte_atomic32_t lcore_cnt; diff --git a/lib/src/mt_sch.c b/lib/src/mt_sch.c index 55c4fde32..9e55ea472 100644 --- a/lib/src/mt_sch.c +++ b/lib/src/mt_sch.c @@ -12,9 +12,11 @@ #include "mtl_lcore_shm_api.h" #include "st2110/st_rx_ancillary_session.h" #include "st2110/st_rx_audio_session.h" +#include "st2110/st_rx_fastmetadata_session.h" #include "st2110/st_rx_video_session.h" #include "st2110/st_tx_ancillary_session.h" #include "st2110/st_tx_audio_session.h" +#include "st2110/st_tx_fastmetadata_session.h" #include "st2110/st_tx_video_session.h" static inline void sch_mgr_lock(struct mt_sch_mgr* mgr) { @@ -948,6 
+950,9 @@ int mt_sch_mrg_init(struct mtl_main_impl* impl, int data_quota_mbs_limit) { /* init mgr lock for anc */ mt_pthread_mutex_init(&sch->tx_anc_mgr_mutex, NULL); mt_pthread_mutex_init(&sch->rx_anc_mgr_mutex, NULL); + /* init mgr lock for fmd */ + mt_pthread_mutex_init(&sch->tx_fmd_mgr_mutex, NULL); + mt_pthread_mutex_init(&sch->rx_fmd_mgr_mutex, NULL); mt_stat_register(impl, sch_stat, sch, "sch"); } @@ -981,6 +986,9 @@ int mt_sch_mrg_uinit(struct mtl_main_impl* impl) { mt_pthread_mutex_destroy(&sch->tx_anc_mgr_mutex); mt_pthread_mutex_destroy(&sch->rx_anc_mgr_mutex); + mt_pthread_mutex_destroy(&sch->tx_fmd_mgr_mutex); + mt_pthread_mutex_destroy(&sch->rx_fmd_mgr_mutex); + mt_pthread_mutex_destroy(&sch->sleep_wake_mutex); mt_pthread_cond_destroy(&sch->sleep_wake_cond); @@ -1056,6 +1064,14 @@ int mt_sch_put(struct mtl_sch_impl* sch, int quota_mbs) { st_rx_ancillary_sessions_sch_uinit(sch); mt_pthread_mutex_unlock(&sch->rx_anc_mgr_mutex); + mt_pthread_mutex_lock(&sch->tx_fmd_mgr_mutex); + st_tx_fastmetadata_sessions_sch_uinit(sch); + mt_pthread_mutex_unlock(&sch->tx_fmd_mgr_mutex); + + mt_pthread_mutex_lock(&sch->rx_fmd_mgr_mutex); + st_rx_fastmetadata_sessions_sch_uinit(sch); + mt_pthread_mutex_unlock(&sch->rx_fmd_mgr_mutex); + sch_free(sch); } diff --git a/lib/src/mt_usdt.h b/lib/src/mt_usdt.h index a59e3da41..27e5c2eea 100644 --- a/lib/src/mt_usdt.h +++ b/lib/src/mt_usdt.h @@ -199,6 +199,17 @@ #define MT_USDT_ST40_RX_MBUF_ENQUEUE_FAIL(m_idx, s_idx, mbuf, tmstamp) \ MT_DTRACE_PROBE4(st40, rx_mbuf_enqueue_fail, m_idx, s_idx, mbuf, tmstamp) +#define MT_USDT_ST41_TX_FRAME_NEXT(m_idx, s_idx, f_idx, va, meta_num, total_udw) \ + MT_DTRACE_PROBE6(st41, tx_frame_next, m_idx, s_idx, f_idx, va, meta_num, total_udw) +#define MT_USDT_ST41_TX_FRAME_DONE(m_idx, s_idx, f_idx, tmstamp) \ + MT_DTRACE_PROBE4(st41, tx_frame_done, m_idx, s_idx, f_idx, tmstamp) +#define MT_USDT_ST41_RX_MBUF_AVAILABLE(m_idx, s_idx, mbuf, tmstamp, data_size) \ + MT_DTRACE_PROBE5(st41, rx_mbuf_available, m_idx, s_idx, mbuf, tmstamp, data_size) +#define MT_USDT_ST41_RX_MBUF_PUT(m_idx, s_idx, mbuf) \ + MT_DTRACE_PROBE3(st41, rx_mbuf_put, m_idx, s_idx, mbuf) +#define MT_USDT_ST41_RX_MBUF_ENQUEUE_FAIL(m_idx, s_idx, mbuf, tmstamp) \ + MT_DTRACE_PROBE4(st41, rx_mbuf_enqueue_fail, m_idx, s_idx, mbuf, tmstamp) + #define MT_USDT_ST22_TX_FRAME_NEXT(m_idx, s_idx, f_idx, va, tmstamp, sz) \ MT_DTRACE_PROBE6(st22, tx_frame_next, m_idx, s_idx, f_idx, va, tmstamp, sz) #define MT_USDT_ST22_TX_FRAME_DONE(m_idx, s_idx, f_idx, tmstamp) \ diff --git a/lib/src/mt_usdt_provider.d b/lib/src/mt_usdt_provider.d index 85b6d8768..027c02a00 100644 --- a/lib/src/mt_usdt_provider.d +++ b/lib/src/mt_usdt_provider.d @@ -64,6 +64,16 @@ provider st40 { probe rx_mbuf_put(int m_idx, int s_idx, void* mbuf); } +provider st41 { + /* tx */ + probe tx_frame_next(int m_idx, int s_idx, int f_idx, void* va, uint32_t meta_num, int total_udw); + probe tx_frame_done(int m_idx, int s_idx, int f_idx, uint32_t tmstamp); + /* rx */ + probe rx_mbuf_available(int m_idx, int s_idx, void* mbuf, uint32_t tmstamp, uint32_t data_size); + probe rx_mbuf_enqueue_fail(int m_idx, int s_idx, void* mbuf, uint32_t tmstamp); + probe rx_mbuf_put(int m_idx, int s_idx, void* mbuf); +} + provider st20p { /* tx */ probe tx_frame_get(int idx, int f_idx, void* va); diff --git a/lib/src/st2110/meson.build b/lib/src/st2110/meson.build index b8c94f614..ee2055646 100644 --- a/lib/src/st2110/meson.build +++ b/lib/src/st2110/meson.build @@ -12,6 +12,9 @@ sources += files( 'st_rx_ancillary_session.c', 
'st_ancillary_transmitter.c', 'st_ancillary.c', + 'st_tx_fastmetadata_session.c', + 'st_rx_fastmetadata_session.c', + 'st_fastmetadata_transmitter.c', 'st_avx2.c', 'st_avx512.c', 'st_avx512_vbmi.c', diff --git a/lib/src/st2110/st_fastmetadata_transmitter.c b/lib/src/st2110/st_fastmetadata_transmitter.c new file mode 100644 index 000000000..081116144 --- /dev/null +++ b/lib/src/st2110/st_fastmetadata_transmitter.c @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#include "st_fastmetadata_transmitter.h" + +#include "../datapath/mt_queue.h" +#include "../mt_log.h" +#include "st_err.h" +#include "st_tx_fastmetadata_session.h" + +static int st_fastmetadata_trs_tasklet_start(void* priv) { + struct st_fastmetadata_transmitter_impl* trs = priv; + int idx = trs->idx; + struct st_tx_fastmetadata_sessions_mgr* mgr = trs->mgr; + + rte_atomic32_set(&mgr->transmitter_started, 1); + + info("%s(%d), succ\n", __func__, idx); + return 0; +} + +static int st_fastmetadata_trs_tasklet_stop(void* priv) { + struct st_fastmetadata_transmitter_impl* trs = priv; + struct mtl_main_impl* impl = trs->parent; + struct st_tx_fastmetadata_sessions_mgr* mgr = trs->mgr; + int idx = trs->idx, port; + + rte_atomic32_set(&mgr->transmitter_started, 0); + + for (port = 0; port < mt_num_ports(impl); port++) { + /* flush all the pkts in the tx ring desc */ + if (mgr->queue[port]) mt_txq_flush(mgr->queue[port], mt_get_pad(impl, port)); + if (mgr->ring[port]) { + mt_ring_dequeue_clean(mgr->ring[port]); + info("%s(%d), port %d, remaining entries %d\n", __func__, idx, port, + rte_ring_count(mgr->ring[port])); + } + + if (trs->inflight[port]) { + rte_pktmbuf_free(trs->inflight[port]); + trs->inflight[port] = NULL; + } + } + mgr->st41_stat_pkts_burst = 0; + + return 0; +} + +/* pacing handled by session itself */ +static int st_fastmetadata_trs_session_tasklet( + struct st_fastmetadata_transmitter_impl* trs, + struct st_tx_fastmetadata_sessions_mgr* mgr, enum mtl_port port) { + struct rte_ring* ring = mgr->ring[port]; + int ret; + uint16_t n; + struct rte_mbuf* pkt; + + if (!ring) return 0; + + /* check if any inflight pkts in transmitter */ + pkt = trs->inflight[port]; + if (pkt) { + n = mt_txq_burst(mgr->queue[port], &pkt, 1); + if (n >= 1) { + trs->inflight[port] = NULL; + } else { + mgr->stat_trs_ret_code[port] = -STI_TSCTRS_BURST_INFLIGHT_FAIL; + return MTL_TASKLET_HAS_PENDING; + } + mgr->st41_stat_pkts_burst += n; + } + + /* try to dequeue */ + for (int i = 0; i < mgr->max_idx; i++) { + /* try to dequeue */ + ret = rte_ring_sc_dequeue(ring, (void**)&pkt); + if (ret < 0) { + mgr->stat_trs_ret_code[port] = -STI_TSCTRS_DEQUEUE_FAIL; + return MTL_TASKLET_ALL_DONE; /* all done */ + } + + n = mt_txq_burst(mgr->queue[port], &pkt, 1); + mgr->st41_stat_pkts_burst += n; + if (n < 1) { + trs->inflight[port] = pkt; + trs->inflight_cnt[port]++; + mgr->stat_trs_ret_code[port] = -STI_TSCTRS_BURST_INFLIGHT_FAIL; + return MTL_TASKLET_HAS_PENDING; + } + } + + mgr->stat_trs_ret_code[port] = 0; + return MTL_TASKLET_HAS_PENDING; /* may has pending pkt in the ring */ +} + +static int st_fastmetadata_trs_tasklet_handler(void* priv) { + struct st_fastmetadata_transmitter_impl* trs = priv; + struct mtl_main_impl* impl = trs->parent; + struct st_tx_fastmetadata_sessions_mgr* mgr = trs->mgr; + int port; + int pending = MTL_TASKLET_ALL_DONE; + + for (port = 0; port < mt_num_ports(impl); port++) { + pending += st_fastmetadata_trs_session_tasklet(trs, mgr, port); + } + + 
return pending; +} + +int st_fastmetadata_transmitter_init(struct mtl_main_impl* impl, struct mtl_sch_impl* sch, + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_fastmetadata_transmitter_impl* trs) { + int idx = sch->idx; + struct mtl_tasklet_ops ops; + + trs->parent = impl; + trs->idx = idx; + trs->mgr = mgr; + + rte_atomic32_set(&mgr->transmitter_started, 0); + + memset(&ops, 0x0, sizeof(ops)); + ops.priv = trs; + ops.name = "fastmetadata_transmitter"; + ops.start = st_fastmetadata_trs_tasklet_start; + ops.stop = st_fastmetadata_trs_tasklet_stop; + ops.handler = st_fastmetadata_trs_tasklet_handler; + + trs->tasklet = mtl_sch_register_tasklet(sch, &ops); + if (!trs->tasklet) { + err("%s(%d), mtl_sch_register_tasklet fail\n", __func__, idx); + return -EIO; + } + + info("%s(%d), succ\n", __func__, idx); + return 0; +} + +int st_fastmetadata_transmitter_uinit(struct st_fastmetadata_transmitter_impl* trs) { + int idx = trs->idx; + + if (trs->tasklet) { + mtl_sch_unregister_tasklet(trs->tasklet); + trs->tasklet = NULL; + } + + for (int i = 0; i < mt_num_ports(trs->parent); i++) { + info("%s(%d), succ, inflight %d:%d\n", __func__, idx, i, trs->inflight_cnt[i]); + } + return 0; +} diff --git a/lib/src/st2110/st_fastmetadata_transmitter.h b/lib/src/st2110/st_fastmetadata_transmitter.h new file mode 100644 index 000000000..78eb2991b --- /dev/null +++ b/lib/src/st2110/st_fastmetadata_transmitter.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#ifndef _ST_LIB_FASTMETADATA_TRANSMITTER_HEAD_H_ +#define _ST_LIB_FASTMETADATA_TRANSMITTER_HEAD_H_ + +#include "st_main.h" + +int st_fastmetadata_transmitter_init(struct mtl_main_impl* impl, struct mtl_sch_impl* sch, + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_fastmetadata_transmitter_impl* trs); +int st_fastmetadata_transmitter_uinit(struct st_fastmetadata_transmitter_impl* trs); + +#endif diff --git a/lib/src/st2110/st_header.h b/lib/src/st2110/st_header.h index 4042c0abe..ba63ec846 100644 --- a/lib/src/st2110/st_header.h +++ b/lib/src/st2110/st_header.h @@ -9,6 +9,7 @@ #include "st20_api.h" #include "st30_api.h" #include "st40_api.h" +#include "st41_api.h" #include "st_convert.h" #include "st_fmt.h" #include "st_pipeline_api.h" @@ -43,16 +44,21 @@ /* data size for each pkt in block packing mode */ #define ST_VIDEO_BPM_SIZE (1260) -/* max tx/rx audio(st_30) sessions */ +/* max tx/rx audio(st30) sessions */ #define ST_SCH_MAX_TX_AUDIO_SESSIONS (512) /* max audio tx sessions per sch lcore */ #define ST_TX_AUDIO_SESSIONS_RING_SIZE (ST_SCH_MAX_TX_AUDIO_SESSIONS * 2) #define ST_SCH_MAX_RX_AUDIO_SESSIONS (512 * 2) /* max audio rx sessions per sch lcore */ -/* max tx/rx anc(st_40) sessions */ +/* max tx/rx anc(st40) sessions */ #define ST_MAX_TX_ANC_SESSIONS (180) #define ST_TX_ANC_SESSIONS_RING_SIZE (512) #define ST_MAX_RX_ANC_SESSIONS (180) +/* max tx/rx fmd(st41) sessions */ +#define ST_MAX_TX_FMD_SESSIONS (180) +#define ST_TX_FMD_SESSIONS_RING_SIZE (512) +#define ST_MAX_RX_FMD_SESSIONS (180) + /* max dl plugin lib number */ #define ST_MAX_DL_PLUGINS (8) /* max encoder devices number */ @@ -91,6 +97,12 @@ enum st40_tx_frame_status { ST40_TX_STAT_SENDING_PKTS, }; +enum st41_tx_frame_status { + ST41_TX_STAT_UNKNOWN = 0, + ST41_TX_STAT_WAIT_FRAME, + ST41_TX_STAT_SENDING_PKTS, +}; + struct st_tx_muf_priv_data { uint64_t tsc_time_stamp; /* tsc time stamp of current mbuf */ uint64_t ptp_time_stamp; /* ptp time stamp of current mbuf */ @@ -144,6 +156,7 @@ 
struct st_frame_trans { struct st30_tx_frame_meta ta_meta; struct st30_rx_frame_meta ra_meta; /* not use now */ struct st40_tx_frame_meta tc_meta; + struct st41_tx_frame_meta tf_meta; }; }; @@ -1205,6 +1218,175 @@ struct st_ancillary_transmitter_impl { int inflight_cnt[MTL_PORT_MAX]; /* for stats */ }; +struct st_tx_fastmetadata_session_pacing { + double frame_time; /* time of the frame in nanoseconds */ + double frame_time_sampling; /* time of the frame in sampling(90k) */ + uint64_t cur_epochs; /* epoch of current frame */ + /* timestamp for rtp header */ + uint32_t rtp_time_stamp; + /* timestamp for pacing */ + uint32_t pacing_time_stamp; + uint64_t cur_epoch_time; + double tsc_time_cursor; /* in ns, tsc time cursor for packet pacing */ + /* ptp time may onward */ + uint32_t max_onward_epochs; +}; + +struct st_tx_fastmetadata_session_impl { + int idx; /* index for current session */ + int socket_id; + struct st_tx_fastmetadata_sessions_mgr* mgr; + struct st41_tx_ops ops; + char ops_name[ST_MAX_NAME_LEN]; + + enum mtl_port port_maps[MTL_SESSION_PORT_MAX]; + struct rte_mempool* mbuf_mempool_hdr[MTL_SESSION_PORT_MAX]; + struct rte_mempool* mbuf_mempool_chain; + bool tx_mono_pool; /* if reuse tx mono pool */ + bool tx_no_chain; /* if tx not use chain mbuf */ + /* if the eth dev support chain buff */ + bool eth_has_chain[MTL_SESSION_PORT_MAX]; + /* if the eth dev support ipv4 checksum offload */ + bool eth_ipv4_cksum_offload[MTL_SESSION_PORT_MAX]; + struct rte_mbuf* inflight[MTL_SESSION_PORT_MAX]; + int inflight_cnt[MTL_SESSION_PORT_MAX]; /* for stats */ + struct rte_ring* packet_ring; + bool second_field; + + /* dedicated queue tx mode */ + struct mt_txq_entry* queue[MTL_SESSION_PORT_MAX]; + bool shared_queue; + + uint32_t max_pkt_len; /* max data len(byte) for each pkt */ + + uint16_t st41_frames_cnt; /* numbers of frames requested */ + struct st_frame_trans* st41_frames; + uint16_t st41_frame_idx; /* current frame index */ + enum st41_tx_frame_status st41_frame_stat; + + uint16_t st41_src_port[MTL_SESSION_PORT_MAX]; /* udp port */ + uint16_t st41_dst_port[MTL_SESSION_PORT_MAX]; /* udp port */ + struct st41_fmd_hdr hdr[MTL_SESSION_PORT_MAX]; + + struct st_tx_fastmetadata_session_pacing pacing; + bool calculate_time_cursor; + bool check_frame_done_time; + struct st_fps_timing fps_tm; + + uint16_t st41_seq_id; /* seq id for each pkt */ + int st41_total_pkts; /* total pkts in one frame */ + int st41_pkt_idx; /* pkt index in current frame */ + int st41_rtp_time; /* record rtp time */ + + int stat_build_ret_code; + + struct mt_rtcp_tx* rtcp_tx[MTL_SESSION_PORT_MAX]; + + /* stat */ + rte_atomic32_t st41_stat_frame_cnt; + int st41_stat_pkt_cnt[MTL_SESSION_PORT_MAX]; + /* count of frame not match the epoch */ + uint32_t stat_epoch_mismatch; + uint32_t stat_epoch_drop; + uint32_t stat_epoch_onward; + uint32_t stat_error_user_timestamp; + uint32_t stat_exceed_frame_time; + uint64_t stat_last_time; + uint32_t stat_max_next_frame_us; + uint32_t stat_max_notify_frame_us; + /* for tasklet session time measure */ + struct mt_stat_u64 stat_time; + /* interlace */ + uint32_t stat_interlace_first_field; + uint32_t stat_interlace_second_field; +}; + +struct st_tx_fastmetadata_sessions_mgr { + struct mtl_main_impl* parent; + int socket_id; + int idx; /* index for current sessions mgr */ + int max_idx; /* max session index */ + struct mt_sch_tasklet_impl* tasklet; + + /* all fmd sessions share same ring/queue */ + struct rte_ring* ring[MTL_PORT_MAX]; + struct mt_txq_entry* queue[MTL_PORT_MAX]; + + struct 
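The two frame_time fields in st_tx_fastmetadata_session_pacing above express the same frame period on two clocks: wall time in nanoseconds and the 90 kHz RTP media clock. A rough illustration for 59.94 fps (the library derives the real values from st_fps_timing, so this is only to show the relationship):

/* illustrative arithmetic only */
double fps = 60000.0 / 1001.0;              /* 59.94 */
double frame_time = (double)NS_PER_S / fps; /* ~16683333 ns per frame */
double frame_time_sampling = 90000.0 / fps; /* 1501.5 ticks of the 90 kHz RTP clock */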
st_tx_fastmetadata_session_impl* sessions[ST_MAX_TX_FMD_SESSIONS]; + /* protect session, spin(fast) lock as it call from tasklet aslo */ + rte_spinlock_t mutex[ST_MAX_TX_FMD_SESSIONS]; + + rte_atomic32_t transmitter_started; + rte_atomic32_t transmitter_clients; + + /* status */ + int st41_stat_pkts_burst; + + int stat_trs_ret_code[MTL_PORT_MAX]; +}; + +struct st_rx_fastmetadata_session_impl { + int idx; /* index for current session */ + int socket_id; + struct st_rx_fastmetadata_sessions_mgr* mgr; + bool attached; + struct st41_rx_ops ops; + char ops_name[ST_MAX_NAME_LEN]; + struct st_rx_session_priv priv[MTL_SESSION_PORT_MAX]; + struct st_rx_fastmetadata_session_handle_impl* st41_handle; + + enum mtl_port port_maps[MTL_SESSION_PORT_MAX]; + struct mt_rxq_entry* rxq[MTL_SESSION_PORT_MAX]; + struct rte_ring* packet_ring; + + uint16_t st41_dst_port[MTL_SESSION_PORT_MAX]; /* udp port */ + bool mcast_joined[MTL_SESSION_PORT_MAX]; + + int latest_seq_id; /* latest seq id */ + + struct mt_rtcp_rx* rtcp_rx[MTL_SESSION_PORT_MAX]; + + uint32_t tmstamp; + /* status */ + rte_atomic32_t st41_stat_frames_received; + int st41_stat_pkts_dropped; + int st41_stat_pkts_redundant; + int st41_stat_pkts_out_of_order; + int st41_stat_pkts_enqueue_fail; + int st41_stat_pkts_wrong_pt_dropped; + int st41_stat_pkts_wrong_ssrc_dropped; + int st41_stat_pkts_received; + uint64_t st41_stat_last_time; + uint32_t stat_max_notify_rtp_us; + /* for tasklet session time measure */ + struct mt_stat_u64 stat_time; + /* for interlace */ + uint32_t stat_interlace_first_field; + uint32_t stat_interlace_second_field; + int stat_pkts_wrong_interlace_dropped; +}; + +struct st_rx_fastmetadata_sessions_mgr { + struct mtl_main_impl* parent; + int idx; /* index for current session mgr */ + int max_idx; /* max session index */ + struct mt_sch_tasklet_impl* tasklet; + + struct st_rx_fastmetadata_session_impl* sessions[ST_MAX_RX_FMD_SESSIONS]; + /* protect session, spin(fast) lock as it call from tasklet aslo */ + rte_spinlock_t mutex[ST_MAX_RX_FMD_SESSIONS]; +}; + +struct st_fastmetadata_transmitter_impl { + struct mtl_main_impl* parent; + struct st_tx_fastmetadata_sessions_mgr* mgr; + struct mt_sch_tasklet_impl* tasklet; + int idx; /* index for current transmitter */ + + struct rte_mbuf* inflight[MTL_PORT_MAX]; /* inflight mbuf */ + int inflight_cnt[MTL_PORT_MAX]; /* for stats */ +}; + struct st22_get_encoder_request { enum st_plugin_device device; struct st22_encoder_create_req req; @@ -1350,6 +1532,14 @@ struct st_tx_ancillary_session_handle_impl { struct st_tx_ancillary_session_impl* impl; }; +struct st_tx_fastmetadata_session_handle_impl { + struct mtl_main_impl* parent; + enum mt_handle_type type; + struct mtl_sch_impl* sch; /* the sch this session attached */ + int quota_mbs; /* data quota for this session */ + struct st_tx_fastmetadata_session_impl* impl; +}; + struct st_rx_video_session_handle_impl { struct mtl_main_impl* parent; enum mt_handle_type type; @@ -1382,6 +1572,14 @@ struct st_rx_ancillary_session_handle_impl { struct st_rx_ancillary_session_impl* impl; }; +struct st_rx_fastmetadata_session_handle_impl { + struct mtl_main_impl* parent; + enum mt_handle_type type; + struct mtl_sch_impl* sch; /* the sch this session attached */ + int quota_mbs; /* data quota for this session */ + struct st_rx_fastmetadata_session_impl* impl; +}; + static inline bool st20_is_frame_type(enum st20_type type) { if ((type == ST20_TYPE_FRAME_LEVEL) || (type == ST20_TYPE_SLICE_LEVEL)) return true; diff --git a/lib/src/st2110/st_pkt.h 
b/lib/src/st2110/st_pkt.h index deffabc0b..a5bfa06fe 100644 --- a/lib/src/st2110/st_pkt.h +++ b/lib/src/st2110/st_pkt.h @@ -12,6 +12,7 @@ #define ST_RVRTP_PAYLOAD_TYPE_RAW_VIDEO (112) #define ST_RARTP_PAYLOAD_TYPE_PCM_AUDIO (111) #define ST_RANCRTP_PAYLOAD_TYPE_ANCILLARY (113) +#define ST_RFMDRTP_PAYLOAD_TYPE_FASTMETADATA (115) #define ST_TP_CINST_DRAIN_FACTOR (1.1f) /* Drain factor */ @@ -60,6 +61,14 @@ struct st_rfc8331_anc_hdr { struct st40_rfc8331_rtp_hdr rtp; /* size: 20 */ } __attribute__((__packed__)) __rte_aligned(2); +/* total size: 58 */ +struct st41_fmd_hdr { + struct rte_ether_hdr eth; /* size: 14 */ + struct rte_ipv4_hdr ipv4; /* size: 20 */ + struct rte_udp_hdr udp; /* size: 8 */ + struct st41_rtp_hdr rtp; /* size: 16 */ +} __attribute__((__packed__)) __rte_aligned(2); + #define ST_PKT_VIDEO_HDR_LEN \ (sizeof(struct st_rfc4175_video_hdr) - sizeof(struct rte_ether_hdr)) @@ -72,6 +81,8 @@ struct st_rfc8331_anc_hdr { #define ST_PKT_ANC_HDR_LEN \ (sizeof(struct st_rfc8331_anc_hdr) - sizeof(struct rte_ether_hdr)) +#define ST_PKT_FMD_HDR_LEN (sizeof(struct st41_fmd_hdr) - sizeof(struct rte_ether_hdr)) + /* standard UDP is 1460 bytes */ #define ST_PKT_MAX_ETHER_BYTES \ (1460 + sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr)) diff --git a/lib/src/st2110/st_rx_fastmetadata_session.c b/lib/src/st2110/st_rx_fastmetadata_session.c new file mode 100644 index 000000000..e915e3a09 --- /dev/null +++ b/lib/src/st2110/st_rx_fastmetadata_session.c @@ -0,0 +1,973 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#include "st_rx_fastmetadata_session.h" + +#include "../datapath/mt_queue.h" +#include "../mt_log.h" +#include "../mt_stat.h" +#include "st_fastmetadata_transmitter.h" + +/* call rx_fastmetadata_session_put always if get successfully */ +static inline struct st_rx_fastmetadata_session_impl* rx_fastmetadata_session_get( + struct st_rx_fastmetadata_sessions_mgr* mgr, int idx) { + rte_spinlock_lock(&mgr->mutex[idx]); + struct st_rx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (!s) rte_spinlock_unlock(&mgr->mutex[idx]); + return s; +} + +/* call rx_fastmetadata_session_put always if get successfully */ +static inline struct st_rx_fastmetadata_session_impl* rx_fastmetadata_session_try_get( + struct st_rx_fastmetadata_sessions_mgr* mgr, int idx) { + if (!rte_spinlock_trylock(&mgr->mutex[idx])) return NULL; + struct st_rx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (!s) rte_spinlock_unlock(&mgr->mutex[idx]); + return s; +} + +/* call rx_fastmetadata_session_put always if get successfully */ +static inline struct st_rx_fastmetadata_session_impl* rx_fastmetadata_session_get_timeout( + struct st_rx_fastmetadata_sessions_mgr* mgr, int idx, int timeout_us) { + if (!mt_spinlock_lock_timeout(mgr->parent, &mgr->mutex[idx], timeout_us)) return NULL; + struct st_rx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (!s) rte_spinlock_unlock(&mgr->mutex[idx]); + return s; +} + +/* call rx_fastmetadata_session_put always if get successfully */ +static inline bool rx_fastmetadata_session_get_empty( + struct st_rx_fastmetadata_sessions_mgr* mgr, int idx) { + rte_spinlock_lock(&mgr->mutex[idx]); + struct st_rx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (s) { + rte_spinlock_unlock(&mgr->mutex[idx]); /* not null, unlock it */ + return false; + } else { + return true; + } +} + +static inline void rx_fastmetadata_session_put( + struct st_rx_fastmetadata_sessions_mgr* mgr, int idx) { + 
rte_spinlock_unlock(&mgr->mutex[idx]); +} + +static inline uint16_t rx_fastmetadata_queue_id(struct st_rx_fastmetadata_session_impl* s, + enum mtl_session_port s_port) { + return mt_rxq_queue_id(s->rxq[s_port]); +} + +static int rx_fastmetadata_session_init(struct st_rx_fastmetadata_sessions_mgr* mgr, + struct st_rx_fastmetadata_session_impl* s, + int idx) { + MTL_MAY_UNUSED(mgr); + s->idx = idx; + return 0; +} + +static int rx_fastmetadata_sessions_tasklet_start(void* priv) { + struct st_rx_fastmetadata_sessions_mgr* mgr = priv; + int idx = mgr->idx; + + info("%s(%d), succ\n", __func__, idx); + return 0; +} + +static int rx_fastmetadata_sessions_tasklet_stop(void* priv) { + struct st_rx_fastmetadata_sessions_mgr* mgr = priv; + int idx = mgr->idx; + + info("%s(%d), succ\n", __func__, idx); + return 0; +} + +static int rx_fastmetadata_session_handle_pkt(struct mtl_main_impl* impl, + struct st_rx_fastmetadata_session_impl* s, + struct rte_mbuf* mbuf, + enum mtl_session_port s_port) { + struct st41_rx_ops* ops = &s->ops; + size_t hdr_offset = sizeof(struct st_rfc3550_hdr) - sizeof(struct st_rfc3550_rtp_hdr); + struct st_rfc3550_rtp_hdr* rtp = + rte_pktmbuf_mtod_offset(mbuf, struct st_rfc3550_rtp_hdr*, hdr_offset); + uint16_t seq_id = ntohs(rtp->seq_number); + uint8_t payload_type = rtp->payload_type; + MTL_MAY_UNUSED(s_port); + uint32_t pkt_len = mbuf->data_len - sizeof(struct st41_rtp_hdr); + MTL_MAY_UNUSED(pkt_len); + uint32_t tmstamp = ntohl(rtp->tmstamp); + + if (ops->payload_type && (payload_type != ops->payload_type)) { + dbg("%s(%d,%d), get payload_type %u but expect %u\n", __func__, s->idx, s_port, + payload_type, ops->payload_type); + s->st41_stat_pkts_wrong_pt_dropped++; + return -EINVAL; + } + if (ops->ssrc) { + uint32_t ssrc = ntohl(rtp->ssrc); + if (ssrc != ops->ssrc) { + dbg("%s(%d,%d), get ssrc %u but expect %u\n", __func__, s->idx, s_port, ssrc, + ops->ssrc); + s->st41_stat_pkts_wrong_ssrc_dropped++; + return -EINVAL; + } + } + + /* set if it is first pkt */ + if (unlikely(s->latest_seq_id == -1)) s->latest_seq_id = seq_id - 1; + /* drop old packet */ + if (st_rx_seq_drop(seq_id, s->latest_seq_id, 5)) { + dbg("%s(%d,%d), drop as pkt seq %d is old\n", __func__, s->idx, s_port, seq_id); + s->st41_stat_pkts_redundant++; + return 0; + } + if (seq_id != (uint16_t)(s->latest_seq_id + 1)) { + s->st41_stat_pkts_out_of_order++; + } + /* update seq id */ + s->latest_seq_id = seq_id; + + /* enqueue to packet ring to let app to handle */ + int ret = rte_ring_sp_enqueue(s->packet_ring, (void*)mbuf); + if (ret < 0) { + err("%s(%d), can not enqueue to the rte ring, packet drop, pkt seq %d\n", __func__, + s->idx, seq_id); + s->st41_stat_pkts_enqueue_fail++; + MT_USDT_ST41_RX_MBUF_ENQUEUE_FAIL(s->mgr->idx, s->idx, mbuf, tmstamp); + return 0; + } + rte_mbuf_refcnt_update(mbuf, 1); /* free when app put */ + + if (tmstamp != s->tmstamp) { + rte_atomic32_inc(&s->st41_stat_frames_received); + s->tmstamp = tmstamp; + } + s->st41_stat_pkts_received++; + + /* get a valid packet */ + uint64_t tsc_start = 0; + bool time_measure = mt_sessions_time_measure(impl); + if (time_measure) tsc_start = mt_get_tsc(impl); + ops->notify_rtp_ready(ops->priv); + if (time_measure) { + uint32_t delta_us = (mt_get_tsc(impl) - tsc_start) / NS_PER_US; + s->stat_max_notify_rtp_us = RTE_MAX(s->stat_max_notify_rtp_us, delta_us); + } + + MT_USDT_ST41_RX_MBUF_AVAILABLE(s->mgr->idx, s->idx, mbuf, tmstamp, pkt_len); + return 0; +} + +static int rx_fastmetadata_session_handle_mbuf(void* priv, struct rte_mbuf** mbuf, + uint16_t nb) 
{ + struct st_rx_session_priv* s_priv = priv; + struct st_rx_fastmetadata_session_impl* s = s_priv->session; + struct mtl_main_impl* impl = s_priv->impl; + enum mtl_session_port s_port = s_priv->s_port; + + if (!s->attached) { + dbg("%s(%d,%d), session not ready\n", __func__, s->idx, s_port); + return -EIO; + } + + for (uint16_t i = 0; i < nb; i++) + rx_fastmetadata_session_handle_pkt(impl, s, mbuf[i], s_port); + + return 0; +} + +static int rx_fastmetadata_session_tasklet(struct st_rx_fastmetadata_session_impl* s) { + struct rte_mbuf* mbuf[ST_RX_FASTMETADATA_BURST_SIZE]; + uint16_t rv; + int num_port = s->ops.num_port; + bool done = true; + + for (int s_port = 0; s_port < num_port; s_port++) { + if (!s->rxq[s_port]) continue; + + rv = mt_rxq_burst(s->rxq[s_port], &mbuf[0], ST_RX_FASTMETADATA_BURST_SIZE); + if (rv) { + rx_fastmetadata_session_handle_mbuf(&s->priv[s_port], &mbuf[0], rv); + rte_pktmbuf_free_bulk(&mbuf[0], rv); + } + + if (rv) done = false; + } + + return done ? MTL_TASKLET_ALL_DONE : MTL_TASKLET_HAS_PENDING; +} + +static int rx_fastmetadata_sessions_tasklet_handler(void* priv) { + struct st_rx_fastmetadata_sessions_mgr* mgr = priv; + struct mtl_main_impl* impl = mgr->parent; + struct st_rx_fastmetadata_session_impl* s; + int pending = MTL_TASKLET_ALL_DONE; + uint64_t tsc_s = 0; + bool time_measure = mt_sessions_time_measure(impl); + + for (int sidx = 0; sidx < mgr->max_idx; sidx++) { + s = rx_fastmetadata_session_try_get(mgr, sidx); + if (!s) continue; + if (time_measure) tsc_s = mt_get_tsc(impl); + + pending += rx_fastmetadata_session_tasklet(s); + + if (time_measure) { + uint64_t delta_ns = mt_get_tsc(impl) - tsc_s; + mt_stat_u64_update(&s->stat_time, delta_ns); + } + rx_fastmetadata_session_put(mgr, sidx); + } + + return pending; +} + +static int rx_fastmetadata_session_uinit_hw(struct st_rx_fastmetadata_session_impl* s) { + int num_port = s->ops.num_port; + + for (int i = 0; i < num_port; i++) { + if (s->rxq[i]) { + mt_rxq_put(s->rxq[i]); + s->rxq[i] = NULL; + } + } + + return 0; +} + +static int rx_fastmetadata_session_init_hw(struct mtl_main_impl* impl, + struct st_rx_fastmetadata_session_impl* s) { + int idx = s->idx, num_port = s->ops.num_port; + struct mt_rxq_flow flow; + enum mtl_port port; + + for (int i = 0; i < num_port; i++) { + port = mt_port_logic2phy(s->port_maps, i); + + s->priv[i].session = s; + s->priv[i].impl = impl; + s->priv[i].s_port = i; + + memset(&flow, 0, sizeof(flow)); + rte_memcpy(flow.dip_addr, s->ops.ip_addr[i], MTL_IP_ADDR_LEN); + if (mt_is_multicast_ip(flow.dip_addr)) + rte_memcpy(flow.sip_addr, s->ops.mcast_sip_addr[i], MTL_IP_ADDR_LEN); + else + rte_memcpy(flow.sip_addr, mt_sip_addr(impl, port), MTL_IP_ADDR_LEN); + flow.dst_port = s->st41_dst_port[i]; + if (mt_has_cni_rx(impl, port)) flow.flags |= MT_RXQ_FLOW_F_FORCE_CNI; + + /* no flow for data path only */ + if (s->ops.flags & ST41_RX_FLAG_DATA_PATH_ONLY) { + info("%s(%d), rxq get without flow for port %d as data path only\n", __func__, + s->idx, i); + s->rxq[i] = mt_rxq_get(impl, port, NULL); + } else { + s->rxq[i] = mt_rxq_get(impl, port, &flow); + } + if (!s->rxq[i]) { + rx_fastmetadata_session_uinit_hw(s); + return -EIO; + } + + info("%s(%d), port(l:%d,p:%d), queue %d udp %d\n", __func__, idx, i, port, + rx_fastmetadata_queue_id(s, i), flow.dst_port); + } + + return 0; +} + +static int rx_fastmetadata_session_uinit_mcast( + struct mtl_main_impl* impl, struct st_rx_fastmetadata_session_impl* s) { + struct st41_rx_ops* ops = &s->ops; + enum mtl_port port; + + for (int i = 0; i < 
ops->num_port; i++) { + if (!s->mcast_joined[i]) continue; + port = mt_port_logic2phy(s->port_maps, i); + mt_mcast_leave(impl, mt_ip_to_u32(ops->ip_addr[i]), + mt_ip_to_u32(ops->mcast_sip_addr[i]), port); + } + + return 0; +} + +static int rx_fastmetadata_session_init_mcast(struct mtl_main_impl* impl, + struct st_rx_fastmetadata_session_impl* s) { + struct st41_rx_ops* ops = &s->ops; + int ret; + enum mtl_port port; + + for (int i = 0; i < ops->num_port; i++) { + if (!mt_is_multicast_ip(ops->ip_addr[i])) continue; + port = mt_port_logic2phy(s->port_maps, i); + if (ops->flags & ST20_RX_FLAG_DATA_PATH_ONLY) { + info("%s(%d), skip mcast join for port %d\n", __func__, s->idx, i); + return 0; + } + ret = mt_mcast_join(impl, mt_ip_to_u32(ops->ip_addr[i]), + mt_ip_to_u32(ops->mcast_sip_addr[i]), port); + if (ret < 0) return ret; + s->mcast_joined[i] = true; + } + + return 0; +} + +static int rx_fastmetadata_session_init_sw(struct st_rx_fastmetadata_sessions_mgr* mgr, + struct st_rx_fastmetadata_session_impl* s) { + char ring_name[32]; + struct rte_ring* ring; + unsigned int flags, count; + int mgr_idx = mgr->idx, idx = s->idx; + + snprintf(ring_name, 32, "%sM%dS%d_PKT", ST_RX_FASTMETADATA_PREFIX, mgr_idx, idx); + flags = RING_F_SP_ENQ | RING_F_SC_DEQ; /* single-producer and single-consumer */ + count = s->ops.rtp_ring_size; + ring = rte_ring_create(ring_name, count, s->socket_id, flags); + if (count <= 0) { + err("%s(%d,%d), invalid rtp_ring_size %d\n", __func__, mgr_idx, idx, count); + return -ENOMEM; + } + if (!ring) { + err("%s(%d,%d), rte_ring_create fail\n", __func__, mgr_idx, idx); + return -ENOMEM; + } + s->packet_ring = ring; + info("%s(%d,%d), rtp_ring_size %d\n", __func__, mgr_idx, idx, count); + return 0; +} + +static int rx_fastmetadata_session_uinit_sw(struct st_rx_fastmetadata_session_impl* s) { + if (s->packet_ring) { + mt_ring_dequeue_clean(s->packet_ring); + rte_ring_free(s->packet_ring); + s->packet_ring = NULL; + } + + return 0; +} + +static int rx_fastmetadata_session_uinit(struct mtl_main_impl* impl, + struct st_rx_fastmetadata_session_impl* s) { + rx_fastmetadata_session_uinit_mcast(impl, s); + rx_fastmetadata_session_uinit_sw(s); + rx_fastmetadata_session_uinit_hw(s); + return 0; +} + +static int rx_fastmetadata_session_attach(struct mtl_main_impl* impl, + struct st_rx_fastmetadata_sessions_mgr* mgr, + struct st_rx_fastmetadata_session_impl* s, + struct st41_rx_ops* ops) { + int ret; + int idx = s->idx, num_port = ops->num_port; + char* ports[MTL_SESSION_PORT_MAX]; + + for (int i = 0; i < num_port; i++) ports[i] = ops->port[i]; + ret = mt_build_port_map(impl, ports, s->port_maps, num_port); + if (ret < 0) return ret; + + s->mgr = mgr; + if (ops->name) { + snprintf(s->ops_name, sizeof(s->ops_name), "%s", ops->name); + } else { + snprintf(s->ops_name, sizeof(s->ops_name), "RX_FMD_M%dS%d", mgr->idx, idx); + } + s->ops = *ops; + for (int i = 0; i < num_port; i++) { + s->st41_dst_port[i] = (ops->udp_port[i]) ? 
(ops->udp_port[i]) : (30000 + idx * 2); + } + + s->latest_seq_id = -1; + s->st41_stat_pkts_received = 0; + s->st41_stat_pkts_dropped = 0; + s->st41_stat_last_time = mt_get_monotonic_time(); + rte_atomic32_set(&s->st41_stat_frames_received, 0); + mt_stat_u64_init(&s->stat_time); + + ret = rx_fastmetadata_session_init_hw(impl, s); + if (ret < 0) { + err("%s(%d), rx_audio_session_init_hw fail %d\n", __func__, idx, ret); + rx_fastmetadata_session_uinit(impl, s); + return ret; + } + + ret = rx_fastmetadata_session_init_sw(mgr, s); + if (ret < 0) { + err("%s(%d), rx_fastmetadata_session_init_rtps fail %d\n", __func__, idx, ret); + rx_fastmetadata_session_uinit(impl, s); + return ret; + } + + ret = rx_fastmetadata_session_init_mcast(impl, s); + if (ret < 0) { + err("%s(%d), rx_fastmetadata_session_init_mcast fail %d\n", __func__, idx, ret); + rx_fastmetadata_session_uinit(impl, s); + return -EIO; + } + + s->attached = true; + info("%s(%d), flags 0x%x pt %u, %s\n", __func__, idx, ops->flags, ops->payload_type, + ops->interlaced ? "interlace" : "progressive"); + return 0; +} + +static void rx_fastmetadata_session_stat(struct st_rx_fastmetadata_session_impl* s) { + int idx = s->idx; + uint64_t cur_time_ns = mt_get_monotonic_time(); + double time_sec = (double)(cur_time_ns - s->st41_stat_last_time) / NS_PER_S; + int frames_received = rte_atomic32_read(&s->st41_stat_frames_received); + double framerate = frames_received / time_sec; + + rte_atomic32_set(&s->st41_stat_frames_received, 0); + + notice("RX_FMD_SESSION(%d:%s): fps %f frames %d pkts %d\n", idx, s->ops_name, framerate, + frames_received, s->st41_stat_pkts_received); + s->st41_stat_pkts_received = 0; + s->st41_stat_last_time = cur_time_ns; + + if (s->st41_stat_pkts_redundant) { + notice("RX_FMD_SESSION(%d): redundant pkts %d\n", idx, s->st41_stat_pkts_redundant); + s->st41_stat_pkts_redundant = 0; + } + if (s->st41_stat_pkts_dropped) { + notice("RX_FMD_SESSION(%d): dropped pkts %d\n", idx, s->st41_stat_pkts_dropped); + s->st41_stat_pkts_dropped = 0; + } + if (s->st41_stat_pkts_out_of_order) { + warn("RX_FMD_SESSION(%d): out of order pkts %d\n", idx, + s->st41_stat_pkts_out_of_order); + s->st41_stat_pkts_out_of_order = 0; + } + if (s->st41_stat_pkts_wrong_pt_dropped) { + notice("RX_FMD_SESSION(%d): wrong hdr payload_type dropped pkts %d\n", idx, + s->st41_stat_pkts_wrong_pt_dropped); + s->st41_stat_pkts_wrong_pt_dropped = 0; + } + if (s->st41_stat_pkts_wrong_ssrc_dropped) { + notice("RX_FMD_SESSION(%d): wrong hdr ssrc dropped pkts %d\n", idx, + s->st41_stat_pkts_wrong_ssrc_dropped); + s->st41_stat_pkts_wrong_ssrc_dropped = 0; + } + if (s->stat_pkts_wrong_interlace_dropped) { + notice("RX_FMD_SESSION(%d): wrong hdr interlace dropped pkts %d\n", idx, + s->stat_pkts_wrong_interlace_dropped); + s->stat_pkts_wrong_interlace_dropped = 0; + } + if (s->st41_stat_pkts_enqueue_fail) { + notice("RX_FMD_SESSION(%d): enqueue failed pkts %d\n", idx, + s->st41_stat_pkts_enqueue_fail); + s->st41_stat_pkts_enqueue_fail = 0; + } + if (s->ops.interlaced) { + notice("RX_FMD_SESSION(%d): interlace first field %u second field %u\n", idx, + s->stat_interlace_first_field, s->stat_interlace_second_field); + s->stat_interlace_first_field = 0; + s->stat_interlace_second_field = 0; + } + + struct mt_stat_u64* stat_time = &s->stat_time; + if (stat_time->cnt) { + uint64_t avg_ns = stat_time->sum / stat_time->cnt; + notice("RX_FMD_SESSION(%d): tasklet time avg %.2fus max %.2fus min %.2fus\n", idx, + (float)avg_ns / NS_PER_US, (float)stat_time->max / NS_PER_US, + 
(float)stat_time->min / NS_PER_US); + mt_stat_u64_init(stat_time); + } + if (s->stat_max_notify_rtp_us > 8) { + notice("RX_FMD_SESSION(%d): notify rtp max %uus\n", idx, s->stat_max_notify_rtp_us); + } + s->stat_max_notify_rtp_us = 0; +} + +static int rx_fastmetadata_session_detach(struct mtl_main_impl* impl, + struct st_rx_fastmetadata_session_impl* s) { + s->attached = false; + rx_fastmetadata_session_stat(s); + rx_fastmetadata_session_uinit(impl, s); + return 0; +} + +static int rx_fastmetadata_session_update_src(struct mtl_main_impl* impl, + struct st_rx_fastmetadata_session_impl* s, + struct st_rx_source_info* src) { + int ret = -EIO; + int idx = s->idx, num_port = s->ops.num_port; + struct st41_rx_ops* ops = &s->ops; + + rx_fastmetadata_session_uinit_mcast(impl, s); + rx_fastmetadata_session_uinit_hw(s); + + /* update ip and port */ + for (int i = 0; i < num_port; i++) { + memcpy(ops->ip_addr[i], src->ip_addr[i], MTL_IP_ADDR_LEN); + memcpy(ops->mcast_sip_addr[i], src->mcast_sip_addr[i], MTL_IP_ADDR_LEN); + ops->udp_port[i] = src->udp_port[i]; + s->st41_dst_port[i] = (ops->udp_port[i]) ? (ops->udp_port[i]) : (30000 + idx * 2); + } + /* reset seq id */ + s->latest_seq_id = -1; + + ret = rx_fastmetadata_session_init_hw(impl, s); + if (ret < 0) { + err("%s(%d), init hw fail %d\n", __func__, idx, ret); + return ret; + } + + ret = rx_fastmetadata_session_init_mcast(impl, s); + if (ret < 0) { + err("%s(%d), init mcast fail %d\n", __func__, idx, ret); + return ret; + } + + return 0; +} + +static int rx_fastmetadata_sessions_mgr_update_src( + struct st_rx_fastmetadata_sessions_mgr* mgr, + struct st_rx_fastmetadata_session_impl* s, struct st_rx_source_info* src) { + int ret = -EIO, midx = mgr->idx, idx = s->idx; + + s = rx_fastmetadata_session_get(mgr, idx); /* get the lock */ + if (!s) { + err("%s(%d,%d), get session fail\n", __func__, midx, idx); + return -EIO; + } + + ret = rx_fastmetadata_session_update_src(mgr->parent, s, src); + rx_fastmetadata_session_put(mgr, idx); + if (ret < 0) { + err("%s(%d,%d), fail %d\n", __func__, midx, idx, ret); + return ret; + } + + return 0; +} + +static int st_rx_fastmetadata_sessions_stat(void* priv) { + struct st_rx_fastmetadata_sessions_mgr* mgr = priv; + struct st_rx_fastmetadata_session_impl* s; + + for (int j = 0; j < mgr->max_idx; j++) { + s = rx_fastmetadata_session_get_timeout(mgr, j, ST_SESSION_STAT_TIMEOUT_US); + if (!s) continue; + rx_fastmetadata_session_stat(s); + rx_fastmetadata_session_put(mgr, j); + } + + return 0; +} + +static int rx_fastmetadata_sessions_mgr_init( + struct mtl_main_impl* impl, struct mtl_sch_impl* sch, + struct st_rx_fastmetadata_sessions_mgr* mgr) { + int idx = sch->idx; + struct mtl_tasklet_ops ops; + + mgr->parent = impl; + mgr->idx = idx; + + for (int i = 0; i < ST_MAX_RX_FMD_SESSIONS; i++) { + rte_spinlock_init(&mgr->mutex[i]); + } + + memset(&ops, 0x0, sizeof(ops)); + ops.priv = mgr; + ops.name = "rx_fmd_sessions_mgr"; + ops.start = rx_fastmetadata_sessions_tasklet_start; + ops.stop = rx_fastmetadata_sessions_tasklet_stop; + ops.handler = rx_fastmetadata_sessions_tasklet_handler; + + mgr->tasklet = mtl_sch_register_tasklet(sch, &ops); + if (!mgr->tasklet) { + err("%s(%d), mtl_sch_register_tasklet fail\n", __func__, idx); + return -EIO; + } + + mt_stat_register(mgr->parent, st_rx_fastmetadata_sessions_stat, mgr, "rx_fmd"); + info("%s(%d), succ\n", __func__, idx); + return 0; +} + +static struct st_rx_fastmetadata_session_impl* rx_fastmetadata_sessions_mgr_attach( + struct mtl_sch_impl* sch, struct st41_rx_ops* ops) { 
+ struct st_rx_fastmetadata_sessions_mgr* mgr = &sch->rx_fmd_mgr; + int midx = mgr->idx; + int ret; + struct st_rx_fastmetadata_session_impl* s; + int socket = mt_sch_socket_id(sch); + + /* find one empty slot in the mgr */ + for (int i = 0; i < ST_MAX_RX_FMD_SESSIONS; i++) { + if (!rx_fastmetadata_session_get_empty(mgr, i)) continue; + + s = mt_rte_zmalloc_socket(sizeof(*s), socket); + if (!s) { + err("%s(%d), session malloc fail on %d\n", __func__, midx, i); + rx_fastmetadata_session_put(mgr, i); + return NULL; + } + s->socket_id = socket; + ret = rx_fastmetadata_session_init(mgr, s, i); + if (ret < 0) { + err("%s(%d), init fail on %d\n", __func__, midx, i); + rx_fastmetadata_session_put(mgr, i); + mt_rte_free(s); + return NULL; + } + ret = rx_fastmetadata_session_attach(mgr->parent, mgr, s, ops); + if (ret < 0) { + err("%s(%d), attach fail on %d\n", __func__, midx, i); + rx_fastmetadata_session_put(mgr, i); + mt_rte_free(s); + return NULL; + } + + mgr->sessions[i] = s; + mgr->max_idx = RTE_MAX(mgr->max_idx, i + 1); + rx_fastmetadata_session_put(mgr, i); + return s; + } + + err("%s(%d), fail\n", __func__, midx); + return NULL; +} + +static int rx_fastmetadata_sessions_mgr_detach( + struct st_rx_fastmetadata_sessions_mgr* mgr, + struct st_rx_fastmetadata_session_impl* s) { + int midx = mgr->idx; + int idx = s->idx; + + s = rx_fastmetadata_session_get(mgr, idx); /* get the lock */ + if (!s) { + err("%s(%d,%d), get session fail\n", __func__, midx, idx); + return -EIO; + } + + rx_fastmetadata_session_detach(mgr->parent, s); + mgr->sessions[idx] = NULL; + mt_rte_free(s); + + rx_fastmetadata_session_put(mgr, idx); + + return 0; +} + +static int rx_fastmetadata_sessions_mgr_update( + struct st_rx_fastmetadata_sessions_mgr* mgr) { + int max_idx = 0; + + for (int i = 0; i < ST_MAX_RX_FMD_SESSIONS; i++) { + if (mgr->sessions[i]) max_idx = i + 1; + } + + mgr->max_idx = max_idx; + return 0; +} + +static int rx_fastmetadata_sessions_mgr_uinit( + struct st_rx_fastmetadata_sessions_mgr* mgr) { + int m_idx = mgr->idx; + struct st_rx_fastmetadata_session_impl* s; + + mt_stat_unregister(mgr->parent, st_rx_fastmetadata_sessions_stat, mgr); + + if (mgr->tasklet) { + mtl_sch_unregister_tasklet(mgr->tasklet); + mgr->tasklet = NULL; + } + + for (int i = 0; i < ST_MAX_RX_FMD_SESSIONS; i++) { + s = rx_fastmetadata_session_get(mgr, i); + if (!s) continue; + + warn("%s(%d), session %d still attached\n", __func__, m_idx, i); + rx_fastmetadata_sessions_mgr_detach(mgr, s); + rx_fastmetadata_session_put(mgr, i); + } + + info("%s(%d), succ\n", __func__, m_idx); + return 0; +} + +static int rx_fastmetadata_ops_check(struct st41_rx_ops* ops) { + int num_ports = ops->num_port, ret; + uint8_t* ip = NULL; + + if ((num_ports > MTL_SESSION_PORT_MAX) || (num_ports <= 0)) { + err("%s, invalid num_ports %d\n", __func__, num_ports); + return -EINVAL; + } + + for (int i = 0; i < num_ports; i++) { + ip = ops->ip_addr[i]; + ret = mt_ip_addr_check(ip); + if (ret < 0) { + err("%s(%d), invalid ip %d.%d.%d.%d\n", __func__, i, ip[0], ip[1], ip[2], ip[3]); + return -EINVAL; + } + } + + if (num_ports > 1) { + if (0 == memcmp(ops->ip_addr[0], ops->ip_addr[1], MTL_IP_ADDR_LEN)) { + err("%s, same %d.%d.%d.%d for both ip\n", __func__, ip[0], ip[1], ip[2], ip[3]); + return -EINVAL; + } + } + + if (ops->rtp_ring_size <= 0) { + err("%s, invalid rtp_ring_size %d\n", __func__, ops->rtp_ring_size); + return -EINVAL; + } + + if (!ops->notify_rtp_ready) { + err("%s, pls set notify_rtp_ready\n", __func__); + return -EINVAL; + } + + /* Zero means 
disable the payload_type check */ + if (!st_is_valid_payload_type(ops->payload_type)) { + err("%s, invalid payload_type %d\n", __func__, ops->payload_type); + return -EINVAL; + } + + return 0; +} + +static int st_rx_fmd_init(struct mtl_main_impl* impl, struct mtl_sch_impl* sch) { + int ret; + + if (sch->rx_fmd_init) return 0; + + /* create rx fastmetadata context */ + ret = rx_fastmetadata_sessions_mgr_init(impl, sch, &sch->rx_fmd_mgr); + if (ret < 0) { + err("%s, rx_fastmetadata_sessions_mgr_init fail\n", __func__); + return ret; + } + + sch->rx_fmd_init = true; + return 0; +} + +int st_rx_fastmetadata_sessions_sch_uinit(struct mtl_sch_impl* sch) { + if (!sch->rx_fmd_init) return 0; + + rx_fastmetadata_sessions_mgr_uinit(&sch->rx_fmd_mgr); + + sch->rx_fmd_init = false; + return 0; +} + +st41_rx_handle st41_rx_create(mtl_handle mt, struct st41_rx_ops* ops) { + struct mtl_main_impl* impl = mt; + struct mtl_sch_impl* sch; + struct st_rx_fastmetadata_session_handle_impl* s_impl; + struct st_rx_fastmetadata_session_impl* s; + int ret; + int quota_mbs; + + notice("%s, start for %s\n", __func__, mt_string_safe(ops->name)); + + if (impl->type != MT_HANDLE_MAIN) { + err("%s, invalid type %d\n", __func__, impl->type); + return NULL; + } + + ret = rx_fastmetadata_ops_check(ops); + if (ret < 0) { + err("%s, st_rx_audio_ops_check fail %d\n", __func__, ret); + return NULL; + } + + enum mtl_port port = mt_port_by_name(impl, ops->port[MTL_SESSION_PORT_P]); + if (port >= MTL_PORT_MAX) return NULL; + int socket = mt_socket_id(impl, port); + + s_impl = mt_rte_zmalloc_socket(sizeof(*s_impl), socket); + if (!s_impl) { + err("%s, s_impl malloc fail on socket %d\n", __func__, socket); + return NULL; + } + + quota_mbs = 0; + sch = + mt_sch_get_by_socket(impl, quota_mbs, MT_SCH_TYPE_DEFAULT, MT_SCH_MASK_ALL, socket); + if (!sch) { + mt_rte_free(s_impl); + err("%s, get sch fail\n", __func__); + return NULL; + } + + mt_pthread_mutex_lock(&sch->rx_fmd_mgr_mutex); + ret = st_rx_fmd_init(impl, sch); + mt_pthread_mutex_unlock(&sch->rx_fmd_mgr_mutex); + if (ret < 0) { + err("%s, st_rx_fmd_init fail %d\n", __func__, ret); + mt_sch_put(sch, quota_mbs); + mt_rte_free(s_impl); + return NULL; + } + + mt_pthread_mutex_lock(&sch->rx_fmd_mgr_mutex); + s = rx_fastmetadata_sessions_mgr_attach(sch, ops); + mt_pthread_mutex_unlock(&sch->rx_fmd_mgr_mutex); + if (!s) { + err("%s, rx_fastmetadata_sessions_mgr_attach fail\n", __func__); + mt_sch_put(sch, quota_mbs); + mt_rte_free(s_impl); + return NULL; + } + + s_impl->parent = impl; + s_impl->type = MT_HANDLE_RX_FMD; + s_impl->sch = sch; + s_impl->quota_mbs = quota_mbs; + s_impl->impl = s; + s->st41_handle = s_impl; + + rte_atomic32_inc(&impl->st41_rx_sessions_cnt); + notice("%s(%d,%d), succ on %p\n", __func__, sch->idx, s->idx, s); + return s_impl; +} + +int st41_rx_update_source(st41_rx_handle handle, struct st_rx_source_info* src) { + struct st_rx_fastmetadata_session_handle_impl* s_impl = handle; + struct st_rx_fastmetadata_session_impl* s; + struct mtl_sch_impl* sch; + int idx, ret, sch_idx; + + if (s_impl->type != MT_HANDLE_RX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return -EIO; + } + + s = s_impl->impl; + idx = s->idx; + sch = s_impl->sch; + sch_idx = sch->idx; + + ret = st_rx_source_info_check(src, s->ops.num_port); + if (ret < 0) return ret; + + ret = rx_fastmetadata_sessions_mgr_update_src(&sch->rx_fmd_mgr, s, src); + if (ret < 0) { + err("%s(%d,%d), online update fail %d\n", __func__, sch_idx, idx, ret); + return ret; + } + + info("%s(%d,%d), 
succ\n", __func__, sch_idx, idx); + return 0; +} + +int st41_rx_free(st41_rx_handle handle) { + struct st_rx_fastmetadata_session_handle_impl* s_impl = handle; + struct mtl_main_impl* impl; + struct st_rx_fastmetadata_session_impl* s; + struct mtl_sch_impl* sch; + int ret, idx; + int sch_idx; + + if (s_impl->type != MT_HANDLE_RX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return -EIO; + } + + impl = s_impl->parent; + s = s_impl->impl; + idx = s->idx; + sch = s_impl->sch; + sch_idx = sch->idx; + notice("%s(%d,%d), start\n", __func__, sch_idx, idx); + + mt_pthread_mutex_lock(&sch->rx_fmd_mgr_mutex); + ret = rx_fastmetadata_sessions_mgr_detach(&sch->rx_fmd_mgr, s); + mt_pthread_mutex_unlock(&sch->rx_fmd_mgr_mutex); + if (ret < 0) err("%s(%d, %d), mgr detach fail\n", __func__, sch_idx, idx); + + ret = mt_sch_put(sch, s_impl->quota_mbs); + if (ret < 0) err("%s(%d, %d), mt_sch_put fail\n", __func__, sch_idx, idx); + + mt_rte_free(s_impl); + + /* update max idx */ + mt_pthread_mutex_lock(&sch->rx_fmd_mgr_mutex); + rx_fastmetadata_sessions_mgr_update(&sch->rx_fmd_mgr); + mt_pthread_mutex_unlock(&sch->rx_fmd_mgr_mutex); + + rte_atomic32_dec(&impl->st41_rx_sessions_cnt); + notice("%s(%d,%d), succ\n", __func__, sch_idx, idx); + return 0; +} + +void* st41_rx_get_mbuf(st41_rx_handle handle, void** usrptr, uint16_t* len) { + struct st_rx_fastmetadata_session_handle_impl* s_impl = handle; + struct rte_mbuf* pkt; + struct st_rx_fastmetadata_session_impl* s; + struct rte_ring* packet_ring; + int idx, ret; + + if (s_impl->type != MT_HANDLE_RX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return NULL; + } + + s = s_impl->impl; + idx = s->idx; + packet_ring = s->packet_ring; + if (!packet_ring) { + err("%s(%d), packet ring is not created\n", __func__, idx); + return NULL; + } + + ret = rte_ring_sc_dequeue(packet_ring, (void**)&pkt); + if (ret == 0) { + int header_len = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr); + *len = pkt->data_len - header_len; + *usrptr = rte_pktmbuf_mtod_offset(pkt, void*, header_len); + return (void*)pkt; + } + + return NULL; +} + +void st41_rx_put_mbuf(st41_rx_handle handle, void* mbuf) { + struct st_rx_fastmetadata_session_handle_impl* s_impl = handle; + struct rte_mbuf* pkt = (struct rte_mbuf*)mbuf; + struct st_rx_fastmetadata_session_impl* s; + + if (s_impl->type != MT_HANDLE_RX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return; + } + + s = s_impl->impl; + MTL_MAY_UNUSED(s); + + if (pkt) rte_pktmbuf_free(pkt); + MT_USDT_ST41_RX_MBUF_PUT(s->mgr->idx, s->idx, mbuf); +} + +int st41_rx_get_queue_meta(st41_rx_handle handle, struct st_queue_meta* meta) { + struct st_rx_fastmetadata_session_handle_impl* s_impl = handle; + struct st_rx_fastmetadata_session_impl* s; + + if (s_impl->type != MT_HANDLE_RX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return -EIO; + } + + s = s_impl->impl; + + memset(meta, 0x0, sizeof(*meta)); + meta->num_port = RTE_MIN(s->ops.num_port, MTL_SESSION_PORT_MAX); + for (uint8_t i = 0; i < meta->num_port; i++) { + meta->queue_id[i] = rx_fastmetadata_queue_id(s, i); + } + + return 0; +} diff --git a/lib/src/st2110/st_rx_fastmetadata_session.h b/lib/src/st2110/st_rx_fastmetadata_session.h new file mode 100644 index 000000000..a1014ab6e --- /dev/null +++ b/lib/src/st2110/st_rx_fastmetadata_session.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#ifndef 
_ST_LIB_RX_FASTMETADATA_SESSION_HEAD_H_ +#define _ST_LIB_RX_FASTMETADATA_SESSION_HEAD_H_ + +#include "st_main.h" + +#define ST_RX_FASTMETADATA_BURST_SIZE (128) + +#define ST_RX_FASTMETADATA_PREFIX "RC_" + +int st_rx_fastmetadata_sessions_sch_uinit(struct mtl_sch_impl* sch); + +#endif /* _ST_LIB_RX_FASTMETADATA_SESSION_HEAD_H_ */ \ No newline at end of file diff --git a/lib/src/st2110/st_tx_fastmetadata_session.c b/lib/src/st2110/st_tx_fastmetadata_session.c new file mode 100644 index 000000000..adfc49e59 --- /dev/null +++ b/lib/src/st2110/st_tx_fastmetadata_session.c @@ -0,0 +1,2132 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#include "st_tx_fastmetadata_session.h" + +#include "../datapath/mt_queue.h" +#include "../mt_log.h" +#include "../mt_stat.h" +#include "st_err.h" +#include "st_fastmetadata_transmitter.h" + +/* call tx_fastmetadata_session_put always if get successfully */ +static inline struct st_tx_fastmetadata_session_impl* tx_fastmetadata_session_get( + struct st_tx_fastmetadata_sessions_mgr* mgr, int idx) { + rte_spinlock_lock(&mgr->mutex[idx]); + struct st_tx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (!s) rte_spinlock_unlock(&mgr->mutex[idx]); + return s; +} + +/* call tx_fastmetadata_session_put always if get successfully */ +static inline struct st_tx_fastmetadata_session_impl* tx_fastmetadata_session_try_get( + struct st_tx_fastmetadata_sessions_mgr* mgr, int idx) { + if (!rte_spinlock_trylock(&mgr->mutex[idx])) return NULL; + struct st_tx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (!s) rte_spinlock_unlock(&mgr->mutex[idx]); + return s; +} + +/* call tx_fastmetadata_session_put always if get successfully */ +static inline struct st_tx_fastmetadata_session_impl* tx_fastmetadata_session_get_timeout( + struct st_tx_fastmetadata_sessions_mgr* mgr, int idx, int timeout_us) { + if (!mt_spinlock_lock_timeout(mgr->parent, &mgr->mutex[idx], timeout_us)) return NULL; + struct st_tx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (!s) rte_spinlock_unlock(&mgr->mutex[idx]); + return s; +} + +/* call rx_fastmetadata_session_put always if get successfully */ +static inline bool tx_fastmetadata_session_get_empty( + struct st_tx_fastmetadata_sessions_mgr* mgr, int idx) { + rte_spinlock_lock(&mgr->mutex[idx]); + struct st_tx_fastmetadata_session_impl* s = mgr->sessions[idx]; + if (s) { + rte_spinlock_unlock(&mgr->mutex[idx]); /* not null, unlock it */ + return false; + } else { + return true; + } +} + +static inline void tx_fastmetadata_session_put( + struct st_tx_fastmetadata_sessions_mgr* mgr, int idx) { + rte_spinlock_unlock(&mgr->mutex[idx]); +} + +static int tx_fastmetadata_session_free_frames( + struct st_tx_fastmetadata_session_impl* s) { + if (s->st41_frames) { + struct st_frame_trans* frame; + + /* dec ref for current frame */ + frame = &s->st41_frames[s->st41_frame_idx]; + if (rte_atomic32_read(&frame->refcnt)) rte_atomic32_dec(&frame->refcnt); + + for (int i = 0; i < s->st41_frames_cnt; i++) { + frame = &s->st41_frames[i]; + st_frame_trans_uinit(frame, NULL); + } + + mt_rte_free(s->st41_frames); + s->st41_frames = NULL; + } + + dbg("%s(%d), succ\n", __func__, s->idx); + return 0; +} + +static int tx_fastmetadata_session_alloc_frames( + struct st_tx_fastmetadata_session_impl* s) { + int soc_id = s->socket_id; + int idx = s->idx; + struct st_frame_trans* frame_info; + + if (s->st41_frames) { + err("%s(%d), st41_frames already alloc\n", __func__, idx); + return -EIO; + } + + 
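+  /* Per-session framebuffer setup (descriptive note): allocate st41_frames_cnt
+   * st_frame_trans entries on the session's NUMA socket, then back each entry with one
+   * struct st41_frame. addr/iova and ST_FT_FLAG_RTE_MALLOC are recorded so
+   * tx_fastmetadata_session_free_frames() can release them later; refcnt starts at 0 and
+   * is taken while a frame is queued for transmission. */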
s->st41_frames = + mt_rte_zmalloc_socket(sizeof(*s->st41_frames) * s->st41_frames_cnt, soc_id); + if (!s->st41_frames) { + err("%s(%d), st30_frames malloc fail\n", __func__, idx); + return -ENOMEM; + } + + for (int i = 0; i < s->st41_frames_cnt; i++) { + frame_info = &s->st41_frames[i]; + rte_atomic32_set(&frame_info->refcnt, 0); + frame_info->idx = i; + } + + for (int i = 0; i < s->st41_frames_cnt; i++) { + frame_info = &s->st41_frames[i]; + + void* frame = mt_rte_zmalloc_socket(sizeof(struct st41_frame), soc_id); + if (!frame) { + err("%s(%d), frame malloc fail at %d\n", __func__, idx, i); + tx_fastmetadata_session_free_frames(s); + return -ENOMEM; + } + frame_info->iova = rte_mem_virt2iova(frame); + frame_info->addr = frame; + frame_info->flags = ST_FT_FLAG_RTE_MALLOC; + } + + dbg("%s(%d), succ with %u frames\n", __func__, idx, s->st41_frames_cnt); + return 0; +} + +static int tx_fastmetadata_session_init_hdr(struct mtl_main_impl* impl, + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s, + enum mtl_session_port s_port) { + MTL_MAY_UNUSED(mgr); + int idx = s->idx; + enum mtl_port port = mt_port_logic2phy(s->port_maps, s_port); + struct st41_tx_ops* ops = &s->ops; + int ret; + struct st41_fmd_hdr* hdr = &s->hdr[s_port]; + struct rte_ether_hdr* eth = &hdr->eth; + struct rte_ipv4_hdr* ipv4 = &hdr->ipv4; + struct rte_udp_hdr* udp = &hdr->udp; + struct st41_rtp_hdr* rtp = &hdr->rtp; + uint8_t* dip = ops->dip_addr[s_port]; + uint8_t* sip = mt_sip_addr(impl, port); + struct rte_ether_addr* d_addr = mt_eth_d_addr(eth); + + /* ether hdr */ + if ((s_port == MTL_SESSION_PORT_P) && (ops->flags & ST41_TX_FLAG_USER_P_MAC)) { + rte_memcpy(d_addr->addr_bytes, &ops->tx_dst_mac[s_port][0], RTE_ETHER_ADDR_LEN); + info("%s, USER_P_TX_MAC\n", __func__); + } else if ((s_port == MTL_SESSION_PORT_R) && (ops->flags & ST41_TX_FLAG_USER_R_MAC)) { + rte_memcpy(d_addr->addr_bytes, &ops->tx_dst_mac[s_port][0], RTE_ETHER_ADDR_LEN); + info("%s, USER_R_TX_MAC\n", __func__); + } else { + ret = mt_dst_ip_mac(impl, dip, d_addr, port, impl->arp_timeout_ms); + if (ret < 0) { + err("%s(%d), get mac fail %d for %d.%d.%d.%d\n", __func__, idx, ret, dip[0], dip[1], + dip[2], dip[3]); + return ret; + } + } + + ret = mt_macaddr_get(impl, port, mt_eth_s_addr(eth)); + if (ret < 0) { + err("%s(%d), macaddr get fail %d for port %d\n", __func__, idx, ret, port); + return ret; + } + eth->ether_type = htons(RTE_ETHER_TYPE_IPV4); + + /* ipv4 hdr */ + memset(ipv4, 0x0, sizeof(*ipv4)); + ipv4->version_ihl = (4 << 4) | (sizeof(struct rte_ipv4_hdr) / 4); + ipv4->time_to_live = 64; + ipv4->type_of_service = 0; + ipv4->packet_id = 0; + ipv4->fragment_offset = MT_IP_DONT_FRAGMENT_FLAG; + ipv4->next_proto_id = IPPROTO_UDP; + mtl_memcpy(&ipv4->src_addr, sip, MTL_IP_ADDR_LEN); + mtl_memcpy(&ipv4->dst_addr, dip, MTL_IP_ADDR_LEN); + + /* udp hdr */ + udp->src_port = htons(s->st41_src_port[s_port]); + udp->dst_port = htons(s->st41_dst_port[s_port]); + udp->dgram_cksum = 0; + + /* rtp hdr */ + memset(rtp, 0x0, sizeof(*rtp)); + rtp->base.csrc_count = 0; + rtp->base.extension = 0; + rtp->base.padding = 0; + rtp->base.version = ST_RVRTP_VERSION_2; + rtp->base.marker = 0; + rtp->base.payload_type = + ops->payload_type ? ops->payload_type : ST_RFMDRTP_PAYLOAD_TYPE_FASTMETADATA; + uint32_t ssrc = ops->ssrc ? 
ops->ssrc : s->idx + 0x323450; + rtp->base.ssrc = htonl(ssrc); + s->st41_seq_id = 0; + + info("%s(%d,%d), ip %u.%u.%u.%u port %u:%u\n", __func__, idx, s_port, dip[0], dip[1], + dip[2], dip[3], s->st41_src_port[s_port], s->st41_dst_port[s_port]); + info("%s(%d), mac: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx, ssrc %u\n", __func__, idx, + d_addr->addr_bytes[0], d_addr->addr_bytes[1], d_addr->addr_bytes[2], + d_addr->addr_bytes[3], d_addr->addr_bytes[4], d_addr->addr_bytes[5], ssrc); + return 0; +} + +static int tx_fastmetadata_session_init_pacing( + struct st_tx_fastmetadata_session_impl* s) { + int idx = s->idx; + struct st_tx_fastmetadata_session_pacing* pacing = &s->pacing; + double frame_time = (double)1000000000.0 * s->fps_tm.den / s->fps_tm.mul; + + pacing->frame_time = frame_time; + pacing->frame_time_sampling = + (double)(s->fps_tm.sampling_clock_rate) * s->fps_tm.den / s->fps_tm.mul; + pacing->max_onward_epochs = (double)(NS_PER_S * 1) / frame_time; /* 1s */ + dbg("%s[%02d], max_onward_epochs %u\n", __func__, idx, pacing->max_onward_epochs); + + info("%s[%02d], frame_time %f frame_time_sampling %f\n", __func__, idx, + pacing->frame_time, pacing->frame_time_sampling); + return 0; +} + +static int tx_fastmetadata_session_init_pacing_epoch( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_session_impl* s) { + uint64_t ptp_time = mt_get_ptp_time(impl, MTL_PORT_P); + struct st_tx_fastmetadata_session_pacing* pacing = &s->pacing; + pacing->cur_epochs = ptp_time / pacing->frame_time; + return 0; +} + +static inline double tx_fastmetadata_pacing_time( + struct st_tx_fastmetadata_session_pacing* pacing, uint64_t epochs) { + return epochs * pacing->frame_time; +} + +static inline uint32_t tx_fastmetadata_pacing_time_stamp( + struct st_tx_fastmetadata_session_pacing* pacing, uint64_t epochs) { + uint64_t tmstamp64 = epochs * pacing->frame_time_sampling; + uint32_t tmstamp32 = tmstamp64; + + return tmstamp32; +} + +static uint64_t tx_fastmetadata_pacing_required_tai( + struct st_tx_fastmetadata_session_impl* s, enum st10_timestamp_fmt tfmt, + uint64_t timestamp) { + uint64_t required_tai = 0; + + if (!(s->ops.flags & ST41_TX_FLAG_USER_PACING)) return 0; + if (!timestamp) return 0; + + if (tfmt == ST10_TIMESTAMP_FMT_MEDIA_CLK) { + if (timestamp > 0xFFFFFFFF) { + err("%s(%d), invalid timestamp %" PRIu64 "\n", __func__, s->idx, timestamp); + } + required_tai = st10_media_clk_to_ns((uint32_t)timestamp, 90 * 1000); + } else { + required_tai = timestamp; + } + + return required_tai; +} + +static int tx_fastmetadata_session_sync_pacing(struct mtl_main_impl* impl, + struct st_tx_fastmetadata_session_impl* s, + bool sync, uint64_t required_tai, + bool second_field) { + struct st_tx_fastmetadata_session_pacing* pacing = &s->pacing; + double frame_time = pacing->frame_time; + /* always use MTL_PORT_P for ptp now */ + uint64_t ptp_time = mt_get_ptp_time(impl, MTL_PORT_P); + uint64_t next_epochs = pacing->cur_epochs + 1; + uint64_t epochs; + double to_epoch; + bool interlaced = s->ops.interlaced; + + if (required_tai) { + uint64_t ptp_epochs = ptp_time / frame_time; + epochs = required_tai / frame_time; + dbg("%s(%d), required tai %" PRIu64 " ptp_epochs %" PRIu64 " epochs %" PRIu64 "\n", + __func__, s->idx, required_tai, ptp_epochs, epochs); + if (epochs < ptp_epochs) s->stat_error_user_timestamp++; + } else { + epochs = ptp_time / frame_time; + } + + dbg("%s(%d), epochs %" PRIu64 " %" PRIu64 "\n", __func__, s->idx, epochs, + pacing->cur_epochs); + if (epochs <= pacing->cur_epochs) { + uint64_t diff 
= pacing->cur_epochs - epochs; + if (diff < pacing->max_onward_epochs) { + /* point to next epoch since if it in the range of onward */ + epochs = next_epochs; + } + } + + if (interlaced) { + if (second_field) { /* align to odd epoch */ + if (!(epochs & 0x1)) epochs++; + s->stat_interlace_second_field++; + } else { /* align to even epoch */ + if (epochs & 0x1) epochs++; + s->stat_interlace_first_field++; + } + } + + to_epoch = tx_fastmetadata_pacing_time(pacing, epochs) - ptp_time; + if (to_epoch < 0) { + /* time bigger than the assigned epoch time */ + s->stat_epoch_mismatch++; + to_epoch = 0; /* send asap */ + } + + if (epochs > next_epochs) s->stat_epoch_drop += (epochs - next_epochs); + if (epochs < next_epochs) s->stat_epoch_onward += (next_epochs - epochs); + + pacing->cur_epochs = epochs; + pacing->cur_epoch_time = tx_fastmetadata_pacing_time(pacing, epochs); + pacing->pacing_time_stamp = tx_fastmetadata_pacing_time_stamp(pacing, epochs); + pacing->rtp_time_stamp = pacing->pacing_time_stamp; + pacing->tsc_time_cursor = (double)mt_get_tsc(impl) + to_epoch; + dbg("%s(%d), epochs %" PRIu64 " time_stamp %u time_cursor %f to_epoch %f\n", __func__, + s->idx, pacing->cur_epochs, pacing->pacing_time_stamp, pacing->tsc_time_cursor, + to_epoch); + + if (sync) { + dbg("%s(%d), delay to epoch_time %f, cur %" PRIu64 "\n", __func__, s->idx, + pacing->tsc_time_cursor, mt_get_tsc(impl)); + mt_tsc_delay_to(impl, pacing->tsc_time_cursor); + } + + return 0; +} + +static int tx_fastmetadata_session_init_next_meta( + struct st_tx_fastmetadata_session_impl* s, struct st41_tx_frame_meta* meta) { + struct st_tx_fastmetadata_session_pacing* pacing = &s->pacing; + struct st41_tx_ops* ops = &s->ops; + + memset(meta, 0, sizeof(*meta)); + meta->fps = ops->fps; + if (ops->interlaced) { /* init second_field but user still can customize also */ + meta->second_field = s->second_field; + } + /* point to next epoch */ + meta->epoch = pacing->cur_epochs + 1; + meta->tfmt = ST10_TIMESTAMP_FMT_TAI; + meta->timestamp = tx_fastmetadata_pacing_time(pacing, meta->epoch); + return 0; +} + +static int tx_fastmetadata_session_init(struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s, + int idx) { + MTL_MAY_UNUSED(mgr); + s->idx = idx; + return 0; +} + +static int tx_fastmetadata_sessions_tasklet_start(void* priv) { + struct st_tx_fastmetadata_sessions_mgr* mgr = priv; + struct mtl_main_impl* impl = mgr->parent; + struct st_tx_fastmetadata_session_impl* s; + + for (int sidx = 0; sidx < mgr->max_idx; sidx++) { + s = tx_fastmetadata_session_get(mgr, sidx); + if (!s) continue; + + tx_fastmetadata_session_init_pacing_epoch(impl, s); + tx_fastmetadata_session_put(mgr, sidx); + } + + return 0; +} + +static int tx_fastmetadata_session_update_redundant( + struct st_tx_fastmetadata_session_impl* s, struct rte_mbuf* pkt_r) { + struct mt_udp_hdr* hdr = rte_pktmbuf_mtod(pkt_r, struct mt_udp_hdr*); + struct rte_ipv4_hdr* ipv4 = &hdr->ipv4; + struct rte_udp_hdr* udp = &hdr->udp; + + /* update the hdr: eth, ip, udp */ + rte_memcpy(hdr, &s->hdr[MTL_SESSION_PORT_R], sizeof(*hdr)); + + ipv4->total_length = htons(pkt_r->pkt_len - pkt_r->l2_len); + + udp->dgram_len = htons(pkt_r->pkt_len - pkt_r->l2_len - pkt_r->l3_len); + if (!s->eth_ipv4_cksum_offload[MTL_SESSION_PORT_R]) { + /* generate cksum if no offload */ + ipv4->hdr_checksum = rte_ipv4_cksum(ipv4); + } + + return 0; +} + +static void tx_fastmetadata_session_build_packet( + struct st_tx_fastmetadata_session_impl* s, struct rte_mbuf* pkt) { + struct 
mt_udp_hdr* hdr; + struct rte_ipv4_hdr* ipv4; + struct rte_udp_hdr* udp; + struct st41_rtp_hdr* rtp; + + hdr = rte_pktmbuf_mtod(pkt, struct mt_udp_hdr*); + ipv4 = &hdr->ipv4; + udp = &hdr->udp; + rtp = (struct st41_rtp_hdr*)&udp[1]; + + /* copy the hdr: eth, ip, udp */ + rte_memcpy(&hdr->eth, &s->hdr[MTL_SESSION_PORT_P].eth, sizeof(hdr->eth)); + rte_memcpy(ipv4, &s->hdr[MTL_SESSION_PORT_P].ipv4, sizeof(hdr->ipv4)); + rte_memcpy(udp, &s->hdr[MTL_SESSION_PORT_P].udp, sizeof(hdr->udp)); + + /* update mbuf */ + mt_mbuf_init_ipv4(pkt); + pkt->data_len = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr); + + rte_memcpy(rtp, &s->hdr[MTL_SESSION_PORT_P].rtp, sizeof(*rtp)); + + /* update rtp */ + rtp->base.seq_number = htons(s->st41_seq_id); + s->st41_seq_id++; + rtp->base.tmstamp = htonl(s->pacing.rtp_time_stamp); + + /* Set place for payload just behind rtp header */ + uint8_t* payload = (uint8_t*)&rtp[1]; + struct st_frame_trans* frame_info = &s->st41_frames[s->st41_frame_idx]; + uint32_t offset = s->st41_pkt_idx * s->max_pkt_len; + void* src_addr = frame_info->addr + offset; + struct st41_frame* src = src_addr; + uint16_t data_item_length_bytes = src->data_item_length_bytes; + uint16_t data_item_length = + (data_item_length_bytes + 3) / 4; /* expressed in number of 4-byte words */ + + if (!(data_item_length_bytes > s->max_pkt_len)) { + int offset = 0; + for (int i = 0; i < data_item_length_bytes; i++) { + payload[i] = src->data[offset++]; + } + /* filling with 0's the remianing bytes of last 4-byte word */ + for (int i = data_item_length_bytes; i < data_item_length * 4; i++) { + payload[i] = 0; + } + } + + pkt->data_len += sizeof(struct st41_rtp_hdr) + data_item_length * 4; + pkt->pkt_len = pkt->data_len; + rtp->st41_hdr_chunk.data_item_type = s->ops.fmd_dit; + rtp->st41_hdr_chunk.data_item_k_bit = s->ops.fmd_k_bit; + rtp->st41_hdr_chunk.data_item_length = data_item_length; + rtp->swaped_st41_hdr_chunk = htonl(rtp->swaped_st41_hdr_chunk); + dbg("%s(%d), payload_size (data_item_length_bytes) %d\n", __func__, s->idx, + data_item_length_bytes); + + udp->dgram_len = htons(pkt->pkt_len - pkt->l2_len - pkt->l3_len); + ipv4->total_length = htons(pkt->pkt_len - pkt->l2_len); + + if (!s->eth_ipv4_cksum_offload[MTL_SESSION_PORT_P]) { + /* generate cksum if no offload */ + ipv4->hdr_checksum = rte_ipv4_cksum(ipv4); + } + + return; +} + +static void tx_fastmetadata_session_build_rtp_packet( + struct st_tx_fastmetadata_session_impl* s, struct rte_mbuf* pkt) { + struct st41_rtp_hdr* rtp; + + rtp = rte_pktmbuf_mtod(pkt, struct st41_rtp_hdr*); + rte_memcpy(rtp, &s->hdr[MTL_SESSION_PORT_P].rtp, sizeof(*rtp)); + + /* update rtp */ + rtp->base.seq_number = htons(s->st41_seq_id); + s->st41_seq_id++; + rtp->base.tmstamp = htonl(s->pacing.rtp_time_stamp); + + /* Set place for payload just behind rtp header */ + uint8_t* payload = (uint8_t*)&rtp[1]; + struct st_frame_trans* frame_info = &s->st41_frames[s->st41_frame_idx]; + uint32_t offset = s->st41_pkt_idx * s->max_pkt_len; + void* src_addr = frame_info->addr + offset; + struct st41_frame* src = src_addr; + uint16_t data_item_length_bytes = src->data_item_length_bytes; + uint16_t data_item_length = + (data_item_length_bytes + 3) / 4; /* expressed in number of 4-byte words */ + + if (!(data_item_length_bytes > s->max_pkt_len)) { + int offset = 0; + for (int i = 0; i < data_item_length_bytes; i++) { + payload[i] = src->data[offset++]; + } + /* filling with 0's the remianing bytes of last 4-byte word */ + for (int i = 
data_item_length_bytes; i < data_item_length * 4; i++) { + payload[i] = 0; + } + } + + pkt->data_len = sizeof(struct st41_rtp_hdr) + data_item_length * 4; + pkt->pkt_len = pkt->data_len; + rtp->st41_hdr_chunk.data_item_type = s->ops.fmd_dit; + rtp->st41_hdr_chunk.data_item_k_bit = s->ops.fmd_k_bit; + rtp->st41_hdr_chunk.data_item_length = data_item_length; + rtp->swaped_st41_hdr_chunk = htonl(rtp->swaped_st41_hdr_chunk); + + dbg("%s(%d), payload_size (data_item_length_bytes) %d\n", __func__, s->idx, + data_item_length_bytes); + + return; +} + +static int tx_fastmetadata_session_rtp_update_packet( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_session_impl* s, + struct rte_mbuf* pkt) { + struct mt_udp_hdr* hdr; + struct rte_ipv4_hdr* ipv4; + struct st_rfc3550_rtp_hdr* rtp; + struct rte_udp_hdr* udp; + + hdr = rte_pktmbuf_mtod(pkt, struct mt_udp_hdr*); + ipv4 = &hdr->ipv4; + udp = &hdr->udp; + rtp = + rte_pktmbuf_mtod_offset(pkt, struct st_rfc3550_rtp_hdr*, sizeof(struct mt_udp_hdr)); + + /* copy the hdr: eth, ip, udp */ + rte_memcpy(&hdr->eth, &s->hdr[MTL_SESSION_PORT_P].eth, sizeof(hdr->eth)); + rte_memcpy(ipv4, &s->hdr[MTL_SESSION_PORT_P].ipv4, sizeof(hdr->ipv4)); + rte_memcpy(udp, &s->hdr[MTL_SESSION_PORT_P].udp, sizeof(hdr->udp)); + + if (rtp->tmstamp != s->st41_rtp_time) { + /* start of a new frame */ + s->st41_pkt_idx = 0; + rte_atomic32_inc(&s->st41_stat_frame_cnt); + s->st41_rtp_time = rtp->tmstamp; + bool second_field = false; + + tx_fastmetadata_session_sync_pacing(impl, s, false, 0, second_field); + } + if (s->ops.flags & ST41_TX_FLAG_USER_TIMESTAMP) { + s->pacing.rtp_time_stamp = ntohl(rtp->tmstamp); + } + rtp->tmstamp = htonl(s->pacing.rtp_time_stamp); + + /* update mbuf */ + mt_mbuf_init_ipv4(pkt); + + /* update udp header */ + udp->dgram_len = htons(pkt->pkt_len - pkt->l2_len - pkt->l3_len); + ipv4->total_length = htons(pkt->pkt_len - pkt->l2_len); + if (!s->eth_ipv4_cksum_offload[MTL_SESSION_PORT_P]) { + /* generate cksum if no offload */ + ipv4->hdr_checksum = rte_ipv4_cksum(ipv4); + } + + return 0; +} + +static int tx_fastmetadata_session_build_packet_chain( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_session_impl* s, + struct rte_mbuf* pkt, struct rte_mbuf* pkt_rtp, enum mtl_session_port s_port) { + struct mt_udp_hdr* hdr; + struct rte_ipv4_hdr* ipv4; + struct rte_udp_hdr* udp; + struct st41_tx_ops* ops = &s->ops; + + hdr = rte_pktmbuf_mtod(pkt, struct mt_udp_hdr*); + ipv4 = &hdr->ipv4; + udp = &hdr->udp; + + /* copy the hdr: eth, ip, udp */ + rte_memcpy(&hdr->eth, &s->hdr[s_port].eth, sizeof(hdr->eth)); + rte_memcpy(ipv4, &s->hdr[s_port].ipv4, sizeof(hdr->ipv4)); + rte_memcpy(udp, &s->hdr[s_port].udp, sizeof(hdr->udp)); + + /* update only for primary */ + if (s_port == MTL_SESSION_PORT_P) { + /* update rtp time for rtp path */ + if (ops->type == ST41_TYPE_RTP_LEVEL) { + struct st41_rtp_hdr* rtp = rte_pktmbuf_mtod(pkt_rtp, struct st41_rtp_hdr*); + if (rtp->base.tmstamp != s->st41_rtp_time) { + /* start of a new frame */ + s->st41_pkt_idx = 0; + rte_atomic32_inc(&s->st41_stat_frame_cnt); + s->st41_rtp_time = rtp->base.tmstamp; + bool second_field = false; + tx_fastmetadata_session_sync_pacing(impl, s, false, 0, second_field); + } + if (s->ops.flags & ST41_TX_FLAG_USER_TIMESTAMP) { + s->pacing.rtp_time_stamp = ntohl(rtp->base.tmstamp); + } + rtp->base.tmstamp = htonl(s->pacing.rtp_time_stamp); + } + } + + /* update mbuf */ + mt_mbuf_init_ipv4(pkt); + pkt->data_len = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + + sizeof(struct 
rte_udp_hdr); + pkt->pkt_len = pkt->data_len; + + /* chain the pkt */ + rte_pktmbuf_chain(pkt, pkt_rtp); + + udp->dgram_len = htons(pkt->pkt_len - pkt->l2_len - pkt->l3_len); + ipv4->total_length = htons(pkt->pkt_len - pkt->l2_len); + if (!s->eth_ipv4_cksum_offload[s_port]) { + /* generate cksum if no offload */ + ipv4->hdr_checksum = rte_ipv4_cksum(ipv4); + } + + /* rtp packet used twice for redundant path */ + if (s_port == MTL_SESSION_PORT_R) rte_mbuf_refcnt_update(pkt_rtp, 1); + + return 0; +} + +static inline int tx_fastmetadata_session_send_pkt( + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s, enum mtl_session_port s_port, + struct rte_mbuf* pkt) { + int ret; + enum mtl_port port = mt_port_logic2phy(s->port_maps, s_port); + struct rte_ring* ring = mgr->ring[port]; + + if (s->queue[s_port]) { + uint16_t tx = mt_txq_burst(s->queue[s_port], &pkt, 1); + if (tx < 1) + ret = -EIO; + else + ret = 0; + } else { + ret = rte_ring_mp_enqueue(ring, (void*)pkt); + } + return ret; +} + +static int tx_fastmetadata_session_tasklet_frame( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + int idx = s->idx; + struct st41_tx_ops* ops = &s->ops; + struct st_tx_fastmetadata_session_pacing* pacing = &s->pacing; + int ret; + bool send_r = false; + enum mtl_port port_p = mt_port_logic2phy(s->port_maps, MTL_SESSION_PORT_P); + struct rte_mempool* hdr_pool_p = s->mbuf_mempool_hdr[MTL_SESSION_PORT_P]; + struct rte_mempool* hdr_pool_r = NULL; + struct rte_mempool* chain_pool = s->mbuf_mempool_chain; + struct rte_ring* ring_p = mgr->ring[port_p]; + + if (ring_p && rte_ring_full(ring_p)) { + s->stat_build_ret_code = -STI_FRAME_RING_FULL; + return MTL_TASKLET_ALL_DONE; + } + + if (ops->num_port > 1) { + send_r = true; + hdr_pool_r = s->mbuf_mempool_hdr[MTL_SESSION_PORT_R]; + } + + /* check if any inflight pkts */ + if (s->inflight[MTL_SESSION_PORT_P]) { + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_P, + s->inflight[MTL_SESSION_PORT_P]); + if (ret == 0) { + s->inflight[MTL_SESSION_PORT_P] = NULL; + } else { + s->stat_build_ret_code = -STI_FRAME_INFLIGHT_ENQUEUE_FAIL; + return MTL_TASKLET_ALL_DONE; + } + } + + if (send_r && s->inflight[MTL_SESSION_PORT_R]) { + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_R, + s->inflight[MTL_SESSION_PORT_R]); + if (ret == 0) { + s->inflight[MTL_SESSION_PORT_R] = NULL; + } else { + s->stat_build_ret_code = -STI_FRAME_INFLIGHT_R_ENQUEUE_FAIL; + return MTL_TASKLET_ALL_DONE; + } + } + + if (ST41_TX_STAT_WAIT_FRAME == s->st41_frame_stat) { + uint16_t next_frame_idx; + int data_item_length_bytes = 0; + struct st41_tx_frame_meta meta; + + if (s->check_frame_done_time) { + uint64_t frame_end_time = mt_get_tsc(impl); + if (frame_end_time > pacing->tsc_time_cursor) { + s->stat_exceed_frame_time++; + dbg("%s(%d), frame %d build time out %" PRIu64 " us\n", __func__, idx, + s->st41_frame_idx, (frame_end_time - pacing->tsc_time_cursor) / NS_PER_US); + } + s->check_frame_done_time = false; + } + + tx_fastmetadata_session_init_next_meta(s, &meta); + /* Query next frame buffer idx */ + uint64_t tsc_start = 0; + bool time_measure = mt_sessions_time_measure(impl); + if (time_measure) tsc_start = mt_get_tsc(impl); + ret = ops->get_next_frame(ops->priv, &next_frame_idx, &meta); + if (time_measure) { + uint32_t delta_us = (mt_get_tsc(impl) - tsc_start) / NS_PER_US; + s->stat_max_next_frame_us = RTE_MAX(s->stat_max_next_frame_us, delta_us); + } + if (ret 
< 0) { /* no frame ready from app */ + dbg("%s(%d), get_next_frame fail %d\n", __func__, idx, ret); + s->stat_build_ret_code = -STI_FRAME_APP_GET_FRAME_BUSY; + return MTL_TASKLET_ALL_DONE; + } + /* check frame refcnt */ + struct st_frame_trans* frame = &s->st41_frames[next_frame_idx]; + int refcnt = rte_atomic32_read(&frame->refcnt); + if (refcnt) { + err("%s(%d), frame %u refcnt not zero %d\n", __func__, idx, next_frame_idx, refcnt); + s->stat_build_ret_code = -STI_FRAME_APP_ERR_TX_FRAME; + return MTL_TASKLET_ALL_DONE; + } + rte_atomic32_inc(&frame->refcnt); + frame->tf_meta = meta; + s->st41_frame_idx = next_frame_idx; + dbg("%s(%d), next_frame_idx %d start\n", __func__, idx, next_frame_idx); + s->st41_frame_stat = ST41_TX_STAT_SENDING_PKTS; + struct st41_frame* src = (struct st41_frame*)frame->addr; + data_item_length_bytes += src->data_item_length_bytes; + int total_size = data_item_length_bytes; + s->st41_pkt_idx = 0; + s->st41_total_pkts = total_size / s->max_pkt_len; + if (total_size % s->max_pkt_len) s->st41_total_pkts++; + /* how do we split if it need two or more pkts? */ + dbg("%s(%d), st41_total_pkts %d data_item_length_bytes %d src %p\n", __func__, idx, + s->st41_total_pkts, data_item_length_bytes, src); + if (s->st41_total_pkts > 1) { + err("%s(%d), frame %u invalid st41_total_pkts %d\n", __func__, idx, next_frame_idx, + s->st41_total_pkts); + s->stat_build_ret_code = -STI_FRAME_APP_ERR_TX_FRAME; + return MTL_TASKLET_ALL_DONE; + } + + MT_USDT_ST41_TX_FRAME_NEXT(s->mgr->idx, s->idx, next_frame_idx, frame->addr, 0, + data_item_length_bytes); + } + + /* sync pacing */ + if (s->calculate_time_cursor) { + struct st_frame_trans* frame = &s->st41_frames[s->st41_frame_idx]; + /* user timestamp control if any */ + uint64_t required_tai = tx_fastmetadata_pacing_required_tai(s, frame->tf_meta.tfmt, + frame->tf_meta.timestamp); + bool second_field = frame->tf_meta.second_field; + tx_fastmetadata_session_sync_pacing(impl, s, false, required_tai, second_field); + if (ops->flags & ST41_TX_FLAG_USER_TIMESTAMP && + (frame->ta_meta.tfmt == ST10_TIMESTAMP_FMT_MEDIA_CLK)) { + pacing->rtp_time_stamp = (uint32_t)frame->tf_meta.timestamp; + } + frame->tf_meta.tfmt = ST10_TIMESTAMP_FMT_TAI; + frame->tf_meta.timestamp = pacing->cur_epoch_time; + frame->tf_meta.rtp_timestamp = pacing->rtp_time_stamp; + /* init to next field */ + if (ops->interlaced) { + s->second_field = second_field ? false : true; + } + s->calculate_time_cursor = false; /* clear */ + } + + uint64_t cur_tsc = mt_get_tsc(impl); + uint64_t target_tsc = pacing->tsc_time_cursor; + if (cur_tsc < target_tsc) { + uint64_t delta = target_tsc - cur_tsc; + // dbg("%s(%d), cur_tsc %"PRIu64" target_tsc %"PRIu64"\n", __func__, idx, cur_tsc, + // target_tsc); + if (likely(delta < NS_PER_S)) { + s->stat_build_ret_code = -STI_TSCTRS_TARGET_TSC_NOT_REACH; + return delta < mt_sch_schedule_ns(impl) ? 
MTL_TASKLET_HAS_PENDING + : MTL_TASKLET_ALL_DONE; + } else { + err("%s(%d), invalid tsc cur %" PRIu64 " target %" PRIu64 "\n", __func__, idx, + cur_tsc, target_tsc); + } + } + + struct rte_mbuf* pkt = NULL; + struct rte_mbuf* pkt_r = NULL; + + pkt = rte_pktmbuf_alloc(hdr_pool_p); + if (!pkt) { + err("%s(%d), rte_pktmbuf_alloc fail\n", __func__, idx); + s->stat_build_ret_code = -STI_FRAME_PKT_ALLOC_FAIL; + return MTL_TASKLET_ALL_DONE; + } + + if (!s->tx_no_chain) { + struct rte_mbuf* pkt_rtp = rte_pktmbuf_alloc(chain_pool); + if (!pkt_rtp) { + err("%s(%d), pkt_rtp alloc fail\n", __func__, idx); + rte_pktmbuf_free(pkt); + s->stat_build_ret_code = -STI_FRAME_PKT_ALLOC_FAIL; + return MTL_TASKLET_ALL_DONE; + } + tx_fastmetadata_session_build_rtp_packet(s, pkt_rtp); + tx_fastmetadata_session_build_packet_chain(impl, s, pkt, pkt_rtp, MTL_SESSION_PORT_P); + + if (send_r) { + pkt_r = rte_pktmbuf_alloc(hdr_pool_r); + if (!pkt_r) { + err("%s(%d), rte_pktmbuf_alloc fail\n", __func__, idx); + s->stat_build_ret_code = -STI_FRAME_PKT_ALLOC_FAIL; + rte_pktmbuf_free(pkt); + rte_pktmbuf_free(pkt_rtp); + return MTL_TASKLET_ALL_DONE; + } + tx_fastmetadata_session_build_packet_chain(impl, s, pkt_r, pkt_rtp, + MTL_SESSION_PORT_R); + } + } else { + tx_fastmetadata_session_build_packet(s, pkt); + if (send_r) { + pkt_r = rte_pktmbuf_copy(pkt, hdr_pool_r, 0, UINT32_MAX); + if (!pkt_r) { + err("%s(%d), rte_pktmbuf_copy redundant fail\n", __func__, idx); + rte_pktmbuf_free(pkt); + s->stat_build_ret_code = -STI_FRAME_PKT_ALLOC_FAIL; + return MTL_TASKLET_ALL_DONE; + } + tx_fastmetadata_session_update_redundant(s, pkt_r); + } + } + + st_tx_mbuf_set_idx(pkt, s->st41_pkt_idx); + st_tx_mbuf_set_tsc(pkt, pacing->tsc_time_cursor); + s->st41_stat_pkt_cnt[MTL_SESSION_PORT_P]++; + if (send_r) { + st_tx_mbuf_set_idx(pkt_r, s->st41_pkt_idx); + st_tx_mbuf_set_tsc(pkt_r, pacing->tsc_time_cursor); + s->st41_stat_pkt_cnt[MTL_SESSION_PORT_R]++; + } + + s->st41_pkt_idx++; + pacing->tsc_time_cursor += pacing->frame_time; + s->calculate_time_cursor = true; + + bool done = false; + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_P, pkt); + if (ret != 0) { + s->inflight[MTL_SESSION_PORT_P] = pkt; + s->inflight_cnt[MTL_SESSION_PORT_P]++; + done = true; + s->stat_build_ret_code = -STI_FRAME_PKT_ENQUEUE_FAIL; + } + if (send_r) { + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_R, pkt_r); + if (ret != 0) { + s->inflight[MTL_SESSION_PORT_R] = pkt_r; + s->inflight_cnt[MTL_SESSION_PORT_R]++; + done = true; + s->stat_build_ret_code = -STI_FRAME_PKT_R_ENQUEUE_FAIL; + } + } + + if (s->st41_pkt_idx >= s->st41_total_pkts) { + dbg("%s(%d), frame %d done\n", __func__, idx, s->st41_frame_idx); + struct st_frame_trans* frame = &s->st41_frames[s->st41_frame_idx]; + struct st41_tx_frame_meta* tf_meta = &frame->tf_meta; + uint64_t tsc_start = 0; + bool time_measure = mt_sessions_time_measure(impl); + if (time_measure) tsc_start = mt_get_tsc(impl); + /* end of current frame */ + if (s->ops.notify_frame_done) + ops->notify_frame_done(ops->priv, s->st41_frame_idx, tf_meta); + if (time_measure) { + uint32_t delta_us = (mt_get_tsc(impl) - tsc_start) / NS_PER_US; + s->stat_max_notify_frame_us = RTE_MAX(s->stat_max_notify_frame_us, delta_us); + } + rte_atomic32_dec(&frame->refcnt); + s->st41_frame_stat = ST41_TX_STAT_WAIT_FRAME; + s->st41_pkt_idx = 0; + rte_atomic32_inc(&s->st41_stat_frame_cnt); + pacing->tsc_time_cursor = 0; + + MT_USDT_ST41_TX_FRAME_DONE(s->mgr->idx, s->idx, s->st41_frame_idx, + tf_meta->rtp_timestamp); + } + + 
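+  /* Return semantics of the frame-level tasklet (descriptive note): a successful enqueue
+   * leaves done == false, so MTL_TASKLET_HAS_PENDING asks the scheduler to call back soon
+   * for the next packet/frame. If the transmit ring or dedicated queue rejected the packet,
+   * it is parked in s->inflight[] and MTL_TASKLET_ALL_DONE is returned; the inflight buffer
+   * is retried at the top of the next tasklet pass before any new packet is built. */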
return done ? MTL_TASKLET_ALL_DONE : MTL_TASKLET_HAS_PENDING; +} + +static int tx_fastmetadata_session_tasklet_rtp( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + int idx = s->idx; + int ret; + struct st_tx_fastmetadata_session_pacing* pacing = &s->pacing; + bool send_r = false; + enum mtl_port port_p = mt_port_logic2phy(s->port_maps, MTL_SESSION_PORT_P); + struct rte_mempool* hdr_pool_p = s->mbuf_mempool_hdr[MTL_SESSION_PORT_P]; + struct rte_mempool* hdr_pool_r = NULL; + struct rte_ring* ring_p = mgr->ring[port_p]; + + if (ring_p && rte_ring_full(ring_p)) { + s->stat_build_ret_code = -STI_RTP_RING_FULL; + return MTL_TASKLET_ALL_DONE; + } + + if (s->ops.num_port > 1) { + send_r = true; + hdr_pool_r = s->mbuf_mempool_hdr[MTL_SESSION_PORT_R]; + } + + /* check if any inflight pkts */ + if (s->inflight[MTL_SESSION_PORT_P]) { + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_P, + s->inflight[MTL_SESSION_PORT_P]); + if (ret == 0) { + s->inflight[MTL_SESSION_PORT_P] = NULL; + } else { + s->stat_build_ret_code = -STI_RTP_INFLIGHT_ENQUEUE_FAIL; + return MTL_TASKLET_ALL_DONE; + } + } + + if (send_r && s->inflight[MTL_SESSION_PORT_R]) { + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_R, + s->inflight[MTL_SESSION_PORT_R]); + if (ret == 0) { + s->inflight[MTL_SESSION_PORT_R] = NULL; + } else { + s->stat_build_ret_code = -STI_RTP_INFLIGHT_ENQUEUE_FAIL; + return MTL_TASKLET_ALL_DONE; + } + } + + uint64_t cur_tsc = mt_get_tsc(impl); + uint64_t target_tsc = pacing->tsc_time_cursor; + if (cur_tsc < target_tsc) { + uint64_t delta = target_tsc - cur_tsc; + // dbg("%s(%d), cur_tsc %"PRIu64" target_tsc %"PRIu64"\n", __func__, idx, cur_tsc, + // target_tsc); + if (likely(delta < NS_PER_S)) { + s->stat_build_ret_code = -STI_TSCTRS_TARGET_TSC_NOT_REACH; + return delta < mt_sch_schedule_ns(impl) ? 
MTL_TASKLET_HAS_PENDING + : MTL_TASKLET_ALL_DONE; + } else { + err("%s(%d), invalid tsc cur %" PRIu64 " target %" PRIu64 "\n", __func__, idx, + cur_tsc, target_tsc); + } + } + + struct rte_mbuf* pkt = NULL; + struct rte_mbuf* pkt_r = NULL; + struct rte_mbuf* pkt_rtp = NULL; + + if (rte_ring_sc_dequeue(s->packet_ring, (void**)&pkt_rtp) != 0) { + dbg("%s(%d), rtp pkts not ready %d\n", __func__, idx, ret); + s->stat_build_ret_code = -STI_RTP_APP_DEQUEUE_FAIL; + return MTL_TASKLET_ALL_DONE; + } + + s->ops.notify_rtp_done(s->ops.priv); + + if (!s->tx_no_chain) { + pkt = rte_pktmbuf_alloc(hdr_pool_p); + if (!pkt) { + err("%s(%d), rte_pktmbuf_alloc fail\n", __func__, idx); + rte_pktmbuf_free(pkt_rtp); + s->stat_build_ret_code = -STI_RTP_PKT_ALLOC_FAIL; + return MTL_TASKLET_ALL_DONE; + } + if (send_r) { + pkt_r = rte_pktmbuf_alloc(hdr_pool_r); + if (!pkt_r) { + err("%s(%d), rte_pktmbuf_alloc fail\n", __func__, idx); + rte_pktmbuf_free(pkt); + rte_pktmbuf_free(pkt_rtp); + s->stat_build_ret_code = -STI_RTP_PKT_ALLOC_FAIL; + return MTL_TASKLET_ALL_DONE; + } + } + } + + if (s->tx_no_chain) { + pkt = pkt_rtp; + tx_fastmetadata_session_rtp_update_packet(impl, s, pkt); + } else { + tx_fastmetadata_session_build_packet_chain(impl, s, pkt, pkt_rtp, MTL_SESSION_PORT_P); + } + st_tx_mbuf_set_idx(pkt, s->st41_pkt_idx); + st_tx_mbuf_set_tsc(pkt, pacing->tsc_time_cursor); + s->st41_stat_pkt_cnt[MTL_SESSION_PORT_P]++; + + if (send_r) { + if (s->tx_no_chain) { + pkt_r = rte_pktmbuf_copy(pkt, hdr_pool_r, 0, UINT32_MAX); + if (!pkt_r) { + err("%s(%d), rte_pktmbuf_copy fail\n", __func__, idx); + rte_pktmbuf_free(pkt); + s->stat_build_ret_code = -STI_RTP_PKT_ALLOC_FAIL; + return MTL_TASKLET_ALL_DONE; + } + tx_fastmetadata_session_update_redundant(s, pkt_r); + } else { + tx_fastmetadata_session_build_packet_chain(impl, s, pkt_r, pkt_rtp, + MTL_SESSION_PORT_R); + } + st_tx_mbuf_set_idx(pkt_r, s->st41_pkt_idx); + st_tx_mbuf_set_tsc(pkt_r, pacing->tsc_time_cursor); + s->st41_stat_pkt_cnt[MTL_SESSION_PORT_R]++; + } + + bool done = true; + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_P, pkt); + if (ret != 0) { + s->inflight[MTL_SESSION_PORT_P] = pkt; + s->inflight_cnt[MTL_SESSION_PORT_P]++; + done = false; + s->stat_build_ret_code = -STI_RTP_PKT_ENQUEUE_FAIL; + } + if (send_r) { + ret = tx_fastmetadata_session_send_pkt(mgr, s, MTL_SESSION_PORT_R, pkt_r); + if (ret != 0) { + s->inflight[MTL_SESSION_PORT_R] = pkt_r; + s->inflight_cnt[MTL_SESSION_PORT_R]++; + done = false; + s->stat_build_ret_code = -STI_RTP_PKT_R_ENQUEUE_FAIL; + } + } + + return done ? 
MTL_TASKLET_ALL_DONE : MTL_TASKLET_HAS_PENDING; +} + +static int tx_fastmetadata_sessions_tasklet_handler(void* priv) { + struct st_tx_fastmetadata_sessions_mgr* mgr = priv; + struct mtl_main_impl* impl = mgr->parent; + struct st_tx_fastmetadata_session_impl* s; + int pending = MTL_TASKLET_ALL_DONE; + uint64_t tsc_s = 0; + bool time_measure = mt_sessions_time_measure(impl); + + for (int sidx = 0; sidx < mgr->max_idx; sidx++) { + s = tx_fastmetadata_session_try_get(mgr, sidx); + if (!s) continue; + if (time_measure) tsc_s = mt_get_tsc(impl); + + s->stat_build_ret_code = 0; + if (s->ops.type == ST41_TYPE_FRAME_LEVEL) + pending += tx_fastmetadata_session_tasklet_frame(impl, mgr, s); + else + pending += tx_fastmetadata_session_tasklet_rtp(impl, mgr, s); + + if (time_measure) { + uint64_t delta_ns = mt_get_tsc(impl) - tsc_s; + mt_stat_u64_update(&s->stat_time, delta_ns); + } + tx_fastmetadata_session_put(mgr, sidx); + } + + return pending; +} + +static int tx_fastmetadata_sessions_mgr_uinit_hw( + struct st_tx_fastmetadata_sessions_mgr* mgr, enum mtl_port port) { + if (mgr->ring[port]) { + rte_ring_free(mgr->ring[port]); + mgr->ring[port] = NULL; + } + if (mgr->queue[port]) { + struct rte_mbuf* pad = mt_get_pad(mgr->parent, port); + /* flush all the pkts in the tx ring desc */ + if (pad) mt_txq_flush(mgr->queue[port], pad); + mt_txq_put(mgr->queue[port]); + mgr->queue[port] = NULL; + } + + dbg("%s(%d,%d), succ\n", __func__, mgr->idx, port); + return 0; +} + +static int tx_fastmetadata_sessions_mgr_init_hw( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_sessions_mgr* mgr, + enum mtl_port port) { + unsigned int flags, count; + struct rte_ring* ring; + char ring_name[32]; + int mgr_idx = mgr->idx; + + if (mgr->queue[port]) return 0; /* init already */ + + struct mt_txq_flow flow; + memset(&flow, 0, sizeof(flow)); + mgr->queue[port] = mt_txq_get(impl, port, &flow); + if (!mgr->queue[port]) { + return -EIO; + } + + snprintf(ring_name, 32, "%sM%dP%d", ST_TX_FASTMETADATA_PREFIX, mgr_idx, port); + flags = RING_F_MP_HTS_ENQ | RING_F_SC_DEQ; /* multi-producer and single-consumer */ + count = ST_TX_FMD_SESSIONS_RING_SIZE; + ring = rte_ring_create(ring_name, count, mgr->socket_id, flags); + if (!ring) { + err("%s(%d), rte_ring_create fail for port %d\n", __func__, mgr_idx, port); + tx_fastmetadata_sessions_mgr_uinit_hw(mgr, port); + return -ENOMEM; + } + mgr->ring[port] = ring; + info("%s(%d,%d), succ, queue %d\n", __func__, mgr_idx, port, + mt_txq_queue_id(mgr->queue[port])); + + return 0; +} + +static int tx_fastmetadata_session_sq_flush_port( + struct st_tx_fastmetadata_sessions_mgr* mgr, enum mtl_port port) { + struct mtl_main_impl* impl = mgr->parent; + int ret; + int burst_pkts = mt_if_nb_tx_desc(impl, port); + struct rte_mbuf* pad = mt_get_pad(impl, port); + + for (int i = 0; i < burst_pkts; i++) { + rte_mbuf_refcnt_update(pad, 1); + int retry = 0; + do { + ret = rte_ring_mp_enqueue(mgr->ring[port], (void*)pad); + if (ret != 0) { + dbg("%s(%d), timeout at %d, ret %d\n", __func__, mgr->idx, i, ret); + retry++; + if (retry > 100) { + err("%s(%d), timeout at %d\n", __func__, mgr->idx, i); + return -EIO; + } + mt_sleep_ms(1); + } + } while (ret != 0); + } + + return 0; +} + +/* wa to flush the fastmetadata transmitter tx queue */ +static int tx_fastmetadata_session_flush(struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + int mgr_idx = mgr->idx, s_idx = s->idx; + + if (!s->shared_queue) return 0; /* skip as not shared queue */ + + for (int i = 0; i < 
MTL_SESSION_PORT_MAX; i++) { + struct rte_mempool* pool = s->mbuf_mempool_hdr[i]; + if (pool && rte_mempool_in_use_count(pool) && + rte_atomic32_read(&mgr->transmitter_started)) { + info("%s(%d,%d), start to flush port %d\n", __func__, mgr_idx, s_idx, i); + tx_fastmetadata_session_sq_flush_port(mgr, mt_port_logic2phy(s->port_maps, i)); + info("%s(%d,%d), flush port %d end\n", __func__, mgr_idx, s_idx, i); + + int retry = 100; /* max 1000ms */ + while (retry > 0) { + retry--; + if (!rte_mempool_in_use_count(pool)) break; + mt_sleep_ms(10); + } + info("%s(%d,%d), check in_use retry %d\n", __func__, mgr_idx, s_idx, retry); + } + } + + return 0; +} + +int tx_fastmetadata_session_mempool_free(struct st_tx_fastmetadata_session_impl* s) { + int ret; + + if (s->mbuf_mempool_chain && !s->tx_mono_pool) { + ret = mt_mempool_free(s->mbuf_mempool_chain); + if (ret >= 0) s->mbuf_mempool_chain = NULL; + } + + for (int i = 0; i < MTL_SESSION_PORT_MAX; i++) { + if (s->mbuf_mempool_hdr[i] && !s->tx_mono_pool) { + ret = mt_mempool_free(s->mbuf_mempool_hdr[i]); + if (ret >= 0) s->mbuf_mempool_hdr[i] = NULL; + } + } + + return 0; +} + +static bool tx_fastmetadata_session_has_chain_buf( + struct st_tx_fastmetadata_session_impl* s) { + struct st41_tx_ops* ops = &s->ops; + int num_ports = ops->num_port; + + for (int port = 0; port < num_ports; port++) { + if (!s->eth_has_chain[port]) return false; + } + + /* all ports capable chain */ + return true; +} + +static int tx_fastmetadata_session_mempool_init( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + struct st41_tx_ops* ops = &s->ops; + int num_port = ops->num_port, idx = s->idx; + enum mtl_port port; + unsigned int n; + + uint16_t hdr_room_size = sizeof(struct mt_udp_hdr); + uint16_t chain_room_size = ST_PKT_MAX_ETHER_BYTES - hdr_room_size; + + if (s->tx_no_chain) { + hdr_room_size += chain_room_size; /* enlarge hdr to attach chain */ + } + + for (int i = 0; i < num_port; i++) { + port = mt_port_logic2phy(s->port_maps, i); + + if (s->tx_mono_pool) { + s->mbuf_mempool_hdr[i] = mt_sys_tx_mempool(impl, port); + info("%s(%d), use tx mono hdr mempool(%p) for port %d\n", __func__, idx, + s->mbuf_mempool_hdr[i], i); + } else if (s->mbuf_mempool_hdr[i]) { + warn("%s(%d), use previous hdr mempool for port %d\n", __func__, idx, i); + } else { + n = mt_if_nb_tx_desc(impl, port) + ST_TX_FMD_SESSIONS_RING_SIZE; + if (ops->type == ST41_TYPE_RTP_LEVEL) n += ops->rtp_ring_size; + char pool_name[32]; + snprintf(pool_name, 32, "%sM%dS%dP%d_HDR", ST_TX_FASTMETADATA_PREFIX, mgr->idx, idx, + i); + struct rte_mempool* mbuf_pool = mt_mempool_create_by_socket( + impl, pool_name, n, MT_MBUF_CACHE_SIZE, sizeof(struct mt_muf_priv_data), + hdr_room_size, s->socket_id); + if (!mbuf_pool) { + tx_fastmetadata_session_mempool_free(s); + return -ENOMEM; + } + s->mbuf_mempool_hdr[i] = mbuf_pool; + } + } + + /* allocate payload(chain) pool */ + if (!s->tx_no_chain) { + port = mt_port_logic2phy(s->port_maps, MTL_SESSION_PORT_P); + n = mt_if_nb_tx_desc(impl, port) + ST_TX_FMD_SESSIONS_RING_SIZE; + if (ops->type == ST41_TYPE_RTP_LEVEL) n += ops->rtp_ring_size; + + if (s->tx_mono_pool) { + s->mbuf_mempool_chain = mt_sys_tx_mempool(impl, port); + info("%s(%d), use tx mono chain mempool(%p)\n", __func__, idx, + s->mbuf_mempool_chain); + } else if (s->mbuf_mempool_chain) { + warn("%s(%d), use previous chain mempool\n", __func__, idx); + } else { + char pool_name[32]; + snprintf(pool_name, 32, "%sM%dS%d_CHAIN", 
ST_TX_FASTMETADATA_PREFIX, mgr->idx, idx); + struct rte_mempool* mbuf_pool = mt_mempool_create_by_socket( + impl, pool_name, n, MT_MBUF_CACHE_SIZE, sizeof(struct mt_muf_priv_data), + chain_room_size, s->socket_id); + if (!mbuf_pool) { + tx_fastmetadata_session_mempool_free(s); + return -ENOMEM; + } + s->mbuf_mempool_chain = mbuf_pool; + } + } + + return 0; +} + +static int tx_fastmetadata_session_init_rtp(struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + char ring_name[32]; + struct rte_ring* ring; + unsigned int flags, count = s->ops.rtp_ring_size; + int mgr_idx = mgr->idx, idx = s->idx; + + snprintf(ring_name, 32, "%sM%dS%d_PKT", ST_TX_FASTMETADATA_PREFIX, mgr_idx, idx); + flags = RING_F_SP_ENQ | RING_F_SC_DEQ; /* single-producer and single-consumer */ + ring = rte_ring_create(ring_name, count, s->socket_id, flags); + if (!ring) { + err("%s(%d,%d), rte_ring_create fail\n", __func__, mgr_idx, idx); + tx_fastmetadata_session_mempool_free(s); + return -ENOMEM; + } + s->packet_ring = ring; + info("%s(%d,%d), succ\n", __func__, mgr_idx, idx); + return 0; +} + +static int tx_fastmetadata_session_uinit_sw(struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + int idx = s->idx, num_port = s->ops.num_port; + + for (int port = 0; port < num_port; port++) { + if (s->inflight[port]) { + info("%s(%d), free inflight buf for port %d\n", __func__, idx, port); + rte_pktmbuf_free(s->inflight[port]); + s->inflight[port] = NULL; + } + } + + if (s->packet_ring) { + mt_ring_dequeue_clean(s->packet_ring); + rte_ring_free(s->packet_ring); + s->packet_ring = NULL; + } + + tx_fastmetadata_session_flush(mgr, s); + tx_fastmetadata_session_mempool_free(s); + + tx_fastmetadata_session_free_frames(s); + + return 0; +} + +static int tx_fastmetadata_session_init_sw(struct mtl_main_impl* impl, + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + struct st41_tx_ops* ops = &s->ops; + int idx = s->idx, ret; + + /* free the pool if any in previous session */ + tx_fastmetadata_session_mempool_free(s); + ret = tx_fastmetadata_session_mempool_init(impl, mgr, s); + if (ret < 0) { + err("%s(%d), fail %d\n", __func__, idx, ret); + tx_fastmetadata_session_uinit_sw(mgr, s); + return ret; + } + + if (ops->type == ST41_TYPE_RTP_LEVEL) { + ret = tx_fastmetadata_session_init_rtp(mgr, s); + } else { + ret = tx_fastmetadata_session_alloc_frames(s); + } + if (ret < 0) { + err("%s(%d), fail %d\n", __func__, idx, ret); + tx_fastmetadata_session_uinit_sw(mgr, s); + return ret; + } + + return 0; +} + +static int tx_fastmetadata_session_uinit_queue( + struct mtl_main_impl* impl, struct st_tx_fastmetadata_session_impl* s) { + MTL_MAY_UNUSED(impl); + + for (int i = 0; i < s->ops.num_port; i++) { + enum mtl_port port = mt_port_logic2phy(s->port_maps, i); + + if (s->queue[i]) { + mt_txq_flush(s->queue[i], mt_get_pad(impl, port)); + mt_txq_put(s->queue[i]); + s->queue[i] = NULL; + } + } + return 0; +} + +static int tx_fastmetadata_session_init_queue(struct mtl_main_impl* impl, + struct st_tx_fastmetadata_session_impl* s) { + int idx = s->idx; + enum mtl_port port; + uint16_t queue_id; + + for (int i = 0; i < s->ops.num_port; i++) { + port = mt_port_logic2phy(s->port_maps, i); + + struct mt_txq_flow flow; + memset(&flow, 0, sizeof(flow)); + mtl_memcpy(&flow.dip_addr, &s->ops.dip_addr[i], MTL_IP_ADDR_LEN); + flow.dst_port = s->ops.udp_port[i]; + flow.gso_sz = ST_PKT_MAX_ETHER_BYTES; + +#ifdef MTL_HAS_RDMA_BACKEND + int num_mrs 
= 1; /* always no tx chain for rdma_ud fmd */ + void* mrs_bufs[num_mrs]; + size_t mrs_sizes[num_mrs]; + if (mt_pmd_is_rdma_ud(impl, port)) { + /* register mempool memory to rdma */ + struct rte_mempool* pool = s->mbuf_mempool_hdr[i]; + mrs_bufs[0] = mt_mempool_mem_addr(pool); + mrs_sizes[0] = mt_mempool_mem_size(pool); + flow.num_mrs = num_mrs; + flow.mrs_bufs = mrs_bufs; + flow.mrs_sizes = mrs_sizes; + } +#endif + + s->queue[i] = mt_txq_get(impl, port, &flow); + if (!s->queue[i]) { + tx_fastmetadata_session_uinit_queue(impl, s); + return -EIO; + } + queue_id = mt_txq_queue_id(s->queue[i]); + info("%s(%d), port(l:%d,p:%d), queue %d\n", __func__, idx, i, port, queue_id); + } + + return 0; +} + +static int tx_fastmetadata_session_uinit(struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + tx_fastmetadata_session_uinit_queue(mgr->parent, s); + tx_fastmetadata_session_uinit_sw(mgr, s); + return 0; +} + +static int tx_fastmetadata_session_attach(struct mtl_main_impl* impl, + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s, + struct st41_tx_ops* ops) { + int ret; + int idx = s->idx, num_port = ops->num_port; + char* ports[MTL_SESSION_PORT_MAX]; + bool rdma_ud = false; + + for (int i = 0; i < num_port; i++) ports[i] = ops->port[i]; + ret = mt_build_port_map(impl, ports, s->port_maps, num_port); + if (ret < 0) return ret; + + /* use dedicated queue for rdma_ud */ + for (int i = 0; i < num_port; i++) { + enum mtl_port port = mt_port_logic2phy(s->port_maps, i); + if (mt_pmd_is_rdma_ud(impl, port)) rdma_ud = true; + } + + s->mgr = mgr; + if (ops->name) { + snprintf(s->ops_name, sizeof(s->ops_name), "%s", ops->name); + } else { + snprintf(s->ops_name, sizeof(s->ops_name), "TX_FMD_M%dS%d", mgr->idx, idx); + } + s->ops = *ops; + + /* if disable shared queue */ + s->shared_queue = true; + if (ops->flags & ST41_TX_FLAG_DEDICATE_QUEUE) s->shared_queue = false; + if (rdma_ud) s->shared_queue = false; + + for (int i = 0; i < num_port; i++) { + s->st41_dst_port[i] = (ops->udp_port[i]) ? (ops->udp_port[i]) : (10200 + idx * 2); + if (mt_user_random_src_port(impl)) + s->st41_src_port[i] = mt_random_port(s->st41_dst_port[i]); + else + s->st41_src_port[i] = + (ops->udp_src_port[i]) ? 
(ops->udp_src_port[i]) : s->st41_dst_port[i]; + enum mtl_port port = mt_port_logic2phy(s->port_maps, i); + s->eth_ipv4_cksum_offload[i] = mt_if_has_offload_ipv4_cksum(impl, port); + s->eth_has_chain[i] = mt_if_has_multi_seg(impl, port); + + if (s->shared_queue) { + ret = tx_fastmetadata_sessions_mgr_init_hw(impl, mgr, port); + if (ret < 0) { + err("%s(%d), mgr init hw fail for port %d\n", __func__, idx, port); + return ret; + } + } + } + s->tx_mono_pool = mt_user_tx_mono_pool(impl); + /* manually disable chain or any port can't support chain */ + s->tx_no_chain = mt_user_tx_no_chain(impl) || !tx_fastmetadata_session_has_chain_buf(s); + if (rdma_ud) s->tx_no_chain = true; + s->max_pkt_len = ST_PKT_MAX_ETHER_BYTES - sizeof(struct st41_fmd_hdr); + + s->st41_frames_cnt = ops->framebuff_cnt; + + s->st41_frame_stat = ST41_TX_STAT_WAIT_FRAME; + s->st41_frame_idx = 0; + rte_atomic32_set(&s->st41_stat_frame_cnt, 0); + s->stat_last_time = mt_get_monotonic_time(); + mt_stat_u64_init(&s->stat_time); + + for (int i = 0; i < num_port; i++) { + s->inflight[i] = NULL; + s->inflight_cnt[i] = 0; + } + + ret = st_get_fps_timing(ops->fps, &s->fps_tm); + if (ret < 0) { + err("%s(%d), invalid fps %d\n", __func__, idx, ops->fps); + return ret; + } + + ret = tx_fastmetadata_session_init_pacing(s); + if (ret < 0) { + err("%s(%d), init pacing fail %d\n", __func__, idx, ret); + return ret; + } + + for (int i = 0; i < num_port; i++) { + ret = tx_fastmetadata_session_init_hdr(impl, mgr, s, i); + if (ret < 0) { + err("%s(%d), port(%d) init hdr fail %d\n", __func__, idx, i, ret); + return ret; + } + } + + ret = tx_fastmetadata_session_init_sw(impl, mgr, s); + if (ret < 0) { + err("%s(%d), init sw fail %d\n", __func__, idx, ret); + tx_fastmetadata_session_uinit(mgr, s); + return ret; + } + + if (!s->shared_queue) { + ret = tx_fastmetadata_session_init_queue(impl, s); + if (ret < 0) { + err("%s(%d), init dedicated queue fail %d\n", __func__, idx, ret); + tx_fastmetadata_session_uinit(mgr, s); + return ret; + } + } else { + rte_atomic32_inc(&mgr->transmitter_clients); + } + + info("%s(%d), type %d flags 0x%x pt %u, %s\n", __func__, idx, ops->type, ops->flags, + ops->payload_type, ops->interlaced ? 
"interlace" : "progressive"); + return 0; +} + +static void tx_fastmetadata_session_stat(struct st_tx_fastmetadata_session_impl* s) { + int idx = s->idx; + int frame_cnt = rte_atomic32_read(&s->st41_stat_frame_cnt); + uint64_t cur_time_ns = mt_get_monotonic_time(); + double time_sec = (double)(cur_time_ns - s->stat_last_time) / NS_PER_S; + double framerate = frame_cnt / time_sec; + + rte_atomic32_set(&s->st41_stat_frame_cnt, 0); + s->stat_last_time = cur_time_ns; + + notice("TX_FMD_SESSION(%d:%s): fps %f frames %d pkts %d:%d\n", idx, s->ops_name, + framerate, frame_cnt, s->st41_stat_pkt_cnt[MTL_SESSION_PORT_P], + s->st41_stat_pkt_cnt[MTL_SESSION_PORT_R]); + s->st41_stat_pkt_cnt[MTL_SESSION_PORT_P] = 0; + s->st41_stat_pkt_cnt[MTL_SESSION_PORT_R] = 0; + + if (s->stat_epoch_mismatch) { + notice("TX_FMD_SESSION(%d): st41 epoch mismatch %d\n", idx, s->stat_epoch_mismatch); + s->stat_epoch_mismatch = 0; + } + if (s->stat_epoch_drop) { + notice("TX_FMD_SESSION(%d): epoch drop %u\n", idx, s->stat_epoch_drop); + s->stat_epoch_drop = 0; + } + if (s->stat_epoch_onward) { + notice("TX_FMD_SESSION(%d): epoch onward %d\n", idx, s->stat_epoch_onward); + s->stat_epoch_onward = 0; + } + if (s->stat_exceed_frame_time) { + notice("TX_AUDIO_SESSION(%d): build timeout frames %u\n", idx, + s->stat_exceed_frame_time); + s->stat_exceed_frame_time = 0; + } + if (frame_cnt <= 0) { + warn("TX_FMD_SESSION(%d): build ret %d\n", idx, s->stat_build_ret_code); + } + if (s->ops.interlaced) { + notice("TX_FMD_SESSION(%d): interlace first field %u second field %u\n", idx, + s->stat_interlace_first_field, s->stat_interlace_second_field); + s->stat_interlace_first_field = 0; + s->stat_interlace_second_field = 0; + } + + if (s->stat_error_user_timestamp) { + notice("TX_FMD_SESSION(%d): error user timestamp %u\n", idx, + s->stat_error_user_timestamp); + s->stat_error_user_timestamp = 0; + } + + struct mt_stat_u64* stat_time = &s->stat_time; + if (stat_time->cnt) { + uint64_t avg_ns = stat_time->sum / stat_time->cnt; + notice("TX_FMD_SESSION(%d): tasklet time avg %.2fus max %.2fus min %.2fus\n", idx, + (float)avg_ns / NS_PER_US, (float)stat_time->max / NS_PER_US, + (float)stat_time->min / NS_PER_US); + mt_stat_u64_init(stat_time); + } + if (s->stat_max_next_frame_us > 8 || s->stat_max_notify_frame_us > 8) { + notice("TX_FMD_SESSION(%d): get next frame max %uus, notify done max %uus\n", idx, + s->stat_max_next_frame_us, s->stat_max_notify_frame_us); + } + s->stat_max_next_frame_us = 0; + s->stat_max_notify_frame_us = 0; +} + +static int tx_fastmetadata_session_detach(struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + tx_fastmetadata_session_stat(s); + tx_fastmetadata_session_uinit(mgr, s); + if (s->shared_queue) { + rte_atomic32_dec(&mgr->transmitter_clients); + } + return 0; +} + +static int tx_fastmetadata_session_update_dst(struct mtl_main_impl* impl, + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s, + struct st_tx_dest_info* dest) { + int ret = -EIO; + int idx = s->idx, num_port = s->ops.num_port; + struct st41_tx_ops* ops = &s->ops; + + /* update ip and port */ + for (int i = 0; i < num_port; i++) { + memcpy(ops->dip_addr[i], dest->dip_addr[i], MTL_IP_ADDR_LEN); + ops->udp_port[i] = dest->udp_port[i]; + s->st41_dst_port[i] = (ops->udp_port[i]) ? (ops->udp_port[i]) : (30000 + idx * 2); + s->st41_src_port[i] = + (ops->udp_src_port[i]) ? 
(ops->udp_src_port[i]) : s->st41_dst_port[i]; + + /* update hdr */ + ret = tx_fastmetadata_session_init_hdr(impl, mgr, s, i); + if (ret < 0) { + err("%s(%d), init hdr fail %d\n", __func__, idx, ret); + return ret; + } + } + + return 0; +} + +static int tx_fastmetadata_sessions_mgr_update_dst( + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s, struct st_tx_dest_info* dest) { + int ret = -EIO, midx = mgr->idx, idx = s->idx; + + s = tx_fastmetadata_session_get(mgr, idx); /* get the lock */ + if (!s) { + err("%s(%d,%d), get session fail\n", __func__, midx, idx); + return -EIO; + } + + ret = tx_fastmetadata_session_update_dst(mgr->parent, mgr, s, dest); + tx_fastmetadata_session_put(mgr, idx); + if (ret < 0) { + err("%s(%d,%d), fail %d\n", __func__, midx, idx, ret); + return ret; + } + + return 0; +} + +static int st_tx_fastmetadata_sessions_stat(void* priv) { + struct st_tx_fastmetadata_sessions_mgr* mgr = priv; + struct st_tx_fastmetadata_session_impl* s; + + for (int j = 0; j < mgr->max_idx; j++) { + s = tx_fastmetadata_session_get_timeout(mgr, j, ST_SESSION_STAT_TIMEOUT_US); + if (!s) continue; + tx_fastmetadata_session_stat(s); + tx_fastmetadata_session_put(mgr, j); + } + if (mgr->st41_stat_pkts_burst > 0) { + notice("TX_FMD_MGR, pkts burst %d\n", mgr->st41_stat_pkts_burst); + mgr->st41_stat_pkts_burst = 0; + } else { + int32_t clients = rte_atomic32_read(&mgr->transmitter_clients); + if ((clients > 0) && (mgr->max_idx > 0)) { + for (int i = 0; i < mt_num_ports(mgr->parent); i++) { + warn("TX_FMD_MGR: trs ret %d:%d\n", i, mgr->stat_trs_ret_code[i]); + } + } + } + + return 0; +} + +static int tx_fastmetadata_sessions_mgr_init( + struct mtl_main_impl* impl, struct mtl_sch_impl* sch, + struct st_tx_fastmetadata_sessions_mgr* mgr) { + int idx = sch->idx; + struct mtl_tasklet_ops ops; + int i; + + RTE_BUILD_BUG_ON(sizeof(struct st41_fmd_hdr) != 58); + + mgr->parent = impl; + mgr->idx = idx; + mgr->socket_id = mt_sch_socket_id(sch); + + for (i = 0; i < ST_MAX_TX_FMD_SESSIONS; i++) { + rte_spinlock_init(&mgr->mutex[i]); + } + + memset(&ops, 0x0, sizeof(ops)); + ops.priv = mgr; + ops.name = "tx_fastmetadata_sessions_mgr"; + ops.start = tx_fastmetadata_sessions_tasklet_start; + ops.handler = tx_fastmetadata_sessions_tasklet_handler; + + mgr->tasklet = mtl_sch_register_tasklet(sch, &ops); + if (!mgr->tasklet) { + err("%s(%d), mtl_sch_register_tasklet fail\n", __func__, idx); + return -EIO; + } + + mt_stat_register(mgr->parent, st_tx_fastmetadata_sessions_stat, mgr, "tx_fmd"); + info("%s(%d), succ\n", __func__, idx); + return 0; +} + +static struct st_tx_fastmetadata_session_impl* tx_fastmetadata_sessions_mgr_attach( + struct mtl_sch_impl* sch, struct st41_tx_ops* ops) { + struct st_tx_fastmetadata_sessions_mgr* mgr = &sch->tx_fmd_mgr; + int midx = mgr->idx; + int ret; + struct st_tx_fastmetadata_session_impl* s; + int socket = mt_sch_socket_id(sch); + + /* find one empty slot in the mgr */ + for (int i = 0; i < ST_MAX_TX_FMD_SESSIONS; i++) { + if (!tx_fastmetadata_session_get_empty(mgr, i)) continue; + + s = mt_rte_zmalloc_socket(sizeof(*s), socket); + if (!s) { + err("%s(%d), session malloc fail on %d\n", __func__, midx, i); + tx_fastmetadata_session_put(mgr, i); + return NULL; + } + s->socket_id = socket; + ret = tx_fastmetadata_session_init(mgr, s, i); + if (ret < 0) { + err("%s(%d), init fail on %d\n", __func__, midx, i); + tx_fastmetadata_session_put(mgr, i); + mt_rte_free(s); + return NULL; + } + ret = tx_fastmetadata_session_attach(mgr->parent, mgr, s, 
ops); + if (ret < 0) { + err("%s(%d), attach fail on %d\n", __func__, midx, i); + tx_fastmetadata_session_put(mgr, i); + mt_rte_free(s); + return NULL; + } + + mgr->sessions[i] = s; + mgr->max_idx = RTE_MAX(mgr->max_idx, i + 1); + tx_fastmetadata_session_put(mgr, i); + return s; + } + + err("%s(%d), fail\n", __func__, midx); + return NULL; +} + +static int tx_fastmetadata_sessions_mgr_detach( + struct st_tx_fastmetadata_sessions_mgr* mgr, + struct st_tx_fastmetadata_session_impl* s) { + int midx = mgr->idx; + int idx = s->idx; + + s = tx_fastmetadata_session_get(mgr, idx); /* get the lock */ + if (!s) { + err("%s(%d,%d), get session fail\n", __func__, midx, idx); + return -EIO; + } + + tx_fastmetadata_session_detach(mgr, s); + mgr->sessions[idx] = NULL; + mt_rte_free(s); + + tx_fastmetadata_session_put(mgr, idx); + + return 0; +} + +static int tx_fastmetadata_sessions_mgr_update( + struct st_tx_fastmetadata_sessions_mgr* mgr) { + int max_idx = 0; + + for (int i = 0; i < ST_MAX_TX_FMD_SESSIONS; i++) { + if (mgr->sessions[i]) max_idx = i + 1; + } + + mgr->max_idx = max_idx; + return 0; +} + +static int tx_fastmetadata_sessions_mgr_uinit( + struct st_tx_fastmetadata_sessions_mgr* mgr) { + int m_idx = mgr->idx; + struct mtl_main_impl* impl = mgr->parent; + struct st_tx_fastmetadata_session_impl* s; + + mt_stat_unregister(mgr->parent, st_tx_fastmetadata_sessions_stat, mgr); + + if (mgr->tasklet) { + mtl_sch_unregister_tasklet(mgr->tasklet); + mgr->tasklet = NULL; + } + + for (int i = 0; i < ST_MAX_TX_FMD_SESSIONS; i++) { + s = tx_fastmetadata_session_get(mgr, i); + if (!s) continue; + + warn("%s(%d), session %d still attached\n", __func__, m_idx, i); + tx_fastmetadata_sessions_mgr_detach(mgr, s); + tx_fastmetadata_session_put(mgr, i); + } + + for (int i = 0; i < mt_num_ports(impl); i++) { + tx_fastmetadata_sessions_mgr_uinit_hw(mgr, i); + } + + info("%s(%d), succ\n", __func__, m_idx); + return 0; +} + +static int tx_fastmetadata_ops_check(struct st41_tx_ops* ops) { + int num_ports = ops->num_port, ret; + uint8_t* ip = NULL; + + if ((num_ports > MTL_SESSION_PORT_MAX) || (num_ports <= 0)) { + err("%s, invalid num_ports %d\n", __func__, num_ports); + return -EINVAL; + } + + for (int i = 0; i < num_ports; i++) { + ip = ops->dip_addr[i]; + ret = mt_ip_addr_check(ip); + if (ret < 0) { + err("%s(%d), invalid ip %d.%d.%d.%d\n", __func__, i, ip[0], ip[1], ip[2], ip[3]); + return -EINVAL; + } + } + + if (num_ports > 1) { + if (0 == memcmp(ops->dip_addr[0], ops->dip_addr[1], MTL_IP_ADDR_LEN)) { + err("%s, same %d.%d.%d.%d for both ip\n", __func__, ip[0], ip[1], ip[2], ip[3]); + return -EINVAL; + } + } + + if (ops->type == ST41_TYPE_FRAME_LEVEL) { + if (ops->framebuff_cnt < 1) { + err("%s, invalid framebuff_cnt %d\n", __func__, ops->framebuff_cnt); + return -EINVAL; + } + if (!ops->get_next_frame) { + err("%s, pls set get_next_frame\n", __func__); + return -EINVAL; + } + } else if (ops->type == ST41_TYPE_RTP_LEVEL) { + if (ops->rtp_ring_size <= 0) { + err("%s, invalid rtp_ring_size %d\n", __func__, ops->rtp_ring_size); + return -EINVAL; + } + if (!ops->notify_rtp_done) { + err("%s, pls set notify_rtp_done\n", __func__); + return -EINVAL; + } + } + + if (!st_is_valid_payload_type(ops->payload_type)) { + err("%s, invalid payload_type %d\n", __func__, ops->payload_type); + return -EINVAL; + } + + return 0; +} + +static int st_tx_fmd_init(struct mtl_main_impl* impl, struct mtl_sch_impl* sch) { + int ret; + + if (sch->tx_fmd_init) return 0; + + /* create tx fastmetadata context */ + ret = 
tx_fastmetadata_sessions_mgr_init(impl, sch, &sch->tx_fmd_mgr); + if (ret < 0) { + err("%s, tx_fastmetadata_sessions_mgr_init fail\n", __func__); + return ret; + } + ret = st_fastmetadata_transmitter_init(impl, sch, &sch->tx_fmd_mgr, &sch->fmd_trs); + if (ret < 0) { + tx_fastmetadata_sessions_mgr_uinit(&sch->tx_fmd_mgr); + err("%s, st_fastmetadata_transmitter_init fail %d\n", __func__, ret); + return ret; + } + + sch->tx_fmd_init = true; + return 0; +} + +int st_tx_fastmetadata_sessions_sch_uinit(struct mtl_sch_impl* sch) { + if (!sch->tx_fmd_init) return 0; + + /* free tx fastmetadata context */ + st_fastmetadata_transmitter_uinit(&sch->fmd_trs); + tx_fastmetadata_sessions_mgr_uinit(&sch->tx_fmd_mgr); + + sch->tx_fmd_init = false; + return 0; +} + +st41_tx_handle st41_tx_create(mtl_handle mt, struct st41_tx_ops* ops) { + struct mtl_main_impl* impl = mt; + struct st_tx_fastmetadata_session_handle_impl* s_impl; + struct st_tx_fastmetadata_session_impl* s; + struct mtl_sch_impl* sch; + int quota_mbs, ret; + + notice("%s, start for %s\n", __func__, mt_string_safe(ops->name)); + + if (impl->type != MT_HANDLE_MAIN) { + err("%s, invalid type %d\n", __func__, impl->type); + return NULL; + } + + ret = tx_fastmetadata_ops_check(ops); + if (ret < 0) { + err("%s, st_tx_fastmetadata_ops_check fail %d\n", __func__, ret); + return NULL; + } + + enum mtl_port port = mt_port_by_name(impl, ops->port[MTL_SESSION_PORT_P]); + if (port >= MTL_PORT_MAX) return NULL; + int socket = mt_socket_id(impl, port); + + s_impl = mt_rte_zmalloc_socket(sizeof(*s_impl), socket); + if (!s_impl) { + err("%s, s_impl malloc fail on socket %d\n", __func__, socket); + return NULL; + } + + quota_mbs = 0; + sch = + mt_sch_get_by_socket(impl, quota_mbs, MT_SCH_TYPE_DEFAULT, MT_SCH_MASK_ALL, socket); + if (!sch) { + mt_rte_free(s_impl); + err("%s, get sch fail\n", __func__); + return NULL; + } + + mt_pthread_mutex_lock(&sch->tx_fmd_mgr_mutex); + ret = st_tx_fmd_init(impl, sch); + mt_pthread_mutex_unlock(&sch->tx_fmd_mgr_mutex); + if (ret < 0) { + err("%s, st_tx_fmd_init fail %d\n", __func__, ret); + mt_sch_put(sch, quota_mbs); + mt_rte_free(s_impl); + return NULL; + } + + mt_pthread_mutex_lock(&sch->tx_fmd_mgr_mutex); + s = tx_fastmetadata_sessions_mgr_attach(sch, ops); + mt_pthread_mutex_unlock(&sch->tx_fmd_mgr_mutex); + if (!s) { + err("%s, tx_fastmetadata_sessions_mgr_attach fail\n", __func__); + mt_sch_put(sch, quota_mbs); + mt_rte_free(s_impl); + return NULL; + } + + s_impl->parent = impl; + s_impl->type = MT_HANDLE_TX_FMD; + s_impl->impl = s; + s_impl->sch = sch; + s_impl->quota_mbs = quota_mbs; + + rte_atomic32_inc(&impl->st41_tx_sessions_cnt); + notice("%s(%d,%d), succ on %p\n", __func__, sch->idx, s->idx, s); + return s_impl; +} + +void* st41_tx_get_mbuf(st41_tx_handle handle, void** usrptr) { + struct st_tx_fastmetadata_session_handle_impl* s_impl = handle; + struct rte_mbuf* pkt = NULL; + struct st_tx_fastmetadata_session_impl* s; + int idx; + struct rte_ring* packet_ring; + + if (s_impl->type != MT_HANDLE_TX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return NULL; + } + + s = s_impl->impl; + idx = s->idx; + packet_ring = s->packet_ring; + if (!packet_ring) { + err("%s(%d), packet ring is not created\n", __func__, idx); + return NULL; + } + + if (rte_ring_full(packet_ring)) { + dbg("%s(%d), packet ring is full\n", __func__, idx); + return NULL; + } + + struct rte_mempool* mp = + s->tx_no_chain ? 
s->mbuf_mempool_hdr[MTL_SESSION_PORT_P] : s->mbuf_mempool_chain; + pkt = rte_pktmbuf_alloc(mp); + if (!pkt) { + dbg("%s(%d), pkt alloc fail\n", __func__, idx); + return NULL; + } + + size_t hdr_offset = s->tx_no_chain ? sizeof(struct mt_udp_hdr) : 0; + *usrptr = rte_pktmbuf_mtod_offset(pkt, void*, hdr_offset); + return pkt; +} + +int st41_tx_put_mbuf(st41_tx_handle handle, void* mbuf, uint16_t len) { + struct st_tx_fastmetadata_session_handle_impl* s_impl = handle; + struct rte_mbuf* pkt = (struct rte_mbuf*)mbuf; + struct st_tx_fastmetadata_session_impl* s; + struct rte_ring* packet_ring; + int idx, ret; + + if (s_impl->type != MT_HANDLE_TX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return -EIO; + } + + if (!mt_rtp_len_valid(len)) { + if (len) err("%s, invalid len %d\n", __func__, len); + rte_pktmbuf_free(mbuf); + return -EIO; + } + + s = s_impl->impl; + idx = s->idx; + packet_ring = s->packet_ring; + if (!packet_ring) { + err("%s(%d), packet ring is not created\n", __func__, idx); + rte_pktmbuf_free(mbuf); + return -EIO; + } + + if (s->tx_no_chain) len += sizeof(struct mt_udp_hdr); + + pkt->data_len = pkt->pkt_len = len; + ret = rte_ring_sp_enqueue(packet_ring, (void*)pkt); + if (ret < 0) { + err("%s(%d), can not enqueue to the rte ring\n", __func__, idx); + rte_pktmbuf_free(mbuf); + return -EBUSY; + } + + return 0; +} + +int st41_tx_update_destination(st41_tx_handle handle, struct st_tx_dest_info* dst) { + struct st_tx_fastmetadata_session_handle_impl* s_impl = handle; + struct st_tx_fastmetadata_session_impl* s; + struct mtl_sch_impl* sch; + int idx, ret, sch_idx; + + if (s_impl->type != MT_HANDLE_TX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return -EIO; + } + + s = s_impl->impl; + idx = s->idx; + sch = s_impl->sch; + sch_idx = sch->idx; + + ret = st_tx_dest_info_check(dst, s->ops.num_port); + if (ret < 0) return ret; + + ret = tx_fastmetadata_sessions_mgr_update_dst(&sch->tx_fmd_mgr, s, dst); + if (ret < 0) { + err("%s(%d,%d), online update fail %d\n", __func__, sch_idx, idx, ret); + return ret; + } + + info("%s(%d,%d), succ\n", __func__, sch_idx, idx); + return 0; +} + +int st41_tx_free(st41_tx_handle handle) { + struct st_tx_fastmetadata_session_handle_impl* s_impl = handle; + struct st_tx_fastmetadata_session_impl* s; + struct mtl_sch_impl* sch; + struct mtl_main_impl* impl; + int ret, idx; + int sch_idx; + + if (s_impl->type != MT_HANDLE_TX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return -EIO; + } + + impl = s_impl->parent; + s = s_impl->impl; + idx = s->idx; + sch = s_impl->sch; + sch_idx = sch->idx; + notice("%s(%d,%d), start\n", __func__, sch_idx, idx); + + mt_pthread_mutex_lock(&sch->tx_fmd_mgr_mutex); + ret = tx_fastmetadata_sessions_mgr_detach(&sch->tx_fmd_mgr, s); + mt_pthread_mutex_unlock(&sch->tx_fmd_mgr_mutex); + if (ret < 0) err("%s(%d), tx_fastmetadata_sessions_mgr_detach fail\n", __func__, idx); + + ret = mt_sch_put(sch, s_impl->quota_mbs); + if (ret < 0) err("%s(%d, %d), mt_sch_put fail\n", __func__, sch_idx, idx); + + mt_rte_free(s_impl); + + /* update max idx */ + mt_pthread_mutex_lock(&sch->tx_fmd_mgr_mutex); + tx_fastmetadata_sessions_mgr_update(&sch->tx_fmd_mgr); + mt_pthread_mutex_unlock(&sch->tx_fmd_mgr_mutex); + + rte_atomic32_dec(&impl->st41_tx_sessions_cnt); + notice("%s(%d,%d), succ\n", __func__, sch_idx, idx); + return 0; +} + +void* st41_tx_get_framebuffer(st41_tx_handle handle, uint16_t idx) { + struct st_tx_fastmetadata_session_handle_impl* s_impl = handle; + struct 
st_tx_fastmetadata_session_impl* s; + + if (s_impl->type != MT_HANDLE_TX_FMD) { + err("%s, invalid type %d\n", __func__, s_impl->type); + return NULL; + } + + s = s_impl->impl; + if (idx >= s->st41_frames_cnt) { + err("%s, invalid idx %d, should be in range [0, %d)\n", __func__, idx, + s->st41_frames_cnt); + return NULL; + } + if (!s->st41_frames) { + err("%s, st41_frames not allocated\n", __func__); + return NULL; + } + + struct st_frame_trans* frame_info = &s->st41_frames[idx]; + + return frame_info->addr; +} diff --git a/lib/src/st2110/st_tx_fastmetadata_session.h b/lib/src/st2110/st_tx_fastmetadata_session.h new file mode 100644 index 000000000..1169ee1e0 --- /dev/null +++ b/lib/src/st2110/st_tx_fastmetadata_session.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileCopyrightText: Copyright (c) 2024 Intel Corporation + */ + +#ifndef _ST_LIB_TX_FASTMETADATA_SESSION_HEAD_H_ +#define _ST_LIB_TX_FASTMETADATA_SESSION_HEAD_H_ + +#include "st_main.h" + +#define ST_TX_FASTMETADATA_PREFIX "TC_" + +int st_tx_fastmetadata_sessions_sch_uinit(struct mtl_sch_impl* sch); + +#endif /* _ST_LIB_TX_FASTMETADATA_SESSION_HEAD_H_ */
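
A minimal sketch (not part of the patch) of how an application might configure a frame-level session so that it passes tx_fastmetadata_ops_check() above. Only the fields referenced in this hunk (type, num_port, port, dip_addr, udp_port, payload_type, fps, framebuff_cnt, get_next_frame, name) are taken from the patch itself; the public header name mtl/st41_api.h, the priv field, the get_next_frame signature and struct st41_tx_frame_meta are assumptions made by analogy with the other ST 2110 frame-level TX paths.

#include <stdio.h>
#include <string.h>
#include <mtl/st41_api.h> /* assumed public header, by analogy with st40_api.h */

struct ex_fmd_app { /* hypothetical application context */
  uint16_t producer_idx;
};

/* assumed callback signature, mirroring the other frame-level TX paths */
static int ex_fmd_next_frame(void* priv, uint16_t* next_frame_idx,
                             struct st41_tx_frame_meta* meta) {
  struct ex_fmd_app* app = priv;
  (void)meta;
  *next_frame_idx = app->producer_idx; /* hand the next ready framebuffer to the lib */
  return 0;
}

static st41_tx_handle ex_fmd_tx_open(mtl_handle mt, struct ex_fmd_app* app) {
  struct st41_tx_ops ops;
  memset(&ops, 0, sizeof(ops));
  ops.name = "fmd_tx_sketch";
  ops.priv = app;
  ops.num_port = 1; /* 1..MTL_SESSION_PORT_MAX, enforced by the ops check */
  snprintf(ops.port[MTL_SESSION_PORT_P], MTL_PORT_MAX_LEN, "%s", "0000:4b:00.0");
  uint8_t dip[MTL_IP_ADDR_LEN] = {239, 168, 85, 41}; /* example destination */
  memcpy(ops.dip_addr[MTL_SESSION_PORT_P], dip, MTL_IP_ADDR_LEN);
  ops.udp_port[MTL_SESSION_PORT_P] = 30000;
  ops.type = ST41_TYPE_FRAME_LEVEL;
  ops.fps = ST_FPS_P59_94;
  ops.payload_type = 113; /* must pass st_is_valid_payload_type() */
  ops.framebuff_cnt = 3;  /* frame level requires at least 1 */
  ops.get_next_frame = ex_fmd_next_frame;
  return st41_tx_create(mt, &ops);
}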
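
For the frame-level path, st41_tx_get_framebuffer() above returns the address of one of the framebuff_cnt buffers allocated at attach time. A sketch of the producer side follows; the internal layout of an ST 2110-41 frame (data items, DIT, K bit) is defined by the public st41 API and not shown in this hunk, so the payload is treated as opaque here.

#include <errno.h>
#include <string.h>

#define EX_FMD_FB_CNT 3 /* must match ops.framebuff_cnt */

static void* ex_fb[EX_FMD_FB_CNT];

static int ex_fmd_map_framebuffers(st41_tx_handle handle) {
  for (uint16_t i = 0; i < EX_FMD_FB_CNT; i++) {
    ex_fb[i] = st41_tx_get_framebuffer(handle, i); /* NULL if idx >= framebuff_cnt */
    if (!ex_fb[i]) return -EIO;
  }
  return 0;
}

static void ex_fmd_fill_frame(uint16_t idx, const void* payload, size_t len) {
  /* a real application would follow the st41 frame layout here */
  memcpy(ex_fb[idx], payload, len);
}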
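
The RTP-level variant replaces the framebuffer fields with a packet ring: tx_fastmetadata_ops_check() requires rtp_ring_size > 0 and a notify_rtp_done callback. A sketch of that configuration, with the callback signature assumed to match the other RTP-level session types:

/* assumed callback signature: the lib signals that ring entries were consumed */
static int ex_fmd_rtp_done(void* priv) {
  (void)priv; /* e.g. wake the application's packet producer thread */
  return 0;
}

static void ex_fmd_ops_rtp_level(struct st41_tx_ops* ops) {
  ops->type = ST41_TYPE_RTP_LEVEL;
  ops->rtp_ring_size = 1024; /* entries in the ring between application and library */
  ops->notify_rtp_done = ex_fmd_rtp_done;
  /* framebuff_cnt and get_next_frame are not used in this mode */
}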
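
On the RTP level the application pulls raw packet buffers from the session and returns them filled: st41_tx_get_mbuf() points usrptr at the area where the RTP packet is to be built (past the pre-reserved Ethernet/IP/UDP headers in the no-chain case, as handled internally above), and st41_tx_put_mbuf() enqueues it with the RTP length. The builder function in this sketch is hypothetical.

#include <errno.h>

/* hypothetical helper: writes RTP header + FMD payload at dst, returns RTP length */
extern uint16_t ex_build_rtp(void* dst);

static int ex_fmd_send_one_rtp(st41_tx_handle handle) {
  void* usrptr = NULL;
  void* mbuf = st41_tx_get_mbuf(handle, &usrptr);
  if (!mbuf) return -EBUSY; /* packet ring full or mempool exhausted, retry later */

  uint16_t len = ex_build_rtp(usrptr);
  return st41_tx_put_mbuf(handle, mbuf, len); /* on failure the lib frees the mbuf itself */
}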
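
Runtime redirection of an active session goes through st41_tx_update_destination(); only the destination IP and UDP port are taken from st_tx_dest_info, as shown in tx_fastmetadata_session_update_dst() above, and the packet headers are rebuilt in place. A short sketch:

#include <string.h>

static int ex_fmd_redirect(st41_tx_handle handle, uint16_t udp_port) {
  struct st_tx_dest_info dst;
  memset(&dst, 0, sizeof(dst));
  uint8_t dip[MTL_IP_ADDR_LEN] = {239, 168, 85, 42}; /* example new destination */
  memcpy(dst.dip_addr[MTL_SESSION_PORT_P], dip, MTL_IP_ADDR_LEN);
  dst.udp_port[MTL_SESSION_PORT_P] = udp_port;
  return st41_tx_update_destination(handle, &dst);
}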
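
Teardown is symmetric with creation: st41_tx_free() detaches the session from the per-scheduler manager, drops the scheduler reference and refreshes the manager's max index, as implemented above. A sketch tying it to the open helper from the first sketch:

#include <stdio.h>

static void ex_fmd_tx_run(mtl_handle mt, struct ex_fmd_app* app) {
  st41_tx_handle handle = ex_fmd_tx_open(mt, app);
  if (!handle) return; /* st41_tx_create() already logged the failure reason */

  /* ... stream until done ... */

  int ret = st41_tx_free(handle);
  if (ret < 0) fprintf(stderr, "st41_tx_free fail %d\n", ret);
}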
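
The RTE_BUILD_BUG_ON() in tx_fastmetadata_sessions_mgr_init() pins sizeof(struct st41_fmd_hdr) at 58 bytes. Assuming the header stacks Ethernet, IPv4, UDP, RTP and a 4-byte ST 2110-41 payload header (the struct definition lives in another hunk of this patch), the arithmetic and the resulting per-packet payload bound look like this:

/* assumed composition of the 58-byte st41_fmd_hdr */
enum {
  EX_ETH_HDR = 14,  /* Ethernet II */
  EX_IP_HDR = 20,   /* IPv4, no options */
  EX_UDP_HDR = 8,
  EX_RTP_HDR = 12,
  EX_ST41_HDR = 4,  /* ST 2110-41 payload header */
  EX_FMD_HDR_BYTES = EX_ETH_HDR + EX_IP_HDR + EX_UDP_HDR + EX_RTP_HDR + EX_ST41_HDR,
};
_Static_assert(EX_FMD_HDR_BYTES == 58, "matches the RTE_BUILD_BUG_ON above");
/* hence s->max_pkt_len = ST_PKT_MAX_ETHER_BYTES - 58 is the per-packet payload budget */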