lavfi/dnn: Extract Common Parts from get_output functions
Frame allocation and filling the TaskItem with execution parameters are common to the three backends. This commit moves that logic into dnn_backend_common.

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
parent 4d627acefa
commit 009b2e5b5e
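For orientation, the following is a minimal sketch of how a backend's get_output path is expected to use the new helper after this patch. It is illustrative only: the get_output_sketch name and the generic backend_model/log_ctx parameters are placeholders rather than code from the patch, and a real backend additionally extracts an inference item from the task and runs the model between the fill and the reads, as the hunks below show.

/* Sketch only (not part of the patch): the usage pattern that
 * ff_dnn_fill_gettingoutput_task() enables in a backend's get_output
 * function.  Assumes the libavfilter/dnn internal headers
 * (dnn_backend_common.h, which provides TaskItem and DNNExecBaseParams)
 * are available; backend_model and log_ctx stand in for a concrete
 * backend's model struct and logging context. */
static DNNReturnType get_output_sketch(void *backend_model, void *log_ctx,
                                       const char *input_name, int input_width, int input_height,
                                       const char *output_name, int *output_width, int *output_height)
{
    DNNReturnType ret = DNN_SUCCESS;
    TaskItem task;
    DNNExecBaseParams exec_params = {
        .input_name   = input_name,
        .output_names = &output_name,
        .nb_output    = 1,
        .in_frame     = NULL,   /* allocated by the helper */
        .out_frame    = NULL,   /* allocated by the helper */
    };

    /* Allocates both frames, stores the input dimensions on the input frame
     * and ends with ff_dnn_fill_task(task, exec_params, backend_model, 0, 0);
     * the two zeros presumably disable async and do_ioproc, matching the
     * per-backend task.async = 0 / task.do_ioproc = 0 assignments this
     * commit removes. */
    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, backend_model,
                                       input_height, input_width, log_ctx) != DNN_SUCCESS)
        return DNN_ERROR;

    /* ... a real backend enqueues the task and executes the model here ... */

    *output_width  = task.out_frame->width;
    *output_height = task.out_frame->height;

    av_frame_free(&task.out_frame);   /* the frames now live in the task; the backend frees them */
    av_frame_free(&task.in_frame);
    return ret;
}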
@@ -142,3 +142,29 @@ DNNAsyncStatusType ff_dnn_get_async_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
     return DAST_SUCCESS;
 }
+
+DNNReturnType ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
+{
+    AVFrame *in_frame = NULL;
+    AVFrame *out_frame = NULL;
+
+    in_frame = av_frame_alloc();
+    if (!in_frame) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
+        return DNN_ERROR;
+    }
+
+    out_frame = av_frame_alloc();
+    if (!out_frame) {
+        av_frame_free(&in_frame);
+        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
+        return DNN_ERROR;
+    }
+
+    in_frame->width = input_width;
+    in_frame->height = input_height;
+    exec_params->in_frame = in_frame;
+    exec_params->out_frame = out_frame;
+
+    return ff_dnn_fill_task(task, exec_params, backend_model, 0, 0);
+}
 
@@ -137,4 +137,20 @@ DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_
  */
 DNNAsyncStatusType ff_dnn_get_async_result_common(Queue *task_queue, AVFrame **in, AVFrame **out);
 
+/**
+ * Allocate input and output frames and fill the Task
+ * with execution parameters.
+ *
+ * @param task pointer to the allocated task
+ * @param exec_params pointer to execution parameters
+ * @param backend_model void pointer to the backend model
+ * @param input_height height of input frame
+ * @param input_width width of input frame
+ * @param ctx pointer to the backend context
+ *
+ * @retval DNN_SUCCESS if successful
+ * @retval DNN_ERROR if allocation fails
+ */
+DNNReturnType ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx);
+
 #endif
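The @retval documentation above covers allocation failures but not frame ownership afterwards. Judging from the definition in the first hunk: for the allocation failures it reports, the helper frees what it had already allocated before returning DNN_ERROR, so the caller has nothing to clean up; on success the frames are attached to the execution parameters and, through ff_dnn_fill_task, to the TaskItem, so later backend failures must release them via the task. That is why the OpenVINO and TensorFlow hunks below switch their error paths to a goto err that frees task.out_frame and task.in_frame, roughly as in this sketch (run_backend_work is a placeholder, not from the patch):

/* Sketch (not from the patch) of the error-path contract the backend hunks
 * below adopt: after a successful fill, the frames are reachable through
 * the TaskItem, so failures free them via task.in_frame / task.out_frame.
 * run_backend_work() is a placeholder for the backend-specific steps. */
static DNNReturnType err_path_sketch(void *backend_model, void *log_ctx,
                                     DNNExecBaseParams *exec_params,
                                     int input_width, int input_height)
{
    DNNReturnType ret;
    TaskItem task;

    if (ff_dnn_fill_gettingoutput_task(&task, exec_params, backend_model,
                                       input_height, input_width, log_ctx) != DNN_SUCCESS)
        return DNN_ERROR;   /* the helper freed what it had allocated */

    if (run_backend_work(&task) != DNN_SUCCESS) {
        ret = DNN_ERROR;
        goto err;           /* do not return directly: the frames must be freed */
    }
    ret = DNN_SUCCESS;

err:
    av_frame_free(&task.out_frame);
    av_frame_free(&task.in_frame);
    return ret;
}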
@@ -640,10 +640,15 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
     OVContext *ctx = &ov_model->ctx;
     TaskItem task;
     OVRequestItem *request;
-    AVFrame *in_frame = NULL;
-    AVFrame *out_frame = NULL;
     IEStatusCode status;
     input_shapes_t input_shapes;
+    DNNExecBaseParams exec_params = {
+        .input_name = input_name,
+        .output_names = &output_name,
+        .nb_output = 1,
+        .in_frame = NULL,
+        .out_frame = NULL,
+    };
 
     if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
         av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
@@ -669,51 +674,29 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
         }
     }
 
-    in_frame = av_frame_alloc();
-    if (!in_frame) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
+    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx) != DNN_SUCCESS) {
         return DNN_ERROR;
     }
-    in_frame->width = input_width;
-    in_frame->height = input_height;
-
-    out_frame = av_frame_alloc();
-    if (!out_frame) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
-        av_frame_free(&in_frame);
-        return DNN_ERROR;
-    }
-
-    task.do_ioproc = 0;
-    task.async = 0;
-    task.input_name = input_name;
-    task.in_frame = in_frame;
-    task.output_names = &output_name;
-    task.out_frame = out_frame;
-    task.nb_output = 1;
-    task.model = ov_model;
 
     if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, NULL) != DNN_SUCCESS) {
-        av_frame_free(&out_frame);
-        av_frame_free(&in_frame);
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
-        return DNN_ERROR;
+        ret = DNN_ERROR;
+        goto err;
     }
 
     request = ff_safe_queue_pop_front(ov_model->request_queue);
     if (!request) {
-        av_frame_free(&out_frame);
-        av_frame_free(&in_frame);
         av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        return DNN_ERROR;
+        ret = DNN_ERROR;
+        goto err;
     }
 
     ret = execute_model_ov(request, ov_model->inference_queue);
-    *output_width = out_frame->width;
-    *output_height = out_frame->height;
-
-    av_frame_free(&out_frame);
-    av_frame_free(&in_frame);
+    *output_width = task.out_frame->width;
+    *output_height = task.out_frame->height;
+err:
+    av_frame_free(&task.out_frame);
+    av_frame_free(&task.in_frame);
     return ret;
 }
 
@@ -310,36 +310,20 @@ static DNNReturnType get_output_tf(void *model, const char *input_name, int inpu
     DNNReturnType ret;
     TFModel *tf_model = model;
     TFContext *ctx = &tf_model->ctx;
-    AVFrame *in_frame = av_frame_alloc();
-    AVFrame *out_frame = NULL;
     TaskItem task;
    TFRequestItem *request;
+    DNNExecBaseParams exec_params = {
+        .input_name = input_name,
+        .output_names = &output_name,
+        .nb_output = 1,
+        .in_frame = NULL,
+        .out_frame = NULL,
+    };
 
-    if (!in_frame) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
-        ret = DNN_ERROR;
+    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx) != DNN_SUCCESS) {
         goto err;
     }
 
-    out_frame = av_frame_alloc();
-    if (!out_frame) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
-        ret = DNN_ERROR;
-        goto err;
-    }
-
-    in_frame->width = input_width;
-    in_frame->height = input_height;
-
-    task.do_ioproc = 0;
-    task.async = 0;
-    task.input_name = input_name;
-    task.in_frame = in_frame;
-    task.output_names = &output_name;
-    task.out_frame = out_frame;
-    task.model = tf_model;
-    task.nb_output = 1;
-
     if (extract_inference_from_task(&task, tf_model->inference_queue) != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         ret = DNN_ERROR;
@@ -354,12 +338,12 @@ static DNNReturnType get_output_tf(void *model, const char *input_name, int inpu
     }
 
     ret = execute_model_tf(request, tf_model->inference_queue);
-    *output_width = out_frame->width;
-    *output_height = out_frame->height;
+    *output_width = task.out_frame->width;
+    *output_height = task.out_frame->height;
 
 err:
-    av_frame_free(&out_frame);
-    av_frame_free(&in_frame);
+    av_frame_free(&task.out_frame);
+    av_frame_free(&task.in_frame);
     return ret;
 }
 