Commit 2558e627 authored by Guo, Yejun; committed by Pedro Arthur

avfilter/dnn: unify the layer load function in native mode

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
parent 3fd5ac7e
...@@ -25,10 +25,7 @@ ...@@ -25,10 +25,7 @@
#include "dnn_backend_native.h" #include "dnn_backend_native.h"
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h"
#include "dnn_backend_native_layer_conv2d.h" #include "dnn_backend_native_layer_conv2d.h"
#include "dnn_backend_native_layer_depth2space.h"
#include "dnn_backend_native_layer_maximum.h"
#include "dnn_backend_native_layers.h" #include "dnn_backend_native_layers.h"
static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output) static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
...@@ -104,13 +101,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename) ...@@ -104,13 +101,9 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
int version, header_size, major_version_expected = 0; int version, header_size, major_version_expected = 0;
ConvolutionalNetwork *network = NULL; ConvolutionalNetwork *network = NULL;
AVIOContext *model_file_context; AVIOContext *model_file_context;
int file_size, dnn_size, kernel_size, i; int file_size, dnn_size, parsed_size;
int32_t layer; int32_t layer;
DNNLayerType layer_type; DNNLayerType layer_type;
ConvolutionalParams *conv_params;
DepthToSpaceParams *depth_to_space_params;
LayerPadParams *pad_params;
DnnLayerMaximumParams *maximum_params;
model = av_malloc(sizeof(DNNModel)); model = av_malloc(sizeof(DNNModel));
if (!model){ if (!model){
...@@ -189,104 +182,21 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename) ...@@ -189,104 +182,21 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
for (layer = 0; layer < network->layers_num; ++layer){ for (layer = 0; layer < network->layers_num; ++layer){
layer_type = (int32_t)avio_rl32(model_file_context); layer_type = (int32_t)avio_rl32(model_file_context);
dnn_size += 4; dnn_size += 4;
network->layers[layer].type = layer_type;
switch (layer_type){ if (layer_type >= DLT_COUNT) {
case DLT_CONV2D:
conv_params = av_malloc(sizeof(ConvolutionalParams));
if (!conv_params){
avio_closep(&model_file_context);
ff_dnn_free_model_native(&model);
return NULL;
}
conv_params->dilation = (int32_t)avio_rl32(model_file_context);
conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
conv_params->activation = (int32_t)avio_rl32(model_file_context);
conv_params->input_num = (int32_t)avio_rl32(model_file_context);
conv_params->output_num = (int32_t)avio_rl32(model_file_context);
conv_params->kernel_size = (int32_t)avio_rl32(model_file_context);
kernel_size = conv_params->input_num * conv_params->output_num *
conv_params->kernel_size * conv_params->kernel_size;
dnn_size += 24 + (kernel_size + conv_params->output_num << 2);
if (dnn_size > file_size || conv_params->input_num <= 0 ||
conv_params->output_num <= 0 || conv_params->kernel_size <= 0){
avio_closep(&model_file_context);
av_freep(&conv_params);
ff_dnn_free_model_native(&model);
return NULL;
}
conv_params->kernel = av_malloc(kernel_size * sizeof(float));
conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
if (!conv_params->kernel || !conv_params->biases){
avio_closep(&model_file_context);
av_freep(&conv_params->kernel);
av_freep(&conv_params->biases);
av_freep(&conv_params);
ff_dnn_free_model_native(&model);
return NULL;
}
for (i = 0; i < kernel_size; ++i){
conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
}
for (i = 0; i < conv_params->output_num; ++i){
conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
}
network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
dnn_size += 8;
network->layers[layer].params = conv_params;
break;
case DLT_DEPTH_TO_SPACE:
depth_to_space_params = av_malloc(sizeof(DepthToSpaceParams));
if (!depth_to_space_params){
avio_closep(&model_file_context);
ff_dnn_free_model_native(&model);
return NULL;
}
depth_to_space_params->block_size = (int32_t)avio_rl32(model_file_context);
dnn_size += 4;
network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
dnn_size += 8;
network->layers[layer].params = depth_to_space_params;
break;
case DLT_MIRROR_PAD:
pad_params = av_malloc(sizeof(LayerPadParams));
if (!pad_params){
avio_closep(&model_file_context);
ff_dnn_free_model_native(&model);
return NULL;
}
pad_params->mode = (int32_t)avio_rl32(model_file_context);
dnn_size += 4;
for (i = 0; i < 4; ++i) {
pad_params->paddings[i][0] = avio_rl32(model_file_context);
pad_params->paddings[i][1] = avio_rl32(model_file_context);
dnn_size += 8;
}
network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
dnn_size += 8;
network->layers[layer].params = pad_params;
break;
case DLT_MAXIMUM:
maximum_params = av_malloc(sizeof(*maximum_params));
if (!maximum_params){
avio_closep(&model_file_context); avio_closep(&model_file_context);
ff_dnn_free_model_native(&model); ff_dnn_free_model_native(&model);
return NULL; return NULL;
} }
maximum_params->val.u32 = avio_rl32(model_file_context);
dnn_size += 4; network->layers[layer].type = layer_type;
network->layers[layer].params = maximum_params; parsed_size = layer_funcs[layer_type].pf_load(&network->layers[layer], model_file_context, file_size);
network->layers[layer].input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context); if (!parsed_size) {
network->layers[layer].output_operand_index = (int32_t)avio_rl32(model_file_context);
dnn_size += 8;
break;
default:
avio_closep(&model_file_context); avio_closep(&model_file_context);
ff_dnn_free_model_native(&model); ff_dnn_free_model_native(&model);
return NULL; return NULL;
} }
dnn_size += parsed_size;
} }
for (int32_t i = 0; i < network->operands_num; ++i){ for (int32_t i = 0; i < network->operands_num; ++i){
...@@ -341,7 +251,7 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output ...@@ -341,7 +251,7 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNData *output
for (layer = 0; layer < network->layers_num; ++layer){ for (layer = 0; layer < network->layers_num; ++layer){
DNNLayerType layer_type = network->layers[layer].type; DNNLayerType layer_type = network->layers[layer].type;
layer_funcs[layer_type](network->operands, layer_funcs[layer_type].pf_exec(network->operands,
network->layers[layer].input_operand_indexes, network->layers[layer].input_operand_indexes,
network->layers[layer].output_operand_index, network->layers[layer].output_operand_index,
network->layers[layer].params); network->layers[layer].params);
......
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
/** /**
* the enum value of DNNLayerType should not be changed, * the enum value of DNNLayerType should not be changed,
* the same values are used in convert_from_tensorflow.py * the same values are used in convert_from_tensorflow.py
* and, it is used to index the layer execution function pointer. * and, it is used to index the layer execution/load function pointer.
*/ */
typedef enum { typedef enum {
DLT_INPUT = 0, DLT_INPUT = 0,
......
...@@ -23,6 +23,52 @@ ...@@ -23,6 +23,52 @@
#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x))) #define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
/**
 * Load a CONV2D layer description from the native model file.
 *
 * Reads the six int32 header fields (dilation, padding method, activation,
 * input/output channel counts, kernel size), then the kernel weights and
 * biases (one float each), and finally the input/output operand indexes.
 *
 * @param layer              layer to fill in; on success layer->params owns
 *                           a newly allocated ConvolutionalParams
 * @param model_file_context opened model file positioned at this layer's data
 * @param file_size          total model file size, used to bound the payload
 * @return number of bytes consumed from the file, or 0 on error
 *         (allocation failure or invalid/oversized layer description)
 */
int dnn_load_layer_conv2d(Layer *layer, AVIOContext *model_file_context, int file_size)
{
    ConvolutionalParams *conv_params;
    int kernel_size;
    int64_t kernel_size64;
    int dnn_size = 0;

    conv_params = av_malloc(sizeof(*conv_params));
    if (!conv_params)
        return 0;

    conv_params->dilation       = (int32_t)avio_rl32(model_file_context);
    conv_params->padding_method = (int32_t)avio_rl32(model_file_context);
    conv_params->activation     = (int32_t)avio_rl32(model_file_context);
    conv_params->input_num      = (int32_t)avio_rl32(model_file_context);
    conv_params->output_num     = (int32_t)avio_rl32(model_file_context);
    conv_params->kernel_size    = (int32_t)avio_rl32(model_file_context);

    /* Reject non-positive dimensions before any arithmetic on them; the
     * values come straight from an untrusted file. */
    if (conv_params->input_num <= 0 || conv_params->output_num <= 0 ||
        conv_params->kernel_size <= 0) {
        av_freep(&conv_params);
        return 0;
    }

    /* Compute the kernel element count in 64 bits, bounding each partial
     * product by file_size so no step can overflow and the final narrowing
     * to int is safe (each factor is < 2^31, each kept partial < 2^31). */
    kernel_size64 = (int64_t)conv_params->input_num * conv_params->output_num;
    if (kernel_size64 <= file_size) {
        kernel_size64 *= conv_params->kernel_size;
        if (kernel_size64 <= file_size)
            kernel_size64 *= conv_params->kernel_size;
    }
    /* header (24 bytes) + kernel and bias floats (4 bytes each) must fit
     * inside the remaining file; evaluate the bound in 64 bits. */
    if (kernel_size64 > file_size ||
        24LL + 4LL * (kernel_size64 + conv_params->output_num) > file_size) {
        av_freep(&conv_params);
        return 0;
    }
    kernel_size = (int)kernel_size64;
    dnn_size += 24 + ((kernel_size + conv_params->output_num) << 2);

    conv_params->kernel = av_malloc(kernel_size * sizeof(float));
    conv_params->biases = av_malloc(conv_params->output_num * sizeof(float));
    if (!conv_params->kernel || !conv_params->biases) {
        av_freep(&conv_params->kernel);
        av_freep(&conv_params->biases);
        av_freep(&conv_params);
        return 0;
    }
    for (int i = 0; i < kernel_size; ++i) {
        conv_params->kernel[i] = av_int2float(avio_rl32(model_file_context));
    }
    for (int i = 0; i < conv_params->output_num; ++i) {
        conv_params->biases[i] = av_int2float(avio_rl32(model_file_context));
    }

    layer->params = conv_params;
    layer->input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
    layer->output_operand_index = (int32_t)avio_rl32(model_file_context);
    dnn_size += 8;
    return dnn_size;
}
int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes, int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters) int32_t output_operand_index, const void *parameters)
{ {
......
...@@ -35,6 +35,7 @@ typedef struct ConvolutionalParams{ ...@@ -35,6 +35,7 @@ typedef struct ConvolutionalParams{
float *biases; float *biases;
} ConvolutionalParams; } ConvolutionalParams;
int dnn_load_layer_conv2d(Layer *layer, AVIOContext *model_file_context, int file_size);
int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes, int dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters); int32_t output_operand_index, const void *parameters);
#endif #endif
...@@ -27,6 +27,24 @@ ...@@ -27,6 +27,24 @@
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
#include "dnn_backend_native_layer_depth2space.h" #include "dnn_backend_native_layer_depth2space.h"
/**
 * Load a DEPTH_TO_SPACE layer description from the native model file.
 *
 * The on-disk payload is three little-endian int32 words: the block size
 * followed by the input and output operand indexes.
 *
 * @return number of bytes consumed (12), or 0 on allocation failure
 */
int dnn_load_layer_depth2space(Layer *layer, AVIOContext *model_file_context, int file_size)
{
    DepthToSpaceParams *params = av_malloc(sizeof(*params));

    if (!params)
        return 0;

    params->block_size = (int32_t)avio_rl32(model_file_context);
    layer->input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
    layer->output_operand_index = (int32_t)avio_rl32(model_file_context);
    layer->params = params;

    /* 4 bytes for block_size + 8 for the two operand indexes */
    return 4 + 8;
}
int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes, int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters) int32_t output_operand_index, const void *parameters)
{ {
......
...@@ -34,6 +34,7 @@ typedef struct DepthToSpaceParams{ ...@@ -34,6 +34,7 @@ typedef struct DepthToSpaceParams{
int block_size; int block_size;
} DepthToSpaceParams; } DepthToSpaceParams;
int dnn_load_layer_depth2space(Layer *layer, AVIOContext *model_file_context, int file_size);
int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes, int dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters); int32_t output_operand_index, const void *parameters);
......
...@@ -27,6 +27,24 @@ ...@@ -27,6 +27,24 @@
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
#include "dnn_backend_native_layer_maximum.h" #include "dnn_backend_native_layer_maximum.h"
/**
 * Load a MAXIMUM layer description from the native model file.
 *
 * The payload is the constant operand (stored as a raw 32-bit word)
 * followed by the input and output operand indexes.
 *
 * @return number of bytes consumed (12), or 0 on allocation failure
 */
int dnn_load_layer_maximum(Layer *layer, AVIOContext *model_file_context, int file_size)
{
    DnnLayerMaximumParams *params = av_malloc(sizeof(*params));

    if (!params)
        return 0;

    params->val.u32 = avio_rl32(model_file_context);
    layer->params = params;
    layer->input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
    layer->output_operand_index = (int32_t)avio_rl32(model_file_context);

    /* 4 bytes for the constant + 8 for the two operand indexes */
    return 4 + 8;
}
int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters) int32_t output_operand_index, const void *parameters)
{ {
......
...@@ -37,6 +37,7 @@ typedef struct DnnLayerMaximumParams{ ...@@ -37,6 +37,7 @@ typedef struct DnnLayerMaximumParams{
}val; }val;
} DnnLayerMaximumParams; } DnnLayerMaximumParams;
int dnn_load_layer_maximum(Layer *layer, AVIOContext *model_file_context, int file_size);
int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes, int dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters); int32_t output_operand_index, const void *parameters);
......
...@@ -22,6 +22,29 @@ ...@@ -22,6 +22,29 @@
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
#include "dnn_backend_native_layer_pad.h" #include "dnn_backend_native_layer_pad.h"
/**
 * Load a MIRROR_PAD layer description from the native model file.
 *
 * The payload is the pad mode, then one (before, after) padding pair per
 * dimension for 4 dimensions, then the input and output operand indexes.
 *
 * @return number of bytes consumed, or 0 on allocation failure
 */
int dnn_load_layer_pad(Layer *layer, AVIOContext *model_file_context, int file_size)
{
    LayerPadParams *params = av_malloc(sizeof(*params));
    int consumed = 0;

    if (!params)
        return 0;

    params->mode = (int32_t)avio_rl32(model_file_context);
    consumed += 4;

    /* a (before, after) pair for each of the 4 dimensions */
    for (int dim = 0; dim < 4; ++dim) {
        params->paddings[dim][0] = avio_rl32(model_file_context);
        params->paddings[dim][1] = avio_rl32(model_file_context);
        consumed += 8;
    }

    layer->input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
    layer->output_operand_index = (int32_t)avio_rl32(model_file_context);
    consumed += 8;
    layer->params = params;

    return consumed;
}
static int before_get_buddy(int given, int paddings, LayerPadModeParam mode) static int before_get_buddy(int given, int paddings, LayerPadModeParam mode)
{ {
if (mode == LPMP_SYMMETRIC) { if (mode == LPMP_SYMMETRIC) {
......
...@@ -36,6 +36,7 @@ typedef struct LayerPadParams{ ...@@ -36,6 +36,7 @@ typedef struct LayerPadParams{
float constant_values; float constant_values;
} LayerPadParams; } LayerPadParams;
int dnn_load_layer_pad(Layer *layer, AVIOContext *model_file_context, int file_size);
int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes, int dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters); int32_t output_operand_index, const void *parameters);
......
...@@ -25,10 +25,10 @@ ...@@ -25,10 +25,10 @@
#include "dnn_backend_native_layer_depth2space.h" #include "dnn_backend_native_layer_depth2space.h"
#include "dnn_backend_native_layer_maximum.h" #include "dnn_backend_native_layer_maximum.h"
LAYER_EXEC_FUNC layer_funcs[DLT_COUNT] = { LayerFunc layer_funcs[DLT_COUNT] = {
NULL, {NULL, NULL},
dnn_execute_layer_conv2d, {dnn_execute_layer_conv2d, dnn_load_layer_conv2d},
dnn_execute_layer_depth2space, {dnn_execute_layer_depth2space, dnn_load_layer_depth2space},
dnn_execute_layer_pad, {dnn_execute_layer_pad, dnn_load_layer_pad},
dnn_execute_layer_maximum, {dnn_execute_layer_maximum, dnn_load_layer_maximum},
}; };
...@@ -26,7 +26,13 @@ ...@@ -26,7 +26,13 @@
typedef int (*LAYER_EXEC_FUNC)(DnnOperand *operands, const int32_t *input_operand_indexes, typedef int (*LAYER_EXEC_FUNC)(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters); int32_t output_operand_index, const void *parameters);
typedef int (*LAYER_LOAD_FUNC)(Layer *layer, AVIOContext *model_file_context, int file_size);
extern LAYER_EXEC_FUNC layer_funcs[DLT_COUNT]; typedef struct LayerFunc {
LAYER_EXEC_FUNC pf_exec;
LAYER_LOAD_FUNC pf_load;
}LayerFunc;
extern LayerFunc layer_funcs[DLT_COUNT];
#endif #endif
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.