Commit 247664af authored by Linshizhi

update

parent 82c82306
@@ -19,6 +19,7 @@ option(DEBUG "Only enable during development" OFF)
include_directories(${INCLUDE})
if(DEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
message("Fetching googletest...")
# Google Test
include(FetchContent)
@@ -91,8 +92,10 @@ install(DIRECTORY ${ROOT}/src/proto
if(DEBUG)
# Tests
set(Tests ${ROOT}/tests)
set(TestCases
${Tests}/helper.cc
${Tests}/ioctxTestCases.cc)
add_executable(unittest ${TestCases} ${SRC_Files})
add_dependencies(unittest libav)
...
@@ -16,6 +16,7 @@ static AVFormatContext* AVFormatInputContextConstructor(
if (avformat_open_input(&ctx, path.c_str(), nullptr, nullptr) < 0) {
return nullptr;
}
if (avformat_find_stream_info(ctx, nullptr) < 0) {
avformat_close_input(&ctx);
return nullptr;
...
#include <stdio.h>
#include <stdint.h>
#include <malloc.h>
#include <iostream>
#include <chrono>
#include <ctime>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
}
///////////////////////////////////////////////////////////////////////////////
// Encoding Part //
///////////////////////////////////////////////////////////////////////////////
static int width_ = 0;
static int height_ = 0;
static int framerate_ = 0;
static int timescale = 90000;
static AVFrame *rgbaFrame;
static unsigned frameIndex = 0;
static AVFrame *frame;
static AVPacket *packet;
static AVCodecContext *cc;
static struct SwsContext* swsCtx = NULL;
static AVPacket *lastPkt = NULL;
// Bitstream Filter
static AVBSFContext *bsCtx = NULL;
static const AVBitStreamFilter *bsFilter = NULL;
int getPackets(uint8_t *buffer, uint32_t size, uint32_t *osize);
uint8_t encodeInit(int width, int height, int fps) {
int ret = 0;
width_ = width;
height_ = height;
framerate_ = fps;
const AVCodec *encoder = avcodec_find_encoder_by_name("libx264");
if (encoder == NULL) {
fprintf(stderr, "Unable to find H.264 decoder\n");
return 1;
}
cc = avcodec_alloc_context3(encoder);
if (cc == NULL) {
fprintf(stderr, "Unable to alloc codec context\n");
return 2;
}
// Setup encode parameters
cc->width = width;
cc->height = height;
cc->pix_fmt = AV_PIX_FMT_YUV420P;
cc->time_base = (AVRational){1, timescale};
cc->gop_size = 0;
if ((ret = avcodec_open2(cc, encoder, NULL)) < 0) {
fprintf(stderr, "Unable to open codec context\n");
return 3;
}
packet = av_packet_alloc();
if (packet == NULL) {
fprintf(stderr, "Could not allocate packet\n");
}
frame = av_frame_alloc();
frame->format = cc->pix_fmt;
frame->width = cc->width;
frame->height = cc->height;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data\n");
return 4;
}
swsCtx = sws_getCachedContext(swsCtx, width, height, AV_PIX_FMT_RGBA,
width, height, AV_PIX_FMT_YUV420P,
SWS_BICUBIC,
NULL, NULL, NULL);
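// swsCtx converts the incoming RGBA frames to YUV420P for the encoder;
// source and destination dimensions are equal, so no scaling is performed.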
// Init bitstream filter
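// h264_mp4toannexb rewrites length-prefixed H.264 NAL units into the
// Annex B start-code format used by raw .h264 / MPEG-TS style streams.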
bsFilter = av_bsf_get_by_name("h264_mp4toannexb");
if (bsFilter == NULL) {
printf("Fail to get h264_mp4toannexb\n");
return 5;
}
av_bsf_alloc(bsFilter, &bsCtx);
if (bsCtx == NULL) {
printf("Fail to alloc bitstream filter context\n");
return 6;
}
avcodec_parameters_from_context(bsCtx->par_in, cc);
avcodec_parameters_from_context(bsCtx->par_out, cc);
if (av_bsf_init(bsCtx) < 0) {
printf("failed to init bitstream filter context\n");
return 7;
}
return 0;
}
static std::chrono::duration<double> total;
/* Ret Values:
* 0: Success
* 1: AGAIN
* 2: Buffer too small
* 3: ERROR */
int encode(uint8_t *data, uint8_t *buffer, uint32_t size, uint32_t *osize) {
int ret = 0;
if (av_frame_make_writable(frame) < 0) {
fprintf(stderr, "Fail to make frame writable\n");
}
rgbaFrame = rgbaFrame == nullptr ? av_frame_alloc() : rgbaFrame;
rgbaFrame->format = AV_PIX_FMT_RGBA;
rgbaFrame->height = cc->height;
rgbaFrame->width = cc->width;
avpicture_fill((AVPicture*)rgbaFrame, data, AV_PIX_FMT_RGBA, width_, height_);
// The converted YUV data is stored in frame
int outSliceH = sws_scale(swsCtx, (const uint8_t* const*)rgbaFrame->data, rgbaFrame->linesize, 0, height_,
frame->data, frame->linesize);
if (outSliceH <= 0) {
printf("outSliceH <= 0 \n");
return 3;
}
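// With time_base 1/timescale, each frame advances pts by
// timescale / framerate_ ticks (e.g. 90000 / 30 = 3000 at 30 fps).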
frame->pts = timescale / framerate_ * frameIndex;
frame->pict_type = AV_PICTURE_TYPE_I;
++frameIndex;
// Encode
ret = avcodec_send_frame(cc, frame);
if (ret < 0) {
fprintf(stderr, "Fail to encoding\n");
}
ret = getPackets(buffer, size, osize);
return ret;
}
int totalFrame = 0;
/* Ret Values:
* 0: Success
* 1: AGAIN or EOF
* 2: Buffer too small
* 3: ERROR */
int getPackets(uint8_t *buffer, uint32_t size, uint32_t *osize) {
int ret = 0;
uint8_t *pos = buffer;
int remainSize = size;
*osize = 0;
if (lastPkt != NULL && lastPkt->size < remainSize) {
printf("Get last packet\n");
memcpy(pos, lastPkt->data, lastPkt->size);
pos += lastPkt->size;
remainSize -= lastPkt->size;
av_packet_unref(lastPkt);
lastPkt = NULL;
} else if (lastPkt != NULL) {
/* Buffer is too small to contain the packet */
printf("lastPkt size is %d, remain is %d\n", lastPkt->size, remainSize);
return 2;
}
while (true) {
ret = avcodec_receive_packet(cc, packet);
if (ret < 0) {
ret = 1;
goto DONE;
}
++totalFrame;
printf("Frame: %d\n", totalFrame);
printf("WASM Encode: Packet Size %d\n", packet->size);
// For a single video frame, avcodec_receive_packet() is expected to
// return only one packet here.
if (remainSize > packet->size) {
memcpy(pos, packet->data, packet->size);
pos += packet->size;
remainSize -= packet->size;
} else {
printf("Last Pkt\n");
lastPkt = packet;
break;
}
av_packet_unref(packet);
}
DONE:
*osize = size - remainSize;
return ret;
}
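// flushEncoder() puts the encoder into draining mode by sending a NULL
// frame; any remaining packets are then retrieved with getPackets()
// until it reports that no more data is available.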
int flushEncoder() {
if (avcodec_send_frame(cc, NULL) < 0) {
return 1;
}
return 0;
}
#include <stdint.h>
int flushEncoder();
int getPackets(uint8_t *buffer, uint32_t size, uint32_t *osize);
uint8_t encodeInit(int width, int height, int fps);
int encode(uint8_t *data, uint8_t *buffer, uint32_t size, uint32_t *osize);
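A minimal usage sketch of the encoder API declared above; the output buffer size, the frame source, and the helper name exampleEncodeLoop are illustrative assumptions, not part of this commit:

#include <stdint.h>
#include <vector>

// Declarations as in the header above.
uint8_t encodeInit(int width, int height, int fps);
int encode(uint8_t *data, uint8_t *buffer, uint32_t size, uint32_t *osize);
int flushEncoder();
int getPackets(uint8_t *buffer, uint32_t size, uint32_t *osize);

void exampleEncodeLoop(std::vector<std::vector<uint8_t>> &rgbaFrames) {
    std::vector<uint8_t> out(1 << 20);            // output buffer; size is an assumption
    uint32_t osize = 0;
    if (encodeInit(1920, 1080, 30) != 0)          // 0 means success
        return;
    for (auto &f : rgbaFrames) {                  // each f holds width * height * 4 RGBA bytes
        // encode(): 0 success, 1 AGAIN, 2 buffer too small, 3 error (see comments above)
        int ret = encode(f.data(), out.data(), (uint32_t)out.size(), &osize);
        if (ret == 0 && osize > 0) {
            // consume osize bytes of encoded H.264 from out.data()
        }
    }
    flushEncoder();                               // send NULL frame: start draining
    while (true) {
        int ret = getPackets(out.data(), (uint32_t)out.size(), &osize);
        if (ret != 0 && osize == 0)               // AGAIN/EOF with nothing copied: done
            break;
        // consume osize bytes of encoded H.264 from out.data()
    }
}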
@@ -5,6 +5,9 @@
#include "proto/transientMemProto.h"
#include "proto/movMemProto.h"
#include <thread>
#include "./helper.h"
#include <algorithm>
#include <cmath>
extern "C" {
@@ -172,6 +175,141 @@ protected:
IOCtx::OutCtx oCtx { outFilePath };
};
TEST_F(IOCTX_With_MovMem_Proto_Fixture, EncodeRGBAFrame) {
const int RGBAFrameSize = 1920 * 1080 * 4;
// Prepare MM InCtx;
IOProto::MovMemProto::MovMemProto *mmProto =
new IOProto::MovMemProto::MovMemProto(nullptr, IOProto::read);
std::thread t1([mmProto]() {
int ret;
uint32_t osize;
static uint8_t rgbaFrame[RGBAFrameSize]; // static: an ~8 MB array would overflow the default thread stack
uint8_t bufferPool[6000];
uint8_t *mem;
// Fill data into RGBA frame
for (int i = 0; i < RGBAFrameSize; ++i) {
rgbaFrame[i] = i;
}
int pktCount = 0;
encodeInit(1920, 1080, 30);
for (int i = 0; i < 30; ++i) {
ret = encode(rgbaFrame, bufferPool, sizeof(bufferPool), &osize);
if (osize > 0) {
pktCount++;
}
if (ret != 0) {
printf("ret is %d\n", ret);
if (ret != 1) break;
}
if (osize == 0)
continue;
mem = new uint8_t[osize];
memcpy(mem, bufferPool, osize);
mmProto->push(mem, osize);
printf("%d\n", i);
}
flushEncoder();
printf("Flush\n");
while (true) {
ret = getPackets(bufferPool, 6000, &osize);
if (osize > 0) {
pktCount++;
}
if (ret != 0 && osize == 0) {
printf("while ret is %d\n", ret);
break;
}
printf("Output Size %d, %d\n", pktCount, osize);
mem = new uint8_t[osize];
memcpy(mem, bufferPool, osize);
mmProto->push(mem, osize);
}
printf("Done\n");
mmProto->eof();
});
t1.detach();
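// Give the detached encoder thread time to produce and push packets
// before the in-memory input context below probes the stream.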
std::this_thread::sleep_for(std::chrono::milliseconds(5000));
inCtxMM = new IOCtx::InCtx("", mmProto);
AVPacket packet;
AVStream *s = inCtxMM->getStream([](AVStream *s) {
return s->codecpar->codec_type == AVMEDIA_TYPE_VIDEO;
});
if (s == NULL) {
abort();
}
if (oCtx.newStream(s->codecpar) == IOCtx::ERROR)
throw std::runtime_error("Failed to init fixture");
oCtx.writeHeader();
char errStr[256] = { 0 };
bool init = false;
int timescale = 90000;
int fps = 30;
int duration = timescale/fps;
int lastPTS = 0, lastDTS = 0;
int i = 0;
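// Rewrite timestamps before muxing: each packet gets a fixed duration of
// timescale/fps (90000/30 = 3000), and the first packet gets dts = -duration
// so that dts never exceeds pts.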
while (true) {
int ret = inCtxMM->readFrame_(&packet);
if (ret < 0) {
av_strerror(ret, errStr, sizeof(errStr));
printf("Break: %s\n", errStr);
break;
}
if (init == false) {
packet.pts = 0;
packet.dts = -duration;
packet.duration = duration;
lastPTS = packet.pts;
lastDTS = packet.dts;
init = true;
} else {
packet.pts = lastPTS + duration;
packet.dts = lastDTS + duration;
lastPTS += duration;
lastDTS += duration;
}
packet.stream_index = 0;
packet.pos = -1;
printf("PTS: %ld, DTS: %ld\n", packet.pts, packet.dts);
printf("Size of pkt is %d,%d\n", ++i, packet.size);
oCtx.writeFrame(&packet);
av_packet_unref(&packet);
}
oCtx.writeTrailer();
delete inCtxMM;
}
TEST_F(IOCTX_With_MovMem_Proto_Fixture, Initialize) {
@@ -256,7 +394,7 @@ TEST_F(IOCTX_With_MovMem_Proto_Fixture, Initialize) {
int duration = timescale/fps;
int lastPTS = 0, lastDTS = 0;
printf("Done");
while (true) {
AVPacket packet;
int ret = inCtxMM->readFrame_(&packet);
...