前篇有紀錄如何自行編譯 FFMpeg,本篇要記錄 FFMpeg 的使用方式,現在的 FFMpeg Library 已經算很好用惹,官方也有釋出一些 example 可以參考,網路上也很多高手的教學,真低棒!
FFMpeg 是一個開源免費跨平臺的視訊和音訊流方案,提供一些 Library 給那些想自行編寫應用的開發者,也就看到市面上的一些影音軟體會採用 FFMpeg 去做編寫。
網路上有很多參考範例,但由於工作上要用到裝置串流影像,並即時顯示在應用程式上,那些官方範例都是讀取影音檔案然後就知道該檔案所需要的解碼與一些資訊,所以這邊紀錄當時的方法。
步驟說明:
使用 avcodec_send_packet 與 avcodec_receive_frame 來解碼,並獲取解碼後的 Frame。
建立 SwsContext,需要它來執行 sws_scale() 進行縮放/轉換操作。
/*
 * Create and open an MJPEG decoder context.
 *
 * outCodecCtx: on success, receives an opened AVCodecContext the caller
 *              must release (avcodec_free_context / freeFFmpeg).
 * width/height: expected dimensions of the incoming JPEG frames.
 *
 * Returns 0 on success, -1 on failure (nothing is left allocated).
 *
 * NOTE(review): FFmpeg >= 5.0 declares avcodec_find_decoder() as
 * returning const AVCodec*; this file targets the older non-const API.
 */
int initFFmpeg(AVCodecContext **outCodecCtx, unsigned width, unsigned height)
{
    AVCodec *pCodec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
    if (!pCodec) {
        return -1;
    }

    AVCodecContext *pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        /* BUG FIX: the allocation result was previously dereferenced
         * without a NULL check. */
        return -1;
    }

    /* Slice-based threading with two workers. */
    pCodecCtx->thread_count = 2;
    pCodecCtx->thread_type = FF_THREAD_SLICE;
    pCodecCtx->width = width;
    pCodecCtx->height = height;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        /* avcodec_free_context() already resets the pointer to NULL,
         * so the old "pCodecCtx = 0; pCodec = 0;" lines were redundant. */
        avcodec_free_context(&pCodecCtx);
        return -1;
    }

    *outCodecCtx = pCodecCtx;
    return 0;
}
int jpgDecode(const unsigned char *src, unsigned len, unsigned width, unsigned height, unsigned char *buffer)
{
AVCodecContext *pCodecCtx = 0;
AVFrame *pFrame;
AVFrame *pFrameRGB;
AVPacket *packet;
unsigned char *inBuffer = (unsigned char *)src;
unsigned char *outBuffer = (unsigned char *)buffer;
if(initFFmpeg(&pCodecCtx, width, height) < 0)
{
return -1;
}
pFrame = av_frame_alloc();
if(!pFrame)
{
freeFFmpeg(pCodecCtx);
return -1;
}
pFrameRGB = av_frame_alloc();
if(!pFrameRGB)
{
av_free(pFrame);
freeFFmpeg(pCodecCtx);
ReleaseMutex(mutex);
return -1;
}
packet = av_packet_alloc();
packet->data = inBuffer;
packet->size = len;
// decode
int ret = avcodec_send_packet(pCodecCtx, packet);
if (ret == AVERROR(EAGAIN)){
// Decoder can't take packets right now. Make sure you are draining it.
return -1;
}else if (ret < 0){
// Failed to send the packet to the decoder
return -1;
}
if(ret == 0)
{
int decodeFrame = avcodec_receive_frame(pCodecCtx, pFrame);
if (decodeFrame == AVERROR(EAGAIN) || decodeFrame == AVERROR_EOF){
// The decoder doesn't have enough data to produce a frame
// Not an error unless we reached the end of the stream
// Just pass more packets until it has enough to produce a frame
av_frame_free(&pFrame);
av_freep(pFrame);
return -1;
}else if (decodeFrame < 0){
// Failed to get a frame from the decoder
av_frame_free(&pFrame);
av_freep(pFrame);
return -1;
}
}
// 由於該版本 AV_PIX_FMT_YUVJ420P...等 要被棄用,因此目前只能先強制換成新的 type
AVPixelFormat pixFormat;
switch (pCodecCtx->pix_fmt) {
case AV_PIX_FMT_YUVJ420P :
pixFormat = AV_PIX_FMT_YUV420P;
break;
case AV_PIX_FMT_YUVJ422P :
pixFormat = AV_PIX_FMT_YUV422P;
break;
case AV_PIX_FMT_YUVJ444P :
pixFormat = AV_PIX_FMT_YUV444P;
break;
case AV_PIX_FMT_YUVJ440P :
pixFormat = AV_PIX_FMT_YUV440P;
break;
default:
pixFormat = pCodecCtx->pix_fmt;
}
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getContext(pCodecCtx->width, // 原始影像寬度
pCodecCtx->height, // 原始影像高度
pixFormat, // 原始影像格式
pCodecCtx->width, // 輸出影像寬度
pCodecCtx->height, // 原始影像高度
AV_PIX_FMT_RGB32, // 輸出影像格式
SWS_FAST_BILINEAR, // 變換尺寸方法
NULL,
NULL,
NULL);
if(!img_convert_ctx)
{
av_frame_free(&pFrame);
av_frame_free(&pFrameRGB);
freeFFmpeg(pCodecCtx);
return -1;
}
if(!outBuffer)
{
sws_freeContext(img_convert_ctx);
av_frame_free(&pFrame);
av_frame_free(&pFrameRGB);
freeFFmpeg(pCodecCtx);
return -1;
}
if(pCodecCtx->width != width || pCodecCtx->height != height){
return 0;
}
// Assign appropriate parts of buffer to image planes in pFrameRGB
av_image_fill_arrays(pFrameRGB->data,
pFrameRGB->linesize,
outBuffer,
AV_PIX_FMT_RGB32,
pCodecCtx->width,
pCodecCtx->height,
1);
sws_scale(img_convert_ctx,
pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
pFrameRGB->data,
pFrameRGB->linesize);
sws_freeContext(img_convert_ctx);
av_frame_free(&pFrame);
av_frame_free(&pFrameRGB);
av_packet_free(&packet);
avcodec_free_context(&pCodecCtx);
return 0;
}
https://github.com/leandromoreira/ffmpeg-libav-tutorial
https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples
https://kkc.github.io/2019/01/12/ffmpeg-libav-decode-note/
https://www.lmlphp.com/user/901/article/item/13844