
12 - FFmpeg H264 Encoding

---------------------------- Video Encoding ----------------------------
ffmpeg command
    ffmpeg -s 768x432 -pix_fmt yuv420p -i hong.yuv -vcodec libx264 -b:v 4096k -bf 0 -g 10 -r 30 out1.h264
Parameters:
    -s        video size (width x height; see the frame-size sketch below)
    -pix_fmt  pixel format
    -b:v      average video bitrate
    -bf       number of B frames
    -g        GOP size, i.e. the interval between two I frames
    -r        video frame rate
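
As a quick sanity check on these parameters: one yuv420p frame occupies width x height x 3/2 bytes (a full-size Y plane plus quarter-size U and V planes), so a 768x432 frame is 497,664 bytes. The sketch below is my own addition, assuming the hong.yuv file from the command above; it derives the frame size and counts how many frames the raw file holds:

#include <stdio.h>

/* Sketch only: compute the yuv420p frame size and the number of frames in the raw file.
 * The 768x432 size and the file name hong.yuv come from the ffmpeg command above. */
int main(void)
{
    const long width = 768, height = 432;
    const long frameSize = width * height * 3 / 2; /* Y + U/4 + V/4 = 497664 bytes */

    FILE *fp = fopen("hong.yuv", "rb");
    if (fp == NULL) {
        perror("open hong.yuv");
        return 1;
    }
    fseek(fp, 0, SEEK_END);
    long fileSize = ftell(fp);
    fclose(fp);

    printf("frame size: %ld bytes, frames in file: %ld\n", frameSize, fileSize / frameSize);
    return 0;
}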

---------------------------- Encoding Flow ----------------------------
1. Find the encoder --- avcodec_find_encoder_by_name
2. Allocate the encoder context --- avcodec_alloc_context3
3. Set the encoding parameters --- width / height / pix_fmt / time_base / bit_rate / gop_size ...
4. Open the encoder --- avcodec_open2
5. Allocate the frame and its raw buffer --- av_frame_alloc + av_image_get_buffer_size + av_image_fill_arrays
6. Read the YUV data and encode it frame by frame --- avcodec_send_frame + avcodec_receive_packet (run once more with a NULL frame to flush; see the helper sketch after this list)
7. Write the encoded data --- fwrite
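
One detail the list leaves implicit: after the last YUV frame has been sent, the encoder still holds buffered packets, so step 6 must be run one more time with a NULL frame to drain it (both examples below do this at the very end). A minimal flush helper, as a sketch only (the function name flush_encoder is mine, not from the examples):

#include <libavcodec/avcodec.h>
#include <stdio.h>

/* Sketch: sending a NULL frame puts the encoder into draining mode; after that,
 * avcodec_receive_packet() must be called until it reports AVERROR_EOF so the
 * packets still buffered inside the encoder get written out. */
static int flush_encoder(AVCodecContext *ctx, AVPacket *pkt, FILE *out)
{
    int ret = avcodec_send_frame(ctx, NULL); /* NULL frame = start draining */
    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
            return 0;
        if (ret < 0)
            return ret;
        fwrite(pkt->data, 1, pkt->size, out); /* write the remaining packets */
        av_packet_unref(pkt);
    }
    return ret;
}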

Method 1:

#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/parseutils.h>
#include <stdio.h>

int writePacketCount = 0;

int encodeVideo(AVCodecContext *encoderCtx, AVFrame *frame, AVPacket *packet, FILE *dest_fp)
{
    int ret = avcodec_send_frame(encoderCtx, frame);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "send frame to encoder failed: %s\n", av_err2str(ret));
        return -1;
    }
    while (ret >= 0) {
        ret = avcodec_receive_packet(encoderCtx, packet);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_log(NULL, AV_LOG_INFO, "[encodeVideo] -- AVERROR(EAGAIN) || AVERROR_EOF \n");
            return 0;
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "encode frame failed:%s\n", av_err2str(ret));
            return -1;
        }
        fwrite(packet->data, 1, packet->size, dest_fp);
        writePacketCount++;
        av_log(NULL, AV_LOG_INFO, "writePacketCount:%d\n", writePacketCount);
        av_packet_unref(packet);
    }
    return 0;
}

int YUVencodeH264(const char *inFileName, const char *outFileName, const char *encoderName, const char *videoSize)
{
    int ret = 0;
    // initialize everything the cleanup label touches, so an early goto is safe
    FILE *src_fp = NULL;
    FILE *dest_fp = NULL;
    AVCodecContext *encoderCtx = NULL;
    AVFrame *frame = NULL;
    uint8_t *frameBuffer = NULL;

    src_fp = fopen(inFileName, "rb");
    if (src_fp == NULL) {
        av_log(NULL, AV_LOG_ERROR, "open infile %s failed!\n", inFileName);
        ret = -1;
        goto end;
    }
    dest_fp = fopen(outFileName, "wb+");
    if (dest_fp == NULL) {
        av_log(NULL, AV_LOG_ERROR, "open outfile %s failed!\n", outFileName);
        ret = -1;
        goto end;
    }

    int width = 0, height = 0;
    ret = av_parse_video_size(&width, &height, videoSize);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "parse video size failed:%s\n", av_err2str(ret));
        goto end;
    }
    av_log(NULL, AV_LOG_INFO, "getWidth:%d, getHeight:%d \n", width, height);

    enum AVPixelFormat pixFmt = AV_PIX_FMT_YUV420P;
    int fps = 24;

    // 1. find the encoder
    AVCodec *encoder = avcodec_find_encoder_by_name(encoderName);
    if (encoder == NULL) {
        av_log(NULL, AV_LOG_ERROR, "find encoder %s failed\n", encoderName);
        ret = -1;
        goto end;
    }
    // 2. allocate the encoder context
    encoderCtx = avcodec_alloc_context3(encoder);
    if (encoderCtx == NULL) {
        av_log(NULL, AV_LOG_ERROR, "alloc encoder context failed!\n");
        ret = -1;
        goto end;
    }
    // 3. set the encoding parameters
    encoderCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    encoderCtx->pix_fmt = pixFmt;
    encoderCtx->width = width;
    encoderCtx->height = height;
    encoderCtx->time_base = (AVRational){1, fps};
    encoderCtx->bit_rate = 4096000;
    encoderCtx->max_b_frames = 0;
    encoderCtx->gop_size = 10;

    // 4. open the encoder
    ret = avcodec_open2(encoderCtx, encoder, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "open encoder failed:%s\n", av_err2str(ret));
        goto end;
    }

    // 5. allocate the frame and the raw picture buffer
    frame = av_frame_alloc();
    int frameSize = av_image_get_buffer_size(pixFmt, width, height, 1);
    frameBuffer = av_malloc(frameSize);
    av_image_fill_arrays(frame->data, frame->linesize, frameBuffer, pixFmt, width, height, 1);
    frame->format = pixFmt;
    frame->width = width;
    frame->height = height;

    int pictureSize = width * height;
    AVPacket packet;
    av_init_packet(&packet);

    // 6. read one YUV frame at a time and encode it
    int readFrameCount = 0;
    while (fread(frameBuffer, 1, pictureSize * 3 / 2, src_fp) == pictureSize * 3 / 2) {
        // Y full size | U quarter size | V quarter size
        frame->data[0] = frameBuffer;
        frame->data[1] = frameBuffer + pictureSize;
        frame->data[2] = frameBuffer + pictureSize + pictureSize / 4;
        frame->pts = readFrameCount; // presentation order of the frame
        readFrameCount++;
        av_log(NULL, AV_LOG_INFO, "readFrameCount:%d\n", readFrameCount);
        encodeVideo(encoderCtx, frame, &packet, dest_fp);
    }
    // flush the encoder with a NULL frame
    encodeVideo(encoderCtx, NULL, &packet, dest_fp);

end:
    if (encoderCtx) {
        avcodec_free_context(&encoderCtx);
    }
    if (frame) {
        av_frame_free(&frame);
    }
    if (frameBuffer) {
        av_freep(&frameBuffer);
    }
    if (src_fp) {
        fclose(src_fp);
    }
    if (dest_fp) {
        fclose(dest_fp);
    }
    return ret;
}
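
A possible way to drive Method 1 (this driver is my addition; the file names, the "768x432" size string and the "libx264" encoder name mirror the ffmpeg command at the top):

/* Hypothetical driver for Method 1; hong.yuv / out1.h264 / 768x432 follow the
 * ffmpeg command above, and libx264 is the encoder looked up by name. */
int main(int argc, char *argv[])
{
    const char *in  = (argc > 1) ? argv[1] : "hong.yuv";
    const char *out = (argc > 2) ? argv[2] : "out1.h264";
    return YUVencodeH264(in, out, "libx264", "768x432");
}

The resulting out1.h264 elementary stream can be checked quickly with ffplay out1.h264.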

-----------------------------------------------------------------------------------------------------------------------------------------------------

Method 2:

#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int64_t GetTime()
{
    // current timestamp in milliseconds
    return av_gettime_relative() / 1000;
}

int EncodeVideoInterface(AVCodecContext *encoderCtx, AVFrame *frame, AVPacket *packet, FILE *outfile, uint16_t *frameCount)
{
    if (frame) {
        av_log(NULL, AV_LOG_INFO, "[%s] Send frame pts %3ld frameCount:%d -- line:%d \n", __FUNCTION__, frame->pts, *frameCount, __LINE__);
        (*frameCount)++;
    }
    // With libx264 the frame buffering happens inside the x264 source itself,
    // so the reference count of the AVFrame buffer is not increased here.
    int ret = avcodec_send_frame(encoderCtx, frame);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "[%s] sending the frame to the encoder error! -- line:%d\n", __FUNCTION__, __LINE__);
        return -1;
    }
    while (ret >= 0) {
        ret = avcodec_receive_packet(encoderCtx, packet);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return 0;
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "[%s] encoding video frame error! -- line:%d\n", __FUNCTION__, __LINE__);
            return -1;
        }
        // packet->flags & AV_PKT_FLAG_KEY marks keyframe packets
        av_log(NULL, AV_LOG_INFO, "[%s] Write packet flags:%d pts:%3ld dts:%3ld (size:%5d) -- line:%d\n", __FUNCTION__, packet->flags, packet->pts, packet->dts, packet->size, __LINE__);
        fwrite(packet->data, 1, packet->size, outfile);
        av_packet_unref(packet);
    }
    return 0;
}

int EncodeVideo(const char *yuvFileName, const char *h264FileName, const char *encoderName)
{
    int ret = -1;
    // initialize everything the cleanup label touches, so an early goto is safe
    AVCodecContext *codecCtx = NULL;
    AVPacket *packet = NULL;
    AVFrame *frame = NULL;
    uint8_t *yuvBuf = NULL;

    FILE *inFile = fopen(yuvFileName, "rb");
    FILE *outFile = fopen(h264FileName, "wb");
    if (inFile == NULL || outFile == NULL) {
        av_log(NULL, AV_LOG_ERROR, "[%s] open %s or %s file failed -- line:%d \n", __FUNCTION__, yuvFileName, h264FileName, __LINE__);
        goto _end;
    }

    // find the requested encoder
    AVCodec *encoder = avcodec_find_encoder_by_name(encoderName);
    if (encoder == NULL) {
        av_log(NULL, AV_LOG_ERROR, "[%s] Codec not found! -- line:%d\n", __FUNCTION__, __LINE__);
        goto _end;
    }
    // allocate the encoder context
    codecCtx = avcodec_alloc_context3(encoder);
    if (codecCtx == NULL) {
        av_log(NULL, AV_LOG_ERROR, "[%s] Could not allocate video codec context -- line:%d\n", __FUNCTION__, __LINE__);
        goto _end;
    }
    // resolution
    codecCtx->width = 1920;
    codecCtx->height = 1080;
    // time base and frame rate
    codecCtx->time_base = (AVRational){1, 25};
    codecCtx->framerate = (AVRational){25, 1};
    // I-frame interval
    // if frame->pict_type is set to AV_PICTURE_TYPE_I, gop_size is ignored and that frame is always encoded as an I frame
    codecCtx->gop_size = 25;    // one I frame per second
    codecCtx->max_b_frames = 0; // 0 disables B frames (live streaming usually uses 0)
    codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    if (encoder->id == AV_CODEC_ID_H264) {
        // x264-specific options, see the AVOption table in libx264.c
        /*
         * preset: a bundle of settings that trades encoding speed against compression efficiency.
         * A slower preset compresses better (a smaller file at the same quality), so for a target
         * file size or constant-bitrate encoding it yields higher quality, and for constant-quality
         * encoding it saves bitrate. If you are patient, the usual advice is to use the slowest
         * preset you can tolerate. Presets ordered from fastest to slowest:
         * ultrafast superfast veryfast faster fast medium[default] slow slower veryslow
         */
        ret = av_opt_set(codecCtx->priv_data, "preset", "veryslow", 0);
        if (ret != 0) {
            av_log(NULL, AV_LOG_ERROR, "[%s] av_opt_set preset failed -- line:%d\n", __FUNCTION__, __LINE__);
        }
        /*
         * profile:
         * 1. baseline: I/P frames only, progressive only, CAVLC only;
         * 2. extended: I/P/B/SP/SI frames, progressive only, CAVLC only;
         * 3. main:     I/P/B frames, progressive and interlaced, CAVLC and CABAC;
         * 4. high:     main plus 8x8 intra prediction, custom quantization, lossless coding and more YUV formats.
         */
        ret = av_opt_set(codecCtx->priv_data, "profile", "high", 0);
        if (ret != 0) {
            av_log(NULL, AV_LOG_ERROR, "[%s] av_opt_set profile failed -- line:%d\n", __FUNCTION__, __LINE__);
        }
        /*
         * tune: second in importance only to preset; a visual-tuning preset (itself a group of
         * parameters) chosen according to the content type:
         * film:        film material, when quality matters most
         * animation:   cartoons
         * grain:       very grainy footage
         * stillimage:  mostly static content
         * psnr:        optimize for PSNR
         * ssim:        optimize for SSIM
         * fastdecode:  favor fast decoding
         * zerolatency: zero latency, mainly for live streaming
         */
        ret = av_opt_set(codecCtx->priv_data, "tune", "film", 0);
        if (ret != 0) {
            av_log(NULL, AV_LOG_ERROR, "[%s] av_opt_set tune failed -- line:%d\n", __FUNCTION__, __LINE__);
        }
    }

    // encoder parameters
    codecCtx->bit_rate = 16 * 1024 * 1024; // very high bitrate
    codecCtx->thread_count = 8;            // frame-level threading delays output: about thread_count frames are buffered before packets come out
    codecCtx->thread_type = FF_THREAD_FRAME;
    // For H264, setting AV_CODEC_FLAG_GLOBAL_HEADER stores SPS/PPS only in codecCtx->extradata;
    // without it every keyframe carries SPS/PPS/SEI in-band.

    // bind codecCtx to the codec
    ret = avcodec_open2(codecCtx, encoder, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "[%s] Could not open codec -- line:%d\n", __FUNCTION__, __LINE__);
        goto _end;
    }
    av_log(NULL, AV_LOG_INFO, "[%s] thread_count:%d, thread_type:%d -- line:%d\n", __FUNCTION__, codecCtx->thread_count, codecCtx->thread_type, __LINE__);

    packet = av_packet_alloc();
    if (!packet) {
        av_log(NULL, AV_LOG_ERROR, "[%s] packet alloc error! -- line:%d \n", __FUNCTION__, __LINE__);
        goto _end;
    }
    frame = av_frame_alloc();
    if (!frame) {
        av_log(NULL, AV_LOG_ERROR, "[%s] Could not allocate video frame -- line:%d\n", __FUNCTION__, __LINE__);
        goto _end;
    }
    // allocate the frame buffer
    frame->format = codecCtx->pix_fmt;
    frame->width = codecCtx->width;
    frame->height = codecCtx->height;
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "[%s] Could not allocate video data buffers -- line:%d\n", __FUNCTION__, __LINE__);
        goto _end;
    }
    // bytes per frame, derived from pixel format, width and height
    int frameByteSize = av_image_get_buffer_size(frame->format, frame->width, frame->height, 1);
    av_log(NULL, AV_LOG_INFO, "[%s] frameByteSize: %d -- line:%d\n", __FUNCTION__, frameByteSize, __LINE__);
    yuvBuf = (uint8_t *)malloc(frameByteSize);
    if (!yuvBuf) {
        av_log(NULL, AV_LOG_ERROR, "[%s] yuvBuf malloc failed -- line:%d\n", __FUNCTION__, __LINE__);
        goto _end;
    }

    int64_t beginTime = GetTime();
    int64_t endTime = beginTime;
    int64_t pts = 0;
    uint16_t frameCount = 0;
    av_log(NULL, AV_LOG_INFO, "\n[%s] ------------------------ start encode ------------------------ line:%d \n", __FUNCTION__, __LINE__);

    while (1) {
        memset(yuvBuf, 0, frameByteSize);
        size_t readByteSize = fread(yuvBuf, 1, frameByteSize, inFile);
        if (readByteSize <= 0) {
            av_log(NULL, AV_LOG_INFO, "[%s] read file finish -- line:%d \n", __FUNCTION__, __LINE__);
            break;
        }
        if ((av_frame_make_writable(frame)) != 0) {
            av_log(NULL, AV_LOG_INFO, "[%s] Failed to make frame writable -- line:%d\n", __FUNCTION__, __LINE__);
            if (frame->buf && frame->buf[0]) {
                av_log(NULL, AV_LOG_INFO, "[%s] frame buffer is not writable, ref_count = %d -- line:%d\n", __FUNCTION__, av_buffer_get_ref_count(frame->buf[0]), __LINE__);
            }
            goto _end;
        }
        int needSize = av_image_fill_arrays(frame->data, frame->linesize, yuvBuf, frame->format, frame->width, frame->height, 1);
        if (needSize != frameByteSize) {
            av_log(NULL, AV_LOG_INFO, "[%s] av_image_fill_arrays failed, needSize:%d, frame_bytes:%d\n", __FUNCTION__, needSize, frameByteSize);
            break;
        }
        pts += 40;
        frame->pts = pts; // advance pts by one frame duration expressed in milliseconds (40 ms at 25 fps)

        beginTime = GetTime();
        ret = EncodeVideoInterface(codecCtx, frame, packet, outFile, &frameCount);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "[%s] encode failed -- line:%d\n", __FUNCTION__, __LINE__);
            break;
        }
        endTime = GetTime();
        av_log(NULL, AV_LOG_INFO, "[%s] The encoding time of this frame is: %ld ms -- line:%d\n", __FUNCTION__, endTime - beginTime, __LINE__);
    }

    // flush the encoder
    EncodeVideoInterface(codecCtx, NULL, packet, outFile, &frameCount);

_end:
    if (inFile) {
        fclose(inFile);
    }
    if (outFile) {
        fclose(outFile);
    }
    if (yuvBuf) {
        free(yuvBuf);
    }
    if (packet) {
        av_packet_free(&packet);
    }
    if (frame) {
        av_frame_free(&frame);
    }
    if (codecCtx) {
        avcodec_free_context(&codecCtx);
    }
    return ret;
}
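
And a matching driver for Method 2 (also my addition; the file names are placeholders, and the input must match the resolution hard-coded inside EncodeVideo()):

/* Hypothetical driver for Method 2; the input has to be 1920x1080 yuv420p because
 * EncodeVideo() hard-codes that resolution and pixel format. */
int main(void)
{
    return EncodeVideo("input_1920x1080.yuv", "out2.h264", "libx264");
}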
