一.windows
1.使用ffmpeg录制mp4视频
#获取音视频设备
ffmpeg -list_devices true -f dshow -i dummy
#录制视频
ffmpeg -f dshow -i video="xxxx" -c:v libx264 -pix_fmt yuv420p output.mp4
2.将mp4转换为yuv数据
//-i:输入文件选项;-an:不处理音频流;
//-c:v rawvideo 视频编码器的选项,使用原始视频格式进行编码(不进行压缩);
//-pix_fmt yuv420p:输出视频的像素格式
ffmpeg -i output.mp4 -an -c:v rawvideo -pix_fmt yuv420p out.yuv
//播放
ffplay -f rawvideo -pixel_format yuv420p -video_size 1280x720 out.yuv
3.也可以单独播放单独分量
//播放yuv数据
ffplay -s 1280x720 -vf extractplanes='y' out.yuv
//提取yuv三个分量为三个文件
ffmpeg -s 1280x720 -pix_fmt yuv420p -i out.yuv -filter_complex "extractplanes=y+u+v[y][u][v]" -map "[y]" y.yuv -map "[u]" u.yuv -map "[v]" v.yuv
//播放单个分量 格式为gray
ffplay -s 1280x720 -pix_fmt gray y.yuv
播放出来y、u、v都是没有颜色的,为什么u和v表示蓝色和红色的差分量也没有颜色呢?
因为单独提取出的分量以 gray(灰度)格式播放时,播放器只把数据当作亮度来渲染;u、v 本身只是相对的色度差值,必须与 y 分量一起经过 YUV→RGB 转换才能呈现颜色,单独播放自然都是灰度图。
4.api调用
#可以查看摄像头输出的视频数据的编码格式和像素格式
ffmpeg -f dshow -i "Integrated Camera" -vcodec copy -f null -
5.整体代码
int read_video()
{int ret = 0;char errors[1024];AVFormatContext *fmt_ctx = NULL;AVDictionary *options = NULL;AVAudioFifo *fifo = nullptr;// FILE *outfile = fopen("./out.yuv", "wb+");FILE *outfile_yuv = fopen("./out1.yuv", "wb+");if (outfile_yuv == nullptr){printf("filed open out file\n");}AVPacket pkt;av_init_packet(&pkt);int frame_count = 0;// 找到采集工具const AVInputFormat *iformat = av_find_input_format("dshow");if (iformat == NULL){printf("AVInputFormat find failed \n");return -1;}// 打开视频设备av_dict_set(&options, "video_size", "1280x720", 0);av_dict_set(&options, "framerate", "30", 0);// av_dict_set(&options, "pixel_format", "yuv420p", 0);ret = open_dev(&fmt_ctx, "video=Integrated Camera", iformat, &options);AVCodecContext *codec_ctx;// 找到视频流int video_stream_index = -1;for (int i = 0; i < fmt_ctx->nb_streams; i++){if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){video_stream_index = i;break;}}// 获取解码器AVCodecParameters *codecpar = fmt_ctx->streams[video_stream_index]->codecpar;// AVCodecIDprintf("codecParameters:%d , AVCodecID : %d \n", codecpar->format, codecpar->codec_id);const AVCodec *codec = avcodec_find_decoder(codecpar->codec_id);codec_ctx = avcodec_alloc_context3(codec);avcodec_parameters_to_context(codec_ctx, codecpar);if (avcodec_open2(codec_ctx, codec, NULL) < 0){fprintf(stderr, "Could not open codec\n");avformat_close_input(&fmt_ctx);return -1;}// 初始化格式转换上下文struct SwsContext *sws_ctx = sws_getContext(1280, 720, AV_PIX_FMT_YUVJ422P, // 输入图像的宽度、高度和像素格式1280, 720, AV_PIX_FMT_YUV420P, // 输出图像的宽度、高度和像素格式SWS_BILINEAR, // 转换算法NULL, NULL, NULL // 额外参数);int count = 0;static bool finished = false;AVFrame *frame = av_frame_alloc();AVFrame *outFrame = av_frame_alloc();// 设置 YUV 422P 帧frame->format = AV_PIX_FMT_YUV422P;frame->width = 1280;frame->height = 720;// 设置 YUV 420P 帧outFrame->format = AV_PIX_FMT_YUV420P;outFrame->width = 1280;outFrame->height = 720;// 分配数据int ret420 = av_image_alloc(outFrame->data, outFrame->linesize, 1280, 720, AV_PIX_FMT_YUV420P, 1);if 
(ret420 < 0){fprintf(stderr, "无法分配图像数据\n");return -1;}// AVPixelFormat;av_dump_format(fmt_ctx, 0, "video=Integrated Camera", 0);// 读取数据包并解码size_t written_y = 0, written_u = 0, written_v = 0;while (av_read_frame(fmt_ctx, &pkt) >= 0){ret = avcodec_send_packet(codec_ctx, &pkt);if (ret < 0){fprintf(stderr, "Error sending packet to decoder: %s\n", av_err2str(ret));av_packet_unref(&pkt);continue; // 继续处理下一个数据包}while (avcodec_receive_frame(codec_ctx, frame) >= 0){sws_scale(sws_ctx, frame->data, frame->linesize, 0, 720, outFrame->data, outFrame->linesize);// // 计算 YUV 数据大小int size_y = outFrame->width * outFrame->height;int size_u = (outFrame->width / 2) * (outFrame->height);int size_v = (outFrame->width / 2) * (outFrame->height);int total_size = size_y + size_u + size_v;printf("%d Frame-format %d: Y size = %d, U size = %d, V size = %d, Total size = %d\n", count,outFrame->format, size_y, size_u, size_v, total_size);// 将解码后的 YUV 数据写入文件written_y = fwrite(outFrame->data[0], 1, size_y, outfile_yuv); // Yfflush(outfile_yuv);written_u = fwrite(outFrame->data[1], 1, size_u, outfile_yuv); // Ufflush(outfile_yuv);written_v = fwrite(outFrame->data[2], 1, size_v, outfile_yuv); // Vfflush(outfile_yuv);}av_packet_unref(&pkt);if (count++ > 50)break;}// 发送结束信号avcodec_send_packet(codec_ctx, NULL);while (avcodec_receive_frame(codec_ctx, outFrame) >= 0){printf("get one frame %d ,frame_w:%d,frame_h:%d\n", count, outFrame->width, outFrame->height);fwrite(frame->data[0], 1, outFrame->width * outFrame->height, outfile_yuv); // Yfwrite(frame->data[1], 1, (outFrame->width / 2) * (outFrame->height), outfile_yuv); // Ufwrite(frame->data[2], 1, (outFrame->width / 2) * (outFrame->height), outfile_yuv); // Vfflush(outfile_yuv);count++;}av_frame_free(&frame);avcodec_free_context(&codec_ctx);avformat_close_input(&fmt_ctx);fclose(outfile_yuv);av_log(NULL, AV_LOG_DEBUG, "end");return 0;
}
6.遇到问题
1.通过av_read_frame获取到的视频数据不是yuv420p的原始数据,而是通过mjpeg编码的yuv422p的数据。