Append `2>>D:/DATA/TestVideo/log.txt` to the end of the command. x264 writes its progress and the final PSNR/SSIM statistics to stderr, and `2>>` redirects stderr to the file in append mode, so the results of successive runs accumulate in one log.

Full command:

E:\毕设结果\code\x264\帧间没有intra\x264-master\build\x264.exe --preset medium --input-res 250x362 --input-depth 8 --fps 30 "D:\DATA\TestVideo\ori_yuv\Set14\comic.yuv" --qp 35 --ipratio=1 --pbratio=1 -o "D:\DATA\TestVideo\264\QP35\Set14\comic.264" --psnr --ssim 2>>D:/DATA/TestVideo/log.txt
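When encoding many sequences this redirection is usually scripted. A minimal sketch in Python that re-runs the command above for several QP values (the QP sweep and loop structure are assumptions, not from the original; the paths are the ones given above):

```python
import subprocess

# Hypothetical batch driver: reruns the x264 command for several QPs,
# appending x264's stderr output (progress, PSNR/SSIM summary) to one log.
X264 = r"E:\毕设结果\code\x264\帧间没有intra\x264-master\build\x264.exe"
SRC = r"D:\DATA\TestVideo\ori_yuv\Set14\comic.yuv"
LOG = r"D:\DATA\TestVideo\log.txt"

for qp in (25, 30, 35):  # assumed QP sweep
    out = rf"D:\DATA\TestVideo\264\QP{qp}\Set14\comic.264"
    with open(LOG, "ab") as log:  # "ab" is the same as the shell's 2>> append
        subprocess.run(
            [X264, "--preset", "medium", "--input-res", "250x362",
             "--input-depth", "8", "--fps", "30", SRC,
             "--qp", str(qp), "--ipratio", "1", "--pbratio", "1",
             "-o", out, "--psnr", "--ssim"],
            stderr=log, check=True)
```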
#title = 'https://8*8*5*r*i*.com'
title = 'http://www.gaoqing.la/'  # base URL to crawl
txtRoute = 'D:\\MySeGF\\'         # output directory for the .txt files
contextGF = []                    # collected page content
logList = []                      # accumulated log messages
failFlag = 0                      # nonzero once a request has failed
logPrintDebug = 0                 # set to 1 to echo log messages to the console
# functions...
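The snippet ends before the function definitions. As a rough sketch of how globals like `logList`, `failFlag`, and `logPrintDebug` are typically used together (the helper names below are hypothetical, not from the original source):

```python
import time

def log(msg):
    """Append a timestamped message to logList; echo it when debugging."""
    line = time.strftime('%Y-%m-%d %H:%M:%S ') + msg
    logList.append(line)
    if logPrintDebug:
        print(line)

def flushLog():
    """Write the accumulated log to a file under txtRoute (hypothetical helper)."""
    with open(txtRoute + 'log.txt', 'a', encoding='utf-8') as f:
        f.write('\n'.join(logList) + '\n')
```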
				
The H.264 reference software has an option to enable a trace file, which records the information of every macroblock (MB) so that each MB can be inspected conveniently. Here I will explain what each line of the trace file means; this post is largely based on a forum article from mainland China. First, go to the page published by HHI and look for their H.264/AVC reference software (JM).
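Before walking through the fields, a small parser makes it easier to experiment with a trace file. A minimal Python sketch, assuming the commonly seen JM layout of `@<bit position>  <syntax element name>  <bit string> (<value>)` per line (verify against your own trace_dec.txt, since the exact spacing differs between JM versions):

```python
import re
from collections import Counter

# Assumed line layout: @<bit pos>  <syntax element name>  <bit string> ( <value>)
TRACE_RE = re.compile(r'^@(\S+)\s+(.+?)\s+([01]+)\s+\(\s*(-?\d+)\)\s*$')

def parse_trace_line(line):
    """Split one trace line into (bit_position, element_name, bits, value)."""
    m = TRACE_RE.match(line)
    if not m:
        return None  # header lines and blank lines don't match
    pos, name, bits, value = m.groups()
    return pos, name.rstrip(), bits, int(value)

def element_histogram(path):
    """Count how often each syntax element appears in the trace file."""
    counts = Counter()
    with open(path) as f:
        for line in f:
            parsed = parse_trace_line(line)
            if parsed:
                counts[parsed[1]] += 1
    return counts
```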
The API function libx264 uses for log output is `x264_log()`:

```c
/****************************************************************************
 * x264_log:
 ****************************************************************************/
```

Separately, here is a complete example program (Qt + FFmpeg + Rockchip RMMPP): it generates a test frame, converts it to YUV420P by hand, encodes it with libx264, and pushes the result to an RTMP server as FLV.

```cpp
#include <QCoreApplication>
#include <QByteArray>
#include <QDateTime>
#include <QDebug>
#include <QImage>

#include <rockchip/rk_mpi.h>
#include <rockchip/rk_rga.h>
#include <rockchip/rockface/rockface.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}

static AVFormatContext *ctx;
static AVCodecContext *codec_ctx;
static AVCodec *codec;
static int video_stream_index;
static AVStream *video_stream;
static AVFrame *frame;
static AVPacket pkt;

static void init_ffmpeg(const char *url, int width, int height, int fps)
{
    av_register_all();  // deprecated since FFmpeg 4.0, but harmless

    codec = avcodec_find_encoder_by_name("libx264");
    if (!codec) {
        qDebug() << "Codec not found";
        exit(1);
    }

    codec_ctx = avcodec_alloc_context3(codec);
    if (!codec_ctx) {
        qDebug() << "Could not allocate video codec context";
        exit(1);
    }
    codec_ctx->bit_rate = 400000;
    codec_ctx->width = width;
    codec_ctx->height = height;
    codec_ctx->time_base = {1, fps};
    codec_ctx->framerate = {fps, 1};
    codec_ctx->gop_size = 10;
    codec_ctx->max_b_frames = 1;
    codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec->id == AV_CODEC_ID_H264)
        av_opt_set(codec_ctx->priv_data, "preset", "ultrafast", 0);

    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        qDebug() << "Could not open codec";
        exit(1);
    }

    avformat_alloc_output_context2(&ctx, NULL, "flv", url);
    if (!ctx) {
        qDebug() << "Could not allocate output context";
        exit(1);
    }

    if (ctx->oformat->video_codec != AV_CODEC_ID_NONE) {
        video_stream = avformat_new_stream(ctx, NULL);
        if (!video_stream) {
            qDebug() << "Could not create new stream";
            exit(1);
        }
        video_stream_index = video_stream->index;

        // Copy the encoder settings into the stream's codec parameters
        AVCodecParameters *codecpar = video_stream->codecpar;
        avcodec_parameters_from_context(codecpar, codec_ctx);
        codecpar->codec_tag = 0;
    }

    av_dump_format(ctx, 0, url, 1);

    if (!(ctx->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&ctx->pb, url, AVIO_FLAG_WRITE) < 0) {
            qDebug() << "Could not open output file";
            exit(1);
        }
    }
    if (avformat_write_header(ctx, NULL) < 0) {
        qDebug() << "Error occurred when opening output file";
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        qDebug() << "Could not allocate video frame";
        exit(1);
    }
    frame->format = codec_ctx->pix_fmt;
    frame->width = codec_ctx->width;
    frame->height = codec_ctx->height;
    if (av_frame_get_buffer(frame, 32) < 0) {
        qDebug() << "Could not allocate the video frame data";
        exit(1);
    }
}

static void encode_and_write_frame(AVFrame *frame)
{
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    ret = avcodec_send_frame(codec_ctx, frame);
    if (ret < 0) {
        qDebug() << "Error sending a frame for encoding";
        exit(1);
    }
    while (ret >= 0) {
        ret = avcodec_receive_packet(codec_ctx, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            qDebug() << "Error during encoding";
            exit(1);
        }
        av_write_frame(ctx, &pkt);
        av_packet_unref(&pkt);
    }
}

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    // Initialize RMMPP
    RK_MPI_SYS_Init();
    RK_MPI_RGN_Init();
    RK_MPI_RGN_Setup();

    // Initialize FFmpeg
    init_ffmpeg("rtmp://example.com/live/stream", 640, 480, 30);

    // Capture an image and encode it to H.264
    while (true) {
        // Stand-in for a real capture: a solid red test frame
        QImage img(640, 480, QImage::Format_RGB888);
        img.fill(Qt::red);

        // Convert to YUV420P
        QByteArray yuv_data;
        yuv_data.resize(640 * 480 * 3 / 2);
        uchar *y_data = (uchar *)yuv_data.data();
        uchar *u_data = y_data + 640 * 480;
        uchar *v_data = u_data + 640 * 480 / 4;
        QImage rgb_image = img.convertToFormat(QImage::Format_RGB888);
        for (int y = 0; y < 480; y++) {
            // Per-pixel luma, BT.601 studio-range fixed-point coefficients
            const uchar *line = rgb_image.scanLine(y);
            for (int x = 0; x < 640; x++) {
                int r = line[x * 3 + 0], g = line[x * 3 + 1], b = line[x * 3 + 2];
                y_data[y * 640 + x] = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
            }
        }
        for (int y = 0; y < 480 / 2; y++) {
            for (int x = 0; x < 640 / 2; x++) {
                // 2x2 subsampling: U from the left pixel, V from the right
                // pixel of each horizontal pair on the even scanline
                int r = *(rgb_image.scanLine(y * 2) + x * 6 + 0);
                int g = *(rgb_image.scanLine(y * 2) + x * 6 + 1);
                int b = *(rgb_image.scanLine(y * 2) + x * 6 + 2);
                *u_data++ = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
                r = *(rgb_image.scanLine(y * 2) + x * 6 + 3);
                g = *(rgb_image.scanLine(y * 2) + x * 6 + 4);
                b = *(rgb_image.scanLine(y * 2) + x * 6 + 5);
                *v_data++ = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
            }
        }

        // Encode to H.264 and write to the output stream
        frame->pts = QDateTime::currentMSecsSinceEpoch() * 90;  // wall-clock based pts
        frame->data[0] = y_data;
        frame->data[1] = u_data - 640 * 480 / 4;
        frame->data[2] = v_data - 640 * 480 / 4;
        encode_and_write_frame(frame);
    }

    // Clean up (unreachable while the capture loop above runs forever)
    av_write_trailer(ctx);
    avcodec_free_context(&codec_ctx);
    av_frame_free(&frame);
    if (!(ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ctx->pb);
    avformat_free_context(ctx);
    RK_MPI_RGN_Exit();
    RK_MPI_SYS_Exit();

    return a.exec();
}
```

This program uses the `RK_MPI_RGN_Setup()` and `RK_MPI_RGN_Exit()` functions to initialize and clean up the Rockchip RMMPP hardware encoder. Note that these functions may differ on your system, depending on the hardware you use. Because the program uses both FFmpeg and Rockchip RMMPP, the following libraries must be linked:

```txt
-lavcodec -lavformat -lavutil -lswscale -lrockface -lrk_mpi -lrk_rga -lrk_vpu
```

Note that this program is provided only as an example; in real use you will need to adapt it to your own requirements.
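The conversion loop above uses the standard BT.601 studio-range fixed-point coefficients. A standalone Python sketch (not part of the original program) to sanity-check them:

```python
def rgb_to_yuv(r, g, b):
    """BT.601 studio-range RGB -> YUV with the same 8-bit fixed-point
    coefficients as the C++ conversion loop above."""
    y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16
    u = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128
    v = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128
    return y, u, v

# Greys land exactly on the no-chroma point (U = V = 128):
print(rgb_to_yuv(0, 0, 0))        # (16, 128, 128)  black -> Y = 16
print(rgb_to_yuv(255, 255, 255))  # (235, 128, 128) white -> Y = 235
print(rgb_to_yuv(255, 0, 0))      # (82, 90, 240)   red: V well above 128, U below
```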