I wrote a small program using FFmpeg's libraries that does the following:

1) Decode a frame.
2) Convert the frame to RGB24.
3) Convert the RGB24 frame back to YUV420P.
4) Encode the YUV420P frame and write it into the output video file.
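For context, the surrounding read/decode/encode loop is roughly this shape (simplified from my actual code; decCtx, encCtx, and ofmt_ctx stand in for my decoder, encoder, and output format contexts, and error handling is omitted):

AVPacket packet, outPacket;
AVFrame *yuvFrame = avcodec_alloc_frame();
AVFrame *rgbFrame = avcodec_alloc_frame();
AVFrame *outFrame = avcodec_alloc_frame();
int gotFrame, gotPacket;

while (av_read_frame(ifmt_ctx, &packet) >= 0) {
    if (packet.stream_index == VIDEO_STREAM_INDEX) {
        // 1) decode a frame
        avcodec_decode_video2(decCtx, yuvFrame, &gotFrame, &packet);
        if (gotFrame) {
            // 2) YUV420P -> RGB24
            convertToRGBFrame(&yuvFrame, &rgbFrame);
            // 3) RGB24 -> YUV420P
            convertToYuvFrame(&rgbFrame, &outFrame);
            // 4) encode and write
            av_init_packet(&outPacket);
            outPacket.data = NULL;
            outPacket.size = 0;
            avcodec_encode_video2(encCtx, &outPacket, outFrame, &gotPacket);
            if (gotPacket)
                av_interleaved_write_frame(ofmt_ctx, &outPacket);
        }
    }
    av_free_packet(&packet);
}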
But the output video is not the same as the input: there are artifacts in the final video (horizontal lines). I also get a warning every time the RGB-to-YUV method is called:

Warning: data is not aligned! This can lead to a speedloss

I suspect something is wrong with my format conversion methods, because when I comment out the conversion steps the output video is identical to the input. If I understand the warning correctly, swscale wants the plane pointers and strides to be 16-byte aligned; with a width of 424 the RGB24 stride is 424 * 3 = 1272 bytes, which is not a multiple of 16, so that may be where the warning comes from.
Following are my methods:
int VideoFileInstance::convertToRGBFrame(AVFrame **yuvframe, AVFrame **rgbPictInfo) {
    int ret;
    int width  = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->width;
    int height = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->height;

    // Destination buffer for the RGB24 pixels.
    int m_bufferSize = avpicture_get_size(PIX_FMT_RGB24, width, height);
    uint8_t *buffer = (uint8_t *)av_malloc(m_bufferSize);

    // Init the conversion context once.
    if (imgConvertCtxYUVToRGB == NULL) {
        imgConvertCtxYUVToRGB = sws_getContext(width, height, PIX_FMT_YUV420P,
                                               width, height, PIX_FMT_RGB24,
                                               SWS_BICUBIC, NULL, NULL, NULL);
        if (imgConvertCtxYUVToRGB == NULL) {
            av_log(NULL, AV_LOG_ERROR, "error creating img context");
            return -1;
        }
    }

    // Point the RGB frame's data/linesize at the new buffer.
    avpicture_fill((AVPicture *)(*rgbPictInfo), buffer, PIX_FMT_RGB24, width, height);

    uint8_t *inData[3] = {
        (*yuvframe)->data[0],
        (*yuvframe)->data[1],
        (*yuvframe)->data[2]
    };
    int destLineSize[1] = { 3 * width };

    ret = sws_scale(imgConvertCtxYUVToRGB, inData, (*yuvframe)->linesize,
                    0, height, (*rgbPictInfo)->data, destLineSize);

    av_free(buffer);
    return ret;
}
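One thing I'm not sure about here: avpicture_fill has already written a stride into (*rgbPictInfo)->linesize, so maybe I should be passing the frame's own arrays to sws_scale instead of my hand-built inData/destLineSize, i.e. something like:

ret = sws_scale(imgConvertCtxYUVToRGB,
                (*yuvframe)->data, (*yuvframe)->linesize, 0, height,
                (*rgbPictInfo)->data, (*rgbPictInfo)->linesize);

I don't know whether that actually changes anything in this case.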
int VideoFileInstance::convertToYuvFrame(AVFrame **rgbFrame, AVFrame **yuvFrame) {
    int ret = 0;
    int width  = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->width;
    int height = ifmt_ctx->streams[VIDEO_STREAM_INDEX]->codec->height;

    // Destination buffer for the YUV420P planes.
    int m_bufferSize = avpicture_get_size(PIX_FMT_YUV420P, width, height);
    uint8_t *buffer = (uint8_t *)av_malloc(m_bufferSize);

    // Init the conversion context once.
    if (imgConvertCtxRGBToYUV == NULL) {
        imgConvertCtxRGBToYUV = sws_getContext(width, height, PIX_FMT_RGB24,
                                               width, height, PIX_FMT_YUV420P,
                                               SWS_BICUBIC, NULL, NULL, NULL);
        if (imgConvertCtxRGBToYUV == NULL) {
            av_log(NULL, AV_LOG_ERROR, "error creating img context");
            return -1;
        }
    }

    // Point the YUV frame's data/linesize at the new buffer.
    avpicture_fill((AVPicture *)(*yuvFrame), buffer, PIX_FMT_YUV420P, width, height);

    sws_scale(imgConvertCtxRGBToYUV, (*rgbFrame)->data, (*rgbFrame)->linesize,
              0, height, (*yuvFrame)->data, (*yuvFrame)->linesize);

    av_free(buffer);
    return ret;
}
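If alignment is the real problem, my understanding is that av_image_alloc() from libavutil/imgutils.h allocates a picture buffer with each stride padded up to a requested alignment, unlike avpicture_fill() over a plain av_malloc() buffer. A sketch of what I have in mind for the RGB buffer (untested):

#include <libavutil/imgutils.h>

uint8_t *rgbData[4];
int rgbLinesize[4];

// Pad each stride up to a 32-byte multiple so the plane pointers and
// strides meet swscale's alignment expectations.
int size = av_image_alloc(rgbData, rgbLinesize, width, height, PIX_FMT_RGB24, 32);
if (size < 0) {
    // handle allocation failure
}

// ... pass rgbData / rgbLinesize to sws_scale instead of the
// avpicture_fill'ed frame ...

av_freep(&rgbData[0]);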
The dimensions of the input video are 424 x 200. Is there anything wrong with my conversion functions?