I decode video via libavcodec, using the following code:
// Open the input file
if (avformat_open_input(&ctx, filename, NULL, NULL) != 0)
    return FALSE; // Couldn't open file
if (avformat_find_stream_info(ctx, NULL) < 0)
    return FALSE; // Couldn't find stream information
videoStream = -1;
// Find the first video stream
for (i = 0; i < ctx->nb_streams; i++)
{
    if (ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        videoStream = i;
        break;
    }
}
if (videoStream == -1)
    return FALSE; // Didn't find a video stream
video_codec_ctx = ctx->streams[videoStream]->codec;
// Find a decoder for the stream
video_codec = avcodec_find_decoder(video_codec_ctx->codec_id);
if (video_codec == NULL)
    return FALSE; // Codec not found
if (avcodec_open2(video_codec_ctx, video_codec, NULL) < 0)
    return FALSE; // Could not open codec
video_frame = avcodec_alloc_frame();
scaled_frame = avcodec_alloc_frame();
// sws_scale() below writes into scaled_frame, so its picture buffer
// must be allocated first; otherwise its data pointers are NULL
if (avpicture_alloc((AVPicture *)scaled_frame, dst_pix_fmt,
                    video_codec_ctx->width, video_codec_ctx->height) < 0)
    return FALSE;
static struct SwsContext *img_convert_ctx;
if (img_convert_ctx == NULL)
{
    int w = video_codec_ctx->width;
    int h = video_codec_ctx->height;
    img_convert_ctx = sws_getContext(w, h, video_codec_ctx->pix_fmt,
                                     w, h, dst_pix_fmt, SWS_BICUBIC,
                                     NULL, NULL, NULL);
    if (img_convert_ctx == NULL)
    {
        fprintf(stderr, "Cannot initialize the conversion context!\n");
        return FALSE;
    }
}
while (b_play)
{
    if (av_read_frame(ctx, &packet) < 0)
    {
        break;
    }
    if (packet.stream_index == videoStream)
    {
        // Decode the video frame
        avcodec_decode_video2(video_codec_ctx, video_frame, &frameFinished,
                              &packet);
        // Did we get a complete video frame?
        if (frameFinished)
        {
            // Convert to the target pixel format only when needed
            if (video_codec_ctx->pix_fmt != dst_pix_fmt)
            {
                sws_scale(img_convert_ctx, video_frame->data,
                          video_frame->linesize, 0,
                          video_codec_ctx->height,
                          scaled_frame->data, scaled_frame->linesize);
            }
        }
    }
    av_free_packet(&packet);
}
The code works correctly, but every decoded frame has to be converted to the required pixel format. Is it possible to set the pixel format for decoding, so that the decoder outputs the correct format directly, without sws_scale?
Many thanks for your answers.