I have been working with AVFoundation. I have configured the video capture (video settings) and receive the output sample buffer in kCVPixelFormatType_420YpCbCr8BiPlanarFullRange format. However, I need the frames in YUV420 format for further processing. For that I use the libyuv framework:
LIBYUV_API
int NV12ToI420(const uint8* src_y, int src_stride_y,
               const uint8* src_uv, int src_stride_uv,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height);
libyuv::NV12ToI420(src_yplane, inWidth,
                   src_uvplane, inWidth,
                   dst_yplane, inWidth,
                   dst_vplane, inWidth / 2,
                   dst_uplane, inWidth / 2,
                   inWidth, inHeight);
But the output buffer I get is completely green. Am I doing something wrong in this process? Please help.
Looks right. Make sure your src_uvplane points to src_yplane + inWidth * inHeight.
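As a minimal sketch of that fix (pointer and size names taken from the question; raw_frame, a contiguous NV12 input buffer, is my assumption), the interleaved UV plane starts immediately after the Y plane. Note also that NV12ToI420 takes dst_u before dst_v, so the question's call as written fills the chroma planes in YV12 order:

const uint8* src_yplane  = raw_frame;                        // hypothetical contiguous NV12 frame
const uint8* src_uvplane = src_yplane + inWidth * inHeight;  // UV plane follows the Y plane

libyuv::NV12ToI420(src_yplane,  inWidth,
                   src_uvplane, inWidth,      // NV12's interleaved UV plane has stride == width
                   dst_yplane,  inWidth,
                   dst_uplane,  inWidth / 2,  // dst_u comes before dst_v in the signature
                   dst_vplane,  inWidth / 2,
                   inWidth, inHeight);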
You need to convert your data to I420. I have dealt with camera frames too, but on Android; it should be similar on iOS. Android's raw camera format is NV21 or NV16, and I convert from NV21 or NV16 to YV12. I420 is almost identical to YV12 (only the U and V planes are swapped):
#include <stdint.h>

typedef uint8_t BYTE;

// BIG_VIDEO_CX / BIG_VIDEO_CY are the frame dimensions, defined elsewhere.
BYTE m_y[BIG_VIDEO_CX * BIG_VIDEO_CY],
     m_u[(BIG_VIDEO_CX/2) * (BIG_VIDEO_CY/2)],
     m_v[(BIG_VIDEO_CX/2) * (BIG_VIDEO_CY/2)];
BYTE *m_y2;  // the Y plane is referenced in place rather than copied

void NV21_TO_YV12(BYTE *data)
{
    int width = BIG_VIDEO_CX;
    int height = BIG_VIDEO_CY;
    m_y2 = data;                   // Y plane comes first: width*height bytes
    data = &data[width * height];  // interleaved VU plane follows
    for (uint32_t i = 0; i < (width/2)*(height/2); ++i)
    {
        m_v[i] = *data;        // NV21 stores V first...
        m_u[i] = *(data + 1);  // ...then U
        data += 2;
    }
}
void NV16_TO_YV12(BYTE *data)
{
    int width = BIG_VIDEO_CX;
    int height = BIG_VIDEO_CY;
    m_y2 = data;  // Y plane comes first: width*height bytes
    const BYTE* src_uv = (const BYTE*)&data[width*height];  // interleaved UV rows follow
    BYTE* dst_u = m_u;
    BYTE* dst_v = m_v;
    // NV16 has full vertical chroma resolution, so average each pair of
    // UV rows down to one 4:2:0 row.
    for (uint32_t y = 0; y < height/2; ++y)
    {
        const BYTE* src_uv2 = src_uv + width;  // next UV row (stride == width)
        for (uint32_t x = 0; x < width/2; ++x)
        {
            dst_u[x] = (src_uv[0] + src_uv2[0] + 1) >> 1;  // rounded average
            dst_v[x] = (src_uv[1] + src_uv2[1] + 1) >> 1;
            src_uv += 2;
            src_uv2 += 2;
        }
        src_uv = src_uv2;  // skip the row that was just merged
        dst_u += width/2;
        dst_v += width/2;
    }
}
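For reference, the handwritten NV21 loop above can be replaced by a single libyuv call. This is only a sketch, reusing data, width, and height from the function above and writing I420 directly into the m_y/m_u/m_v buffers:

#include "libyuv/convert.h"

libyuv::NV21ToI420(/*src_y=*/data, /*src_stride_y=*/width,
                   /*src_vu=*/data + width * height, /*src_stride_vu=*/width,
                   /*dst_y=*/m_y, /*dst_stride_y=*/width,
                   /*dst_u=*/m_u, /*dst_stride_u=*/width / 2,
                   /*dst_v=*/m_v, /*dst_stride_v=*/width / 2,
                   width, height);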
Android's native format is NV21, which libyuv supports with optimizations for both ARM and Intel. It can also rotate the frame by 90, 180, or 270 degrees as part of the conversion if that is needed for orientation (see the sketch after the numbers below). The ARM-optimized version is roughly 2x faster than the C version:
C:
NV12ToI420_Opt (782 ms)
NV21ToI420_Opt (764 ms)

ARM (NEON optimized):
NV12ToI420_Opt (398 ms)
NV21ToI420_Opt (381 ms)
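A minimal sketch of folding a rotation into the conversion with libyuv's NV12ToI420Rotate, here for a 90-degree rotation (pointer and stride names are assumptions; the destination strides are based on the source height because width and height swap):

#include "libyuv/rotate.h"

libyuv::NV12ToI420Rotate(src_y,  src_stride_y,
                         src_uv, src_stride_uv,
                         dst_y,  height,      // rotated output is 'height' pixels wide
                         dst_u,  height / 2,
                         dst_v,  height / 2,
                         width, height,       // dimensions of the *source* frame
                         libyuv::kRotate90);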
Curious that you are using NV16 on Android; I would have expected NV61 for consistency with NV21. Your code looks correct, but the averaging would optimize nicely into NEON using vrhadd.u8. File a libyuv issue if you would like to see that: https://code.google.com/p/libyuv/issues/list
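To illustrate that remark, here is a sketch (my assumption, not libyuv code) of how one output row of the NV16 averaging loop could vectorize with NEON intrinsics; vrhaddq_u8 is the intrinsic form of vrhadd.u8, and the sketch assumes width is a multiple of 32:

#include <arm_neon.h>

// Average one pair of interleaved UV rows into one U row and one V row.
static void AverageUVRows_NEON(const BYTE* src_uv, const BYTE* src_uv2,
                               BYTE* dst_u, BYTE* dst_v, int width)
{
    for (int x = 0; x < width / 2; x += 16)  // 16 U/V pairs (32 bytes) per iteration
    {
        uint8x16x2_t row0 = vld2q_u8(src_uv  + 2 * x);  // de-interleave: val[0]=U, val[1]=V
        uint8x16x2_t row1 = vld2q_u8(src_uv2 + 2 * x);
        // vrhaddq_u8 computes (a + b + 1) >> 1, matching the scalar rounding above
        vst1q_u8(dst_u + x, vrhaddq_u8(row0.val[0], row1.val[0]));
        vst1q_u8(dst_v + x, vrhaddq_u8(row0.val[1], row1.val[1]));
    }
}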
Here is how I do it on iOS, in my captureOutput callback, after I get a raw video frame from AVCaptureSession (configured for kCVPixelFormatType_420YpCbCr8BiPlanarFullRange):
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef videoFrame = CMSampleBufferGetImageBuffer(sampleBuffer);
    CFRetain(sampleBuffer);
    CVPixelBufferLockBaseAddress(videoFrame, 0);
    size_t _width = CVPixelBufferGetWidth(videoFrame);
    size_t _height = CVPixelBufferGetHeight(videoFrame);
    const uint8* plane1 = (const uint8*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, 0);  // Y plane
    const uint8* plane2 = (const uint8*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, 1);  // interleaved UV plane
    size_t plane1_stride = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, 0);
    size_t plane2_stride = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, 1);
    size_t plane1_size = plane1_stride * CVPixelBufferGetHeightOfPlane(videoFrame, 0);
    size_t plane2_size = plane2_stride * CVPixelBufferGetHeightOfPlane(videoFrame, 1);
    size_t frame_size = plane1_size + plane2_size;
    uint8* buffer = new uint8[frame_size];
    uint8* dst_u = buffer + plane1_size;     // U plane follows Y in the I420 output
    uint8* dst_v = dst_u + plane1_size / 4;  // V plane follows U
    // Let libyuv convert
    libyuv::NV12ToI420(/*const uint8* src_y=*/plane1, /*int src_stride_y=*/(int)plane1_stride,
                       /*const uint8* src_uv=*/plane2, /*int src_stride_uv=*/(int)plane2_stride,
                       /*uint8* dst_y=*/buffer, /*int dst_stride_y=*/(int)plane1_stride,
                       /*uint8* dst_u=*/dst_u, /*int dst_stride_u=*/(int)plane2_stride / 2,
                       /*uint8* dst_v=*/dst_v, /*int dst_stride_v=*/(int)plane2_stride / 2,
                       (int)_width, (int)_height);
    CVPixelBufferUnlockBaseAddress(videoFrame, 0);
    CFRelease(sampleBuffer);
    // TODO: call your method here with the 'buffer' variable. Note that you need
    // to deallocate the buffer after using it.
}
I made the code a bit more descriptive for clarity.
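On the TODO about deallocation: one option (my suggestion, not part of the original answer) is to pass ownership of the buffer through a smart pointer so the delete[] cannot be forgotten:

#include <memory>

// Hypothetical consumer that takes ownership of the I420 frame built above.
void ProcessI420Frame(std::unique_ptr<uint8[]> frame, size_t width, size_t height)
{
    // ... work with frame.get() ...
}   // the buffer is delete[]'d automatically when 'frame' goes out of scope

// In captureOutput, in place of the TODO:
//   ProcessI420Frame(std::unique_ptr<uint8[]>(buffer), _width, _height);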
Source: How to convert a kCVPixelFormatType_420YpCbCr8BiPlanarFullRange Buffer to YUV420 using libyuv library in ios?