Previously, I was reading an entire audio file in one go, getting at the audio samples with CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer.
Now I want to do the same thing, but in time ranges (i.e. I specify a range of time, read a small chunk of the audio by time, then go back and read more). The reason I want to use time ranges is because I want to control the size of each read (to fit in a packet with a max size).
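For reference, the whole-file version looked roughly like this (a sketch, reusing the songAsset/track ivars set up below, not my exact old code):

// sketch of the previous whole-file read: one reader over the entire
// asset, no timeRange, draining copyNextSampleBuffer until it returns NULL
AVAssetReader *wholeFileReader = [[AVAssetReader alloc] initWithAsset:songAsset error:nil];
AVAssetReaderTrackOutput *output = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track
                                                                              outputSettings:nil];
[wholeFileReader addOutput:output];
[wholeFileReader startReading];

CMSampleBufferRef sampleBuffer;
while ((sampleBuffer = [output copyNextSampleBuffer])) {
    AudioBufferList audioBufferList;
    CMBlockBufferRef blockBuffer;
    CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer,
                                                            NULL,
                                                            &audioBufferList,
                                                            sizeof(audioBufferList),
                                                            NULL,
                                                            NULL,
                                                            kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
                                                            &blockBuffer);
    // ... consume audioBufferList.mBuffers[0] ...
    CFRelease(blockBuffer);
    CFRelease(sampleBuffer);
}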
For some reason, there is always a glitch between each read. In my code you will notice that I start the AVAssetReader and end it every time I set a time range, and that's because I cannot dynamically adjust the time range after the reader has started (see here for more details).
Could it be that starting and ending a reader is simply too expensive to produce a continuous, real-time listening experience? Or is there another way of doing this that I am not aware of?
Also note that this jitter or lag happens no matter what I set the time interval to be, which leads me to believe that starting and ending a reader the way I do is too expensive for real-time audio playback.
- (void)setupReader
{
    NSURL *assetURL = [NSURL URLWithString:@"ipod-library://item/item.m4a?id=1053020204400037178"];
    songAsset = [AVURLAsset URLAssetWithURL:assetURL options:nil];
    track = [songAsset.tracks objectAtIndex:0];
    nativeTrackASBD = [self getTrackNativeSettings:track];

    // set CM time parameters
    assetCMTime = songAsset.duration;
    CMTimeReadDurationInSeconds = CMTimeMakeWithSeconds(1, assetCMTime.timescale);
    currentCMTime = CMTimeMake(0, assetCMTime.timescale);
}
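In case it matters: getTrackNativeSettings: isn't shown here; it roughly just pulls the track's native AudioStreamBasicDescription out of its first format description, something like this sketch:

// rough equivalent of my getTrackNativeSettings: helper -- the track's
// first format description carries the native ASBD
- (AudioStreamBasicDescription)getTrackNativeSettings:(AVAssetTrack *)aTrack
{
    CMAudioFormatDescriptionRef formatDesc =
        (__bridge CMAudioFormatDescriptionRef)[aTrack.formatDescriptions objectAtIndex:0];
    const AudioStreamBasicDescription *asbd =
        CMAudioFormatDescriptionGetStreamBasicDescription(formatDesc);
    return *asbd;
}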
- (void)readVBRPackets
{
    // keep reading one-second chunks until currentCMTime passes the asset's end
    while (CMTimeCompare(assetCMTime, currentCMTime) == 1)
    {
        NSError *error = nil;
        reader = [[AVAssetReader alloc] initWithAsset:songAsset error:&error];
        readerOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track
                                                                  outputSettings:nil];
        [reader addOutput:readerOutput];

        // the time range has to be set before startReading and can't be
        // changed afterwards, hence the fresh reader on every pass
        reader.timeRange = CMTimeRangeMake(currentCMTime, CMTimeReadDurationInSeconds);
        [reader startReading];

        while ((sample = [readerOutput copyNextSampleBuffer])) {
            CMItemCount numSamples = CMSampleBufferGetNumSamples(sample);
            if (numSamples == 0) {
                continue;
            }
            NSLog(@"reading sample");

            // get an AudioBufferList pointing into the sample's data
            CMBlockBufferRef CMBuffer;
            AudioBufferList audioBufferList;
            CheckError(CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
                           sample,
                           NULL,
                           &audioBufferList,
                           sizeof(audioBufferList),
                           NULL,
                           NULL,
                           kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
                           &CMBuffer),
                       "could not get audio buffer list from sample");

            const AudioStreamPacketDescription *inPacketDescriptions;
            size_t packetDescriptionsSizeOut;
            size_t inNumberPackets;
            CheckError(CMSampleBufferGetAudioStreamPacketDescriptionsPtr(sample,
                                                                         &inPacketDescriptions,
                                                                         &packetDescriptionsSizeOut),
                       "could not read sample packet descriptions");
            inNumberPackets = packetDescriptionsSizeOut / sizeof(AudioStreamPacketDescription);

            AudioBuffer audioBuffer = audioBufferList.mBuffers[0];
            for (int i = 0; i < inNumberPackets; ++i)
            {
                SInt64 dataOffset = inPacketDescriptions[i].mStartOffset;
                UInt32 packetSize = inPacketDescriptions[i].mDataByteSize;
                size_t packetSpaceRemaining = bufferByteSize - bytesFilled;

                // if the space remaining in the buffer is not enough for
                // the data contained in this packet, ship the buffer first
                if (packetSpaceRemaining < packetSize)
                {
                    [self enqueueBuffer];
                }

                // copy data to the audio queue buffer
                AudioQueueBufferRef fillBuf = audioQueueBuffers[fillBufferIndex];
                memcpy((char *)fillBuf->mAudioData + bytesFilled,
                       (const char *)audioBuffer.mData + dataOffset, packetSize);

                // fill out the packet description, rebased to the
                // buffer-local offset
                packetDescs[packetsFilled] = inPacketDescriptions[i];
                packetDescs[packetsFilled].mStartOffset = bytesFilled;
                bytesFilled += packetSize;
                packetsFilled += 1;

                // if we're out of packet descriptions, ship the buffer
                size_t packetDescsRemaining = kAQMaxPacketDescs - packetsFilled;
                if (packetDescsRemaining == 0) {
                    [self enqueueBuffer];
                }
            }

            CFRelease(CMBuffer);
            CMSampleBufferInvalidate(sample);
            CFRelease(sample);
        }

        [reader cancelReading];
        reader = nil;
        readerOutput = nil;

        // advance to the next chunk
        currentCMTime = CMTimeAdd(currentCMTime, CMTimeReadDurationInSeconds);
    }
}
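For completeness, this is roughly how I drive it (a sketch; the audio queue and its buffers are set up elsewhere):

[self setupReader];

// read off the main thread so the UI stays responsive; readVBRPackets
// loops over one-second chunks until it reaches the end of the asset
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
    [self readVBRPackets];
});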