Over the weekend I hit a stumbling block while learning how to program audio synthesis on iOS. I have been developing on iOS for several years, but I am just getting into the audio synthesis side of things. Right now I am only writing demo apps to help me learn the concepts. So far I have been able to build and stack sine waves in an Audio Unit playback render callback without a problem. But I want to understand what is going on in the renderer so that I can render two separate sine waves, one in the left channel and one in the right channel. My assumption is that in my audio init section I would need to make the following change:
From:
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = kSampleRate;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
To:
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = kSampleRate;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 2;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 4;
audioFormat.mBytesPerFrame = 4;
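If I am reading the format flags right, since I am not setting kAudioFormatFlagIsNonInterleaved this should describe interleaved data, so I would expect the render callback to receive a single buffer of left/right sample pairs rather than one buffer per channel. That is an assumption on my part, and the names below (frame, leftSample, rightSample) are just for illustration:

// my assumption about how one interleaved SInt16 stereo buffer is laid out:
// [L0][R0][L1][R1][L2][R2]...  -- one frame = one left sample followed by one right sample
SInt16 *out = (SInt16 *)ioData->mBuffers[0].mData;
out[2 * frame]     = leftSample;   // left-channel sample of this frame
out[2 * frame + 1] = rightSample;  // right-channel sample of this frame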
But the renderer is somewhat Greek to me. I have been working off of whatever tutorial or sample code I can find. I can make things work in the given context of a mono signal, but I cannot make the renderer generate a stereo signal. All I want is one distinct frequency in the left channel and a different frequency in the right channel, but I honestly don't understand the renderer well enough to get it working. I have attempted a memcpy into mBuffers[0] and mBuffers[1], but that crashes the app. My renderer is below (it currently contains stacked sine waves, but for the stereo example I can just use one wave of a set frequency in each channel).
#define kOutputBus 0
#define kSampleRate 44100 // 44100.0f
#define kWaveform (M_PI * 2.0f / kSampleRate)

OSStatus playbackCallback(void *inRefCon,
                          AudioUnitRenderActionFlags *ioActionFlags,
                          const AudioTimeStamp *inTimeStamp,
                          UInt32 inBusNumber,
                          UInt32 inNumberFrames,
                          AudioBufferList *ioData) {
    HomeViewController *me = (HomeViewController *)inRefCon;
    static int phase = 1;
    static int phase1 = 1;

    for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
        int samples = ioData->mBuffers[i].mDataByteSize / sizeof(SInt16);
        SInt16 values[samples];
        float waves;
        float volume = .5;
        float wave1;

        for (int j = 0; j < samples; j++) {
            waves = 0;
            wave1 = 0;
            MyManager *sharedManager = [MyManager sharedManager];

            // the first oscillator gets its own phase counter so I can reset it near a zero crossing
            wave1 = sin(kWaveform * sharedManager.globalFr1 * phase1) * sharedManager.globalVol1;
            if (0.000001f > wave1) {
                [me setFr1:sharedManager.globalFr1];
                phase1 = 0;
                //NSLog(@"switch");
            }
            waves += wave1;

            // the remaining oscillators all share one phase counter
            waves += sin(kWaveform * sharedManager.globalFr2 * phase) * sharedManager.globalVol2;
            waves += sin(kWaveform * sharedManager.globalFr3 * phase) * sharedManager.globalVol3;
            waves += sin(kWaveform * sharedManager.globalFr4 * phase) * sharedManager.globalVol4;
            waves += sin(kWaveform * sharedManager.globalFr5 * phase) * sharedManager.globalVol5;
            waves += sin(kWaveform * sharedManager.globalFr6 * phase) * sharedManager.globalVol6;
            waves += sin(kWaveform * sharedManager.globalFr7 * phase) * sharedManager.globalVol7;
            waves += sin(kWaveform * sharedManager.globalFr8 * phase) * sharedManager.globalVol8;
            waves += sin(kWaveform * sharedManager.globalFr9 * phase) * sharedManager.globalVol9;

            waves *= 32767 / 9; // <--------- make sure to divide by how many waves you're stacking
            values[j] = (SInt16)waves;
            values[j] += values[j] << 16; // from the sample code I started with -- not sure this actually does anything
            phase++;
            phase1++;
        }
        memcpy(ioData->mBuffers[i].mData, values, samples * sizeof(SInt16));
    }
    return noErr;
}
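For reference, here is roughly the stereo loop I have been trying to write: one fixed frequency per channel, assuming the interleaved 16-bit stereo format above. The 440/554 Hz values, the 0.5 volume, and the simple per-channel phase counters are just placeholders for this example, and I am not sure this is the right way to fill the buffer:

OSStatus stereoPlaybackCallback(void *inRefCon,
                                AudioUnitRenderActionFlags *ioActionFlags,
                                const AudioTimeStamp *inTimeStamp,
                                UInt32 inBusNumber,
                                UInt32 inNumberFrames,
                                AudioBufferList *ioData) {
    static double phaseL = 0;
    static double phaseR = 0;
    const float freqL = 440.0f; // placeholder frequency for the left channel
    const float freqR = 554.0f; // placeholder frequency for the right channel

    // with an interleaved format I expect a single buffer of L/R pairs
    SInt16 *out = (SInt16 *)ioData->mBuffers[0].mData;

    for (UInt32 frame = 0; frame < inNumberFrames; frame++) {
        SInt16 left  = (SInt16)(sin(kWaveform * freqL * phaseL) * 32767 * 0.5);
        SInt16 right = (SInt16)(sin(kWaveform * freqR * phaseR) * 32767 * 0.5);
        out[2 * frame]     = left;   // left sample of this frame
        out[2 * frame + 1] = right;  // right sample of this frame
        phaseL++;
        phaseR++;
    }
    return noErr;
}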
Thanks in advance for any help!