diff --git a/AudioDSPUtils/Novocaine.h b/AudioDSPUtils/Novocaine.h
index 12cb253..ab18709 100644
--- a/AudioDSPUtils/Novocaine.h
+++ b/AudioDSPUtils/Novocaine.h
@@ -71,7 +71,7 @@ typedef void (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
@interface Novocaine : NSObject
-@property AudioUnit inputUnit;
+@property AudioUnit audioUnit;
@property AudioBufferList *inputBuffer;
@property (nonatomic, copy) OutputBlock outputBlock;
@property (nonatomic, copy) InputBlock inputBlock;
@@ -79,7 +79,7 @@ typedef void (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels);
@property (nonatomic, retain) NSString *inputRoute;
@property UInt32 numInputChannels;
@property UInt32 numOutputChannels;
-@property Float64 samplingRate;
+@property (readonly) Float64 samplingRate;
@property BOOL isInterleaved;
@property BOOL isSetUp;
@property UInt32 numBytesPerSample;
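
With `samplingRate` now read-only in the header (and redeclared read-write in the class extension in Novocaine.m), callers can read the hardware-derived rate but can no longer assign it. A minimal sketch of what that looks like from the Swift side, assuming the usual bridging of `+audioManager` used elsewhere in this repo:

```swift
if let manager = Novocaine.audioManager() {
    let fs = manager.samplingRate       // OK: read the session-derived rate
    // manager.samplingRate = 48000.0   // no longer compiles: the setter is private to Novocaine.m
    print("Audio hardware running at \(fs) Hz")
}
```
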
diff --git a/AudioDSPUtils/Novocaine.m b/AudioDSPUtils/Novocaine.m
index 12b3f42..9b40a66 100644
--- a/AudioDSPUtils/Novocaine.m
+++ b/AudioDSPUtils/Novocaine.m
@@ -24,6 +24,7 @@
//
// TODO:
// Switching mic and speaker on/off
+// Updating audio route from user
//
// HOUSEKEEPING AND NICE FEATURES:
// Disambiguate outputFormat (the AUHAL's stream format)
@@ -62,6 +63,7 @@ @interface Novocaine()
@property (nonatomic, strong) NSTimer *audioFileTimer;
@property (nonatomic) float *outputBuffer;
@property float phaseIncrement;
+@property Float64 samplingRate;
- (void)setupAudio;
@@ -92,16 +94,6 @@ + (Novocaine *) audioManager
return _sharedInstance;
}
-//+ (id)allocWithZone:(NSZone *)zone {
-// @synchronized(self) {
-// if (audioManager == nil) {
-// audioManager = [super allocWithZone:zone];
-// return audioManager; // assignment and return on first allocation
-// }
-// }
-// return nil; // on subsequent allocation attempts return nil
-//}
-
- (id)init
{
@@ -112,11 +104,14 @@ - (id)init
_outputBlock = nil;
_inputBlock = nil;
+ AVAudioSession *session = [AVAudioSession sharedInstance];
+ self.samplingRate = session.sampleRate; // need this setup right away
+
// Initialize a float buffer to hold audio
_inData = (float *)calloc(8192, sizeof(float)); // probably more than we'll need
_outData = (float *)calloc(8192, sizeof(float));
- _outputBuffer = (float *)calloc(2*44100.0, sizeof(float));
+ _outputBuffer = (float *)calloc(2*self.samplingRate, sizeof(float));
pthread_mutex_init(&outputAudioFileLock, NULL);
_playing = NO;
@@ -210,6 +205,68 @@ -(void)setOutputBlockToPlaySineWave:(float)frequency{
}];
}
+-(void)setAudioRoute{
+ // NSArray* inputs = [session availableInputs];
+ //
+ // // Locate the Port corresponding to the built-in microphone.
+ // AVAudioSessionPortDescription* builtInMicPort = nil;
+ // for (AVAudioSessionPortDescription* port in inputs)
+ // {
+ // if ([port.portType isEqualToString:AVAudioSessionPortBuiltInMic])
+ // {
+ // builtInMicPort = port;
+ // break;
+ // }
+ // }
+ //
+ // // Print out a description of the data sources for the built-in microphone
+ // NSLog(@"There are %u data sources for port :\"%@\"", (unsigned)[builtInMicPort.dataSources count], builtInMicPort);
+ // NSLog(@"%@", builtInMicPort.dataSources);
+ //
+ // // loop over the built-in mic's data sources and attempt to locate the front microphone
+ // AVAudioSessionDataSourceDescription* frontDataSource = nil;
+ // for (AVAudioSessionDataSourceDescription* source in builtInMicPort.dataSources)
+ // {
+ // // other options:
+ // // AVAudioSessionOrientation( Top | {Front} | Back | Bottom )
+ // if ([source.orientation isEqual:AVAudioSessionOrientationBottom])
+ // {
+ // frontDataSource = source;
+ // break;
+ // }
+ // } // end data source iteration
+ //
+ // if (frontDataSource)
+ // {
+ // NSLog(@"Currently selected source is \"%@\" for port \"%@\"", builtInMicPort.selectedDataSource.dataSourceName, builtInMicPort.portName);
+ // NSLog(@"Attempting to select source \"%@\" on port \"%@\"", frontDataSource, builtInMicPort.portName);
+ //
+ // // Set a preference for the front data source.
+ // error = nil;
+ // if (![builtInMicPort setPreferredDataSource:frontDataSource error:&error])
+ // {
+ // // an error occurred.
+ // NSLog(@"setPreferredDataSource failed");
+ // }
+ // }
+ // else{
+ // NSLog(@"Front Data Source is nil, cannot change source.");
+ // }
+ //
+ // // Make sure the built-in mic is selected for input. This will be a no-op if the built-in mic is
+ // // already the current input Port.
+ // error = nil;
+ // if(![session setPreferredInput:builtInMicPort error:&error]){
+ // NSLog(@"%@ Couldn't set mic as preferred port %@",
+ // NSStringFromSelector(_cmd), [error localizedDescription]);
+ // @throw error;
+ // }
+
+ // Add a property listener, to listen to changes to the Route of Audio Input
+ // NSNotificationCenter *nc = [NSNotificationCenter defaultCenter];
+ // [nc addObserver:self selector:@selector(audioRouteChangedListener:) name:AVAudioSessionRouteChangeNotification object:nil];
+}
+
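
`setAudioRoute` is a placeholder for now; the commented-out block above shows the intended AVAudioSession calls. For reference, a hedged Swift sketch of the same idea (not part of this patch; the function name is illustrative):

```swift
import AVFoundation

// Sketch only: prefer the built-in (bottom-oriented) microphone, mirroring the commented-out code above.
func preferBuiltInMic() {
    let session = AVAudioSession.sharedInstance()
    // Locate the built-in microphone among the available inputs.
    guard let builtInMic = session.availableInputs?.first(where: { $0.portType == .builtInMic }) else {
        print("Built-in mic not found; cannot change source.")
        return
    }
    // Optionally prefer one of its data sources (e.g. the bottom mic).
    if let bottom = builtInMic.dataSources?.first(where: { $0.orientation == .bottom }) {
        try? builtInMic.setPreferredDataSource(bottom)
    }
    // Make the built-in mic the preferred input; this is a no-op if it is already selected.
    do { try session.setPreferredInput(builtInMic) }
    catch { print("Couldn't set mic as preferred port: \(error.localizedDescription)") }
}
```
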
#pragma mark - Audio Methods
@@ -243,7 +300,7 @@ - (void) teardownAudio {
// Set the audio session to not active
if(![[AVAudioSession sharedInstance] setActive:NO error:&error]){
- NSLog(@"%@ Couldn't activate audio session %@",
+ NSLog(@"%@ Couldn't deactivate audio session %@",
NSStringFromSelector(_cmd), [error localizedDescription]);
@throw error;
}
@@ -256,9 +313,9 @@ - (void) teardownAudio {
// CheckError( AudioSessionRemovePropertyListenerWithUserData(kAudioSessionProperty_AudioRouteChange, sessionPropertyListener, self), "Couldn't remove audio session property listener");
// Uninitialize and dispose the audio input unit
- CheckError( AudioUnitUninitialize(self.inputUnit), "Couldn't uninitialize audio input unit");
- CheckError( AudioComponentInstanceDispose(self.inputUnit), "Couldn't dispose of audio input unit");
- self.inputUnit = nil;
+ CheckError( AudioUnitUninitialize(self.audioUnit), "Couldn't uninitialize audio input unit");
+ CheckError( AudioComponentInstanceDispose(self.audioUnit), "Couldn't dispose of audio input unit");
+ self.audioUnit = nil;
_isSetUp = NO;
@@ -300,78 +357,25 @@ - (void)setupAudio
// Code inserted by Eric Larson for setting audio route
// Get the set of available inputs. If there are no audio accessories attached, there will be
// only one available input -- the built in microphone.
-// NSArray* inputs = [session availableInputs];
-//
-// // Locate the Port corresponding to the built-in microphone.
-// AVAudioSessionPortDescription* builtInMicPort = nil;
-// for (AVAudioSessionPortDescription* port in inputs)
-// {
-// if ([port.portType isEqualToString:AVAudioSessionPortBuiltInMic])
-// {
-// builtInMicPort = port;
-// break;
-// }
-// }
-//
-// // Print out a description of the data sources for the built-in microphone
-// NSLog(@"There are %u data sources for port :\"%@\"", (unsigned)[builtInMicPort.dataSources count], builtInMicPort);
-// NSLog(@"%@", builtInMicPort.dataSources);
-//
-// // loop over the built-in mic's data sources and attempt to locate the front microphone
-// AVAudioSessionDataSourceDescription* frontDataSource = nil;
-// for (AVAudioSessionDataSourceDescription* source in builtInMicPort.dataSources)
-// {
-// // other options:
-// // AVAudioSessionOrientation( Top | {Front} | Back | Bottom )
-// if ([source.orientation isEqual:AVAudioSessionOrientationBottom])
-// {
-// frontDataSource = source;
-// break;
-// }
-// } // end data source iteration
-//
-// if (frontDataSource)
-// {
-// NSLog(@"Currently selected source is \"%@\" for port \"%@\"", builtInMicPort.selectedDataSource.dataSourceName, builtInMicPort.portName);
-// NSLog(@"Attempting to select source \"%@\" on port \"%@\"", frontDataSource, builtInMicPort.portName);
-//
-// // Set a preference for the front data source.
-// error = nil;
-// if (![builtInMicPort setPreferredDataSource:frontDataSource error:&error])
-// {
-// // an error occurred.
-// NSLog(@"setPreferredDataSource failed");
-// }
-// }
-// else{
-// NSLog(@"Front Data Source is nil, cannot change source.");
-// }
-//
-// // Make sure the built-in mic is selected for input. This will be a no-op if the built-in mic is
-// // already the current input Port.
-// error = nil;
-// if(![session setPreferredInput:builtInMicPort error:&error]){
-// NSLog(@"%@ Couldn't set mic as preferred port %@",
-// NSStringFromSelector(_cmd), [error localizedDescription]);
-// @throw error;
-// }
-
- // Add a property listener, to listen to changes to the Route of Audio Input
-// NSNotificationCenter *nc = [NSNotificationCenter defaultCenter];
-// [nc addObserver:self selector:@selector(audioRouteChangedListener:) name:AVAudioSessionRouteChangeNotification object:nil];
+ [self setAudioRoute]; // currently does nothing
+
// Set the buffer size, this will affect the number of samples that get rendered every time the audio callback is fired
// A small number will get you lower latency audio, but will make your processor work harder
-#if !TARGET_IPHONE_SIMULATOR
- Float32 preferredBufferSize = 0.0232;
- [session setPreferredIOBufferDuration:preferredBufferSize error:&error];
- //CheckError( AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize), "Couldn't set the preferred buffer duration");
-#endif
+ Float32 preferredBufferSize = 1024.0/self.samplingRate; // 1024/44100 = 0.0232
+ [session setPreferredIOBufferDuration:preferredBufferSize error:&error];
+ if(error!=nil){
+ NSLog(@"Could not set preferred buffer duration, Error: %@",error.localizedDescription);
+ }
+
+ [session setPreferredSampleRate: self.samplingRate error:&error];
+ if(error!=nil){
+ NSLog(@"Could not set preferred sample rate, Error: %@",error.localizedDescription);
+ }
- [self checkSessionProperties];
-
+ [self checkSessionProperties];
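
The buffer-duration request above targets roughly 1024 frames per callback at whatever rate the session reports. A small Swift sketch of the same request (the session treats both calls as hints, so downstream code should always use the frame count actually passed to the callbacks):

```swift
import AVFoundation

let session = AVAudioSession.sharedInstance()
do {
    // ~1024 frames per I/O cycle: 1024/44100 ≈ 0.0232 s, 1024/48000 ≈ 0.0213 s
    try session.setPreferredIOBufferDuration(1024.0 / session.sampleRate)
    try session.setPreferredSampleRate(session.sampleRate)
} catch {
    print("Could not set preferred session values: \(error.localizedDescription)")
}
```
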
// ----- Audio Unit Setup -----
// ----------------------------
@@ -385,12 +389,12 @@ - (void)setupAudio
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &inputDescription);
- CheckError( AudioComponentInstanceNew(inputComponent, &_inputUnit), "Couldn't create the output audio unit");
+ CheckError( AudioComponentInstanceNew(inputComponent, &_audioUnit), "Couldn't create the output audio unit");
// Enable input
UInt32 one = 1;
- CheckError( AudioUnitSetProperty(_inputUnit,
+ CheckError( AudioUnitSetProperty(_audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
@@ -402,7 +406,7 @@ - (void)setupAudio
UInt32 size;
size = sizeof( AudioStreamBasicDescription );
- CheckError( AudioUnitGetProperty( _inputUnit,
+ CheckError( AudioUnitGetProperty( _audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
1,
@@ -412,7 +416,7 @@ - (void)setupAudio
// Check the output stream format
size = sizeof( AudioStreamBasicDescription );
- CheckError( AudioUnitGetProperty( _inputUnit,
+ CheckError( AudioUnitGetProperty( _audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
1,
@@ -420,13 +424,13 @@ - (void)setupAudio
&size ),
"Couldn't get the hardware output stream format");
- _inputFormat.mSampleRate = 44100.0;
- _outputFormat.mSampleRate = 44100.0;
- self.samplingRate = _inputFormat.mSampleRate;
+ _inputFormat.mSampleRate = self.samplingRate;
+ _outputFormat.mSampleRate = self.samplingRate;
+ //self.samplingRate = _inputFormat.mSampleRate;
self.numBytesPerSample = _inputFormat.mBitsPerChannel / 8;
size = sizeof(AudioStreamBasicDescription);
- CheckError(AudioUnitSetProperty(_inputUnit,
+ CheckError(AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
@@ -440,7 +444,7 @@ - (void)setupAudio
UInt32 numFramesPerBuffer;
size = sizeof(UInt32);
- CheckError(AudioUnitGetProperty(_inputUnit,
+ CheckError(AudioUnitGetProperty(_audioUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
kOutputBus,
@@ -498,7 +502,7 @@ - (void)setupAudio
callbackStruct.inputProc = inputCallback;
callbackStruct.inputProcRefCon = (__bridge void *)(self);
- CheckError( AudioUnitSetProperty(_inputUnit,
+ CheckError( AudioUnitSetProperty(_audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
0,
@@ -509,7 +513,7 @@ - (void)setupAudio
callbackStruct.inputProc = renderCallback;
callbackStruct.inputProcRefCon = (__bridge void *)(self);
- CheckError( AudioUnitSetProperty(_inputUnit,
+ CheckError( AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
@@ -520,7 +524,7 @@ - (void)setupAudio
- CheckError(AudioUnitInitialize(_inputUnit), "Couldn't initialize the output unit");
+ CheckError(AudioUnitInitialize(_audioUnit), "Couldn't initialize the output unit");
_isSetUp = YES;
@@ -537,7 +541,7 @@ - (void)pause {
if(self.shouldSaveContinuouslySampledMicrophoneAudioDataToNewFile)
[self closeAudioFileForWritingFromMicrophone];
- CheckError( AudioOutputUnitStop(_inputUnit), "Couldn't stop the output unit");
+ CheckError( AudioOutputUnitStop(_audioUnit), "Couldn't stop the output unit");
self.playing = NO;
}
@@ -548,7 +552,7 @@ - (void)play {
if(self.shouldUseAudioFromFile){ //Play from file
- CheckError( AudioOutputUnitStop(_inputUnit), "Couldn't stop the output unit");
+ CheckError( AudioOutputUnitStop(_audioUnit), "Couldn't stop the output unit");
// setup audio file for continuous reading
float preferredTimeInterval = [self initAudioFileForReadingWithName:self.audioFileName];
@@ -581,7 +585,7 @@ - (void)play {
if(self.shouldSaveContinuouslySampledMicrophoneAudioDataToNewFile)
[self setupAudioFileForWritingFromMicrophone];
- CheckError( AudioOutputUnitStart(self.inputUnit), "Couldn't start the output unit");
+ CheckError( AudioOutputUnitStart(self.audioUnit), "Couldn't start the output unit");
self.playing = YES;
}
@@ -605,6 +609,7 @@ OSStatus inputCallback (void *inRefCon,
Novocaine *sm = (__bridge Novocaine *)inRefCon;
+ // input callback: grab fresh microphone samples before the checks below
if (!sm.playing)
return noErr;
if (sm.inputBlock == nil)
@@ -619,7 +624,8 @@ OSStatus inputCallback (void *inRefCon,
if( inNumberFrames == 471 )
inNumberFrames = 470;
#endif
- CheckError( AudioUnitRender(sm.inputUnit, ioActionFlags, inTimeStamp, inOutputBusNumber, inNumberFrames, sm.inputBuffer), "Couldn't render the output unit");
+ // NSLog(@"Frames: %d",inNumberFrames); // had some weird stuff going on
+ CheckError( AudioUnitRender(sm.audioUnit, ioActionFlags, inTimeStamp, inOutputBusNumber, inNumberFrames, sm.inputBuffer), "Couldn't render the audio unit");
// Convert the audio in something manageable
@@ -816,7 +822,7 @@ - (void)checkAudioSource {
AVAudioSession *session = [AVAudioSession sharedInstance];
-
+
//CheckError( AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &route), "Couldn't check the audio route");
//self.inputRoute = (NSString *)route;
//CFRelease(route);
@@ -833,13 +839,14 @@ - (void)checkAudioSource {
// To be run ONCE per session property change and once on initialization.
- (void)checkSessionProperties
-{
+{
+ AVAudioSession *session = [AVAudioSession sharedInstance];
+
NSLog(@"Checking session properties");
// Check if there is input, and from where
[self checkAudioSource];
- AVAudioSession *session = [AVAudioSession sharedInstance];
// Check the number of input channels.
// Find the number of channels
@@ -856,9 +863,12 @@ - (void)checkSessionProperties
// Get the hardware sampling rate. This is settable, but here we're only reading.
-
- self.samplingRate = session.sampleRate;
- NSLog(@"Current sampling rate: %f", self.samplingRate);
+ NSLog(@"Current sampling rate: Preferred:%.1f, Actual:%.1f", self.samplingRate, session.sampleRate);
+
+
+ NSLog(@"Actual Buffer Duration: %.4f, length %d", session.IOBufferDuration,
+ (UInt32)(session.IOBufferDuration*session.sampleRate+1));
+
}
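
The added log derives the per-callback frame count from the session's actual values; the `+1` absorbs floating-point truncation before the integer cast. The same computation in Swift, as a sketch:

```swift
import AVFoundation

let session = AVAudioSession.sharedInstance()
let framesPerBuffer = UInt32(session.ioBufferDuration * session.sampleRate + 1)
// e.g. 0.0213 s × 48000 Hz ≈ 1023.9 → 1024 frames per render callback
print("≈\(framesPerBuffer) frames every \(session.ioBufferDuration) s")
```
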
@@ -950,7 +960,7 @@ - (float) initAudioFileForReadingWithName: (NSString*)name {
AudioStreamBasicDescription audioFormat;
- audioFormat.mSampleRate = 44100;
+ audioFormat.mSampleRate = self.samplingRate;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
@@ -1085,7 +1095,7 @@ -(void)setupAudioFileForWritingFromMicrophone{
AudioStreamBasicDescription audioFormat;
- AudioStreamBasicDescription outputFileDesc = {44100.0, kAudioFormatMPEG4AAC, 0, 0, 1024, 0, self.numInputChannels, 0, 0};
+ AudioStreamBasicDescription outputFileDesc = {self.samplingRate, kAudioFormatMPEG4AAC, 0, 0, 1024, 0, self.numInputChannels, 0, 0};
CheckError(ExtAudioFileCreateWithURL(outputFileURL, kAudioFileM4AType, &outputFileDesc, NULL, kAudioFileFlags_EraseFile, &_audioFileRefOutput), "Creating file");
diff --git a/AudioLabSwift.xcodeproj/project.pbxproj b/AudioLabSwift.xcodeproj/project.pbxproj
index 2ddc72d..e2ec38e 100644
--- a/AudioLabSwift.xcodeproj/project.pbxproj
+++ b/AudioLabSwift.xcodeproj/project.pbxproj
@@ -329,7 +329,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CLANG_ENABLE_MODULES = YES;
CODE_SIGN_STYLE = Automatic;
- DEVELOPMENT_TEAM = APD62CDC25;
+ DEVELOPMENT_TEAM = F69WJ27LN4;
INFOPLIST_FILE = AudioLabSwift/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 13.6;
LD_RUNPATH_SEARCH_PATHS = (
@@ -351,7 +351,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CLANG_ENABLE_MODULES = YES;
CODE_SIGN_STYLE = Automatic;
- DEVELOPMENT_TEAM = APD62CDC25;
+ DEVELOPMENT_TEAM = F69WJ27LN4;
INFOPLIST_FILE = AudioLabSwift/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 13.6;
LD_RUNPATH_SEARCH_PATHS = (
diff --git a/AudioLabSwift.xcodeproj/project.xcworkspace/xcuserdata/loaner.xcuserdatad/UserInterfaceState.xcuserstate b/AudioLabSwift.xcodeproj/project.xcworkspace/xcuserdata/loaner.xcuserdatad/UserInterfaceState.xcuserstate
new file mode 100644
index 0000000..0be9c52
Binary files /dev/null and b/AudioLabSwift.xcodeproj/project.xcworkspace/xcuserdata/loaner.xcuserdatad/UserInterfaceState.xcuserstate differ
diff --git a/AudioLabSwift.xcodeproj/xcuserdata/loaner.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist b/AudioLabSwift.xcodeproj/xcuserdata/loaner.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist
new file mode 100644
index 0000000..4ce24b4
--- /dev/null
+++ b/AudioLabSwift.xcodeproj/xcuserdata/loaner.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
diff --git a/AudioLabSwift.xcodeproj/xcuserdata/loaner.xcuserdatad/xcschemes/xcschememanagement.plist b/AudioLabSwift.xcodeproj/xcuserdata/loaner.xcuserdatad/xcschemes/xcschememanagement.plist
new file mode 100644
index 0000000..8acb524
--- /dev/null
+++ b/AudioLabSwift.xcodeproj/xcuserdata/loaner.xcuserdatad/xcschemes/xcschememanagement.plist
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>SchemeUserState</key>
+    <dict>
+        <key>AudioLabSwift.xcscheme_^#shared#^_</key>
+        <dict>
+            <key>orderHint</key>
+            <integer>0</integer>
+        </dict>
+    </dict>
+</dict>
+</plist>
diff --git a/AudioLabSwift/AudioModel.swift b/AudioLabSwift/AudioModel.swift
index b176d20..14d7e38 100644
--- a/AudioLabSwift/AudioModel.swift
+++ b/AudioLabSwift/AudioModel.swift
@@ -13,8 +13,14 @@ class AudioModel {
// MARK: Properties
private var BUFFER_SIZE:Int
+ // these properties are for interfacing with the API
+ // the user can access these arrays at any time and plot them if they like
var timeData:[Float]
var fftData:[Float]
+ var ptsData:[Float]
+ lazy var samplingRate:Int = {
+ return Int(self.audioManager!.samplingRate)
+ }()
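
Exposing `samplingRate` here lets callers convert FFT bin indices to Hz. A hedged usage sketch (the calling code and names below are illustrative, not part of this file):

```swift
let audio = AudioModel(buffer_size: 4096)
let hzPerBin = Float(audio.samplingRate) / 4096.0   // frequency resolution of fftData
let frequencyOfBin37 = 37.0 * hzPerBin              // ≈ 398 Hz at 44.1 kHz
```
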
// MARK: Public Methods
init(buffer_size:Int) {
@@ -22,58 +28,38 @@ class AudioModel {
 // anything not lazily instantiated should be allocated here
timeData = Array.init(repeating: 0.0, count: BUFFER_SIZE)
fftData = Array.init(repeating: 0.0, count: BUFFER_SIZE/2)
+ ptsData = Array.init(repeating: 0.0, count: 20)
}
// public function for starting processing of microphone data
func startMicrophoneProcessing(withFps:Double){
- self.audioManager?.inputBlock = self.handleMicrophone
-
- // repeat this fps times per second using the timer class
- Timer.scheduledTimer(timeInterval: 1.0/withFps, target: self,
- selector: #selector(self.runEveryInterval),
- userInfo: nil,
- repeats: true)
- }
-
- // public function for playing from a file reader file
- func startProcesingAudioFileForPlayback(){
- self.audioManager?.outputBlock = self.handleSpeakerQueryWithAudioFile
- self.fileReader?.play()
+ // setup the microphone to copy to circular buffer
+ if let manager = self.audioManager{
+ manager.inputBlock = self.handleMicrophone
+
+ // repeat this fps times per second using the timer class
+ // every time this is called, we update the arrays "timeData" and "fftData"
+ Timer.scheduledTimer(withTimeInterval: 1.0/withFps, repeats: true) { _ in
+ self.runEveryInterval()
+ }
+
+ }
}
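
One note on the block-based timer above: the closure captures `self` strongly, which keeps the model alive for as long as the timer repeats. If that ever matters, a weak capture is the usual fix; a minimal sketch of the same call:

```swift
Timer.scheduledTimer(withTimeInterval: 1.0/withFps, repeats: true) { [weak self] _ in
    self?.runEveryInterval()
}
```
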
- func startProcessingSinewaveForPlayback(withFreq:Float=330.0){
- sineFrequency = withFreq
- // Two examples are given that use either objective c or that use swift
- // the swift code for loop is slightly slower thatn doing this in c,
- // but the implementations are very similar
- //self.audioManager?.outputBlock = self.handleSpeakerQueryWithSinusoid // swift for loop
- self.audioManager?.setOutputBlockToPlaySineWave(sineFrequency) // c for loop
- }
// You must call this when you want the audio to start being handled by our model
func play(){
- self.audioManager?.play()
+ if let manager = self.audioManager{
+ manager.play()
+ }
}
- // Here is an example function for getting the maximum frequency
- func getMaxFrequencyMagnitude() -> (Float,Float){
- // this is the slow way of getting the maximum...
- // you might look into the Accelerate framework to make things more efficient
- var max:Float = -1000.0
- var maxi:Int = 0
-
- if inputBuffer != nil {
-            for i in 0..<Int(fftData.count) {
-                if (fftData[i]>max){
- max = fftData[i]
- maxi = i
- }
- }
+ func pause(){
+ if let manager = self.audioManager{
+ manager.pause()
}
- let frequency = Float(maxi) / Float(BUFFER_SIZE) * Float(self.audioManager!.samplingRate)
- return (max,frequency)
}
- // for sliding max windows, you might be interested in the following: vDSP_vswmax
+
//==========================================
// MARK: Private Properties
@@ -85,10 +71,6 @@ class AudioModel {
return FFTHelper.init(fftSize: Int32(BUFFER_SIZE))
}()
- private lazy var outputBuffer:CircularBuffer? = {
- return CircularBuffer.init(numChannels: Int64(self.audioManager!.numOutputChannels),
- andBufferSize: Int64(BUFFER_SIZE))
- }()
private lazy var inputBuffer:CircularBuffer? = {
return CircularBuffer.init(numChannels: Int64(self.audioManager!.numInputChannels),
@@ -98,102 +80,46 @@ class AudioModel {
//==========================================
// MARK: Private Methods
- private lazy var fileReader:AudioFileReader? = {
-
- if let url = Bundle.main.url(forResource: "satisfaction", withExtension: "mp3"){
- var tmpFileReader:AudioFileReader? = AudioFileReader.init(audioFileURL: url,
- samplingRate: Float(audioManager!.samplingRate),
- numChannels: audioManager!.numOutputChannels)
-
- tmpFileReader!.currentTime = 0.0
- print("Audio file succesfully loaded for \(url)")
- return tmpFileReader
- }else{
- print("Could not initialize audio input file")
- return nil
- }
- }()
+ // NONE for this model
//==========================================
// MARK: Model Callback Methods
- @objc
private func runEveryInterval(){
if inputBuffer != nil {
- // copy data to swift array
- self.inputBuffer!.fetchFreshData(&timeData, withNumSamples: Int64(BUFFER_SIZE))
+ // copy time data to swift array
+ self.inputBuffer!.fetchFreshData(&timeData, // copied into this array
+ withNumSamples: Int64(BUFFER_SIZE))
- // now take FFT and display it
+ // now take FFT
fftHelper!.performForwardFFT(withData: &timeData,
- andCopydBMagnitudeToBuffer: &fftData)
+ andCopydBMagnitudeToBuffer: &fftData) // fft result is copied into fftData array
+ // at this point, we have saved the data to the arrays:
+ // timeData: the raw audio samples
+ // fftData: the FFT of those same samples
+ // the user can now use these variables however they like
+                let a = BUFFER_SIZE/40          // points per window (20 windows across fftData)
+                
+                for i in 0...19 {
+                    let b = i * a               // starting index of this window
+                    var max:Float = -1000.0
+                    for j in b..<(b + a) {      // half-open range so the last window stays inside fftData
+                        if (fftData[j] > max) { max = fftData[j]}
+                    }
+                    ptsData[i] = max            // keep only the loudest bin in each window
+                }
}
}
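
The nested loops above compute a per-window maximum of the FFT (20 windows across `fftData`). An Accelerate-based version of the same reduction is sketched below, assuming `fftData.count` divides evenly into the window count (the function name is illustrative; `vDSP_vswmax` is another option for sliding windows):

```swift
import Accelerate

func windowedMaxima(_ fftData: [Float], windows: Int = 20) -> [Float] {
    let windowSize = fftData.count / windows
    var maxima = [Float](repeating: -1000.0, count: windows)
    fftData.withUnsafeBufferPointer { buf in
        for w in 0..<windows {
            vDSP_maxv(buf.baseAddress! + w * windowSize, // start of this window
                      1,                                 // unit stride through the window
                      &maxima[w],                        // write this window's max here
                      vDSP_Length(windowSize))
        }
    }
    return maxima
}
```
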
-
-
//==========================================
// MARK: Audiocard Callbacks
// in obj-C it was (^InputBlock)(float *data, UInt32 numFrames, UInt32 numChannels)
// and in swift this translates to:
    private func handleMicrophone (data:Optional<UnsafeMutablePointer<Float>>, numFrames:UInt32, numChannels: UInt32) {
-// var max:Float = 0.0
-// if let arrayData = data{
-//            for i in 0..<Int(numFrames){
-//                if (abs(arrayData[i])>max){
-// max = abs(arrayData[i])
-// }
-// }
-// }
-// // can this max operation be made faster??
-// print(max)
-
// copy samples from the microphone into circular buffer
self.inputBuffer?.addNewFloatData(data, withNumSamples: Int64(numFrames))
}
-    private func handleSpeakerQueryWithAudioFile(data:Optional<UnsafeMutablePointer<Float>>, numFrames:UInt32, numChannels: UInt32){
- if let file = self.fileReader{
-
- // read from file, loaidng into data (a float pointer)
- file.retrieveFreshAudio(data,
- numFrames: numFrames,
- numChannels: numChannels)
-
- // set samples to output speaker buffer
- self.outputBuffer?.addNewFloatData(data,
- withNumSamples: Int64(numFrames))
- }
- }
- // _ _ _ _ _ _ _ _ _ _
- // / \ / \ / \ / \ / \ / \ / \ / \ / \ /
- // / \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/
- var sineFrequency:Float = 0.0 { // frequency in Hz (changeable by user)
- didSet{
- // if using swift for generating the sine wave: when changed, we need to update our increment
- //phaseIncrement = Float(2*Double.pi*sineFrequency/audioManager!.samplingRate)
-
- // if using objective c: this changes the frequency in the novocain block
- self.audioManager?.sineFrequency = sineFrequency
- }
- }
- private var phase:Float = 0.0
- private var phaseIncrement:Float = 0.0
- private var sineWaveRepeatMax:Float = Float(2*Double.pi)
-
-    private func handleSpeakerQueryWithSinusoid(data:Optional<UnsafeMutablePointer<Float>>, numFrames:UInt32, numChannels: UInt32){
- // while pretty fast, this loop is still not quite as fast as
- // writing the code in c, so I placed a function in Novocaine to do it for you
- // use setOutputBlockToPlaySineWave() in Novocaine
- if let arrayData = data{
- var i = 0
-            while i<Int(numFrames){
-                arrayData[i] = sin(phase)
-                phase += phaseIncrement
-                if (phase >= sineWaveRepeatMax) { phase -= sineWaveRepeatMax }
- i+=1
- }
- }
- }
}
diff --git a/AudioLabSwift/Base.lproj/Main.storyboard b/AudioLabSwift/Base.lproj/Main.storyboard
index 31ce0c9..0d90462 100644
--- a/AudioLabSwift/Base.lproj/Main.storyboard
+++ b/AudioLabSwift/Base.lproj/Main.storyboard
@@ -1,26 +1,140 @@
-
-
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/AudioLabSwift/Info.plist b/AudioLabSwift/Info.plist
index b8c43bf..5e02b96 100644
--- a/AudioLabSwift/Info.plist
+++ b/AudioLabSwift/Info.plist
@@ -57,10 +57,10 @@
UISupportedInterfaceOrientations~ipad
- UIInterfaceOrientationPortrait
- UIInterfaceOrientationPortraitUpsideDown
UIInterfaceOrientationLandscapeLeft
UIInterfaceOrientationLandscapeRight
+ UIInterfaceOrientationPortrait
+ UIInterfaceOrientationPortraitUpsideDown
diff --git a/AudioLabSwift/MetalGraph.swift b/AudioLabSwift/MetalGraph.swift
index b20b8a0..c15bfc3 100644
--- a/AudioLabSwift/MetalGraph.swift
+++ b/AudioLabSwift/MetalGraph.swift
@@ -2,10 +2,18 @@
// MetalGraph.swift
// AudioLabSwift
//
-// Created by Eric Larson
+// Created by Eric Larson
// Copyright © 2020 Eric Larson. All rights reserved.
//
+
+//TODO:
+// 0. make private things
+// 1. Limit values hi/lo to fit in view
+// 2. grid in the graph
+// 3. values for grid?
+
+
import Foundation
import UIKit
import Metal
@@ -13,31 +21,50 @@ import Accelerate
class MetalGraph {
- var device: MTLDevice!
- var metalLayer: CAMetalLayer!
- var pipelineState: MTLRenderPipelineState!
- var commandQueue: MTLCommandQueue!
- var timer: CADisplayLink!
-
+ //MARK: MTL Properties
+ private var device: MTLDevice!
+ private var metalLayer: CAMetalLayer!
+ private var pipelineState: MTLRenderPipelineState!
+ private var commandQueue: MTLCommandQueue!
+ private var timer: CADisplayLink!
- var vertexData: [String:[Float]] = [String: [Float]]()
- var vertexBuffer: [String:MTLBuffer] = [String:MTLBuffer]()
- var vertexColorBuffer: [String:MTLBuffer] = [String:MTLBuffer]()
-    var vertexPointer: [String:UnsafeMutablePointer<Float>] = [String:UnsafeMutablePointer<Float>]()
- var vertexNormalize: [String:Bool] = [String:Bool]()
- var vertexNum: [String:Int] = [String:Int]()
- var dsFactor: [String:Int] = [String:Int]()
+ private var backgroundColor = MTLClearColor(
+ red: 50.0/255.0,
+ green: 50.0/255.0,
+ blue: 50.0/255.0,
+ alpha: 1.0)
+
+ //MARK: Dictionary Properties for saving state/data from user
+ private var vertexData: [String:[Float32]] = [String: [Float32]]()
+ private var vertexBuffer: [String:MTLBuffer] = [String:MTLBuffer]()
+ private var vertexColorBuffer: [String:MTLBuffer] = [String:MTLBuffer]()
+    private var vertexPointer: [String:UnsafeMutablePointer<Float32>] = [String:UnsafeMutablePointer<Float32>]()
+ private var vertexNum: [String:Int] = [String:Int]()
+ private var vertexShowGrid: [String:Bool] = [String:Bool]()
+ private var dsFactor: [String:Int] = [String:Int]()
+ private var vertexGain: [String:Float32] = [String:Float32]()
+ private var vertexBias: [String:Float32] = [String:Float32]()
+ private var boxBuffer:[String:MTLBuffer] = [String:MTLBuffer]()
+ private var boxColorBuffer:[String:MTLBuffer] = [String:MTLBuffer]()
+ private var needsRender = false
- let maxPointsPerGraph = 512 // you can increase this or decrease for different GPU speeds
- var needsRender = false
- let numShaderFloats = 4
+ //MARK: iOS color palette with gradients
+ private let R = [0xFF,0xFF, 0x52,0x5A, 0xFF,0xFF, 0x1A,0x1D, 0xEF,0xC6, 0xDB,0x89, 0x87,0x0B, 0xFF,0xFF]
+ private let G = [0x5E,0x2A, 0xED,0xC8, 0xDB,0xCD, 0xD6,0x62, 0x4D,0x43, 0xDD,0x8C, 0xFC,0xD3, 0x95,0x5E]
+ private let B = [0x3A,0x68, 0xC7,0xFB, 0x4C,0x02, 0xFD,0xF0, 0xB6,0xFC, 0xDE,0x90, 0x70,0x18, 0x00,0x3A]
+
- //iOS color palette with gradients
- let R = [0xFF,0xFF, 0x52,0x5A, 0xFF,0xFF, 0x1A,0x1D, 0xEF,0xC6, 0xDB,0x89, 0x87,0x0B, 0xFF,0xFF]
- let G = [0x5E,0x2A, 0xED,0xC8, 0xDB,0xCD, 0xD6,0x62, 0x4D,0x43, 0xDD,0x8C, 0xFC,0xD3, 0x95,0x5E]
- let B = [0x3A,0x68, 0xC7,0xFB, 0x4C,0x02, 0xFD,0xF0, 0xB6,0xFC, 0xDE,0x90, 0x70,0x18, 0x00,0x3A]
+ //MARK: Constants
+ private struct GraphConstants{
+ static let fftNormalizer:Float = 64.0
+ static let fftAddition:Float = 40.0
+ static let maxPointsPerGraph = 512 // you can increase this or decrease for different GPU speeds
+ static let numShaderFloats = 4
+ }
- init(mainView:UIView)
+ //MARK: Initialization and Rendering Functions
+ // Initialize the class, setup where this view will be drawing to
+ init(userView:UIView)
{
// get device
guard let device = MTLCreateSystemDefaultDevice() else { fatalError("GPU not available") }
@@ -48,83 +75,82 @@ class MetalGraph {
metalLayer.device = self.device
metalLayer.pixelFormat = .bgra8Unorm
metalLayer.framebufferOnly = true
- metalLayer.frame = mainView.layer.frame
- mainView.layer.insertSublayer(metalLayer, at:0)
+ metalLayer.contentsScale = 2.0
+ metalLayer.frame = userView.bounds
+ userView.layer.insertSublayer(metalLayer, at:0)
+
commandQueue = self.device.makeCommandQueue()
+ // setup a repeating render function
timer = CADisplayLink(target: self, selector: #selector(gameloop))
timer.add(to: RunLoop.main, forMode: .default)
+ // add in shaders to the program
guard let defaultLibrary = device.makeDefaultLibrary(),
let fragmentProgram = defaultLibrary.makeFunction(name: "passThroughFragment"),
- let vertexProgram = defaultLibrary.makeFunction(name: "passThroughVertex") else { fatalError() }
+ let vertexProgram = defaultLibrary.makeFunction(name: "passThroughVertex") else { fatalError("Could not find Shaders.metal file.") }
let pipelineStateDescriptor = MTLRenderPipelineDescriptor()
pipelineStateDescriptor.vertexFunction = vertexProgram
pipelineStateDescriptor.fragmentFunction = fragmentProgram
- pipelineStateDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
+ pipelineStateDescriptor.colorAttachments[0].pixelFormat = metalLayer.pixelFormat
pipelineStateDescriptor.colorAttachments[0].isBlendingEnabled = false
pipelineState = try! device.makeRenderPipelineState(descriptor: pipelineStateDescriptor)
+
}
- func addGraph(withName:String,
- shouldNormalize:Bool,
- numPointsInGraph:Int){
-
- //setup graph
- let key = withName
- let numGraphs = Int(vertexData.count)
+
+ private var gridLength:Int = 0
+ private func createGraphGrid(name:String,min:Float,max:Float){
+ let mid = (max-min)/2.0+min
+        let box:[Float32] = [-0.99, min, 0.0, 0.0, // primitive draw protect
+ -0.99, min, 0.0, 1.0,
+ -0.99, max, 0.0, 1.0,
+ 0.99, max, 0.0, 1.0,
+ 0.99, min, 0.0, 1.0,
+ -0.99, min, 0.0, 1.0, // outer box
+ -0.75, min, 0.0, 1.0,
+ -0.75, max, 0.0, 1.0,
+ 0.75, max, 0.0, 1.0,
+ 0.75, min, 0.0, 1.0,
+ -0.75, min, 0.0, 1.0, // outer quartile box
+ -0.25, min, 0.0, 1.0,
+ -0.25, max, 0.0, 1.0,
+ 0.25, max, 0.0, 1.0,
+ 0.25, min, 0.0, 1.0,
+ -0.25, min, 0.0, 1.0, // inner quartile box
+ -0.5, min, 0.0, 1.0,
+ -0.5, max, 0.0, 1.0,
+ 0.5, max, 0.0, 1.0,
+ 0.5, min, 0.0, 1.0,
+ -0.5, min, 0.0, 1.0, // mid quartile box
+ 0.0, min, 0.0, 1.0,
+ 0.0, max, 0.0, 1.0, // mid line
+ -0.99, max, 0.0, 1.0, // center line
+ -0.99, mid, 0.0, 1.0,
+ 0.99, mid, 0.0, 1.0,
+                             0.99, mid, 0.0, 0.0 // primitive draw protect
+ ]
- dsFactor[key] = Int(numPointsInGraph/maxPointsPerGraph) // downsample factor for each graph
- if dsFactor[key]!<1 { dsFactor[key] = 1 }
+ let boxColor:[Float32] = [Float32].init(repeating: 0.5, count:box.count)
+ gridLength = box.count
- vertexData[key] = Array.init(repeating: 0.0, count: (numPointsInGraph/dsFactor[key]!)*numShaderFloats)
- vertexNormalize[key] = shouldNormalize
- vertexNum[key] = numGraphs
+ var dataSize = box.count * MemoryLayout.size(ofValue: box[0])
+ boxBuffer[name] = device.makeBuffer(bytes: box,
+ length: dataSize,
+ options: []) //cpuCacheModeWriteCombined
- // we use a 4D location, so copy over the right things
- let maxIdx = Int(vertexData[key]!.count/numShaderFloats)
- for j in 0..= maxIdx-2{
+ vertexColorData[j*GraphConstants.numShaderFloats] = 0.0
+ vertexColorData[j*GraphConstants.numShaderFloats+1] = 0.0
+ vertexColorData[j*GraphConstants.numShaderFloats+2] = 0.0
+ vertexColorData[j*GraphConstants.numShaderFloats+3] = 0.0
+ }
+ }
+ vertexColorBuffer[key] = device.makeBuffer(bytes: vertexColorData, length: dataSize, options: [])
+
+ // now save if we should have a grid for this graph
+ vertexShowGrid[key] = showGrid
+
+ }
+
+ func makeGrids(){
+ for (forKey,_) in vertexBuffer{
+ if vertexShowGrid[forKey]!{
+ let numGraphs = Float(vertexData.count)
+ let addToPlot = -1.0 + 2*(Float(vertexNum[forKey]!) / numGraphs) + 1.0/numGraphs
+ // get to midpoint of plot on screen
+ let minVal:Float = addToPlot - (0.9 / numGraphs)
+ let maxVal:Float = addToPlot + (0.9 / numGraphs)
+ createGraphGrid(name:forKey, min: minVal, max: maxVal)
+ }
+ }
+ }
+
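
To make the `makeGrids` placement concrete, here are the numbers for two stacked graphs (the grid's outline, quartile boxes, and midline are then drawn between `minVal` and `maxVal`):

```swift
// Worked example of the placement math in makeGrids, for numGraphs = 2:
let numGraphs: Float = 2
let graphIndex: Float = 0                                          // bottom graph
let addToPlot = -1.0 + 2*(graphIndex/numGraphs) + 1.0/numGraphs    // -0.5 (center of its slot)
let minVal = addToPlot - 0.9/numGraphs                             // -0.95
let maxVal = addToPlot + 0.9/numGraphs                             // -0.05
// graph 1 works out to a grid spanning +0.05 ... +0.95
```
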
func updateGraph(data:[Float], forKey:String){
if vertexData.keys.contains(forKey) {
@@ -171,32 +339,46 @@ class MetalGraph {
var multiplier:Float = 1.0
- if vertexNormalize[forKey]! {
- // normalize for fft values
- addToPlot += 84.0/(64.0 * numGraphs)
- multiplier = 1.0/(64.0 * numGraphs)
- }else{
- // normalize for microphone values
- multiplier = 3.0/numGraphs
- }
+ // get to midpoint of plot on screen
+ var minVal:Float = addToPlot - (0.89 / numGraphs)
+ var maxVal:Float = addToPlot + (0.89 / numGraphs)
+
+ // now add custom normalizations
+ addToPlot += vertexBias[forKey]!/(vertexGain[forKey]! * numGraphs)
+ multiplier = 1.0/(vertexGain[forKey]! * numGraphs)
+
+
- // multiply by \(multiplier) and add in \(addToPlot), strided by 3 and starting at element one of array
+ // multiply by \(multiplier) and add in \(addToPlot), strided by dsFactor and starting at element one of array
// there is a lot to unpack here, trust me it works and is awesomely fast
- //vDSP_vsmsa(data, vDSP_Stride(dsFactor[forKey]!), &multiplier, &addToPlot, &(vertexData[forKey]![1]), vDSP_Stride(3), vDSP_Length(data.count/dsFactor[forKey]!))
- vDSP_vsmsa(data, vDSP_Stride(dsFactor[forKey]!), &multiplier, &addToPlot,
- &(vertexPointer[forKey]![1]), vDSP_Stride(numShaderFloats),
- vDSP_Length(data.count/dsFactor[forKey]!))
+ // vector:scalar-multiply:scalar-addition
+ vDSP_vsmsa(data, // go through this data
+ vDSP_Stride(dsFactor[forKey]!), // down sample input
+ &multiplier, &addToPlot, // scalars to mult and add
+                   &(vertexPointer[forKey]![1]),// save to this data (keep zeroth element the same so lines do not connect)
+ vDSP_Stride(GraphConstants.numShaderFloats), // skip through 4D location
+ vDSP_Length(data.count/dsFactor[forKey]!)) // do this many adds
 // here is what the above code does, but using SIMD
//for i in 0..