Microsoft Project Oxford has a nice Speech Recognition API with instructions for Objective-C on iOS. I built it easily by following the getting started instructions. However, I am having a hard time converting it to Swift.
I created a Swift project first, then created the bridging header file (ProjectName-Bridging-Header.h) and inserted the following line into it:
#import "SpeechRecognitionService.h"
I want to convert both the Objective-C header and implementation files into ViewController.swift.
Contents of ViewController.h:
#import <UIKit/UIKit.h>
#import "SpeechRecognitionService.h"
@interface ViewController : UIViewController<SpeechRecognitionProtocol>
{
    NSMutableString* textOnScreen;
    DataRecognitionClient* dataClient;
    MicrophoneRecognitionClient* micClient;
    SpeechRecognitionMode recoMode;
    bool isMicrophoneReco;
    bool isIntent;
    int waitSeconds;
}
@property (nonatomic, strong) IBOutlet UIButton* startButton;
/* In our UI, we have a text box to show the reco results.*/
@property (nonatomic, strong) IBOutlet UITextView* quoteText;
/* Action for pressing the "Start" button */
-(IBAction)startButtonTapped:(id)sender;
@end
Contents of ViewController.m:
#import "ViewController.h"
#import <AVFoundation/AVAudioSession.h>
@interface ViewController (/*private*/)
/* Create a recognition request to interact with the Speech Service.*/
-(void)initializeRecoClient;
@end
NSString* ConvertSpeechRecoConfidenceEnumToString(Confidence confidence);
/* The Main App */
@implementation ViewController
/* Initialization to be done when app starts. */
-(void)viewDidLoad
{
    [super viewDidLoad];
    textOnScreen = [NSMutableString stringWithCapacity: 1000];
    recoMode = SpeechRecognitionMode_ShortPhrase;
    isMicrophoneReco = true;
    isIntent = false;
    waitSeconds = recoMode == SpeechRecognitionMode_ShortPhrase ? 20 : 200;
    [self initializeRecoClient];
}
/* Called when a partial response is received. */
-(void)onPartialResponseReceived:(NSString*)response
{
    dispatch_async(dispatch_get_main_queue(), ^{
        [textOnScreen appendFormat:(@"%@\n"), response];
        self.quoteText.text = response;
    });
}
/* Called when a final response is received. */
-(void)onFinalResponseReceived:(RecognitionResult*)response
{
    bool isFinalDictationMessage = recoMode == SpeechRecognitionMode_LongDictation &&
        (response.RecognitionStatus == RecognitionStatus_EndOfDictation ||
         response.RecognitionStatus == RecognitionStatus_DictationEndSilenceTimeout);
    if (isMicrophoneReco && ((recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDictationMessage)) {
        [micClient endMicAndRecognition];
    }
    if ((recoMode == SpeechRecognitionMode_ShortPhrase) || isFinalDictationMessage) {
        dispatch_async(dispatch_get_main_queue(), ^{
            [[self startButton] setEnabled:YES];
        });
    }
}
NSString* ConvertSpeechErrorToString(int errorCode)
{
    switch ((SpeechClientStatus)errorCode) {
        case SpeechClientStatus_SecurityFailed: return @"SpeechClientStatus_SecurityFailed";
        case SpeechClientStatus_LoginFailed: return @"SpeechClientStatus_LoginFailed";
        case SpeechClientStatus_Timeout: return @"SpeechClientStatus_Timeout";
        case SpeechClientStatus_ConnectionFailed: return @"SpeechClientStatus_ConnectionFailed";
        case SpeechClientStatus_NameNotFound: return @"SpeechClientStatus_NameNotFound";
        case SpeechClientStatus_InvalidService: return @"SpeechClientStatus_InvalidService";
        case SpeechClientStatus_InvalidProxy: return @"SpeechClientStatus_InvalidProxy";
        case SpeechClientStatus_BadResponse: return @"SpeechClientStatus_BadResponse";
        case SpeechClientStatus_InternalError: return @"SpeechClientStatus_InternalError";
        case SpeechClientStatus_AuthenticationError: return @"SpeechClientStatus_AuthenticationError";
        case SpeechClientStatus_AuthenticationExpired: return @"SpeechClientStatus_AuthenticationExpired";
        case SpeechClientStatus_LimitsExceeded: return @"SpeechClientStatus_LimitsExceeded";
        case SpeechClientStatus_AudioOutputFailed: return @"SpeechClientStatus_AudioOutputFailed";
        case SpeechClientStatus_MicrophoneInUse: return @"SpeechClientStatus_MicrophoneInUse";
        case SpeechClientStatus_MicrophoneUnavailable: return @"SpeechClientStatus_MicrophoneUnavailable";
        case SpeechClientStatus_MicrophoneStatusUnknown: return @"SpeechClientStatus_MicrophoneStatusUnknown";
        case SpeechClientStatus_InvalidArgument: return @"SpeechClientStatus_InvalidArgument";
    }
    return [[NSString alloc] initWithFormat:@"Unknown error: %d\n", errorCode];
}
/* Called when an error is received. */
-(void)onError:(NSString*)errorMessage withErrorCode:(int)errorCode
{
    dispatch_async(dispatch_get_main_queue(), ^{
        [[self startButton] setEnabled:YES];
        [textOnScreen appendString:(@"********* Error Detected *********\n")];
        [textOnScreen appendFormat:(@"%@ %@\n"), errorMessage, ConvertSpeechErrorToString(errorCode)];
        self.quoteText.text = textOnScreen;
    });
}
/* Event fired when the microphone recording status has changed. */
-(void)onMicrophoneStatus:(Boolean)recording
{
    if (!recording) {
        [micClient endMicAndRecognition];
    }
    dispatch_async(dispatch_get_main_queue(), ^{
        if (!recording) {
            [[self startButton] setEnabled:YES];
        }
        self.quoteText.text = textOnScreen;
    });
}
/* Create a recognition request to interact with the Speech Recognition Service.*/
-(void)initializeRecoClient
{
    NSString* language = @"en-us";
    NSString* path = [[NSBundle mainBundle] pathForResource:@"settings" ofType:@"plist"];
    NSDictionary* settings = [[NSDictionary alloc] initWithContentsOfFile:path];
    NSString* primaryOrSecondaryKey = [settings objectForKey:(@"primaryKey")];
    NSString* luisAppID = [settings objectForKey:(@"luisAppID")];
    NSString* luisSubscriptionID = [settings objectForKey:(@"luisSubscriptionID")];

    if (isMicrophoneReco) {
        if (!isIntent) {
            micClient = [SpeechRecognitionServiceFactory createMicrophoneClient:(recoMode)
                              withLanguage:(language)
                              withKey:(primaryOrSecondaryKey)
                              withProtocol:(self)];
        }
        else {
            MicrophoneRecognitionClientWithIntent* micIntentClient;
            micIntentClient = [SpeechRecognitionServiceFactory createMicrophoneClientWithIntent:(language)
                                    withKey:(primaryOrSecondaryKey)
                                    withLUISAppID:(luisAppID)
                                    withLUISSecret:(luisSubscriptionID)
                                    withProtocol:(self)];
            micClient = micIntentClient;
        }
    }
    else {
        if (!isIntent) {
            dataClient = [SpeechRecognitionServiceFactory createDataClient:(recoMode)
                               withLanguage:(language)
                               withKey:(primaryOrSecondaryKey)
                               withProtocol:(self)];
        }
        else {
            DataRecognitionClientWithIntent* dataIntentClient;
            dataIntentClient = [SpeechRecognitionServiceFactory createDataClientWithIntent:(language)
                                     withKey:(primaryOrSecondaryKey)
                                     withLUISAppID:(luisAppID)
                                     withLUISSecret:(luisSubscriptionID)
                                     withProtocol:(self)];
            dataClient = dataIntentClient;
        }
    }
}
/* Take enum value and produce NSString */
NSString* ConvertSpeechRecoConfidenceEnumToString(Confidence confidence)
{
    switch (confidence) {
        case SpeechRecoConfidence_None:
            return @"None";
        case SpeechRecoConfidence_Low:
            return @"Low";
        case SpeechRecoConfidence_Normal:
            return @"Normal";
        case SpeechRecoConfidence_High:
            return @"High";
    }
}
/* Action for pressing the "Start" button */
-(IBAction)startButtonTapped:(id)sender
{
    [textOnScreen setString:(@"")];
    self.quoteText.text = textOnScreen;
    [[self startButton] setEnabled:NO];
    if (isMicrophoneReco) {
        OSStatus status = [micClient startMicAndRecognition];
        if (status) {
            [textOnScreen appendFormat:(@"Error starting audio. %@\n"), ConvertSpeechErrorToString(status)];
        }
    }
}
/* Action for low memory */
-(void)didReceiveMemoryWarning
{
    [super didReceiveMemoryWarning];
}
@end
I am new to iOS programming, so I would appreciate any help with this. Thanks.
1. Convert your Objective-C view controller to Swift; don't import it via the bridging header.
2. Use the new converted class the same way you were using it in the Objective-C version.
3. Import only the framework header files in the bridging header.
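For step 3, that means the bridging header keeps nothing but the SDK's own header, exactly as in the question:
#import "SpeechRecognitionService.h"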
To convert Objective-C code to Swift you can use a tool such as Swiftify.
Here is the converted code, with both files combined into a single ViewController.swift:
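What follows is a sketch of that conversion in current Swift syntax. It assumes SpeechRecognitionService.h is exposed through the bridging header and that the SDK's types, enum constants, protocol callbacks, and factory methods come across into Swift roughly under the names shown; the exact imported spellings and optionality vary by SDK and Swift version, so check the generated Swift interface in Xcode rather than treating this as a drop-in file. The data-client and intent variants from the original are omitted for brevity.

import UIKit
import AVFoundation

class ViewController: UIViewController, SpeechRecognitionProtocol {

    @IBOutlet weak var startButton: UIButton!
    @IBOutlet weak var quoteText: UITextView!

    var textOnScreen = NSMutableString(capacity: 1000)
    var micClient: MicrophoneRecognitionClient?       // SDK type imported via the bridging header
    var recoMode = SpeechRecognitionMode_ShortPhrase  // imported C enum constant; name may differ
    var isMicrophoneReco = true
    var waitSeconds = 0

    override func viewDidLoad() {
        super.viewDidLoad()
        waitSeconds = (recoMode == SpeechRecognitionMode_ShortPhrase) ? 20 : 200
        initializeRecoClient()
    }

    /* Create a recognition client; only the microphone, non-intent case is shown. */
    func initializeRecoClient() {
        let language = "en-us"
        let path = Bundle.main.path(forResource: "settings", ofType: "plist") ?? ""
        let settings = NSDictionary(contentsOfFile: path)
        let primaryOrSecondaryKey = settings?["primaryKey"] as? String ?? ""

        // The exact Swift spelling of this factory method comes from the SDK's
        // generated interface; adjust the labels if the call below does not compile.
        micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(
            recoMode,
            withLanguage: language,
            withKey: primaryOrSecondaryKey,
            withProtocol: self)
    }

    /* Called when a partial response is received. */
    func onPartialResponseReceived(_ response: String) {
        DispatchQueue.main.async {
            self.textOnScreen.append("\(response)\n")
            self.quoteText.text = response
        }
    }

    /* Called when a final response is received. */
    func onFinalResponseReceived(_ response: RecognitionResult) {
        let isFinalDictationMessage = recoMode == SpeechRecognitionMode_LongDictation &&
            (response.RecognitionStatus == RecognitionStatus_EndOfDictation ||
             response.RecognitionStatus == RecognitionStatus_DictationEndSilenceTimeout)
        if isMicrophoneReco && (recoMode == SpeechRecognitionMode_ShortPhrase || isFinalDictationMessage) {
            micClient?.endMicAndRecognition()
        }
        if recoMode == SpeechRecognitionMode_ShortPhrase || isFinalDictationMessage {
            DispatchQueue.main.async { self.startButton.isEnabled = true }
        }
    }

    /* Called when an error is received. */
    func onError(_ errorMessage: String, withErrorCode errorCode: Int32) {
        DispatchQueue.main.async {
            self.startButton.isEnabled = true
            self.textOnScreen.append("********* Error Detected *********\n")
            self.textOnScreen.append("\(errorMessage) (code \(errorCode))\n")
            self.quoteText.text = self.textOnScreen as String
        }
    }

    /* Called when the microphone recording status changes. */
    func onMicrophoneStatus(_ recording: Bool) {
        if !recording {
            micClient?.endMicAndRecognition()
        }
        DispatchQueue.main.async {
            if !recording {
                self.startButton.isEnabled = true
            }
            self.quoteText.text = self.textOnScreen as String
        }
    }

    /* Action for pressing the "Start" button. */
    @IBAction func startButtonTapped(_ sender: AnyObject) {
        textOnScreen.setString("")
        quoteText.text = ""
        startButton.isEnabled = false
        if isMicrophoneReco {
            let status = micClient?.startMicAndRecognition() ?? 0
            if status != 0 {
                textOnScreen.append("Error starting audio (status \(status))\n")
                quoteText.text = textOnScreen as String
            }
        }
    }
}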