We'd like to pipe the microphone directly to the Watson Speech to Text service, but it seems that we have to go through a .wav file first? Please take a look at the following code. In particular, I was trying to get the microphone streamed directly to the Speech to Text service. I believe this is the most common way of using a mic — not piping it into a .wav file and then streaming the .wav file to STT:
// Module dependencies.
var SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');
var watson = require('watson-developer-cloud');
var cp = require('child_process');
var fs = require('fs');

// Capture mono, 16-bit, 44.1 kHz audio from the ALSA device via `arecord`.
// (Append '--duration=10' to the argument list to cap the recording length.)
var mic = cp.spawn('arecord', [
  '--device=plughw:1,0',
  '--format=S16_LE',
  '--rate=44100',
  '--channels=1'
]);

// Surface any arecord diagnostics on this process's stderr.
mic.stderr.pipe(process.stderr);

// Begin streaming the microphone to Watson Speech to Text.
stt();
// Stream live microphone audio (from the file-level `mic` arecord process)
// into the Watson Speech to Text service, logging each event and writing
// the transcription to 'transcription.txt'.
//
// Fixes over the original:
//  - Both the live mic AND a pre-recorded 'test.wav' were piped into the
//    SAME recognize stream, interleaving two audio sources into one request
//    and corrupting it. Only the microphone is piped now — no intermediate
//    .wav file is needed.
//  - `recognizeStream` was an implicit global; it is now declared with `var`.
//  - `setEncoding('utf8')` is applied before any consumers are attached.
function stt() {
  console.log("openCMDS");

  var speech_to_text = new SpeechToTextV1({
    username: '',
    password: ''
  });

  var params = {
    // arecord's default output file type is WAV (it writes a RIFF header to
    // stdout), so 'audio/wav' matches the piped mic data. To stream raw PCM
    // instead, spawn arecord with '-t raw' and use
    // 'audio/l16; rate=44100; channels=1' here.
    content_type: 'audio/wav',
    model: 'zh-CN_BroadbandModel',
    continuous: true,
    inactivity_timeout: -1 // never close the session on silence
  };

  var recognizeStream = speech_to_text.createRecognizeStream(params);
  recognizeStream.setEncoding('utf8'); // emit transcription text, not Buffers

  // Pipe the microphone straight into the recognize stream.
  mic.stdout.pipe(recognizeStream);

  // Persist the transcription as it arrives.
  recognizeStream.pipe(fs.createWriteStream('transcription.txt'));

  console.log("start record");

  recognizeStream.on('data', function(event) { onEvent('Data:', event); });
  recognizeStream.on('error', function(event) { onEvent('Error:', event); });
  recognizeStream.on('close', function(event) { onEvent('Close:', event); });

  // Display events on the console.
  function onEvent(name, event) {
    console.log(name, JSON.stringify(event, null, 2));
  }
}