I want to play an audio file as microphone input to the user on the other end of a WebRTC connection, i.e. send the file's audio to the remote peer instead of my live voice. Can anybody tell me how this can be done?
I have tried the following approaches in my JS code:
1. base64 Audio
<script>
var base64string = "T2dnUwACAAAAAAA..";

// Play the base64-encoded file directly through an <audio> element.
var snd = new Audio("data:audio/wav;base64," + base64string);
snd.play();

// Factory that keeps each <audio> element alive in a document
// fragment until its playback ends, then removes it.
var Sound = (function () {
    var df = document.createDocumentFragment();
    return function Sound(src) {
        var snd = new Audio(src);
        df.appendChild(snd);
        snd.addEventListener('ended', function () { df.removeChild(snd); });
        snd.play();
        return snd;
    };
}());

snd = Sound("data:audio/wav;base64," + base64string);
</script>
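This plays the file locally, but the remote user never hears anything. My guess is that I need to capture the element's output as a MediaStream and add its audio track to the peer connection. Here is an untested sketch of what I mean (note captureStream() is Chrome-only and Firefox uses mozCaptureStream(); peerConnection stands for my already-established RTCPeerConnection):

// Sketch only: capture the <audio> element's output as a MediaStream
// and hand its audio track to the existing RTCPeerConnection.
// `peerConnection` is assumed to already exist.
var snd = new Audio("data:audio/wav;base64," + base64string);
var stream = snd.captureStream ? snd.captureStream()     // Chrome
                               : snd.mozCaptureStream(); // Firefox
stream.getAudioTracks().forEach(function (track) {
    peerConnection.addTrack(track, stream);
});
snd.play();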
2. AudioBuffer
window.AudioContext = window.AudioContext || window.webkitAudioContext;

var audioContext = new AudioContext();
var isPlaying = false;
var sourceNode = null;
var analyser = null;
var theBuffer = null;

// Fetch and decode the audio file up front.
window.onload = function () {
    var request = new XMLHttpRequest();
    request.open("GET", "sounds/DEMO_positive_resp.wav", true);
    request.responseType = "arraybuffer";
    request.onload = function () {
        audioContext.decodeAudioData(request.response, function (buffer) {
            theBuffer = buffer;
        });
    };
    request.send();
};

function togglePlayback() {
    var now = audioContext.currentTime;

    if (isPlaying) {
        // Stop playback and release the nodes.
        sourceNode.stop(now);
        sourceNode = null;
        analyser = null;
        isPlaying = false;
        if (!window.cancelAnimationFrame)
            window.cancelAnimationFrame = window.webkitCancelAnimationFrame;
        //window.cancelAnimationFrame( rafID );
        return "start";
    }

    // Play the decoded buffer in a loop, routed through an
    // analyser node to the local speakers.
    sourceNode = audioContext.createBufferSource();
    sourceNode.buffer = theBuffer;
    sourceNode.loop = true;

    analyser = audioContext.createAnalyser();
    analyser.fftSize = 2048;

    sourceNode.connect(analyser);
    analyser.connect(audioContext.destination);
    sourceNode.start(now);
    isPlaying = true;
    return "stop";
}
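This plays the buffer through the local speakers, but again nothing reaches the remote peer. I suspect the fix is to connect the source to a MediaStreamAudioDestinationNode instead of (or in addition to) audioContext.destination, and send that node's stream over WebRTC. Another untested sketch, again assuming peerConnection is my existing RTCPeerConnection:

// Sketch only: route the decoded buffer into a MediaStream
// destination and send the resulting audio track over WebRTC
// instead of playing it through the speakers.
var streamDestination = audioContext.createMediaStreamDestination();

sourceNode = audioContext.createBufferSource();
sourceNode.buffer = theBuffer;
sourceNode.loop = true;
sourceNode.connect(streamDestination);

streamDestination.stream.getAudioTracks().forEach(function (track) {
    // `peerConnection` is assumed to be the established connection.
    peerConnection.addTrack(track, streamDestination.stream);
});
sourceNode.start(audioContext.currentTime);

Is this the right direction, or is there a simpler way?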
Please help me out with this; it would be highly appreciated.