I am attempting to implement, for lack of a better description, an offline media context.
The concept is to create 1 second `Blob`s of recorded media, with the ability to

- Play the 1 second `Blob`s independently at an `HTMLMediaElement`
- Play the full media resource from concatenated `Blob`s
The issue is that once the `Blob`s are concatenated, the media resource does not play at an `HTMLMediaElement` using either a `Blob` URL or `MediaSource`.
The created `Blob` URL plays only the first second of the concatenated `Blob`s. `MediaSource` throws two exceptions:

```
DOMException: Failed to execute 'addSourceBuffer' on 'MediaSource': The MediaSource's readyState is not 'open'
```

and

```
DOMException: Failed to execute 'appendBuffer' on 'SourceBuffer': This SourceBuffer has been removed from the parent media source.
```
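For reference, `addSourceBuffer()` throws the first exception whenever `mediaSource.readyState` is not `"open"`, i.e. whenever it is called before the `sourceopen` event has fired; the second is thrown when appending to a `SourceBuffer` whose parent `MediaSource` has since been detached, e.g. by assigning a new `src` to the element. A minimal sketch of the expected ordering, assuming a `video` element and a supported `mimeCodec`:

```js
const mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);
console.log(mediaSource.readyState); // "closed" until the element attaches it
mediaSource.addEventListener("sourceopen", () => {
  console.log(mediaSource.readyState); // "open"; safe to add buffers here
  const sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
  // a parse failure during appendBuffer() will surface here
  sourceBuffer.addEventListener("error", e => console.log(e));
});
```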
How can the concatenated `Blob`s be properly encoded, or a workaround otherwise implemented, to play the media fragments as a single reconstituted media resource?
```html
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<script>
  const src = "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4";
  fetch(src)
    .then(response => response.blob())
    .then(blob => {
      const blobURL = URL.createObjectURL(blob);
      const chunks = [];
      const mimeCodec = "video/webm; codecs=opus";
      let duration;
      let media = document.createElement("video");
      media.onloadedmetadata = () => {
        media.onloadedmetadata = null;
        duration = Math.ceil(media.duration);
        let arr = Array.from({
          length: duration
        }, (_, index) => index);
        // record each second of media
        arr.reduce((p, index) =>
            p.then(() =>
              new Promise(resolve => {
                let recorder;
                let video = document.createElement("video");
                video.onpause = e => {
                  video.onpause = null;
                  console.log(e);
                  recorder.stop();
                }
                video.oncanplay = () => {
                  video.oncanplay = null;
                  video.play();
                  let stream = video.captureStream();
                  recorder = new MediaRecorder(stream);
                  recorder.start();
                  recorder.ondataavailable = e => {
                    console.log("data event", recorder.state, e.data);
                    chunks.push(e.data);
                  }
                  recorder.onstop = e => {
                    resolve();
                  }
                }
                // media fragment URI: play only seconds [index, index + 1]
                video.src = `${blobURL}#t=${index},${index+1}`;
              })
            ), Promise.resolve())
          .then(() => {
            console.log(chunks);
            let video = document.createElement("video");
            video.controls = true;
            document.body.appendChild(video);
            let select = document.createElement("select");
            document.body.appendChild(select);
            let option = new Option("select a segment");
            select.appendChild(option);
            for (let chunk of chunks) {
              let index = chunks.indexOf(chunk);
              let option = new Option(`Play ${index}-${index + 1} seconds of media`, index);
              select.appendChild(option)
            }
            let fullMedia = new Blob(chunks, {
              type: mimeCodec
            });
            let opt = new Option("Play full media", "Play full media");
            select.appendChild(opt);
            select.onchange = () => {
              if (select.value !== "Play full media") {
                video.src = URL.createObjectURL(chunks[select.value])
              } else {
                const mediaSource = new MediaSource();
                video.src = URL.createObjectURL(mediaSource);
                mediaSource.addEventListener("sourceopen", sourceOpen);

                function sourceOpen(event) {
                  // if the media type is supported by `mediaSource`
                  // fetch resource, begin stream read,
                  // append stream to `sourceBuffer`
                  if (MediaSource.isTypeSupported(mimeCodec)) {
                    var sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
                    // set `sourceBuffer` `.mode` to `"segments"`
                    sourceBuffer.mode = "segments";
                    fetch(URL.createObjectURL(fullMedia))
                      // return `ReadableStream` of `response`
                      .then(response => response.body.getReader())
                      .then(reader => {
                        const processStream = (data) => {
                          if (data.done) {
                            return;
                          }
                          // append chunk of stream to `sourceBuffer`
                          sourceBuffer.appendBuffer(data.value);
                        }
                        // at `sourceBuffer` `updateend` call `reader.read()`,
                        // to read next chunk of stream, append chunk to
                        // `sourceBuffer`
                        sourceBuffer.addEventListener("updateend", function() {
                          reader.read().then(processStream);
                        });
                        // start processing stream
                        reader.read().then(processStream);
                        // when `reader` is closed,
                        // the read of the stream is complete
                        return reader.closed.then(() => {
                          // signal end of stream to `mediaSource`
                          mediaSource.endOfStream();
                          return mediaSource.readyState;
                        })
                      })
                      // do stuff when `reader.closed`, `mediaSource` stream ended
                      .then(msg => console.log(msg))
                      .catch(err => console.log(err))
                  }
                  // if `mimeCodec` is not supported by `MediaSource`
                  else {
                    alert(mimeCodec + " not supported");
                  }
                }
              }
            }
          })
      }
      media.src = blobURL;
    })
</script>
</body>
</html>
```
Using a `Blob` URL at the `else` statement of the `select` `change` event plays only the first second of the media resource:

```js
video.src = URL.createObjectURL(fullMedia);
```
plnkr: http://plnkr.co/edit/dNznvxe504JX7RWY658T?p=preview (version 1 uses a `Blob` URL, version 2 uses `MediaSource`).
There is currently no Web API targeted at video editing.
The MediaStream and MediaRecorder APIs are meant to deal with live sources.
Because of the structure of video files, you can't just slice out a part of one to make a new video, nor can you simply concatenate small video files to make one longer one. In both cases, you need to rebuild the file's metadata in order to make a new video file.
The only current API able to produce media files is MediaRecorder.
There are currently only two implementers of the MediaRecorder API, but between them they support about three different codecs in two different containers, which means you would need to build at least five metadata parsers yourself just to cover the current implementations (which will keep growing in number, and which may need updates as implementations evolve).
Sounds like a tough job.
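As an aside, which combinations a given browser can actually produce is easy to probe with MediaRecorder's static `isTypeSupported()` method; the candidate MIME strings below are illustrative assumptions, not an exhaustive list:

```js
// Probe which container/codec combinations this browser's MediaRecorder
// can produce; the candidate strings are examples, support varies widely.
[
  "video/webm;codecs=vp8,opus",
  "video/webm;codecs=vp9,opus",
  "video/webm;codecs=h264",
  "video/mp4"
].forEach(type => console.log(type, MediaRecorder.isTypeSupported(type)));
```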
Maybe the upcoming WebAssembly API will allow us to port ffmpeg to browsers, which would make this a lot simpler, but I have to admit I don't know WebAssembly at all, so I'm not even sure it is really doable.
I hear you saying "Ok, there is no tool made just for that, but we are hackers, and we have other tools, with great power."
Well, yes. If we're really willing to do it, we can hack something...
As said before, MediaStream and MediaRecorder are meant for live video. We can thus convert static video files to live streams with the `[HTMLVideoElement | HTMLCanvasElement].captureStream()` methods. We can also record those live streams to a static file thanks to the MediaRecorder API.
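Put together, converting a static file to a live stream and re-recording it looks roughly like this; a minimal sketch, assuming Chrome's `captureStream()` on a media element (Firefox spells it `mozCaptureStream()`), where `someBlob` is a hypothetical stand-in for any playable video `Blob`:

```js
const video = document.createElement("video");
video.src = URL.createObjectURL(someBlob); // someBlob: hypothetical input file
video.onplaying = () => {
  video.onplaying = null;
  const stream = video.captureStream(); // live MediaStream of the playback
  const recorder = new MediaRecorder(stream);
  const parts = [];
  recorder.ondataavailable = e => parts.push(e.data);
  recorder.onstop = () => {
    // a new static file, re-encoded by the browser in real time
    const file = new Blob(parts, { type: recorder.mimeType });
    console.log(URL.createObjectURL(file));
  };
  recorder.start();
  video.onended = () => recorder.stop();
};
video.play();
```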
What we cannot do, however, is change the current stream source a MediaRecorder has been fed with.
So in order to merge small video files into one longer one, we'll need to

- load these videos into `<video>` elements
- draw these `<video>` elements on a `<canvas>` element in the wanted order
- record the `<canvas>` element's stream along with the audio of the `<video>` elements

But this means that the merging is actually a re-recording of all the videos, and this can only be done in real time (speed = x1); a minimal sketch of this loop follows.
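A sketch of the canvas-based merge, assuming the parts are already loaded into an array of `<video>` elements named `clips` (a hypothetical name) and assuming Chrome's `canvas.captureStream()`; audio mixing is left out for brevity:

```js
const canvas = document.createElement("canvas");
const ctx = canvas.getContext("2d");
canvas.width = 640;
canvas.height = 360;

// record the canvas as one continuous live stream at ~30 fps
const recorder = new MediaRecorder(canvas.captureStream(30));
const chunks = [];
recorder.ondataavailable = e => chunks.push(e.data);
recorder.onstop = () => {
  const merged = new Blob(chunks, { type: recorder.mimeType });
  console.log("merged file:", URL.createObjectURL(merged));
};

function playClip(i) {
  if (i >= clips.length) {
    recorder.stop(); // all parts drawn, finalize the recording
    return;
  }
  const vid = clips[i];
  vid.onended = () => playClip(i + 1); // chain the next part
  vid.play();
  (function draw() {
    ctx.drawImage(vid, 0, 0, canvas.width, canvas.height);
    if (!vid.ended) requestAnimationFrame(draw);
  })();
}

recorder.start();
playClip(0); // real time: takes as long as the summed clip durations
```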
Here is a live proof of concept where we first slice an original video file into multiple smaller parts, shuffle these parts to mimic some montage, then create a canvas-based player that is also able to record this montage and export it.
Nota bene: this is the first version, and I still have a lot of bugs (notably in Firefox; it should work almost fine in Chrome).