一、captureStream
captureStream是一个Web API方法,用于捕获指定元素的媒体流。该方法通常用于从<video>、<audio>或<canvas>元素中捕获实时视频流或音频流,以便进行进一步的处理,如直播、录制或分析。
captureStream() 方法能够实时捕获视频流,适用于直播、实时监控等场景。它返回一个 MediaStream 对象,该对象包含了捕获的视频流数据。相比其他视频捕获方法,captureStream() 提供了较低的延迟和更好的灵活性。
1、captureStream() 返回的 MediaStream 对象为空
- 视频元素尚未加载完成,导致无法捕获视频流。确保视频元素已经加载完成,可以通过监听 loadedmetadata 或 canplay 事件来确认。
- 视频元素的源路径不正确或无法访问。检查视频元素的源路径是否正确,并确保视频文件可以正常访问。
// Example: wait for the video's metadata to load before capturing its stream.
const videoElement = document.createElement('video');
videoElement.src = 'video.mp4';
// 'loadedmetadata' fires once duration/dimensions are known, so the element
// is ready for captureStream() at that point.
videoElement.addEventListener('loadedmetadata', () => {
  const mediaStream = videoElement.captureStream();
  if (mediaStream) {
    // Process the captured media stream here.
  } else {
    console.error('Failed to capture stream');
  }
});
2、处理 captureStream() 返回的 MediaStream 对象
- 使用 MediaRecorder 将 MediaStream 对象录制为视频文件。
- 使用 RTCPeerConnection 将 MediaStream 对象推送到 WebRTC 服务器,实现实时通信。
// Example: record the captured MediaStream with MediaRecorder.
const mediaRecorder = new MediaRecorder(mediaStream);
// 'dataavailable' delivers recorded chunks; skip empty ones.
mediaRecorder.ondataavailable = (event) => {
  if (event.data.size > 0) {
    // Process the recorded video data here.
  }
};
mediaRecorder.start();
3、AudioContext对象
如果浏览器不支持在video、audio对象上使用captureStream获取流数据,可以考虑使用AudioContext对象进行声音数据获取。
// Fallback: when captureStream() is unavailable on the media element,
// route its audio through the Web Audio API to obtain a MediaStream.
const audioContext = new (window.AudioContext || window.webkitAudioContext)();
// Feed the media element into the audio graph...
const source = audioContext.createMediaElementSource(videoElement);
// ...and terminate the graph in a node that exposes a MediaStream.
const destination = audioContext.createMediaStreamDestination();
source.connect(destination);
const audioStream = destination.stream;
二、MediaStream()
构造函数 **MediaStream()** 返回新建的 MediaStream 实例,该实例作为媒体流内容的集合载体,其可能包含多个媒体数据轨,每个数据轨由一个 MediaStreamTrack 对象表示。如果给出相应参数,则指定的数据轨会被添加到新的流中;否则,该流中不包含任何数据轨。
// The three MediaStream constructor forms:
newStream = new MediaStream();        // empty stream, no tracks
newStream = new MediaStream(stream);  // shares every track of an existing stream
newStream = new MediaStream(tracks);  // tracks is an Array of MediaStreamTrack objects
参数:
stream:这是另一个 MediaStream 对象,其数据轨会被自动添加到新建的流中。且这些数据轨不会从原流中移除,即变成了两条流共享的数据。
tracks:这是 MediaStreamTrack 对象的 Array 类型的成员,代表了每一个添加到流中的数据轨。
返回值:
新建的 MediaStream 对象,会包含创建时已给的数据轨内容,若没有给定任何数据轨则内容为空。
合并录制的视频流和音频流
// Merge the canvas video track(s) with the <video> element's audio track(s).
const canvas = document.getElementById('canvas');
const stream = canvas.captureStream(30); // canvas capture accepts a frame rate
const video = document.getElementById('video');
// HTMLMediaElement.captureStream() takes no arguments (unlike the canvas variant).
const audioStream = video.captureStream();
// Build a new MediaStream that contains both the video and the audio tracks.
const combinedStream = new MediaStream([
  ...stream.getTracks(),
  ...audioStream.getAudioTracks(),
]);
三、录制Demo
/**
 * Detect whether the code is running inside the WeChat in-app browser.
 *
 * @param {string} [ua] - User-agent string to inspect; defaults to the current
 *   browser's navigator.userAgent (empty string outside a browser).
 * @returns {boolean} true when the UA contains "micromessenger".
 */
export function isWeChatEnv(ua = typeof navigator !== 'undefined' ? navigator.userAgent : '') {
  // WeChat's embedded WebView identifies itself with "MicroMessenger".
  return ua.toLowerCase().includes('micromessenger');
}
javascript"> let recorder = null; // 录制对象// 录制视频recordVideo(type) {console.error('录制视频')// 开始执行渲染canvas和播放video的代码逻辑、、、、、、this.recording = truelet canvas = document.getElementById('canvas')let stream = canvas.captureStream(30)// 获取音频上下文let audioStreamArr = []// let audioContextArr = []// let sourceArr = []for(let key in this.videoEles) {if(this.videoEles[key]?.captureStream) {const audioStream = this.videoEles[key].captureStream(30)audioStreamArr.push(audioStream)} else {const audioContext = new (window.AudioContext || window.webkitAudioContext)()const source = audioContext.createMediaElementSource(this.videoEles[key])const destination = audioContext.createMediaStreamDestination()source.connect(destination)const audioStream = destination.streamaudioStreamArr.push(audioStream)// const source = new MediaElementAudioSourceNode(audioContext, {// mediaElement: this.videoEles[key]// })// const gainNode = new GainNode(audioContext)// source.connect(gainNode)// const destination = audioContext.createMediaStreamDestination()// gainNode.connect(destination)// const audioStream = destination.stream// audioContextArr.push(audioContext)// sourceArr.push(source)}}let audioChunks = []audioStreamArr.map(it => {audioChunks.push(...it.getAudioTracks())})// 创建一个新的MediaStream,将视频跟音频流合并进去let combinedStream = new MediaStream([ ...stream.getTracks(), ...audioChunks]);const userAgent = navigator.userAgent || navigator.vendorconsole.error('userAgent::', userAgent)let options = {}/*if ('MediaSource' in window && MediaSource.isTypeSupported('video/webm; codecs="vp9"')) {console.log('Your browser supports VP9 via MediaSource Extensions');} else {console.log('Your browser does NOT support VP9 via MediaSource Extensions');}*/// Androidif (userAgent.match(/Android/i)) {options = {audioBitsPerSecond: 128000,videoBitsPerSecond: 2500000,// mimeType: 'video/webm; codecs="vp8,opus"',mimeType: isWeChatEnv() ? 
'video/webm; codecs="vp8,opus"' : 'video/mp4; codecs="avc1.64001e"',}} else if (/iPad|iPhone|iPod/.test(userAgent)) {options = {audioBitsPerSecond: 128000,videoBitsPerSecond: 2500000,mimeType: 'video/mp4; codecs="avc1"',}} else {options = {audioBitsPerSecond: 128000,videoBitsPerSecond: 2500000,// mimeType: 'video/webm; codecs="vp8"',mimeType: 'video/mp4; codecs="avc1"',}}// 初始化录制对象recorder = new MediaRecorder(combinedStream, options)let recordData = []// 收集录制数据recorder.ondataavailable = function(event) {if(event.data && event.data.size) {recordData.push(event.data)}}// 监听录制结束事件recorder.onstop = async () => {console.log('recorder onstop')let blobType = 'video/mp4'let url = URL.createObjectURL(new Blob(recordData, {type: blobType}))console.log(url)// const a = document.createElement('a');// a.href = url;// a.download = 'recording.webm';// a.click();// URL.revokeObjectURL(url);const time = new Date().toJSON()if (window.navigator && window.navigator?.msSaveOrOpenBlob) {window.navigator.msSaveBlob(url, `${'recording'}_${time}.mp4`);console.log('IOS下载', url)} else {console.log('安卓下载', url)const link = document.createElement("a");link.style.display = "none";link.href = url;// link.target = '_blank';link.setAttribute("download", `${'recording'}_${time}.mp4`);document.body.appendChild(link);link.click();document.body.removeChild(link);}}recorder.start(10000)}// 结束录制recorder.stop()