添加链接
link管理
链接快照平台
  • 输入网页链接,自动生成快照
  • 标签化管理网页链接

javascript video to audio。前端视频转(提取)音频。使用FileReader加载视频,然后decodeAudioData对其进行解码,并使用OfflineAudioContext重新渲染,最后将audiobuffer转换为wav。

demo: https://237005722.github.io/video-to-audio/

一、引用参考

二、示例代码

1.视频转换(提取)音频

/**
 * video-to-audio
 * creater: qc
 * reference: https://github.com/mdn/webaudio-examples/tree/master/offline-audio-context-promise
 *
 * Extract the audio track from a video (or audio) File and download it as a WAV.
 * Pipeline: FileReader -> ArrayBuffer -> decodeAudioData -> OfflineAudioContext
 * re-render -> audioBufferToWav -> downloadWav.
 *
 * @param {File} file - file selected by the user (video/* or audio/*)
 * @returns {Promise<{fileName: string, fileType: string, fileDuration: string}|null>}
 *          metadata of the generated WAV, or null when decoding fails
 *          (e.g. the video has no audio track).
 */
const videoToAudio = async (file) => {
  try {
    console.log('videoToAudio file', file)
    const fileData = new Blob([file]) // video file
    // Read the whole file into memory. NOTE: readAsArrayBuffer must be
    // started AFTER the handlers are attached, outside onload — the
    // original code started the read inside onload, so it never fired.
    const arrayBuffer = await new Promise((resolve, reject) => {
      const reader = new FileReader()
      reader.onload = () => resolve(reader.result)
      reader.onerror = () => reject(reader.error)
      reader.readAsArrayBuffer(fileData)
    })
    console.log('arrayBuffer', arrayBuffer)
    // Vendor-prefixed fallbacks for older browsers.
    const audioContext = new (window.AudioContext || window.webkitAudioContext ||
      window.mozAudioContext || window.msAudioContext)()
    const decodedAudioData = await audioContext.decodeAudioData(arrayBuffer)
    console.log('decodedAudioData', decodedAudioData)
    const fileDuration = durationTrans(decodedAudioData.duration)
    console.log('fileDuration', fileDuration)
    // Re-render the decoded audio offline; length = sampleRate * seconds frames.
    const offlineAudioContext = new OfflineAudioContext(
      decodedAudioData.numberOfChannels,
      decodedAudioData.sampleRate * decodedAudioData.duration,
      decodedAudioData.sampleRate
    )
    const soundSource = offlineAudioContext.createBufferSource()
    soundSource.buffer = decodedAudioData
    soundSource.connect(offlineAudioContext.destination)
    soundSource.start()
    const renderedBuffer = await offlineAudioContext.startRendering() // outputs AudioBuffer
    console.log('renderedBuffer', renderedBuffer)
    const wav = audioBufferToWav(renderedBuffer)
    const fileType = 'wav'
    const fileName = `${file.name}.${fileType}`
    downloadWav(wav, fileName)
    return { fileName, fileType, fileDuration }
  } catch (error) {
    // {code: 0, name: 'EncodingError', message: 'Unable to decode audio data'}
    // Typical cause: the video file contains no audio track.
    console.log('videoToAudio error', error)
    return null
  } finally {
    console.log('videoToAudio finally')
  }
}

2.音频流转wav

/**
 * audiobuffer-to-wav
 * creater: https://github.com/Jam3/audiobuffer-to-wav
 *
 * Encode an AudioBuffer as a WAV file (RIFF/WAVE container).
 * @param {AudioBuffer} buffer - decoded audio (1 or 2 channels are supported;
 *        channels beyond the first two are ignored)
 * @param {{float32?: boolean}} [opt] - when float32 is truthy, write 32-bit
 *        IEEE float samples (format 3); otherwise 16-bit PCM (format 1)
 * @returns {ArrayBuffer} complete WAV file bytes (44-byte header + data)
 */
const audioBufferToWav = (buffer, opt) => {
  opt = opt || {}
  const numChannels = buffer.numberOfChannels
  const sampleRate = buffer.sampleRate
  const format = opt.float32 ? 3 : 1
  const bitDepth = format === 3 ? 32 : 16
  let result
  if (numChannels === 2) {
    // Stereo WAV data must be interleaved L0 R0 L1 R1 ...
    result = interleave(buffer.getChannelData(0), buffer.getChannelData(1))
  } else {
    result = buffer.getChannelData(0)
  }
  return encodeWAV(result, format, sampleRate, numChannels, bitDepth)
}

/**
 * Write the 44-byte canonical WAV header followed by the sample data.
 * @param {Float32Array} samples - interleaved samples in [-1, 1]
 * @param {number} format - 1 = integer PCM, 3 = IEEE float
 * @param {number} sampleRate
 * @param {number} numChannels
 * @param {number} bitDepth - 16 or 32
 * @returns {ArrayBuffer}
 */
const encodeWAV = (samples, format, sampleRate, numChannels, bitDepth) => {
  const bytesPerSample = bitDepth / 8
  const blockAlign = numChannels * bytesPerSample
  const buffer = new ArrayBuffer(44 + samples.length * bytesPerSample)
  const view = new DataView(buffer)
  /* RIFF identifier */
  writeString(view, 0, 'RIFF')
  /* RIFF chunk length */
  view.setUint32(4, 36 + samples.length * bytesPerSample, true)
  /* RIFF type */
  writeString(view, 8, 'WAVE')
  /* format chunk identifier */
  writeString(view, 12, 'fmt ')
  /* format chunk length */
  view.setUint32(16, 16, true)
  /* sample format (raw) */
  view.setUint16(20, format, true)
  /* channel count */
  view.setUint16(22, numChannels, true)
  /* sample rate */
  view.setUint32(24, sampleRate, true)
  /* byte rate (sample rate * block align) */
  view.setUint32(28, sampleRate * blockAlign, true)
  /* block align (channel count * bytes per sample) */
  view.setUint16(32, blockAlign, true)
  /* bits per sample */
  view.setUint16(34, bitDepth, true)
  /* data chunk identifier */
  writeString(view, 36, 'data')
  /* data chunk length */
  view.setUint32(40, samples.length * bytesPerSample, true)
  if (format === 1) { // Raw PCM
    floatTo16BitPCM(view, 44, samples)
  } else {
    writeFloat32(view, 44, samples)
  }
  return buffer
}

/** Merge two mono channels into one interleaved L/R Float32Array. */
const interleave = (inputL, inputR) => {
  const length = inputL.length + inputR.length
  const result = new Float32Array(length)
  let index = 0
  let inputIndex = 0
  while (index < length) {
    result[index++] = inputL[inputIndex]
    result[index++] = inputR[inputIndex]
    inputIndex++
  }
  return result
}

/** Write samples as little-endian 32-bit IEEE floats starting at offset. */
const writeFloat32 = (output, offset, input) => {
  for (let i = 0; i < input.length; i++, offset += 4) {
    output.setFloat32(offset, input[i], true)
  }
}

/** Clamp samples to [-1, 1] and write them as little-endian signed 16-bit PCM. */
const floatTo16BitPCM = (output, offset, input) => {
  for (let i = 0; i < input.length; i++, offset += 2) {
    const s = Math.max(-1, Math.min(1, input[i]))
    // Negative range is 0x8000 (32768), positive is 0x7FFF (32767).
    output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
  }
}

/** Write an ASCII string byte-by-byte into the DataView at offset. */
const writeString = (view, offset, string) => {
  for (let i = 0; i < string.length; i++) {
    view.setUint8(offset + i, string.charCodeAt(i))
  }
}

3.下载保存

/**
 * Trigger a browser download of the generated WAV bytes.
 * Uses a temporary <a download> element; falls back to navigator.msSaveBlob
 * for old IE/Edge which lack the download attribute.
 *
 * @param {ArrayBuffer} wav - WAV file bytes produced by audioBufferToWav
 * @param {string} [fileName='audio.wav'] - suggested name for the saved file
 * @returns {void}
 */
const downloadWav = (wav, fileName = 'audio.wav') => {
  try {
    const blob = new window.Blob([new DataView(wav)], {
      type: 'audio/wav'
    })
    if ('download' in document.createElement('a')) {
      const url = window.URL.createObjectURL(blob)
      const anchor = document.createElement('a')
      document.body.appendChild(anchor)
      anchor.style = 'display: none'
      anchor.href = url
      anchor.download = fileName
      anchor.click()
      // Release the object URL and remove the helper element once clicked.
      window.URL.revokeObjectURL(url)
      document.body.removeChild(anchor)
    } else {
      // IE10/11 fallback.
      navigator.msSaveBlob(blob, fileName)
    }
  } catch (error) {
    console.log('downloadWav error', error)
  } finally {
    console.log('downloadWav finally')
  }
}
/**
 * Format a duration given in seconds as a clock string.
 * Hours are omitted when zero: 59 -> "00:59", 3661 -> "01:01:01".
 *
 * @param {number} a - duration in seconds (fractions are truncated)
 * @returns {string} "MM:SS" or "HH:MM:SS", each field zero-padded to 2 digits
 */
const durationTrans = (a) => {
  // Math.floor instead of parseInt: parseInt coerces through a string and
  // breaks on numbers rendered in exponential notation.
  const h = Math.floor(a / 3600)
  const m = Math.floor((a % 3600) / 60)
  const s = Math.floor(a % 3600 % 60)
  const pad = (v) => String(v).padStart(2, '0')
  return (h > 0 ? `${pad(h)}:` : '') + `${pad(m)}:${pad(s)}`
}

index.css

/* Full-width rounded button vertically centered in the viewport. */
button {
  position: absolute;
  top: calc(50vh - 30px);
  left: 10%;
  width: 80%;
  height: 60px;
  background-color: transparent;
  border: 1px solid gainsboro;
  border-radius: 10px;
  padding: 10px;
}
/* Invisible file input stretched over its parent so the whole button
   acts as the click target (opacity/filter for old IE). */
input[type=file] {
  position: absolute;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  opacity: 0;
  filter: alpha(opacity=0);
  cursor: pointer;
}

index.html
把前面的1,2,3代码块合成index.js

<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Video To Audio</title>
  <link rel="stylesheet" href="index.css"/>
</head>
<body>
  <button>
    <label for="file" id="filename">选择视频文件</label>
    <input type="file" name="file" id="file" accept="video/*,audio/*" onchange="fileChange(this)">
  </button>
  <script src="index.js"></script>
  <script>
    // Called by the file input's onchange: show the chosen name, run the
    // conversion, then show the generated WAV name on success.
    const fileChange = (input) => {
      const file = input.files[0]
      if (!file) return
      const label = document.getElementById('filename')
      label.innerHTML = file.name
      videoToAudio(file).then(audio => {
        console.log('audio', audio)
        audio && (label.innerHTML = audio.fileName)
      })
    }
  </script>
</body>
</html>

github地址

project:https://github.com/237005722/video-to-audio

demo:https://237005722.github.io/video-to-audio/

javascript video to audio。前端视频转音频。使用FileReader加载视频，然后decodeAudioData对其进行解码，并使用OfflineAudioContext重新渲染，最后将audiobuffer转换为wav。
前端视频文件处理音频文件有很多人都觉得不可能,一开始我也觉得做不到,网上查了很多资料,github上也找了,基本上都没有,后来在bejson上看到了视频音频,然后就copy了一下。 我的处理: 两个处理js文件:deal.js(处理音频文件下载),work.js(处理视频音频) deal.js (function(n) { var t, i; if (n.URL = n.URL || n.webkitURL, n.Blob && n.URL) try { new Blob;
使用 AudioContext 对象播放音频 <进阶>通过 AudioContext音频进行精细化处理:失真、滤波,变调 <进阶>通过 AudioContext.createBuffer()生成一段音频 使用 audio 标签播放音频 使用 audio 标签播放音乐, 加载音频文件可以通过直接在标签上的 src 写好, 或通过 audio.setAttribute(“. 获取视频总时长 var elevideo = document.getElementById("video"); elevideo.addEventListener('loadedmetadata', function () { //加载数据 //视频的总长度 ```javascript const audioContext = new (window.AudioContext || window.webkitAudioContext)(); const bufferSize = 2048; const recordingTime = 5000; let audioData = []; navigator.mediaDevices.getUserMedia({ audio: true }) .then(stream => { const mediaRecorder = new MediaRecorder(stream); mediaRecorder.start(); const audioChunks = []; mediaRecorder.addEventListener("dataavailable", event => { audioChunks.push(event.data); setTimeout(() => { mediaRecorder.stop(); stream.getTracks().forEach(track => track.stop()); const audioBlob = new Blob(audioChunks); const reader = new FileReader(); reader.readAsArrayBuffer(audioBlob); reader.onloadend = () => { audioContext.decodeAudioData(reader.result, (buffer) => { const audioBuffer = convertBuffer(buffer); const audioBlob = bufferToBlob(audioBuffer); sendBlobToServer(audioBlob); }, recordingTime); function convertBuffer(buffer) { const sampleRate = buffer.sampleRate; const numberOfChannels = buffer.numberOfChannels; const length = buffer.length; const newBuffer = audioContext.createBuffer(1, length, sampleRate); const newChannel = newBuffer.getChannelData(0); for (let i = 0; i < length; i++) { let channelSum = 0; for (let j = 0; j < numberOfChannels; j++) { channelSum += buffer.getChannelData(j)[i]; newChannel[i] = channelSum / numberOfChannels; return newBuffer; function bufferToBlob(buffer) { const numberOfChannels = buffer.numberOfChannels; const length = buffer.length; const newBuffer = new ArrayBuffer(length * numberOfChannels * 2); const newView = new DataView(newBuffer); for (let i = 0; i < length; i++) { let offset = i * numberOfChannels * 2; for (let j = 0; j < numberOfChannels; j++) { let sample = 
Math.max(-1, Math.min(1, buffer.getChannelData(j)[i])); sample = (sample + 1) / 2 * 65535; newView.setInt16(offset, sample, true); offset += 2; return new Blob([newView], { type: "audio/wav" }); function sendBlobToServer(blob) { const xhr = new XMLHttpRequest(); xhr.open("POST", "/api/upload-audio", true); xhr.setRequestHeader("Content-Type", "audio/wav"); xhr.send(blob); 此代码通过调用Web API中的`getUserMedia`方法来获取音频流,使用`MediaRecorder`对象来记录音频数据,然后将记录的音频数据换为blob,并使用`FileReader`对象将其读入内存中。然后,使用`decodeAudioData`方法将Blob数据解码为音频缓冲区,然后使用`convertBuffer`函数将音频缓冲区换为单声道的16k16bits音频缓冲区,最后将缓冲区换为Blob并发送到后端。 请注意,此示例代码仅用于参考。实际应用中可能需要根据具体需求进行修改。 javascript video to audio demo。前端视频转音频。FileReader,decodeAudioData,OfflineAudioContext weixin_51229618: 非常给力,正好在找类似的实现