This article gives a detailed walkthrough of how to record MP3 audio in JavaScript, stream it in real time over WebSocket, and render a simple waveform. The approach is quite practical, so it is shared here for reference; hopefully you will get something useful out of it.
Frontend implementation:
Include the recorder script:
<script type="text/javascript" src="/js/recorder/recordmp3.js"></script>
This file differs slightly from the original author's version: a few things have been added to it, and it in turn loads two other scripts, lame.min.js and worker-realtime.js, both of which come from the original author's code.
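recordmp3.js (shown further down) talks to worker-realtime.js through a small command protocol: the main thread posts {cmd: 'init', config}, {cmd: 'encode', buf} and {cmd: 'finish'}, and the worker replies with {cmd: 'init'}, {cmd: 'end', buf} or {cmd: 'error', error}. The real worker ships with the original author's code; purely to illustrate that protocol, a minimal sketch could look like the following. The floatTo16Bit helper and the lamejs.Mp3Encoder calls are stand-ins of mine, assuming lame.min.js exposes a lamejs-style encoder; the bundled build may have a different API.

// worker-realtime.js -- minimal sketch of the protocol recordmp3.js expects.
// Assumption: lame.min.js provides a lamejs-style Mp3Encoder; the original
// author's worker may differ in detail.
importScripts('lame.min.js'); // resolved relative to the worker script's URL

var encoder = null;
var pendingChunks = []; // Int8Array chunks encoded since the last 'finish'

// Convert the Float32 samples ([-1, 1]) from getChannelData() to 16-bit PCM.
function floatTo16Bit(input) {
    var output = new Int16Array(input.length);
    for (var i = 0; i < input.length; i++) {
        var s = Math.max(-1, Math.min(1, input[i]));
        output[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return output;
}

self.onmessage = function (e) {
    var data = e.data;
    try {
        switch (data.cmd) {
            case 'init':
                encoder = new lamejs.Mp3Encoder(1, data.config.sampleRate, data.config.bitRate);
                self.postMessage({ cmd: 'init' });
                break;
            case 'encode':
                var mp3buf = encoder.encodeBuffer(floatTo16Bit(data.buf));
                if (mp3buf.length > 0) pendingChunks.push(new Int8Array(mp3buf));
                break;
            case 'finish':
                // Flushing on every 'finish' is a simplification of the real-time worker.
                var last = encoder.flush();
                if (last.length > 0) pendingChunks.push(new Int8Array(last));
                // recordmp3.js builds new Blob(e.data.buf), so buf must be an array of chunks.
                self.postMessage({ cmd: 'end', buf: pendingChunks });
                pendingChunks = [];
                break;
        }
    } catch (err) {
        self.postMessage({ cmd: 'error', error: err.message });
    }
};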
Page:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=gb2312"/>
    <title>测试</title>
</head>
<body>
    <button id="intercomBegin">开始对讲</button>
    <button id="intercomEnd">关闭对讲</button>
    <canvas id="casvased" width="400" height="100"></canvas>
</body>
<script type="text/javascript" src="/js/jquery-3.3.1.js"></script>
<script type="text/javascript" src="/js/recorder/recordmp3.js"></script>
<script type="text/javascript">
var begin = document.getElementById('intercomBegin');
var end = document.getElementById('intercomEnd');
var canvas = document.getElementById("casvased");
var canvasCtx = canvas.getContext("2d");
var ws = null;   // the WebSocket connection
var recorder;

/*
 * WebSocket setup
 */
function useWebSocket() {
    ws = new WebSocket("ws://127.0.0.1:8089/send/voice");
    ws.binaryType = 'arraybuffer'; // binary frames are exchanged as ArrayBuffer
    ws.onopen = function () {
        console.log('握手成功');
        if (ws.readyState == 1) { // the connection is open, start recording
            recorder.start();
        }
    };
    ws.onmessage = function (msg) {
        console.info(msg);
    };
    ws.onerror = function (err) {
        console.info(err);
    };
}

/*
 * Start the intercom
 */
begin.onclick = function () {
    recorder = new MP3Recorder({
        debug: true,
        funOk: function () {
            console.log('点击录制,开始录音! ');
        },
        funCancel: function (msg) {
            console.log(msg);
            recorder = null;
        }
    });
};

/*
 * Stop the intercom
 */
end.onclick = function () {
    if (ws) {
        ws.close();
        recorder.stop();
        console.log('关闭对讲以及WebSocket');
    }
};

// Split the MP3 data obtained so far into 1024-byte packets and send them.
var sendData = function () {
    var reader = new FileReader();
    reader.onload = e => {
        var outbuffer = e.target.result;
        var arr = new Int8Array(outbuffer);
        if (arr.length > 0) {
            var tmparr = new Int8Array(1024);
            var j = 0;
            for (var i = 0; i < arr.byteLength; i++) {
                tmparr[j++] = arr[i];
                if (((i + 1) % 1024) == 0) {
                    ws.send(tmparr);
                    if (arr.byteLength - i - 1 >= 1024) {
                        tmparr = new Int8Array(1024);
                    } else {
                        tmparr = new Int8Array(arr.byteLength - i - 1);
                    }
                    j = 0;
                }
                if ((i + 1 == arr.byteLength) && ((i + 1) % 1024) != 0) {
                    ws.send(tmparr); // send the final, shorter packet
                }
            }
        }
    };
    recorder.getMp3Blob(function (blob) {
        reader.readAsArrayBuffer(blob); // read the MP3 blob into the reader
    });
};
</script>
</html>

recordmp3.js:

(function (exports) {
    var MP3Recorder = function (config) {
        var recorder = this;
        config = config || {};
        config.sampleRate = config.sampleRate || 44100;
        config.bitRate = config.bitRate || 128;
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia || navigator.msGetUserMedia;
        if (navigator.getUserMedia) {
            navigator.getUserMedia({ audio: true }, function (stream) {
                var context = new AudioContext(),
                    microphone = context.createMediaStreamSource(stream),
                    processor = context.createScriptProcessor(16384, 1, 1), // buffer size, input channels, output channels
                    mp3ReceiveSuccess, currentErrorCallback;

                var height = 100;
                var width = 400;
                const analyser = context.createAnalyser();
                analyser.fftSize = 1024;
                // hook the analyser up to the audio source
                microphone.connect(analyser);
                analyser.connect(context.destination);
                const bufferLength = analyser.frequencyBinCount; // half of the analyser's fftSize
                const dataArray = new Uint8Array(bufferLength);

                function draw() {
                    canvasCtx.clearRect(0, 0, width, height);  // clear the canvas
                    analyser.getByteFrequencyData(dataArray);  // copy the current frequency data into the Uint8Array
                    const requestAnimFrame = window.requestAnimationFrame(draw) || window.webkitRequestAnimationFrame(draw);
                    canvasCtx.fillStyle = '#000130';
                    canvasCtx.fillRect(0, 0, width, height);
                    let barWidth = (width / bufferLength) * 2;
                    let barHeight;
                    let x = 0;
                    let c = 2;
                    for (let i = 0; i < bufferLength; i++) {
                        barHeight = c + (dataArray[i] / 400) * height;
                        canvasCtx.fillStyle = 'rgb(0, 255, 30)';
                        canvasCtx.fillRect(x, height / 2 - barHeight / 2, barWidth, barHeight);
                        x += barWidth + 1;
                    }
                }
                draw();
                useWebSocket();

                config.sampleRate = context.sampleRate;
                processor.onaudioprocess = function (event) { // encode while recording
                    var array = event.inputBuffer.getChannelData(0);
                    realTimeWorker.postMessage({ cmd: 'encode', buf: array });
                    sendData();
                };

                var realTimeWorker = new Worker('/js/recorder/worker-realtime.js');
                realTimeWorker.onmessage = function (e) {
                    switch (e.data.cmd) {
                        case 'init':
                            log('初始化成功');
                            if (config.funOk) {
                                config.funOk();
                            }
                            break;
                        case 'end':
                            log('MP3大小:', e.data.buf.length);
                            if (mp3ReceiveSuccess) {
                                mp3ReceiveSuccess(new Blob(e.data.buf, { type: 'audio/mp3' }));
                            }
                            break;
                        case 'error':
                            log('错误信息:' + e.data.error);
                            if (currentErrorCallback) {
                                currentErrorCallback(e.data.error);
                            }
                            break;
                        default:
                            log('未知信息:', e.data);
                    }
                };

                recorder.getMp3Blob = function (onSuccess, onError) {
                    currentErrorCallback = onError;
                    mp3ReceiveSuccess = onSuccess;
                    realTimeWorker.postMessage({ cmd: 'finish' });
                };

                recorder.start = function () {
                    if (processor && microphone) {
                        microphone.connect(processor);
                        processor.connect(context.destination);
                        log('开始录音');
                    }
                };

                recorder.stop = function () {
                    if (processor && microphone) {
                        microphone.disconnect();
                        processor.disconnect();
                        log('录音结束');
                    }
                };

                realTimeWorker.postMessage({
                    cmd: 'init',
                    config: {
                        sampleRate: config.sampleRate,
                        bitRate: config.bitRate
                    }
                });
            }, function (error) {
                var msg;
                switch (error.code || error.name) {
                    case 'PERMISSION_DENIED':
                    case 'PermissionDeniedError':
                        msg = '用户拒绝访问麦克风';
                        break;
                    case 'NOT_SUPPORTED_ERROR':
                    case 'NotSupportedError':
                        msg = '浏览器不支持麦克风';
                        break;
                    case 'MANDATORY_UNSATISFIED_ERROR':
                    case 'MandatoryUnsatisfiedError':
                        msg = '找不到麦克风设备';
                        break;
                    default:
                        msg = '无法打开麦克风,异常信息:' + (error.code || error.name);
                        break;
                }
                if (config.funCancel) {
                    config.funCancel(msg);
                }
            });
        } else {
            if (config.funCancel) {
                config.funCancel('当前浏览器不支持录音功能');
            }
        }

        function log(str) {
            if (config.debug) {
                console.log(str);
            }
        }
    };
    exports.MP3Recorder = MP3Recorder;
})(window);
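A practical note that is not part of the original code: navigator.getUserMedia is deprecated, and current browsers only expose navigator.mediaDevices.getUserMedia, and only in a secure context (https:// or localhost). If the prefixed lookup in recordmp3.js comes up empty, a small shim along these lines can bridge the gap (a sketch of mine, not something the original recorder includes):

// Compatibility shim (assumption, not part of the original recordmp3.js):
// map the promise-based navigator.mediaDevices.getUserMedia onto the
// callback-style navigator.getUserMedia that recordmp3.js expects.
if (!navigator.getUserMedia && navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.getUserMedia = function (constraints, onSuccess, onError) {
        navigator.mediaDevices.getUserMedia(constraints).then(onSuccess).catch(onError);
    };
}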
Backend WebSocket:
On the server side, the received audio is simply saved to an MP3 file.
package com.jetosend.common.socket;

import com.jetosend.common.utils.Utils;
import org.springframework.stereotype.Component;

import javax.websocket.*;
import javax.websocket.server.PathParam;
import javax.websocket.server.ServerEndpoint;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.Hashtable;
import java.util.Map;

@ServerEndpoint("/send/{key}")
@Component
public class ServerSocket {

    private static final Map<String, Session> connections = new Hashtable<>();

    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();

    /**
     * Open the connection.
     *
     * @param id      the peer platform's resource key (path parameter)
     * @param session the WebSocket session
     * @author Liting
     * @date 2019-10-10 09:22
     */
    @OnOpen
    public void onOpen(@PathParam("key") String id, Session session) {
        System.out.println(id + "连上了");
        connections.put(id, session);
    }

    /**
     * Receive a binary message and append it to the in-memory buffer.
     */
    @OnMessage
    public void onMessage(@PathParam("key") String id, InputStream inputStream) {
        System.out.println("来自" + id);
        try {
            int rc = 0;
            byte[] buff = new byte[100];
            while ((rc = inputStream.read(buff, 0, 100)) > 0) {
                byteArrayOutputStream.write(buff, 0, rc);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Error handling.
     */
    @OnError
    public void onError(Throwable throwable) {
        throwable.printStackTrace();
        // TODO log the exception
    }

    /**
     * Close the connection and flush the buffered bytes to an MP3 file.
     */
    @OnClose
    public void onClose(@PathParam("key") String id) {
        System.out.println(id + "断开");
        BufferedOutputStream bos = null;
        FileOutputStream fos = null;
        File file = null;
        try {
            file = new File("D:\\testtest.mp3");
            fos = new FileOutputStream(file);               // output stream
            bos = new BufferedOutputStream(fos);            // buffered stream
            bos.write(byteArrayOutputStream.toByteArray()); // write out the byte array
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (bos != null) {
                try {
                    bos.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            if (fos != null) {
                try {
                    fos.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
        connections.remove(id);
    }
}
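To check the endpoint without involving the recorder at all, a throwaway test can be run from the browser console. This only verifies the plumbing, under the same assumptions as the frontend code (server listening on 127.0.0.1:8089, "voice" filling the {key} path parameter); the file it produces is not playable audio:

// Throwaway plumbing test (assumes the server above listens on 127.0.0.1:8089).
var testWs = new WebSocket('ws://127.0.0.1:8089/send/voice');
testWs.binaryType = 'arraybuffer';
testWs.onopen = function () {
    testWs.send(new Int8Array(1024));                  // one dummy 1024-byte packet, like sendData() sends
    setTimeout(function () { testWs.close(); }, 100);  // closing triggers @OnClose, which writes D:\testtest.mp3
};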
Result:
That is all on how to record MP3 audio in JavaScript, stream it in real time over WebSocket, and draw a simple waveform. Hopefully the above is of some help and you have picked up something new. If you found the article worthwhile, feel free to share it so more people can see it.