
TRTC Web: Tencent Meeting-style microphone mute detection

2022-06-24 16:21:00 songs of the people of Chu

Project background

At the moment the Web version of TRTC has no mute detection: nothing prompts you when you speak with the microphone turned off, which sometimes makes for awkward meeting moments. To improve the user experience, we try to bring Tencent Meeting's solution into TRTC here.

When the user has turned off the microphone and the surrounding sound exceeds a certain level, a prompt is shown.

It is currently compatible with all frameworks under mainstream PC browsers, as well as with the Electron framework. To fit the TRTC Web demo more easily, it is written with the jQuery library (mainly for the interaction and styling); with a few changes it can be adapted to vanilla JavaScript or other frameworks.

The code can be dropped into the TRTC demo as a separate js file and imported directly, plug and play. The parts you may want to customize are marked with comments in the code.

Usage:


Note: this feature only works when the page is served by a web server; for local debugging, use Live Server.
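
As a minimal sketch of what that looks like, assuming the article's code is saved as mic-mute-detect.js (the file names and include order below are placeholders, not part of the official demo), you only need jQuery loaded first, plus an optional canvas if you want the volume bar:

<!-- Hypothetical include order in the TRTC demo page -->
<script src="./jquery.min.js"></script>        <!-- the prompt animation relies on jQuery -->
<script src="./trtc-demo.js"></script>         <!-- the existing demo code that defines isMicOn -->

<!-- Optional: only needed if you uncomment the canvasContext lines in the source below -->
<canvas id="meter" width="500" height="50"></canvas>

<script src="./mic-mute-detect.js"></script>   <!-- the code from this article -->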

Effect:


Source code

This is easier said than done: essentially you have to process the audio stream in JS. It can be done with the older-style AudioContext interface together with createMediaStreamSource and related methods. The resulting code is pasted directly below.

var audioContext = null;
var meter = null;
// To visualize the volume, uncomment canvasContext below (and the getElementById line in getStream) and add a canvas element to the HTML for it to draw on
// var canvasContext = null;
var mediaStreamSource = null;
var WIDTH = 500;
var HEIGHT = 50;
var rafID = null;
var hintlock = false;

// jQuery; if you need to support other frameworks, the same can be done with plain DOM APIs
$('body').append(`   <div id="michint" class="alert alert-warning">
    <strong>Note!</strong> Your voice was detected, but your microphone is not turned on!
</div>`)
$('#michint').slideUp(0)

getStream();
function getStream() {

    // Grab the canvas. To visualize the volume, add a canvas with height 50, width 500 and id "meter" to the HTML, then uncomment the line below; the sizes can be adjusted as needed
    // canvasContext = document.getElementById("meter").getContext("2d");

    window.AudioContext = window.AudioContext || window.webkitAudioContext;


    try {
        navigator.getUserMedia =
            navigator.getUserMedia ||
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia;

        navigator.getUserMedia(
            {
                "audio": {
                    "mandatory": {
                        "googEchoCancellation": "false",
                        "googAutoGainControl": "false",
                        "googNoiseSuppression": "false",
                        "googHighpassFilter": "false"
                    },
                    "optional": []
                },
            }, gotStream, didntGetStream);
    } catch (e) {
        alert('getUserMedia threw exception :' + e);
    }

}


function didntGetStream() {
    alert('Stream generation failed.');
}



function gotStream(stream) {
    audioContext = new AudioContext();
    meter = createAudioMeter(audioContext);
    mediaStreamSource = audioContext.createMediaStreamSource(stream);
    
    mediaStreamSource.connect(meter);
    drawLoop();
}

function drawLoop() {
    // isMicOn is the flag the demo uses to track whether the local stream's microphone is off; add any other state checks you need here. The volume threshold (0.05) can also be adjusted.
    // The slide animations come from jQuery, and the debounce window (3000 ms) can be changed as needed.
    if (hintlock == false && meter.volume > 0.05 && isMicOn === false) {
        hintlock = true;
        $('#michint').slideDown(600);

        setTimeout(function () {
            $("#michint").slideUp(600)
            hintlock = false;
        }, 3000)
    }

    if (typeof (canvasContext) != "undefined") {
        canvasContext.clearRect(0, 0, WIDTH, HEIGHT);
        canvasContext.fillStyle = "green";
        canvasContext.fillRect(0, 0, meter.volume * WIDTH * 1.4, HEIGHT);
    }


    rafID = window.requestAnimationFrame(drawLoop);
}

function createAudioMeter(audioContext, clipLevel, averaging, clipLag) {
    var processor = audioContext.createScriptProcessor(512);
    processor.onaudioprocess = volumeAudioProcess;
    /* Usage:
        audioNode = createAudioMeter(audioContext, clipLevel, averaging, clipLag);
        audioContext: the AudioContext you are using.
        clipLevel: the level (0 to 1) at which a sample counts as "clipping". Defaults to 0.98.
        averaging: how strongly the meter is smoothed over time, between 0 and 1. Defaults to 0.95.
        clipLag: how long, in milliseconds, the "clipping" indicator stays on after a clip occurs. Defaults to 750 ms.
        Call node.checkClipping() to read the clipping state, and node.shutdown() to release the node.
     */
    processor.clipping = false;
    processor.lastClip = 0;
    processor.volume = 0;
    processor.clipLevel = clipLevel || 0.98;
    processor.averaging = averaging || 0.95;
    processor.clipLag = clipLag || 750;

    // Connect the processor to the destination, otherwise onaudioprocess may stop firing in some browsers; it produces no audible output because the output buffer is never written.
    processor.connect(audioContext.destination);

    // Checks whether the clip indicator should still be shown: the flag stays set for clipLag ms after the last clipped sample, then resets.
    processor.checkClipping =
        function () {
            if (!this.clipping)
                return false;
            if ((this.lastClip + this.clipLag) < window.performance.now())
                this.clipping = false;
            return this.clipping;
        };
    
    // Release the node when it is no longer needed
    processor.shutdown =
        function () {
            this.disconnect();
            this.onaudioprocess = null;
        };

    return processor;
}

function volumeAudioProcess(event) {
    // Volume calculation 
    var buf = event.inputBuffer.getChannelData(0);
    var bufLength = buf.length;
    var sum = 0;
    var x;


    for (var i = 0; i < bufLength; i++) {
        x = buf[i];
        if (Math.abs(x) >= this.clipLevel) {
            this.clipping = true;
            this.lastClip = window.performance.now();
        }
        sum += x * x;
    }

    // Take the root mean square of the buffer (the squares were summed above)
    var rms = Math.sqrt(sum / bufLength);

    // Smooth the reading by applying the averaging factor to the previous value, taking the max so the meter rises quickly but decays slowly. This matches what you actually hear: rooms have echo, so the level does not drop instantly.
    this.volume = Math.max(rms, this.volume * this.averaging);
}

Core code explanation:

  • audioContext: the object that holds the audio context, an instance of AudioContext. This interface was originally designed to work alongside the audio tag, letting JS control every aspect of playback. Even as the audio tag fell out of fashion, the interface stayed, and it can wrap any audio source, including music files and the microphone (the microphone needs some extra handling).
  • navigator.getUserMedia: every developer familiar with WebRTC knows this is how you obtain the device's media stream; in Chrome the user has to grant permission first. On success gotStream is called with the stream, on failure didntGetStream. (This callback-style API is deprecated; a promise-based sketch using navigator.mediaDevices.getUserMedia is given after this list.)
  • gotStream does a few things: 1. uses audioContext's built-in createMediaStreamSource method to wrap the stream obtained from getUserMedia in a source node; 2. calls createAudioMeter with the audio context audioContext to create the meter object; 3. connects mediaStreamSource to the meter; 4. volumeAudioProcess then processes the audio data in each buffer to compute the volume value.
  • createAudioMeter
// This code creates the audio metering node (a ScriptProcessor that measures each buffer)
function createAudioMeter(audioContext, clipLevel, averaging, clipLag) {
    var processor = audioContext.createScriptProcessor(512);
    processor.onaudioprocess = volumeAudioProcess;
    /* Usage:
        audioNode = createAudioMeter(audioContext, clipLevel, averaging, clipLag);
        audioContext: the AudioContext you are using.
        clipLevel: the level (0 to 1) at which a sample counts as "clipping". Defaults to 0.98.
        averaging: how strongly the meter is smoothed over time, between 0 and 1. Defaults to 0.95.
        clipLag: how long, in milliseconds, the "clipping" indicator stays on after a clip occurs. Defaults to 750 ms.
        Call node.checkClipping() to read the clipping state, and node.shutdown() to release the node.
     */
    processor.clipping = false;
    processor.lastClip = 0;
    processor.volume = 0;
    processor.clipLevel = clipLevel || 0.98;
    processor.averaging = averaging || 0.95;
    processor.clipLag = clipLag || 750;

    // Connect the processor to the destination, otherwise onaudioprocess may stop firing in some browsers; it produces no audible output because the output buffer is never written.
    processor.connect(audioContext.destination);

    // Checks whether the clip indicator should still be shown: the flag stays set for clipLag ms after the last clipped sample, then resets.
    processor.checkClipping =
        function () {
            if (!this.clipping)
                return false;
            if ((this.lastClip + this.clipLag) < window.performance.now())
                this.clipping = false;
            return this.clipping;
        };
    
    // Release the node when it is no longer needed
    processor.shutdown =
        function () {
            this.disconnect();
            this.onaudioprocess = null;
        };

    return processor;
}
  • volumeAudioProcess
function volumeAudioProcess(event) {
    // Volume calculation 
    var buf = event.inputBuffer.getChannelData(0);
    var bufLength = buf.length;
    var sum = 0;
    var x;


    for (var i = 0; i < bufLength; i++) {
        x = buf[i];
        if (Math.abs(x) >= this.clipLevel) {
            this.clipping = true;
            this.lastClip = window.performance.now();
        }
        sum += x * x;
    }

    // Take the root mean square of the buffer (the squares were summed above)
    var rms = Math.sqrt(sum / bufLength);

    // Smooth the reading by applying the averaging factor to the previous value, taking the max so the meter rises quickly but decays slowly. This matches what you actually hear: rooms have echo, so the level does not drop instantly.
    this.volume = Math.max(rms, this.volume * this.averaging);
}

Copyright notice
This article was written by [songs of the people of Chu]; please include a link to the original when reposting. Thanks.
https://yzsam.com/2021/04/20210428011058425r.html
