Music Ducking
About Music Ducking
Music ducking is a technique used in voice communication applications to reduce the volume of background music while someone is speaking, so the speaker's voice is not drowned out by the music. It works by automatically lowering the music volume when the microphone picks up speech and gradually restoring it once the speech stops. Ducking is common in radio broadcasting, podcasting, and video conferencing, where it keeps the conversation clear and gives the application a more professional, polished feel.
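Conceptually, the ducking gain is driven by the level of the voice signal. The following JavaScript sketch illustrates the general idea only; it is not the Switchboard SDK's MusicDuckingNode implementation, and the threshold, gain, and smoothing values are illustrative assumptions.

// Conceptual ducking sketch: attenuate a music buffer based on the voice buffer's level.
// music and voice are Float32Arrays of equal length; state holds the smoothed gain.
function duckMusic(music, voice, state) {
  const threshold = 0.02;  // voice level above which ducking kicks in (illustrative)
  const duckedGain = 0.2;  // music gain while the voice is active (illustrative)
  const attack = 0.05;     // how quickly the music is turned down
  const release = 0.002;   // how slowly the music comes back up
  for (let i = 0; i < music.length; i++) {
    const voiceActive = Math.abs(voice[i]) > threshold;
    const target = voiceActive ? duckedGain : 1.0;
    const rate = target < state.gain ? attack : release;
    state.gain += (target - state.gain) * rate; // smooth toward the target gain
    music[i] *= state.gain;                     // apply the ducking gain to the music
  }
}

// Usage: const state = { gain: 1.0 }; duckMusic(musicSamples, micSamples, state);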
Audio Lab
This example plays an audio file and ducks the playback volume based on the user's microphone input.
Code Example
- JSON
- Swift
- Kotlin
- C++
- JavaScript
{
  "nodes": [
    { "id": "audioPlayerNode", "type": "AudioPlayerNode" },
    { "id": "musicDuckingNode", "type": "MusicDuckingNode" }
  ],
  "connections": [
    { "sourceNode": "audioPlayerNode", "destinationNode": "musicDuckingNode" },
    { "sourceNode": "InputNode", "destinationNode": "musicDuckingNode" },
    { "sourceNode": "musicDuckingNode", "destinationNode": "OutputNode" }
  ],
  "tracks": {
    "audioPlayerNode": "example.mp3"
  }
}
import SwitchboardSDK

class DuckingExample {

    let audioEngine = SBAudioEngine()
    let audioGraph = SBAudioGraph()
    let audioPlayerNode = SBAudioPlayerNode()
    let musicDuckingNode = SBMusicDuckingNode()

    init() {
        // Adding nodes to the audio graph
        audioGraph.addNode(audioPlayerNode)
        audioGraph.addNode(musicDuckingNode)

        // Connecting the music player and the microphone input to the ducking node
        audioGraph.connect(audioPlayerNode, musicDuckingNode)
        audioGraph.connect(audioGraph.inputNode, musicDuckingNode)
        audioGraph.connect(musicDuckingNode, audioGraph.outputNode)

        // Loading the track and enabling the microphone
        audioPlayerNode.load("example.mp3", withFormat: .mp3)
        audioPlayerNode.isLoopingEnabled = true
        audioEngine.microphoneEnabled = true
    }

    func start() {
        audioEngine.start(audioGraph)
        audioPlayerNode.play()
    }

    func stop() {
        audioPlayerNode.stop()
        audioEngine.stop()
    }
}
import android.content.Context
import com.synervoz.switchboard.sdk.AudioEngine
import com.synervoz.switchboard.sdk.Codec
import com.synervoz.switchboard.sdk.audiograph.AudioGraph
import com.synervoz.switchboard.sdk.audiographnodes.AudioPlayerNode
import com.synervoz.switchboard.sdk.audiographnodes.MusicDuckingNode

class DuckingExample(context: Context) {

    val audioEngine = AudioEngine(context)
    val audioGraph = AudioGraph()
    val audioPlayerNode = AudioPlayerNode()
    val musicDuckingNode = MusicDuckingNode()

    init {
        // Adding nodes to the audio graph
        audioGraph.addNode(audioPlayerNode)
        audioGraph.addNode(musicDuckingNode)

        // Connecting the music player and the microphone input to the ducking node
        audioGraph.connect(audioPlayerNode, musicDuckingNode)
        audioGraph.connect(audioGraph.inputNode, musicDuckingNode)
        audioGraph.connect(musicDuckingNode, audioGraph.outputNode)

        // Loading the track and enabling the microphone
        audioPlayerNode.load("example.mp3", Codec.MP3)
        audioPlayerNode.isLoopingEnabled = true
        audioEngine.microphoneEnabled = true
    }

    fun start() {
        audioEngine.start(audioGraph)
        audioPlayerNode.play()
    }

    fun stop() {
        audioPlayerNode.stop()
        audioEngine.stop()
    }
}
#include "AudioGraph.hpp"
#include "AudioPlayerNode.hpp"
#include "MusicDuckingNode.hpp"
using namespace switchboard;
class DuckingExample {
public:
DuckingExample() {
// Adding nodes to audio graph
audioGraph.addNode(audioPlayerNode);
audioGraph.addNode(musicDuckingNode);
// Connecting audio nodes
audioGraph.connect(audioPlayerNode, musicDuckingNode);
audioGraph.connect(audioGraph.getInputNode(), musicDuckingNode);
audioGraph.connect(musicDuckingNode, audioGraph.getOutputNode());
// Loading the track and starting the player
audioPlayerNode.load("example.mp3", ::Mp3);
audioPlayerNode.setLoopingEnabled(true);
audioPlayerNode.play();
// Starting the graph
audioGraph.start();
}
// Example method called by the audio processing pipeline on each buffer
bool process(float** buffers, const uint numberOfChannels, const uint numberOfFrames, const uint sampleRate) {
AudioBuffer<float> inBuffer = AudioBuffer<float>(numberOfChannels, numberOfFrames, false, sampleRate, buffers);
AudioBuffer<float> outBuffer = AudioBuffer<float>(numberOfChannels, numberOfFrames, false, sampleRate, buffers);
AudioBus inBus = AudioBus(&inBuffer);
AudioBus outBus = AudioBus(&outBuffer);
AudioBusList inBusList = AudioBusList(inBus);
AudioBusList outBusList = AudioBusList(outBus);
const bool result = audioGraph->process(inBusList, outBusList);
return result;
}
private:
AudioGraph audioGraph;
AudioPlayerNode audioPlayerNode;
MusicDuckingNode musicDuckingNode;
};
// Change the import to reflect the location of the library
// relative to this publicly accessible AudioWorklet
import SwitchboardSDK from "../../../libs/SwitchboardSDK.js";

class MusicDuckingWorkletProcessor extends AudioWorkletProcessor {
    constructor(options) {
        super();
        console.log(options);

        // Messages from the main thread drive the processor's life cycle.
        this.port.onmessage = (e) => {
            console.log("MusicDuckingWorkletProcessor - onmessage");
            console.log(e);
            let event = e.data.event;
            if (event === 'wasmLoaded') {
                let sdkConfig = e.data.data;
                this.configure(sdkConfig);
            } else if (event === 'destruct') {
                this.destruct();
            }
        };
    }

    configure(sdkConfig) {
        this.switchboard = new SwitchboardSDK();
        this.switchboard.configure(sdkConfig).then((response) => {
            this.constructAudioGraph();
        });
    }

    constructAudioGraph() {
        let inputNumChannels = [2, 1]; // two input buses: music and voice
        let outputNumChannels = 2;
        let sampleRate = 48000;
        let numFrames = 128;

        let audioGraph = this.switchboard.createAudioGraph(inputNumChannels, outputNumChannels, numFrames, sampleRate);
        let audioGraphInputNode = audioGraph.getInputNode();
        let audioGraphOutputNode = audioGraph.getOutputNode();

        let musicPassthroughNode = new this.switchboard.classes.PassthroughNode();
        let voicePassthroughNode = new this.switchboard.classes.PassthroughNode();
        let musicDuckingNode = new this.switchboard.classes.MusicDuckingNode();

        audioGraph.addNode(musicPassthroughNode);
        audioGraph.addNode(voicePassthroughNode);
        audioGraph.addNode(musicDuckingNode);

        // Route the music and voice buses into the ducking node, then to the output.
        audioGraph.connect(audioGraphInputNode, musicPassthroughNode);
        audioGraph.connect(audioGraphInputNode, voicePassthroughNode);
        audioGraph.connect(musicPassthroughNode, musicDuckingNode);
        audioGraph.connect(voicePassthroughNode, musicDuckingNode);
        audioGraph.connect(musicDuckingNode, audioGraphOutputNode);

        audioGraph.start();

        this.audioGraph = audioGraph;
        this.musicPassthroughNode = musicPassthroughNode;
        this.voicePassthroughNode = voicePassthroughNode;
        this.musicDuckingNode = musicDuckingNode;
    }

    destruct() {
        this.audioGraph.destruct();
        this.musicPassthroughNode.destruct();
        this.voicePassthroughNode.destruct();
        this.musicDuckingNode.destruct();
    }

    process(inputs, outputs, parameters) {
        return this.audioGraph.processGraph(inputs, outputs);
    }
}

registerProcessor("music-ducking-worklet-processor", MusicDuckingWorkletProcessor);