Music Ducking
About Music Ducking
Music ducking is a technique used in voice communication applications to reduce the volume of background music when someone is speaking. This is important because it allows the speaker's voice to be heard clearly without being drowned out by the music. Music ducking works by automatically lowering the volume of the music when the microphone picks up sound, and then gradually increasing it again once the sound has stopped. This technique is commonly used in radio broadcasting, podcasting, and video conferencing applications to ensure that the conversation remains clear and easy to understand. By using music ducking, voice communication applications can provide a more professional and polished experience for their users.
Audio Lab
This example plays an audio file and ducks the playback volume based on the user's microphone input.
Code Example
- JSON
- Swift
- Kotlin
- C++
- JavaScript
{
  "nodes": [
    { "id": "audioPlayerNode", "type": "AudioPlayerNode" },
    { "id": "musicDuckingNode", "type": "MusicDuckingNode" }
  ],
  "connections": [
    { "sourceNode": "audioPlayerNode", "destinationNode": "musicDuckingNode" },
    { "sourceNode": "inputNode", "destinationNode": "musicDuckingNode" },
    { "sourceNode": "musicDuckingNode", "destinationNode": "outputNode" }
  ],
  "tracks": {
    "audioPlayerNode": "example.mp3"
  }
}
import SwitchboardSDK
class DuckingExample {
// Engine that runs the graph and owns device I/O
let audioEngine = SBAudioEngine()
let audioGraph = SBAudioGraph()
// Plays the background music track
let audioPlayerNode = SBAudioPlayerNode()
// Lowers the music volume while the microphone picks up voice
let musicDuckingNode = SBMusicDuckingNode()
init() {
// Adding nodes to the audio graph
audioGraph.addNode(audioPlayerNode)
audioGraph.addNode(musicDuckingNode)
// The player and the microphone input both feed the ducking node,
// which in turn drives the graph output
audioGraph.connect(audioPlayerNode, musicDuckingNode)
audioGraph.connect(audioGraph.inputNode, musicDuckingNode)
audioGraph.connect(musicDuckingNode, audioGraph.outputNode)
// Load the track, loop it, and enable the microphone so the
// ducking node receives voice input
audioPlayerNode.load("example.mp3", withFormat: .mp3)
audioPlayerNode.isLoopingEnabled = true
audioEngine.microphoneEnabled = true
}
// Start the engine on the graph, then start music playback
func start() {
audioEngine.start(audioGraph)
audioPlayerNode.play()
}
// Stop playback before shutting the engine down
func stop() {
audioPlayerNode.stop()
audioEngine.stop()
}
}
import android.content.Context
import com.synervoz.switchboard.sdk.AudioEngine
import com.synervoz.switchboard.sdk.Codec
import com.synervoz.switchboard.sdk.audiograph.AudioGraph
import com.synervoz.switchboard.sdk.audiographnodes.AudioPlayerNode
import com.synervoz.switchboard.sdk.audiographnodes.MusicDuckingNode
class DuckingExample(context: Context) {
    // Engine that runs the graph and owns device I/O
    val audioEngine = AudioEngine(context)
    val audioGraph = AudioGraph()
    // Plays the background music track
    val audioPlayerNode = AudioPlayerNode()
    // Lowers the music volume while the microphone picks up voice
    val musicDuckingNode = MusicDuckingNode()

    // Kotlin initializer blocks have no parameter list: `init { }`,
    // not `init() { }` (the original did not compile).
    init {
        // Adding nodes to the audio graph
        audioGraph.addNode(audioPlayerNode)
        audioGraph.addNode(musicDuckingNode)
        // The player and the microphone input both feed the ducking node,
        // which in turn drives the graph output
        audioGraph.connect(audioPlayerNode, musicDuckingNode)
        audioGraph.connect(audioGraph.inputNode, musicDuckingNode)
        audioGraph.connect(musicDuckingNode, audioGraph.outputNode)
        // Load the track, loop it, and enable the microphone so the
        // ducking node receives voice input
        audioPlayerNode.load("example.mp3", Codec.MP3)
        audioPlayerNode.isLoopingEnabled = true
        audioEngine.microphoneEnabled = true
    }

    // Start the engine on the graph, then start music playback
    fun start() {
        audioEngine.start(audioGraph)
        audioPlayerNode.play()
    }

    // Stop playback before shutting the engine down
    fun stop() {
        audioPlayerNode.stop()
        audioEngine.stop()
    }
}
#include "AudioGraph.hpp"
#include "AudioPlayerNode.hpp"
#include "MusicDuckingNode.hpp"
using namespace switchboard;
class DuckingExample {
public:
DuckingExample() {
// Adding nodes to audio graph
audioGraph.addNode(audioPlayerNode);
audioGraph.addNode(musicDuckingNode);
// Connecting audio nodes
audioGraph.connect(audioPlayerNode, musicDuckingNode);
audioGraph.connect(audioGraph.getInputNode(), musicDuckingNode);
audioGraph.connect(musicDuckingNode, audioGraph.getOutputNode());
// Loading the track and starting the player
audioPlayerNode.load("example.mp3", ::Mp3);
audioPlayerNode.setLoopingEnabled(true);
audioPlayerNode.play();
// Starting the graph
audioGraph.start();
}
// Example method called by the audio processing pipeline on each buffer
bool process(float** buffers, const uint numberOfChannels, const uint numberOfFrames, const uint sampleRate) {
AudioBuffer<float> inBuffer = AudioBuffer<float>(numberOfChannels, numberOfFrames, false, sampleRate, buffers);
AudioBuffer<float> outBuffer = AudioBuffer<float>(numberOfChannels, numberOfFrames, false, sampleRate, buffers);
const bool result = audioGraph->processBuffer(&inBuffer, &outBuffer);
return result;
}
private:
AudioGraph audioGraph;
AudioPlayerNode audioPlayerNode;
MusicDuckingNode musicDuckingNode;
};
// Change the import to reflect the location of the library
// relative to this publicly accessible AudioWorklet
import { SwitchboardSDK } from '/libs/switchboard-sdk/index.js'
// AudioWorklet processor that routes a music bus through a ducking node
// keyed off a voice bus, then mixes voice and ducked music to the output.
class SwitchboardMusicDuckingV2Processor extends AudioWorkletProcessor {
    constructor(options) {
        super()
        // Sample rate forwarded from the main thread at node-creation time
        this.sampleRate = options.processorOptions.sampleRate
        this.port.onmessage = (event) => this.onMessage(event.data)
    }

    // Post a message (optionally with transferable objects) to the main thread.
    sendMessage(message, transfer = []) {
        this.port.postMessage(message, transfer)
    }

    // Handle control messages from the main thread. Parameter messages assume
    // the graph was already built (i.e. the wasm bytes arrived first).
    onMessage(message) {
        if (message.command === 'requestUiDefinitions') {
            // NOTE(review): uiDefinitions is not defined anywhere in this file —
            // confirm it is provided by the imported SDK module or injected at build.
            this.sendMessage({ uiDefinitions: uiDefinitions })
        }
        if (message.wasmArrayBuffer) {
            const switchboardSdkConfigObject = {
                extensions: [],
                wasmBytes: message.wasmArrayBuffer,
                // NOTE(review): SwitchboardDevelopmentLicense is not defined in
                // this file — confirm where it comes from.
                ...SwitchboardDevelopmentLicense,
            }
            this.configure(switchboardSdkConfigObject)
        }
        if (typeof message.duckingAmount !== 'undefined') {
            this.musicDuckingNode.setDuckingAmount(message.duckingAmount)
        }
        if (typeof message.activationThreshold !== 'undefined') {
            this.musicDuckingNode.setActivationThreshold(message.activationThreshold)
        }
        if (typeof message.attackSeconds !== 'undefined') {
            this.musicDuckingNode.setAttackSeconds(message.attackSeconds)
        }
        if (typeof message.releaseSeconds !== 'undefined') {
            this.musicDuckingNode.setReleaseSeconds(message.releaseSeconds)
        }
        if (typeof message.holdSeconds !== 'undefined') {
            this.musicDuckingNode.setHoldSeconds(message.holdSeconds)
        }
    }

    // Configure the SDK, then build the audio graph once configuration resolves.
    configure(sdkConfig) {
        this.switchboard = new SwitchboardSDK()
        this.switchboard.configure(sdkConfig).then((response) => {
            this.constructAudioGraph()
        })
    }

    constructAudioGraph() {
        // Two stereo input buses (music, voice) mixed down to one stereo output
        const inputChannelLayout = [2, 2]
        const outputChannelLayout = [2]
        const maxNumFrames = 128
        let audioGraph = this.switchboard.createAudioGraph(
            inputChannelLayout,
            outputChannelLayout,
            maxNumFrames,
            this.sampleRate
        )
        let voiceSplitterNode = new this.switchboard.classes.BusSplitterNode()
        let musicPassthroughNode = new this.switchboard.classes.PassthroughNode()
        let musicDuckingNode = new this.switchboard.classes.MusicDuckingV2Node()
        let mixerNode = new this.switchboard.classes.MixerNode()
        let audioGraphInputNode = audioGraph.getInputNode()
        let audioGraphOutputNode = audioGraph.getOutputNode()
        audioGraph.addNode(voiceSplitterNode)
        audioGraph.addNode(musicPassthroughNode)
        audioGraph.addNode(musicDuckingNode)
        audioGraph.addNode(mixerNode)
        // Music bus -> passthrough -> ducking; voice bus is split so it both
        // drives the ducking node and reaches the mixer directly.
        audioGraph.connect(audioGraphInputNode, musicPassthroughNode)
        audioGraph.connect(audioGraphInputNode, voiceSplitterNode)
        audioGraph.connect(musicPassthroughNode, musicDuckingNode)
        audioGraph.connect(voiceSplitterNode, musicDuckingNode)
        audioGraph.connect(voiceSplitterNode, mixerNode)
        audioGraph.connect(musicDuckingNode, mixerNode)
        audioGraph.connect(mixerNode, audioGraphOutputNode)
        audioGraph.start()
        this.musicDuckingNode = musicDuckingNode
        this.musicPassthroughNode = musicPassthroughNode
        this.mixerNode = mixerNode
        this.voiceSplitterNode = voiceSplitterNode
        this.audioGraph = audioGraph
    }

    // Release the native resources owned by the graph and its nodes.
    // (The original also called this.multiChannelToMonoNode.destruct(), but that
    // property is never assigned, so the call threw and skipped the remaining
    // cleanup — removed.)
    destruct() {
        this.audioGraph.destruct()
        this.musicDuckingNode.destruct()
        this.musicPassthroughNode.destruct()
        this.voiceSplitterNode.destruct()
        this.mixerNode.destruct()
    }

    // Called on the audio rendering thread for every render quantum.
    process(inputs, outputs, parameters) {
        // The graph is built asynchronously after configure(); until then keep
        // the processor alive and emit silence instead of crashing the worklet.
        if (!this.audioGraph) {
            return true
        }
        return this.audioGraph.processGraph(inputs, outputs)
    }
}
// Register the processor under the name the main thread passes to the
// AudioWorkletNode constructor.
registerProcessor(
'SwitchboardMusicDuckingV2Processor',
SwitchboardMusicDuckingV2Processor
)