synops/frontend/src/lib/mixer.ts
vegard ac8f8c508d Fullfører oppgave 16.7: Stemmeeffekter med robot og monster voice
Robotstemme: Ring-modulasjon via OscillatorNode som modulerer
GainNode.gain — gir metallisk, Dalek-aktig effekt. Justerbar
frekvens (30–300 Hz) og modulasjonsdybde (0–100%).

Monsterstemme: Egenutviklet AudioWorkletProcessor med phase vocoder
for sanntids pitch-shifting. Bruker overlap-add med 2048-sample FFT
og 4x overlap for ~42ms latens ved 48kHz. Pitch-faktor 0.5x–2.0x.

UI: Effektvelger-knapper (Robot/Monster) i FX-seksjon per kanal,
med fargekodede parametersliders som vises når effekten er aktiv.
On/off-state synkroniseres via STDB toggle_effect, parametere er
per-klient (ulike brukere kan ha forskjellige monitorinnstillinger).

STDB: Lagt til set_effect_param reducer for fremtidig param-synk
(krever spacetime CLI for publish — ikke deployet ennå).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 05:34:59 +00:00

827 lines
25 KiB
TypeScript
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

/**
* Web Audio mixer graph for Synops.
*
* Manages the audio processing graph:
* MediaStreamSource (per channel) → AnalyserNode → EQ chain → Voice FX → GainNode → MasterGain → destination
*
* Each remote participant and the local microphone gets a channel.
* AnalyserNodes provide real-time level data for VU meters.
* EQ chain: HighPass(80Hz) → FatBottom(lowshelf 200Hz) → Exciter(WaveShaper+highshelf) → Sparkle(highshelf 10kHz)
* Voice FX: Robot (ring modulation) → Monster (pitch shift via AudioWorklet)
*/
// ─── Types ──────────────────────────────────────────────────────────────────

/** Per-channel EQ effect identifiers (match keys in the STDB active_effects JSON). */
export type EqEffectName = 'fat_bottom' | 'sparkle' | 'exciter';
/** Voice effect identifiers: robot = ring modulation, monster = pitch shift. */
export type VoiceEffectName = 'robot' | 'monster';
/** Any toggleable effect name. */
export type EffectName = EqEffectName | VoiceEffectName;

/** Web Audio nodes that make up one channel's EQ chain. */
export interface EqNodes {
  highpass: BiquadFilterNode; // 80 Hz rumble filter, always active
  fatBottom: BiquadFilterNode; // lowshelf @ 200 Hz (0 dB gain = bypassed)
  sparkle: BiquadFilterNode; // highshelf @ 10 kHz (0 dB gain = bypassed)
  exciterShaper: WaveShaperNode; // soft-saturation transfer curve (linear = bypassed)
  exciterFilter: BiquadFilterNode; // highshelf @ 3.5 kHz presence boost
}

/** Nodes plus cached parameter values for the robot/monster voice effects. */
export interface VoiceEffectNodes {
  // Robot voice: ring modulation
  robotGain: GainNode; // Signal passes through; oscillator modulates .gain
  robotOscillator: OscillatorNode;
  robotDepth: GainNode; // Controls modulation depth (0=off, 1=full)
  robotEnabled: boolean;
  robotFrequency: number; // Hz; clamped to 30-300 by setRobotVoice/setRobotFrequency
  robotDepthValue: number; // 0.0-1.0
  // Monster voice: pitch shift via AudioWorklet
  pitchShifter: AudioWorkletNode | null; // null until worklet is loaded
  monsterEnabled: boolean;
  monsterPitchFactor: number; // 0.5-2.0
}

/** On/off state per EQ effect for one channel. */
export interface EqState {
  fat_bottom: boolean;
  sparkle: boolean;
  exciter: boolean;
}

/** On/off state per voice effect for one channel. */
export interface VoiceState {
  robot: boolean;
  monster: boolean;
}

/** A named combination of EQ toggles selectable in the UI. */
export interface EqPreset {
  name: string; // stable identifier
  label: string; // human-readable label shown in the UI
  effects: EqState;
}

// Built-in presets (labels are Norwegian UI strings).
export const EQ_PRESETS: EqPreset[] = [
  { name: 'off', label: 'Av', effects: { fat_bottom: false, sparkle: false, exciter: false } },
  { name: 'podcast', label: 'Podcast-stemme', effects: { fat_bottom: true, sparkle: true, exciter: false } },
  { name: 'radio', label: 'Radio-stemme', effects: { fat_bottom: true, sparkle: true, exciter: true } },
];

/** One mixer strip: a participant's source plus its processing nodes and state. */
export interface MixerChannel {
  identity: string; // participant identity (key in the channels map)
  source: MediaStreamAudioSourceNode;
  analyser: AnalyserNode; // feeds the VU meter
  gain: GainNode; // channel fader
  eq: EqNodes;
  eqState: EqState;
  voice: VoiceEffectNodes;
  voiceState: VoiceState;
}

/** A loaded sound pad: decoded buffer, dedicated gain, and the playing source (if any). */
export interface PadState {
  buffer: AudioBuffer;
  gain: GainNode;
  activeSource: AudioBufferSourceNode | null; // null when idle
}

/** VU meter reading for one channel (or 'master'). */
export interface ChannelLevels {
  identity: string;
  peak: number; // 0.0-1.0, peak amplitude
  rms: number; // 0.0-1.0, RMS level (closer to perceived loudness)
}
// ─── State ──────────────────────────────────────────────────────────────────

// Singleton AudioContext shared by the whole mixer; created lazily in
// ensureAudioContext (must first happen inside a user gesture).
let audioContext: AudioContext | null = null;
// Master output chain: every channel and pad feeds masterGain → masterAnalyser → destination.
let masterGain: GainNode | null = null;
let masterAnalyser: AnalyserNode | null = null;
// Per-participant channels keyed by identity; sound pads keyed by pad id.
const channels = new Map<string, MixerChannel>();
const pads = new Map<string, PadState>();
// Reusable buffer for analyser readings (allocated once per context)
let analyserBuffer: Float32Array | null = null;
// AudioWorklet loading state: workletLoadPromise de-duplicates concurrent loads.
let workletLoaded = false;
let workletLoadPromise: Promise<void> | null = null;
// ─── AudioContext lifecycle ─────────────────────────────────────────────────

/**
 * Return the shared AudioContext, building it (plus the master
 * gain → analyser → destination chain) when none exists or the previous one
 * was closed. The first call must come from a user gesture (browser autoplay
 * policy); later calls also kick a suspended context back to running.
 */
export function ensureAudioContext(): AudioContext {
  let ctx = audioContext;
  if (!ctx || ctx.state === 'closed') {
    ctx = new AudioContext();
    const gain = ctx.createGain();
    gain.gain.value = 1.0;
    const analyser = ctx.createAnalyser();
    analyser.fftSize = 256;
    analyser.smoothingTimeConstant = 0.3;
    // Master chain: masterGain → masterAnalyser → destination
    gain.connect(analyser);
    analyser.connect(ctx.destination);
    // Publish the new graph and reset all per-context caches.
    audioContext = ctx;
    masterGain = gain;
    masterAnalyser = analyser;
    analyserBuffer = null; // re-allocated on first meter read
    workletLoaded = false;
    workletLoadPromise = null;
  }
  // A backgrounded tab may suspend the context; resume is fire-and-forget.
  if (ctx.state === 'suspended') {
    void ctx.resume();
  }
  return ctx;
}
/** The current AudioContext, or null if none has been created yet. */
export function getAudioContext(): AudioContext | null {
  return audioContext;
}
/**
 * Lazily load the pitch-shifter AudioWorklet module (first monster-voice use).
 * Concurrent callers share one in-flight promise; a failed load clears the
 * promise so a later attempt can retry.
 */
async function ensurePitchShifterWorklet(): Promise<void> {
  if (workletLoaded) return;
  if (workletLoadPromise) return workletLoadPromise;
  const ctx = ensureAudioContext();
  workletLoadPromise = (async () => {
    try {
      await ctx.audioWorklet.addModule('/pitch-shifter-worklet.js');
      workletLoaded = true;
    } catch (err) {
      console.error('Failed to load pitch-shifter worklet:', err);
      workletLoadPromise = null; // allow retry
      throw err;
    }
  })();
  return workletLoadPromise;
}
// ─── Channel management ────────────────────────────────────────────────────

/**
 * Register a mixer channel for a participant's media stream.
 * Builds the per-channel graph:
 *   source → analyser → highpass → fatBottom → exciterShaper → exciterFilter
 *          → sparkle → robotGain → gain → masterGain
 * (The monster-voice pitch shifter is spliced between robotGain and gain on
 * demand.) Any existing channel for the same identity is torn down first.
 * All effects start bypassed (unity/flat).
 */
export function addChannel(identity: string, mediaStream: MediaStream): MixerChannel {
  const ctx = ensureAudioContext();
  removeChannel(identity); // replace any previous channel for this identity

  const source = ctx.createMediaStreamSource(mediaStream);
  const analyser = ctx.createAnalyser();
  analyser.fftSize = 256;
  analyser.smoothingTimeConstant = 0.3;
  const eq = createEqNodes(ctx);
  const voice = createVoiceEffectNodes(ctx);
  const gain = ctx.createGain();
  gain.gain.value = 1.0;

  // Wire the chain in order; each node feeds the next.
  const chain: AudioNode[] = [
    source,
    analyser,
    eq.highpass,
    eq.fatBottom,
    eq.exciterShaper,
    eq.exciterFilter,
    eq.sparkle,
    voice.robotGain,
    gain,
  ];
  for (let i = 0; i + 1 < chain.length; i++) {
    chain[i].connect(chain[i + 1]);
  }
  gain.connect(masterGain!); // masterGain exists after ensureAudioContext()

  const channel: MixerChannel = {
    identity,
    source,
    analyser,
    gain,
    eq,
    eqState: { fat_bottom: false, sparkle: false, exciter: false },
    voice,
    voiceState: { robot: false, monster: false },
  };
  channels.set(identity, channel);
  return channel;
}
/**
 * Remove a channel: stop its oscillator and disconnect every node so the
 * subgraph can be garbage-collected.
 */
export function removeChannel(identity: string): void {
  const channel = channels.get(identity);
  if (!channel) return;
  // The robot oscillator is started when the channel is created (see
  // createVoiceEffectNodes), regardless of whether robot voice was ever
  // enabled. A started OscillatorNode is kept alive by the context until it
  // is stopped, so stop it unconditionally — the previous
  // `if (robotEnabled)` guard leaked a running oscillator per channel.
  try {
    channel.voice.robotOscillator.stop();
  } catch {
    /* already stopped */
  }
  const nodes: AudioNode[] = [
    channel.source,
    channel.analyser,
    channel.eq.highpass,
    channel.eq.fatBottom,
    channel.eq.exciterShaper,
    channel.eq.exciterFilter,
    channel.eq.sparkle,
    channel.voice.robotGain,
    channel.voice.robotOscillator,
    channel.voice.robotDepth,
    channel.gain,
  ];
  if (channel.voice.pitchShifter) {
    nodes.push(channel.voice.pitchShifter);
  }
  for (const node of nodes) {
    node.disconnect();
  }
  channels.delete(identity);
}
/**
 * Look up a channel by participant identity.
 * @returns the channel, or undefined if none is registered for that identity.
 */
export function getChannel(identity: string): MixerChannel | undefined {
  return channels.get(identity);
}
/** Identities of all currently registered channels. */
export function getChannelIdentities(): string[] {
  return [...channels.keys()];
}
// ─── Gain control ──────────────────────────────────────────────────────────
/**
* Set the gain for a channel (0.01.5, default 1.0).
*/
export function setChannelGain(identity: string, value: number): void {
const channel = channels.get(identity);
if (!channel) return;
channel.gain.gain.value = Math.max(0, Math.min(1.5, value));
}
/**
* Get the current gain value for a channel.
*/
export function getChannelGain(identity: string): number {
const channel = channels.get(identity);
return channel ? channel.gain.gain.value : 1.0;
}
/**
* Mute a channel by setting gain to 0 with immediate scheduling.
*/
export function muteChannel(identity: string): void {
const channel = channels.get(identity);
if (!channel || !audioContext) return;
channel.gain.gain.setValueAtTime(0, audioContext.currentTime);
}
/**
* Unmute a channel by restoring gain to a value (default 1.0).
*/
export function unmuteChannel(identity: string, value: number = 1.0): void {
const channel = channels.get(identity);
if (!channel || !audioContext) return;
channel.gain.gain.setValueAtTime(Math.max(0, Math.min(1.5, value)), audioContext.currentTime);
}
/** Set master output gain, clamped to 0.0-1.5 (1.0 = unity). */
export function setMasterGain(value: number): void {
  if (masterGain) {
    masterGain.gain.value = Math.min(1.5, Math.max(0, value));
  }
}
/** Current master gain; 1.0 before the context exists. */
export function getMasterGain(): number {
  return masterGain?.gain.value ?? 1.0;
}
/** Hard-mute the master bus. */
export function muteMaster(): void {
  if (masterGain && audioContext) {
    masterGain.gain.setValueAtTime(0, audioContext.currentTime);
  }
}
/** Restore master gain (clamped to 0.0-1.5, default 1.0). */
export function unmuteMaster(value: number = 1.0): void {
  if (!masterGain || !audioContext) return;
  const level = Math.min(1.5, Math.max(0, value));
  masterGain.gain.setValueAtTime(level, audioContext.currentTime);
}
// ─── VU meter levels ───────────────────────────────────────────────────────
/**
* Read current levels from a channel's AnalyserNode.
* Returns peak and RMS values normalized to 0.01.0.
*/
export function getChannelLevels(identity: string): ChannelLevels | null {
const channel = channels.get(identity);
if (!channel) return null;
return readAnalyserLevels(identity, channel.analyser);
}
/** Peak/RMS levels for the master bus; null before the context exists. */
export function getMasterLevels(): ChannelLevels | null {
  return masterAnalyser ? readAnalyserLevels('master', masterAnalyser) : null;
}
/**
* Read levels from all channels at once (efficient for UI rendering).
*/
export function getAllLevels(): ChannelLevels[] {
const levels: ChannelLevels[] = [];
for (const [identity, channel] of channels) {
const l = readAnalyserLevels(identity, channel.analyser);
if (l) levels.push(l);
}
return levels;
}
/**
 * Read time-domain samples from an analyser and reduce them to peak and RMS
 * levels, both clamped to 1.0. Reuses a module-level scratch buffer (grown as
 * needed) to avoid a per-frame allocation.
 */
function readAnalyserLevels(identity: string, analyser: AnalyserNode): ChannelLevels {
  const n = analyser.fftSize;
  if (!analyserBuffer || analyserBuffer.length < n) {
    analyserBuffer = new Float32Array(n);
  }
  analyser.getFloatTimeDomainData(analyserBuffer);
  let peak = 0;
  let energy = 0;
  for (let i = 0; i < n; i++) {
    const s = analyserBuffer[i];
    peak = Math.max(peak, Math.abs(s));
    energy += s * s;
  }
  return {
    identity,
    peak: Math.min(1.0, peak),
    rms: Math.min(1.0, Math.sqrt(energy / n)),
  };
}
// ─── EQ Effect Chain ────────────────────────────────────────────────────────

/**
 * Build the EQ node set for one channel, everything in bypass (flat) state.
 * Chain order (wired by addChannel):
 *   HighPass(80Hz) → FatBottom(lowshelf 200Hz) → Exciter(WaveShaper + highshelf) → Sparkle(highshelf 10kHz)
 */
function createEqNodes(ctx: AudioContext): EqNodes {
  // Small factory for the shelf filters — all start at 0 dB (= flat/bypassed).
  const shelf = (type: BiquadFilterType, freq: number): BiquadFilterNode => {
    const f = ctx.createBiquadFilter();
    f.type = type;
    f.frequency.value = freq;
    f.gain.value = 0;
    return f;
  };

  // Always-on rumble filter at 80 Hz.
  const highpass = ctx.createBiquadFilter();
  highpass.type = 'highpass';
  highpass.frequency.value = 80;
  highpass.Q.value = 0.7;

  const fatBottom = shelf('lowshelf', 200);
  const sparkle = shelf('highshelf', 10000);

  // Exciter = subtle harmonic saturation followed by a presence shelf.
  const exciterShaper = ctx.createWaveShaper();
  exciterShaper.curve = createExciterCurve(0); // linear curve = bypass
  exciterShaper.oversample = '2x';
  const exciterFilter = shelf('highshelf', 3500);

  return { highpass, fatBottom, sparkle, exciterShaper, exciterFilter };
}
/**
 * Generate a 256-point WaveShaper transfer curve for the exciter.
 * amount <= 0 yields the identity curve y = x (bypass); larger amounts apply
 * tanh soft clipping with drive 1 + 3*amount, normalized so |y| <= 1.
 */
function createExciterCurve(amount: number): Float32Array {
  const SAMPLES = 256;
  const out = new Float32Array(SAMPLES);
  if (amount <= 0) {
    // Linear passthrough over [-1, 1).
    for (let i = 0; i < SAMPLES; i++) {
      out[i] = (2 * i) / SAMPLES - 1;
    }
    return out;
  }
  const drive = 1 + 3 * amount;
  const norm = Math.tanh(drive); // rescale so the endpoints hit ±1
  for (let i = 0; i < SAMPLES; i++) {
    const x = (2 * i) / SAMPLES - 1;
    out[i] = Math.tanh(x * drive) / norm;
  }
  return out;
}
/**
* Set an EQ effect on/off for a channel. Updates the Web Audio nodes directly.
*/
export function setChannelEffect(identity: string, effect: EqEffectName, enabled: boolean): void {
const channel = channels.get(identity);
if (!channel) return;
channel.eqState[effect] = enabled;
switch (effect) {
case 'fat_bottom':
// +8dB lowshelf boost when active, 0dB when bypassed
channel.eq.fatBottom.gain.value = enabled ? 8 : 0;
break;
case 'sparkle':
// +4dB highshelf boost when active, 0dB when bypassed
channel.eq.sparkle.gain.value = enabled ? 4 : 0;
break;
case 'exciter':
// Subtle saturation + presence boost when active
channel.eq.exciterShaper.curve = createExciterCurve(enabled ? 0.6 : 0);
channel.eq.exciterFilter.gain.value = enabled ? 4 : 0;
break;
}
}
/**
* Get the current EQ state for a channel.
*/
export function getChannelEqState(identity: string): EqState | null {
const channel = channels.get(identity);
return channel ? { ...channel.eqState } : null;
}
/** Apply a preset by toggling every EQ effect to the preset's value. */
export function applyEqPreset(identity: string, preset: EqPreset): void {
  (Object.entries(preset.effects) as [EqEffectName, boolean][]).forEach(([effect, enabled]) => {
    setChannelEffect(identity, effect, enabled);
  });
}
/**
 * Apply active_effects JSON from STDB to a channel's Web Audio nodes.
 * EQ effects and voice effects are all boolean toggles; only effects whose
 * state actually changed are touched. Invalid JSON is ignored.
 */
export function applyActiveEffectsJson(identity: string, json: string): void {
  const channel = channels.get(identity);
  if (!channel) return;
  let effects: Record<string, unknown>;
  try {
    effects = JSON.parse(json || '{}') as Record<string, unknown>;
  } catch {
    return; // Invalid JSON — ignore
  }
  for (const name of ['fat_bottom', 'sparkle', 'exciter'] as EqEffectName[]) {
    const enabled = effects[name] === true;
    if (channel.eqState[name] !== enabled) {
      setChannelEffect(identity, name, enabled);
    }
  }
  // Voice effects
  if (effects.robot !== undefined) {
    const robotEnabled = effects.robot === true;
    if (channel.voiceState.robot !== robotEnabled) {
      setRobotVoice(identity, robotEnabled);
    }
  }
  if (effects.monster !== undefined) {
    const monsterEnabled = effects.monster === true;
    if (channel.voiceState.monster !== monsterEnabled) {
      // setMonsterVoice is async (lazy worklet load). The old code floated
      // the promise, so a failed load surfaced as an unhandled rejection —
      // log it and carry on instead.
      void setMonsterVoice(identity, monsterEnabled).catch((err) => {
        console.error('Failed to toggle monster voice:', err);
      });
    }
  }
}
// ─── Voice Effects ──────────────────────────────────────────────────────────

/**
 * Build the voice-effect node set for one channel; both effects start off.
 *
 * Robot: the dry signal runs through robotGain while an always-running sine
 * oscillator (routed through the robotDepth gain) is connected onto
 * robotGain.gain only when the effect is enabled.
 *
 * Monster: a pitch-shifting AudioWorkletNode, created lazily the first time
 * the effect is enabled — pitchShifter stays null until then.
 */
function createVoiceEffectNodes(ctx: AudioContext): VoiceEffectNodes {
  const robotGain = ctx.createGain();
  robotGain.gain.value = 1.0; // unity passthrough while disabled

  const robotOscillator = ctx.createOscillator();
  robotOscillator.type = 'sine';
  robotOscillator.frequency.value = 80; // default carrier frequency
  robotOscillator.start(); // runs until the channel is torn down

  // Routing: oscillator → robotDepth → (robotGain.gain, attached on enable)
  const robotDepth = ctx.createGain();
  robotDepth.gain.value = 0; // 0 = no modulation (bypass)
  robotOscillator.connect(robotDepth);

  return {
    robotGain,
    robotOscillator,
    robotDepth,
    robotEnabled: false,
    robotFrequency: 80,
    robotDepthValue: 0.5,
    pitchShifter: null,
    monsterEnabled: false,
    monsterPitchFactor: 0.7,
  };
}
/**
 * Enable/disable robot voice (oscillator gain modulation) for a channel.
 * Optional frequency (clamped 30-300 Hz) and depth (clamped 0.0-1.0) are
 * applied whether or not the enabled state changes.
 */
export function setRobotVoice(identity: string, enabled: boolean, frequency?: number, depth?: number): void {
  const channel = channels.get(identity);
  if (!channel) return;
  const fx = channel.voice;
  if (frequency !== undefined) {
    fx.robotFrequency = Math.min(300, Math.max(30, frequency));
    fx.robotOscillator.frequency.value = fx.robotFrequency;
  }
  if (depth !== undefined) {
    fx.robotDepthValue = Math.min(1, Math.max(0, depth));
  }
  if (enabled === fx.robotEnabled) {
    if (enabled) {
      // Already active — just refresh the modulation depth.
      fx.robotDepth.gain.value = fx.robotDepthValue;
    }
  } else if (enabled) {
    // Turn on: route the oscillator into the gain AudioParam.
    // NOTE(review): robotGain.gain stays at 1.0 while modulated, so the
    // output swings 1 ± depth (amplitude modulation that can exceed unity)
    // rather than a zero-centered ring mod — confirm this is the intended
    // sound.
    fx.robotDepth.gain.value = fx.robotDepthValue;
    fx.robotDepth.connect(fx.robotGain.gain);
    fx.robotEnabled = true;
  } else {
    // Turn off: detach the modulator and restore unity gain.
    fx.robotDepth.disconnect(fx.robotGain.gain);
    fx.robotDepth.gain.value = 0;
    fx.robotGain.gain.value = 1.0;
    fx.robotEnabled = false;
  }
  channel.voiceState.robot = enabled;
}
/**
* Set robot voice oscillator frequency (30300 Hz).
*/
export function setRobotFrequency(identity: string, frequency: number): void {
const channel = channels.get(identity);
if (!channel) return;
channel.voice.robotFrequency = Math.max(30, Math.min(300, frequency));
channel.voice.robotOscillator.frequency.value = channel.voice.robotFrequency;
}
/**
* Set robot voice modulation depth (0.01.0).
*/
export function setRobotDepth(identity: string, depth: number): void {
const channel = channels.get(identity);
if (!channel) return;
channel.voice.robotDepthValue = Math.max(0, Math.min(1, depth));
if (channel.voice.robotEnabled) {
channel.voice.robotDepth.gain.value = channel.voice.robotDepthValue;
}
}
/**
 * Enable/disable monster voice (pitch shift) for a channel.
 * Loads the AudioWorklet lazily on first use; on enable the shifter node is
 * spliced between robotGain and the channel fader and left in the chain when
 * disabled (avoids reconnection glitches).
 * @throws when the worklet module fails to load — callers must handle the
 *         rejected promise.
 */
export async function setMonsterVoice(identity: string, enabled: boolean, pitchFactor?: number): Promise<void> {
  const channel = channels.get(identity);
  if (!channel || !audioContext) return;
  const voice = channel.voice;
  if (pitchFactor !== undefined) {
    voice.monsterPitchFactor = Math.max(0.5, Math.min(2.0, pitchFactor));
  }
  if (enabled) {
    await ensurePitchShifterWorklet();
    // Re-validate after the await: the mixer may have been destroyed or the
    // channel removed while the worklet module was loading. The old code
    // kept using the pre-await context and would crash or wire into a dead
    // graph.
    const ctx = audioContext;
    if (!ctx || channels.get(identity) !== channel) return;
    if (!voice.pitchShifter) {
      // Splice the shifter into the chain: robotGain → pitchShifter → gain
      voice.pitchShifter = new AudioWorkletNode(ctx, 'pitch-shifter');
      voice.robotGain.disconnect(channel.gain);
      voice.robotGain.connect(voice.pitchShifter);
      voice.pitchShifter.connect(channel.gain);
    }
    voice.pitchShifter.port.postMessage({
      enabled: true,
      pitchFactor: voice.monsterPitchFactor,
    });
    voice.monsterEnabled = true;
  } else if (voice.pitchShifter) {
    // Disable but keep the node in the chain (avoid reconnection glitches).
    voice.pitchShifter.port.postMessage({ enabled: false });
    voice.monsterEnabled = false;
  }
  channel.voiceState.monster = enabled;
}
/**
* Set monster voice pitch factor (0.52.0). Updates the worklet parameter.
*/
export function setMonsterPitchFactor(identity: string, pitchFactor: number): void {
const channel = channels.get(identity);
if (!channel) return;
channel.voice.monsterPitchFactor = Math.max(0.5, Math.min(2.0, pitchFactor));
if (channel.voice.pitchShifter && channel.voice.monsterEnabled) {
channel.voice.pitchShifter.port.postMessage({
pitchFactor: channel.voice.monsterPitchFactor,
});
}
}
/**
* Get the current voice effect state for a channel.
*/
export function getChannelVoiceState(identity: string): { robot: boolean; monster: boolean; robotFrequency: number; robotDepth: number; monsterPitchFactor: number } | null {
const channel = channels.get(identity);
if (!channel) return null;
return {
robot: channel.voiceState.robot,
monster: channel.voiceState.monster,
robotFrequency: channel.voice.robotFrequency,
robotDepth: channel.voice.robotDepthValue,
monsterPitchFactor: channel.voice.monsterPitchFactor,
};
}
// ─── Sound Pads ─────────────────────────────────────────────────────────────

/**
 * Fetch and decode an audio file into a pad's buffer so later playback is
 * instant. Reloading an existing pad reuses its gain node (and any in-flight
 * source) rather than creating a second route to master.
 * @throws Error when the fetch responds with a non-OK status.
 */
export async function loadPadAudio(padId: string, url: string): Promise<void> {
  const ctx = ensureAudioContext();
  const response = await fetch(url);
  if (!response.ok) throw new Error(`Failed to fetch pad audio: ${response.status}`);
  const audioBuffer = await ctx.decodeAudioData(await response.arrayBuffer());

  const existing = pads.get(padId);
  let gain: GainNode;
  if (existing) {
    gain = existing.gain;
  } else {
    gain = ctx.createGain();
    gain.gain.value = 1.0;
    gain.connect(masterGain!); // masterGain exists after ensureAudioContext()
  }
  pads.set(padId, { buffer: audioBuffer, gain, activeSource: existing?.activeSource ?? null });
}
/** Whether a pad's audio buffer has been loaded and is ready to play. */
export function isPadLoaded(padId: string): boolean {
  return pads.has(padId);
}
/**
 * Trigger pad playback. AudioBufferSourceNodes are one-shot (they cannot be
 * restarted after stopping), so each trigger builds a fresh source; a pad
 * that is already playing is cut off first.
 */
export function playPad(padId: string): void {
  const pad = pads.get(padId);
  if (!pad || !audioContext) return;
  stopPad(padId); // retrigger: silence the previous instance
  const src = audioContext.createBufferSource();
  src.buffer = pad.buffer;
  src.connect(pad.gain);
  src.onended = () => {
    // Clear the reference only if it still points at this instance
    // (a retrigger may already have replaced it).
    if (pad.activeSource === src) {
      pad.activeSource = null;
    }
  };
  pad.activeSource = src;
  src.start(0);
}
/** Stop a pad if it is playing; safe to call on idle or unknown pads. */
export function stopPad(padId: string): void {
  const pad = pads.get(padId);
  if (!pad || pad.activeSource === null) return;
  try {
    pad.activeSource.stop();
  } catch {
    // Already stopped — ignore
  }
  pad.activeSource = null;
}
/** Set an individual pad's gain, clamped to 0.0-1.5. */
export function setPadGain(padId: string, value: number): void {
  const pad = pads.get(padId);
  if (pad) {
    pad.gain.gain.value = Math.min(1.5, Math.max(0, value));
  }
}
/**
* Check if a pad is currently playing.
*/
export function isPadPlaying(padId: string): boolean {
const pad = pads.get(padId);
return pad?.activeSource !== null && pad?.activeSource !== undefined;
}
/** Stop a pad, disconnect its gain node, and drop its decoded buffer. */
export function unloadPad(padId: string): void {
  const pad = pads.get(padId);
  if (!pad) return;
  stopPad(padId);
  pad.gain.disconnect();
  pads.delete(padId);
}
/** Unload every registered pad. */
export function unloadAllPads(): void {
  for (const padId of [...pads.keys()]) {
    unloadPad(padId);
  }
}
// ─── Cleanup ───────────────────────────────────────────────────────────────

/**
 * Tear down the whole mixer: remove every channel and pad, disconnect the
 * master chain, and close the AudioContext. All module state is reset so a
 * later ensureAudioContext() call builds a fresh graph.
 */
export function destroyMixer(): void {
  for (const identity of [...channels.keys()]) {
    removeChannel(identity);
  }
  unloadAllPads();
  if (masterAnalyser) {
    masterAnalyser.disconnect();
    masterAnalyser = null;
  }
  if (masterGain) {
    masterGain.disconnect();
    masterGain = null;
  }
  if (audioContext) {
    // Close if still open; either way drop the reference — the previous code
    // kept a stale handle when the context was already closed. close()
    // returns a promise; swallow rejections from this best-effort cleanup.
    if (audioContext.state !== 'closed') {
      void audioContext.close().catch(() => { /* best-effort */ });
    }
    audioContext = null;
  }
  analyserBuffer = null;
  workletLoaded = false;
  workletLoadPromise = null;
}