Fullfører oppgave 16.7: Stemmeeffekter med robot og monster voice

Robotstemme: Ring-modulasjon via OscillatorNode som modulerer
GainNode.gain — gir metallisk, Dalek-aktig effekt. Justerbar
frekvens (30–300 Hz) og modulasjonsdybde (0–100%).

Monsterstemme: Egenutviklet AudioWorkletProcessor med phase vocoder
for sanntids pitch-shifting. Bruker overlap-add med 2048-sample FFT
og 4x overlap for ~42ms latens ved 48kHz. Pitch-faktor 0.5x–2.0x.

UI: Effektvelger-knapper (Robot/Monster) i FX-seksjon per kanal,
med fargekodede parametersliders som vises når effekten er aktiv.
On/off-state synkroniseres via STDB toggle_effect, parametere er
per-klient (ulike brukere kan ha forskjellige monitorinnstillinger).

STDB: Lagt til set_effect_param reducer for fremtidig param-synk
(krever spacetime CLI for publish — ikke deployet ennå).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
vegard 2026-03-18 05:34:59 +00:00
parent 6732a15e4d
commit ac8f8c508d
6 changed files with 737 additions and 18 deletions

View file

@ -43,7 +43,7 @@ Hver kanal kan ha en kjede av prosesseringsmoduler som slås av/på individuelt:
Signalflyt per kanal:
```
Kilde → HighPass(80Hz) → FatBottom → Exciter → Sparkle → RobotMod → PitchShift → GainNode(fader) → Master
```
### 3.3 Sound Pads
@ -234,10 +234,10 @@ Lydmixeren aktiveres via `mixer`-traitet på en samlings-node. Krever at
- [x] Highpass-filter (80Hz) alltid aktiv for rumble-fjerning
### Fase E: Stemmeeffekter
- [ ] Robotstemme (ring-modulasjon med OscillatorNode)
- [ ] Monsterstemme (pitch shift via egenutviklet AudioWorklet)
- [ ] Effektvelger-UI per kanal
- [ ] Parameterjustering (pitch-faktor, oscillator-frekvens)
- [x] Robotstemme (ring-modulasjon med OscillatorNode → GainNode.gain, frekvens 30–300 Hz, dybde 0–100 %)
- [x] Monsterstemme (pitch shift via egenutviklet AudioWorkletProcessor med phase vocoder, pitch 0.5–2.0x)
- [x] Effektvelger-UI per kanal (Robot/Monster-knapper med FX-seksjon, fargekodede parametersliders)
- [x] Parameterjustering (pitch-faktor, oscillator-frekvens, modulasjonsdybde)
## 9. Instruks for Claude Code
- Lydmixeren er **ren frontend** — ingen nye Rust-endepunkter nødvendig

View file

@ -25,10 +25,17 @@
setChannelEffect,
applyActiveEffectsJson,
applyEqPreset,
setRobotVoice,
setRobotFrequency,
setRobotDepth,
setMonsterVoice,
setMonsterPitchFactor,
getChannelVoiceState,
EQ_PRESETS,
type ChannelLevels,
type EqEffectName,
type EqState,
type VoiceEffectName,
} from '$lib/mixer';
interface Props {
@ -50,6 +57,24 @@
let channelLevels: Map<string, ChannelLevels> = $state(new Map());
let masterLevels: ChannelLevels | null = $state(null);
// Voice effect local params (per-client, not synced via STDB)
// Keyed by participant identity. Holds the slider values plus UI visibility
// flags for the per-channel parameter panels. Intentionally kept out of STDB:
// each client may monitor with different settings.
let voiceParams = $state(new Map<string, {
robotFrequency: number;      // robot oscillator Hz (UI slider range 30–300)
robotDepth: number;          // ring-modulation depth 0–1
monsterPitchFactor: number;  // pitch factor (UI slider range 0.5–2.0)
showRobotParams: boolean;    // robot parameter panel visible
showMonsterParams: boolean;  // monster parameter panel visible
}>());
/**
 * Fetch the per-client voice parameter record for a participant,
 * creating it with defaults (80 Hz, 50% depth, 0.7x pitch, panels hidden)
 * on first access.
 */
function getVoiceParams(identity: string) {
const existing = voiceParams.get(identity);
if (existing) return existing;
const created = {
robotFrequency: 80,
robotDepth: 0.5,
monsterPitchFactor: 0.7,
showRobotParams: false,
showMonsterParams: false,
};
voiceParams.set(identity, created);
return created;
}
// Animation frame for VU meters
let animFrameId: number | null = null;
@ -222,22 +247,24 @@
// ─── EQ effect handling ─────────────────────────────────────────────────
// Parse active_effects JSON from STDB into typed state
function parseEffects(json: string | undefined): EqState {
if (!json) return { fat_bottom: false, sparkle: false, exciter: false };
function parseEffects(json: string | undefined): EqState & { robot: boolean; monster: boolean } {
if (!json) return { fat_bottom: false, sparkle: false, exciter: false, robot: false, monster: false };
try {
const parsed = JSON.parse(json);
return {
fat_bottom: parsed.fat_bottom === true,
sparkle: parsed.sparkle === true,
exciter: parsed.exciter === true,
robot: parsed.robot === true,
monster: parsed.monster === true,
};
} catch {
return { fat_bottom: false, sparkle: false, exciter: false };
return { fat_bottom: false, sparkle: false, exciter: false, robot: false, monster: false };
}
}
function getSharedEffects(identity: string): EqState {
if (!roomId) return { fat_bottom: false, sparkle: false, exciter: false };
function getSharedEffects(identity: string): EqState & { robot: boolean; monster: boolean } {
if (!roomId) return { fat_bottom: false, sparkle: false, exciter: false, robot: false, monster: false };
const ch = mixerChannelStore.byParticipant(roomId, identity);
return parseEffects(ch?.activeEffects);
}
@ -255,6 +282,46 @@
}
}
// ─── Voice effect handling ──────────────────────────────────────────────
/**
 * Toggle a voice effect (robot/monster) on a channel.
 * Applies the Web Audio change locally first, then syncs the on/off state to
 * other clients via the STDB toggle_effect reducer. Parameter values are
 * per-client and are NOT synced.
 */
function handleToggleVoiceEffect(identity: string, effect: VoiceEffectName) {
const current = getSharedEffects(identity);
const newEnabled = !current[effect];
const params = getVoiceParams(identity);
if (effect === 'robot') {
setRobotVoice(identity, newEnabled, params.robotFrequency, params.robotDepth);
} else if (effect === 'monster') {
// setMonsterVoice is async (lazy AudioWorklet load) — handle rejection
// so a failed worklet fetch doesn't surface as an unhandled rejection.
setMonsterVoice(identity, newEnabled, params.monsterPitchFactor)
.catch((err) => console.error('Failed to toggle monster voice:', err));
}
// Sync on/off toggle via STDB (params are local)
const conn = stdb.getConnection();
if (conn && roomId) {
// Suppress echo of our own update until the next animation frame.
suppressRemoteSync = true;
conn.reducers.toggleEffect({ roomId, targetUserId: identity, effectName: effect, updatedBy: localIdentity });
requestAnimationFrame(() => { suppressRemoteSync = false; });
}
}
/** Store the new robot oscillator frequency locally and apply it to the audio graph. */
function handleRobotFrequencyChange(identity: string, frequency: number) {
getVoiceParams(identity).robotFrequency = frequency;
setRobotFrequency(identity, frequency);
}
/** Store the new ring-modulation depth locally and apply it to the audio graph. */
function handleRobotDepthChange(identity: string, depth: number) {
getVoiceParams(identity).robotDepth = depth;
setRobotDepth(identity, depth);
}
/** Store the new monster pitch factor locally and push it to the worklet. */
function handleMonsterPitchChange(identity: string, pitchFactor: number) {
getVoiceParams(identity).monsterPitchFactor = pitchFactor;
setMonsterPitchFactor(identity, pitchFactor);
}
function handleApplyPreset(identity: string, presetName: string) {
const preset = EQ_PRESETS.find(p => p.name === presetName);
if (!preset) return;
@ -357,6 +424,7 @@
{@const isChannelViewer = role === 'viewer'}
{@const effects = getSharedEffects(identity)}
{@const currentPreset = matchingPreset(effects)}
{@const vParams = getVoiceParams(identity)}
<div class="rounded-lg border border-gray-100 bg-gray-50 p-2 sm:p-3
{isChannelViewer ? 'opacity-60' : ''}">
@ -410,8 +478,9 @@
</button>
</div>
<!-- EQ effect toggles -->
<!-- Effect toggles row -->
<div class="mt-2 flex flex-wrap items-center gap-1.5">
<!-- EQ effects -->
<span class="text-[10px] text-gray-400 uppercase tracking-wider mr-1">EQ</span>
<button
onclick={() => handleToggleEffect(identity, 'fat_bottom')}
@ -450,6 +519,36 @@
Exciter
</button>
<!-- Separator -->
<span class="text-gray-300 mx-0.5">|</span>
<!-- Voice effects -->
<span class="text-[10px] text-gray-400 uppercase tracking-wider mr-1">FX</span>
<button
onclick={() => handleToggleVoiceEffect(identity, 'robot')}
disabled={isViewer && identity !== localIdentity}
class="rounded px-2 py-0.5 text-[11px] font-medium transition-colors
{effects.robot
? 'bg-emerald-500 text-white'
: 'bg-gray-200 text-gray-500 hover:bg-gray-300'}
disabled:opacity-40 disabled:cursor-not-allowed"
title="Robotstemme: ring-modulasjon (metallisk, Dalek-aktig)"
>
Robot
</button>
<button
onclick={() => handleToggleVoiceEffect(identity, 'monster')}
disabled={isViewer && identity !== localIdentity}
class="rounded px-2 py-0.5 text-[11px] font-medium transition-colors
{effects.monster
? 'bg-rose-500 text-white'
: 'bg-gray-200 text-gray-500 hover:bg-gray-300'}
disabled:opacity-40 disabled:cursor-not-allowed"
title="Monsterstemme: pitch shift ned (dyp, mørk stemme)"
>
Monster
</button>
<!-- Preset selector -->
<select
onchange={(e) => handleApplyPreset(identity, (e.target as HTMLSelectElement).value)}
@ -468,6 +567,71 @@
{/if}
</select>
</div>
<!-- Robot voice parameter sliders (shown when robot is active) -->
{#if effects.robot}
<div class="mt-2 rounded bg-emerald-50 border border-emerald-100 p-2 space-y-1.5">
<div class="flex items-center gap-2">
<span class="text-[10px] text-emerald-600 font-medium w-14 flex-shrink-0">Frekvens</span>
<input
type="range"
min="30"
max="300"
step="1"
value={vParams.robotFrequency}
oninput={(e) => handleRobotFrequencyChange(identity, parseFloat((e.target as HTMLInputElement).value))}
disabled={isViewer && identity !== localIdentity}
class="flex-1 h-1.5 accent-emerald-600 disabled:opacity-40"
/>
<span class="text-[10px] text-emerald-600 tabular-nums w-12 text-right flex-shrink-0">
{vParams.robotFrequency} Hz
</span>
</div>
<div class="flex items-center gap-2">
<span class="text-[10px] text-emerald-600 font-medium w-14 flex-shrink-0">Dybde</span>
<input
type="range"
min="0"
max="1"
step="0.05"
value={vParams.robotDepth}
oninput={(e) => handleRobotDepthChange(identity, parseFloat((e.target as HTMLInputElement).value))}
disabled={isViewer && identity !== localIdentity}
class="flex-1 h-1.5 accent-emerald-600 disabled:opacity-40"
/>
<span class="text-[10px] text-emerald-600 tabular-nums w-12 text-right flex-shrink-0">
{Math.round(vParams.robotDepth * 100)}%
</span>
</div>
</div>
{/if}
<!-- Monster voice parameter slider (shown when monster is active) -->
{#if effects.monster}
<div class="mt-2 rounded bg-rose-50 border border-rose-100 p-2">
<div class="flex items-center gap-2">
<span class="text-[10px] text-rose-600 font-medium w-14 flex-shrink-0">Pitch</span>
<input
type="range"
min="0.5"
max="2.0"
step="0.05"
value={vParams.monsterPitchFactor}
oninput={(e) => handleMonsterPitchChange(identity, parseFloat((e.target as HTMLInputElement).value))}
disabled={isViewer && identity !== localIdentity}
class="flex-1 h-1.5 accent-rose-600 disabled:opacity-40"
/>
<span class="text-[10px] text-rose-600 tabular-nums w-12 text-right flex-shrink-0">
{vParams.monsterPitchFactor.toFixed(2)}x
</span>
</div>
<div class="flex justify-between text-[9px] text-rose-400 mt-0.5 px-1">
<span>Dyp</span>
<span>Normal</span>
<span>Høy</span>
</div>
</div>
{/if}
</div>
{/each}
</div>

View file

@ -2,16 +2,19 @@
* Web Audio mixer graph for Synops.
*
* Manages the audio processing graph:
* MediaStreamSource (per channel) → AnalyserNode → EQ chain → Voice FX → GainNode → MasterGain → destination
*
* Each remote participant and the local microphone gets a channel.
* AnalyserNodes provide real-time level data for VU meters.
* EQ chain: HighPass(80Hz) → FatBottom(lowshelf 200Hz) → Exciter(WaveShaper+highshelf) → Sparkle(highshelf 10kHz)
* Voice FX: Robot (ring modulation) / Monster (pitch shift via AudioWorklet)
*/
// ─── Types ──────────────────────────────────────────────────────────────────
// Boolean EQ toggles stored in the shared active_effects JSON.
export type EqEffectName = 'fat_bottom' | 'sparkle' | 'exciter';
// Voice effects: robot (ring modulation) and monster (pitch shift).
export type VoiceEffectName = 'robot' | 'monster';
// Any toggleable per-channel effect name.
export type EffectName = EqEffectName | VoiceEffectName;
export interface EqNodes {
highpass: BiquadFilterNode;
@ -21,12 +24,32 @@ export interface EqNodes {
exciterFilter: BiquadFilterNode;
}
// Per-channel Web Audio nodes and cached parameter values for voice effects.
export interface VoiceEffectNodes {
// Robot voice: ring modulation
robotGain: GainNode; // Signal passes through; oscillator modulates .gain
robotOscillator: OscillatorNode;
robotDepth: GainNode; // Controls modulation depth (0=off, 1=full)
robotEnabled: boolean;
robotFrequency: number; // 30–300 Hz (clamped in setRobotVoice / setRobotFrequency)
robotDepthValue: number; // 0.0–1.0
// Monster voice: pitch shift via AudioWorklet
pitchShifter: AudioWorkletNode | null; // null until worklet is loaded
monsterEnabled: boolean;
monsterPitchFactor: number; // 0.5–2.0
}
// Shared (STDB-synced) on/off state for the EQ effect chain.
export interface EqState {
fat_bottom: boolean;
sparkle: boolean;
exciter: boolean;
}

// Shared on/off state for the voice effects (parameter values stay per-client).
export interface VoiceState {
robot: boolean;
monster: boolean;
}
export interface EqPreset {
name: string;
label: string;
@ -46,6 +69,8 @@ export interface MixerChannel {
gain: GainNode;
eq: EqNodes;
eqState: EqState;
voice: VoiceEffectNodes;
voiceState: VoiceState;
}
export interface PadState {
@ -72,6 +97,10 @@ const pads = new Map<string, PadState>();
// Reusable buffer for analyser readings (allocated once per context)
let analyserBuffer: Float32Array | null = null;
// AudioWorklet loading state
let workletLoaded = false;
let workletLoadPromise: Promise<void> | null = null;
// ─── AudioContext lifecycle ─────────────────────────────────────────────────
/**
@ -94,6 +123,8 @@ export function ensureAudioContext(): AudioContext {
masterAnalyser.connect(audioContext.destination);
analyserBuffer = null; // will be allocated on first use
workletLoaded = false;
workletLoadPromise = null;
}
// Resume if suspended (happens after tab goes inactive)
@ -108,11 +139,31 @@ export function getAudioContext(): AudioContext | null {
return audioContext;
}
/**
 * Lazily load the pitch-shifter AudioWorklet module; called the first time
 * monster voice is enabled. Safe to call repeatedly: resolves immediately
 * once loaded and dedupes concurrent callers via a shared promise. On
 * failure the promise is cleared so a later call can retry.
 */
async function ensurePitchShifterWorklet(): Promise<void> {
if (workletLoaded) return;
if (!workletLoadPromise) {
const ctx = ensureAudioContext();
workletLoadPromise = (async () => {
try {
await ctx.audioWorklet.addModule('/pitch-shifter-worklet.js');
workletLoaded = true;
} catch (err) {
console.error('Failed to load pitch-shifter worklet:', err);
workletLoadPromise = null;
throw err;
}
})();
}
return workletLoadPromise;
}
// ─── Channel management ────────────────────────────────────────────────────
/**
* Add a channel for a participant's audio track.
* Creates: MediaStreamSource → Analyser → HighPass → FatBottom → Exciter → Sparkle → RobotGain → Gain → Master
* (PitchShifter is inserted between RobotGain and Gain when enabled)
*/
export function addChannel(identity: string, mediaStream: MediaStream): MixerChannel {
const ctx = ensureAudioContext();
@ -129,23 +180,28 @@ export function addChannel(identity: string, mediaStream: MediaStream): MixerCha
// EQ chain nodes
const eq = createEqNodes(ctx);
// Voice effect nodes
const voice = createVoiceEffectNodes(ctx);
const gain = ctx.createGain();
gain.gain.value = 1.0;
// Signal chain: source → analyser → highpass → fatBottom → exciterShaper → exciterFilter → sparkle → gain → masterGain
// Signal chain: source → analyser → highpass → fatBottom → exciterShaper → exciterFilter → sparkle → robotGain → gain → masterGain
source.connect(analyser);
analyser.connect(eq.highpass);
eq.highpass.connect(eq.fatBottom);
eq.fatBottom.connect(eq.exciterShaper);
eq.exciterShaper.connect(eq.exciterFilter);
eq.exciterFilter.connect(eq.sparkle);
eq.sparkle.connect(gain);
eq.sparkle.connect(voice.robotGain);
voice.robotGain.connect(gain);
gain.connect(masterGain!);
// All effects start bypassed (unity/flat)
const eqState: EqState = { fat_bottom: false, sparkle: false, exciter: false };
const voiceState: VoiceState = { robot: false, monster: false };
const channel: MixerChannel = { identity, source, analyser, gain, eq, eqState };
const channel: MixerChannel = { identity, source, analyser, gain, eq, eqState, voice, voiceState };
channels.set(identity, channel);
return channel;
@ -158,6 +214,11 @@ export function removeChannel(identity: string): void {
const channel = channels.get(identity);
if (!channel) return;
// Stop robot oscillator
if (channel.voice.robotEnabled) {
try { channel.voice.robotOscillator.stop(); } catch { /* already stopped */ }
}
channel.source.disconnect();
channel.analyser.disconnect();
channel.eq.highpass.disconnect();
@ -165,6 +226,12 @@ export function removeChannel(identity: string): void {
channel.eq.exciterShaper.disconnect();
channel.eq.exciterFilter.disconnect();
channel.eq.sparkle.disconnect();
channel.voice.robotGain.disconnect();
channel.voice.robotOscillator.disconnect();
channel.voice.robotDepth.disconnect();
if (channel.voice.pitchShifter) {
channel.voice.pitchShifter.disconnect();
}
channel.gain.disconnect();
channels.delete(identity);
}
@ -413,6 +480,7 @@ export function applyEqPreset(identity: string, preset: EqPreset): void {
/**
* Apply active_effects JSON from STDB to a channel's Web Audio nodes.
* Handles both EQ effects (boolean) and voice effects (boolean toggle).
*/
export function applyActiveEffectsJson(identity: string, json: string): void {
const channel = channels.get(identity);
@ -426,11 +494,196 @@ export function applyActiveEffectsJson(identity: string, json: string): void {
setChannelEffect(identity, name, enabled);
}
}
// Voice effects
if (effects.robot !== undefined) {
const robotEnabled = effects.robot === true;
if (channel.voiceState.robot !== robotEnabled) {
setRobotVoice(identity, robotEnabled);
}
}
if (effects.monster !== undefined) {
const monsterEnabled = effects.monster === true;
if (channel.voiceState.monster !== monsterEnabled) {
setMonsterVoice(identity, monsterEnabled);
}
}
} catch {
// Invalid JSON — ignore
}
}
// ─── Voice Effects ──────────────────────────────────────────────────────────
/**
 * Build the per-channel voice-effect node set. Both effects start bypassed.
 *
 * Robot (ring modulation): the dry signal passes through `robotGain` at
 * unity gain; when enabled, a sine oscillator — scaled by the `robotDepth`
 * gain — is connected to `robotGain.gain` to modulate the amplitude,
 * producing a metallic, Dalek-like timbre.
 *
 * Monster (pitch shift): the AudioWorkletNode is created lazily on first
 * enable, so `pitchShifter` stays null here.
 */
function createVoiceEffectNodes(ctx: AudioContext): VoiceEffectNodes {
const robotGain = ctx.createGain();
const robotDepth = ctx.createGain();
const robotOscillator = ctx.createOscillator();

robotGain.gain.value = 1.0; // unity pass-through while disabled
robotDepth.gain.value = 0; // zero modulation until enabled
robotOscillator.type = 'sine';
robotOscillator.frequency.value = 80; // default carrier frequency

// Oscillator feeds the depth gain; the depth gain is only wired into
// robotGain.gain at enable time.
robotOscillator.connect(robotDepth);
robotOscillator.start();

return {
robotGain,
robotOscillator,
robotDepth,
robotEnabled: false,
robotFrequency: 80,
robotDepthValue: 0.5,
pitchShifter: null,
monsterEnabled: false,
monsterPitchFactor: 0.7,
};
}
/**
 * Enable/disable robot voice (ring modulation) on a channel, optionally
 * updating the oscillator frequency (clamped 30–300 Hz) and modulation
 * depth (clamped 0–1) in the same call. No-op for unknown channels.
 */
export function setRobotVoice(identity: string, enabled: boolean, frequency?: number, depth?: number): void {
const channel = channels.get(identity);
if (!channel) return;
const voice = channel.voice;

if (frequency !== undefined) {
voice.robotFrequency = Math.min(300, Math.max(30, frequency));
voice.robotOscillator.frequency.value = voice.robotFrequency;
}
if (depth !== undefined) {
voice.robotDepthValue = Math.min(1, Math.max(0, depth));
}

const wasEnabled = voice.robotEnabled;
if (enabled) {
// Apply the (possibly updated) depth; wire up modulation on first enable.
voice.robotDepth.gain.value = voice.robotDepthValue;
if (!wasEnabled) {
voice.robotDepth.connect(voice.robotGain.gain);
voice.robotEnabled = true;
}
} else if (wasEnabled) {
// Unhook the modulation path and restore clean unity gain.
voice.robotDepth.disconnect(voice.robotGain.gain);
voice.robotDepth.gain.value = 0;
voice.robotGain.gain.value = 1.0;
voice.robotEnabled = false;
}
channel.voiceState.robot = enabled;
}
/**
 * Set the robot-voice oscillator frequency, clamped to 30–300 Hz.
 * Takes effect immediately whether or not the effect is enabled.
 */
export function setRobotFrequency(identity: string, frequency: number): void {
const channel = channels.get(identity);
if (!channel) return;
const clamped = Math.min(300, Math.max(30, frequency));
channel.voice.robotFrequency = clamped;
channel.voice.robotOscillator.frequency.value = clamped;
}
/**
 * Set the robot-voice modulation depth, clamped to 0.0–1.0. The live gain
 * node is only touched while the effect is enabled; otherwise the value is
 * just cached for the next enable.
 */
export function setRobotDepth(identity: string, depth: number): void {
const channel = channels.get(identity);
if (!channel) return;
const clamped = Math.min(1, Math.max(0, depth));
channel.voice.robotDepthValue = clamped;
if (channel.voice.robotEnabled) {
channel.voice.robotDepth.gain.value = clamped;
}
}
/**
 * Enable/disable monster voice (pitch shift) for a channel.
 * Loads the AudioWorklet on first use (async; rejects if the module fails to
 * load). The worklet node is created once and then left in the signal chain
 * permanently — disabling only tells the processor to pass audio through,
 * which avoids reconnection glitches.
 *
 * NOTE(review): two concurrent first-time enables for the same channel could
 * both pass the `!voice.pitchShifter` check after the await and double-insert
 * a node — confirm callers serialize, or add a guard here.
 */
export async function setMonsterVoice(identity: string, enabled: boolean, pitchFactor?: number): Promise<void> {
const channel = channels.get(identity);
// Bail out if the channel is unknown or the AudioContext was never created.
if (!channel || !audioContext) return;
const voice = channel.voice;
if (pitchFactor !== undefined) {
// Clamp to the supported range (0.5x–2.0x).
voice.monsterPitchFactor = Math.max(0.5, Math.min(2.0, pitchFactor));
}
if (enabled) {
// Load worklet if needed
await ensurePitchShifterWorklet();
if (!voice.pitchShifter) {
// Create and insert pitch shifter into the chain
// Current: robotGain → gain
// New: robotGain → pitchShifter → gain
voice.pitchShifter = new AudioWorkletNode(audioContext, 'pitch-shifter');
// Rewire: disconnect robotGain → gain, insert pitchShifter
voice.robotGain.disconnect(channel.gain);
voice.robotGain.connect(voice.pitchShifter);
voice.pitchShifter.connect(channel.gain);
}
voice.pitchShifter.port.postMessage({
enabled: true,
pitchFactor: voice.monsterPitchFactor,
});
voice.monsterEnabled = true;
} else if (voice.pitchShifter) {
// Disable but keep in chain (avoid reconnection glitches)
voice.pitchShifter.port.postMessage({ enabled: false });
voice.monsterEnabled = false;
}
channel.voiceState.monster = enabled;
}
/**
 * Set the monster-voice pitch factor, clamped to 0.5–2.0. The new value is
 * pushed to the worklet only while the effect is active; otherwise it is
 * cached for the next enable.
 */
export function setMonsterPitchFactor(identity: string, pitchFactor: number): void {
const channel = channels.get(identity);
if (!channel) return;
const clamped = Math.min(2.0, Math.max(0.5, pitchFactor));
channel.voice.monsterPitchFactor = clamped;
const shifter = channel.voice.pitchShifter;
if (shifter && channel.voice.monsterEnabled) {
shifter.port.postMessage({ pitchFactor: clamped });
}
}
/**
 * Snapshot a channel's voice-effect state: on/off flags plus the current
 * parameter values. Returns null when no such channel exists.
 */
export function getChannelVoiceState(identity: string): { robot: boolean; monster: boolean; robotFrequency: number; robotDepth: number; monsterPitchFactor: number } | null {
const channel = channels.get(identity);
if (!channel) return null;
const { voice, voiceState } = channel;
return {
robot: voiceState.robot,
monster: voiceState.monster,
robotFrequency: voice.robotFrequency,
robotDepth: voice.robotDepthValue,
monsterPitchFactor: voice.monsterPitchFactor,
};
}
// ─── Sound Pads ─────────────────────────────────────────────────────────────
/**
@ -569,4 +822,6 @@ export function destroyMixer(): void {
}
analyserBuffer = null;
workletLoaded = false;
workletLoadPromise = null;
}

View file

@ -0,0 +1,244 @@
/**
* Pitch Shifter AudioWorkletProcessor
*
* Phase vocoder implementation for real-time pitch shifting.
* Used for "monster voice" effect in Synops mixer.
*
* Algorithm: overlap-add with phase vocoder frequency-domain processing.
* - FFT size: 2048 samples (good balance of quality vs latency at 48kHz)
* - Hop size: 512 samples (4x overlap)
* - Latency: ~42ms at 48kHz
*
* Parameters:
* - pitchFactor: 0.5 (octave down) to 2.0 (octave up), default 0.7 (monster)
*/
class PitchShifterProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Effect parameters — updated from the main thread via port messages.
    this.pitchFactor = 0.7; // default monster voice
    this.enabled = false;

    // FFT parameters
    this.fftSize = 2048;
    this.hopSize = 512; // fftSize / 4
    this.overlap = 4;

    // Circular input buffer (2x fftSize so a full analysis frame is always
    // available behind the write position)
    this.inputBuffer = new Float32Array(this.fftSize * 2);
    this.inputWritePos = 0;
    this.inputSamplesReady = 0;

    // Output overlap-add buffer (also circular; slots are cleared as read)
    this.outputBuffer = new Float32Array(this.fftSize * 2);
    this.outputReadPos = 0;

    // Phase tracking for vocoder: previous analysis phase per bin, and the
    // accumulated synthesis phase per output bin.
    this.lastInputPhase = new Float32Array(this.fftSize);
    this.lastOutputPhase = new Float32Array(this.fftSize);

    // Hann window (used for both analysis and synthesis)
    this.window = new Float32Array(this.fftSize);
    for (let i = 0; i < this.fftSize; i++) {
      this.window[i] = 0.5 * (1 - Math.cos((2 * Math.PI * i) / this.fftSize));
    }

    // Working buffers for FFT
    this.fftReal = new Float32Array(this.fftSize);
    this.fftImag = new Float32Array(this.fftSize);
    this.synthReal = new Float32Array(this.fftSize);
    this.synthImag = new Float32Array(this.fftSize);

    // Pre-compute bit-reversal table
    this.bitRev = new Uint32Array(this.fftSize);
    const bits = Math.log2(this.fftSize);
    for (let i = 0; i < this.fftSize; i++) {
      let rev = 0;
      let val = i;
      for (let b = 0; b < bits; b++) {
        rev = (rev << 1) | (val & 1);
        val >>= 1;
      }
      this.bitRev[i] = rev;
    }

    // Pre-compute twiddle factors
    this.twiddleReal = new Float32Array(this.fftSize / 2);
    this.twiddleImag = new Float32Array(this.fftSize / 2);
    for (let i = 0; i < this.fftSize / 2; i++) {
      const angle = (-2 * Math.PI * i) / this.fftSize;
      this.twiddleReal[i] = Math.cos(angle);
      this.twiddleImag[i] = Math.sin(angle);
    }

    // Listen for parameter changes
    this.port.onmessage = (e) => {
      if (e.data.pitchFactor !== undefined) {
        this.pitchFactor = Math.max(0.5, Math.min(2.0, e.data.pitchFactor));
      }
      if (e.data.enabled !== undefined) {
        this.enabled = e.data.enabled;
        if (!this.enabled) {
          // Clear buffers on disable so stale audio and phase state cannot
          // leak into the next enable.
          this.inputBuffer.fill(0);
          this.outputBuffer.fill(0);
          this.lastInputPhase.fill(0);
          this.lastOutputPhase.fill(0);
          this.inputSamplesReady = 0;
        }
      }
    };
  }

  // In-place FFT (Cooley-Tukey radix-2 DIT). Pass inverse=true for the
  // inverse transform (conjugated twiddles plus 1/n normalization).
  fft(real, imag, inverse) {
    const n = this.fftSize;
    // Bit-reversal permutation
    for (let i = 0; i < n; i++) {
      const j = this.bitRev[i];
      if (i < j) {
        let tmp = real[i]; real[i] = real[j]; real[j] = tmp;
        tmp = imag[i]; imag[i] = imag[j]; imag[j] = tmp;
      }
    }
    // Butterfly stages
    for (let size = 2; size <= n; size *= 2) {
      const halfSize = size / 2;
      const step = n / size;
      for (let i = 0; i < n; i += size) {
        for (let j = 0; j < halfSize; j++) {
          const twIdx = j * step;
          let twR = this.twiddleReal[twIdx];
          let twI = this.twiddleImag[twIdx];
          if (inverse) twI = -twI;
          const idx1 = i + j;
          const idx2 = i + j + halfSize;
          const tR = twR * real[idx2] - twI * imag[idx2];
          const tI = twR * imag[idx2] + twI * real[idx2];
          real[idx2] = real[idx1] - tR;
          imag[idx2] = imag[idx1] - tI;
          real[idx1] += tR;
          imag[idx1] += tI;
        }
      }
    }
    if (inverse) {
      for (let i = 0; i < n; i++) {
        real[i] /= n;
        imag[i] /= n;
      }
    }
  }

  // Run one phase-vocoder analysis/synthesis pass over the most recent
  // fftSize input samples and overlap-add the result into the output buffer.
  processFrame() {
    const n = this.fftSize;
    const hopSize = this.hopSize;
    const pitchFactor = this.pitchFactor;

    // Extract windowed frame from input buffer
    const readStart = ((this.inputWritePos - n) + this.inputBuffer.length) % this.inputBuffer.length;
    for (let i = 0; i < n; i++) {
      const idx = (readStart + i) % this.inputBuffer.length;
      this.fftReal[i] = this.inputBuffer[idx] * this.window[i];
      this.fftImag[i] = 0;
    }

    // Forward FFT
    this.fft(this.fftReal, this.fftImag, false);

    // Phase vocoder analysis + synthesis
    // NOTE(review): this loop walks all n bins — including the
    // negative-frequency half — without enforcing conjugate symmetry, and the
    // imaginary part after the inverse FFT is discarded (only synthReal is
    // overlap-added). Confirm this is an accepted quality tradeoff.
    const freqPerBin = sampleRate / n;
    const expectedPhaseAdvance = (2 * Math.PI * hopSize) / n;
    for (let k = 0; k < n; k++) {
      // Analysis: get magnitude and phase
      const mag = Math.sqrt(this.fftReal[k] * this.fftReal[k] + this.fftImag[k] * this.fftImag[k]);
      const phase = Math.atan2(this.fftImag[k], this.fftReal[k]);

      // Phase difference from last frame
      let phaseDiff = phase - this.lastInputPhase[k];
      this.lastInputPhase[k] = phase;

      // Remove expected phase advance
      phaseDiff -= k * expectedPhaseAdvance;

      // Wrap to [-pi, pi]
      phaseDiff = phaseDiff - 2 * Math.PI * Math.round(phaseDiff / (2 * Math.PI));

      // True frequency of this bin
      const trueFreq = k * freqPerBin + (phaseDiff * freqPerBin) / expectedPhaseAdvance;

      // Synthesis: map to new bin position
      const newBin = Math.round(k * pitchFactor);
      if (newBin >= 0 && newBin < n) {
        // Accumulate phase for output
        const outputPhaseAdvance = (2 * Math.PI * hopSize * (trueFreq * pitchFactor)) / sampleRate;
        this.lastOutputPhase[newBin] += outputPhaseAdvance;
        this.synthReal[newBin] = mag * Math.cos(this.lastOutputPhase[newBin]);
        this.synthImag[newBin] = mag * Math.sin(this.lastOutputPhase[newBin]);
      }
    }

    // Inverse FFT
    this.fft(this.synthReal, this.synthImag, true);

    // Overlap-add to output buffer, re-windowed and scaled by 1/overlap to
    // compensate for the 4x frame overlap.
    for (let i = 0; i < n; i++) {
      const idx = (this.outputReadPos + i) % this.outputBuffer.length;
      this.outputBuffer[idx] += this.synthReal[i] * this.window[i] / this.overlap;
    }

    // Clear synth buffers for next frame
    this.synthReal.fill(0);
    this.synthImag.fill(0);
  }

  // Render callback. NOTE(review): only the first channel of the first input
  // is processed (mono); additional channels are ignored — confirm upstream
  // delivers mono here.
  process(inputs, outputs) {
    const input = inputs[0];
    const output = outputs[0];
    if (!input || !input[0] || !output || !output[0]) return true;

    const inputChannel = input[0];
    const outputChannel = output[0];
    const blockSize = inputChannel.length; // typically 128

    if (!this.enabled) {
      // Pass through when disabled
      outputChannel.set(inputChannel);
      return true;
    }

    // Feed input samples into circular buffer
    for (let i = 0; i < blockSize; i++) {
      this.inputBuffer[this.inputWritePos] = inputChannel[i];
      this.inputWritePos = (this.inputWritePos + 1) % this.inputBuffer.length;
      this.inputSamplesReady++;

      // Process a frame every hopSize samples, once we have enough data
      if (this.inputSamplesReady >= this.fftSize && this.inputSamplesReady % this.hopSize === 0) {
        this.processFrame();
      }
    }

    // Read from output buffer (slots are zeroed after reading so future
    // overlap-add passes start from silence)
    for (let i = 0; i < blockSize; i++) {
      outputChannel[i] = this.outputBuffer[this.outputReadPos];
      this.outputBuffer[this.outputReadPos] = 0; // clear after reading
      this.outputReadPos = (this.outputReadPos + 1) % this.outputBuffer.length;
    }

    return true;
  }
}

// Name must match the string passed to `new AudioWorkletNode(ctx, 'pitch-shifter')`.
registerProcessor('pitch-shifter', PitchShifterProcessor);

View file

@ -589,6 +589,63 @@ pub fn toggle_effect(
Ok(())
}
/// Set a numeric effect parameter in the `active_effects` JSON string.
/// Used for voice effects (robot_freq, robot_depth, monster_pitch) that carry
/// parameter values rather than just on/off flags. The key is set to the
/// given value, inserted if absent.
///
/// NOTE(review): the JSON is edited by raw string splicing rather than a
/// parser — this assumes a flat object with no whitespace after ':' and only
/// scalar values. Confirm every writer of `active_effects` upholds that.
#[reducer]
pub fn set_effect_param(
    ctx: &ReducerContext,
    room_id: String,
    target_user_id: String,
    param_name: String,
    value: f64,
    updated_by: String,
) -> Result<(), String> {
    // Channel rows are keyed "<room_id>:<target_user_id>".
    let id = format!("{room_id}:{target_user_id}");
    let existing = ctx.db.mixer_channel().id().find(&id)
        .ok_or_else(|| format!("Mixer-kanal {} ikke funnet", id))?;

    // A viewer-role channel may only be modified by its own user.
    if existing.role == "viewer" && existing.target_user_id != updated_by {
        return Err("Viewer kan ikke endre mixer-innstillinger".into());
    }

    let mut effects = existing.active_effects.clone();
    let param_pattern = format!("\"{}\":", param_name);
    if effects.contains(&param_pattern) {
        // Replace existing value — find the key and replace up to next comma or }
        if let Some(start) = effects.find(&param_pattern) {
            let value_start = start + param_pattern.len();
            // Find end of value (next comma or closing brace).
            // NOTE(review): a ',' anywhere in the remainder wins over an
            // earlier '}'; safe only while values are flat scalars (no nested
            // objects/arrays) — confirm.
            let rest = &effects[value_start..];
            let value_end = rest.find(',').unwrap_or_else(|| rest.find('}').unwrap_or(rest.len()));
            effects = format!(
                "{}{}{}",
                &effects[..value_start],
                value,
                &effects[value_start + value_end..]
            );
        }
    } else {
        // Add new param
        if effects == "{}" {
            effects = format!("{{\"{}\":{}}}", param_name, value);
        } else {
            // NOTE(review): assumes `effects` is a non-empty JSON object ending
            // in '}' — an empty string here would produce invalid JSON. Confirm
            // the column default is "{}".
            effects = effects.trim_end_matches('}').to_string()
                + &format!(",\"{}\":{}}}", param_name, value);
        }
    }

    // Persist the updated JSON plus audit fields.
    ctx.db.mixer_channel().id().update(MixerChannel {
        active_effects: effects,
        updated_by,
        updated_at: ctx.timestamp,
        ..existing
    });
    Ok(())
}
/// Slett en mixer-kanal (når deltaker forlater rommet).
#[reducer]
pub fn delete_mixer_channel(

View file

@ -183,8 +183,7 @@ Ref: `docs/features/lydmixer.md`
- [x] 16.4 Delt mixer-kontroll via SpacetimeDB: `MixerChannel`-tabell + reducers (`set_gain`, `set_mute`, `toggle_effect`). Frontend abonnerer og oppdaterer Web Audio-graf ved endring fra andre deltakere. Visuell feedback (sliders beveger seg i sanntid). Tilgangskontroll: eier/admin kan sette deltaker til viewer-modus.
- [x] 16.5 Sound pads: pad-grid UI (4×2), forhåndslast lydfiler fra CAS til `AudioBuffer`. Avspilling ved trykk (`AudioBufferSourceNode`). Pad-konfig i `metadata.mixer.pads` (label, farge, cas_hash). Synkronisert avspilling via LiveKit Data Message.
- [x] 16.6 EQ-effektkjede: fat bottom (`BiquadFilterNode` lowshelf ~200Hz), sparkle (`BiquadFilterNode` highshelf ~10kHz), exciter (`WaveShaperNode` + highshelf). Per-kanal toggles, synkronisert via STDB. Presets (podcast-stemme, radio-stemme).
- [~] 16.7 Stemmeeffekter: robotstemme (ring-modulasjon: `OscillatorNode``GainNode.gain`), monsterstemme (egenutviklet `AudioWorkletProcessor` med phase vocoder for pitch shift). Effektvelger-UI per kanal. Parameterjustering (pitch-faktor, oscillator-frekvens).
> Påbegynt: 2026-03-18T05:25
- [x] 16.7 Stemmeeffekter: robotstemme (ring-modulasjon: `OscillatorNode` → `GainNode.gain`), monsterstemme (egenutviklet `AudioWorkletProcessor` med phase vocoder for pitch shift). Effektvelger-UI per kanal. Parameterjustering (pitch-faktor, oscillator-frekvens).
## Fase 17: Lydstudio-utbedring