Add emit proxy fix and HRTF audio mode

This commit is contained in:
Jage9
2026-03-09 04:06:46 -04:00
parent ef656b2b39
commit a34a9f7f42
13 changed files with 663 additions and 123 deletions

View File

@@ -106,6 +106,10 @@
"keys": "M",
"description": "Mute/unmute yourself"
},
{
"keys": "H",
"description": "Toggle classic/HRTF spatial audio"
},
{
"keys": "Shift+M",
"description": "Toggle stereo/mono output"

View File

@@ -1,5 +1,5 @@
// Maintainer-controlled web client version metadata.
window.CHGRID_RELEASE_VERSION = "0.1.1";
window.CHGRID_CLIENT_REVISION = "R350";
window.CHGRID_CLIENT_REVISION = "R351";
// Optional display timezone for timestamps. Falls back to America/Detroit if unset/invalid.
window.CHGRID_TIME_ZONE = "America/Detroit";

View File

@@ -7,7 +7,14 @@ import {
type EffectId,
type EffectRuntime,
} from './effects';
import { applySpatialMixToNodes, resolveSpatialMix, SPATIAL_RAMP_SECONDS, SPATIAL_TIME_CONSTANT_SECONDS } from './spatial';
import { resolveSpatialMix } from './spatial';
import {
applySpatialOutput,
createSpatialOutputRuntime,
disconnectSpatialOutputRuntime,
type SpatialOutputRuntime,
type SpatialRenderMode,
} from './spatialOutput';
export type SpatialPeerRuntime = {
nickname: string;
@@ -15,7 +22,7 @@ export type SpatialPeerRuntime = {
y: number;
listenGain?: number;
gain?: GainNode;
panner?: StereoPannerNode;
spatialOutput?: SpatialOutputRuntime;
audioElement?: HTMLAudioElement;
};
@@ -36,7 +43,7 @@ type ActiveSpatialSampleRuntime = {
range: number;
baseGain: number;
gainNode: GainNode;
pannerNode: StereoPannerNode | null;
spatialOutput: SpatialOutputRuntime;
sourceNode: AudioBufferSourceNode;
};
@@ -56,6 +63,7 @@ export class AudioEngine {
private loopbackEnabled = false;
private loopbackRuntime: EffectRuntime | null = null;
private outputMode: OutputMode = 'stereo';
private spatialMode: SpatialRenderMode = 'classic';
private masterVolume = 50;
private voiceLayerEnabled = true;
private effectIndex = EFFECT_SEQUENCE.findIndex((effect) => effect.id === 'off');
@@ -189,6 +197,19 @@ export class AudioEngine {
this.outputMode = mode;
}
setSpatialMode(mode: SpatialRenderMode): void {
this.spatialMode = mode;
}
getSpatialMode(): SpatialRenderMode {
return this.spatialMode;
}
toggleSpatialMode(): SpatialRenderMode {
this.spatialMode = this.spatialMode === 'classic' ? 'hrtf' : 'classic';
return this.spatialMode;
}
setMasterVolume(value: number): number {
const next = Math.max(0, Math.min(100, Number.isFinite(value) ? Math.round(value) : 50));
this.masterVolume = next;
@@ -279,21 +300,20 @@ export class AudioEngine {
const gainNode = this.audioCtx.createGain();
sourceNode.connect(gainNode);
let pannerNode: StereoPannerNode | undefined;
if (this.supportsStereoPanner()) {
pannerNode = this.audioCtx.createStereoPanner();
let spatialOutput: SpatialOutputRuntime = { kind: 'none' };
if (this.voiceLayerEnabled) {
gainNode.connect(pannerNode).connect(this.masterGainNode ?? this.audioCtx.destination);
}
} else {
if (this.voiceLayerEnabled) {
gainNode.connect(this.masterGainNode ?? this.audioCtx.destination);
}
spatialOutput = createSpatialOutputRuntime({
audioCtx: this.audioCtx,
inputNode: gainNode,
destination: this.masterGainNode ?? this.audioCtx.destination,
outputMode: this.outputMode,
spatialMode: this.spatialMode,
});
}
peer.audioElement = audioElement;
peer.gain = gainNode;
peer.panner = pannerNode;
peer.spatialOutput = spatialOutput;
}
updateSpatialAudio(peers: Iterable<SpatialPeerRuntime>, playerPosition: { x: number; y: number }): void {
@@ -310,13 +330,15 @@ export class AudioEngine {
});
const listenGain = Number.isFinite(peer.listenGain) ? Math.max(0, peer.listenGain as number) : 1;
const scaledMix = mix ? { ...mix, gain: mix.gain * listenGain } : null;
applySpatialMixToNodes({
applySpatialOutput({
audioCtx: this.audioCtx,
runtime: peer.spatialOutput ?? { kind: 'none' },
gainNode: peer.gain,
pannerNode: peer.panner ?? null,
mix: scaledMix,
outputMode: this.outputMode,
transition: 'target',
dx: peer.x - playerPosition.x,
dy: peer.y - playerPosition.y,
});
}
}
@@ -375,20 +397,20 @@ export class AudioEngine {
const gainNode = audioCtx.createGain();
gainNode.gain.setValueAtTime(0, audioCtx.currentTime);
source.connect(gainNode);
let pannerNode: StereoPannerNode | null = null;
if (this.supportsStereoPanner() && this.outputMode === 'stereo') {
pannerNode = audioCtx.createStereoPanner();
gainNode.connect(pannerNode).connect(sfxGainNode);
} else {
gainNode.connect(sfxGainNode);
}
const spatialOutput = createSpatialOutputRuntime({
audioCtx,
inputNode: gainNode,
destination: sfxGainNode,
outputMode: this.outputMode,
spatialMode: this.spatialMode,
});
const runtime: ActiveSpatialSampleRuntime = {
sourceX: sourcePosition.x,
sourceY: sourcePosition.y,
range: Math.max(1, range),
baseGain: gain,
gainNode,
pannerNode,
spatialOutput,
sourceNode: source,
};
this.activeSpatialSamples.add(runtime);
@@ -401,7 +423,7 @@ export class AudioEngine {
// Ignore stale graph disconnects.
}
gainNode.disconnect();
pannerNode?.disconnect();
disconnectSpatialOutputRuntime(spatialOutput);
};
source.start();
} catch {
@@ -428,20 +450,20 @@ export class AudioEngine {
const gainNode = audioCtx.createGain();
gainNode.gain.setValueAtTime(0, audioCtx.currentTime);
source.connect(gainNode);
let pannerNode: StereoPannerNode | null = null;
if (this.supportsStereoPanner() && this.outputMode === 'stereo') {
pannerNode = audioCtx.createStereoPanner();
gainNode.connect(pannerNode).connect(sfxGainNode);
} else {
gainNode.connect(sfxGainNode);
}
const spatialOutput = createSpatialOutputRuntime({
audioCtx,
inputNode: gainNode,
destination: sfxGainNode,
outputMode: this.outputMode,
spatialMode: this.spatialMode,
});
const runtime: ActiveSpatialSampleRuntime = {
sourceX: sourcePosition.x,
sourceY: sourcePosition.y,
range: Math.max(1, range),
baseGain: gain,
gainNode,
pannerNode,
spatialOutput,
sourceNode: source,
};
this.activeSpatialSamples.add(runtime);
@@ -455,7 +477,7 @@ export class AudioEngine {
// Ignore stale graph disconnects.
}
gainNode.disconnect();
pannerNode?.disconnect();
disconnectSpatialOutputRuntime(spatialOutput);
resolve();
};
source.start();
@@ -526,10 +548,12 @@ export class AudioEngine {
peer.audioElement.remove();
}
peer.gain?.disconnect();
peer.panner?.disconnect();
if (peer.spatialOutput) {
disconnectSpatialOutputRuntime(peer.spatialOutput);
}
peer.audioElement = undefined;
peer.gain = undefined;
peer.panner = undefined;
peer.spatialOutput = undefined;
}
private rebuildOutboundEffectGraph(): void {
@@ -589,8 +613,6 @@ export class AudioEngine {
: { gain: baseGain, pan: 0 };
if (!resolved) return;
const finalGain = resolved.gain;
const panValue = spec.sourcePosition ? resolved.pan : undefined;
if (finalGain <= 0) return;
const startTime = audioCtx.currentTime + (spec.delay ?? 0);
@@ -603,16 +625,40 @@ export class AudioEngine {
gainNode.gain.exponentialRampToValueAtTime(0.001, startTime + spec.duration);
oscillator.connect(gainNode);
if (panValue !== undefined && this.supportsStereoPanner() && this.outputMode === 'stereo') {
const panner = audioCtx.createStereoPanner();
panner.pan.setValueAtTime(Math.max(-1, Math.min(1, panValue)), startTime);
let spatialOutput: SpatialOutputRuntime | null = null;
if (spec.sourcePosition && this.outputMode === 'stereo') {
if (this.spatialMode === 'hrtf' && typeof audioCtx.createPanner === 'function') {
const panner = audioCtx.createPanner();
panner.panningModel = 'HRTF';
panner.distanceModel = 'inverse';
panner.refDistance = 1;
panner.maxDistance = 10000;
panner.rolloffFactor = 0;
panner.positionX.setValueAtTime(spec.sourcePosition.x, startTime);
panner.positionY.setValueAtTime(0, startTime);
panner.positionZ.setValueAtTime(-spec.sourcePosition.y, startTime);
gainNode.connect(panner).connect(sfxGainNode);
spatialOutput = { kind: 'hrtf', node: panner };
} else if (this.supportsStereoPanner()) {
const panner = audioCtx.createStereoPanner();
panner.pan.setValueAtTime(Math.max(-1, Math.min(1, resolved.pan)), startTime);
gainNode.connect(panner).connect(sfxGainNode);
spatialOutput = { kind: 'classic', node: panner };
} else {
gainNode.connect(sfxGainNode);
}
} else {
gainNode.connect(sfxGainNode);
}
oscillator.start(startTime);
oscillator.stop(startTime + spec.duration);
oscillator.onended = () => {
if (spatialOutput) {
disconnectSpatialOutputRuntime(spatialOutput);
}
gainNode.disconnect();
};
}
private applySpatialSampleRuntime(
@@ -628,22 +674,27 @@ export class AudioEngine {
baseGain: sample.baseGain,
});
if (initial) {
const gainValue = mix?.gain ?? 0;
sample.gainNode.gain.setTargetAtTime(gainValue, this.audioCtx.currentTime, ONE_SHOT_ATTACK_SECONDS);
if (sample.pannerNode) {
const panValue = mix?.pan ?? 0;
const resolvedPan = this.outputMode === 'mono' ? 0 : Math.max(-1, Math.min(1, panValue));
sample.pannerNode.pan.setValueAtTime(resolvedPan, this.audioCtx.currentTime);
}
applySpatialOutput({
audioCtx: this.audioCtx,
runtime: sample.spatialOutput,
gainNode: sample.gainNode,
mix,
outputMode: this.outputMode,
transition: 'linear',
dx: sample.sourceX - playerPosition.x,
dy: sample.sourceY - playerPosition.y,
});
return;
}
applySpatialMixToNodes({
applySpatialOutput({
audioCtx: this.audioCtx,
runtime: sample.spatialOutput,
gainNode: sample.gainNode,
pannerNode: sample.pannerNode,
mix,
outputMode: this.outputMode,
transition: 'target',
dx: sample.sourceX - playerPosition.x,
dy: sample.sourceY - playerPosition.y,
});
}

View File

@@ -0,0 +1,298 @@
## Goal
Add an optional HRTF-based spatial audio mode for Chat Grid so positional sounds use browser 3D panning rather than the current shared left/right stereo pan model.
This must preserve the current source-specific behavior of grid audio. Different sounds already originate from different positions and runtimes, and that should remain true after any HRTF work.
## Feasibility
This is feasible in the current client architecture.
Why:
- The client already has shared spatial math in [`spatial.ts`](/home/jjm/code/chgrid/client/src/audio/spatial.ts).
- Most spatial sources already route through a small set of audio modules.
- The browser Web Audio API supports `PannerNode` with `panningModel = "HRTF"`.
What is not true today:
- There is not one single central spatial node for all sources.
- Most spatial sources still create their own `StereoPannerNode` directly.
So the right plan is not "flip one switch." The right plan is to introduce a shared spatial output abstraction, then migrate the existing spatial sources to it.
## Current Spatial Coverage
The current spatial system already covers most of the sources you care about:
- peer voice in [`audioEngine.ts`](/home/jjm/code/chgrid/client/src/audio/audioEngine.ts)
- remote footsteps / teleports / item-use one-shots in [`audioEngine.ts`](/home/jjm/code/chgrid/client/src/audio/audioEngine.ts) and [`main.ts`](/home/jjm/code/chgrid/client/src/main.ts)
- clock announcements in [`clockAnnouncer.ts`](/home/jjm/code/chgrid/client/src/audio/clockAnnouncer.ts)
- radios in [`radioStationRuntime.ts`](/home/jjm/code/chgrid/client/src/audio/radioStationRuntime.ts)
- item emit sounds in [`itemEmitRuntime.ts`](/home/jjm/code/chgrid/client/src/audio/itemEmitRuntime.ts)
- piano notes in [`pianoSynth.ts`](/home/jjm/code/chgrid/client/src/audio/pianoSynth.ts)
The common part today is the gain/pan math in [`spatial.ts`](/home/jjm/code/chgrid/client/src/audio/spatial.ts), not the actual Web Audio node graph.
## Main Constraint
The current spatial model computes:
- gain
- stereo pan
HRTF needs more than that:
- source position on X/Y/Z axes
- listener position
- listener orientation
- `PannerNode` distance model and cone settings
So the plan should keep the existing spatial math for range/directional audibility, but move pan handling into a shared HRTF-aware node builder.
## Recommended Design
### 1. Introduce a spatial mode setting
Add a new audio spatial mode setting as its own axis, rather than growing the current output mode list (`stereo`, `mono`) into a three-way `stereo` / `mono` / `hrtf` toggle.
Do not overload the existing `mono` / `stereo` toggle with HRTF semantics.
Why:
- mono/stereo is a speaker/downmix preference
- HRTF is a spatial rendering mode
If you want to keep the current command surface small, the first pass can expose:
- output mode: `mono` / `stereo`
- spatial mode: `classic` / `hrtf`
Where:
- `classic` means current gain + `StereoPannerNode` behavior
- `hrtf` means current gain plus `PannerNode`
For now, a simple keyboard toggle is reasonable. `H` makes sense as an initial shortcut as long as it does not conflict with an existing command in normal mode.
### 2. Add a shared spatial node helper
Create one shared helper under `client/src/audio/`, for example:
- `spatialGraph.ts`
It should own:
- creation of either `StereoPannerNode` or `PannerNode`
- common connect/disconnect behavior
- common position/orientation updates
- a small runtime type so all spatial sources can be updated uniformly
What it should not do:
- erase the fact that different sound sources have different lifecycles
- collapse radio, voice, emitters, piano, and one-shots into one generic runtime if that loses behavior
The centralization goal should be limited to shared node construction and shared spatial updates, not flattening all audio features into one code path.
This helper should replace direct `createStereoPanner()` calls in:
- [`audioEngine.ts`](/home/jjm/code/chgrid/client/src/audio/audioEngine.ts)
- [`radioStationRuntime.ts`](/home/jjm/code/chgrid/client/src/audio/radioStationRuntime.ts)
- [`itemEmitRuntime.ts`](/home/jjm/code/chgrid/client/src/audio/itemEmitRuntime.ts)
- [`pianoSynth.ts`](/home/jjm/code/chgrid/client/src/audio/pianoSynth.ts)
### 3. Keep current gain/distance logic in `spatial.ts`
The current `resolveSpatialMix()` logic is still useful for:
- audibility cutoff
- gain shaping
- directional attenuation
I would keep that server/game-feel logic and reuse it for HRTF mode as the gain envelope.
What should change:
- `pan` should stop being the main output for HRTF mode
- HRTF mode should instead map source/listener coordinates into a `PannerNode`
So the likely split is:
- `resolveSpatialMix()` continues to return gain and optional directional attenuation
- a new helper computes node position/orientation updates for HRTF
### 4. Add listener orientation support
HRTF only becomes meaningful if the listener orientation is updated.
The natural mapping here is:
- listener position: player `x`, `y`
- listener forward direction: player facing / heading
If the grid does not currently track a stable listening orientation outside movement, define one explicitly and keep it updated in the main loop or audio engine update path.
Without listener orientation, HRTF will still spatialize left/right, but front/back cues will be much weaker and less intentional.
### 5. Preserve current item and source features
This is the main guardrail for the change.
The HRTF work should preserve existing behavior for:
- radio channel routing and radio effect chains
- item emit timing, looping, delays, and effect chains
- piano voice handling and release behavior
- peer voice listen gain
- directional cones / rear attenuation
- distance-gated subscribe / unsubscribe behavior
- current per-source positions on the grid
The correct implementation is:
- keep source-specific runtimes where they still own real behavior
- centralize only the spatial rendering layer they share
If a piece of code looks similar but still owns different behavior, treat it as separate unless the duplication is clearly only about node construction or coordinate updates.
### 6. Convert spatial sources incrementally
Recommended order:
1. peer voice
2. one-shot world sounds in `AudioEngine`
3. radios
4. item emitters
5. piano synth
That order gives the largest user impact first and keeps the early work in the most centralized code.
### 7. Preserve current directional muffling/effects behavior
Directional cones and muffling already exist in the current spatial logic for items/radios.
Do not move that responsibility into `PannerNode` alone.
Instead:
- keep current directional attenuation logic in `spatial.ts`
- optionally later map some of it to `coneInnerAngle`, `coneOuterAngle`, and `coneOuterGain`
For the first pass, software-side directional gain shaping is simpler and more predictable.
## Important Realities
### Not every sound should use HRTF
These should remain non-spatial:
- UI confirmations/cancels
- local footstep/self-confirmation sounds
- menu/help feedback
HRTF should apply only to world-positioned sounds.
### Radio and emitters are continuous sources
These are not one-shot sounds. For them, the implementation needs:
- persistent `PannerNode` lifecycles
- regular listener/source position updates
- no audible zippering/clicks on movement updates
That is why shared spatial node handling matters.
### Voice is the best early target
Peer voice already has:
- per-peer runtime state
- continuous streaming
- position updates every frame
So it is the strongest real-world test for whether HRTF improves the grid.
## Suggested First Pass Scope
First pass should do only this:
- add `classic` vs `hrtf` spatial mode
- add a temporary `H` toggle for that mode
- support HRTF for:
- peer voice
- remote one-shot spatial samples
- radios
- item emitters
- leave piano on the old model until the shared helper is stable if needed
That gets most of the value without forcing every audio path to change at once.
## User Settings / Commands
The current client already stores output mode in [`settingsStore.ts`](/home/jjm/code/chgrid/client/src/settings/settingsStore.ts) and toggles it from [`main.ts`](/home/jjm/code/chgrid/client/src/main.ts).
I would add:
- persisted spatial mode setting
- one command to cycle:
- `classic`
- `hrtf`
For the first pass, mapping that command to `H` is reasonable.
Keep `mono` / `stereo` separate.
If needed, HRTF mode can automatically degrade to `classic` when browser support is missing.
## Testing Plan
### Functional
- peer voice moves around listener and remains audible
- front/back changes are perceptible with facing changes
- radio/item emitters move cleanly with no disconnects
- clock announcements and remote footsteps still play
- mono output still disables spatial left/right behavior cleanly
### Regression
- no breaks in existing media/effect chains
- no stuck nodes after item cleanup / peer disconnect
- no crashes on browsers without useful `PannerNode` support
### Listening
- test with headphones first
- verify that HRTF does not make near-field sounds too quiet or too harsh
- verify that movement/facing updates do not create pumping artifacts
## Recommended Implementation Order
1. Add spatial mode setting and persistence.
2. Add shared spatial node runtime/helper.
3. Convert peer voice and one-shot spatial samples in `AudioEngine`.
4. Convert radios and item emitters.
5. Tune listener orientation and gain curves.
6. Convert piano if the result still feels worth it after the first pass.
## Bottom Line
HRTF is possible here, but the codebase is not yet one-node-centralized enough to make it a trivial switch.
The good news is that the architecture is already close:
- common spatial math exists
- spatial sources are clearly identified
- most of the remaining work is consolidating node creation and adding listener/source position handling for `PannerNode`
That makes this a realistic next-step audio feature, not a speculative rewrite.
The key design rule should be:
- centralize the spatial rendering layer where it is truly shared
- preserve all existing per-source and per-item behavior unless it is demonstrably duplicate

View File

@@ -3,7 +3,8 @@ import { getItemTypeGlobalProperties } from '../items/itemRegistry';
import { AudioEngine } from './audioEngine';
import { connectEffectChain, disconnectEffectRuntime, type EffectId, type EffectRuntime } from './effects';
import { normalizeRadioEffect, normalizeRadioEffectValue } from './radioStationRuntime';
import { applySpatialMixToNodes, resolveSpatialMix } from './spatial';
import { resolveSpatialMix } from './spatial';
import { applySpatialOutput, createSpatialOutputRuntime, disconnectSpatialOutputRuntime, type SpatialOutputRuntime } from './spatialOutput';
import { volumePercentToGain } from './volume';
type EmitOutput = {
@@ -18,7 +19,7 @@ type EmitOutput = {
initialDelaySeconds: number;
loopDelaySeconds: number;
gain: GainNode;
panner: StereoPannerNode | null;
spatialOutput: SpatialOutputRuntime;
};
type EmitResumeState = {
@@ -132,7 +133,7 @@ export class ItemEmitRuntime {
output.effectInput.disconnect();
disconnectEffectRuntime(output.effectRuntime);
output.gain.disconnect();
output.panner?.disconnect();
disconnectSpatialOutputRuntime(output.spatialOutput);
this.outputs.delete(itemId);
}
this.pendingEmitStarts.delete(itemId);
@@ -217,7 +218,6 @@ export class ItemEmitRuntime {
const effectInput = audioCtx.createGain();
const gain = audioCtx.createGain();
gain.gain.value = 0;
let panner: StereoPannerNode | null = null;
source.connect(effectInput);
const effect = normalizeRadioEffect(item.params.emitEffect);
const effectValue = normalizeRadioEffectValue(item.params.emitEffectValue);
@@ -321,12 +321,13 @@ export class ItemEmitRuntime {
}
}
const destination = this.audio.getOutputDestinationNode() ?? audioCtx.destination;
if (this.audio.supportsStereoPanner()) {
panner = audioCtx.createStereoPanner();
gain.connect(panner).connect(destination);
} else {
gain.connect(destination);
}
const spatialOutput = createSpatialOutputRuntime({
audioCtx,
inputNode: gain,
destination,
outputMode: this.audio.getOutputMode(),
spatialMode: this.audio.getSpatialMode(),
});
this.outputs.set(item.id, {
soundUrl,
element,
@@ -339,7 +340,7 @@ export class ItemEmitRuntime {
initialDelaySeconds,
loopDelaySeconds,
gain,
panner,
spatialOutput,
});
if (!matchingResumeState && !this.nextEmitStartAtMs.has(item.id) && initialDelaySeconds > 0) {
this.nextEmitStartAtMs.set(item.id, Date.now() + initialDelaySeconds * 1000);
@@ -422,13 +423,15 @@ export class ItemEmitRuntime {
});
const emitVolume = volumePercentToGain(item.params.emitVolume, 100);
const scaledMix = mix ? { ...mix, gain: mix.gain * emitVolume } : null;
applySpatialMixToNodes({
applySpatialOutput({
audioCtx,
runtime: output.spatialOutput,
gainNode: output.gain,
pannerNode: output.panner,
mix: scaledMix,
outputMode: this.audio.getOutputMode(),
transition: 'target',
dx: item.x - playerPosition.x,
dy: item.y - playerPosition.y,
});
this.tryStartEmitPlayback(itemId, output.element);
}

View File

@@ -0,0 +1,67 @@
const APP_BASE_PATH = import.meta.env.BASE_URL ?? '/';
/** Returns whether a hostname belongs to Dropbox domains that often need proxy support. */
function isDropboxHost(hostname: string): boolean {
  const host = hostname.toLowerCase();
  // Match the exact domain or a true subdomain. A bare suffix check such as
  // host.endsWith('dropbox.com') would also accept unrelated hosts like
  // 'evildropbox.com', which must not be treated as Dropbox (this result
  // decides whether a URL is routed through the same-origin media proxy).
  const isDomainOrSubdomain = (domain: string): boolean =>
    host === domain || host.endsWith(`.${domain}`);
  return isDomainOrSubdomain('dropbox.com') || isDomainOrSubdomain('dropboxusercontent.com');
}
/** Returns whether the URL already points at the local media proxy. */
function isLocalMediaProxyUrl(parsed: URL): boolean {
  if (parsed.origin !== window.location.origin) {
    return false;
  }
  const lowerPath = parsed.pathname.toLowerCase();
  return lowerPath.endsWith('/media_proxy.php');
}
/** Returns whether a direct radio stream URL should use the same-origin media proxy. */
export function shouldProxyRadioStreamUrl(streamUrl: string): boolean {
  try {
    const parsed = new URL(streamUrl);
    // Never re-proxy a URL that already targets the proxy endpoint.
    if (isLocalMediaProxyUrl(parsed)) {
      return false;
    }
    // Proxy all plain-HTTP streams, and HTTPS streams hosted on Dropbox domains.
    const needsProxy =
      parsed.protocol === 'http:' ||
      (parsed.protocol === 'https:' && isDropboxHost(parsed.hostname));
    return needsProxy;
  } catch {
    // Non-URL strings are never proxied.
    return false;
  }
}
/** Returns whether an arbitrary external media URL should be proxied before Web Audio playback. */
export function shouldProxyExternalMediaUrl(streamUrl: string): boolean {
  try {
    const parsed = new URL(streamUrl);
    const isHttpLike = parsed.protocol === 'http:' || parsed.protocol === 'https:';
    const isCrossOrigin = parsed.origin !== window.location.origin;
    // Proxy only cross-origin http(s) URLs that do not already point at the proxy.
    return isHttpLike && isCrossOrigin && !isLocalMediaProxyUrl(parsed);
  } catch {
    // Unparseable strings never go through the proxy.
    return false;
  }
}
/** Builds the same-origin proxy URL for one remote media URL. */
export function getProxyUrlForMedia(streamUrl: string): string {
  // Ensure the app base path ends with a slash before appending the endpoint.
  let base = APP_BASE_PATH;
  if (!base.endsWith('/')) {
    base = `${base}/`;
  }
  const proxyUrl = new URL(`${base}media_proxy.php`, window.location.origin);
  // The remote target travels as a query parameter; URL handles the escaping.
  proxyUrl.searchParams.set('url', streamUrl);
  return proxyUrl.toString();
}
/** Appends a cache-buster to a radio playback URL to avoid stale stream sessions. */
export function freshRadioPlaybackUrl(streamUrl: string): string {
const playbackSource = shouldProxyRadioStreamUrl(streamUrl) ? getProxyUrlForMedia(streamUrl) : streamUrl;
try {
const parsed = new URL(playbackSource);
const hostname = parsed.hostname.toLowerCase();
if (hostname.endsWith('dropbox.com') || hostname.endsWith('dropboxusercontent.com')) {
return playbackSource;
}
} catch {
// Leave non-URL strings to the generic cache-buster behavior below.
}
const separator = playbackSource.includes('?') ? '&' : '?';
return `${playbackSource}${separator}chgrid_start=${Date.now()}`;
}

View File

@@ -1,12 +1,13 @@
import { HEARING_RADIUS, type WorldItem } from '../state/gameState';
import { EFFECT_IDS, clampEffectLevel, connectEffectChain, disconnectEffectRuntime, type EffectId, type EffectRuntime } from './effects';
import { AudioEngine } from './audioEngine';
import { applySpatialMixToNodes, resolveSpatialMix } from './spatial';
import { resolveSpatialMix } from './spatial';
import { applySpatialOutput, createSpatialOutputRuntime, disconnectSpatialOutputRuntime, type SpatialOutputRuntime } from './spatialOutput';
import { volumePercentToGain } from './volume';
import { freshRadioPlaybackUrl, getProxyUrlForMedia, shouldProxyRadioStreamUrl } from './mediaUrl';
export const RADIO_CHANNEL_OPTIONS = ['stereo', 'mono', 'left', 'right'] as const;
export type RadioChannelMode = (typeof RADIO_CHANNEL_OPTIONS)[number];
const APP_BASE_PATH = import.meta.env.BASE_URL ?? '/';
type SharedRadioSource = {
streamUrl: string;
@@ -29,7 +30,7 @@ type ItemRadioOutput = {
effect: EffectId;
effectValue: number;
gain: GainNode;
panner: StereoPannerNode | null;
spatialOutput: SpatialOutputRuntime;
};
export function normalizeRadioEffect(effect: unknown): EffectId {
@@ -111,50 +112,12 @@ function connectRadioChannelSource(
};
}
/** Returns whether a hostname belongs to Dropbox domains that need proxy support. */
function isDropboxHost(hostname: string): boolean {
const host = hostname.toLowerCase();
return host.endsWith('dropbox.com') || host.endsWith('dropboxusercontent.com');
}
export function shouldProxyStreamUrl(streamUrl: string): boolean {
try {
const parsed = new URL(streamUrl);
if (
parsed.origin === window.location.origin &&
parsed.pathname.toLowerCase().endsWith('/media_proxy.php')
) {
return false;
}
if (parsed.protocol === 'http:') return true;
if (parsed.protocol === 'https:' && isDropboxHost(parsed.hostname)) return true;
} catch {
return false;
}
return false;
return shouldProxyRadioStreamUrl(streamUrl);
}
export function getProxyUrlForStream(streamUrl: string): string {
const normalizedBase = APP_BASE_PATH.endsWith('/') ? APP_BASE_PATH : `${APP_BASE_PATH}/`;
const proxy = new URL(`${normalizedBase}media_proxy.php`, window.location.origin);
proxy.searchParams.set('url', streamUrl);
return proxy.toString();
}
/** Appends a cache-buster query parameter to avoid stale stream buffers between sessions. */
function freshStreamUrl(streamUrl: string): string {
const playbackSource = shouldProxyStreamUrl(streamUrl) ? getProxyUrlForStream(streamUrl) : streamUrl;
try {
const parsed = new URL(playbackSource);
const hostname = parsed.hostname.toLowerCase();
if (hostname.endsWith('dropbox.com') || hostname.endsWith('dropboxusercontent.com')) {
return playbackSource;
}
} catch {
// Leave non-URL strings to the generic cache-buster behavior below.
}
const separator = playbackSource.includes('?') ? '&' : '?';
return `${playbackSource}${separator}chgrid_start=${Date.now()}`;
return getProxyUrlForMedia(streamUrl);
}
type RadioSpatialConfig = {
@@ -207,7 +170,7 @@ export class RadioStationRuntime {
output.effectInput.disconnect();
disconnectEffectRuntime(output.effectRuntime);
output.gain.disconnect();
output.panner?.disconnect();
disconnectSpatialOutputRuntime(output.spatialOutput);
this.itemRadioOutputs.delete(itemId);
this.releaseSharedSource(output.streamUrl);
}
@@ -303,13 +266,15 @@ export class RadioStationRuntime {
rearGain: 0.4,
},
});
applySpatialMixToNodes({
applySpatialOutput({
audioCtx,
runtime: output.spatialOutput,
gainNode: output.gain,
pannerNode: output.panner,
mix,
outputMode: this.audio.getOutputMode(),
transition: 'target',
dx: item.x - playerPosition.x,
dy: item.y - playerPosition.y,
});
}
}
@@ -352,7 +317,7 @@ export class RadioStationRuntime {
}
const audioCtx = this.audio.context;
if (!audioCtx) return null;
const element = new Audio(freshStreamUrl(streamUrl));
const element = new Audio(freshRadioPlaybackUrl(streamUrl));
element.crossOrigin = 'anonymous';
element.loop = true;
element.preload = 'none';
@@ -440,13 +405,13 @@ export class RadioStationRuntime {
const effectValue = normalizeRadioEffectValue(item.params.mediaEffectValue);
const effectRuntime = connectEffectChain(audioCtx, effectInput, gain, effect, effectValue);
const destination = this.audio.getOutputDestinationNode() ?? audioCtx.destination;
let panner: StereoPannerNode | null = null;
if (this.audio.supportsStereoPanner()) {
panner = audioCtx.createStereoPanner();
gain.connect(panner).connect(destination);
} else {
gain.connect(destination);
}
const spatialOutput = createSpatialOutputRuntime({
audioCtx,
inputNode: gain,
destination,
outputMode: this.audio.getOutputMode(),
spatialMode: this.audio.getSpatialMode(),
});
this.itemRadioOutputs.set(item.id, {
streamUrl,
channel,
@@ -461,7 +426,7 @@ export class RadioStationRuntime {
effect,
effectValue,
gain,
panner,
spatialOutput,
});
}

View File

@@ -0,0 +1,110 @@
import { SPATIAL_RAMP_SECONDS, SPATIAL_TIME_CONSTANT_SECONDS, type SpatialMixResult } from './spatial';
/** Speaker/downmix preference: 'mono' collapses panning, 'stereo' allows it. */
export type SpatialOutputMode = 'mono' | 'stereo';
/** Spatial rendering strategy: classic stereo pan vs. browser HRTF panning. */
export type SpatialRenderMode = 'classic' | 'hrtf';
/**
 * The panner stage one spatial source plays through:
 * - 'none'    — input connected straight to the destination (mono or no panner support)
 * - 'classic' — StereoPannerNode left/right pan
 * - 'hrtf'    — PannerNode using the HRTF panning model
 */
export type SpatialOutputRuntime =
  | { kind: 'none' }
  | { kind: 'classic'; node: StereoPannerNode }
  | { kind: 'hrtf'; node: PannerNode };

type CreateSpatialOutputOptions = {
  audioCtx: AudioContext;
  // Upstream node (typically the per-source gain) wired into the stage.
  inputNode: AudioNode;
  // Downstream node (master/sfx gain or the context destination).
  destination: AudioNode;
  outputMode: SpatialOutputMode;
  spatialMode: SpatialRenderMode;
};

type ApplySpatialOutputOptions = {
  audioCtx: AudioContext;
  runtime: SpatialOutputRuntime;
  // Gain node whose value is ramped toward the resolved mix gain.
  gainNode: GainNode;
  // Resolved spatial mix; null means inaudible (gain ramps to 0).
  mix: SpatialMixResult | null;
  outputMode: SpatialOutputMode;
  // 'linear' = fixed-duration ramp; 'target' = exponential approach.
  transition: 'linear' | 'target';
  // Listener-relative source offset on the grid; defaults to 0 when omitted.
  dx?: number;
  dy?: number;
};
/** Creates one spatial output stage using either stereo pan or HRTF panning. */
export function createSpatialOutputRuntime(options: CreateSpatialOutputOptions): SpatialOutputRuntime {
  const { audioCtx, inputNode, destination, outputMode, spatialMode } = options;
  // Wire the input straight through when no panner stage is wanted/possible.
  const passthrough = (): SpatialOutputRuntime => {
    inputNode.connect(destination);
    return { kind: 'none' };
  };
  // Mono output never pans, so skip the panner stage entirely.
  if (outputMode === 'mono') {
    return passthrough();
  }
  const wantsHrtf = spatialMode === 'hrtf' && typeof audioCtx.createPanner === 'function';
  if (wantsHrtf) {
    const node = audioCtx.createPanner();
    node.panningModel = 'HRTF';
    // Distance/direction attenuation is handled by the shared spatial mix,
    // so the panner applies no rolloff and an omnidirectional cone.
    node.distanceModel = 'inverse';
    node.refDistance = 1;
    node.maxDistance = 10000;
    node.rolloffFactor = 0;
    node.coneInnerAngle = 360;
    node.coneOuterAngle = 360;
    node.coneOuterGain = 1;
    const now = audioCtx.currentTime;
    node.positionX.setValueAtTime(0, now);
    node.positionY.setValueAtTime(0, now);
    node.positionZ.setValueAtTime(-1, now);
    inputNode.connect(node).connect(destination);
    return { kind: 'hrtf', node };
  }
  if (typeof audioCtx.createStereoPanner === 'function') {
    const node = audioCtx.createStereoPanner();
    inputNode.connect(node).connect(destination);
    return { kind: 'classic', node };
  }
  // No panner support at all: play the source unpanned.
  return passthrough();
}
/** Disconnects the current spatial output stage. */
export function disconnectSpatialOutputRuntime(runtime: SpatialOutputRuntime): void {
  switch (runtime.kind) {
    case 'classic':
    case 'hrtf':
      runtime.node.disconnect();
      break;
    default:
      // 'none' has no panner stage; the caller disconnects its own gain node.
      break;
  }
}
/** Schedules one AudioParam move using the shared spatial ramp/transition settings. */
function moveParam(
  param: AudioParam,
  value: number,
  now: number,
  transition: 'linear' | 'target',
): void {
  if (transition === 'linear') {
    param.cancelScheduledValues(now);
    param.linearRampToValueAtTime(value, now + SPATIAL_RAMP_SECONDS);
  } else {
    param.setTargetAtTime(value, now, SPATIAL_TIME_CONSTANT_SECONDS);
  }
}

/** Applies one resolved spatial mix to either stereo or HRTF output nodes. */
export function applySpatialOutput(options: ApplySpatialOutputOptions): void {
  const { audioCtx, runtime, gainNode, mix, outputMode, transition, dx = 0, dy = 0 } = options;
  const now = audioCtx.currentTime;
  // A null mix means the source is inaudible: ramp its gain to silence.
  moveParam(gainNode.gain, mix?.gain ?? 0, now, transition);
  switch (runtime.kind) {
    case 'none':
      // No panner stage to update.
      return;
    case 'classic': {
      // Mono output forces center pan; otherwise clamp the mix pan to [-1, 1].
      const pan = outputMode === 'mono' ? 0 : Math.max(-1, Math.min(1, mix?.pan ?? 0));
      moveParam(runtime.node.pan, pan, now, transition);
      return;
    }
    case 'hrtf': {
      // Map the grid offset onto Web Audio axes: dx -> x, dy -> -z, matching
      // the creation-time orientation of the panner.
      moveParam(runtime.node.positionX, dx, now, transition);
      moveParam(runtime.node.positionZ, -dy, now, transition);
      // Keep the source pinned to the listener's horizontal plane.
      runtime.node.positionY.setValueAtTime(0, now);
      return;
    }
  }
}

View File

@@ -5,6 +5,7 @@ export type MainModeCommand =
| 'editNickname'
| 'toggleMute'
| 'toggleOutputMode'
| 'toggleSpatialMode'
| 'toggleLoopback'
| 'toggleVoiceLayer'
| 'toggleItemLayer'
@@ -45,6 +46,7 @@ export type MainModeCommand =
*/
export function resolveMainModeCommand(code: string, shiftKey: boolean): MainModeCommand | null {
if (code === 'KeyN') return shiftKey ? null : 'editNickname';
if (code === 'KeyH') return shiftKey ? null : 'toggleSpatialMode';
if (code === 'KeyM') return shiftKey ? 'toggleOutputMode' : 'toggleMute';
if (code === 'Digit1') return shiftKey ? 'toggleLoopback' : 'toggleVoiceLayer';
if (code === 'Digit2') return shiftKey ? null : 'toggleItemLayer';

View File

@@ -38,6 +38,14 @@ const MAIN_MODE_COMMANDS: MainModeCommandDescriptor[] = [
section: 'Audio',
isAvailable: () => true,
},
{
id: 'toggleSpatialMode',
label: 'Toggle classic or HRTF spatial audio',
shortcut: 'H',
tooltip: 'Switch between classic stereo panning and HRTF spatial audio.',
section: 'Audio',
isAvailable: () => true,
},
{
id: 'toggleOutputMode',
label: 'Toggle stereo or mono output',

View File

@@ -5,9 +5,8 @@ import {
} from './audio/effects';
import {
RadioStationRuntime,
getProxyUrlForStream,
shouldProxyStreamUrl,
} from './audio/radioStationRuntime';
import { getProxyUrlForMedia, shouldProxyExternalMediaUrl } from './audio/mediaUrl';
import { ItemEmitRuntime } from './audio/itemEmitRuntime';
import { ClockAnnouncer } from './audio/clockAnnouncer';
import { normalizeDegrees } from './audio/spatial';
@@ -260,6 +259,7 @@ let lastFocusedElement: Element | null = null;
let lastAnnouncementText = '';
let lastAnnouncementAt = 0;
let outputMode = settings.loadOutputMode();
let spatialMode = settings.loadSpatialMode();
let activeGridName = DEFAULT_GRID_NAME;
let activeWelcomeMessage = DEFAULT_WELCOME_MESSAGE;
const messageBuffer: string[] = [];
@@ -354,6 +354,7 @@ const itemBehaviorRegistry = new ItemBehaviorRegistry({
});
audio.setOutputMode(outputMode);
audio.setSpatialMode(spatialMode);
loadEffectLevels();
loadAudioLayerState();
@@ -715,6 +716,17 @@ async function applyAudioLayerState(): Promise<void> {
await itemEmitRuntime.setLayerEnabled(audioLayers.item, state.items.values(), listenerPosition);
}
/** Rebuilds active spatial audio node graphs after output or spatial rendering mode changes. */
async function rebuildSpatialAudioGraphs(): Promise<void> {
peerManager.suspendRemoteAudio();
if (audioLayers.voice) {
await peerManager.resumeRemoteAudio();
}
radioRuntime.cleanupAll();
itemEmitRuntime.cleanupAll();
await refreshAudioSubscriptionsAt({ x: state.player.x, y: state.player.y }, true);
}
/** Refreshes distance-gated radio/item stream subscriptions for a listener position. */
async function refreshAudioSubscriptionsAt(listenerPosition: { x: number; y: number }, force = false): Promise<void> {
await refreshAudioSubscriptionsForListeners([listenerPosition], force);
@@ -848,7 +860,7 @@ function resolveIncomingSoundUrl(url: string): string {
const lowered = raw.toLowerCase();
if (lowered === 'none' || lowered === 'off') return '';
if (/^https?:/i.test(raw)) {
return shouldProxyStreamUrl(raw) ? getProxyUrlForStream(raw) : raw;
return shouldProxyExternalMediaUrl(raw) ? getProxyUrlForMedia(raw) : raw;
}
if (/^(data:|blob:)/i.test(raw)) return raw;
if (raw.startsWith('/sounds/')) {
@@ -1735,6 +1747,15 @@ function toggleOutputModeCommand(): void {
mediaSession.saveOutputMode(outputMode);
updateStatus(outputMode === 'mono' ? 'Mono output.' : 'Stereo output.');
audio.sfxUiBlip();
void rebuildSpatialAudioGraphs();
}
function toggleSpatialModeCommand(): void {
spatialMode = audio.toggleSpatialMode();
settings.saveSpatialMode(spatialMode);
updateStatus(spatialMode === 'hrtf' ? 'HRTF spatial audio.' : 'Classic spatial audio.');
audio.sfxUiBlip();
void rebuildSpatialAudioGraphs();
}
function toggleLoopbackCommand(): void {
@@ -2044,6 +2065,7 @@ function escapeCommand(): void {
const mainModeCommandHandlers: Record<MainModeCommand, () => void> = {
editNickname: openNicknameEditor,
toggleMute,
toggleSpatialMode: toggleSpatialModeCommand,
toggleOutputMode: toggleOutputModeCommand,
toggleLoopback: toggleLoopbackCommand,
toggleVoiceLayer: () => toggleAudioLayer('voice'),

View File

@@ -6,6 +6,7 @@ const AUDIO_OUTPUT_STORAGE_KEY = 'chatGridAudioOutputDeviceId';
const AUDIO_INPUT_NAME_STORAGE_KEY = 'chatGridAudioInputDeviceName';
const AUDIO_OUTPUT_NAME_STORAGE_KEY = 'chatGridAudioOutputDeviceName';
const AUDIO_OUTPUT_MODE_STORAGE_KEY = 'chatGridAudioOutputMode';
const AUDIO_SPATIAL_MODE_STORAGE_KEY = 'chatGridAudioSpatialMode';
const AUDIO_LAYER_STATE_STORAGE_KEY = 'chatGridAudioLayers';
const MIC_INPUT_GAIN_STORAGE_KEY = 'chatGridMicInputGain';
const MASTER_VOLUME_STORAGE_KEY = 'chatGridMasterVolume';
@@ -146,6 +147,14 @@ export class SettingsStore {
localStorage.setItem(AUDIO_OUTPUT_MODE_STORAGE_KEY, value);
}
loadSpatialMode(): 'classic' | 'hrtf' {
return localStorage.getItem(AUDIO_SPATIAL_MODE_STORAGE_KEY) === 'hrtf' ? 'hrtf' : 'classic';
}
saveSpatialMode(value: 'classic' | 'hrtf'): void {
localStorage.setItem(AUDIO_SPATIAL_MODE_STORAGE_KEY, value);
}
loadAudioDevicePreferences(): AudioDevicePreferences {
return {
input: {

View File

@@ -42,6 +42,7 @@ This document is the authoritative keymap for the client.
- `V`: Set microphone gain
- `Shift+V`: Microphone calibration
- `M`: Mute/unmute local microphone
- `H`: Toggle classic/HRTF spatial audio
- `Shift+M`: Toggle stereo/mono output
- `Shift+1` (`!`): Toggle loopback monitor
- `1`: Toggle voice layer