From 62d422b115ef6214f46355671d2d77efbcb2b815 Mon Sep 17 00:00:00 2001 From: Tyler Nickerson Date: Thu, 9 Oct 2025 22:58:30 -0400 Subject: [PATCH 1/5] wip --- src/recorder.ts | 540 ++++++++++++++++++++++++++---------------------- src/types.ts | 31 +-- 2 files changed, 309 insertions(+), 262 deletions(-) diff --git a/src/recorder.ts b/src/recorder.ts index adb052d..59e7888 100644 --- a/src/recorder.ts +++ b/src/recorder.ts @@ -1,255 +1,295 @@ import type { - AudioEventListener, - AudioEventListenerMap, - AudioEventListeners, - AudioRecorderOptions, - InstallWorkletVars, + AudioEventListener, + AudioEventListenerMap, + CreateAnalyzerOptions, + AudioEventListeners, + AudioRecorderOptions, + InstallWorkletVars, } from "./types.js"; export class AudioRecorder { - /** - * The live audio context - */ - #context?: AudioContext; - - /** - * Dictionary of event listeners associated with the recorder - */ - #listeners: AudioEventListeners = {}; - - /** - * Buffer of currently recorded blob chunks - */ - #buffer?: Blob[] = []; - - /** - * Underlying media recorder object - */ - #recorder?: MediaRecorder; - - /** - * The currently active media stream - */ - #stream?: MediaStream; - - constructor(private readonly options: AudioRecorderOptions = {}) {} - - /** - * Lists all of the users available audio devices - * @returns The list of Device objects - */ - static async listDevices(): Promise { - return navigator.mediaDevices - .enumerateDevices() - .then((list) => list.filter((d) => d.kind === "audioinput")); - } - - /** - * Returns the best available mime type from the provided options - * or undefined if none is supported - * - * @returns The supported mime type or undefined - */ - get mimeType(): string | undefined { - return Array.isArray(this.options.mimeType) - ? 
this.options.mimeType.find((type) => - MediaRecorder.isTypeSupported(type), - ) - : this.options.mimeType; - } - - /** - * Returns the active audio context or creates one if one doesn't exist - * - * @returns The AudioContext object - */ - get audioContext(): AudioContext { - if (!this.#context) { - this.#context = new AudioContext(); - } - - return this.#context; - } - - get state(): RecordingState | undefined { - return this.#recorder?.state; - } - - /** - * Returns the active audio recorder or creates one if one doesn't exist - * - * @returns The MediaRecorder object - */ - async #getAudioRecorder(): Promise { - if (!this.#recorder) { - this.#recorder = await this.#createAudioRecorder(); - } - - return this.#recorder; - } - - /** - * Returns the active audio stream or creates one if one doesn't exist - * - * @returns The MediaStream object - */ - async #getAudioStream(): Promise { - if (!this.#stream) { - const { deviceId } = this.options; - this.#stream = await navigator.mediaDevices.getUserMedia({ - audio: deviceId ? { deviceId } : true, - video: false, - }); - } - - return this.#stream; - } - - /** - * Starts recording audio using the given device ID or, if none is provided, the default device - * @param deviceId Optional device ID to record with - */ - async start(): Promise { - const recorder = await this.#getAudioRecorder(); - recorder.start(); - } - - /** - * Stops recording - * @returns The recorded data as a Blob object - */ - async stop(): Promise { - return new Promise((resolve) => { - // Wait for the audio to stop and for the data to be available - this.#recorder?.addEventListener("stop", () => { - const blob = new Blob(this.#buffer); - - this.#buffer = []; - - for (const track of this.#stream?.getTracks() ?? 
[]) { - track.stop(); - } - - this.#recorder = undefined; - this.#stream = undefined; - - void this.#context?.suspend(); - - resolve(blob); - }); - - this.#recorder?.stop(); - }); - } - - /** - * Installs a custom audio worklet to the current audio context - * - * @param name The registered name of the worklet - * @param path The absolute path to the worklet - * @param callback A registration callback containing the current audio context, audio stream, and worklet node - */ - async installWorklet( - name: string, - path: string, - ): Promise { - const stream = await this.#getAudioStream(); - - await this.audioContext.audioWorklet.addModule(path); - - const node = new AudioWorkletNode(this.audioContext, name); - - return { context: this.audioContext, stream, node }; - } - - /** - * Attaches an event listener to the recorder - * @param eventName The name of the event - * @param callback The callback - */ - on( - eventName: T, - callback: AudioEventListener, - ) { - if (!this.#listeners[eventName]) { - this.#listeners[eventName] = []; - } - - this.#listeners[eventName]?.push(callback); - } - - /** - * Initializes a new audio recorder with the correct event listeners attached - * @param stream The MediaStream object for which to create the recorder - * @param options Recorder options - * @returns - */ - async #createAudioRecorder(): Promise { - const stream = await this.#getAudioStream(); - - const recorder = new MediaRecorder(stream, { - ...this.options, - mimeType: this.mimeType, - }); - - if ("volumechange" in this.#listeners) { - await this.setupAudioMeter(); - } - - recorder.addEventListener("dataavailable", ({ data }) => { - if (data.size > 0) { - this.#buffer?.push(data); - } - }); - - return recorder; - } - - /** - * Triggers an event - * - * @param name Event name to trigger - * @param event Event payload (if any) - */ - private fireEvent( - name: T, - event: AudioEventListenerMap[T], - ) { - for (const listener of this.#listeners?.[name] ?? 
[]) { - listener(event); - } - } - - /** - * Resolves the path to the worklet with the specified name - * using the global options - * - * @param name The filename of the worklet to resolve - * @returns The absolute path to the worklet - */ - private getWorkletPath(name: string): string { - return [this.options.workletPath ?? "worklets", name].join("/"); - } - - /** - * Sets up audio metering if a volumechange listener is attached - */ - private async setupAudioMeter(): Promise { - const { node, context, stream } = await this.installWorklet( - "volume-meter", - this.getWorkletPath("volume-meter.js"), - ); - - const micNode = context.createMediaStreamSource(stream); - - node.port.addEventListener("message", ({ data }) => { - this.fireEvent("volumechange", { - volume: data as number, - }); - }); - - node.port.start(); - - micNode.connect(node).connect(context.destination); - } + /** + * The live audio context + */ + #context?: AudioContext; + + /** + * Dictionary of event listeners associated with the recorder + */ + #listeners: AudioEventListeners = {}; + + /** + * Buffer of currently recorded blob chunks + */ + #buffer?: Blob[] = []; + + /** + * Array of analyser nodes associated with the recorder + */ + #analyzers: AnalyserNode[] = []; + + /** + * Underlying media recorder object + */ + #recorder?: MediaRecorder; + + /** + * The currently active media stream + */ + #stream?: MediaStream; + + constructor(private readonly options: AudioRecorderOptions = {}) {} + + /** + * Lists all of the users available audio devices + * @returns The list of Device objects + */ + static async listDevices(): Promise { + return navigator.mediaDevices + .enumerateDevices() + .then((list) => list.filter((d) => d.kind === "audioinput")); + } + + /** + * Returns the best available mime type from the provided options + * or undefined if none is supported + * + * @returns The supported mime type or undefined + */ + get mimeType(): string | undefined { + return 
Array.isArray(this.options.mimeType) + ? this.options.mimeType.find((type) => + MediaRecorder.isTypeSupported(type), + ) + : this.options.mimeType; + } + + /** + * Returns the active audio context or creates one if one doesn't exist + * + * @returns The AudioContext object + */ + get audioContext(): AudioContext { + if (!this.#context) { + this.#context = new AudioContext(); + } + + return this.#context; + } + + get state(): RecordingState | undefined { + return this.#recorder?.state; + } + + /** + * Returns the active audio recorder or creates one if one doesn't exist + * + * @returns The MediaRecorder object + */ + async #getAudioRecorder(): Promise { + if (!this.#recorder) { + this.#recorder = await this.#createAudioRecorder(); + } + + return this.#recorder; + } + + /** + * Returns the active audio stream or creates one if one doesn't exist + * + * @returns The MediaStream object + */ + async #getAudioStream(): Promise { + if (!this.#stream) { + const { deviceId } = this.options; + this.#stream = await navigator.mediaDevices.getUserMedia({ + audio: deviceId ? { deviceId } : true, + video: false, + }); + } + + return this.#stream; + } + + /** + * Starts recording audio using the given device ID or, if none is provided, the default device + * @param deviceId Optional device ID to record with + */ + async start(): Promise { + const recorder = await this.#getAudioRecorder(); + recorder.start(); + } + + /** + * Stops recording + * @returns The recorded data as a Blob object + */ + async stop(): Promise { + return new Promise((resolve) => { + // Wait for the audio to stop and for the data to be available + this.#recorder?.addEventListener("stop", () => { + const blob = new Blob(this.#buffer); + + this.#buffer = []; + + for (const track of this.#stream?.getTracks() ?? 
[]) { + track.stop(); + } + + this.#recorder = undefined; + this.#stream = undefined; + + void this.#context?.suspend(); + + resolve(blob); + }); + + this.#recorder?.stop(); + }); + } + + async createAnalyzer({ + fftSize, + minDecibels, + maxDecibels, + smoothingTimeConstant, + }: CreateAnalyzerOptions): Promise { + const analyzer = this.audioContext.createAnalyser(); + + if (fftSize) { + analyzer.fftSize = fftSize; + } + + if (minDecibels) { + analyzer.minDecibels = minDecibels; + } + + if (maxDecibels) { + analyzer.maxDecibels = maxDecibels; + } + + if (smoothingTimeConstant) { + analyzer.smoothingTimeConstant = smoothingTimeConstant; + } + + this.#analyzers.push(analyzer); + + return analyzer; + } + + /** + * Installs a custom audio worklet to the current audio context + * + * @param name The registered name of the worklet + * @param path The absolute path to the worklet + * @param callback A registration callback containing the current audio context, audio stream, and worklet node + */ + async installWorklet( + name: string, + path: string, + ): Promise { + const stream = await this.#getAudioStream(); + + await this.audioContext.audioWorklet.addModule(path); + + const node = new AudioWorkletNode(this.audioContext, name); + + return { context: this.audioContext, stream, node }; + } + + /** + * Attaches an event listener to the recorder + * @param eventName The name of the event + * @param callback The callback + */ + on( + eventName: T, + callback: AudioEventListener, + ) { + if (!this.#listeners[eventName]) { + this.#listeners[eventName] = []; + } + + this.#listeners[eventName]?.push(callback); + } + + /** + * Initializes a new audio recorder with the correct event listeners attached + * @param stream The MediaStream object for which to create the recorder + * @param options Recorder options + * @returns + */ + async #createAudioRecorder(): Promise { + const stream = await this.#getAudioStream(); + const source = 
this.audioContext.createMediaStreamSource(stream); + + for (const analyzer of this.#analyzers) { + analyzer.connect(source); + } + + const recorder = new MediaRecorder(stream, { + ...this.options, + mimeType: this.mimeType, + }); + + if ("volumechange" in this.#listeners) { + await this.setupAudioMeter(); + } + + recorder.addEventListener("dataavailable", ({ data }) => { + if (data.size > 0) { + this.#buffer?.push(data); + } + }); + + return recorder; + } + + /** + * Triggers an event + * + * @param name Event name to trigger + * @param event Event payload (if any) + */ + private fireEvent( + name: T, + event: AudioEventListenerMap[T], + ) { + for (const listener of this.#listeners?.[name] ?? []) { + listener(event); + } + } + + /** + * Resolves the path to the worklet with the specified name + * using the global options + * + * @param name The filename of the worklet to resolve + * @returns The absolute path to the worklet + */ + private getWorkletPath(name: string): string { + return [this.options.workletPath ?? 
"worklets", name].join("/"); + } + + /** + * Sets up audio metering if a volumechange listener is attached + */ + private async setupAudioMeter(): Promise { + const { node, context, stream } = await this.installWorklet( + "volume-meter", + this.getWorkletPath("volume-meter.js"), + ); + + const micNode = context.createMediaStreamSource(stream); + + node.port.addEventListener("message", ({ data }) => { + this.fireEvent("volumechange", { + volume: data as number, + }); + }); + + node.port.start(); + + micNode.connect(node).connect(context.destination); + } } diff --git a/src/types.ts b/src/types.ts index 057f19b..ef58043 100644 --- a/src/types.ts +++ b/src/types.ts @@ -1,30 +1,37 @@ type Volume = number; interface VolumeChangeEvent { - volume: Volume; + volume: Volume; +} + +export interface CreateAnalyzerOptions { + fftSize?: number; + minDecibels?: number; + maxDecibels?: number; + smoothingTimeConstant?: number; } export interface InstallWorkletVars { - context: AudioContext; - stream: MediaStream; - node: AudioWorkletNode; + context: AudioContext; + stream: MediaStream; + node: AudioWorkletNode; } export interface AudioRecorderOptions - extends Omit { - deviceId?: string; - workletPath?: string; - mimeType?: string | string[]; + extends Omit { + deviceId?: string; + workletPath?: string; + mimeType?: string | string[]; } export interface AudioEventListenerMap { - volumechange: VolumeChangeEvent; + volumechange: VolumeChangeEvent; } export type AudioEventListener = (event: T) => void; export type AudioEventListeners = { - [k in keyof AudioEventListenerMap]?: Array< - AudioEventListener - >; + [k in keyof AudioEventListenerMap]?: Array< + AudioEventListener + >; }; From 49eab4ebb3a2634770ebfe3a3c30224e8b53ecc6 Mon Sep 17 00:00:00 2001 From: Tyler Nickerson Date: Fri, 10 Oct 2025 10:59:07 -0400 Subject: [PATCH 2/5] wip --- src/recorder.ts | 155 ++++++++++++++++-------------------------------- src/types.ts | 7 +-- 2 files changed, 51 insertions(+), 111 deletions(-) 
diff --git a/src/recorder.ts b/src/recorder.ts index 59e7888..73a7592 100644 --- a/src/recorder.ts +++ b/src/recorder.ts @@ -16,7 +16,9 @@ export class AudioRecorder { /** * Dictionary of event listeners associated with the recorder */ - #listeners: AudioEventListeners = {}; + #listeners: Partial<{ + [K in keyof MediaRecorderEventMap]: MediaRecorderEventMap[K][]; + }> = {}; /** * Buffer of currently recorded blob chunks @@ -33,6 +35,11 @@ export class AudioRecorder { */ #recorder?: MediaRecorder; + /** + * Underlying media stream source node + */ + #source?: MediaStreamAudioSourceNode; + /** * The currently active media stream */ @@ -50,6 +57,20 @@ export class AudioRecorder { .then((list) => list.filter((d) => d.kind === "audioinput")); } + async getSource(): Promise { + if (!this.#source) { + this.#source = this.#audioContext.createMediaStreamSource( + await this.getStream(), + ); + } + + return this.#source; + } + + get destination(): AudioDestinationNode { + return this.#audioContext.destination; + } + /** * Returns the best available mime type from the provided options * or undefined if none is supported @@ -69,7 +90,7 @@ export class AudioRecorder { * * @returns The AudioContext object */ - get audioContext(): AudioContext { + get #audioContext(): AudioContext { if (!this.#context) { this.#context = new AudioContext(); } @@ -86,9 +107,26 @@ export class AudioRecorder { * * @returns The MediaRecorder object */ - async #getAudioRecorder(): Promise { + async #getMediaRecorder(): Promise { if (!this.#recorder) { - this.#recorder = await this.#createAudioRecorder(); + const stream = await this.getStream(); + + this.#recorder = new MediaRecorder(stream, { + ...this.options, + mimeType: this.mimeType, + }); + + this.#recorder.addEventListener("dataavailable", ({ data }) => { + if (data.size > 0) { + this.#buffer?.push(data); + } + }); + + for (const [type, callbacks] of Object.entries(this.#listeners)) { + for (const callback of callbacks) { + 
this.#recorder.addEventListener(type, callback as any); + } + } } return this.#recorder; @@ -99,7 +137,7 @@ export class AudioRecorder { * * @returns The MediaStream object */ - async #getAudioStream(): Promise { + async getStream(): Promise { if (!this.#stream) { const { deviceId } = this.options; this.#stream = await navigator.mediaDevices.getUserMedia({ @@ -116,7 +154,7 @@ export class AudioRecorder { * @param deviceId Optional device ID to record with */ async start(): Promise { - const recorder = await this.#getAudioRecorder(); + const recorder = await this.#getMediaRecorder(); recorder.start(); } @@ -154,7 +192,7 @@ export class AudioRecorder { maxDecibels, smoothingTimeConstant, }: CreateAnalyzerOptions): Promise { - const analyzer = this.audioContext.createAnalyser(); + const analyzer = this.#audioContext.createAnalyser(); if (fftSize) { analyzer.fftSize = fftSize; @@ -182,19 +220,10 @@ export class AudioRecorder { * * @param name The registered name of the worklet * @param path The absolute path to the worklet - * @param callback A registration callback containing the current audio context, audio stream, and worklet node */ - async installWorklet( - name: string, - path: string, - ): Promise { - const stream = await this.#getAudioStream(); - - await this.audioContext.audioWorklet.addModule(path); - - const node = new AudioWorkletNode(this.audioContext, name); - - return { context: this.audioContext, stream, node }; + async createWorklet(name: string, path: string): Promise { + await this.#audioContext.audioWorklet.addModule(path); + return new AudioWorkletNode(this.#audioContext, name); } /** @@ -202,94 +231,10 @@ export class AudioRecorder { * @param eventName The name of the event * @param callback The callback */ - on( + on( eventName: T, - callback: AudioEventListener, + callback: MediaRecorderEventMap[T], ) { - if (!this.#listeners[eventName]) { - this.#listeners[eventName] = []; - } - this.#listeners[eventName]?.push(callback); } - - /** - * 
Initializes a new audio recorder with the correct event listeners attached - * @param stream The MediaStream object for which to create the recorder - * @param options Recorder options - * @returns - */ - async #createAudioRecorder(): Promise { - const stream = await this.#getAudioStream(); - const source = this.audioContext.createMediaStreamSource(stream); - - for (const analyzer of this.#analyzers) { - analyzer.connect(source); - } - - const recorder = new MediaRecorder(stream, { - ...this.options, - mimeType: this.mimeType, - }); - - if ("volumechange" in this.#listeners) { - await this.setupAudioMeter(); - } - - recorder.addEventListener("dataavailable", ({ data }) => { - if (data.size > 0) { - this.#buffer?.push(data); - } - }); - - return recorder; - } - - /** - * Triggers an event - * - * @param name Event name to trigger - * @param event Event payload (if any) - */ - private fireEvent( - name: T, - event: AudioEventListenerMap[T], - ) { - for (const listener of this.#listeners?.[name] ?? []) { - listener(event); - } - } - - /** - * Resolves the path to the worklet with the specified name - * using the global options - * - * @param name The filename of the worklet to resolve - * @returns The absolute path to the worklet - */ - private getWorkletPath(name: string): string { - return [this.options.workletPath ?? 
"worklets", name].join("/"); - } - - /** - * Sets up audio metering if a volumechange listener is attached - */ - private async setupAudioMeter(): Promise { - const { node, context, stream } = await this.installWorklet( - "volume-meter", - this.getWorkletPath("volume-meter.js"), - ); - - const micNode = context.createMediaStreamSource(stream); - - node.port.addEventListener("message", ({ data }) => { - this.fireEvent("volumechange", { - volume: data as number, - }); - }); - - node.port.start(); - - micNode.connect(node).connect(context.destination); - } } diff --git a/src/types.ts b/src/types.ts index ef58043..482dee7 100644 --- a/src/types.ts +++ b/src/types.ts @@ -11,12 +11,6 @@ export interface CreateAnalyzerOptions { smoothingTimeConstant?: number; } -export interface InstallWorkletVars { - context: AudioContext; - stream: MediaStream; - node: AudioWorkletNode; -} - export interface AudioRecorderOptions extends Omit { deviceId?: string; @@ -26,6 +20,7 @@ export interface AudioRecorderOptions export interface AudioEventListenerMap { volumechange: VolumeChangeEvent; + stop: Event; } export type AudioEventListener = (event: T) => void; From 536c6db4b42279e476ea15da601c7419e1b2779c Mon Sep 17 00:00:00 2001 From: Tyler Nickerson Date: Sat, 11 Oct 2025 14:05:13 -0400 Subject: [PATCH 3/5] wip --- src/analyser.ts | 82 ++++++++++++ src/index.ts | 2 + src/monitor.ts | 50 ++++++++ src/recorder.ts | 162 ++++-------------------- src/types.ts | 34 ++--- src/utils.ts | 32 +++++ tests/devices.spec.js | 198 ++++++++++++++--------------- tests/recorder.spec.js | 278 ++++++++++++++++++++++++----------------- tests/worklets.spec.js | 252 +++++++------------------------------ 9 files changed, 496 insertions(+), 594 deletions(-) create mode 100644 src/analyser.ts create mode 100644 src/monitor.ts create mode 100644 src/utils.ts diff --git a/src/analyser.ts b/src/analyser.ts new file mode 100644 index 0000000..f024096 --- /dev/null +++ b/src/analyser.ts @@ -0,0 +1,82 @@ +export class 
Analyser { + readonly node: AnalyserNode; + readonly #data: Uint8Array; + + constructor( + readonly context: AudioContext = new AudioContext(), + readonly options: AnalyserOptions = {}, + ) { + this.node = this.context.createAnalyser(); + + if (options?.fftSize) { + this.node.fftSize = options.fftSize; + } + + if (options?.minDecibels) { + this.node.minDecibels = options.minDecibels; + } + + if (options?.maxDecibels) { + this.node.maxDecibels = options.maxDecibels; + } + + if (options?.smoothingTimeConstant) { + this.node.smoothingTimeConstant = options.smoothingTimeConstant; + } + + if (options?.channelCount) { + this.node.channelCount = options.channelCount; + } + + if (options?.channelInterpretation) { + this.node.channelInterpretation = options.channelInterpretation; + } + + if (options?.channelCountMode) { + this.node.channelCountMode = options.channelCountMode; + } + + this.#data = new Uint8Array(this.node.frequencyBinCount); + } + + /** + * Returns the frequency data provided by the default analyzer + */ + get frequencyData(): Uint8Array { + this.node.getByteFrequencyData(this.#data); + return this.#data; + } + + /** + * Retrieves the current volume (average of amplitude^2) + */ + get volume(): number { + const data = this.frequencyData; + + let sum = 0; + + for (const amplitude of data) { + sum += amplitude * amplitude; + } + + return Math.sqrt(sum / data.length); + } + + connect( + destinationNode: AudioNode, + output?: number, + input?: number, + ): AudioNode; + connect(destinationParam: AudioParam, output?: number): void; + connect( + destination: AudioNode | AudioParam, + output?: number, + input?: number, + ): AudioNode | undefined { + if (destination instanceof AudioNode) { + return this.node.connect(destination, output, input); + } else { + this.node.connect(destination, output); + } + } +} diff --git a/src/index.ts b/src/index.ts index a638eab..d1ef2ab 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,2 +1,4 @@ export * from "./recorder.js"; export 
* from "./types.js"; +export * from "./utils.js"; +export * from "./analyser.js"; diff --git a/src/monitor.ts b/src/monitor.ts new file mode 100644 index 0000000..01410e3 --- /dev/null +++ b/src/monitor.ts @@ -0,0 +1,50 @@ +import { Analyser } from "./analyser.js"; + +export interface MonitorOptions { + context?: AudioContext; + defaultAnalyser?: Analyser; +} + +export class Monitor { + readonly context: AudioContext; + readonly source: MediaStreamAudioSourceNode; + readonly destination: AudioDestinationNode; + readonly analyser: Analyser; + + constructor( + readonly stream: MediaStream, + private readonly options: MonitorOptions, + ) { + this.context = this.options.context ?? new AudioContext(); + this.destination = this.context.destination; + this.source = this.context.createMediaStreamSource(this.stream); + this.analyser = this.options.defaultAnalyser ?? new Analyser(this.context); + this.source.connect(this.analyser.node); + this.analyser.connect(this.destination); + } + + /** + * Retrieves the current volume (average of amplitude^2) + */ + get volume() { + return this.analyser.volume; + } + + /** + * Retrieves the current analyzer's frequency data + */ + get frequencyData() { + return this.analyser.frequencyData; + } + + /** + * Adds a custom audio worklet to the current audio context + * + * @param name The registered name of the worklet + * @param path The absolute path to the worklet + */ + async installWorklet(name: string, path: string): Promise { + await this.context.audioWorklet.addModule(path); + return new AudioWorkletNode(this.context, name); + } +} diff --git a/src/recorder.ts b/src/recorder.ts index 73a7592..be78be0 100644 --- a/src/recorder.ts +++ b/src/recorder.ts @@ -1,17 +1,12 @@ -import type { - AudioEventListener, - AudioEventListenerMap, - CreateAnalyzerOptions, - AudioEventListeners, - AudioRecorderOptions, - InstallWorkletVars, -} from "./types.js"; - -export class AudioRecorder { - /** - * The live audio context - */ - #context?: 
AudioContext; +import { Analyser } from "./analyser.js"; +import { Monitor, type MonitorOptions } from "./monitor.js"; + +export interface RecorderOptions extends MonitorOptions { + mimeType?: string | string[]; +} + +export class Recorder extends Monitor { + readonly mimeType: string | undefined; /** * Dictionary of event listeners associated with the recorder @@ -25,79 +20,27 @@ export class AudioRecorder { */ #buffer?: Blob[] = []; - /** - * Array of analyser nodes associated with the recorder - */ - #analyzers: AnalyserNode[] = []; - /** * Underlying media recorder object */ #recorder?: MediaRecorder; - /** - * Underlying media stream source node - */ - #source?: MediaStreamAudioSourceNode; - /** * The currently active media stream */ #stream?: MediaStream; - constructor(private readonly options: AudioRecorderOptions = {}) {} - - /** - * Lists all of the users available audio devices - * @returns The list of Device objects - */ - static async listDevices(): Promise { - return navigator.mediaDevices - .enumerateDevices() - .then((list) => list.filter((d) => d.kind === "audioinput")); - } - - async getSource(): Promise { - if (!this.#source) { - this.#source = this.#audioContext.createMediaStreamSource( - await this.getStream(), - ); - } - - return this.#source; - } - - get destination(): AudioDestinationNode { - return this.#audioContext.destination; - } + constructor(stream: MediaStream, options: RecorderOptions = {}) { + super(stream, options); - /** - * Returns the best available mime type from the provided options - * or undefined if none is supported - * - * @returns The supported mime type or undefined - */ - get mimeType(): string | undefined { - return Array.isArray(this.options.mimeType) - ? this.options.mimeType.find((type) => - MediaRecorder.isTypeSupported(type), - ) - : this.options.mimeType; + this.mimeType = Array.isArray(options.mimeType) + ? 
options.mimeType.find((type) => MediaRecorder.isTypeSupported(type)) + : options.mimeType; } /** - * Returns the active audio context or creates one if one doesn't exist - * - * @returns The AudioContext object + * The recording state of the media recorder */ - get #audioContext(): AudioContext { - if (!this.#context) { - this.#context = new AudioContext(); - } - - return this.#context; - } - get state(): RecordingState | undefined { return this.#recorder?.state; } @@ -107,11 +50,9 @@ export class AudioRecorder { * * @returns The MediaRecorder object */ - async #getMediaRecorder(): Promise { + get mediaRecorder(): MediaRecorder { if (!this.#recorder) { - const stream = await this.getStream(); - - this.#recorder = new MediaRecorder(stream, { + this.#recorder = new MediaRecorder(this.stream, { ...this.options, mimeType: this.mimeType, }); @@ -132,30 +73,12 @@ export class AudioRecorder { return this.#recorder; } - /** - * Returns the active audio stream or creates one if one doesn't exist - * - * @returns The MediaStream object - */ - async getStream(): Promise { - if (!this.#stream) { - const { deviceId } = this.options; - this.#stream = await navigator.mediaDevices.getUserMedia({ - audio: deviceId ? 
{ deviceId } : true, - video: false, - }); - } - - return this.#stream; - } - /** * Starts recording audio using the given device ID or, if none is provided, the default device - * @param deviceId Optional device ID to record with + * @param timeslice Optional timeslice in milliseconds */ - async start(): Promise { - const recorder = await this.#getMediaRecorder(); - recorder.start(); + start(timeslice?: number) { + return this.mediaRecorder.start(timeslice); } /** @@ -177,7 +100,7 @@ export class AudioRecorder { this.#recorder = undefined; this.#stream = undefined; - void this.#context?.suspend(); + void this.context?.suspend(); resolve(blob); }); @@ -186,46 +109,6 @@ export class AudioRecorder { }); } - async createAnalyzer({ - fftSize, - minDecibels, - maxDecibels, - smoothingTimeConstant, - }: CreateAnalyzerOptions): Promise { - const analyzer = this.#audioContext.createAnalyser(); - - if (fftSize) { - analyzer.fftSize = fftSize; - } - - if (minDecibels) { - analyzer.minDecibels = minDecibels; - } - - if (maxDecibels) { - analyzer.maxDecibels = maxDecibels; - } - - if (smoothingTimeConstant) { - analyzer.smoothingTimeConstant = smoothingTimeConstant; - } - - this.#analyzers.push(analyzer); - - return analyzer; - } - - /** - * Installs a custom audio worklet to the current audio context - * - * @param name The registered name of the worklet - * @param path The absolute path to the worklet - */ - async createWorklet(name: string, path: string): Promise { - await this.#audioContext.audioWorklet.addModule(path); - return new AudioWorkletNode(this.#audioContext, name); - } - /** * Attaches an event listener to the recorder * @param eventName The name of the event @@ -235,6 +118,9 @@ export class AudioRecorder { eventName: T, callback: MediaRecorderEventMap[T], ) { + if (!this.#listeners[eventName]) { + this.#listeners[eventName] = []; + } this.#listeners[eventName]?.push(callback); } } diff --git a/src/types.ts b/src/types.ts index 482dee7..3e517d0 100644 --- 
a/src/types.ts +++ b/src/types.ts @@ -1,32 +1,14 @@ -type Volume = number; +import type { Analyser } from "./analyser"; -interface VolumeChangeEvent { - volume: Volume; -} - -export interface CreateAnalyzerOptions { - fftSize?: number; - minDecibels?: number; - maxDecibels?: number; - smoothingTimeConstant?: number; +export interface MonitorOptions { + mimeType?: string | string[]; + context?: AudioContext; + defaultAnalyser?: Analyser; } -export interface AudioRecorderOptions +export interface RecorderOptions extends Omit { - deviceId?: string; - workletPath?: string; mimeType?: string | string[]; + context?: AudioContext; + defaultAnalyser?: Analyser; } - -export interface AudioEventListenerMap { - volumechange: VolumeChangeEvent; - stop: Event; -} - -export type AudioEventListener = (event: T) => void; - -export type AudioEventListeners = { - [k in keyof AudioEventListenerMap]?: Array< - AudioEventListener - >; -}; diff --git a/src/utils.ts b/src/utils.ts new file mode 100644 index 0000000..13fe992 --- /dev/null +++ b/src/utils.ts @@ -0,0 +1,32 @@ +type DeviceFilters = { + [key in keyof MediaDeviceInfo]: MediaDeviceInfo[key]; +}; + +/** + * Lists all of the users available audio devices + * @returns The list of Device objects + */ +export async function getDevices( + filters: Partial = {}, +): Promise { + return navigator.mediaDevices + .enumerateDevices() + .then((list) => + list.filter((d) => + filters + ? 
Object.entries(filters).every(([key, value]) => d[key] === value) + : true, + ), + ); +} + +/** + * Finds a media stream given the provided constraints + * @param constraints - The constraints to use when finding the media stream + * @returns The media stream + */ +export async function getMediaStream( + constraints: MediaStreamConstraints = { audio: true, video: false }, +) { + return navigator.mediaDevices.getUserMedia(constraints); +} diff --git a/tests/devices.spec.js b/tests/devices.spec.js index 865f333..dc7b81a 100644 --- a/tests/devices.spec.js +++ b/tests/devices.spec.js @@ -1,107 +1,99 @@ import { expect, test } from "@playwright/test"; test.describe("Device Enumeration", () => { - test.beforeEach(async ({ page }) => { - await page.goto("/tests/index.html"); - }); - - test("should list audio input devices", async ({ page }) => { - const devices = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - const deviceList = await AudioRecorder.listDevices(); - - return { - count: deviceList.length, - devices: deviceList.map((d) => ({ - deviceId: d.deviceId, - kind: d.kind, - label: d.label, - groupId: d.groupId, - })), - }; - }); - - expect(devices.count).toBeGreaterThan(0); - expect(Array.isArray(devices.devices)).toBe(true); - }); - - test("all devices should be audio inputs", async ({ page }) => { - const allAudioInputs = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - const devices = await AudioRecorder.listDevices(); - - return devices.every((d) => d.kind === "audioinput"); - }); - - expect(allAudioInputs).toBe(true); - }); - - test("devices should have required properties", async ({ page }) => { - const devicesValid = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - const devices = await AudioRecorder.listDevices(); - - if (devices.length === 0) return false; - - return devices.every( - (d) => - typeof d.deviceId === 
"string" && - typeof d.kind === "string" && - typeof d.label === "string" && - typeof d.groupId === "string", - ); - }); - - expect(devicesValid).toBe(true); - }); - - test("should create recorder with specific device", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - const devices = await AudioRecorder.listDevices(); - - if (devices.length === 0) { - return { success: false, reason: "no devices" }; - } - - const deviceId = devices[0].deviceId; - const recorder = new AudioRecorder({ deviceId }); - - try { - await recorder.start(); - await new Promise((resolve) => setTimeout(resolve, 200)); - await recorder.stop(); - return { success: true }; - } catch (e) { - return { success: false, reason: e.message }; - } - }); - - expect(result.success).toBe(true); - }); - - test("should filter only audio inputs from all devices", async ({ page }) => { - const comparison = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - - // Get all devices - const allDevices = await navigator.mediaDevices.enumerateDevices(); - const audioInputs = allDevices.filter((d) => d.kind === "audioinput"); - - // Get filtered devices - const filteredDevices = await AudioRecorder.listDevices(); - - return { - allDevicesCount: allDevices.length, - audioInputsCount: audioInputs.length, - filteredCount: filteredDevices.length, - match: audioInputs.length === filteredDevices.length, - }; - }); - - expect(comparison.match).toBe(true); - expect(comparison.filteredCount).toBeLessThanOrEqual( - comparison.allDevicesCount, - ); - }); + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should list audio input devices", async ({ page }) => { + const devices = await page.evaluate(async () => { + const { getDevices } = await import("/dist/index.js"); + const deviceList = await getDevices(); + + return { + count: deviceList.length, + devices: 
deviceList.map((d) => ({ + deviceId: d.deviceId, + kind: d.kind, + label: d.label, + groupId: d.groupId, + })), + }; + }); + + expect(devices.count).toBeGreaterThan(0); + expect(Array.isArray(devices.devices)).toBe(true); + }); + + test("devices should have required properties", async ({ page }) => { + const devicesValid = await page.evaluate(async () => { + const { getDevices } = await import("/dist/index.js"); + const devices = await getDevices(); + + if (devices.length === 0) return false; + + return devices.every( + (d) => + typeof d.deviceId === "string" && + typeof d.kind === "string" && + typeof d.label === "string" && + typeof d.groupId === "string", + ); + }); + + expect(devicesValid).toBe(true); + }); + + test("should create recorder with specific device", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getDevices, getMediaStream, Recorder } = await import( + "/dist/index.js" + ); + + const devices = await getDevices(); + + if (devices.length === 0) { + return { success: false, reason: "no devices" }; + } + + const deviceId = devices[0].deviceId; + const recorder = new Recorder(await getMediaStream(), { deviceId }); + + try { + recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 200)); + await recorder.stop(); + return { success: true }; + } catch (e) { + return { success: false, reason: e.message }; + } + }); + + expect(result.success).toBe(true); + }); + + test("should filter only audio inputs from all devices", async ({ page }) => { + const comparison = await page.evaluate(async () => { + const { getDevices } = await import("/dist/index.js"); + + // Get all devices + const allDevices = await navigator.mediaDevices.enumerateDevices(); + const audioInputs = allDevices.filter((d) => d.kind === "audioinput"); + + // Get filtered devices + const filteredDevices = await getDevices({ kind: "audioinput" }); + + return { + allDevicesCount: allDevices.length, + audioInputsCount: audioInputs.length, + 
filteredCount: filteredDevices.length, + match: audioInputs.length === filteredDevices.length, + }; + }); + + expect(comparison.match).toBe(true); + expect(comparison.filteredCount).toBeLessThanOrEqual( + comparison.allDevicesCount, + ); + }); }); diff --git a/tests/recorder.spec.js b/tests/recorder.spec.js index e679337..e24c1b2 100644 --- a/tests/recorder.spec.js +++ b/tests/recorder.spec.js @@ -1,161 +1,205 @@ import { expect, test } from "@playwright/test"; -test.describe("AudioRecorder", () => { - test.beforeEach(async ({ page }) => { - await page.goto("/tests/index.html"); - }); +test.describe("Recorder", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); - test("should create an instance with default options", async ({ page }) => { - const created = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + test("should handle multiple event listeners", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - const recorder = new AudioRecorder(); + const stream = await getMediaStream(); + const recorder = new Recorder(stream); + + let listener1Count = 0; + let listener2Count = 0; - return recorder !== null; - }); + recorder.on("stop", () => { + listener1Count++; + }); - expect(created).toBe(true); - }); + recorder.on("stop", () => { + listener2Count++; + }); - test("should create an instance with custom options", async ({ page }) => { - const mimeTypeSet = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 500)); + await recorder.stop(); - const recorder = new AudioRecorder({ - mimeType: ["audio/webm", "audio/mp4"], - }); + return { + listener1Count, + listener2Count, + bothReceived: listener1Count > 0 && listener2Count > 0, + equalCounts: listener1Count === 
listener2Count, + }; + }); - return recorder.mimeType; - }); + expect(result.bothReceived).toBe(true); + expect(result.equalCounts).toBe(true); + }); - expect(mimeTypeSet).toBe("audio/webm"); - }); + test("should create an instance with default options", async ({ page }) => { + const created = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - test("should start recording", async ({ page }) => { - const started = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + const stream = await getMediaStream(); + const recorder = new Recorder(stream); - const recorder = new AudioRecorder(); + return recorder !== null; + }); - try { - await recorder.start(); - return true; - } catch (e) { - console.error("Start failed:", e); - return false; - } - }); + expect(created).toBe(true); + }); - expect(started).toBe(true); - }); + test("should create an instance with custom options", async ({ page }) => { + const mimeTypeSet = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - test("should stop recording and return a blob", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + const stream = await getMediaStream(); + const recorder = new Recorder(stream, { + mimeType: ["audio/webm", "audio/mp4"], + }); - const recorder = new AudioRecorder(); + return recorder.mimeType; + }); - await recorder.start(); + expect(mimeTypeSet).toBe("audio/webm"); + }); - // Record for 500ms - await new Promise((resolve) => setTimeout(resolve, 500)); + test("should start recording", async ({ page }) => { + const started = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - const blob = await recorder.stop(); + const stream = await getMediaStream(); + const recorder = new Recorder(stream); - return { - hasBlob: blob !== 
null, - isBlob: blob instanceof Blob, - size: blob.size, - type: blob.type, - }; - }); + try { + await recorder.start(); + return true; + } catch (e) { + console.error("Start failed:", e); + return false; + } + }); - expect(result.hasBlob).toBe(true); - expect(result.isBlob).toBe(true); - expect(result.size).toBeGreaterThan(0); - }); + expect(started).toBe(true); + }); - test("should record for specified duration", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + test("should stop recording and return a blob", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - const recorder = new AudioRecorder(); + const stream = await getMediaStream(); + const recorder = new Recorder(stream); - await recorder.start(); + await recorder.start(); - // Record for 1 second - await new Promise((resolve) => setTimeout(resolve, 1000)); + // Record for 500ms + await new Promise((resolve) => setTimeout(resolve, 500)); - const blob = await recorder.stop(); + const blob = await recorder.stop(); - return { - size: blob.size, - }; - }); + return { + hasBlob: blob !== null, + isBlob: blob instanceof Blob, + size: blob.size, + type: blob.type, + }; + }); - // Recording should produce data - expect(result.size).toBeGreaterThan(0); - }); + expect(result.hasBlob).toBe(true); + expect(result.isBlob).toBe(true); + expect(result.size).toBeGreaterThan(0); + }); - test("should handle multiple start/stop cycles", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + test("should record for specified duration", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - const recorder = new AudioRecorder(); + const stream = await getMediaStream(); + const recorder 
= new Recorder(stream); - const blobs = []; + await recorder.start(); - // First recording - await recorder.start(); - await new Promise((resolve) => setTimeout(resolve, 300)); - const blob1 = await recorder.stop(); - blobs.push(blob1.size); + // Record for 1 second + await new Promise((resolve) => setTimeout(resolve, 1000)); - // Second recording - await recorder.start(); - await new Promise((resolve) => setTimeout(resolve, 300)); - const blob2 = await recorder.stop(); - blobs.push(blob2.size); + const blob = await recorder.stop(); - return { - firstSize: blobs[0], - secondSize: blobs[1], - bothValid: blobs.every((size) => size > 0), - }; - }); + return { + size: blob.size, + }; + }); - expect(result.bothValid).toBe(true); - expect(result.firstSize).toBeGreaterThan(0); - expect(result.secondSize).toBeGreaterThan(0); - }); + // Recording should produce data + expect(result.size).toBeGreaterThan(0); + }); - test("should respect mime type preference", async ({ page }) => { - const mimeType = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + test("should handle multiple start/stop cycles", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - const recorder = new AudioRecorder({ - mimeType: ["audio/webm", "audio/mp4", "audio/ogg"], - }); + const stream = await getMediaStream(); + const recorder = new Recorder(stream); - return recorder.mimeType; - }); + const blobs = []; - // Should select first supported type - expect(mimeType).toBeDefined(); - expect(typeof mimeType).toBe("string"); - }); + // First recording + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 300)); + const blob1 = await recorder.stop(); + blobs.push(blob1.size); - test("should support single mime type", async ({ page }) => { - const mimeType = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); 
+ // Second recording + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 300)); + const blob2 = await recorder.stop(); + blobs.push(blob2.size); - const recorder = new AudioRecorder({ - mimeType: "audio/webm", - }); + return { + firstSize: blobs[0], + secondSize: blobs[1], + bothValid: blobs.every((size) => size > 0), + }; + }); - return recorder.mimeType; - }); + expect(result.bothValid).toBe(true); + expect(result.firstSize).toBeGreaterThan(0); + expect(result.secondSize).toBeGreaterThan(0); + }); - expect(mimeType).toBe("audio/webm"); - }); + test("should respect mime type preference", async ({ page }) => { + const mimeType = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + + const recorder = new Recorder(stream, { + mimeType: ["audio/webm", "audio/mp4", "audio/ogg"], + }); + + return recorder.mimeType; + }); + + // Should select first supported type + expect(mimeType).toBeDefined(); + expect(typeof mimeType).toBe("string"); + }); + + test("should support single mime type", async ({ page }) => { + const mimeType = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + + const recorder = new Recorder(stream, { + mimeType: "audio/webm", + }); + + return recorder.mimeType; + }); + + expect(mimeType).toBe("audio/webm"); + }); }); diff --git a/tests/worklets.spec.js b/tests/worklets.spec.js index defce3d..3006d22 100644 --- a/tests/worklets.spec.js +++ b/tests/worklets.spec.js @@ -1,228 +1,60 @@ import { expect, test } from "@playwright/test"; test.describe("AudioWorklets", () => { - test.beforeEach(async ({ page }) => { - await page.goto("/tests/index.html"); - }); + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); - test("should install custom worklet", async ({ page }) => { - const result = await 
page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + test("should install custom worklet", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - const recorder = new AudioRecorder({ workletPath: "/dist/worklets" }); + const stream = await getMediaStream(); + const recorder = new Recorder(stream); - let workletInstalled = false; + let workletInstalled = false; - try { - const { node, context, stream } = await recorder.installWorklet( - "volume-meter", - "/dist/worklets/volume-meter.js", - ); + try { + const node = await recorder.installWorklet( + "volume-meter", + "/dist/worklets/volume-meter.js", + ); - workletInstalled = - context instanceof AudioContext && - stream instanceof MediaStream && - node instanceof AudioWorkletNode; + workletInstalled = node instanceof AudioWorkletNode; - return { success: true, installed: workletInstalled }; - } catch (e) { - return { success: false, error: e.message }; - } - }); + return { success: true, installed: workletInstalled }; + } catch (e) { + return { success: false, error: e.message }; + } + }); - expect(result.success).toBe(true); - expect(result.installed).toBe(true); - }); + expect(result.success).toBe(true); + expect(result.installed).toBe(true); + }); - test("should emit volume change events", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); + test("should pass correct parameters to worklet callback", async ({ + page, + }) => { + const result = await page.evaluate(async () => { + const { Recorder, getMediaStream } = await import("/dist/index.js"); - const recorder = new AudioRecorder({ workletPath: "/dist/worklets" }); + const stream = getMediaStream(); + const recorder = new Recorder(stream); - const volumeEvents = []; + let callbackParams = {}; - recorder.on("volumechange", ({ volume }) => { - 
volumeEvents.push(volume); - }); + const node = await recorder.installWorklet( + "volume-meter", + "/dist/worklets/volume-meter.js", + ); - await recorder.start(); + callbackParams = { + hasNode: node instanceof AudioWorkletNode, + }; - // Wait for volume events to be collected - await new Promise((resolve) => setTimeout(resolve, 3000)); + return callbackParams; + }); - await recorder.stop(); - - return { - eventCount: volumeEvents.length, - hasEvents: volumeEvents.length > 0, - allNumbers: volumeEvents.every((v) => typeof v === "number"), - allInRange: volumeEvents.every((v) => v >= 0 && v <= 1), - sampleVolumes: volumeEvents.slice(0, 5), - }; - }); - - expect(result.hasEvents).toBe(true); - expect(result.eventCount).toBeGreaterThan(0); - expect(result.allNumbers).toBe(true); - expect(result.allInRange).toBe(true); - }); - - test("should setup audio meter automatically when volumechange listener is attached", async ({ - page, - }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - - const recorder = new AudioRecorder({ workletPath: "/dist/worklets" }); - - let volumeReceived = false; - - // Attach listener before starting - recorder.on("volumechange", ({ volume }) => { - volumeReceived = typeof volume === "number"; - }); - - await recorder.start(); - - // Wait for audio processing - await new Promise((resolve) => setTimeout(resolve, 500)); - - await recorder.stop(); - - return { volumeReceived }; - }); - - expect(result.volumeReceived).toBe(true); - }); - - test("should pass correct parameters to worklet callback", async ({ - page, - }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - - const recorder = new AudioRecorder({ workletPath: "/dist/worklets" }); - - let callbackParams = {}; - - const { context, stream, node } = await recorder.installWorklet( - "volume-meter", - "/dist/worklets/volume-meter.js", - ); - - callbackParams = 
{ - hasContext: context instanceof AudioContext, - contextState: context.state, - hasStream: stream instanceof MediaStream, - streamActive: stream.active, - hasNode: node instanceof AudioWorkletNode, - nodeContext: node.context === context, - }; - - return callbackParams; - }); - - expect(result.hasContext).toBe(true); - expect(result.hasStream).toBe(true); - expect(result.hasNode).toBe(true); - expect(result.nodeContext).toBe(true); - expect(["suspended", "running", "closed"]).toContain(result.contextState); - }); - - test("should handle worklet path configuration", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - - // Test with custom worklet path - const recorder = new AudioRecorder({ - workletPath: "/dist/worklets", - }); - - let installed = false; - - try { - await recorder.installWorklet( - "volume-meter", - "/dist/worklets/volume-meter.js", - ); - - installed = true; - - return { success: true, installed }; - } catch (e) { - return { success: false, error: e.message }; - } - }); - - expect(result.success).toBe(true); - expect(result.installed).toBe(true); - }); - - test("should process audio through worklet", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - - const recorder = new AudioRecorder({ workletPath: "/dist/worklets" }); - - const volumes = []; - - recorder.on("volumechange", ({ volume }) => { - volumes.push(volume); - }); - - await recorder.start(); - - // Record and collect volume data - await new Promise((resolve) => setTimeout(resolve, 1500)); - - await recorder.stop(); - - return { - totalEvents: volumes.length, - minVolume: Math.min(...volumes), - maxVolume: Math.max(...volumes), - avgVolume: volumes.reduce((a, b) => a + b, 0) / volumes.length, - }; - }); - - expect(result.totalEvents).toBeGreaterThan(0); - expect(result.minVolume).toBeGreaterThanOrEqual(0); - 
expect(result.maxVolume).toBeLessThanOrEqual(1); - expect(result.avgVolume).toBeGreaterThanOrEqual(0); - expect(result.avgVolume).toBeLessThanOrEqual(1); - }); - - test("should handle multiple event listeners", async ({ page }) => { - const result = await page.evaluate(async () => { - const { AudioRecorder } = await import("/dist/index.js"); - - const recorder = new AudioRecorder({ workletPath: "/dist/worklets" }); - - let listener1Count = 0; - let listener2Count = 0; - - recorder.on("volumechange", () => { - listener1Count++; - }); - - recorder.on("volumechange", () => { - listener2Count++; - }); - - await recorder.start(); - await new Promise((resolve) => setTimeout(resolve, 500)); - await recorder.stop(); - - return { - listener1Count, - listener2Count, - bothReceived: listener1Count > 0 && listener2Count > 0, - equalCounts: listener1Count === listener2Count, - }; - }); - - expect(result.bothReceived).toBe(true); - expect(result.equalCounts).toBe(true); - }); + expect(result.hasNode).toBe(true); + }); }); From 70a36e7e1c77dcb7f3f42d9b818274f94b6e2f4c Mon Sep 17 00:00:00 2001 From: Tyler Nickerson Date: Sat, 11 Oct 2025 17:01:50 -0400 Subject: [PATCH 4/5] fixes --- src/analyser.ts | 130 ++++---- src/index.ts | 4 +- src/monitor.ts | 80 ++--- src/recorder.ts | 242 +++++++-------- src/types.ts | 14 +- src/utils.ts | 26 +- tests/analyser.spec.js | 474 +++++++++++++++++++++++++++++ tests/edge-cases.spec.js | 617 ++++++++++++++++++++++++++++++++++++++ tests/integration.spec.js | 606 +++++++++++++++++++++++++++++++++++++ tests/worklets.spec.js | 2 +- 10 files changed, 1950 insertions(+), 245 deletions(-) create mode 100644 tests/analyser.spec.js create mode 100644 tests/edge-cases.spec.js create mode 100644 tests/integration.spec.js diff --git a/src/analyser.ts b/src/analyser.ts index f024096..d991013 100644 --- a/src/analyser.ts +++ b/src/analyser.ts @@ -1,82 +1,82 @@ export class Analyser { - readonly node: AnalyserNode; - readonly #data: Uint8Array; + readonly node: 
AnalyserNode; + readonly #data: Uint8Array; - constructor( - readonly context: AudioContext = new AudioContext(), - readonly options: AnalyserOptions = {}, - ) { - this.node = this.context.createAnalyser(); + constructor( + readonly context: AudioContext = new AudioContext(), + readonly options: AnalyserOptions = {}, + ) { + this.node = this.context.createAnalyser(); - if (options?.fftSize) { - this.node.fftSize = options.fftSize; - } + if (options?.fftSize) { + this.node.fftSize = options.fftSize; + } - if (options?.minDecibels) { - this.node.minDecibels = options.minDecibels; - } + if (options?.minDecibels) { + this.node.minDecibels = options.minDecibels; + } - if (options?.maxDecibels) { - this.node.maxDecibels = options.maxDecibels; - } + if (options?.maxDecibels) { + this.node.maxDecibels = options.maxDecibels; + } - if (options?.smoothingTimeConstant) { - this.node.smoothingTimeConstant = options.smoothingTimeConstant; - } + if (options?.smoothingTimeConstant) { + this.node.smoothingTimeConstant = options.smoothingTimeConstant; + } - if (options?.channelCount) { - this.node.channelCount = options.channelCount; - } + if (options?.channelCount) { + this.node.channelCount = options.channelCount; + } - if (options?.channelInterpretation) { - this.node.channelInterpretation = options.channelInterpretation; - } + if (options?.channelInterpretation) { + this.node.channelInterpretation = options.channelInterpretation; + } - if (options?.channelCountMode) { - this.node.channelCountMode = options.channelCountMode; - } + if (options?.channelCountMode) { + this.node.channelCountMode = options.channelCountMode; + } - this.#data = new Uint8Array(this.node.frequencyBinCount); - } + this.#data = new Uint8Array(this.node.frequencyBinCount); + } - /** - * Returns the frequency data provided by the default analyzer - */ - get frequencyData(): Uint8Array { - this.node.getByteFrequencyData(this.#data); - return this.#data; - } + /** + * Returns the frequency data provided by the 
default analyzer + */ + get frequencyData(): Uint8Array { + this.node.getByteFrequencyData(this.#data); + return this.#data; + } - /** - * Retrieves the current volume (average of amplitude^2) - */ - get volume(): number { - const data = this.frequencyData; + /** + * Retrieves the current volume (average of amplitude^2) + */ + get volume(): number { + const data = this.frequencyData; - let sum = 0; + let sum = 0; - for (const amplitude of data) { - sum += amplitude * amplitude; - } + for (const amplitude of data) { + sum += amplitude * amplitude; + } - return Math.sqrt(sum / data.length); - } + return Math.sqrt(sum / data.length); + } - connect( - destinationNode: AudioNode, - output?: number, - input?: number, - ): AudioNode; - connect(destinationParam: AudioParam, output?: number): void; - connect( - destination: AudioNode | AudioParam, - output?: number, - input?: number, - ): AudioNode | undefined { - if (destination instanceof AudioNode) { - return this.node.connect(destination, output, input); - } else { - this.node.connect(destination, output); - } - } + connect( + destinationNode: AudioNode, + output?: number, + input?: number, + ): AudioNode; + connect(destinationParam: AudioParam, output?: number): void; + connect( + destination: AudioNode | AudioParam, + output?: number, + input?: number, + ): AudioNode | undefined { + if (destination instanceof AudioNode) { + return this.node.connect(destination, output, input); + } else { + this.node.connect(destination, output); + } + } } diff --git a/src/index.ts b/src/index.ts index d1ef2ab..2840f68 100644 --- a/src/index.ts +++ b/src/index.ts @@ -1,4 +1,4 @@ +export * from "./analyser.js"; +export * from "./monitor.js"; export * from "./recorder.js"; -export * from "./types.js"; export * from "./utils.js"; -export * from "./analyser.js"; diff --git a/src/monitor.ts b/src/monitor.ts index 01410e3..965cb3f 100644 --- a/src/monitor.ts +++ b/src/monitor.ts @@ -1,50 +1,52 @@ import { Analyser } from "./analyser.js"; 
export interface MonitorOptions { - context?: AudioContext; - defaultAnalyser?: Analyser; + context?: AudioContext; + defaultAnalyser?: Analyser; } export class Monitor { - readonly context: AudioContext; - readonly source: MediaStreamAudioSourceNode; - readonly destination: AudioDestinationNode; - readonly analyser: Analyser; + readonly context: AudioContext; + readonly source: MediaStreamAudioSourceNode; + readonly destination: AudioDestinationNode; + readonly analyser: Analyser; + readonly #options: MonitorOptions; - constructor( - readonly stream: MediaStream, - private readonly options: MonitorOptions, - ) { - this.context = this.options.context ?? new AudioContext(); - this.destination = this.context.destination; - this.source = this.context.createMediaStreamSource(this.stream); - this.analyser = this.options.defaultAnalyser ?? new Analyser(this.context); - this.source.connect(this.analyser.node); - this.analyser.connect(this.destination); - } + constructor( + readonly stream: MediaStream, + options: MonitorOptions, + ) { + this.#options = options; + this.context = this.#options.context ?? new AudioContext(); + this.destination = this.context.destination; + this.source = this.context.createMediaStreamSource(this.stream); + this.analyser = this.#options.defaultAnalyser ?? 
new Analyser(this.context); + this.source.connect(this.analyser.node); + this.analyser.connect(this.destination); + } - /** - * Retrieves the current volume (average of amplitude^2) - */ - get volume() { - return this.analyser.volume; - } + /** + * Retrieves the current volume (average of amplitude^2) + */ + get volume() { + return this.analyser.volume; + } - /** - * Retrieves the current analyzer's frequency data - */ - get frequencyData() { - return this.analyser.frequencyData; - } + /** + * Retrieves the current analyzer's frequency data + */ + get frequencyData() { + return this.analyser.frequencyData; + } - /** - * Adds a custom audio worklet to the current audio context - * - * @param name The registered name of the worklet - * @param path The absolute path to the worklet - */ - async installWorklet(name: string, path: string): Promise { - await this.context.audioWorklet.addModule(path); - return new AudioWorkletNode(this.context, name); - } + /** + * Adds a custom audio worklet to the current audio context + * + * @param name The registered name of the worklet + * @param path The absolute path to the worklet + */ + async installWorklet(name: string, path: string): Promise { + await this.context.audioWorklet.addModule(path); + return new AudioWorkletNode(this.context, name); + } } diff --git a/src/recorder.ts b/src/recorder.ts index be78be0..d5d4c78 100644 --- a/src/recorder.ts +++ b/src/recorder.ts @@ -2,125 +2,131 @@ import { Analyser } from "./analyser.js"; import { Monitor, type MonitorOptions } from "./monitor.js"; export interface RecorderOptions extends MonitorOptions { - mimeType?: string | string[]; + mimeType?: string | string[]; } export class Recorder extends Monitor { - readonly mimeType: string | undefined; - - /** - * Dictionary of event listeners associated with the recorder - */ - #listeners: Partial<{ - [K in keyof MediaRecorderEventMap]: MediaRecorderEventMap[K][]; - }> = {}; - - /** - * Buffer of currently recorded blob chunks - */ - 
#buffer?: Blob[] = []; - - /** - * Underlying media recorder object - */ - #recorder?: MediaRecorder; - - /** - * The currently active media stream - */ - #stream?: MediaStream; - - constructor(stream: MediaStream, options: RecorderOptions = {}) { - super(stream, options); - - this.mimeType = Array.isArray(options.mimeType) - ? options.mimeType.find((type) => MediaRecorder.isTypeSupported(type)) - : options.mimeType; - } - - /** - * The recording state of the media recorder - */ - get state(): RecordingState | undefined { - return this.#recorder?.state; - } - - /** - * Returns the active audio recorder or creates one if one doesn't exist - * - * @returns The MediaRecorder object - */ - get mediaRecorder(): MediaRecorder { - if (!this.#recorder) { - this.#recorder = new MediaRecorder(this.stream, { - ...this.options, - mimeType: this.mimeType, - }); - - this.#recorder.addEventListener("dataavailable", ({ data }) => { - if (data.size > 0) { - this.#buffer?.push(data); - } - }); - - for (const [type, callbacks] of Object.entries(this.#listeners)) { - for (const callback of callbacks) { - this.#recorder.addEventListener(type, callback as any); - } - } - } - - return this.#recorder; - } - - /** - * Starts recording audio using the given device ID or, if none is provided, the default device - * @param timeslice Optional timeslice in milliseconds - */ - start(timeslice?: number) { - return this.mediaRecorder.start(timeslice); - } - - /** - * Stops recording - * @returns The recorded data as a Blob object - */ - async stop(): Promise { - return new Promise((resolve) => { - // Wait for the audio to stop and for the data to be available - this.#recorder?.addEventListener("stop", () => { - const blob = new Blob(this.#buffer); - - this.#buffer = []; - - for (const track of this.#stream?.getTracks() ?? 
[]) { - track.stop(); - } - - this.#recorder = undefined; - this.#stream = undefined; - - void this.context?.suspend(); - - resolve(blob); - }); - - this.#recorder?.stop(); - }); - } - - /** - * Attaches an event listener to the recorder - * @param eventName The name of the event - * @param callback The callback - */ - on( - eventName: T, - callback: MediaRecorderEventMap[T], - ) { - if (!this.#listeners[eventName]) { - this.#listeners[eventName] = []; - } - this.#listeners[eventName]?.push(callback); - } + readonly mimeType: string | undefined; + + /** + * Dictionary of event listeners associated with the recorder + */ + #listeners: Partial<{ + [K in keyof MediaRecorderEventMap]: MediaRecorderEventMap[K][]; + }> = {}; + + /** + * Buffer of currently recorded blob chunks + */ + #buffer?: Blob[] = []; + + /** + * Underlying media recorder object + */ + #recorder?: MediaRecorder; + + /** + * The currently active media stream + */ + #stream?: MediaStream; + + /** + * Recorder options + */ + #options: RecorderOptions; + + constructor(stream: MediaStream, options: RecorderOptions = {}) { + super(stream, options); + + this.#options = options; + this.mimeType = Array.isArray(options.mimeType) + ? 
options.mimeType.find((type) => MediaRecorder.isTypeSupported(type)) + : options.mimeType; + } + + /** + * The recording state of the media recorder + */ + get state(): RecordingState | undefined { + return this.#recorder?.state; + } + + /** + * Returns the active audio recorder or creates one if one doesn't exist + * + * @returns The MediaRecorder object + */ + get mediaRecorder(): MediaRecorder { + if (!this.#recorder) { + this.#recorder = new MediaRecorder(this.stream, { + ...this.#options, + mimeType: this.mimeType, + }); + + this.#recorder.addEventListener("dataavailable", ({ data }) => { + if (data.size > 0) { + this.#buffer?.push(data); + } + }); + + for (const [type, callbacks] of Object.entries(this.#listeners)) { + for (const callback of callbacks) { + this.#recorder.addEventListener(type, callback as any); + } + } + } + + return this.#recorder; + } + + /** + * Starts recording audio using the given device ID or, if none is provided, the default device + * @param timeslice Optional timeslice in milliseconds + */ + start(timeslice?: number) { + return this.mediaRecorder.start(timeslice); + } + + /** + * Stops recording + * @returns The recorded data as a Blob object + */ + async stop(): Promise { + return new Promise((resolve) => { + // Wait for the audio to stop and for the data to be available + this.#recorder?.addEventListener("stop", () => { + const blob = new Blob(this.#buffer); + + this.#buffer = []; + + for (const track of this.#stream?.getTracks() ?? 
[]) { + track.stop(); + } + + this.#recorder = undefined; + this.#stream = undefined; + + void this.context?.suspend(); + + resolve(blob); + }); + + this.#recorder?.stop(); + }); + } + + /** + * Attaches an event listener to the recorder + * @param eventName The name of the event + * @param callback The callback + */ + on( + eventName: T, + callback: MediaRecorderEventMap[T], + ) { + if (!this.#listeners[eventName]) { + this.#listeners[eventName] = []; + } + this.#listeners[eventName]?.push(callback); + } } diff --git a/src/types.ts b/src/types.ts index 3e517d0..a384393 100644 --- a/src/types.ts +++ b/src/types.ts @@ -1,14 +1,14 @@ import type { Analyser } from "./analyser"; export interface MonitorOptions { - mimeType?: string | string[]; - context?: AudioContext; - defaultAnalyser?: Analyser; + mimeType?: string | string[]; + context?: AudioContext; + defaultAnalyser?: Analyser; } export interface RecorderOptions - extends Omit { - mimeType?: string | string[]; - context?: AudioContext; - defaultAnalyser?: Analyser; + extends Omit { + mimeType?: string | string[]; + context?: AudioContext; + defaultAnalyser?: Analyser; } diff --git a/src/utils.ts b/src/utils.ts index 13fe992..9e2bb70 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -1,5 +1,5 @@ type DeviceFilters = { - [key in keyof MediaDeviceInfo]: MediaDeviceInfo[key]; + [key in keyof MediaDeviceInfo]: MediaDeviceInfo[key]; }; /** @@ -7,17 +7,17 @@ type DeviceFilters = { * @returns The list of Device objects */ export async function getDevices( - filters: Partial = {}, + filters: Partial = {}, ): Promise { - return navigator.mediaDevices - .enumerateDevices() - .then((list) => - list.filter((d) => - filters - ? Object.entries(filters).every(([key, value]) => d[key] === value) - : true, - ), - ); + return navigator.mediaDevices + .enumerateDevices() + .then((list) => + list.filter((d) => + filters + ? 
Object.entries(filters).every(([key, value]) => d[key] === value) + : true, + ), + ); } /** @@ -26,7 +26,7 @@ export async function getDevices( * @returns The media stream */ export async function getMediaStream( - constraints: MediaStreamConstraints = { audio: true, video: false }, + constraints: MediaStreamConstraints = { audio: true, video: false }, ) { - return navigator.mediaDevices.getUserMedia(constraints); + return navigator.mediaDevices.getUserMedia(constraints); } diff --git a/tests/analyser.spec.js b/tests/analyser.spec.js new file mode 100644 index 0000000..010709d --- /dev/null +++ b/tests/analyser.spec.js @@ -0,0 +1,474 @@ +import { expect, test } from "@playwright/test"; + +test.describe("Analyser", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should create an instance with default options", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(); + + return { + created: analyser !== null, + hasNode: analyser.node instanceof AnalyserNode, + hasContext: analyser.context instanceof AudioContext, + defaultFFTSize: analyser.node.fftSize, + frequencyBinCount: analyser.node.frequencyBinCount, + }; + }); + + expect(result.created).toBe(true); + expect(result.hasNode).toBe(true); + expect(result.hasContext).toBe(true); + expect(result.defaultFFTSize).toBe(2048); // Default Web Audio API value + expect(result.frequencyBinCount).toBe(1024); // Half of fftSize + }); + + test("should create an instance with custom AudioContext", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const customContext = new AudioContext(); + const analyser = new Analyser(customContext); + + return { + contextMatches: analyser.context === customContext, + sampleRate: analyser.context.sampleRate, + }; + }); + + 
expect(result.contextMatches).toBe(true); + expect(result.sampleRate).toBeGreaterThan(0); + }); + + test("should apply custom fftSize option", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(undefined, { fftSize: 1024 }); + + return { + fftSize: analyser.node.fftSize, + frequencyBinCount: analyser.node.frequencyBinCount, + }; + }); + + expect(result.fftSize).toBe(1024); + expect(result.frequencyBinCount).toBe(512); + }); + + test("should apply custom decibel range options", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(undefined, { + minDecibels: -90, + maxDecibels: -10, + }); + + return { + minDecibels: analyser.node.minDecibels, + maxDecibels: analyser.node.maxDecibels, + }; + }); + + expect(result.minDecibels).toBe(-90); + expect(result.maxDecibels).toBe(-10); + }); + + test("should apply custom smoothingTimeConstant option", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(undefined, { + smoothingTimeConstant: 0.5, + }); + + return { + smoothingTimeConstant: analyser.node.smoothingTimeConstant, + }; + }); + + expect(result.smoothingTimeConstant).toBe(0.5); + }); + + test("should apply custom channel options", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(undefined, { + channelCount: 1, + channelInterpretation: "discrete", + channelCountMode: "explicit", + }); + + return { + channelCount: analyser.node.channelCount, + channelInterpretation: analyser.node.channelInterpretation, + channelCountMode: analyser.node.channelCountMode, + }; + }); + + expect(result.channelCount).toBe(1); + 
expect(result.channelInterpretation).toBe("discrete"); + expect(result.channelCountMode).toBe("explicit"); + }); + + test("should return frequency data as Uint8Array", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, getMediaStream } = await import("/dist/index.js"); + + const analyser = new Analyser(); + const stream = await getMediaStream(); + const source = analyser.context.createMediaStreamSource(stream); + source.connect(analyser.node); + + // Allow some time for audio processing + await new Promise(resolve => setTimeout(resolve, 100)); + + const frequencyData = analyser.frequencyData; + + return { + isUint8Array: frequencyData instanceof Uint8Array, + length: frequencyData.length, + expectedLength: analyser.node.frequencyBinCount, + hasData: Array.from(frequencyData).some(value => value > 0), + }; + }); + + expect(result.isUint8Array).toBe(true); + expect(result.length).toBe(result.expectedLength); + expect(result.length).toBeGreaterThan(0); + }); + + test("should calculate volume correctly", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, getMediaStream } = await import("/dist/index.js"); + + const analyser = new Analyser(); + const stream = await getMediaStream(); + const source = analyser.context.createMediaStreamSource(stream); + source.connect(analyser.node); + + // Allow some time for audio processing + await new Promise(resolve => setTimeout(resolve, 200)); + + const volume = analyser.volume; + + return { + isNumber: typeof volume === "number", + isFinite: Number.isFinite(volume), + isNonNegative: volume >= 0, + volume: volume, + }; + }); + + expect(result.isNumber).toBe(true); + expect(result.isFinite).toBe(true); + expect(result.isNonNegative).toBe(true); + }); + + test("should connect to AudioNode", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(); 
+ const gainNode = analyser.context.createGain(); + + try { + const returnedNode = analyser.connect(gainNode); + return { + success: true, + returnedCorrectNode: returnedNode === gainNode, + }; + } catch (error) { + return { + success: false, + error: error.message, + }; + } + }); + + expect(result.success).toBe(true); + expect(result.returnedCorrectNode).toBe(true); + }); + + test("should connect to AudioParam", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(); + const gainNode = analyser.context.createGain(); + + try { + const returnValue = analyser.connect(gainNode.gain); + return { + success: true, + returnedUndefined: returnValue === undefined, + }; + } catch (error) { + return { + success: false, + error: error.message, + }; + } + }); + + expect(result.success).toBe(true); + expect(result.returnedUndefined).toBe(true); + }); + + test("should handle multiple analyser instances", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser1 = new Analyser(undefined, { fftSize: 512 }); + const analyser2 = new Analyser(undefined, { fftSize: 1024 }); + + return { + differentContexts: analyser1.context !== analyser2.context, + differentFFTSizes: analyser1.node.fftSize !== analyser2.node.fftSize, + analyser1FFTSize: analyser1.node.fftSize, + analyser2FFTSize: analyser2.node.fftSize, + }; + }); + + expect(result.differentContexts).toBe(true); + expect(result.differentFFTSizes).toBe(true); + expect(result.analyser1FFTSize).toBe(512); + expect(result.analyser2FFTSize).toBe(1024); + }); +}); + +test.describe("Monitor with Default Analyser", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should create Monitor with default analyser", async ({ page }) => { + const result = await page.evaluate(async () => { + const { 
Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + + return { + hasAnalyser: monitor.analyser !== null, + hasContext: monitor.context instanceof AudioContext, + hasSource: monitor.source instanceof MediaStreamAudioSourceNode, + hasDestination: monitor.destination instanceof AudioDestinationNode, + analyserConnected: true, // We can't directly test connection, but constructor should handle it + }; + }); + + expect(result.hasAnalyser).toBe(true); + expect(result.hasContext).toBe(true); + expect(result.hasSource).toBe(true); + expect(result.hasDestination).toBe(true); + }); + + test("should create Monitor with custom AudioContext", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const customContext = new AudioContext(); + const stream = await getMediaStream(); + const monitor = new Monitor(stream, { context: customContext }); + + return { + contextMatches: monitor.context === customContext, + analyserUsesCustomContext: monitor.analyser.context === customContext, + }; + }); + + expect(result.contextMatches).toBe(true); + expect(result.analyserUsesCustomContext).toBe(true); + }); + + test("should create Monitor with custom analyser", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, Analyser, getMediaStream } = await import("/dist/index.js"); + + const customContext = new AudioContext(); + const customAnalyser = new Analyser(customContext, { fftSize: 512 }); + const stream = await getMediaStream(); + const monitor = new Monitor(stream, { + context: customContext, + defaultAnalyser: customAnalyser + }); + + return { + analyserMatches: monitor.analyser === customAnalyser, + customFFTSize: monitor.analyser.node.fftSize, + }; + }); + + expect(result.analyserMatches).toBe(true); + expect(result.customFFTSize).toBe(512); + }); + + 
test("should provide volume through monitor", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + + // Allow some time for audio processing + await new Promise(resolve => setTimeout(resolve, 200)); + + const volume = monitor.volume; + + return { + isNumber: typeof volume === "number", + isFinite: Number.isFinite(volume), + isNonNegative: volume >= 0, + volume: volume, + }; + }); + + expect(result.isNumber).toBe(true); + expect(result.isFinite).toBe(true); + expect(result.isNonNegative).toBe(true); + }); + + test("should provide frequency data through monitor", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + + // Allow some time for audio processing + await new Promise(resolve => setTimeout(resolve, 100)); + + const frequencyData = monitor.frequencyData; + + return { + isUint8Array: frequencyData instanceof Uint8Array, + length: frequencyData.length, + hasValidLength: frequencyData.length > 0, + }; + }); + + expect(result.isUint8Array).toBe(true); + expect(result.hasValidLength).toBe(true); + }); + + test("should install audio worklet", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + + try { + // Create a simple worklet module as a data URL + const workletCode = ` + class TestWorklet extends AudioWorkletProcessor { + process() { + return true; + } + } + registerProcessor('test-worklet', TestWorklet); + `; + const workletUrl = `data:application/javascript,${encodeURIComponent(workletCode)}`; + + const workletNode = await 
monitor.installWorklet('test-worklet', workletUrl); + + return { + success: true, + isAudioWorkletNode: workletNode instanceof AudioWorkletNode, + correctContext: workletNode.context === monitor.context, + }; + } catch (error) { + return { + success: false, + error: error.message, + }; + } + }); + + expect(result.success).toBe(true); + expect(result.isAudioWorkletNode).toBe(true); + expect(result.correctContext).toBe(true); + }); + + test("should handle volume changes over time", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + + // Collect volume readings over time + const volumes = []; + for (let i = 0; i < 5; i++) { + await new Promise(resolve => setTimeout(resolve, 50)); + volumes.push(monitor.volume); + } + + return { + allNumbers: volumes.every(v => typeof v === "number"), + allFinite: volumes.every(v => Number.isFinite(v)), + allNonNegative: volumes.every(v => v >= 0), + readings: volumes.length, + }; + }); + + expect(result.allNumbers).toBe(true); + expect(result.allFinite).toBe(true); + expect(result.allNonNegative).toBe(true); + expect(result.readings).toBe(5); + }); + + test("should handle frequency data consistency", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + + // Get multiple frequency data readings + await new Promise(resolve => setTimeout(resolve, 100)); + const data1 = monitor.frequencyData; + const data2 = monitor.frequencyData; + + return { + sameLength: data1.length === data2.length, + bothUint8Array: data1 instanceof Uint8Array && data2 instanceof Uint8Array, + length: data1.length, + }; + }); + + expect(result.sameLength).toBe(true); + expect(result.bothUint8Array).toBe(true); + 
expect(result.length).toBeGreaterThan(0); + }); + + test("should maintain separate monitor instances", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream1 = await getMediaStream(); + const stream2 = await getMediaStream(); + const monitor1 = new Monitor(stream1, {}); + const monitor2 = new Monitor(stream2, {}); + + return { + differentContexts: monitor1.context !== monitor2.context, + differentAnalysers: monitor1.analyser !== monitor2.analyser, + differentSources: monitor1.source !== monitor2.source, + bothHaveVolume: typeof monitor1.volume === "number" && typeof monitor2.volume === "number", + }; + }); + + expect(result.differentContexts).toBe(true); + expect(result.differentAnalysers).toBe(true); + expect(result.differentSources).toBe(true); + expect(result.bothHaveVolume).toBe(true); + }); +}); diff --git a/tests/edge-cases.spec.js b/tests/edge-cases.spec.js new file mode 100644 index 0000000..6ec4d29 --- /dev/null +++ b/tests/edge-cases.spec.js @@ -0,0 +1,617 @@ +import { expect, test } from "@playwright/test"; + +test.describe("Edge Cases", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should handle zero-length audio buffers", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const analyser = new Analyser(); + + // Create a very short buffer that might result in zero-length data + const context = analyser.context; + const buffer = context.createBuffer(1, 1, context.sampleRate); + const source = context.createBufferSource(); + source.buffer = buffer; + source.connect(analyser.node); + source.start(); + + await new Promise(resolve => setTimeout(resolve, 10)); + + const frequencyData = analyser.frequencyData; + const volume = analyser.volume; + + return { + frequencyDataLength: frequencyData.length, + volumeIsNumber: 
typeof volume === "number", + volumeIsFinite: Number.isFinite(volume), + frequencyDataValid: frequencyData instanceof Uint8Array, + }; + }); + + expect(result.frequencyDataValid).toBe(true); + expect(result.frequencyDataLength).toBeGreaterThan(0); + expect(result.volumeIsNumber).toBe(true); + expect(result.volumeIsFinite).toBe(true); + }); + + test("should handle suspended AudioContext", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, getMediaStream } = await import("/dist/index.js"); + + const context = new AudioContext(); + const analyser = new Analyser(context); + const stream = await getMediaStream(); + const source = context.createMediaStreamSource(stream); + source.connect(analyser.node); + + // Suspend the context + await context.suspend(); + + await new Promise(resolve => setTimeout(resolve, 100)); + + const volume1 = analyser.volume; + const frequencyData1 = analyser.frequencyData; + + // Resume the context + await context.resume(); + + await new Promise(resolve => setTimeout(resolve, 100)); + + const volume2 = analyser.volume; + const frequencyData2 = analyser.frequencyData; + + stream.getTracks().forEach(track => track.stop()); + await context.close(); + + return { + suspendedVolume: typeof volume1 === "number" && Number.isFinite(volume1), + resumedVolume: typeof volume2 === "number" && Number.isFinite(volume2), + suspendedDataValid: frequencyData1 instanceof Uint8Array, + resumedDataValid: frequencyData2 instanceof Uint8Array, + dataLengthsMatch: frequencyData1.length === frequencyData2.length, + }; + }); + + expect(result.suspendedVolume).toBe(true); + expect(result.resumedVolume).toBe(true); + expect(result.suspendedDataValid).toBe(true); + expect(result.resumedDataValid).toBe(true); + expect(result.dataLengthsMatch).toBe(true); + }); + + test("should handle extremely small fftSize", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await 
import("/dist/index.js"); + + try { + const analyser = new Analyser(undefined, { fftSize: 32 }); + + return { + success: true, + fftSize: analyser.node.fftSize, + frequencyBinCount: analyser.node.frequencyBinCount, + frequencyDataLength: analyser.frequencyData.length, + }; + } catch (error) { + return { + success: false, + error: error.name, + message: error.message, + }; + } + }); + + if (result.success) { + expect(result.fftSize).toBe(32); + expect(result.frequencyBinCount).toBe(16); + expect(result.frequencyDataLength).toBe(16); + } else { + expect(result.error).toBeDefined(); + } + }); + + test("should handle extremely large fftSize", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + try { + const analyser = new Analyser(undefined, { fftSize: 32768 }); + + return { + success: true, + fftSize: analyser.node.fftSize, + frequencyBinCount: analyser.node.frequencyBinCount, + frequencyDataLength: analyser.frequencyData.length, + }; + } catch (error) { + return { + success: false, + error: error.name, + message: error.message, + }; + } + }); + + if (result.success) { + expect(result.fftSize).toBe(32768); + expect(result.frequencyBinCount).toBe(16384); + expect(result.frequencyDataLength).toBe(16384); + } else { + expect(result.error).toBeDefined(); + } + }); + + test("should handle chain of audio node connections", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, getMediaStream } = await import("/dist/index.js"); + + const analyser = new Analyser(); + const stream = await getMediaStream(); + const context = analyser.context; + + // Create a chain of audio nodes + const source = context.createMediaStreamSource(stream); + const gainNode1 = context.createGain(); + const gainNode2 = context.createGain(); + const delayNode = context.createDelay(); + + // Connect in chain + source.connect(gainNode1); + gainNode1.connect(delayNode); + 
delayNode.connect(gainNode2); + gainNode2.connect(analyser.node); + analyser.connect(context.destination); + + // Set some parameters + gainNode1.gain.value = 0.5; + gainNode2.gain.value = 0.8; + delayNode.delayTime.value = 0.1; + + await new Promise(resolve => setTimeout(resolve, 200)); + + const volume = analyser.volume; + const frequencyData = analyser.frequencyData; + + stream.getTracks().forEach(track => track.stop()); + await context.close(); + + return { + volumeValid: typeof volume === "number" && Number.isFinite(volume), + frequencyDataValid: frequencyData instanceof Uint8Array, + chainWorked: volume >= 0 && frequencyData.length > 0, + }; + }); + + expect(result.volumeValid).toBe(true); + expect(result.frequencyDataValid).toBe(true); + expect(result.chainWorked).toBe(true); + }); + + test("should handle rapid connect/disconnect cycles", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, getMediaStream } = await import("/dist/index.js"); + + const analyser = new Analyser(); + const stream = await getMediaStream(); + const context = analyser.context; + const source = context.createMediaStreamSource(stream); + + const cycles = 10; + let successfulCycles = 0; + + for (let i = 0; i < cycles; i++) { + try { + // Connect + source.connect(analyser.node); + await new Promise(resolve => setTimeout(resolve, 10)); + + const volume = analyser.volume; + + // Disconnect + source.disconnect(analyser.node); + + if (typeof volume === "number" && Number.isFinite(volume)) { + successfulCycles++; + } + } catch (error) { + // Some cycles might fail, that's okay + } + } + + stream.getTracks().forEach(track => track.stop()); + await context.close(); + + return { + totalCycles: cycles, + successfulCycles, + successRate: successfulCycles / cycles, + }; + }); + + expect(result.successfulCycles).toBeGreaterThan(0); + expect(result.successRate).toBeGreaterThan(0.5); + }); + + test("should handle multiple streams to single analyser", async ({ 
page }) => { + const result = await page.evaluate(async () => { + const { Analyser, getMediaStream } = await import("/dist/index.js"); + + const analyser = new Analyser(); + const context = analyser.context; + + // Create multiple streams + const stream1 = await getMediaStream(); + const stream2 = await getMediaStream(); + + const source1 = context.createMediaStreamSource(stream1); + const source2 = context.createMediaStreamSource(stream2); + const merger = context.createChannelMerger(2); + + // Connect both streams through a merger + source1.connect(merger, 0, 0); + source2.connect(merger, 0, 1); + merger.connect(analyser.node); + + await new Promise(resolve => setTimeout(resolve, 200)); + + const volume = analyser.volume; + const frequencyData = analyser.frequencyData; + + stream1.getTracks().forEach(track => track.stop()); + stream2.getTracks().forEach(track => track.stop()); + await context.close(); + + return { + volumeValid: typeof volume === "number" && Number.isFinite(volume), + frequencyDataValid: frequencyData instanceof Uint8Array && frequencyData.length > 0, + multiStreamWorked: volume >= 0, + }; + }); + + expect(result.volumeValid).toBe(true); + expect(result.frequencyDataValid).toBe(true); + expect(result.multiStreamWorked).toBe(true); + }); + + test("should handle analyser with different sample rates", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const sampleRates = [8000, 16000, 22050, 44100, 48000]; + const results = []; + + for (const sampleRate of sampleRates) { + try { + const context = new AudioContext({ sampleRate }); + const analyser = new Analyser(context); + + // Create a test tone + const oscillator = context.createOscillator(); + oscillator.frequency.value = 440; // A4 note + oscillator.connect(analyser.node); + oscillator.start(); + + await new Promise(resolve => setTimeout(resolve, 100)); + + const volume = analyser.volume; + const frequencyData = 
analyser.frequencyData; + + oscillator.stop(); + await context.close(); + + results.push({ + sampleRate, + success: true, + volume: typeof volume === "number" && volume > 0, + frequencyData: frequencyData instanceof Uint8Array && frequencyData.length > 0, + }); + } catch (error) { + results.push({ + sampleRate, + success: false, + error: error.name, + }); + } + } + + return { + results, + successfulRates: results.filter(r => r.success).length, + totalRates: results.length, + }; + }); + + expect(result.successfulRates).toBeGreaterThan(0); + result.results.forEach(r => { + if (r.success) { + expect(r.volume).toBe(true); + expect(r.frequencyData).toBe(true); + } + }); + }); +}); + +test.describe("Utility Functions", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should handle getDevices with empty filters", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getDevices } = await import("/dist/index.js"); + + const devices = await getDevices({}); + + return { + isArray: Array.isArray(devices), + length: devices.length, + hasAudioInputs: devices.some(d => d.kind === 'audioinput'), + }; + }); + + expect(result.isArray).toBe(true); + expect(result.length).toBeGreaterThanOrEqual(0); + }); + + test("should handle getDevices with multiple filter criteria", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getDevices } = await import("/dist/index.js"); + + // Get all devices first + const allDevices = await getDevices(); + + if (allDevices.length === 0) { + return { hasDevices: false }; + } + + // Find a device with multiple properties to filter by + const sampleDevice = allDevices[0]; + + const filteredDevices = await getDevices({ + kind: sampleDevice.kind, + groupId: sampleDevice.groupId, + }); + + return { + hasDevices: true, + allDevicesCount: allDevices.length, + filteredCount: filteredDevices.length, + filterMatches: filteredDevices.every(d => + 
d.kind === sampleDevice.kind && d.groupId === sampleDevice.groupId + ), + }; + }); + + if (result.hasDevices) { + expect(result.filteredCount).toBeGreaterThan(0); + expect(result.filteredCount).toBeLessThanOrEqual(result.allDevicesCount); + expect(result.filterMatches).toBe(true); + } + }); + + test("should handle getDevices with non-existent filter values", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getDevices } = await import("/dist/index.js"); + + const devices = await getDevices({ + kind: 'nonexistent-kind', + deviceId: 'fake-device-id-12345', + }); + + return { + isArray: Array.isArray(devices), + length: devices.length, + isEmpty: devices.length === 0, + }; + }); + + expect(result.isArray).toBe(true); + expect(result.isEmpty).toBe(true); + expect(result.length).toBe(0); + }); + + test("should handle getMediaStream with various constraints", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getMediaStream } = await import("/dist/index.js"); + + const testCases = [ + { audio: true, video: false }, + { audio: { echoCancellation: false }, video: false }, + { audio: { noiseSuppression: true }, video: false }, + { audio: { sampleRate: 44100 }, video: false }, + { audio: { channelCount: 1 }, video: false }, + ]; + + const results = []; + + for (const constraints of testCases) { + try { + const stream = await getMediaStream(constraints); + + const audioTracks = stream.getAudioTracks(); + const videoTracks = stream.getVideoTracks(); + + results.push({ + constraints, + success: true, + audioTracksCount: audioTracks.length, + videoTracksCount: videoTracks.length, + hasAudio: audioTracks.length > 0, + hasVideo: videoTracks.length > 0, + }); + + // Cleanup + stream.getTracks().forEach(track => track.stop()); + } catch (error) { + results.push({ + constraints, + success: false, + error: error.name, + }); + } + } + + return { + results, + successfulCases: results.filter(r => r.success).length, + 
totalCases: results.length, + }; + }); + + expect(result.successfulCases).toBeGreaterThan(0); + + result.results.forEach(r => { + if (r.success) { + expect(r.hasAudio).toBe(true); + expect(r.hasVideo).toBe(false); + expect(r.audioTracksCount).toBeGreaterThan(0); + expect(r.videoTracksCount).toBe(0); + } + }); + }); + + test("should handle getMediaStream with invalid constraints", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getMediaStream } = await import("/dist/index.js"); + + const invalidCases = [ + { audio: false, video: false }, // No media requested + { audio: { deviceId: 'fake-device-id' } }, // Non-existent device + { audio: { sampleRate: { exact: 999999 } } }, // Impossible sample rate + ]; + + const results = []; + + for (const constraints of invalidCases) { + try { + const stream = await getMediaStream(constraints); + stream.getTracks().forEach(track => track.stop()); + results.push({ constraints, success: true }); + } catch (error) { + results.push({ + constraints, + success: false, + error: error.name, + message: error.message, + }); + } + } + + return { + results, + allFailed: results.every(r => !r.success), + errorTypes: [...new Set(results.map(r => r.error).filter(Boolean))], + }; + }); + + // Most invalid constraints should fail + expect(result.errorTypes.length).toBeGreaterThan(0); + + result.results.forEach(r => { + if (!r.success) { + expect(r.error).toBeDefined(); + } + }); + }); + + test("should handle concurrent getMediaStream calls", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getMediaStream } = await import("/dist/index.js"); + + const concurrentCalls = 5; + const promises = Array(concurrentCalls).fill().map(() => + getMediaStream({ audio: true, video: false }) + ); + + try { + const streams = await Promise.all(promises); + + const results = streams.map(stream => ({ + isMediaStream: stream instanceof MediaStream, + audioTracksCount: stream.getAudioTracks().length, 
+ trackStates: stream.getTracks().map(t => t.readyState), + })); + + // Cleanup + streams.forEach(stream => { + stream.getTracks().forEach(track => track.stop()); + }); + + return { + success: true, + streamCount: streams.length, + allValidStreams: results.every(r => r.isMediaStream && r.audioTracksCount > 0), + results, + }; + } catch (error) { + return { + success: false, + error: error.name, + message: error.message, + }; + } + }); + + expect(result.success).toBe(true); + expect(result.streamCount).toBe(5); + expect(result.allValidStreams).toBe(true); + }); + + test("should handle getDevices with undefined filter values", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getDevices } = await import("/dist/index.js"); + + const devices = await getDevices({ + kind: undefined, + deviceId: undefined, + label: undefined, + }); + + return { + isArray: Array.isArray(devices), + length: devices.length, + sameAsUnfiltered: true, // We'll compare this + }; + }); + + expect(result.isArray).toBe(true); + expect(result.length).toBeGreaterThanOrEqual(0); + }); + + test("should handle getMediaStream with default constraints", async ({ page }) => { + const result = await page.evaluate(async () => { + const { getMediaStream } = await import("/dist/index.js"); + + try { + // Call with no arguments (should use defaults) + const stream = await getMediaStream(); + + const audioTracks = stream.getAudioTracks(); + const videoTracks = stream.getVideoTracks(); + + stream.getTracks().forEach(track => track.stop()); + + return { + success: true, + audioTracksCount: audioTracks.length, + videoTracksCount: videoTracks.length, + defaultsApplied: audioTracks.length > 0 && videoTracks.length === 0, + }; + } catch (error) { + return { + success: false, + error: error.name, + }; + } + }); + + expect(result.success).toBe(true); + expect(result.defaultsApplied).toBe(true); + expect(result.audioTracksCount).toBeGreaterThan(0); + 
expect(result.videoTracksCount).toBe(0); + }); +}); diff --git a/tests/integration.spec.js b/tests/integration.spec.js new file mode 100644 index 0000000..f1a72ab --- /dev/null +++ b/tests/integration.spec.js @@ -0,0 +1,606 @@ +import { expect, test } from "@playwright/test"; + +test.describe("Integration Tests", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should integrate Recorder with custom Analyser", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Recorder, Analyser, getMediaStream } = await import( + "/dist/index.js" + ); + + const stream = await getMediaStream(); + const customContext = new AudioContext(); + const customAnalyser = new Analyser(customContext, { fftSize: 512 }); + const recorder = new Recorder(stream, { + context: customContext, + defaultAnalyser: customAnalyser, + }); + + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 300)); + + const volume1 = recorder.volume; + const frequencyData1 = recorder.frequencyData; + + await new Promise((resolve) => setTimeout(resolve, 200)); + + const volume2 = recorder.volume; + const blob = await recorder.stop(); + + return { + customFFTSize: customAnalyser.node.fftSize, + recorderFFTSize: recorder.analyser.node.fftSize, + analysersMatch: recorder.analyser === customAnalyser, + contextsMatch: recorder.context === customContext, + volume1Valid: typeof volume1 === "number" && Number.isFinite(volume1), + volume2Valid: typeof volume2 === "number" && Number.isFinite(volume2), + frequencyDataValid: + frequencyData1 instanceof Uint8Array && frequencyData1.length === 256, + recordingValid: blob instanceof Blob && blob.size > 0, + }; + }); + + expect(result.customFFTSize).toBe(512); + expect(result.recorderFFTSize).toBe(512); + expect(result.analysersMatch).toBe(true); + expect(result.contextsMatch).toBe(true); + expect(result.volume1Valid).toBe(true); + expect(result.volume2Valid).toBe(true); + 
expect(result.frequencyDataValid).toBe(true);
+ expect(result.recordingValid).toBe(true);
+ });
+
+ test("should integrate Monitor with Recorder using same stream", async ({
+ page,
+ }) => {
+ const result = await page.evaluate(async () => {
+ const { Monitor, Recorder, getMediaStream } = await import(
+ "/dist/index.js"
+ );
+
+ const stream = await getMediaStream();
+ const monitor = new Monitor(stream, {});
+ const recorder = new Recorder(stream);
+
+ await recorder.start();
+ await new Promise((resolve) => setTimeout(resolve, 200));
+
+ const monitorVolume = monitor.volume;
+ const recorderVolume = recorder.volume;
+ const monitorFreqData = monitor.frequencyData;
+ const recorderFreqData = recorder.frequencyData;
+
+ const blob = await recorder.stop();
+
+ return {
+ bothHaveVolume:
+ typeof monitorVolume === "number" &&
+ typeof recorderVolume === "number",
+ bothHaveFreqData:
+ monitorFreqData instanceof Uint8Array &&
+ recorderFreqData instanceof Uint8Array,
+ sameStreamUsed: true, // Can't directly compare streams, but they should work together
+ recordingSuccessful: blob instanceof Blob && blob.size > 0,
+ dataLengthsMatch: monitorFreqData.length === recorderFreqData.length,
+ };
+ });
+
+ expect(result.bothHaveVolume).toBe(true);
+ expect(result.bothHaveFreqData).toBe(true);
+ expect(result.recordingSuccessful).toBe(true);
+ expect(result.dataLengthsMatch).toBe(true);
+ });
+
+ test("should handle multiple components with shared AudioContext", async ({
+ page,
+ }) => {
+ const result = await page.evaluate(async () => {
+ const { Monitor, Recorder, Analyser, getMediaStream } = await import(
+ "/dist/index.js"
+ );
+
+ const sharedContext = new AudioContext();
+ const stream = await getMediaStream();
+
+ const analyser = new Analyser(sharedContext, { fftSize: 1024 });
+ const monitor = new Monitor(stream, {
+ context: sharedContext,
+ defaultAnalyser: analyser,
+ });
+ const recorder = new Recorder(stream, { context: sharedContext });
+
+ await 
recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 300)); + + const contexts = { + analyser: analyser.context === sharedContext, + monitor: monitor.context === sharedContext, + recorder: recorder.context === sharedContext, + }; + + const volumes = { + monitor: monitor.volume, + recorder: recorder.volume, + }; + + const blob = await recorder.stop(); + + return { + allUseSharedContext: Object.values(contexts).every(Boolean), + allVolumesValid: Object.values(volumes).every( + (v) => typeof v === "number" && Number.isFinite(v), + ), + recordingWorked: blob instanceof Blob && blob.size > 0, + contexts, + volumes, + }; + }); + + expect(result.allUseSharedContext).toBe(true); + expect(result.allVolumesValid).toBe(true); + expect(result.recordingWorked).toBe(true); + }); + + test("should handle worklet integration across components", async ({ + page, + }) => { + const result = await page.evaluate(async () => { + const { Monitor, Recorder, getMediaStream } = await import( + "/dist/index.js" + ); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + const recorder = new Recorder(stream, { context: monitor.context }); + + const workletCode = ` + class TestIntegrationWorklet extends AudioWorkletProcessor { + process(inputs, outputs) { + // Simple pass-through + if (inputs[0] && outputs[0]) { + for (let channel = 0; channel < inputs[0].length; channel++) { + outputs[0][channel].set(inputs[0][channel]); + } + } + return true; + } + } + registerProcessor('test-integration-worklet', TestIntegrationWorklet); + `; + const workletUrl = `data:application/javascript,${encodeURIComponent(workletCode)}`; + + try { + const monitorWorklet = await monitor.installWorklet( + "test-integration-worklet", + workletUrl, + ); + const recorderWorklet = await recorder.installWorklet( + "test-integration-worklet", + workletUrl, + ); + + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 300)); + + const volume = 
recorder.volume; + const blob = await recorder.stop(); + + return { + success: true, + monitorWorkletValid: monitorWorklet instanceof AudioWorkletNode, + recorderWorkletValid: recorderWorklet instanceof AudioWorkletNode, + volumeValid: typeof volume === "number" && Number.isFinite(volume), + recordingValid: blob instanceof Blob && blob.size > 0, + }; + } catch (error) { + return { success: false, error: error.message }; + } + }); + + expect(result.success).toBe(true); + expect(result.monitorWorkletValid).toBe(true); + expect(result.recorderWorkletValid).toBe(true); + expect(result.volumeValid).toBe(true); + expect(result.recordingValid).toBe(true); + }); +}); + +test.describe("Error Handling Tests", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should handle invalid fftSize gracefully", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const results = []; + + // Test invalid fftSizes + const invalidSizes = [0, 1, 3, 1025, -1, 65537]; + + for (const size of invalidSizes) { + try { + const analyser = new Analyser(undefined, { fftSize: size }); + results.push({ + size, + success: true, + actualSize: analyser.node.fftSize, + }); + } catch (error) { + results.push({ size, success: false, error: error.name }); + } + } + + return results; + }); + + // Should either throw errors or clamp to valid values + result.forEach((r) => { + if (r.success) { + expect(r.actualSize).toBeGreaterThan(0); + expect(r.actualSize).toBeLessThanOrEqual(32768); + // Should be power of 2 + expect(Math.log2(r.actualSize) % 1).toBe(0); + } else { + expect(r.error).toBeDefined(); + } + }); + }); + + test("should handle invalid decibel ranges", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser } = await import("/dist/index.js"); + + const testCases = [ + { minDecibels: -30, maxDecibels: -40 }, // min > max + { 
minDecibels: 0, maxDecibels: 10 }, // positive values + { minDecibels: -200, maxDecibels: -180 }, // very low values + ]; + + return testCases.map((options) => { + try { + const analyser = new Analyser(undefined, options); + return { + success: true, + minDecibels: analyser.node.minDecibels, + maxDecibels: analyser.node.maxDecibels, + options, + }; + } catch (error) { + return { success: false, error: error.name, options }; + } + }); + }); + + result.forEach((r) => { + if (r.success) { + expect(r.minDecibels).toBeLessThan(r.maxDecibels); + } else { + expect(r.error).toBeDefined(); + } + }); + }); + + test("should handle disconnected media streams", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, Recorder, getMediaStream } = await import( + "/dist/index.js" + ); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + const recorder = new Recorder(stream); + + // Stop all tracks to simulate disconnection + stream.getTracks().forEach((track) => track.stop()); + + await new Promise((resolve) => setTimeout(resolve, 100)); + + try { + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 200)); + + const volume = monitor.volume; + const blob = await recorder.stop(); + + return { + success: true, + volume: typeof volume === "number", + recordingSize: blob.size, + }; + } catch (error) { + return { + success: false, + error: error.name, + message: error.message, + }; + } + }); + + // Should either handle gracefully or throw appropriate error + if (result.success) { + expect(result.volume).toBe(true); + } else { + expect(result.error).toBeDefined(); + } + }); + + test("should handle worklet installation failures", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, getMediaStream } = await import("/dist/index.js"); + + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + + const testCases = [ + { name: 
"invalid-worklet", url: "/nonexistent/worklet.js" }, + { + name: "malformed-worklet", + url: "data:application/javascript,invalid javascript code here!", + }, + { name: "empty-worklet", url: "data:application/javascript," }, + ]; + + const results = []; + + for (const testCase of testCases) { + try { + await monitor.installWorklet(testCase.name, testCase.url); + results.push({ ...testCase, success: true }); + } catch (error) { + results.push({ + ...testCase, + success: false, + error: error.name, + message: error.message, + }); + } + } + + return results; + }); + + result.forEach((r) => { + // All should fail with appropriate errors + expect(r.success).toBe(false); + expect(r.error).toBeDefined(); + }); + }); +}); + +test.describe("Resource Management Tests", () => { + test.beforeEach(async ({ page }) => { + await page.goto("/tests/index.html"); + }); + + test("should properly clean up AudioContext resources", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, Monitor, getMediaStream } = await import( + "/dist/index.js" + ); + + const contexts = []; + const components = []; + + // Create multiple components + for (let i = 0; i < 3; i++) { + const context = new AudioContext(); + const stream = await getMediaStream(); + const analyser = new Analyser(context); + const monitor = new Monitor(stream, { context }); + + contexts.push(context); + components.push({ analyser, monitor, stream }); + } + + // Check initial states + const initialStates = contexts.map((ctx) => ctx.state); + + // Clean up + for (const { stream } of components) { + stream.getTracks().forEach((track) => track.stop()); + } + + for (const context of contexts) { + await context.close(); + } + + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Check final states + const finalStates = contexts.map((ctx) => ctx.state); + + return { + initialStates, + finalStates, + properlyCreated: initialStates.every((state) => + ["running", 
"suspended"].includes(state), + ), + properlyClosed: finalStates.every((state) => state === "closed"), + }; + }); + + expect(result.properlyCreated).toBe(true); + expect(result.properlyClosed).toBe(true); + }); + + test("should handle rapid creation and destruction", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, Monitor, Recorder, getMediaStream } = await import( + "/dist/index.js" + ); + + const iterations = 5; + let successCount = 0; + const errors = []; + + for (let i = 0; i < iterations; i++) { + try { + const stream = await getMediaStream(); + const context = new AudioContext(); + + const analyser = new Analyser(context); + const monitor = new Monitor(stream, { context }); + const recorder = new Recorder(stream, { context }); + + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 50)); + + const volume = monitor.volume; + const blob = await recorder.stop(); + + // Cleanup + stream.getTracks().forEach((track) => track.stop()); + await context.close(); + + if (typeof volume === "number" && blob instanceof Blob) { + successCount++; + } + } catch (error) { + errors.push({ + iteration: i, + error: error.name, + message: error.message, + }); + } + } + + return { + iterations, + successCount, + successRate: successCount / iterations, + errors, + }; + }); + + expect(result.successRate).toBeGreaterThanOrEqual(0.8); // Allow some failures due to rapid creation + expect(result.successCount).toBeGreaterThan(0); + }); + + test("should handle memory pressure simulation", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Analyser, getMediaStream } = await import("/dist/index.js"); + + const analysers = []; + const streams = []; + let createdCount = 0; + const maxAttempts = 10; + + try { + // Create many analysers to test resource limits + for (let i = 0; i < maxAttempts; i++) { + const stream = await getMediaStream(); + const analyser = new Analyser(); + const source = 
analyser.context.createMediaStreamSource(stream); + source.connect(analyser.node); + + analysers.push(analyser); + streams.push(stream); + createdCount++; + + // Test that they're still working + await new Promise((resolve) => setTimeout(resolve, 10)); + const volume = analyser.volume; + + if (typeof volume !== "number") { + break; + } + } + + return { + createdCount, + allWorking: true, + maxReached: createdCount === maxAttempts, + }; + } catch (error) { + return { + createdCount, + allWorking: false, + error: error.name, + maxReached: false, + }; + } finally { + // Cleanup + streams.forEach((stream) => { + stream.getTracks().forEach((track) => track.stop()); + }); + + for (const analyser of analysers) { + try { + await analyser.context.close(); + } catch (e) { + // Ignore cleanup errors + } + } + } + }); + + expect(result.createdCount).toBeGreaterThan(0); + // Should handle at least a few instances + expect(result.createdCount).toBeGreaterThanOrEqual(3); + }); + + test("should handle concurrent operations", async ({ page }) => { + const result = await page.evaluate(async () => { + const { Monitor, Recorder, getMediaStream } = await import( + "/dist/index.js" + ); + + const concurrentOps = 3; + const promises = []; + + for (let i = 0; i < concurrentOps; i++) { + promises.push( + (async () => { + const stream = await getMediaStream(); + const monitor = new Monitor(stream, {}); + const recorder = new Recorder(stream); + + await recorder.start(); + await new Promise((resolve) => setTimeout(resolve, 200)); + + const volume = monitor.volume; + const frequencyData = monitor.frequencyData; + const blob = await recorder.stop(); + + stream.getTracks().forEach((track) => track.stop()); + await monitor.context.close(); + + return { + volume: typeof volume === "number", + frequencyData: frequencyData instanceof Uint8Array, + recording: blob instanceof Blob && blob.size > 0, + }; + })(), + ); + } + + try { + const results = await Promise.all(promises); + + return { + success: 
true, + results, + allVolumesValid: results.every((r) => r.volume), + allFrequencyDataValid: results.every((r) => r.frequencyData), + allRecordingsValid: results.every((r) => r.recording), + }; + } catch (error) { + return { + success: false, + error: error.name, + message: error.message, + }; + } + }); + + expect(result.success).toBe(true); + expect(result.allVolumesValid).toBe(true); + expect(result.allFrequencyDataValid).toBe(true); + expect(result.allRecordingsValid).toBe(true); + }); +}); diff --git a/tests/worklets.spec.js b/tests/worklets.spec.js index 3006d22..1a11e37 100644 --- a/tests/worklets.spec.js +++ b/tests/worklets.spec.js @@ -38,7 +38,7 @@ test.describe("AudioWorklets", () => { const result = await page.evaluate(async () => { const { Recorder, getMediaStream } = await import("/dist/index.js"); - const stream = getMediaStream(); + const stream = await getMediaStream(); const recorder = new Recorder(stream); let callbackParams = {}; From 4914cfbca5220ae2f1eedd024ad439d473b98569 Mon Sep 17 00:00:00 2001 From: Tyler Nickerson Date: Sat, 11 Oct 2025 17:02:16 -0400 Subject: [PATCH 5/5] update lint staged --- package.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/package.json b/package.json index 3430f23..1d4dd83 100644 --- a/package.json +++ b/package.json @@ -30,8 +30,7 @@ }, "lint-staged": { "*.{ts,js}": [ - "biome format", - "biome lint --fix" + "mise run fix" ] } }