Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/test-app-screen/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
.yarn
1 change: 1 addition & 0 deletions packages/test-app-screen/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
export {default as TestScreen} from './src/Test';
12 changes: 12 additions & 0 deletions packages/test-app-screen/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
{
"name": "rn-audio-api-test-screen",
"version": "1.0.0",
"description": "react-native test screen for rn-audio-api",
"author": "michal.dydek@swmansion.com",
"type": "commonjs",
"main": "index.ts",
"devDependencies": {
"react-native": "0.82.0",
"react-native-audio-api": "^0.9.3"
}
}
117 changes: 117 additions & 0 deletions packages/test-app-screen/src/AudioBufferTest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
import { AudioContext, AudioBuffer } from "react-native-audio-api";
import { PCM_DATA } from "./constants";

// Container/codec extensions the decoder is expected to handle; each is fetched
// from the same espressif sample (gs-16b-2c-44100hz.<ext>) in the formats test.
const SUPPORTED_FORMATS = ['mp3', 'wav', 'aac', 'flac', 'ogg', 'opus', 'm4a', 'mp4'];
// Expected duration (seconds) and channel count of the gs-16b-2c-44100hz sample.
const EXPECTED_BUFFER_DURATION = 16;
const EXPECTED_CHANNELS = 2;

// channel count -> sample URL with exactly that many channels.
const CHANNELS_MAP: Map<number, string> = new Map([
[1, 'https://dl.espressif.com/dl/audio/gs-16b-1c-44100hz.mp3'],
[2, 'https://dl.espressif.com/dl/audio/gs-16b-2c-44100hz.mp3'],
[4, 'https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/SoundCardAttrition/drmapan.wav'],
[6, 'https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/Microsoft/6_Channel_ID.wav']
]);

// channel count -> expected duration in seconds of the matching CHANNELS_MAP sample.
const DURATIONS_MAP: Map<number, number> = new Map([
[1, 16],
[2, 16],
[4, 4.78],
[6, 5.84]
]);

/**
 * Downloads the same sample in every supported format, decodes each one,
 * validates duration/channel count, then plays each decoded buffer for ~4s.
 *
 * Fixes over the previous version:
 * - Decoded buffers are paired with their format name. Previously a failed
 *   decode skipped the push into a plain array while the playback loop still
 *   indexed `SUPPORTED_FORMATS[i]`, so one failure mislabeled every later buffer.
 * - Uses straight async/await instead of mixing `await` with `.then` chains.
 * - A failed fetch is reported via `setTestingInfo` instead of escaping as an
 *   unhandled rejection.
 */
export const audioBufferFormatsTest = async (audioContextRef: React.RefObject<AudioContext | null>, setTestingInfo: (value: React.SetStateAction<string>) => void) => {
  const decoded: { format: string; buffer: AudioBuffer }[] = [];

  for (const format of SUPPORTED_FORMATS) {
    const url = 'https://dl.espressif.com/dl/audio/gs-16b-2c-44100hz.' + format;
    setTestingInfo(`Loading audio buffer: ${format}`);
    try {
      const response = await fetch(url, {
        headers: {
          'User-Agent': 'Mozilla/5.0 (Android; Mobile; rv:122.0) Gecko/122.0 Firefox/122.0',
        }
      });
      const arrayBuffer = await response.arrayBuffer();
      const audioBuffer = await audioContextRef.current!.decodeAudioData(arrayBuffer);
      console.log(`Decoded ${format} buffer:`, audioBuffer);
      // Allow 300ms of slack around the expected sample length.
      if (Math.abs(audioBuffer.duration - EXPECTED_BUFFER_DURATION) > 0.3) {
        throw new Error(`Unexpected buffer duration: ${audioBuffer.duration}`);
      }
      if (audioBuffer.numberOfChannels !== EXPECTED_CHANNELS) {
        throw new Error(`Unexpected number of channels: ${audioBuffer.numberOfChannels}`);
      }
      decoded.push({ format, buffer: audioBuffer });
    } catch (error) {
      setTestingInfo(`Error decoding audio buffer: ${format} - ${error}`);
    }
  }

  for (let i = 0; i < decoded.length; i++) {
    const { format, buffer } = decoded[i];
    setTestingInfo(`Playing ${format} buffer`);
    const bufferSource = audioContextRef.current!.createBufferSource();
    bufferSource.buffer = buffer;
    bufferSource.connect(audioContextRef.current!.destination);
    bufferSource.start();
    // Let each buffer play for 4 seconds before stopping it.
    await new Promise(resolve => setTimeout(resolve, 4000));
    if (i === decoded.length - 1) {
      // Only the last source reports overall completion when it ends.
      bufferSource.onEnded = () => {
        setTestingInfo('Audio buffer test completed.');
      };
    }
    bufferSource.stop();
  }
}

/**
 * Downloads one sample per channel count (1/2/4/6), decodes it, validates
 * duration and channel count, and plays each decoded buffer for ~4s.
 *
 * Fixes over the previous version:
 * - Iterates `CHANNELS_MAP` with `for..of` over its entries. The old code used
 *   `for..in`, which walks an object's enumerable properties — a `Map` has
 *   none, so the loop body never executed at all.
 * - Detects the final iteration with an entry counter against `CHANNELS_MAP.size`.
 *   The old check compared the channel count against `Object.keys(CHANNELS_MAP).length`,
 *   which is 0 for a `Map` (and a channel count is not an index anyway).
 * - Uses straight async/await instead of mixing `await` with `.then` chains,
 *   and reports fetch failures instead of letting the rejection escape.
 */
export const audioBufferChannelsTest = async (audioContextRef: React.RefObject<AudioContext | null>, setTestingInfo: (value: React.SetStateAction<string>) => void) => {
  let processed = 0;
  for (const [channels, url] of CHANNELS_MAP) {
    processed += 1;
    const isLast = processed === CHANNELS_MAP.size;
    const expectedDuration = DURATIONS_MAP.get(channels)!;
    setTestingInfo(`Loading audio buffer with ${channels} channels`);
    try {
      const response = await fetch(url, {
        headers: {
          'User-Agent': 'Mozilla/5.0 (Android; Mobile; rv:122.0) Gecko/122.0 Firefox/122.0',
        }
      });
      const arrayBuffer = await response.arrayBuffer();
      const audioBuffer = await audioContextRef.current!.decodeAudioData(arrayBuffer);
      console.log(`Decoded buffer with ${channels} channels:`, audioBuffer);
      // Allow 300ms of slack around the expected sample length.
      if (Math.abs(audioBuffer.duration - expectedDuration) > 0.3) {
        throw new Error(`Unexpected buffer duration: ${audioBuffer.duration}`);
      }
      if (audioBuffer.numberOfChannels !== channels) {
        throw new Error(`Unexpected number of channels: ${audioBuffer.numberOfChannels}`);
      }
      setTestingInfo(`Playing buffer with ${channels} channels`);
      const bufferSource = audioContextRef.current!.createBufferSource();
      bufferSource.buffer = audioBuffer;
      bufferSource.connect(audioContextRef.current!.destination);
      bufferSource.start();
      // Let each buffer play for 4 seconds before stopping it.
      await new Promise(resolve => setTimeout(resolve, 4000));
      if (isLast) {
        // Only the last source reports overall completion when it ends.
        bufferSource.onEnded = () => {
          setTestingInfo('Audio buffer channels test completed.');
        };
      }
      bufferSource.stop();
    } catch (error) {
      setTestingInfo(`Error decoding audio buffer with ${channels} channels - ${error}`);
    }
  }
}

/**
 * Decodes base64-encoded PCM data (48kHz, 1 channel, interleaved flag set)
 * into an AudioBuffer, plays it, and stops playback 5 seconds from now.
 */
export const audioBufferBase64Test = async (audioContextRef: React.RefObject<AudioContext | null>, setTestingInfo: (value: React.SetStateAction<string>) => void) => {
  const ctx = audioContextRef.current!;
  const decodedBuffer = await ctx.decodePCMInBase64(PCM_DATA, 48000, 1, true);

  const source = ctx.createBufferSource();
  source.buffer = decodedBuffer;
  source.connect(ctx.destination);
  source.start();
  // Schedule the stop on the audio clock rather than stopping manually.
  source.stop(ctx.currentTime + 5);

  setTestingInfo('Playing audio buffer decoded from Base64 PCM data');
  await new Promise(resolve => setTimeout(resolve, 5000));
  setTestingInfo('Audio buffer Base64 test completed.');
}
32 changes: 32 additions & 0 deletions packages/test-app-screen/src/OscillatorTest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import { AudioContext } from "react-native-audio-api";

/**
 * Plays a bare oscillator for 2 seconds, detuning it by +100 cents
 * one second in.
 */
export const oscillatorTestWithDetune = (audioContextRef: React.RefObject<AudioContext | null>) => {
  const ctx = audioContextRef.current!;
  const osc = ctx.createOscillator();

  osc.connect(ctx.destination);
  osc.start();
  osc.detune.setValueAtTime(100, ctx.currentTime + 1);
  osc.stop(ctx.currentTime + 2);
}

/**
 * Routes an oscillator through a gain node and ramps the gain
 * 0.5 -> 0.0 (at +1.5s) -> 1.5 (at +3s), stopping the tone at +4.5s.
 */
export const oscillatorTestWithGain = (audioContextRef: React.RefObject<AudioContext | null>) => {
  const ctx = audioContextRef.current!;
  const osc = ctx.createOscillator();
  const gainNode = ctx.createGain();

  osc.connect(gainNode);
  gainNode.connect(ctx.destination);
  osc.start();

  gainNode.gain.value = 0.5;
  gainNode.gain.linearRampToValueAtTime(0.0, ctx.currentTime + 1.5);
  gainNode.gain.linearRampToValueAtTime(1.5, ctx.currentTime + 3);

  osc.stop(ctx.currentTime + 4.5);
}

/**
 * Routes an oscillator through a stereo panner and sweeps the pan
 * fully right (at +1.5s) then fully left (at +3s), stopping at +4.5s.
 */
export const oscillatorTestWithStereoPanner = (audioContextRef: React.RefObject<AudioContext | null>) => {
  const ctx = audioContextRef.current!;
  const osc = ctx.createOscillator();
  const panner = ctx.createStereoPanner();

  osc.connect(panner);
  panner.connect(ctx.destination);
  osc.start();

  panner.pan.linearRampToValueAtTime(1.0, ctx.currentTime + 1.5);
  panner.pan.linearRampToValueAtTime(-1.0, ctx.currentTime + 3);

  osc.stop(ctx.currentTime + 4.5);
}
29 changes: 29 additions & 0 deletions packages/test-app-screen/src/RecorderTest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import { AudioContext, AudioRecorder, AudioBuffer } from 'react-native-audio-api';

/**
 * Records microphone input for 5 seconds, pushing each ready chunk into
 * the caller-supplied `buffers` array as it arrives.
 *
 * The recorder buffer length equals one second of samples at the context's
 * sample rate, so chunks arrive roughly once per second.
 */
export const recorderTest = (audioContextRef: React.RefObject<AudioContext | null>, buffers: AudioBuffer[]) => {
  const ctx = audioContextRef.current!;
  const recorder = new AudioRecorder({
    sampleRate: ctx.sampleRate,
    bufferLengthInSamples: ctx.sampleRate
  });

  recorder.onAudioReady(({ buffer, numFrames }) => {
    console.log('Audio recorder buffer ready:', numFrames);
    buffers.push(buffer);
  });

  recorder.start();
  setTimeout(() => recorder.stop(), 5000);
}

/**
 * Plays back the recorded chunks gap-free by scheduling each one on the
 * audio clock to start exactly when the previous one ends, beginning
 * 0.1s from "now".
 */
export const recorderPlaybackTest = async (audioContextRef: React.RefObject<AudioContext | null>, buffers: AudioBuffer[]) => {
  const ctx = audioContextRef.current!;
  let startAt = ctx.currentTime + 0.1;

  for (const chunk of buffers) {
    const source = ctx.createBufferSource();
    source.buffer = chunk;
    source.connect(ctx.destination);
    source.start(startAt);
    startAt += chunk.duration;
  }
}
9 changes: 9 additions & 0 deletions packages/test-app-screen/src/StreamingTest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import { AudioContext } from "react-native-audio-api";

/**
 * Streams a live internet radio station through the audio graph for
 * 5 seconds (start and stop scheduled on the audio clock).
 */
export const streamerTest = (audioContextRef: React.RefObject<AudioContext | null>) => {
  const ctx = audioContextRef.current!;
  const streamer = ctx.createStreamer();

  streamer.initialize('https://stream.radioparadise.com/aac-320');
  streamer.connect(ctx.destination);

  const now = ctx.currentTime;
  streamer.start(now);
  streamer.stop(now + 5);
}
142 changes: 142 additions & 0 deletions packages/test-app-screen/src/Test.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
import React, { useRef, FC, useState, useEffect } from 'react';
import {
AudioContext,
AudioBuffer,
AudioManager,
} from 'react-native-audio-api';

import { oscillatorTestWithDetune, oscillatorTestWithGain, oscillatorTestWithStereoPanner } from './OscillatorTest';
import { streamerTest } from './StreamingTest';
import { workletTest } from './WorkletsTest';
import { recorderTest, recorderPlaybackTest } from './RecorderTest';
import { audioBufferFormatsTest, audioBufferChannelsTest, audioBufferBase64Test } from './AudioBufferTest';

import { View, Text, Button } from 'react-native';

const SAMPLE_RATE = 44100;

/**
 * Manual test screen for react-native-audio-api: one button per feature area
 * (oscillator, audio buffers, recorder, streamer, worklet node). Buttons are
 * disabled while a test runs; `testingInfo` shows live progress text.
 *
 * Fixes over the previous version:
 * - `setupAudioContext` is now a plain synchronous function. It contained no
 *   `await` and no caller awaited it, so declaring it `async` only obscured
 *   the (already synchronous) ordering guarantee that the context exists
 *   before a test uses it.
 * - The floating `init()` promise in the mount effect is explicitly marked
 *   with `void` so the fire-and-forget intent is visible and lint-clean.
 */
const Test: FC = () => {
  const [testingInfo, setTestingInfo] = useState<string>('');
  const [isTesting, setIsTesting] = useState<boolean>(false);
  const audioContextRef = useRef<AudioContext | null>(null);

  useEffect(() => {
    const init = async () => {
      try {
        await AudioManager.requestRecordingPermissions();
      } catch (err) {
        console.log(err);
        console.error('Recording permission denied', err);
        return;
      }
      AudioManager.setAudioSessionOptions({
        iosCategory: 'playAndRecord',
        iosMode: 'spokenAudio',
        iosOptions: ['defaultToSpeaker', 'allowBluetoothA2DP'],
      });
    }
    // Fire-and-forget: failures are logged inside init itself.
    void init();
    return () => {
      // Release the native audio context on unmount.
      if (audioContextRef.current) {
        audioContextRef.current.close();
        audioContextRef.current = null;
      }
    }
  }, []);

  // Lazily create the shared AudioContext (synchronous, so every test can
  // rely on audioContextRef.current being set immediately after this call).
  const setupAudioContext = () => {
    if (!audioContextRef.current) {
      audioContextRef.current = new AudioContext({ sampleRate: SAMPLE_RATE });
    }
  }

  // Runs the three oscillator sub-tests back to back on fixed timers.
  const oscillatorTest = () => {
    setIsTesting(true);
    setupAudioContext();
    setTestingInfo('Oscillator Test with Detune');

    oscillatorTestWithDetune(audioContextRef);

    setTimeout(() => {
      setTestingInfo('Oscillator Test with Gain');
      oscillatorTestWithGain(audioContextRef);
    }, 2500);

    setTimeout(() => {
      setTestingInfo('Oscillator Test with Stereo Panner');
      oscillatorTestWithStereoPanner(audioContextRef);
    }, 7500);

    setTimeout(() => {
      setTestingInfo('Oscillator test completed.');
      setIsTesting(false);
    }, 12500);
  }

  // Runs the three buffer sub-tests sequentially, re-enabling buttons at the end.
  const audioBufferTest = async () => {
    setupAudioContext();
    setIsTesting(true);
    await audioBufferChannelsTest(audioContextRef, setTestingInfo);
    await audioBufferFormatsTest(audioContextRef, setTestingInfo);
    await audioBufferBase64Test(audioContextRef, setTestingInfo);
    setIsTesting(false);
  }

  // Records 5s of audio, then plays the captured chunks back.
  const recordingTest = () => {
    setupAudioContext();
    setIsTesting(true);
    setTestingInfo('Recording...');
    let buffers: AudioBuffer[] = [];
    recorderTest(audioContextRef, buffers);
    // recorderTest stops at 5000ms; 5500ms leaves slack for the last chunk.
    setTimeout(() => {
      setTestingInfo('Stopping recording and playing back...');
      recorderPlaybackTest(audioContextRef, buffers);
    }, 5500);
    setTimeout(() => {
      setTestingInfo('Recording test completed.');
      setIsTesting(false);
    }, 11000);
  }

  // Streams live radio for 5 seconds.
  const streamingTest = () => {
    setIsTesting(true);
    setupAudioContext();
    setTestingInfo('Streaming test');
    streamerTest(audioContextRef);
    setTimeout(() => {
      setTestingInfo('Streaming test completed.');
      setIsTesting(false);
    }, 5000);
  }

  // Runs the worklet processing-node test (gain reduction) for 4 seconds.
  const workletsTest = () => {
    setIsTesting(true);
    setupAudioContext();

    setTestingInfo('Worklet test that reduces gain to 0.1');
    workletTest(audioContextRef);
    setTimeout(() => {
      setTestingInfo('Worklet test completed.');
      setIsTesting(false);
    }, 4000);
  }


  return (
    <View style={{ gap: 40, paddingTop: 200, backgroundColor: 'black', height: '100%' }}>
      <View style={{ alignItems: 'center', justifyContent: 'center', gap: 5 }}>
        <Text style={{ color: 'white' }}>{testingInfo}</Text>
      </View>
      <View style={{ alignItems: 'center', justifyContent: 'center', gap: 5 }}>
        <Button title="oscillator" onPress={oscillatorTest} disabled={isTesting} />
        <Button title="audio buffer" onPress={audioBufferTest} disabled={isTesting} />
        <Button title="recorder" onPress={recordingTest} disabled={isTesting} />
        <Button title="streamer" onPress={streamingTest} disabled={isTesting} />
        <Button title="worklet node" onPress={workletsTest} disabled={isTesting} />
        <Text style={{color: 'white', paddingTop: 40}}>CHECK IF EVERYTHING WORKS AFTER HOT RELOAD</Text>
      </View>
    </View>
  );
};

export default Test;
31 changes: 31 additions & 0 deletions packages/test-app-screen/src/WorkletsTest.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import { AudioContext } from "react-native-audio-api";

/**
 * Routes an oscillator through a worklet processing node that multiplies
 * every sample by a fixed gain (0.1), then plays the result for 4 seconds.
 */
export const workletTest = (audioContextRef: React.RefObject<AudioContext | null>) => {
  // Per-render callback executed on the 'AudioRuntime' worklet runtime;
  // the 'worklet' directive must stay as the first statement.
  const gainWorklet = (
    inputAudioData: Array<Float32Array>,
    outputAudioData: Array<Float32Array>,
    framesToProcess: number,
    _currentTime: number
  ) => {
    'worklet';
    const GAIN = 0.1;
    for (let ch = 0; ch < inputAudioData.length; ch++) {
      const inSamples = inputAudioData[ch];
      const outSamples = outputAudioData[ch];
      for (let frame = 0; frame < framesToProcess; frame++) {
        outSamples[frame] = inSamples[frame] * GAIN;
      }
    }
  };

  const ctx = audioContextRef.current!;
  const workletNode = ctx.createWorkletProcessingNode(
    gainWorklet,
    'AudioRuntime'
  );

  const osc = ctx.createOscillator();
  osc.connect(workletNode);
  workletNode.connect(ctx.destination);
  osc.start();
  osc.stop(ctx.currentTime + 4);
}
13 changes: 13 additions & 0 deletions packages/test-app-screen/src/constants.ts

Large diffs are not rendered by default.

9 changes: 9 additions & 0 deletions packages/test-app-screen/tsconfig.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"moduleSuffixes": [".ios", ".android", ".native", ""],
},
"exclude": [
"**/node_modules",
]
}
Loading