Commit 4717db1

Fixed basic channel up-mixing in AudioProcessing
1 parent e665a4a commit 4717db1

3 files changed: +63 -25 lines

webrtc-jni/src/main/cpp/src/JNI_AudioProcessing.cpp

Lines changed: 7 additions & 3 deletions
@@ -77,7 +77,7 @@ JNIEXPORT jint JNICALL Java_dev_onvoid_webrtc_media_audio_AudioProcessing_proces
 
     if (srcConfig.num_channels() == 1 && dstConfig.num_channels() == 2) {
         // Up-mixing, only mono to stereo.
-        // For complex channel layouts an audio converter is required.
+        // For complex channel layouts a channel mixer is required.
 
         const size_t srcNumSamples = srcConfig.num_samples();
         const size_t dstNumChannels = dstConfig.num_channels();
@@ -88,14 +88,16 @@ JNIEXPORT jint JNICALL Java_dev_onvoid_webrtc_media_audio_AudioProcessing_proces
         }
 
         const int16_t * srcFrame = reinterpret_cast<const int16_t *>(srcPtr);
-        int16_t * dstFrame = reinterpret_cast<int16_t*>(dstPtr);
+        int16_t * dstFrame = reinterpret_cast<int16_t *>(dstPtr);
 
         for (int i = srcNumSamples - 1; i >= 0; i--) {
             for (size_t j = 0; j < dstNumChannels; ++j) {
                 dstFrame[dstNumChannels * i + j] = srcFrame[i];
             }
         }
 
+        srcConfig.set_num_channels(dstNumChannels);
+
         result = apm->ProcessStream(dstFrame, srcConfig, dstConfig, dstFrame);
     }
     else {
@@ -133,7 +135,7 @@ JNIEXPORT jint JNICALL Java_dev_onvoid_webrtc_media_audio_AudioProcessing_proces
 
     if (srcConfig.num_channels() == 1 && dstConfig.num_channels() == 2) {
         // Up-mixing, only mono to stereo.
-        // For complex channel layouts an audio converter is required.
+        // For complex channel layouts a channel mixer is required.
 
         const size_t srcNumSamples = srcConfig.num_samples();
         const size_t dstNumChannels = dstConfig.num_channels();
@@ -152,6 +154,8 @@ JNIEXPORT jint JNICALL Java_dev_onvoid_webrtc_media_audio_AudioProcessing_proces
             }
         }
 
+        srcConfig.set_num_channels(dstNumChannels);
+
         result = apm->ProcessStream(dstFrame, srcConfig, dstConfig, dstFrame);
     }
     else {
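
The loop above duplicates each mono sample into every destination channel, producing an interleaved stereo frame in dstFrame, and the new srcConfig.set_num_channels(dstNumChannels) call makes the input stream config describe that frame as stereo before it is handed to ProcessStream; previously the interleaved buffer was still declared as mono, which is the mismatch this commit fixes. A minimal Java sketch of the same interleaving, kept separate from the library (the helper and class names are hypothetical):

// Hypothetical standalone helper, not library code: duplicate each mono sample
// into every destination channel, producing an interleaved frame the same way
// the native loop fills dstFrame.
class UpMixSketch {

    static short[] upMixMono(short[] mono, int dstChannels) {
        short[] dst = new short[mono.length * dstChannels];

        // Walk the frame backwards, mirroring the native loop; with separate
        // source and destination buffers the direction does not matter, but it
        // stays correct if the expansion is ever done in place.
        for (int i = mono.length - 1; i >= 0; i--) {
            for (int j = 0; j < dstChannels; j++) {
                dst[dstChannels * i + j] = mono[i];
            }
        }
        return dst;
    }
}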

webrtc/src/main/java/dev/onvoid/webrtc/media/audio/AudioProcessing.java

Lines changed: 29 additions & 11 deletions
@@ -50,6 +50,35 @@ public AudioProcessing() {
         initialize();
     }
 
+    /**
+     * Get the audio processing statistics.
+     *
+     * @return The audio processing statistics.
+     */
+    public AudioProcessingStats getStatistics() {
+        updateStats();
+
+        return stats;
+    }
+
+    /**
+     * Calculates the buffer size in bytes for the destination buffer used in
+     * {@link #processStream} and {@link #processReverseStream}.
+     *
+     * @param inputConfig  The config that describes the audio input format.
+     * @param outputConfig The config that describes the desired audio output
+     *                     format.
+     *
+     * @return The target buffer size in bytes.
+     */
+    public int getTargetBufferSize(AudioProcessingStreamConfig inputConfig,
+            AudioProcessingStreamConfig outputConfig) {
+        int nSamplesIn = inputConfig.sampleRate / 100;
+        int nSamplesOut = outputConfig.sampleRate / 100;
+
+        return Math.max(nSamplesIn, nSamplesOut) * outputConfig.channels * 2;
+    }
+
     /**
      * Set the {@link AudioProcessingConfig} to enable/disable processing
      * effects. Should be called prior processing, during processing may cause
@@ -92,17 +121,6 @@ public AudioProcessing() {
      */
     public native int getStreamDelayMs();
 
-    /**
-     * Get the audio processing statistics.
-     *
-     * @return The audio processing statistics.
-     */
-    public AudioProcessingStats getStatistics() {
-        updateStats();
-
-        return stats;
-    }
-
     /**
      * Accepts and produces a 10 ms frame interleaved 16-bit PCM audio as
      * specified in {@code inputConfig} and {@code outputConfig}. {@code src}
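
The new getTargetBufferSize helper sizes the destination buffer from the 10 ms frame length of whichever side runs at the higher sample rate, times the output channel count, times 2 bytes per 16-bit sample. A minimal usage sketch, assuming only the constructor and methods visible in this diff (the example class name is made up):

import dev.onvoid.webrtc.media.audio.AudioProcessing;
import dev.onvoid.webrtc.media.audio.AudioProcessingStreamConfig;

class TargetBufferSizeExample {

    public static void main(String[] args) {
        AudioProcessing apm = new AudioProcessing();

        // Mono 48 kHz input, stereo 44.1 kHz output (mirrors processByteStreamUpMix).
        AudioProcessingStreamConfig in = new AudioProcessingStreamConfig(48000, 1);
        AudioProcessingStreamConfig out = new AudioProcessingStreamConfig(44100, 2);

        // max(480, 441) samples * 2 channels * 2 bytes per sample = 1920 bytes.
        byte[] dst = new byte[apm.getTargetBufferSize(in, out)];

        // dst is now large enough to be passed to processStream or
        // processReverseStream together with a 10 ms source frame and the two
        // stream configs above.
        System.out.println(dst.length);
    }
}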

webrtc/src/test/java/dev/onvoid/webrtc/media/audio/AudioProcessingTest.java

Lines changed: 27 additions & 11 deletions
@@ -69,14 +69,28 @@ void getStats() {
 
     @Test
     void processByteStream() {
-        ProcessBuffer buffer = new ProcessBuffer(48000, 48000, 1);
+        ProcessBuffer buffer = new ProcessBuffer(48000, 48000, 1, 1);
+
+        assertEquals(0, process(audioProcessing, buffer));
+    }
+
+    @Test
+    void processByteStreamDownMix() {
+        ProcessBuffer buffer = new ProcessBuffer(48000, 44100, 2, 1);
+
+        assertEquals(0, process(audioProcessing, buffer));
+    }
+
+    @Test
+    void processByteStreamUpMix() {
+        ProcessBuffer buffer = new ProcessBuffer(48000, 44100, 1, 2);
 
         assertEquals(0, process(audioProcessing, buffer));
     }
 
     @Test
     void processReverseStream() {
-        ProcessBuffer buffer = new ProcessBuffer(48000, 48000, 1);
+        ProcessBuffer buffer = new ProcessBuffer(48000, 48000, 1, 1);
 
         assertEquals(0, processReverse(audioProcessing, buffer));
     }
@@ -106,7 +120,8 @@ private static class ProcessBuffer {
 
         final int bytesPerFrame = 2;
 
-        final int channels;
+        final int channelsIn;
+        final int channelsOut;
 
         final int sampleRateIn;
         final int sampleRateOut;
@@ -124,21 +139,22 @@ private static class ProcessBuffer {
         AudioProcessingStreamConfig streamConfigOut;
 
 
-        ProcessBuffer(int sampleRateIn, int sampleRateOut, int channels) {
-            this.channels = channels;
+        ProcessBuffer(int sampleRateIn, int sampleRateOut, int channelsIn, int channelsOut) {
            this.sampleRateIn = sampleRateIn;
            this.sampleRateOut = sampleRateOut;
+            this.channelsIn = channelsIn;
+            this.channelsOut = channelsOut;
 
-            nSamplesIn = sampleRateIn / 100 * channels; // 10 ms frame
-            nSamplesOut = sampleRateOut / 100 * channels;
-            frameSizeIn = nSamplesIn * bytesPerFrame;
-            frameSizeOut = nSamplesOut * bytesPerFrame;
+            nSamplesIn = sampleRateIn / 100; // 10 ms frame
+            nSamplesOut = sampleRateOut / 100;
+            frameSizeIn = nSamplesIn * bytesPerFrame * channelsIn;
+            frameSizeOut = Math.max(nSamplesIn, nSamplesOut) * bytesPerFrame * channelsOut;
 
            src = new byte[frameSizeIn];
            dst = new byte[frameSizeOut];
 
-            streamConfigIn = new AudioProcessingStreamConfig(sampleRateIn, channels);
-            streamConfigOut = new AudioProcessingStreamConfig(sampleRateOut, channels);
+            streamConfigIn = new AudioProcessingStreamConfig(sampleRateIn, channelsIn);
+            streamConfigOut = new AudioProcessingStreamConfig(sampleRateOut, channelsOut);
         }
     }
 }
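
Worked through for the two new test cases: processByteStreamDownMix (48 kHz stereo in, 44.1 kHz mono out) yields frameSizeIn = 480 * 2 * 2 = 1920 bytes and frameSizeOut = max(480, 441) * 2 * 1 = 960 bytes, while processByteStreamUpMix (48 kHz mono in, 44.1 kHz stereo out) yields 960 bytes in and 1920 bytes out. Sizing the destination from max(nSamplesIn, nSamplesOut) matches getTargetBufferSize and leaves room for the interleaved input-rate frame that the native up-mixing path writes into the destination buffer before calling ProcessStream.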
