Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion libwebrtc/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ pub mod video_track;
pub mod native {
pub use webrtc_sys::webrtc::ffi::create_random_uuid;

pub use crate::imp::{apm, audio_resampler, frame_cryptor, yuv_helper};
pub use crate::imp::{apm, audio_mixer, audio_resampler, frame_cryptor, yuv_helper};
}

#[cfg(target_os = "android")]
Expand Down
108 changes: 108 additions & 0 deletions libwebrtc/src/native/audio_mixer.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
// Copyright 2023 LiveKit, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::audio_frame::AudioFrame;
use cxx::UniquePtr;
use std::sync::Arc;
use webrtc_sys::audio_mixer as sys;
use webrtc_sys::audio_mixer::ffi;

/// Safe wrapper around the native webrtc audio mixer
/// (see `webrtc_sys::audio_mixer`).
pub struct AudioMixer {
    // Owned handle to the C++ `livekit::AudioMixer` instance.
    sys_handle: UniquePtr<ffi::AudioMixer>,
}

pub use ffi::AudioFrameInfo;

/// A producer of audio that can be registered with an [`AudioMixer`].
///
/// The mixer pulls audio from each source in 10ms chunks at the sample
/// rate it chooses (`target_sample_rate`).
pub trait AudioMixerSource {
    /// Identifier for this source; used later to remove it from the mixer.
    fn ssrc(&self) -> i32;

    /// Sample rate (Hz) this source would prefer to be asked for.
    fn preferred_sample_rate(&self) -> u32;

    /// Returns 10ms of audio resampled to `target_sample_rate`, or `None`
    /// when the source currently has nothing to contribute (treated as muted
    /// by the bridge implementation below).
    // NOTE(review): the `'a` lifetime parameter is unused — consider removing
    // it in a follow-up (kept here to avoid breaking external implementors).
    fn get_audio_frame_with_info<'a>(&self, target_sample_rate: u32) -> Option<AudioFrame>;
}

// Adapter that carries a user-provided `AudioMixerSource` so it can be
// exposed to the C++ side through `webrtc_sys::audio_mixer`.
struct AudioMixerSourceImpl<T> {
    inner: T,
}
impl<T: AudioMixerSource> sys::AudioMixerSource for AudioMixerSourceImpl<T> {
    fn ssrc(&self) -> i32 {
        self.inner.ssrc()
    }

    fn preferred_sample_rate(&self) -> i32 {
        self.inner.preferred_sample_rate() as i32
    }

    /// Bridge callback invoked by the native mixer: copies 10ms of audio from
    /// the Rust source into `native_frame` and reports whether the source
    /// produced audio (`Normal`) or is currently silent (`Muted`).
    fn get_audio_frame_with_info<'a>(
        &self,
        target_sample_rate: i32,
        native_frame: sys::NativeAudioFrame,
    ) -> AudioFrameInfo {
        match self.inner.get_audio_frame_with_info(target_sample_rate as u32) {
            Some(frame) => {
                // The mixer always consumes audio in 10ms chunks, i.e.
                // sample_rate / 100 samples per channel.
                let samples_count = frame.sample_rate as usize / 100;
                assert_eq!(
                    frame.sample_rate, target_sample_rate as u32,
                    "sample rate must match target_sample_rate"
                );
                assert_eq!(
                    frame.samples_per_channel as usize, samples_count,
                    "frame must contain 10ms of samples"
                );
                assert_eq!(
                    frame.data.len(),
                    samples_count * frame.num_channels as usize,
                    "slice must contain 10ms of samples"
                );

                // SAFETY: the asserts above guarantee `frame.data` holds
                // exactly `samples_per_channel * num_channels` i16 samples,
                // which is what `update_frame` reads from the raw pointer.
                unsafe {
                    native_frame.update_frame(
                        0,
                        frame.data.as_ptr(),
                        frame.samples_per_channel as usize,
                        frame.sample_rate as i32,
                        frame.num_channels as usize,
                    );
                }
                ffi::AudioFrameInfo::Normal
            }
            None => ffi::AudioFrameInfo::Muted,
        }
    }
}

impl AudioMixer {
    /// Creates a mixer backed by webrtc's native audio mixer.
    pub fn new() -> Self {
        let sys_handle = ffi::create_audio_mixer();
        Self { sys_handle }
    }

    /// Registers a source with the mixer. Sources are identified by their
    /// `ssrc()`, which is later used by [`AudioMixer::remove_source`].
    pub fn add_source(&mut self, source: impl AudioMixerSource + 'static) {
        let source_impl = AudioMixerSourceImpl { inner: source };
        let wrapper = Box::new(sys::AudioMixerSourceWrapper::new(Arc::new(source_impl)));
        unsafe {
            self.sys_handle.pin_mut().add_source(wrapper);
        }
    }

    /// Unregisters the source previously added with the given `ssrc`
    /// (no-op on the native side if no such source exists).
    pub fn remove_source(&mut self, ssrc: i32) {
        unsafe {
            self.sys_handle.pin_mut().remove_source(ssrc);
        }
    }

    /// Mixes 10ms of audio from all registered sources and returns the
    /// interleaved `i16` samples of the mixed frame.
    pub fn mix(&mut self, num_channels: usize) -> &[i16] {
        unsafe {
            // BUG FIX: the native `mix` returns the size of the mixed data in
            // BYTES (num_channels * samples_per_channel * sizeof(int16_t),
            // see audio_mixer.cpp), not an element count. Passing it straight
            // to `from_raw_parts` produced a slice twice the length of the
            // underlying buffer (out-of-bounds read). Convert to samples.
            let num_bytes = self.sys_handle.pin_mut().mix(num_channels);
            let num_samples = num_bytes / std::mem::size_of::<i16>();
            // SAFETY: `data()` points at the native frame's buffer holding
            // `num_samples` valid i16 values; the returned slice borrows
            // `self`, so it cannot outlive that buffer.
            std::slice::from_raw_parts(self.sys_handle.data(), num_samples)
        }
    }
}
1 change: 1 addition & 0 deletions libwebrtc/src/native/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
pub mod android;
pub mod apm;
pub mod audio_resampler;
pub mod audio_mixer;
pub mod audio_source;
pub mod audio_stream;
pub mod audio_track;
Expand Down
2 changes: 2 additions & 0 deletions webrtc-sys/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ fn main() {
"src/android.rs",
"src/prohibit_libsrtp_initialization.rs",
"src/apm.rs",
"src/audio_mixer.rs",
]);

builder.files(&[
Expand Down Expand Up @@ -77,6 +78,7 @@ fn main() {
"src/global_task_queue.cpp",
"src/prohibit_libsrtp_initialization.cpp",
"src/apm.cpp",
"src/audio_mixer.cpp",
]);

let webrtc_dir = webrtc_sys_build::webrtc_dir();
Expand Down
81 changes: 81 additions & 0 deletions webrtc-sys/include/livekit/audio_mixer.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
/*
* Copyright 2023 LiveKit
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <memory>

#include "api/scoped_refptr.h"
#include "api/audio/audio_mixer.h"
#include "modules/audio_mixer/audio_mixer_impl.h"
#include "modules/audio_processing/audio_buffer.h"

#include "rust/cxx.h"

namespace livekit {
class AudioMixer;
class NativeAudioFrame;
} // namespace livekit

#include "webrtc-sys/src/audio_mixer.rs.h"

namespace livekit {

// Thin, non-owning wrapper around a webrtc::AudioFrame, exposed to Rust so a
// source implementation can fill the frame handed out by the mixer.
class NativeAudioFrame {
 public:
  NativeAudioFrame(webrtc::AudioFrame* frame) : frame_(frame) {}
  // Forwards to webrtc::AudioFrame::UpdateFrame; `data` must point at
  // samples_per_channel * num_channels int16 samples (see audio_mixer.cpp).
  void update_frame(uint32_t timestamp, const int16_t* data, size_t samples_per_channel, int sample_rate_hz, size_t num_channels);
 private:
  webrtc::AudioFrame* frame_;  // not owned
};

// Adapts a Rust AudioMixerSourceWrapper to webrtc's AudioMixer::Source
// interface so the native mixer can pull audio from Rust code.
class AudioMixerSource: public webrtc::AudioMixer::Source {
 public:
  AudioMixerSource(rust::Box<AudioMixerSourceWrapper> source);

  // Called by the mixer to fetch 10ms of audio; delegates to the Rust side.
  AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz, webrtc::AudioFrame* audio_frame) override;

  int Ssrc() const override;

  int PreferredSampleRate() const override;

  ~AudioMixerSource() {}

 private:
  rust::Box<AudioMixerSourceWrapper> source_;  // owned Rust-side source
};

// cxx-facing facade over webrtc::AudioMixerImpl. Owns the registered sources
// and the frame the mix result is written into.
class AudioMixer {
 public:
  AudioMixer();

  void add_source(rust::Box<AudioMixerSourceWrapper> source);

  void remove_source(int ssrc);

  // Mixes 10ms from all sources into the internal frame; returns the size of
  // the mixed data in BYTES, not samples (see audio_mixer.cpp).
  size_t mix(size_t num_channels);
  // Pointer to the interleaved samples produced by the last mix() call.
  const int16_t* data() const;

 private:
  webrtc::AudioFrame frame_;  // reused output buffer for mix()
  // Keeps sources alive while the mixer holds raw pointers to them.
  std::vector<std::shared_ptr<AudioMixerSource>> sources_;
  rtc::scoped_refptr<webrtc::AudioMixer> audio_mixer_;
};

// Factory used by the cxx bridge to construct a mixer on the Rust side.
std::unique_ptr<AudioMixer> create_audio_mixer();


} // namespace livekit
78 changes: 78 additions & 0 deletions webrtc-sys/src/audio_mixer.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
#include "livekit/audio_mixer.h"
#include "api/audio/audio_frame.h"
#include "api/audio/audio_mixer.h"
#include "modules/audio_mixer/audio_mixer_impl.h"
#include "webrtc-sys/src/audio_mixer.rs.h"

#include <memory>
#include <iostream>

namespace livekit {

// Construct the facade directly around a fresh webrtc mixer instance.
AudioMixer::AudioMixer() : audio_mixer_(webrtc::AudioMixerImpl::Create()) {}

// Wraps the Rust source and registers it with the webrtc mixer. The
// shared_ptr stored in `sources_` keeps the wrapper alive for as long as the
// mixer holds the raw pointer passed to AddSource.
void AudioMixer::add_source(rust::Box<AudioMixerSourceWrapper> source) {
  auto native_source = std::make_shared<AudioMixerSource>(std::move(source));

  audio_mixer_->AddSource(native_source.get());
  sources_.push_back(native_source);
}

// Unregisters and destroys the first source whose Ssrc() matches
// `source_ssrc`; does nothing if no such source is registered.
void AudioMixer::remove_source(int source_ssrc) {
  for (auto it = sources_.begin(); it != sources_.end(); ++it) {
    if ((*it)->Ssrc() != source_ssrc)
      continue;
    // Detach from the mixer before releasing our owning reference.
    audio_mixer_->RemoveSource(it->get());
    sources_.erase(it);
    return;
  }
}

// Mixes 10ms of audio from every registered source into `frame_`.
// Returns the size of the mixed data in BYTES
// (num_channels * samples_per_channel * sizeof(int16_t)), not a sample count.
size_t AudioMixer::mix(size_t number_of_channels) {
  audio_mixer_->Mix(number_of_channels, &frame_);
  return frame_.num_channels() * frame_.samples_per_channel() * sizeof(int16_t);
}

// Pointer to the interleaved samples produced by the last mix() call;
// valid until the next mix() or until this AudioMixer is destroyed.
const int16_t* AudioMixer::data() const {
  return frame_.data();
}

// Factory exposed to Rust through the cxx bridge.
std::unique_ptr<AudioMixer> create_audio_mixer() {
  return std::make_unique<AudioMixer>();
}

// Takes ownership of the boxed Rust source for the lifetime of this adapter.
AudioMixerSource::AudioMixerSource(rust::Box<AudioMixerSourceWrapper> source) : source_(std::move(source)) {
}

// Identifier of the underlying Rust source (webrtc uses it to key sources).
int AudioMixerSource::Ssrc() const {
  return source_->ssrc();
}

// Sample rate the Rust source prefers to produce, forwarded to the mixer.
int AudioMixerSource::PreferredSampleRate() const {
  return source_->preferred_sample_rate();
}

// Pulls 10ms of audio from the Rust source into `audio_frame` and translates
// the Rust-side AudioFrameInfo enum into webrtc's equivalent.
webrtc::AudioMixer::Source::AudioFrameInfo AudioMixerSource::GetAudioFrameWithInfo(int sample_rate, webrtc::AudioFrame* audio_frame) {
  NativeAudioFrame frame(audio_frame);

  livekit::AudioFrameInfo result = source_->get_audio_frame_with_info(sample_rate, frame);

  switch (result) {
    case livekit::AudioFrameInfo::Normal:
      return webrtc::AudioMixer::Source::AudioFrameInfo::kNormal;
    case livekit::AudioFrameInfo::Muted:
      return webrtc::AudioMixer::Source::AudioFrameInfo::kMuted;
    default:
      // Any other value is treated as an error, matching the original chain.
      return webrtc::AudioMixer::Source::AudioFrameInfo::kError;
  }
}

// Copies the given samples into the wrapped webrtc::AudioFrame. Speech type
// and VAD activity are fixed to kNormalSpeech / kVadUnknown since the Rust
// side does not provide them.
void NativeAudioFrame::update_frame(uint32_t timestamp, const int16_t* data,
    size_t samples_per_channel, int sample_rate_hz, size_t num_channels) {
  frame_->UpdateFrame(timestamp, data, samples_per_channel, sample_rate_hz,
    webrtc::AudioFrame::SpeechType::kNormalSpeech, webrtc::AudioFrame::VADActivity::kVadUnknown,
    num_channels);
}

} // namespace livekit
Loading
Loading