[BC] Standardization of Transforms/Functionals #152

Merged: 31 commits, Jul 24, 2019

Commits

2e6bc4a  more (jamarshon, Jul 18, 2019)
8040752  more (jamarshon, Jul 18, 2019)
5c0b693  more (jamarshon, Jul 18, 2019)
23d2935  more (jamarshon, Jul 18, 2019)
fce6637  more (jamarshon, Jul 18, 2019)
99f449b  more (jamarshon, Jul 18, 2019)
8bd893b  Merge branch 'master' into standardization (jamarshon, Jul 18, 2019)
f00c46c  small push to save progress (jamarshon, Jul 22, 2019)
e3085d3  small push to save progress (jamarshon, Jul 22, 2019)
e9c805f  fix test (jamarshon, Jul 22, 2019)
a60fc69  Merge branch 'master' into standardization (jamarshon, Jul 22, 2019)
d090ff6  more (jamarshon, Jul 23, 2019)
fca025a  remove trailing zero (jamarshon, Jul 23, 2019)
b435f1b  apply feedback: remove scale and lc2cl (jamarshon, Jul 24, 2019)
710a236  apply feedback: remove downmix (jamarshon, Jul 24, 2019)
015dd0e  apply feedback: remove downmix (jamarshon, Jul 24, 2019)
840707d  apply feedback: rearrange functions (jamarshon, Jul 24, 2019)
9da5089  apply feedback: rearrange functions (jamarshon, Jul 24, 2019)
a09bf60  Merge branch 'master' into standardization (jamarshon, Jul 24, 2019)
afe528a  merge: delete stft (jamarshon, Jul 24, 2019)
be082f8  merge (jamarshon, Jul 24, 2019)
a7aa440  remove batch support for istft (jamarshon, Jul 24, 2019)
af1c8c8  docstring (jamarshon, Jul 24, 2019)
44e1f4d  docstring (jamarshon, Jul 24, 2019)
b383f00  docstring (jamarshon, Jul 24, 2019)
fea5c06  docstring (jamarshon, Jul 24, 2019)
a4f7d0f  more (jamarshon, Jul 24, 2019)
3997d12  more (jamarshon, Jul 24, 2019)
dc226c9  remove unused xfail (jamarshon, Jul 24, 2019)
ab4ecb6  Revert "remove batch support for istft" (jamarshon, Jul 24, 2019)
99675a4  rename batch to channel (jamarshon, Jul 24, 2019)

54 changes: 2 additions & 52 deletions test/test_functional.py
@@ -2,6 +2,8 @@

import torch
import torchaudio
import torchaudio.functional as F
import pytest
import unittest
import test.common_utils

@@ -11,10 +13,6 @@
import numpy as np
import librosa

import pytest
import torchaudio.functional as F
xfail = pytest.mark.xfail


class TestFunctional(unittest.TestCase):
data_sizes = [(2, 20), (3, 15), (4, 10)]
@@ -197,54 +195,6 @@ def _num_stft_bins(signal_len, fft_len, hop_length, pad):
return (signal_len + 2 * pad - fft_len + hop_length) // hop_length


@pytest.mark.parametrize('fft_length', [512])
@pytest.mark.parametrize('hop_length', [256])
@pytest.mark.parametrize('waveform', [
(torch.randn(1, 100000)),
(torch.randn(1, 2, 100000)),
pytest.param(torch.randn(1, 100), marks=xfail(raises=RuntimeError)),
])
@pytest.mark.parametrize('pad_mode', [
# 'constant',
'reflect',
])
@unittest.skipIf(not IMPORT_LIBROSA, 'Librosa is not available')
def test_stft(waveform, fft_length, hop_length, pad_mode):

Contributor comment: We might want to bring this back / add it to core later on for the regular torch.stft. (A sketch of using torch.stft directly for multi-channel input follows this file's diff.)

"""
Test STFT for multi-channel signals.

Padding: Value in having padding outside of torch.stft?
"""
pad = fft_length // 2
window = torch.hann_window(fft_length)
complex_spec = F.stft(waveform,
fft_length=fft_length,
hop_length=hop_length,
window=window,
pad_mode=pad_mode)
mag_spec, phase_spec = F.magphase(complex_spec)

# == Test shape
expected_size = list(waveform.size()[:-1])
expected_size += [fft_length // 2 + 1, _num_stft_bins(
waveform.size(-1), fft_length, hop_length, pad), 2]
assert complex_spec.dim() == waveform.dim() + 2
assert complex_spec.size() == torch.Size(expected_size)

# == Test values
fft_config = dict(n_fft=fft_length, hop_length=hop_length, pad_mode=pad_mode)
# note that librosa *automatically* pads with fft_length // 2.
expected_complex_spec = np.apply_along_axis(librosa.stft, -1,
waveform.numpy(), **fft_config)
expected_mag_spec, _ = librosa.magphase(expected_complex_spec)
# Convert torch to np.complex
complex_spec = complex_spec.numpy()
complex_spec = complex_spec[..., 0] + 1j * complex_spec[..., 1]

assert np.allclose(complex_spec, expected_complex_spec, atol=1e-5)
assert np.allclose(mag_spec.numpy(), expected_mag_spec, atol=1e-5)


@pytest.mark.parametrize('rate', [0.5, 1.01, 1.3])
@pytest.mark.parametrize('complex_specgrams', [
torch.randn(1, 2, 1025, 400, 2),
...
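
Regarding the contributor comment above: multi-channel handling is the main thing the removed F.stft wrapper added on top of torch.stft. Below is a minimal sketch of doing the same with torch.stft directly. It is not the PR's implementation: the helper name multichannel_stft is made up here, the 512/256 parameters simply mirror the removed test, and it assumes a PyTorch version where return_complex=True is available.

```python
import torch

# Sketch: apply torch.stft to a multi-channel waveform by flattening all
# leading dimensions, since torch.stft only accepts 1-D (time,) or
# 2-D (batch, time) input.
def multichannel_stft(waveform, n_fft=512, hop_length=256):
    leading = waveform.shape[:-1]                    # e.g. (1, 2) for a stereo example
    flat = waveform.reshape(-1, waveform.shape[-1])  # (channels_total, time)
    window = torch.hann_window(n_fft)
    spec = torch.stft(flat, n_fft=n_fft, hop_length=hop_length,
                      window=window, pad_mode='reflect', return_complex=True)
    return spec.reshape(leading + spec.shape[-2:])   # (..., n_fft // 2 + 1, n_frames)

spec = multichannel_stft(torch.randn(1, 2, 100000))  # -> torch.Size([1, 2, 257, 391])
```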
104 changes: 12 additions & 92 deletions test/test_jit.py
@@ -30,40 +30,18 @@ def _test_script_module(self, tensor, f, *args):

self.assertTrue(torch.allclose(jit_out, py_out))

def test_torchscript_scale(self):
@torch.jit.script
def jit_method(tensor, factor):
# type: (Tensor, int) -> Tensor
return F.scale(tensor, factor)

tensor = torch.rand((10, 1))
factor = 2

jit_out = jit_method(tensor, factor)
py_out = F.scale(tensor, factor)

self.assertTrue(torch.allclose(jit_out, py_out))

@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_scriptmodule_scale(self):
tensor = torch.rand((10, 1), device="cuda")

self._test_script_module(tensor, transforms.Scale)

def test_torchscript_pad_trim(self):
@torch.jit.script
def jit_method(tensor, ch_dim, max_len, len_dim, fill_value):
# type: (Tensor, int, int, int, float) -> Tensor
return F.pad_trim(tensor, ch_dim, max_len, len_dim, fill_value)
def jit_method(tensor, max_len, fill_value):
# type: (Tensor, int, float) -> Tensor
return F.pad_trim(tensor, max_len, fill_value)

tensor = torch.rand((10, 1))
ch_dim = 1
tensor = torch.rand((1, 10))
max_len = 5
len_dim = 0
fill_value = 3.

jit_out = jit_method(tensor, ch_dim, max_len, len_dim, fill_value)
py_out = F.pad_trim(tensor, ch_dim, max_len, len_dim, fill_value)
jit_out = jit_method(tensor, max_len, fill_value)
py_out = F.pad_trim(tensor, max_len, fill_value)

self.assertTrue(torch.allclose(jit_out, py_out))

@@ -74,45 +52,6 @@ def test_scriptmodule_pad_trim(self):

self._test_script_module(tensor, transforms.PadTrim, max_len)

def test_torchscript_downmix_mono(self):
@torch.jit.script
def jit_method(tensor, ch_dim):
# type: (Tensor, int) -> Tensor
return F.downmix_mono(tensor, ch_dim)

tensor = torch.rand((10, 1))
ch_dim = 1

jit_out = jit_method(tensor, ch_dim)
py_out = F.downmix_mono(tensor, ch_dim)

self.assertTrue(torch.allclose(jit_out, py_out))

@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_scriptmodule_downmix_mono(self):
tensor = torch.rand((1, 10), device="cuda")

self._test_script_module(tensor, transforms.DownmixMono)

def test_torchscript_LC2CL(self):
@torch.jit.script
def jit_method(tensor):
# type: (Tensor) -> Tensor
return F.LC2CL(tensor)

tensor = torch.rand((10, 1))

jit_out = jit_method(tensor)
py_out = F.LC2CL(tensor)

self.assertTrue(torch.allclose(jit_out, py_out))

@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_scriptmodule_LC2CL(self):
tensor = torch.rand((10, 1), device="cuda")

self._test_script_module(tensor, transforms.LC2CL)

def test_torchscript_spectrogram(self):
@torch.jit.script
def jit_method(sig, pad, window, n_fft, hop, ws, power, normalize):
@@ -167,7 +106,7 @@ def jit_method(spec, multiplier, amin, db_multiplier, top_db):
# type: (Tensor, float, float, float, Optional[float]) -> Tensor
return F.spectrogram_to_DB(spec, multiplier, amin, db_multiplier, top_db)

spec = torch.rand((10, 1))
spec = torch.rand((6, 201))
multiplier = 10.
amin = 1e-10
db_multiplier = 0.
@@ -180,7 +119,7 @@ def jit_method(spec, multiplier, amin, db_multiplier, top_db):

@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_scriptmodule_SpectrogramToDB(self):
spec = torch.rand((10, 1), device="cuda")
spec = torch.rand((6, 201), device="cuda")

self._test_script_module(spec, transforms.SpectrogramToDB)

@@ -211,32 +150,13 @@ def test_scriptmodule_MelSpectrogram(self):

self._test_script_module(tensor, transforms.MelSpectrogram)

def test_torchscript_BLC2CBL(self):
@torch.jit.script
def jit_method(tensor):
# type: (Tensor) -> Tensor
return F.BLC2CBL(tensor)

tensor = torch.rand((10, 1000, 1))

jit_out = jit_method(tensor)
py_out = F.BLC2CBL(tensor)

self.assertTrue(torch.allclose(jit_out, py_out))

@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_scriptmodule_BLC2CBL(self):
tensor = torch.rand((10, 1000, 1), device="cuda")

self._test_script_module(tensor, transforms.BLC2CBL)

def test_torchscript_mu_law_encoding(self):
@torch.jit.script
def jit_method(tensor, qc):
# type: (Tensor, int) -> Tensor
return F.mu_law_encoding(tensor, qc)

tensor = torch.rand((10, 1))
tensor = torch.rand((1, 10))
qc = 256

jit_out = jit_method(tensor, qc)
@@ -246,7 +166,7 @@ def jit_method(tensor, qc):

@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_scriptmodule_MuLawEncoding(self):
tensor = torch.rand((10, 1), device="cuda")
tensor = torch.rand((1, 10), device="cuda")

self._test_script_module(tensor, transforms.MuLawEncoding)

@@ -256,7 +176,7 @@ def jit_method(tensor, qc):
# type: (Tensor, int) -> Tensor
return F.mu_law_expanding(tensor, qc)

tensor = torch.rand((10, 1))
tensor = torch.rand((1, 10))
qc = 256

jit_out = jit_method(tensor, qc)
@@ -266,7 +186,7 @@ def jit_method(tensor, qc):

@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_scriptmodule_MuLawExpanding(self):
tensor = torch.rand((10, 1), device="cuda")
tensor = torch.rand((1, 10), device="cuda")

self._test_script_module(tensor, transforms.MuLawExpanding)

...
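
As an illustration of the conventions these updated tests standardize on: waveforms are laid out as (channel, time) rather than (time, channel), and pad_trim no longer takes explicit channel/length dimension arguments. The sketch below only uses the signatures visible in the diffs above (F.pad_trim(tensor, max_len, fill_value), F.mu_law_encoding(tensor, qc), F.mu_law_expanding(tensor, qc)); it targets the torchaudio version in this PR, and later releases changed some of these APIs.

```python
import torch
import torchaudio.functional as F

# Waveforms follow the (channel, time) convention standardized by this PR.
waveform = torch.rand(1, 10) * 2 - 1       # one channel, ten samples in [-1, 1]

# Pad or trim along the time dimension to a fixed length.
padded = F.pad_trim(waveform, 15, 0.)      # padded out to length 15 with zeros
trimmed = F.pad_trim(waveform, 5, 0.)      # truncated to length 5

# Mu-law companding round trip with 256 quantization channels.
qc = 256
encoded = F.mu_law_encoding(waveform, qc)  # integer codes in [0, qc)
decoded = F.mu_law_expanding(encoded, qc)  # approximates waveform up to quantization error
```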