
Commit 9a61dfa

Address additional lint problems
reuben committed Jul 19, 2019
1 parent 4a24cff commit 9a61dfa
Showing 5 changed files with 14 additions and 12 deletions.
1 change: 1 addition & 0 deletions .pylintrc
@@ -62,6 +62,7 @@ confidence=
 # --disable=W".
 disable=missing-docstring,
         line-too-long,
+        fixme,
         wrong-import-order,
         ungrouped-imports,
         wrong-import-position,
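For context, pylint's `fixme` checker (W0511) flags TODO/FIXME markers left in comments; disabling it lets the FIXME notes introduced elsewhere in this commit pass lint. A minimal sketch with a hypothetical module:

# example_module.py (hypothetical). With pylint's default settings,
# `pylint example_module.py` reports the comment below as
# "W0511: FIXME: handle empty input"; with `fixme` added to the
# disable= list in .pylintrc, the marker is no longer flagged.
def scale(values):
    # FIXME: handle empty input
    return [v * 2 for v in values]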
3 changes: 2 additions & 1 deletion tests/test_layers.py
@@ -38,7 +38,8 @@ def test_in_out(self):


 class DecoderTests(unittest.TestCase):
-    def test_in_out(self):
+    @staticmethod
+    def test_in_out():
         layer = Decoder(in_features=256, memory_dim=80, r=2, memory_size=4, attn_windowing=False, attn_norm="sigmoid") #FIXME: several missing required parameters for Decoder ctor
         dummy_input = T.rand(4, 8, 256)
         dummy_memory = T.rand(4, 2, 80)
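Turning the test into a `@staticmethod` addresses pylint's no-self-use warning for a method that never touches `self`; unittest still discovers and runs it. A minimal sketch with hypothetical names:

import unittest


class ExampleTests(unittest.TestCase):
    @staticmethod
    def test_addition():
        # No reference to `self`, so pylint's no-self-use check is satisfied;
        # unittest still finds the method by its test_ prefix, and a plain
        # assert registers failures just like self.assertEqual would.
        assert 1 + 1 == 2


if __name__ == "__main__":
    unittest.main()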
16 changes: 8 additions & 8 deletions train.py
@@ -38,8 +38,7 @@
 print(" > Number of GPUs: ", num_gpus)


-def setup_loader(is_val=False, verbose=False):
-    global ap
+def setup_loader(ap, is_val=False, verbose=False):
     global meta_data_train
     global meta_data_eval
     if "meta_data_train" not in globals():
@@ -85,7 +84,7 @@ def setup_loader(is_val=False, verbose=False):

 def train(model, criterion, criterion_st, optimizer, optimizer_st, scheduler,
           ap, epoch):
-    data_loader = setup_loader(is_val=False, verbose=(epoch == 0))
+    data_loader = setup_loader(ap, is_val=False, verbose=(epoch == 0))
     if c.use_speaker_embedding:
         speaker_mapping = load_speaker_mapping(OUT_PATH)
     model.train()
@@ -273,7 +272,7 @@ def train(model, criterion, criterion_st, optimizer, optimizer_st, scheduler,


 def evaluate(model, criterion, criterion_st, ap, current_step, epoch):
-    data_loader = setup_loader(is_val=True)
+    data_loader = setup_loader(ap, is_val=True)
     if c.use_speaker_embedding:
         speaker_mapping = load_speaker_mapping(OUT_PATH)
     model.eval()
@@ -432,7 +431,11 @@ def evaluate(model, criterion, criterion_st, ap, current_step, epoch):
     return avg_postnet_loss


-def main(args):
+#FIXME: move args definition/parsing inside of main?
+def main(args): #pylint: disable=redefined-outer-name
+    # Audio processor
+    ap = AudioProcessor(**c.audio)
+
     # DISTRUBUTED
     if num_gpus > 1:
         init_distributed(args.rank, num_gpus, args.group_id,
@@ -617,9 +620,6 @@ def main(args):
     LOG_DIR = OUT_PATH
     tb_logger = Logger(LOG_DIR)

-    # Audio processor
-    ap = AudioProcessor(**c.audio)
-
     try:
         main(args)
     except KeyboardInterrupt:
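Note on the train.py hunks above: the lint fixes remove the `global ap` from setup_loader(), create the AudioProcessor once inside main(), and pass it down explicitly to setup_loader(), train(), and evaluate(). The inline `#pylint: disable=redefined-outer-name` acknowledges that main()'s `args` parameter shadows the module-level `args`, and the FIXME records the intent to move argument parsing into main() later. A stripped-down sketch of the explicit-passing pattern, with illustrative names rather than the project's real API:

class AudioProcessorStub:
    """Stand-in for the real AudioProcessor, only to show the wiring."""
    def __init__(self, **audio_config):
        self.config = audio_config


def setup_loader(ap, is_val=False, verbose=False):
    # `ap` arrives as a parameter instead of being read from a global,
    # so the function can be exercised in isolation.
    split = "val" if is_val else "train"
    if verbose:
        print(f"building {split} loader with {ap.config}")
    return {"audio": ap, "split": split}


def main(audio_config):
    ap = AudioProcessorStub(**audio_config)  # created once in main(), then passed down
    train_loader = setup_loader(ap, is_val=False, verbose=True)
    eval_loader = setup_loader(ap, is_val=True)
    return train_loader, eval_loader


if __name__ == "__main__":
    main({"sample_rate": 22050, "num_mels": 80})

Passing the processor as an argument keeps the loader and training functions free of hidden module state, which is what pylint's global-statement warnings point at.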
4 changes: 2 additions & 2 deletions utils/audio.py
@@ -30,7 +30,7 @@ def __init__(self,

         self.sample_rate = sample_rate
         self.num_mels = num_mels
-        self.min_level_db = min_level_db
+        self.min_level_db = min_level_db or 0
         self.frame_shift_ms = frame_shift_ms
         self.frame_length_ms = frame_length_ms
         self.ref_level_db = ref_level_db
@@ -40,7 +40,7 @@ def __init__(self,
         self.griffin_lim_iters = griffin_lim_iters
         self.signal_norm = signal_norm
         self.symmetric_norm = symmetric_norm
-        self.mel_fmin = 0 if mel_fmin is None else mel_fmin
+        self.mel_fmin = mel_fmin or 0
         self.mel_fmax = mel_fmax
         self.max_norm = 1.0 if max_norm is None else float(max_norm)
         self.clip_norm = clip_norm
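Both audio.py edits coalesce `None` to `0`, but `value or 0` is subtly broader than `0 if value is None else value`: it replaces any falsy value, not just `None`. For the numeric defaults involved here the two behave the same; the sketch below (hypothetical helper names) shows the only place they diverge:

def coalesce_or(value):
    return value or 0

def coalesce_ternary(value):
    return 0 if value is None else value

# Identical for None and for the usual numeric settings:
assert coalesce_or(None) == coalesce_ternary(None) == 0
assert coalesce_or(-100) == coalesce_ternary(-100) == -100
# ...but `or` also collapses every other falsy value: 0.0 survives the
# ternary as a float, while `value or 0` returns the integer default.
assert isinstance(coalesce_ternary(0.0), float)
assert isinstance(coalesce_or(0.0), int)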
2 changes: 1 addition & 1 deletion utils/synthesis.py
@@ -77,7 +77,7 @@ def synthesis(model,
               speaker_id=None,
               style_wav=None,
               truncated=False,
-              enable_eos_bos_chars=False,
+              enable_eos_bos_chars=False, #pylint: disable=unused-argument
               do_trim_silence=False):
     """Synthesize voice for the given text.
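The synthesis.py change keeps `enable_eos_bos_chars` in the signature for compatibility with existing callers while telling pylint not to warn that it is unused (W0613); the inline `#pylint: disable=...` comment scopes the suppression to that definition rather than the whole project. A small sketch of the same idiom on a hypothetical function:

def render(text, speed=1.0, legacy_mode=False):  #pylint: disable=unused-argument
    # `legacy_mode` is kept so existing call sites keep working even though
    # this code path no longer reads it; the inline comment silences W0613
    # for this definition only.
    return f"{text} @ {speed}x"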
