Fix lints found by ruff scanning #197

Open · wants to merge 7 commits into main
Changes from 1 commit
Fix lint: ruff E713
Nugine committed May 4, 2024
commit 38f7e96c02a00fe0203e5bb84679a4e2316e54cd
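For context, ruff rule E713 ("test for membership should be 'not in x'") flags negated membership tests spelled as "not X in Y". The sketch below uses a hypothetical dataset name rather than project code, and shows why the rewrite is safe:

    dataset = "vctk_eval"

    # Flagged form: "not" negates the whole membership test, which reads poorly.
    flagged = not "eval" in dataset

    # Preferred form: identical semantics, reads as a single operator.
    preferred = "eval" not in dataset

    assert flagged == preferred  # the rewrite is behavior-preserving

If the repository adopts ruff, a command like "ruff check --select E713 --fix ." can apply this rewrite automatically.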
bins/svc/preprocess.py (1 addition, 1 deletion)
@@ -145,7 +145,7 @@ def preprocess(cfg, args):
             continue
         dataset_dir = os.path.join(output_path, dataset)
         metadata = []
-        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
+        for split in ["train", "test"] if "eval" not in dataset else ["test"]:
             metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
             with open(metadata_file_path, "r") as f:
                 metadata.extend(json.load(f))
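One note for readers skimming the preprocess hunks: the iterable of this for statement is a conditional expression, so the fixed line parses as sketched below (hypothetical dataset name, not project code):

    dataset = "vctk_eval"

    # Parentheses make the parse of the loop's iterable explicit:
    splits = ["train", "test"] if "eval" not in dataset else ["test"]
    assert splits == ["test"]  # "eval" occurs in the dataset name

The same one-line fix repeats in bins/tta, bins/tts, and bins/vocoder below.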
bins/tta/preprocess.py (1 addition, 1 deletion)
@@ -157,7 +157,7 @@ def preprocess(cfg, args):
             continue
         dataset_dir = os.path.join(output_path, dataset)
         metadata = []
-        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
+        for split in ["train", "test"] if "eval" not in dataset else ["test"]:
             metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
             with open(metadata_file_path, "r") as f:
                 metadata.extend(json.load(f))
bins/tts/preprocess.py (1 addition, 1 deletion)
@@ -198,7 +198,7 @@ def preprocess(cfg, args):
             continue
         dataset_dir = os.path.join(output_path, dataset)
         metadata = []
-        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
+        for split in ["train", "test"] if "eval" not in dataset else ["test"]:
             metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
             with open(metadata_file_path, "r") as f:
                 metadata.extend(json.load(f))
bins/vocoder/preprocess.py (1 addition, 1 deletion)
@@ -119,7 +119,7 @@ def preprocess(cfg, args):
             continue
         dataset_dir = os.path.join(output_path, dataset)
         metadata = []
-        for split in ["train", "test"] if not "eval" in dataset else ["test"]:
+        for split in ["train", "test"] if "eval" not in dataset else ["test"]:
             metadata_file_path = os.path.join(src_dataset_dir, "{}.json".format(split))
             with open(metadata_file_path, "r") as f:
                 metadata.extend(json.load(f))
models/tts/base/tts_inferece.py (1 addition, 1 deletion)
@@ -158,7 +158,7 @@ def _load_model(
         assert checkpoint_dir is not None
         # Load the latest accelerator state dicts
         ls = [
-            str(i) for i in Path(checkpoint_dir).glob("*") if not "audio" in str(i)
+            str(i) for i in Path(checkpoint_dir).glob("*") if "audio" not in str(i)
         ]
         ls.sort(key=lambda x: int(x.split("_")[-3].split("-")[-1]), reverse=True)
         checkpoint_path = ls[0]
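The sort key in this hunk is untouched by the lint fix, but for orientation: assuming (hypothetically) checkpoint directories named like "epoch-0012_step-0034000_loss-0.2143", the key extracts the numeric tail of the third-from-last underscore-separated field. A minimal sketch, not project code:

    names = [
        "epoch-0001_step-0001000_loss-1.5021",
        "epoch-0012_step-0034000_loss-0.2143",
        "epoch-0012_step-0034000_loss-0.2143_audio",  # dropped by: "audio" not in n
    ]

    ls = [n for n in names if "audio" not in n]
    # "epoch-0012_step-0034000_loss-0.2143".split("_")[-3] -> "epoch-0012"
    # "epoch-0012".split("-")[-1] -> "0012" -> 12
    ls.sort(key=lambda x: int(x.split("_")[-3].split("-")[-1]), reverse=True)
    latest = ls[0]  # highest epoch sorts first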
models/vocoders/vocoder_inference.py (2 additions, 2 deletions)
@@ -281,7 +281,7 @@ def _load_model(self, checkpoint_dir, from_multi_gpu=False):
             ls = [
                 str(i)
                 for i in Path(checkpoint_dir).glob("*")
-                if not "audio" in str(i)
+                if "audio" not in str(i)
             ]
             ls.sort(
                 key=lambda x: int(x.split("/")[-1].split("_")[0].split("-")[-1]),
@@ -443,7 +443,7 @@ def load_nnvocoder(
     else:
         # Load from accelerator state dict
         weights_file = os.path.join(weights_file, "checkpoint")
-        ls = [str(i) for i in Path(weights_file).glob("*") if not "audio" in str(i)]
+        ls = [str(i) for i in Path(weights_file).glob("*") if "audio" not in str(i)]
         ls.sort(key=lambda x: int(x.split("_")[-3].split("-")[-1]), reverse=True)
         checkpoint_path = ls[0]
         accelerator = accelerate.Accelerator()
utils/data_utils.py (4 additions, 4 deletions)
@@ -149,7 +149,7 @@ def load_frame_pitch(
     pitch_statistic = []
     for utt_info in meta_data:
         utt = utt_info["Dataset"] + "_" + utt_info["Uid"]
-        if not utt2spk[utt] in spk2utt:
+        if utt2spk[utt] not in spk2utt:
             spk2utt[utt2spk[utt]] = []
         spk2utt[utt2spk[utt]].append(utt)

@@ -242,7 +242,7 @@ def load_phone_pitch(
     pitch_statistic = []
     for utt_info in tqdm(meta_data):
         utt = utt_info["Dataset"] + "_" + utt_info["Uid"]
-        if not utt2spk[utt] in spk2utt:
+        if utt2spk[utt] not in spk2utt:
             spk2utt[utt2spk[utt]] = []
         spk2utt[utt2spk[utt]].append(utt)

@@ -364,7 +364,7 @@ def load_energy(
     energy_statistic = []
     for utt_info in meta_data:
         utt = utt_info["Dataset"] + "_" + utt_info["Uid"]
-        if not utt2spk[utt] in spk2utt:
+        if utt2spk[utt] not in spk2utt:
             spk2utt[utt2spk[utt]] = []
         spk2utt[utt2spk[utt]].append(utt)

@@ -438,7 +438,7 @@ def load_frame_energy(
     energy_statistic = []
     for utt_info in meta_data:
         utt = utt_info["Dataset"] + "_" + utt_info["Uid"]
-        if not utt2spk[utt] in spk2utt:
+        if utt2spk[utt] not in spk2utt:
             spk2utt[utt2spk[utt]] = []
         spk2utt[utt2spk[utt]].append(utt)

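The spk2utt grouping pattern fixed four times above is the same idiom each time: group utterance IDs by speaker. A minimal standalone sketch with hypothetical data, not project code:

    utt2spk = {"vctk_0001": "p225", "vctk_0002": "p225", "vctk_0003": "p226"}

    spk2utt = {}
    for utt, spk in utt2spk.items():
        if spk not in spk2utt:  # the E713-clean form used in this commit
            spk2utt[spk] = []
        spk2utt[spk].append(utt)

    # dict.setdefault expresses the same grouping in one line per iteration:
    # spk2utt.setdefault(spk, []).append(utt)

The project's loops iterate meta_data and build utt from utt_info fields; the sketch only isolates the membership test that E713 concerns.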