Commit 77d8cf1: update

aluminumbox committed Jan 2, 2025
1 parent 2745d47

Showing 11 changed files with 163 additions and 158 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -132,7 +132,7 @@ import torchaudio

**CosyVoice2 Usage**
```python
-cosyvoice = CosyVoice2('pretrained_models/CosyVoice2-0.5B', load_jit=True, load_onnx=False, load_trt=False)
+cosyvoice = CosyVoice2('pretrained_models/CosyVoice2-0.5B', load_jit=False, load_trt=False, fp16=False)

# NOTE if you want to reproduce the results on https://funaudiollm.github.io/cosyvoice2, please add text_frontend=False during inference
# zero_shot usage
@@ -151,7 +151,7 @@ for i, j in enumerate(cosyvoice.inference_instruct2('收到好友从远方寄来

**CosyVoice Usage**
```python
-cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-SFT', load_jit=True, load_onnx=False, fp16=True)
+cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-SFT', load_jit=False, load_trt=False, fp16=False)
# sft usage
print(cosyvoice.list_available_spks())
# change stream=True for chunk stream inference
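
Both README snippets stop at the inference call. In either API the inference_* methods return a generator of dicts, each carrying a synthesized waveform. A minimal sketch of consuming it (assuming, per the upstream README, that the key is 'tts_speech' and that inference_sft is available on the SFT model):

```python
import torchaudio

# save each yielded chunk; 'tts_speech' is the waveform tensor and
# cosyvoice.sample_rate comes from the loaded config
for i, j in enumerate(cosyvoice.inference_sft('你好，世界', '中文女', stream=False)):
    torchaudio.save('sft_{}.wav'.format(i), j['tts_speech'], cosyvoice.sample_rate)
```
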
52 changes: 34 additions & 18 deletions cosyvoice/bin/export_jit.py
@@ -23,7 +23,7 @@
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/../..'.format(ROOT_DIR))
sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR))
-from cosyvoice.cli.cosyvoice import CosyVoice
+from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2


def get_args():
@@ -37,6 +37,15 @@ def get_args():
    return args


+def get_optimized_script(model, preserved_attrs=[]):
+    script = torch.jit.script(model)
+    if preserved_attrs != []:
+        script = torch.jit.freeze(script, preserved_attrs=preserved_attrs)
+    else:
+        script = torch.jit.freeze(script)
+    script = torch.jit.optimize_for_inference(script)
+    return script
+
def main():
    args = get_args()
    logging.basicConfig(level=logging.DEBUG,
@@ -46,28 +55,35 @@ def main():
    torch._C._jit_set_profiling_mode(False)
    torch._C._jit_set_profiling_executor(False)

-    cosyvoice = CosyVoice(args.model_dir, load_jit=False, load_onnx=False)
+    try:
+        model = CosyVoice(args.model_dir)
+    except:
+        try:
+            model = CosyVoice2(args.model_dir)
+        except:
+            raise TypeError('no valid model_type!')

-    # 1. export llm text_encoder
-    llm_text_encoder = cosyvoice.model.llm.text_encoder.half()
-    script = torch.jit.script(llm_text_encoder)
-    script = torch.jit.freeze(script)
-    script = torch.jit.optimize_for_inference(script)
-    script.save('{}/llm.text_encoder.fp16.zip'.format(args.model_dir))
+    if not isinstance(model, CosyVoice2):
+        # 1. export llm text_encoder
+        llm_text_encoder = model.model.llm.text_encoder
+        script = get_optimized_script(llm_text_encoder)
+        script.save('{}/llm.text_encoder.fp32.zip'.format(args.model_dir))
+        script = get_optimized_script(llm_text_encoder.half())
+        script.save('{}/llm.text_encoder.fp16.zip'.format(args.model_dir))

-    # 2. export llm llm
-    llm_llm = cosyvoice.model.llm.llm.half()
-    script = torch.jit.script(llm_llm)
-    script = torch.jit.freeze(script, preserved_attrs=['forward_chunk'])
-    script = torch.jit.optimize_for_inference(script)
-    script.save('{}/llm.llm.fp16.zip'.format(args.model_dir))
+        # 2. export llm llm
+        llm_llm = model.model.llm.llm
+        script = get_optimized_script(llm_llm, ['forward_chunk'])
+        script.save('{}/llm.llm.fp32.zip'.format(args.model_dir))
+        script = get_optimized_script(llm_llm.half(), ['forward_chunk'])
+        script.save('{}/llm.llm.fp16.zip'.format(args.model_dir))

    # 3. export flow encoder
-    flow_encoder = cosyvoice.model.flow.encoder
-    script = torch.jit.script(flow_encoder)
-    script = torch.jit.freeze(script)
-    script = torch.jit.optimize_for_inference(script)
+    flow_encoder = model.model.flow.encoder
+    script = get_optimized_script(flow_encoder)
    script.save('{}/flow.encoder.fp32.zip'.format(args.model_dir))
+    script = get_optimized_script(flow_encoder.half())
+    script.save('{}/flow.encoder.fp16.zip'.format(args.model_dir))


if __name__ == '__main__':
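
Two things changed in this exporter. First, model construction now probes CosyVoice and falls back to CosyVoice2 instead of hard-coding one class. Second, the repeated script → freeze → optimize_for_inference chain is factored into get_optimized_script, and each module is exported in both fp32 and fp16 so the file names match what the reworked load_jit in cosyvoice/cli/cosyvoice.py (below) expects. The preserved_attrs argument matters because torch.jit.freeze drops every method except forward unless it is explicitly preserved, which is why the LLM keeps its streaming forward_chunk. A self-contained illustration with a hypothetical Toy module:

```python
import torch

class Toy(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2

    @torch.jit.export  # compiled alongside forward
    def forward_chunk(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1

script = torch.jit.script(Toy().eval())  # freeze() requires eval mode
frozen = torch.jit.freeze(script, preserved_attrs=['forward_chunk'])
optimized = torch.jit.optimize_for_inference(frozen, other_methods=['forward_chunk'])
print(optimized(torch.ones(2)))                 # forward: tensor([2., 2.])
print(optimized.forward_chunk(torch.zeros(2)))  # survives only via preserved_attrs: tensor([1., 1.])
```
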
32 changes: 18 additions & 14 deletions cosyvoice/bin/export_onnx.py
@@ -27,7 +27,7 @@
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/../..'.format(ROOT_DIR))
sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR))
-from cosyvoice.cli.cosyvoice import CosyVoice
+from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2


def get_dummy_input(batch_size, seq_len, out_channels, device):
@@ -56,14 +56,20 @@ def main():
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

-    cosyvoice = CosyVoice(args.model_dir, load_jit=False, load_onnx=False)
+    try:
+        model = CosyVoice(args.model_dir)
+    except:
+        try:
+            model = CosyVoice2(args.model_dir)
+        except:
+            raise TypeError('no valid model_type!')

    # 1. export flow decoder estimator
-    estimator = cosyvoice.model.flow.decoder.estimator
+    estimator = model.model.flow.decoder.estimator

-    device = cosyvoice.model.device
-    batch_size, seq_len = 1, 256
-    out_channels = cosyvoice.model.flow.decoder.estimator.out_channels
+    device = model.model.device
+    batch_size, seq_len = 2, 256
+    out_channels = model.model.flow.decoder.estimator.out_channels
    x, mask, mu, t, spks, cond = get_dummy_input(batch_size, seq_len, out_channels, device)
    torch.onnx.export(
        estimator,
@@ -75,13 +81,11 @@
        input_names=['x', 'mask', 'mu', 't', 'spks', 'cond'],
        output_names=['estimator_out'],
        dynamic_axes={
-            'x': {0: 'batch_size', 2: 'seq_len'},
-            'mask': {0: 'batch_size', 2: 'seq_len'},
-            'mu': {0: 'batch_size', 2: 'seq_len'},
-            'cond': {0: 'batch_size', 2: 'seq_len'},
-            't': {0: 'batch_size'},
-            'spks': {0: 'batch_size'},
-            'estimator_out': {0: 'batch_size', 2: 'seq_len'},
+            'x': {2: 'seq_len'},
+            'mask': {2: 'seq_len'},
+            'mu': {2: 'seq_len'},
+            'cond': {2: 'seq_len'},
+            'estimator_out': {2: 'seq_len'},
        }
    )

@@ -94,7 +98,7 @@ def main():
                                                   sess_options=option, providers=providers)

    for _ in tqdm(range(10)):
-        x, mask, mu, t, spks, cond = get_dummy_input(random.randint(1, 6), random.randint(16, 512), out_channels, device)
+        x, mask, mu, t, spks, cond = get_dummy_input(batch_size, random.randint(16, 512), out_channels, device)
        output_pytorch = estimator(x, mask, mu, t, spks, cond)
        ort_inputs = {
            'x': x.cpu().numpy(),
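
The dummy batch axis moves from 1 to a fixed 2 and the batch_size entries disappear from dynamic_axes, so the exported graph is dynamic only along seq_len; this matches the 2x80xN shapes baked into the TensorRT commands below. The verification loop compares eager PyTorch output against ONNX Runtime over random sequence lengths; a hedged sketch of how that comparison typically ends (the session and output variable names are assumptions):

```python
import numpy as np

# run the exported graph on the same dummy inputs and compare numerically;
# 'estimator_onnx', 'ort_inputs', and 'output_pytorch' are assumed names
output_onnx = estimator_onnx.run(None, ort_inputs)[0]
np.testing.assert_allclose(output_pytorch.detach().cpu().numpy(), output_onnx,
                           rtol=1e-2, atol=1e-4)
```
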
1 change: 1 addition & 0 deletions cosyvoice/bin/export_trt.sh
@@ -6,4 +6,5 @@ TRT_DIR=<YOUR_TRT_DIR>
MODEL_DIR=<COSYVOICE2_MODEL_DIR>

export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$TRT_DIR/lib:/usr/local/cuda/lib64
$TRT_DIR/bin/trtexec --onnx=$MODEL_DIR/flow.decoder.estimator.fp32.onnx --saveEngine=$MODEL_DIR/flow.decoder.estimator.fp32.mygpu.plan --minShapes=x:2x80x4,mask:2x1x4,mu:2x80x4,cond:2x80x4 --optShapes=x:2x80x193,mask:2x1x193,mu:2x80x193,cond:2x80x193 --maxShapes=x:2x80x6800,mask:2x1x6800,mu:2x80x6800,cond:2x80x6800 --inputIOFormats=fp32:chw,fp32:chw,fp32:chw,fp32:chw,fp32:chw,fp32:chw --outputIOFormats=fp32:chw
+$TRT_DIR/bin/trtexec --onnx=$MODEL_DIR/flow.decoder.estimator.fp32.onnx --saveEngine=$MODEL_DIR/flow.decoder.estimator.fp16.mygpu.plan --fp16 --minShapes=x:2x80x4,mask:2x1x4,mu:2x80x4,cond:2x80x4 --optShapes=x:2x80x193,mask:2x1x193,mu:2x80x193,cond:2x80x193 --maxShapes=x:2x80x6800,mask:2x1x6800,mu:2x80x6800,cond:2x80x6800 --inputIOFormats=fp16:chw,fp16:chw,fp16:chw,fp16:chw,fp16:chw,fp16:chw --outputIOFormats=fp16:chw
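
Both invocations build engines from the same fp32 ONNX file: --minShapes/--optShapes/--maxShapes pin the batch axis at 2 and let the frame axis range from 4 to 6800 (with 193 as the optimization target), while the second run adds --fp16 and fp16 I/O formats. Consuming the resulting plan looks roughly like this (a sketch against the TensorRT Python API; accessor names vary between TensorRT releases):

```python
import tensorrt as trt

# deserialize the plan produced by trtexec above (file name from the fp16 run)
trt_logger = trt.Logger(trt.Logger.WARNING)
with open('flow.decoder.estimator.fp16.mygpu.plan', 'rb') as f:
    engine = trt.Runtime(trt_logger).deserialize_cuda_engine(f.read())
assert engine is not None, 'engine deserialization failed'
context = engine.create_execution_context()  # per-inference execution state
```
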
42 changes: 19 additions & 23 deletions cosyvoice/cli/cosyvoice.py
@@ -25,35 +25,35 @@

class CosyVoice:

-    def __init__(self, model_dir, load_jit=True, load_onnx=False, fp16=True):
+    def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False):
        self.instruct = True if '-Instruct' in model_dir else False
        self.model_dir = model_dir
+        self.fp16 = fp16
        if not os.path.exists(model_dir):
            model_dir = snapshot_download(model_dir)
        with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
            configs = load_hyperpyyaml(f)
-        assert get_model_type(configs) == CosyVoiceModel, 'do not use {} for CosyVoice initialization!'.format(model_dir)
+        assert get_model_type(configs) != CosyVoice2Model, 'do not use {} for CosyVoice initialization!'.format(model_dir)
        self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
                                          configs['feat_extractor'],
                                          '{}/campplus.onnx'.format(model_dir),
                                          '{}/speech_tokenizer_v1.onnx'.format(model_dir),
                                          '{}/spk2info.pt'.format(model_dir),
                                          configs['allowed_special'])
        self.sample_rate = configs['sample_rate']
-        if torch.cuda.is_available() is False and (fp16 is True or load_jit is True):
-            load_jit = False
-            fp16 = False
-            logging.warning('cpu do not support fp16 and jit, force set to False')
+        if torch.cuda.is_available() is False and (load_jit is True or load_trt is True or fp16 is True):
+            load_jit, load_trt, fp16 = False, False, False
+            logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
        self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'], fp16)
        self.model.load('{}/llm.pt'.format(model_dir),
                        '{}/flow.pt'.format(model_dir),
                        '{}/hift.pt'.format(model_dir))
        if load_jit:
-            self.model.load_jit('{}/llm.text_encoder.fp16.zip'.format(model_dir),
-                                '{}/llm.llm.fp16.zip'.format(model_dir),
-                                '{}/flow.encoder.fp32.zip'.format(model_dir))
-        if load_onnx:
-            self.model.load_onnx('{}/flow.decoder.estimator.fp32.onnx'.format(model_dir))
+            self.model.load_jit('{}/llm.text_encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                '{}/llm.llm.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'),
+                                '{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
+        if load_trt:
+            self.model.load_trt('{}/flow.decoder.estimator.{}.v100.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
        del configs

    def list_available_spks(self):
@@ -123,9 +123,10 @@ def inference_vc(self, source_speech_16k, prompt_speech_16k, stream=False, speed

class CosyVoice2(CosyVoice):

-    def __init__(self, model_dir, load_jit=False, load_onnx=False, load_trt=False):
+    def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False):
        self.instruct = True if '-Instruct' in model_dir else False
        self.model_dir = model_dir
+        self.fp16 = fp16
        if not os.path.exists(model_dir):
            model_dir = snapshot_download(model_dir)
        with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
@@ -138,22 +139,17 @@ def __init__(self, model_dir, load_jit=False, load_onnx=False, load_trt=False):
                                          '{}/spk2info.pt'.format(model_dir),
                                          configs['allowed_special'])
        self.sample_rate = configs['sample_rate']
-        if torch.cuda.is_available() is False and load_jit is True:
-            load_jit = False
-            logging.warning('cpu do not support jit, force set to False')
-        self.model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'])
+        if torch.cuda.is_available() is False and (load_jit is True or load_trt is True or fp16 is True):
+            load_jit, load_trt, fp16 = False, False, False
+            logging.warning('no cuda device, set load_jit/load_trt/fp16 to False')
+        self.model = CosyVoice2Model(configs['llm'], configs['flow'], configs['hift'], fp16)
        self.model.load('{}/llm.pt'.format(model_dir),
                        '{}/flow.pt'.format(model_dir),
                        '{}/hift.pt'.format(model_dir))
        if load_jit:
-            self.model.load_jit('{}/flow.encoder.fp32.zip'.format(model_dir))
-        if load_trt is True and load_onnx is True:
-            load_onnx = False
-            logging.warning('can not set both load_trt and load_onnx to True, force set load_onnx to False')
-        if load_onnx:
-            self.model.load_onnx('{}/flow.decoder.estimator.fp32.onnx'.format(model_dir))
+            self.model.load_jit('{}/flow.encoder.{}.zip'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
        if load_trt:
-            self.model.load_trt('{}/flow.decoder.estimator.fp16.Volta.plan'.format(model_dir))
+            self.model.load_trt('{}/flow.decoder.estimator.{}.v100.plan'.format(model_dir, 'fp16' if self.fp16 is True else 'fp32'))
        del configs

    def inference_instruct(self, *args, **kwargs):
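
The net effect on the public constructors: load_onnx is gone, load_trt and fp16 are first-class arguments, every acceleration flag now defaults to False, and a machine without CUDA silently forces all three off. Calling code follows the updated README:

```python
from cosyvoice.cli.cosyvoice import CosyVoice2

# conservative defaults; enable load_jit/load_trt/fp16 only on a CUDA machine
# with the corresponding exported artifacts in place
cosyvoice = CosyVoice2('pretrained_models/CosyVoice2-0.5B',
                       load_jit=False, load_trt=False, fp16=False)
print(cosyvoice.sample_rate)
```
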
(diffs for the remaining 6 changed files not shown)