import torch


class CosyVoiceModel:
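    """Three-stage TTS pipeline: an LLM decodes discrete speech tokens from
    text, a flow model turns those tokens into a mel spectrogram, and a HiFT
    vocoder renders the mel into a waveform."""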

    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module):
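        # Prefer the GPU when one is available; every sub-module and input
        # tensor is moved to this device.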
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift

    def load(self, llm_model, flow_model, hift_model):
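        # Load each checkpoint onto the target device and switch to eval mode.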
        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device))
        self.llm.to(self.device).eval()
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device))
        self.flow.to(self.device).eval()
        self.hift.load_state_dict(torch.load(hift_model, map_location=self.device))
        self.hift.to(self.device).eval()

    def inference(self, text, text_len, flow_embedding, llm_embedding=torch.zeros(0, 192),
                  prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32),
                  llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                  flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                  prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32)):
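        """Synthesize a waveform from text: LLM -> flow -> vocoder.

        The prompt_* arguments carry an optional reference utterance (its
        text, speech tokens, and mel features) that conditions the synthesis;
        the zero-sized defaults mean "no prompt". The tensor defaults are
        built once at definition time, which is safe here because they are
        only ever read. Returns {'tts_speech': waveform tensor on the CPU}.
        """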
        # Stage 1: the LLM decodes discrete speech tokens from the text.
        tts_speech_token = self.llm.inference(text=text.to(self.device),
                                              text_len=text_len.to(self.device),
                                              prompt_text=prompt_text.to(self.device),
                                              prompt_text_len=prompt_text_len.to(self.device),
                                              prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                              prompt_speech_token_len=llm_prompt_speech_token_len.to(self.device),
                                              embedding=llm_embedding.to(self.device),
                                              beam_size=1,
                                              sampling=25,
                                              # keep the decoded length within 3-30 tokens per text token
                                              max_token_text_ratio=30,
                                              min_token_text_ratio=3)
        # Stage 2: the flow model converts the speech tokens into a mel spectrogram.
        tts_mel = self.flow.inference(token=tts_speech_token,
                                      token_len=torch.tensor([tts_speech_token.size(1)], dtype=torch.int32).to(self.device),
                                      prompt_token=flow_prompt_speech_token.to(self.device),
                                      prompt_token_len=flow_prompt_speech_token_len.to(self.device),
                                      prompt_feat=prompt_speech_feat.to(self.device),
                                      prompt_feat_len=prompt_speech_feat_len.to(self.device),
                                      embedding=flow_embedding.to(self.device))
        # Stage 3: the HiFT vocoder renders the mel into a waveform on the CPU.
        tts_speech = self.hift.inference(mel=tts_mel).cpu()
        # Release cached GPU memory between calls; a no-op on CPU-only runs.
        torch.cuda.empty_cache()
        return {'tts_speech': tts_speech}

    def text_to_token(self, text, text_len, flow_embedding, llm_embedding=torch.zeros(0, 192),
                      prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32),
                      llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                      flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                      prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32)):
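        """Run only the first stage of inference(): text to speech tokens.

        The flow_* and prompt_speech_feat* arguments are unused here and are
        accepted only so the signature mirrors inference(). The returned
        tokens stay on self.device, ready for token_to_speech().
        """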
        # Same LLM decoding step as in inference(), without the vocoder stages.
        tts_speech_token = self.llm.inference(text=text.to(self.device),
                                              text_len=text_len.to(self.device),
                                              prompt_text=prompt_text.to(self.device),
                                              prompt_text_len=prompt_text_len.to(self.device),
                                              prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                              prompt_speech_token_len=llm_prompt_speech_token_len.to(self.device),
                                              embedding=llm_embedding.to(self.device),
                                              beam_size=1,
                                              sampling=25,
                                              max_token_text_ratio=30,
                                              min_token_text_ratio=3)
        return tts_speech_token

    def token_to_speech(self, tts_speech_token, flow_embedding, llm_embedding=torch.zeros(0, 192),
                        prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32),
                        llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                        flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                        prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32)):
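        """Run the last two stages of inference(): speech tokens to waveform.

        The llm_* and prompt_text* arguments are unused here and are accepted
        only so the signature mirrors inference(). tts_speech_token is
        typically the output of text_to_token().
        """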
        # Caller-supplied tokens may still live on the CPU, so move them explicitly.
        tts_speech_token = tts_speech_token.to(self.device)
        # Stage 2: the flow model converts the speech tokens into a mel spectrogram.
        tts_mel = self.flow.inference(token=tts_speech_token,
                                      token_len=torch.tensor([tts_speech_token.size(1)], dtype=torch.int32).to(self.device),
                                      prompt_token=flow_prompt_speech_token.to(self.device),
                                      prompt_token_len=flow_prompt_speech_token_len.to(self.device),
                                      prompt_feat=prompt_speech_feat.to(self.device),
                                      prompt_feat_len=prompt_speech_feat_len.to(self.device),
                                      embedding=flow_embedding.to(self.device))
        # Stage 3: the HiFT vocoder renders the mel into a waveform on the CPU.
        tts_speech = self.hift.inference(mel=tts_mel).cpu()
        # Release cached GPU memory between calls; a no-op on CPU-only runs.
        torch.cuda.empty_cache()
        return {'tts_speech': tts_speech}
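

if __name__ == '__main__':
    # Minimal smoke test. This is a sketch only: the stub modules below stand
    # in for the real LLM, flow, and HiFT checkpoints (normally installed via
    # CosyVoiceModel.load()), and their output shapes are invented purely for
    # illustration. Input shapes follow the defaults above: (1, T) int32 token
    # ids with a (1,) int32 length, and a 192-dim embedding.
    class _StubLLM(torch.nn.Module):
        def inference(self, text, text_len, **kwargs):
            # Pretend the LLM emits two speech tokens per text token.
            return torch.zeros(1, int(text_len.item()) * 2, dtype=torch.int32, device=text.device)

    class _StubFlow(torch.nn.Module):
        def inference(self, token, token_len, **kwargs):
            # Pretend the flow model emits one 80-bin mel frame per speech token.
            return torch.zeros(1, 80, int(token_len.item()), device=token.device)

    class _StubHiFT(torch.nn.Module):
        def inference(self, mel):
            # Pretend the vocoder renders 256 waveform samples per mel frame.
            return torch.zeros(1, mel.size(2) * 256, device=mel.device)

    model = CosyVoiceModel(_StubLLM(), _StubFlow(), _StubHiFT())
    out = model.inference(text=torch.zeros(1, 10, dtype=torch.int32),
                          text_len=torch.tensor([10], dtype=torch.int32),
                          flow_embedding=torch.zeros(1, 192))
    print(out['tts_speech'].shape)  # torch.Size([1, 5120]) with these stubs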