diff --git a/cosyvoice/cli/cosyvoice.py b/cosyvoice/cli/cosyvoice.py
index 68a2b9f..6b29cb4 100644
--- a/cosyvoice/cli/cosyvoice.py
+++ b/cosyvoice/cli/cosyvoice.py
@@ -21,7 +21,7 @@ from cosyvoice.utils.file_utils import logging
 
 class CosyVoice:
 
-    def __init__(self, model_dir):
+    def __init__(self, model_dir, load_script=True):
         instruct = True if '-Instruct' in model_dir else False
         self.model_dir = model_dir
         if not os.path.exists(model_dir):
@@ -39,6 +39,9 @@ class CosyVoice:
         self.model.load('{}/llm.pt'.format(model_dir),
                         '{}/flow.pt'.format(model_dir),
                         '{}/hift.pt'.format(model_dir))
+        if load_script:
+            self.model.load_script('{}/llm.text_encoder.fp16.zip'.format(model_dir),
+                                   '{}/llm.llm.fp16.zip'.format(model_dir))
         del configs
 
     def list_avaliable_spks(self):
diff --git a/cosyvoice/cli/model.py b/cosyvoice/cli/model.py
index 863736e..6aa5302 100644
--- a/cosyvoice/cli/model.py
+++ b/cosyvoice/cli/model.py
@@ -47,11 +47,18 @@ class CosyVoiceModel:
     def load(self, llm_model, flow_model, hift_model):
         self.llm.load_state_dict(torch.load(llm_model, map_location=self.device))
         self.llm.to(self.device).eval()
+        self.llm.half()
         self.flow.load_state_dict(torch.load(flow_model, map_location=self.device))
         self.flow.to(self.device).eval()
         self.hift.load_state_dict(torch.load(hift_model, map_location=self.device))
         self.hift.to(self.device).eval()
 
+    def load_script(self, llm_text_encoder_model, llm_llm_model):
+        llm_text_encoder = torch.jit.load(llm_text_encoder_model)
+        self.llm.text_encoder = llm_text_encoder
+        llm_llm = torch.jit.load(llm_llm_model)
+        self.llm.llm = llm_llm
+
     def llm_job(self, text, text_len, prompt_text, prompt_text_len, llm_prompt_speech_token, llm_prompt_speech_token_len, llm_embedding, this_uuid):
         with self.llm_context:
             for i in self.llm.inference(text=text.to(self.device),
@@ -60,7 +67,7 @@ class CosyVoiceModel:
                                         prompt_text_len=prompt_text_len.to(self.device),
                                         prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                         prompt_speech_token_len=llm_prompt_speech_token_len.to(self.device),
-                                        embedding=llm_embedding.to(self.device),
+                                        embedding=llm_embedding.to(self.device).half(),
                                         sampling=25,
                                         max_token_text_ratio=30,
                                         min_token_text_ratio=3):
diff --git a/cosyvoice/transformer/attention.py b/cosyvoice/transformer/attention.py
index cb6723a..8c0c098 100644
--- a/cosyvoice/transformer/attention.py
+++ b/cosyvoice/transformer/attention.py
@@ -222,7 +222,7 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
         torch.nn.init.xavier_uniform_(self.pos_bias_u)
         torch.nn.init.xavier_uniform_(self.pos_bias_v)
 
-    def rel_shift(self, x):
+    def rel_shift(self, x: torch.Tensor) -> torch.Tensor:
         """Compute relative positional encoding.
 
         Args:
@@ -233,10 +233,14 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
             torch.Tensor: Output tensor.
 
         """
-        zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
+        zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
+                               device=x.device,
+                               dtype=x.dtype)
         x_padded = torch.cat([zero_pad, x], dim=-1)
 
-        x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
+        x_padded = x_padded.view(x.size()[0],
+                                 x.size()[1],
+                                 x.size(3) + 1, x.size(2))
         x = x_padded[:, :, 1:].view_as(x)[
             :, :, :, : x.size(-1) // 2 + 1
         ]  # only keep the positions from 0 to time2
diff --git a/cosyvoice/transformer/decoder.py b/cosyvoice/transformer/decoder.py
index 961c875..98f3a66 100644
--- a/cosyvoice/transformer/decoder.py
+++ b/cosyvoice/transformer/decoder.py
@@ -174,7 +174,7 @@ class TransformerDecoder(torch.nn.Module):
                               memory_mask)
         return x
 
-    @torch.jit.ignore(drop=True)
+    @torch.jit.unused
     def forward_layers_checkpointed(self, x: torch.Tensor,
                                     tgt_mask: torch.Tensor,
                                     memory: torch.Tensor,
diff --git a/cosyvoice/transformer/embedding.py b/cosyvoice/transformer/embedding.py
index 46130a5..e32cfc9 100644
--- a/cosyvoice/transformer/embedding.py
+++ b/cosyvoice/transformer/embedding.py
@@ -212,7 +212,7 @@ class EspnetRelPositionalEncoding(torch.nn.Module):
 
     """
 
-    def __init__(self, d_model, dropout_rate, max_len=5000):
+    def __init__(self, d_model: int, dropout_rate: float, max_len: int=5000):
         """Construct an PositionalEncoding object."""
         super(EspnetRelPositionalEncoding, self).__init__()
         self.d_model = d_model
@@ -221,7 +221,7 @@ class EspnetRelPositionalEncoding(torch.nn.Module):
         self.pe = None
         self.extend_pe(torch.tensor(0.0).expand(1, max_len))
 
-    def extend_pe(self, x):
+    def extend_pe(self, x: torch.Tensor):
         """Reset the positional encodings."""
         if self.pe is not None:
             # self.pe contains both positive and negative parts
@@ -253,7 +253,8 @@ class EspnetRelPositionalEncoding(torch.nn.Module):
         pe = torch.cat([pe_positive, pe_negative], dim=1)
         self.pe = pe.to(device=x.device, dtype=x.dtype)
 
-    def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0):
+    def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0) \
+            -> Tuple[torch.Tensor, torch.Tensor]:
         """Add positional encoding.
 
         Args:
diff --git a/cosyvoice/transformer/encoder.py b/cosyvoice/transformer/encoder.py
index 7e8bd23..5fcb13a 100644
--- a/cosyvoice/transformer/encoder.py
+++ b/cosyvoice/transformer/encoder.py
@@ -169,7 +169,7 @@ class BaseEncoder(torch.nn.Module):
             xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
         return xs
 
-    @torch.jit.ignore(drop=True)
+    @torch.jit.unused
     def forward_layers_checkpointed(self, xs: torch.Tensor,
                                     chunk_masks: torch.Tensor,
                                     pos_emb: torch.Tensor,
@@ -180,6 +180,7 @@ class BaseEncoder(torch.nn.Module):
                                              mask_pad)
         return xs
 
+    @torch.jit.export
     def forward_chunk(
         self,
         xs: torch.Tensor,
@@ -270,6 +271,7 @@ class BaseEncoder(torch.nn.Module):
 
         return (xs, r_att_cache, r_cnn_cache)
 
+    @torch.jit.unused
     def forward_chunk_by_chunk(
         self,
         xs: torch.Tensor,
diff --git a/examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml b/examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml
index 34c1d98..25d7269 100644
--- a/examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml
+++ b/examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml
@@ -31,7 +31,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
         num_blocks: 3
         dropout_rate: 0.1
         positional_dropout_rate: 0.1
-        attention_dropout_rate: 0
+        attention_dropout_rate: 0.0
         normalize_before: True
         input_layer: 'linear'
         pos_enc_layer_type: 'rel_pos_espnet'
@@ -49,7 +49,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
         num_blocks: 7
         dropout_rate: 0.1
         positional_dropout_rate: 0.1
-        attention_dropout_rate: 0
+        attention_dropout_rate: 0.0
         input_layer: 'linear_legacy'
         pos_enc_layer_type: 'rel_pos_espnet'
         selfattention_layer_type: 'rel_selfattn'
@@ -102,7 +102,7 @@ flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
             in_channels: 320
             out_channels: 80
             channels: [256, 256]
-            dropout: 0
+            dropout: 0.0
             attention_head_dim: 64
             n_blocks: 4
             num_mid_blocks: 8
diff --git a/examples/libritts/cosyvoice/conf/cosyvoice.yaml b/examples/libritts/cosyvoice/conf/cosyvoice.yaml
index c89611c..bca3898 100644
--- a/examples/libritts/cosyvoice/conf/cosyvoice.yaml
+++ b/examples/libritts/cosyvoice/conf/cosyvoice.yaml
@@ -31,7 +31,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
         num_blocks: 6
         dropout_rate: 0.1
         positional_dropout_rate: 0.1
-        attention_dropout_rate: 0
+        attention_dropout_rate: 0.0
         normalize_before: True
         input_layer: 'linear'
         pos_enc_layer_type: 'rel_pos_espnet'
@@ -49,7 +49,7 @@ llm: !new:cosyvoice.llm.llm.TransformerLM
         num_blocks: 14
         dropout_rate: 0.1
         positional_dropout_rate: 0.1
-        attention_dropout_rate: 0
+        attention_dropout_rate: 0.0
         input_layer: 'linear_legacy'
         pos_enc_layer_type: 'rel_pos_espnet'
         selfattention_layer_type: 'rel_selfattn'
@@ -102,7 +102,7 @@ flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
             in_channels: 320
             out_channels: 80
             channels: [256, 256]
-            dropout: 0
+            dropout: 0.0
             attention_head_dim: 64
             n_blocks: 4
             num_mid_blocks: 12
diff --git a/webui.py b/webui.py
index e608d80..3bef07b 100644
--- a/webui.py
+++ b/webui.py
@@ -173,7 +173,7 @@ if __name__ == '__main__':
                         default=8000)
     parser.add_argument('--model_dir',
                         type=str,
-                        default='iic/CosyVoice-300M',
+                        default='pretrained_models/CosyVoice-300M',
                         help='local path or modelscope repo id')
     args = parser.parse_args()
     cosyvoice = CosyVoice(args.model_dir)
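
Usage note, not part of the patch: a minimal sketch of how the new load_script
flag behaves, assuming the fp16 TorchScript exports (llm.text_encoder.fp16.zip,
llm.llm.fp16.zip) are present in model_dir.

    from cosyvoice.cli.cosyvoice import CosyVoice

    # Default: load() casts the eager llm to fp16, then load_script() swaps
    # llm.text_encoder and llm.llm for their TorchScript fp16 exports.
    cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M')

    # Opt out (e.g. when the .zip exports are absent from model_dir) and keep
    # the eager fp16 modules loaded from llm.pt.
    cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M', load_script=False)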