Merge pull request #926 from Vinkle-hzt/main

fix bistream extra token
Author: Xiang Lyu
Date:   2025-01-23 22:02:25 +08:00
Committed by: GitHub


@@ -382,7 +382,10 @@ class Qwen2LM(TransformerLM):
             if text_cache.size(1) >= self.mix_ratio[0]:
                 lm_input_text = text_cache[:, :self.mix_ratio[0]]
                 logging.info('append {} text token'.format(lm_input_text.size(1)))
-                lm_input = torch.concat([lm_input, lm_input_text], dim=1)
+                if len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2:
+                    lm_input = lm_input_text
+                else:
+                    lm_input = torch.concat([lm_input, lm_input_text], dim=1)
                 text_cache = text_cache[:, self.mix_ratio[0]:]
             else:
                 logging.info('not enough text token to decode, wait for more')
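
For readers outside the repo, a minimal, self-contained sketch of the decision this hunk introduces. The names mirror the surrounding Qwen2LM code, but the concrete sizes and token values below are made up for illustration, and the reading of `speech_token_size + 2` as the bistream "fill" marker (the model asking for more text) is an assumption, not taken verbatim from the CosyVoice source:

import torch

# Hypothetical configuration, for illustration only.
speech_token_size = 100                # assumed speech codebook size
fill_token = speech_token_size + 2     # presumably the "need more text" marker in bistream decoding
mix_ratio = [5, 15]                    # assumed text/speech interleave ratio

out_tokens = [31, 7, fill_token]                   # speech tokens generated so far
lm_input = torch.zeros(1, 3, 8)                    # current LM input embeddings (batch, time, dim)
lm_input_text = torch.zeros(1, mix_ratio[0], 8)    # next chunk of text embeddings

# Before the fix, lm_input_text was always concatenated onto lm_input. When the
# model had just emitted the fill token to request more text, that appended one
# extra, stale token to the input. The fix replaces the input in that case.
if len(out_tokens) != 0 and out_tokens[-1] == speech_token_size + 2:
    lm_input = lm_input_text                                    # replace: drop the stale input
else:
    lm_input = torch.concat([lm_input, lm_input_text], dim=1)   # append as before

print(lm_input.shape)  # torch.Size([1, 5, 8]): replaced, not appended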