chore: 新增 requirements_vllm.txt 文件,指定 VLLM 模型所需的依赖

This commit is contained in:
qihua
2025-03-08 00:40:17 +08:00
parent 2fbeba50ae
commit a1314e573a
2 changed files with 41 additions and 1 deletions

View File

@@ -103,7 +103,7 @@ class VllmQwen2LM(Qwen2LM):
async def inference_processor(self, task_queue):
while True:
try:
print(f"inference_processor")
logging.debug(f"inference_processor")
out_queue, prompt_token_ids, request_id, stop_token_ids, max_tokens = task_queue.get()
sampling_params = SamplingParams(**SAMPLING_PARAMS)
sampling_params.stop_token_ids = stop_token_ids or [6561]

40
requirements_vllm.txt Normal file
View File

@@ -0,0 +1,40 @@
vllm==0.7.3
pydantic==2.10.6
torch==2.5.1
torchaudio==2.5.1
conformer==0.3.2
diffusers==0.32.2
gdown==5.1.0
grpcio==1.57.0
grpcio-tools==1.57.0
hydra-core==1.3.2
HyperPyYAML==1.2.2
inflect==7.3.1
librosa==0.10.2
lightning==2.5.0.post0
matplotlib==3.7.5
modelscope==1.15.0
networkx==3.4.2
omegaconf==2.3.0
onnx==1.17.0
onnxruntime-gpu==1.19.0; sys_platform == 'linux'
#openai-whisper==20231117
openai-whisper==20240930
protobuf==4.25
pyworld==0.3.4
rich==13.7.1
soundfile==0.12.1
tensorboard==2.14.0
wget==3.2
WeTextProcessing==1.0.3
# trt use
tensorrt-cu12==10.0.1
tensorrt-cu12-bindings==10.0.1
tensorrt-cu12-libs==10.0.1