stacks/vllm-qwq-32b/compose.yaml

version: "3.9"
services:
  vllm-QwQ-32B:
    image: docker.citory.tech/mirror/vllm/vllm-openai:latest
    container_name: vllm-QwQ-32B
    ports:
      - 10580:8080
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 2
              capabilities:
                - gpu
    ipc: host
    runtime: nvidia
    volumes:
      - /home/deepgeek/data/data_local/server/vllm/models:/models
    restart: always
    command: >-
      --served-model-name QwQ-32B --model /models/Qwen/QwQ-32B
      --trust-remote-code --host 0.0.0.0 --port 8080 --max-model-len 8192
      --tensor-parallel-size 2 --gpu_memory_utilization 0.9 --enforce-eager
      --dtype auto --swap-space 8 --enable-auto-tool-choice
      --tool-call-parser llama3_json
x-dockge:
  urls:
    - http://local.citory.tech:10580
networks: {}
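
Once the stack is up, the container exposes vLLM's OpenAI-compatible API on host port 10580. The sketch below is a minimal way to verify the deployment, assuming the openai Python package is installed and that, since no --api-key flag is passed above, any placeholder key is accepted; the base URL and model name are taken from the compose file.

# quick_check.py - send one chat completion to the QwQ-32B stack
from openai import OpenAI

client = OpenAI(
    base_url="http://local.citory.tech:10580/v1",  # host port mapped to 8080 in the container
    api_key="not-needed",                          # vLLM started without --api-key
)

resp = client.chat.completions.create(
    model="QwQ-32B",                               # matches --served-model-name
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=64,                                 # stays well under --max-model-len 8192
)
print(resp.choices[0].message.content)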