---
services:
  vllm-deepseek-r1-32b:
    image: docker.citory.tech/mirror/vllm/vllm-openai:latest
    container_name: vllm-deepseek-r1-32b
    ports:
      # Quoted: colon-separated digits are a YAML 1.1 sexagesimal trap;
      # Compose convention is to always quote port mappings.
      - "10580:8080"
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            # Reserve 2 NVIDIA GPUs (Compose device-reservation syntax);
            # matches --tensor-parallel-size 2 in the command below.
            - driver: nvidia
              count: 2
              capabilities:
                - gpu
    # Host IPC namespace — vLLM uses shared memory between worker processes.
    ipc: host
    # Legacy runtime selector; redundant with the deploy reservation above
    # but kept for older Docker/Compose versions that ignore `deploy`.
    runtime: nvidia
    volumes:
      - /home/deepgeek/data/data_local/server/vllm/models:/models
    restart: unless-stopped
    # Folded block scalar: collapses to a single command line at runtime,
    # but keeps one flag per line for readability and clean diffs.
    command: >-
      --served-model-name deepseek-r1:32b
      --model /models/huihui-ai/DeepSeek-R1-Distill-Qwen-32B-abliterated
      --trust-remote-code
      --host 0.0.0.0
      --port 8080
      --max-model-len 8192
      --tensor-parallel-size 2
      --gpu_memory_utilization 0.9
      --enforce-eager
      --dtype auto
      --swap-space 8
      --enable-auto-tool-choice
      --tool-call-parser pythonic
# Dockge UI extension field: link(s) shown for this stack in the dashboard.
# Matches the published host port 10580 above.
x-dockge:
  urls:
    - http://local.citory.tech:10580

# No custom networks; the service joins the Compose default network.
networks: {}