stacks/vllm/compose.yaml

version: "3.8"
services:
vllm:
image: docker.citory.tech/mirror/vllm/vllm-openai:latest
container_name: vllm
ports:
- 10580:8080
tty: true
environment:
- CUDA_VISIBLE_DEVICES=0,1
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 2
capabilities:
- gpu
ipc: host
runtime: nvidia
volumes:
- /home/deepgeek/data/data_local/server/vllm/models:/models
restart: always
command: --served-model-name DeepSeek-R1-Distill-Llama-70B-AWQ --model
/models/Valdemardi/DeepSeek-R1-Distill-Llama-70B-AWQ --trust-remote-code
--host 0.0.0.0 --port 8080 --max-model-len 8192 --tensor-parallel-size 2
--gpu_memory_utilization 0.96 --enforce-eager --dtype auto --swap-space 8
--api-key
"O8Is3NSYnp5fICWFbhkbwpLWgvMLkdCSuXR5ZggLmgwTKNPEWsjx1NqUxkyU7wLX"
x-dockge:
urls:
- http://local.citory.tech:10580
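
Once the stack is running, the container exposes vLLM's OpenAI-compatible API on the mapped host port. Below is a minimal client sketch, assuming the `openai` Python package (v1+) is installed and that local.citory.tech resolves from the client machine; the base URL, model name, and API key are taken verbatim from the compose file above.

# query_vllm.py - minimal smoke test against the vLLM OpenAI-compatible endpoint
from openai import OpenAI

client = OpenAI(
    # Host port 10580 maps to container port 8080 (see ports: and --port above)
    base_url="http://local.citory.tech:10580/v1",
    # Must match the --api-key passed in the compose command
    api_key="O8Is3NSYnp5fICWFbhkbwpLWgvMLkdCSuXR5ZggLmgwTKNPEWsjx1NqUxkyU7wLX",
)

response = client.chat.completions.create(
    # Must match --served-model-name, not the on-disk model path
    model="DeepSeek-R1-Distill-Llama-70B-AWQ",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(response.choices[0].message.content)

Note that --tensor-parallel-size 2 is consistent with the two reserved GPUs (count: 2, CUDA_VISIBLE_DEVICES=0,1), and --enforce-eager disables CUDA graph capture, trading some throughput for lower GPU memory overhead.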