feat(*): add some models
vllm-deepseek-r1-70b/.env (Normal file, +1)
@@ -0,0 +1 @@
# VARIABLE=value #comment
vllm-deepseek-r1-70b/compose.yaml (Normal file, +30)
@@ -0,0 +1,30 @@
version: "3.9"
services:
  vllm-deepseek-r1-70b:
    image: docker.citory.tech/mirror/vllm/vllm-openai:latest
    container_name: vllm-deepseek-r1-70b
    ports:
      - 10580:8080
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 2
              capabilities:
                - gpu
    ipc: host
    runtime: nvidia
    volumes:
      - /home/deepgeek/data/data_local/server/vllm/models:/models
    restart: always
    command: --served-model-name deepseek-r1:70b越狱 --model
      /models/Fasiany/DeepSeek-R1-Distill-Llama-70B-abliterated-GPTQ-int4
      --trust-remote-code --host 0.0.0.0 --port 8080 --enable-auto-tool-choice --tool-call-parser pythonic --max-model-len 8192
      --tensor-parallel-size 2 --gpu_memory_utilization 0.9 --enforce-eager
      --dtype auto --swap-space 8
x-dockge:
  urls:
    - http://local.citory.tech:10580
networks: {}
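Once the stack is up, vLLM exposes an OpenAI-compatible HTTP API on host port 10580 (mapped to the container's port 8080, as set by --port 8080 and the ports: mapping). A minimal client sketch, assuming the `openai` Python package (v1.x), that the server is reachable via the local.citory.tech URL listed under x-dockge, and that no API key is enforced; the prompt and max_tokens are illustrative:

# Query the vLLM OpenAI-compatible endpoint started by this compose stack.
# Assumptions: openai>=1.0 installed; no API key configured, so a placeholder works.
from openai import OpenAI

client = OpenAI(
    base_url="http://local.citory.tech:10580/v1",  # host port from the ports: mapping / x-dockge URL
    api_key="EMPTY",                               # placeholder; compose.yaml configures no key
)

resp = client.chat.completions.create(
    model="deepseek-r1:70b越狱",  # must match --served-model-name, not the on-disk model path
    messages=[{"role": "user", "content": "Briefly introduce yourself."}],
    max_tokens=256,
)
print(resp.choices[0].message.content)

Note that requests are routed by the name given to --served-model-name; the /models/... path passed to --model is only used server-side to load the weights.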