feat(*): add some models

deepgeek 2025-04-18 12:38:06 +08:00
parent a513dd8a91
commit d4c43315fb
19 changed files with 398 additions and 44 deletions

@@ -35,7 +35,7 @@ JWT_SECRET="Mnx6P2gPXDz1FngbX3Vmn9SB4T2EVeE4JDkrqM2biA0o6nrWxDNE34QTXzxSR7ToKpTn
 # LLM_PROVIDER='localai'
 LOCAL_AI_BASE_PATH='http://host.docker.internal:10580/v1'
-LOCAL_AI_MODEL_PREF='DeepSeek-R1-Distill-Llama-70B-AWQ'
+LOCAL_AI_MODEL_PREF='deepseek-r1:671b'
 LOCAL_AI_MODEL_TOKEN_LIMIT=8192
 LOCAL_AI_API_KEY="O8Is3NSYnp5fICWFbhkbwpLWgvMLkdCSuXR5ZggLmgwTKNPEWsjx1NqUxkyU7wLX"
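
To sanity-check the new model preference, a minimal sketch in Python (assumes the OpenAI-compatible endpoint from LOCAL_AI_BASE_PATH is up; the key header is only needed if the backend actually enforces LOCAL_AI_API_KEY):

import json
import os
import urllib.request

# LOCAL_AI_BASE_PATH from the .env above
base = "http://host.docker.internal:10580/v1"
req = urllib.request.Request(f"{base}/models")
# vLLM skips auth unless started with --api-key; add the header just in case
api_key = os.environ.get("LOCAL_AI_API_KEY", "")
if api_key:
    req.add_header("Authorization", f"Bearer {api_key}")
with urllib.request.urlopen(req, timeout=10) as resp:
    print([m["id"] for m in json.load(resp)["data"]])
# Expect 'deepseek-r1:671b' in the list after this change.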

@@ -13,7 +13,7 @@ services:
               count: all
               capabilities:
                 - gpu
-    restart: always
+    restart: unless-stopped
     command: python webui.py
     runtime: nvidia
 x-dockge:

@@ -1,14 +1,14 @@
 version: "3.8"
 services:
   comfyui:
-    image: docker.citory.tech/mirror/yanwk/comfyui-boot:cu121
+    image: docker.citory.tech/mirror/yanwk/comfyui-boot:cu124-cn
     container_name: comfyui
     ports:
       - 10587:8188
     volumes:
-      - /home/deepgeek/data/data_local/server/comfyui/storage:/home/runner
-      - /home/deepgeek/data/data_local/server/sd-models/checkpoints:/home/runner/ComfyUI/models/checkpoints
-      - /home/deepgeek/data/data_local/server/sd-models/loras:/home/runner/ComfyUI/models/loras
+      - /home/deepgeek/data/data_base/comfyui/storage/ComfyUI:/root/ComfyUI
+      - /home/deepgeek/data/data_base/sd-models/checkpoints:/root/ComfyUI/models/checkpoints
+      - /home/deepgeek/data/data_base/sd-models/loras:/root/ComfyUI/models/loras
     deploy:
       resources:
         reservations:
@@ -19,7 +19,7 @@ services:
               - compute
               - utility
               - gpu
-    restart: always
+    restart: unless-stopped
     runtime: nvidia
 x-dockge:
   urls:
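
After the image and mount changes, a quick liveness probe, as a sketch: it assumes ComfyUI's stock HTTP API, which serves /system_stats on the web port (mapped to 10587 above).

import json
import urllib.request

# Host port 10587 is mapped to ComfyUI's 8188 in the compose file above
url = "http://local.citory.tech:10587/system_stats"
with urllib.request.urlopen(url, timeout=10) as resp:
    stats = json.load(resp)
# Confirms the cu124-cn image boots and sees the GPUs
for dev in stats.get("devices", []):
    print(dev.get("name"), dev.get("type"))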

dbgpt/.env (new file, +1 line)

# VARIABLE=value #comment

dbgpt/compose.yaml (new file, +23 lines)

services:
  dbgpt:
    image: docker.citory.tech/mirror/eosphorosai/dbgpt-openai:latest
    container_name: dbgpt
    environment:
      - SILICONFLOW_API_KEY=sk-tlsrjadpcrvebqdmptabfrxznnuiimsawowtpnzbokpanfcx
    volumes:
      - dbgpt-data:/app/pilot/data
      - dbgpt-message:/app/pilot/message
    ports:
      - 10506:5670/tcp
    # The webserver may fail at first; it must wait until all SQL scripts in /docker-entrypoint-initdb.d finish executing.
    restart: unless-stopped
    ipc: host
    tty: true
    stdin_open: true
volumes:
  dbgpt-data:
  dbgpt-message:
x-dockge:
  urls:
    - http://local.citory.tech:10506
networks: {}
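
Once the stack is up, a minimal reachability check for the DB-GPT web service, as a sketch (10506 is the host port mapped to the container's 5670 above; the web UI is assumed to answer a plain GET once initialized):

import urllib.request

# Host port 10506 maps to DB-GPT's web port 5670 in the compose file above
with urllib.request.urlopen("http://local.citory.tech:10506/", timeout=10) as resp:
    print(resp.status)  # expect 200 once the webserver has finished initializing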

@@ -13,5 +13,5 @@ services:
               count: all
               capabilities:
                 - gpu
-    restart: always
+    restart: unless-stopped
     runtime: nvidia

@@ -18,7 +18,7 @@ services:
     runtime: nvidia
     volumes:
       - /home/deepgeek/data/data_base/ollama:/root/.ollama
-    restart: always
+    restart: unless-stopped
     command: ollama run gemma3:27b
 x-dockge:
   urls:

@@ -1,13 +1,13 @@
 version: "3.8"
 services:
   open-webui:
-    image: docker.citory.tech/mirror/backplane/open-webui:0.5.20
+    image: docker.citory.tech/mirror/backplane/open-webui:0
     container_name: open-webui
     ports:
       - 10503:8080
     environment:
-      - OPENAI_API_KEY='' # replace with your OpenAI API key
-      - OPENAI_API_BASE_URL=http://host.docker.internal:10580/v1 # replace with your vLLM IP and port
+      - OPENAI_API_KEY=''
+      - OPENAI_API_BASE_URL=http://host.docker.internal:10580/v1
       - WHISPER_MODEL=large
       - ENABLE_OLLAMA_API=true
       - OLLAMA_BASE_URL=http://host.docker.internal:10580
@@ -27,3 +27,4 @@ services:
 x-dockge:
   urls:
     - http://local.citory.tech:10503
+networks: {}

@@ -14,7 +14,7 @@ services:
               count: all
               capabilities:
                 - gpu
-    restart: always
+    restart: unless-stopped
     runtime: nvidia
 x-dockge:
   urls:

ragflow/.env (new file, +148 lines)

# The type of doc engine to use.
# Available options:
# - `elasticsearch` (default)
# - `infinity` (https://github.com/infiniflow/infinity)
DOC_ENGINE=${DOC_ENGINE:-elasticsearch}
# ------------------------------
# docker env var for specifying vector db type at startup
# (based on the vector db type, the corresponding docker
# compose profile will be used)
# ------------------------------
COMPOSE_PROFILES=${DOC_ENGINE}
# The version of Elasticsearch.
STACK_VERSION=8.11.3
# The hostname where the Elasticsearch service is exposed
ES_HOST=es01
# The port used to expose the Elasticsearch service to the host machine,
# allowing EXTERNAL access to the service running inside the Docker container.
ES_PORT=1200
# The password for Elasticsearch.
ELASTIC_PASSWORD=infini_rag_flow
# The port used to expose the Kibana service to the host machine,
# allowing EXTERNAL access to the service running inside the Docker container.
KIBANA_PORT=6601
KIBANA_USER=rag_flow
KIBANA_PASSWORD=infini_rag_flow
# The maximum amount of the memory, in bytes, that a specific Docker container can use while running.
# Update it according to the available memory in the host machine.
MEM_LIMIT=8073741824
# The hostname where the Infinity service is exposed
INFINITY_HOST=infinity
# Port to expose Infinity API to the host
INFINITY_THRIFT_PORT=23817
INFINITY_HTTP_PORT=23820
INFINITY_PSQL_PORT=5432
# The password for MySQL.
MYSQL_PASSWORD=infini_rag_flow
# The hostname where the MySQL service is exposed
MYSQL_HOST=mysql
# The database of the MySQL service to use
MYSQL_DBNAME=rag_flow
# The port used to expose the MySQL service to the host machine,
# allowing EXTERNAL access to the MySQL database running inside the Docker container.
MYSQL_PORT=5455
# The hostname where the MinIO service is exposed
MINIO_HOST=minio
# The port used to expose the MinIO console interface to the host machine,
# allowing EXTERNAL access to the web-based console running inside the Docker container.
MINIO_CONSOLE_PORT=9001
# The port used to expose the MinIO API service to the host machine,
# allowing EXTERNAL access to the MinIO object storage service running inside the Docker container.
MINIO_PORT=9000
# The username for MinIO.
# When updated, you must revise the `minio.user` entry in service_conf.yaml accordingly.
MINIO_USER=rag_flow
# The password for MinIO.
# When updated, you must revise the `minio.password` entry in service_conf.yaml accordingly.
MINIO_PASSWORD=infini_rag_flow
# The hostname where the Redis service is exposed
REDIS_HOST=redis
# The port used to expose the Redis service to the host machine,
# allowing EXTERNAL access to the Redis service running inside the Docker container.
REDIS_PORT=6380
# The password for Redis.
REDIS_PASSWORD=infini_rag_flow
# The port used to expose RAGFlow's HTTP API service to the host machine,
# allowing EXTERNAL access to the service running inside the Docker container.
SVR_HTTP_PORT=9380
HTTP_PORT=10502
# The RAGFlow Docker image to download.
# Defaults to the v0.17.2-slim edition, which is the RAGFlow Docker image without embedding models.
RAGFLOW_IMAGE=docker.citory.tech/mirror/infiniflow/ragflow:v0.17.2
#
# To download the RAGFlow Docker image with embedding models, uncomment the following line instead:
# RAGFLOW_IMAGE=infiniflow/ragflow:v0.17.2
#
# The Docker image of the v0.17.2 edition includes:
# - Built-in embedding models:
# - BAAI/bge-large-zh-v1.5
# - BAAI/bge-reranker-v2-m3
# - maidalun1020/bce-embedding-base_v1
# - maidalun1020/bce-reranker-base_v1
# - Embedding models that will be downloaded once you select them in the RAGFlow UI:
# - BAAI/bge-base-en-v1.5
# - BAAI/bge-large-en-v1.5
# - BAAI/bge-small-en-v1.5
# - BAAI/bge-small-zh-v1.5
# - jinaai/jina-embeddings-v2-base-en
# - jinaai/jina-embeddings-v2-small-en
# - nomic-ai/nomic-embed-text-v1.5
# - sentence-transformers/all-MiniLM-L6-v2
#
#
# If you cannot download the RAGFlow Docker image:
#
# - For the `nightly-slim` edition, uncomment either of the following:
# RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:nightly-slim
# RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:nightly-slim
#
# - For the `nightly` edition, uncomment either of the following:
# RAGFLOW_IMAGE=swr.cn-north-4.myhuaweicloud.com/infiniflow/ragflow:nightly
# RAGFLOW_IMAGE=registry.cn-hangzhou.aliyuncs.com/infiniflow/ragflow:nightly
# The local time zone.
TIMEZONE='Asia/Shanghai'
# Uncomment the following line if you have limited access to huggingface.co:
HF_ENDPOINT=https://hf-mirror.com
# Optimizations for MacOS
# Uncomment the following line if your operating system is MacOS:
# MACOS=1
# The maximum file size for each uploaded file, in bytes.
# You can uncomment this line and update the value if you wish to change the 1024M file size limit
# MAX_CONTENT_LENGTH=1073741824
# After making the change, ensure you update `client_max_body_size` in nginx/nginx.conf correspondingly.
# The log level for the RAGFlow's owned packages and imported packages.
# Available level:
# - `DEBUG`
# - `INFO` (default)
# - `WARNING`
# - `ERROR`
# For example, the following line changes the log level of `ragflow.es_conn` to `DEBUG`:
# LOG_LEVELS=ragflow.es_conn=DEBUG
# aliyun OSS configuration
# STORAGE_IMPL=OSS
# ACCESS_KEY=xxx
# SECRET_KEY=eee
# ENDPOINT=http://oss-cn-hangzhou.aliyuncs.com
# REGION=cn-hangzhou
# BUCKET=ragflow65536
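
As a quick illustration of how this file drives the stack (COMPOSE_PROFILES=${DOC_ENGINE} selects either the elasticsearch or the infinity services in the compose file below), a small sketch that parses the file and reports the effective engine and memory cap; the relative path ragflow/.env is an assumption:

import re

# Parse simple KEY=VALUE pairs, resolving ${VAR:-default} fallbacks
env = {}
with open("ragflow/.env") as f:
    for line in f:
        m = re.match(r"^(\w+)=(.*)$", line.strip())
        if m:
            key, value = m.groups()
            fallback = re.match(r"^\$\{\w+:-(.*)\}$", value)
            env[key] = fallback.group(1) if fallback else value

print("doc engine:", env["DOC_ENGINE"])                      # elasticsearch (default)
print("mem limit :", int(env["MEM_LIMIT"]) / 2**30, "GiB")   # 8073741824 bytes ~= 7.5 GiB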

ragflow/compose.yaml (new file, +181 lines)

services:
  es01:
    container_name: ragflow-es-01
    profiles:
      - elasticsearch
    image: docker.citory.tech/mirror/elasticsearch:${STACK_VERSION}
    volumes:
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/data/esdata01:/usr/share/elasticsearch/data
    ports:
      - ${ES_PORT}:9200
    env_file: .env
    environment:
      - node.name=es01
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=false
      - discovery.type=single-node
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=false
      - xpack.security.transport.ssl.enabled=false
      - cluster.routing.allocation.disk.watermark.low=5gb
      - cluster.routing.allocation.disk.watermark.high=3gb
      - cluster.routing.allocation.disk.watermark.flood_stage=2gb
      - TZ=${TIMEZONE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test: ["CMD-SHELL", "curl http://localhost:9200"]
      interval: 10s
      timeout: 10s
      retries: 120
    networks:
      - ragflow
    restart: on-failure
  infinity:
    container_name: ragflow-infinity
    profiles:
      - infinity
    image: docker.citory.tech/mirror/infiniflow/infinity:v0.6.0-dev3
    volumes:
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/data/infinity_data:/var/infinity
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/infinity_conf.toml:/infinity_conf.toml
    command: ["-f", "/infinity_conf.toml"]
    ports:
      - ${INFINITY_THRIFT_PORT}:23817
      - ${INFINITY_HTTP_PORT}:23820
      - ${INFINITY_PSQL_PORT}:5432
    env_file: .env
    environment:
      - TZ=${TIMEZONE}
    mem_limit: ${MEM_LIMIT}
    ulimits:
      nofile:
        soft: 500000
        hard: 500000
    networks:
      - ragflow
    healthcheck:
      test: ["CMD", "curl", "http://localhost:23820/admin/node/current"]
      interval: 10s
      timeout: 10s
      retries: 120
    restart: on-failure
  mysql:
    # mysql:5.7 linux/arm64 image is unavailable.
    image: docker.citory.tech/mirror/mysql:8.0.39
    container_name: ragflow-mysql
    env_file: .env
    environment:
      - MYSQL_ROOT_PASSWORD=${MYSQL_PASSWORD}
      - TZ=${TIMEZONE}
    command:
      --max_connections=1000
      --character-set-server=utf8mb4
      --collation-server=utf8mb4_unicode_ci
      --default-authentication-plugin=mysql_native_password
      --tls_version="TLSv1.2,TLSv1.3"
      --init-file /data/application/init.sql
    ports:
      - ${MYSQL_PORT}:3306
    volumes:
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/data/mysql_data:/var/lib/mysql
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/init.sql:/data/application/init.sql
    networks:
      - ragflow
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-uroot", "-p${MYSQL_PASSWORD}"]
      interval: 10s
      timeout: 10s
      retries: 3
    restart: on-failure
  minio:
    image: docker.citory.tech/mirror/minio/minio:RELEASE.2023-12-20T01-00-02Z
    container_name: ragflow-minio
    command: server --console-address ":9001" /data
    ports:
      - ${MINIO_PORT}:9000
      - ${MINIO_CONSOLE_PORT}:9001
    env_file: .env
    environment:
      - MINIO_ROOT_USER=${MINIO_USER}
      - MINIO_ROOT_PASSWORD=${MINIO_PASSWORD}
      - TZ=${TIMEZONE}
    volumes:
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/data/minio_data:/data
    networks:
      - ragflow
    restart: on-failure
  redis:
    # swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/valkey/valkey:8
    image: docker.citory.tech/mirror/valkey/valkey:8
    container_name: ragflow-redis
    command: redis-server --requirepass ${REDIS_PASSWORD} --maxmemory 128mb --maxmemory-policy allkeys-lru
    env_file: .env
    ports:
      - ${REDIS_PORT}:6379
    volumes:
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/data/redis_data:/data
    networks:
      - ragflow
    restart: on-failure
  ragflow:
    depends_on:
      mysql:
        condition: service_healthy
    image: ${RAGFLOW_IMAGE}
    container_name: ragflow-server
    ports:
      - ${SVR_HTTP_PORT}:9380
      - ${HTTP_PORT}:80
      # - 443:443
    volumes:
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/ragflow-logs:/ragflow/logs
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/nginx/proxy.conf:/etc/nginx/proxy.conf
      - /home/deepgeek/data/data_local/server/ragflow/ragflow-docker/nginx/nginx.conf:/etc/nginx/nginx.conf
    env_file: .env
    environment:
      - TZ=${TIMEZONE}
      - HF_ENDPOINT=${HF_ENDPOINT}
      - MACOS=${MACOS}
    networks:
      - ragflow
    restart: on-failure
    # https://docs.docker.com/engine/daemon/prometheus/#create-a-prometheus-configuration
    # If you're using Docker Desktop, the --add-host flag is optional. This flag makes sure that the host's internal IP gets exposed to the Prometheus container.
    extra_hosts:
      - "host.docker.internal:host-gateway"
  # executor:
  #   depends_on:
  #     mysql:
  #       condition: service_healthy
  #   image: ${RAGFLOW_IMAGE}
  #   container_name: ragflow-executor
  #   volumes:
  #     - ./ragflow-logs:/ragflow/logs
  #     - ./nginx/ragflow.conf:/etc/nginx/conf.d/ragflow.conf
  #   env_file: .env
  #   environment:
  #     - TZ=${TIMEZONE}
  #     - HF_ENDPOINT=${HF_ENDPOINT}
  #     - MACOS=${MACOS}
  #   entrypoint: "/ragflow/entrypoint_task_executor.sh 1 3"
  #   networks:
  #     - ragflow
  #   restart: on-failure
  #   # https://docs.docker.com/engine/daemon/prometheus/#create-a-prometheus-configuration
  #   # If you're using Docker Desktop, the --add-host flag is optional. This flag makes sure that the host's internal IP gets exposed to the Prometheus container.
  #   extra_hosts:
  #     - "host.docker.internal:host-gateway"
networks:
  ragflow:
    driver: bridge
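
A small readiness poll mirroring the healthchecks above, as a sketch: it uses the host ports and passwords from ragflow/.env (Elasticsearch on 1200 behind basic auth, the RAGFlow UI on 10502) and the same 120-retry budget as the compose healthchecks.

import base64
import time
import urllib.request

def up(url, user=None, password=None):
    req = urllib.request.Request(url)
    if user:
        token = base64.b64encode(f"{user}:{password}".encode()).decode()
        req.add_header("Authorization", f"Basic {token}")
    try:
        with urllib.request.urlopen(req, timeout=5) as resp:
            return resp.status < 500
    except Exception:
        return False

for _ in range(120):  # matches retries: 120 in the healthchecks above
    es = up("http://local.citory.tech:1200", "elastic", "infini_rag_flow")
    web = up("http://local.citory.tech:10502")
    print("es01:", es, "| ragflow:", web)
    if es and web:
        break
    time.sleep(10)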

@@ -6,11 +6,11 @@ services:
     ports:
       - 10588:7860
     volumes:
-      - /home/deepgeek/data/data_local/server/stable-diffusion-webui-docker/data:/data
-      - /home/deepgeek/data/data_local/server/stable-diffusion-webui-docker/data/config/auto/localizations:/stable-diffusion-webui/localizations
-      - /home/deepgeek/data/data_local/server/stable-diffusion-webui-docker/output:/output
-      - /home/deepgeek/data/data_local/server/sd-models/checkpoints:/data/models/Stable-diffusion
-      - /home/deepgeek/data/data_local/server/sd-models/loras:/data/models/Lora
+      - /home/deepgeek/data/data_base/stable-diffusion-webui-docker/data:/data
+      - /home/deepgeek/data/data_base/stable-diffusion-webui-docker/data/config/auto/localizations:/stable-diffusion-webui/localizations
+      - /home/deepgeek/data/data_base/stable-diffusion-webui-docker/output:/output
+      - /home/deepgeek/data/data_base/sd-models/checkpoints:/data/models/Stable-diffusion
+      - /home/deepgeek/data/data_base/sd-models/loras:/data/models/Lora
     tty: true
     environment:
       - CUDA_LAUNCH_BLOCKING=1
@@ -26,7 +26,7 @@ services:
               - compute
               - utility
               - gpu
-    restart: always
+    restart: unless-stopped
     runtime: nvidia
 x-dockge:
   urls:

@@ -13,7 +13,7 @@ services:
               count: all
               capabilities:
                 - gpu
-    restart: always
+    restart: unless-stopped
     runtime: nvidia
 x-dockge:
   urls:

vllm-deepseek-r1-32b/compose.yaml (Normal file → Executable file, 8 changed lines)

@@ -17,13 +17,13 @@ services:
     runtime: nvidia
     volumes:
       - /home/deepgeek/data/data_local/server/vllm/models:/models
-    restart: always
-    command: --served-model-name deepseek-r1:32b越狱 --model
+    restart: unless-stopped
+    command: --served-model-name deepseek-r1:32b --model
       /models/huihui-ai/DeepSeek-R1-Distill-Qwen-32B-abliterated
       --trust-remote-code --host 0.0.0.0 --port 8080 --max-model-len 8192
       --tensor-parallel-size 2 --gpu_memory_utilization 0.9 --enforce-eager
-      --dtype auto --swap-space 8 --enable-auto-tool-choice
-      --tool-call-parser pythonic
+      --dtype auto --swap-space 8 --enable-auto-tool-choice --tool-call-parser
+      pythonic
 x-dockge:
   urls:
     - http://local.citory.tech:10580
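
The --served-model-name renames (here and in the compose files below) change the model id clients must send. A minimal request against the OpenAI-compatible endpoint, as a sketch assuming vLLM's standard /v1/chat/completions route and no --api-key:

import json
import urllib.request

payload = {
    "model": "deepseek-r1:32b",  # the new --served-model-name
    "messages": [{"role": "user", "content": "Say hello."}],
    "max_tokens": 32,
}
req = urllib.request.Request(
    "http://local.citory.tech:10580/v1/chat/completions",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req, timeout=60) as resp:
    print(json.load(resp)["choices"][0]["message"]["content"])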

vllm-deepseek-r1-70b/compose.yaml (Normal file → Executable file, 12 changed lines)

@@ -18,12 +18,12 @@ services:
     runtime: nvidia
     volumes:
       - /home/deepgeek/data/data_local/server/vllm/models:/models
-    restart: always
-    command: --served-model-name deepseek-r1:70b越狱 --model
-      /models/Fasiany/DeepSeek-R1-Distill-Llama-70B-abliterated-GPTQ-int4
-      --trust-remote-code --host 0.0.0.0 --port 8080 --enable-auto-tool-choice --tool-call-parser pythonic --max-model-len 8192
-      --tensor-parallel-size 2 --gpu_memory_utilization 0.9 --enforce-eager
-      --dtype auto --swap-space 8
+    restart: unless-stopped
+    command: --served-model-name deepseek-r1:70b
+      --model /models/Valdemardi/DeepSeek-R1-Distill-Llama-70B-AWQ
+      --trust-remote-code --host 0.0.0.0 --port 8080 --enable-auto-tool-choice
+      --tool-call-parser pythonic --max-model-len 8192 --tensor-parallel-size 2
+      --gpu_memory_utilization 0.9 --enforce-eager --dtype auto --swap-space 8
 x-dockge:
   urls:
     - http://local.citory.tech:10580

vllm-qwen25-72b-instruct-awq/compose.yaml (Normal file → Executable file, 12 changed lines)

@@ -18,12 +18,12 @@ services:
     runtime: nvidia
     volumes:
       - /home/deepgeek/data/data_local/server/vllm/models:/models
-    restart: always
-    command: --served-model-name Qwen2.5-72B-Instruct-AWQ --model /models/Qwen/Qwen2.5-72B-Instruct-AWQ
-      --trust-remote-code --host 0.0.0.0 --port 8080 --max-model-len 8192
-      --tensor-parallel-size 2 --gpu_memory_utilization 0.9 --enforce-eager
-      --dtype auto --swap-space 8 --enable-auto-tool-choice --tool-call-parser
-      llama3_json
+    restart: unless-stopped
+    command: --served-model-name Qwen2.5:72b --model
+      /models/Qwen/Qwen2.5-72B-Instruct-AWQ --trust-remote-code --host 0.0.0.0
+      --port 8080 --max-model-len 8192 --tensor-parallel-size 2
+      --gpu_memory_utilization 0.9 --enforce-eager --dtype auto --swap-space 8
+      --enable-auto-tool-choice --tool-call-parser llama3_json
 x-dockge:
   urls:
     - http://local.citory.tech:10580

vllm-qwen25-coder-32b-instruct/compose.yaml (Normal file → Executable file, 4 changed lines)

@@ -18,8 +18,8 @@ services:
     runtime: nvidia
     volumes:
       - /home/deepgeek/data/data_local/server/vllm/models:/models
-    restart: always
-    command: --served-model-name Qwen2.5-Coder-32B-Instruct --model
+    restart: unless-stopped
+    command: --served-model-name Qwen2.5-Coder:32b --model
       /models/Qwen/Qwen2.5-Coder-32B-Instruct --trust-remote-code --host 0.0.0.0
       --port 8080 --max-model-len 8192 --tensor-parallel-size 2
       --gpu_memory_utilization 0.9 --enforce-eager --dtype auto --swap-space 8

vllm-qwen25-vl-32b-instruct/compose.yaml (Normal file → Executable file, 4 changed lines)

@@ -18,8 +18,8 @@ services:
     runtime: nvidia
     volumes:
       - /home/deepgeek/data/data_local/server/vllm/models:/models
-    restart: always
-    command: --served-model-name Qwen2.5-VL-32B-Instruct --model
+    restart: unless-stopped
+    command: --served-model-name Qwen2.5-VL:32b --model
       /models/Qwen/Qwen2.5-VL-32B-Instruct --trust-remote-code --host 0.0.0.0
       --port 8080 --max-model-len 8192 --tensor-parallel-size 2
       --gpu_memory_utilization 0.9 --enforce-eager --dtype auto --swap-space 8

vllm-qwq-32b/compose.yaml (Normal file → Executable file, 8 changed lines)

@@ -18,12 +18,12 @@ services:
     runtime: nvidia
     volumes:
       - /home/deepgeek/data/data_local/server/vllm/models:/models
-    restart: always
-    command: --served-model-name QwQ-32B --model /models/Qwen/QwQ-32B
+    restart: unless-stopped
+    command: --served-model-name QwQ:32b --model /models/Qwen/QwQ-32B
       --trust-remote-code --host 0.0.0.0 --port 8080 --max-model-len 8192
       --tensor-parallel-size 2 --gpu_memory_utilization 0.9 --enforce-eager
-      --dtype auto --swap-space 8 --enable-auto-tool-choice
-      --tool-call-parser llama3_json
+      --dtype auto --swap-space 8 --enable-auto-tool-choice --tool-call-parser
+      llama3_json
 x-dockge:
   urls:
     - http://local.citory.tech:10580