init(*): initial setup; use sun-panel as the frontend; manage docker compose stacks with dockge

deepgeek 2025-03-17 18:08:23 +08:00
commit d4e1b699c6
49 changed files with 691 additions and 0 deletions
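
The layout follows dockge's convention: one directory per stack, each holding a compose.yaml and an .env, and the x-dockge / urls extension keys that appear throughout are dockge's own metadata (the links shown on its dashboard), ignored by docker compose itself. The dockge stack is not part of this commit; a minimal sketch of how it is typically run, after its published compose file — port, data path, and stacks directory here are assumptions:

services:
  dockge:
    image: louislam/dockge:1
    container_name: dockge
    restart: always
    ports:
      - 5001:5001
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock # dockge drives the host's Docker daemon
      - ./data:/app/data # dockge's own state
      - /opt/stacks:/opt/stacks # the directory holding stacks like the ones below
    environment:
      - DOCKGE_STACKS_DIR=/opt/stacks # must match the stacks mount path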

329
anythinllm/.env Normal file

@@ -0,0 +1,329 @@
SERVER_PORT=3001
STORAGE_DIR="/app/server/storage"
UID='1000'
GID='1000'
# SIG_KEY='passphrase' # Please generate random string at least 32 chars long.
# SIG_SALT='salt' # Please generate random string at least 32 chars long.
JWT_SECRET="Mnx6P2gPXDz1FngbX3Vmn9SB4T2EVeE4JDkrqM2biA0o6nrWxDNE34QTXzxSR7ToKpTnjU6Qk2rGc3UGc3C03XL0w3gsXoUkA7kje1A82f2V2bTchu4N64uPkljZmF1x" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long.
###########################################
######## LLM API SELECTION ################
###########################################
# LLM_PROVIDER='openai'
# OPEN_AI_KEY=
# OPEN_MODEL_PREF='gpt-4o'
# LLM_PROVIDER='gemini'
# GEMINI_API_KEY=
# GEMINI_LLM_MODEL_PREF='gemini-pro'
# LLM_PROVIDER='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
# LLM_PROVIDER='anthropic'
# ANTHROPIC_API_KEY=sk-ant-xxxx
# ANTHROPIC_MODEL_PREF='claude-2'
# LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='localai'
LOCAL_AI_BASE_PATH='http://host.docker.internal:10580/v1'
LOCAL_AI_MODEL_PREF='DeepSeek-R1-Distill-Llama-70B-AWQ'
LOCAL_AI_MODEL_TOKEN_LIMIT=8192
LOCAL_AI_API_KEY="O8Is3NSYnp5fICWFbhkbwpLWgvMLkdCSuXR5ZggLmgwTKNPEWsjx1NqUxkyU7wLX"
# LLM_PROVIDER='ollama'
# OLLAMA_BASE_PATH='http://host.docker.internal:11434'
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096
# OLLAMA_AUTH_TOKEN='your-ollama-auth-token-here (optional, only for ollama running behind auth - Bearer token)'
# LLM_PROVIDER='togetherai'
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'
# LLM_PROVIDER='perplexity'
# PERPLEXITY_API_KEY='my-perplexity-key'
# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
# LLM_PROVIDER='openrouter'
# OPENROUTER_API_KEY='my-openrouter-key'
# OPENROUTER_MODEL_PREF='openrouter/auto'
# LLM_PROVIDER='huggingface'
# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
# HUGGING_FACE_LLM_TOKEN_LIMIT=8000
# LLM_PROVIDER='groq'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama3-8b-8192
# LLM_PROVIDER='koboldcpp'
# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='textgenwebui'
# TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
# TEXT_GEN_WEB_UI_API_KEY='sk-123abc'
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
# GENERIC_OPEN_AI_API_KEY=sk-123abc
# LLM_PROVIDER='litellm'
# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
# LITE_LLM_MODEL_TOKEN_LIMIT=4096
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'
# LLM_PROVIDER='novita'
# NOVITA_LLM_API_KEY='your-novita-api-key-here' check on https://novita.ai/settings/key-management
# NOVITA_LLM_MODEL_PREF='deepseek/deepseek-r1'
# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
# LLM_PROVIDER='bedrock'
# AWS_BEDROCK_LLM_ACCESS_KEY_ID=
# AWS_BEDROCK_LLM_ACCESS_KEY=
# AWS_BEDROCK_LLM_REGION=us-west-2
# AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0
# AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191
# LLM_PROVIDER='fireworksai'
# FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
# FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
# LLM_PROVIDER='apipie'
# APIPIE_LLM_API_KEY='sk-123abc'
# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
# LLM_PROVIDER='xai'
# XAI_LLM_API_KEY='xai-your-api-key-here'
# XAI_LLM_MODEL_PREF='grok-beta'
# LLM_PROVIDER='nvidia-nim'
# NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000'
# NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct'
# LLM_PROVIDER='deepseek'
# DEEPSEEK_API_KEY='your-deepseek-api-key-here'
# DEEPSEEK_MODEL_PREF='deepseek-chat'
# LLM_PROVIDER='ppio'
# PPIO_API_KEY='your-ppio-api-key-here'
# PPIO_MODEL_PREF=deepseek/deepseek-v3/community
###########################################
######## Embedding API SELECTION ##########
###########################################
# Only used if you are using an LLM that does not natively support embedding (openai or Azure)
# EMBEDDING_ENGINE='openai'
# OPEN_AI_KEY=sk-xxxx
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_ENGINE='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
# EMBEDDING_ENGINE='localai'
# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
# EMBEDDING_ENGINE='ollama'
# EMBEDDING_BASE_PATH='http://host.docker.internal:11434'
# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# EMBEDDING_ENGINE='lmstudio'
# EMBEDDING_BASE_PATH='https://host.docker.internal:1234/v1'
# EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# EMBEDDING_ENGINE='cohere'
# COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0'
# EMBEDDING_ENGINE='voyageai'
# VOYAGEAI_API_KEY=
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
# EMBEDDING_ENGINE='litellm'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'
# EMBEDDING_ENGINE='generic-openai'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# EMBEDDING_BASE_PATH='http://127.0.0.1:4000'
# GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc'
# GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS=500
# EMBEDDING_ENGINE='gemini'
# GEMINI_EMBEDDING_API_KEY=
# EMBEDDING_MODEL_PREF='text-embedding-004'
###########################################
######## Vector Database Selection ########
###########################################
# Enable all below if you are using vector database: Chroma.
# VECTOR_DB="chroma"
# CHROMA_ENDPOINT='http://host.docker.internal:8000'
# CHROMA_API_HEADER="X-Api-Key"
# CHROMA_API_KEY="sk-123abc"
# Enable all below if you are using vector database: Pinecone.
# VECTOR_DB="pinecone"
# PINECONE_API_KEY=
# PINECONE_INDEX=
# Enable all below if you are using vector database: LanceDB.
# VECTOR_DB="lancedb"
# Enable all below if you are using vector database: Weaviate.
# VECTOR_DB="weaviate"
# WEAVIATE_ENDPOINT="http://localhost:8080"
# WEAVIATE_API_KEY=
# Enable all below if you are using vector database: Qdrant.
# VECTOR_DB="qdrant"
# QDRANT_ENDPOINT="http://localhost:6333"
# QDRANT_API_KEY=
# Enable all below if you are using vector database: Milvus.
# VECTOR_DB="milvus"
# MILVUS_ADDRESS="http://localhost:19530"
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# Enable all below if you are using vector database: Zilliz Cloud.
# VECTOR_DB="zilliz"
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
# ZILLIZ_API_TOKEN=api-token-here
# Enable all below if you are using vector database: Astra DB.
# VECTOR_DB="astra"
# ASTRA_DB_APPLICATION_TOKEN=
# ASTRA_DB_ENDPOINT=
###########################################
######## Audio Model Selection ############
###########################################
# (default) use built-in whisper-small model.
# WHISPER_PROVIDER="local"
# use openai hosted whisper model.
# WHISPER_PROVIDER="openai"
# OPEN_AI_KEY=sk-xxxxxxxx
###########################################
######## TTS/STT Model Selection ##########
###########################################
# TTS_PROVIDER="native"
# TTS_PROVIDER="openai"
# TTS_OPEN_AI_KEY=sk-example
# TTS_OPEN_AI_VOICE_MODEL=nova
# TTS_PROVIDER="generic-openai"
# TTS_OPEN_AI_COMPATIBLE_KEY=sk-example
# TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova
# TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1"
# TTS_PROVIDER="elevenlabs"
# TTS_ELEVEN_LABS_KEY=
# TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel
# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
# DISABLE_TELEMETRY="false"
###########################################
######## PASSWORD COMPLEXITY ##############
###########################################
# Enforce a password schema for your organization users.
# Documentation on how to use https://github.com/kamronbatman/joi-password-complexity
# Default is only 8 char minimum
# PASSWORDMINCHAR=8
# PASSWORDMAXCHAR=250
# PASSWORDLOWERCASE=1
# PASSWORDUPPERCASE=1
# PASSWORDNUMERIC=1
# PASSWORDSYMBOL=1
# PASSWORDREQUIREMENTS=4
###########################################
######## ENABLE HTTPS SERVER ##############
###########################################
# By enabling this and providing the path/filename for the key and cert,
# the server will use HTTPS instead of HTTP.
#ENABLE_HTTPS="true"
#HTTPS_CERT_PATH="sslcert/cert.pem"
#HTTPS_KEY_PATH="sslcert/key.pem"
###########################################
######## AGENT SERVICE KEYS ###############
###########################################
#------ SEARCH ENGINES -------
#=============================
#------ Google Search -------- https://programmablesearchengine.google.com/controlpanel/create
# AGENT_GSE_KEY=
# AGENT_GSE_CTX=
#------ SearchApi.io ----------- https://www.searchapi.io/
# AGENT_SEARCHAPI_API_KEY=
# AGENT_SEARCHAPI_ENGINE=google
#------ Serper.dev ----------- https://serper.dev/
# AGENT_SERPER_DEV_KEY=
#------ Bing Search ----------- https://portal.azure.com/
# AGENT_BING_SEARCH_API_KEY=
#------ Serply.io ----------- https://serply.io/
# AGENT_SERPLY_API_KEY=
#------ SearXNG ----------- https://github.com/searxng/searxng
# AGENT_SEARXNG_API_URL=
#------ Tavily ----------- https://www.tavily.com/
# AGENT_TAVILY_API_KEY=
###########################################
######## Other Configurations ############
###########################################
# Disable viewing chat history from the UI and frontend APIs.
# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
# DISABLE_VIEW_CHAT_HISTORY=1
# Enable simple SSO passthrough to pre-authenticate users from a third party service.
# See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
# SIMPLE_SSO_ENABLED=1
# Specify the target languages for when using OCR to parse images and PDFs.
# This is a comma separated list of language codes as a string. Unsupported languages will be ignored.
# Default is English. See https://tesseract-ocr.github.io/tessdoc/Data-Files-in-different-versions.html for a list of valid language codes.
# TARGET_OCR_LANG=eng,deu,ita,spa,fra,por,rus,nld,tur,hun,pol

17
anythinllm/compose.yaml Normal file

@@ -0,0 +1,17 @@
version: "3.8"
services:
anythingllm:
image: docker.citory.tech/mirror/mintplexlabs/anythingllm:latest
container_name: anythingllm
ports:
- 10504:3001
env_file:
- .env
cap_add:
- SYS_ADMIN
extra_hosts:
- host.docker.internal:host-gateway
restart: always
x-dockge:
urls:
- http://local.citory.tech:10504
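
Note how this stack reaches its model server: anythinllm/.env sets LOCAL_AI_BASE_PATH to http://host.docker.internal:10580/v1, which is the vllm stack defined later in this commit; LOCAL_AI_API_KEY matches vllm's --api-key, and LOCAL_AI_MODEL_TOKEN_LIMIT=8192 matches its --max-model-len. The extra_hosts entry above is what makes that hostname resolve. In isolation (service name hypothetical), the pattern is:

services:
  some-client: # hypothetical consumer of a service published on the host
    extra_hosts:
      - host.docker.internal:host-gateway # resolves to the Docker host, where vllm publishes port 10580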

1
chattts/.env Normal file

@@ -0,0 +1 @@
CUDA_VISIBLE_DEVICES=0,1

23
chattts/compose.yaml Normal file

@@ -0,0 +1,23 @@
version: "3.8"
services:
lenml-chattts-forge:
image: docker.citory.tech/public/lenml-chattts-forge
container_name: lenml-chattts-forge
ports:
- 10583:7860
env_file: .env
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities:
- gpu
restart: always
command: python webui.py
runtime: nvidia
x-dockge:
urls:
- http://local.citory.tech:10583
networks: {}
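
This is the first of several stacks in this commit (comfyui, nlp, owlsam, toolbox-ocr, vllm, and in slightly different form ollama) that grant GPU access the same way: runtime: nvidia plus a device reservation. Stripped to its minimum — service name hypothetical, and the NVIDIA Container Toolkit is assumed on the host — the pattern is:

services:
  gpu-service: # hypothetical
    runtime: nvidia
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all # or a fixed number, as the vllm stack does with count: 2
              capabilities:
                - gpu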

1
comfyui/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

27
comfyui/compose.yaml Normal file

@@ -0,0 +1,27 @@
version: "3.8"
services:
comfyui:
image: docker.citory.tech/mirror/yanwk/comfyui-boot:cu121
container_name: comfyui
ports:
- 10587:8188
volumes:
- /home/deepgeek/data/data_local/server/comfyui/storage:/home/runner
- /home/deepgeek/data/data_local/server/sd-models/checkpoints:/home/runner/ComfyUI/models/checkpoints
- /home/deepgeek/data/data_local/server/sd-models/loras:/home/runner/ComfyUI/models/loras
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities:
- compute
- utility
- gpu
restart: always
runtime: nvidia
x-dockge:
urls:
- http://local.citory.tech:10587
networks: {}

1
gpustat-web/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

13
gpustat-web/compose.yaml Normal file

@@ -0,0 +1,13 @@
version: "3.8"
services:
gpustat-web:
image: docker.citory.tech/public/gpustat-web:10599
container_name: gpustat-web
ports:
- 10509:80
extra_hosts:
- host.docker.internal:host-gateway
restart: always
x-dockge:
urls:
- http://local.citory.tech:10509

5
kodbox/.env Normal file

@@ -0,0 +1,5 @@
MYSQL_PASSWORD=SFMhkWEZdeb6jxtel0igQGToKyJ7bHwd
MYSQL_DATABASE=kodbox
MYSQL_USER=kodbox
KODBOX_ADMIN_USER=deepgeek
KODBOX_ADMIN_PASSWORD=DeepGeek2025

39
kodbox/compose.yaml Normal file

@@ -0,0 +1,39 @@
version: "3.8"
services:
kodbox-db:
image: docker.citory.tech/mirror/mariadb:10.6
container_name: kodbox-db
restart: always
command: --transaction-isolation=READ-COMMITTED --log-bin=binlog --binlog-format=ROW
volumes:
      - ./db:/var/lib/mysql # ./db is the database persistence directory; change if needed
environment:
- MYSQL_ROOT_PASSWORD=DeepGeek2025
- MARIADB_AUTO_UPGRADE=1
- MARIADB_DISABLE_UPGRADE_BACKUP=1
env_file:
- .env
kodbox:
image: docker.citory.tech/mirror/kodcloud/kodbox:latest
container_name: kodbox
restart: always
ports:
      - 10510:80 # the left-hand value is the host port; change if needed
volumes:
      - /home/deepgeek/data/data_base/kodbox/data:/var/www/html # site directory location; change if needed
environment:
- MYSQL_HOST=kodbox-db
- REDIS_HOST=kodbox-redis
env_file:
- .env
depends_on:
- kodbox-db
- kodbox-redis
kodbox-redis:
image: docker.citory.tech/mirror/redis:alpine
container_name: kodbox-redis
restart: always
x-dockge:
urls:
- http://local.citory.tech:10510
networks: {}
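
depends_on as written only orders container startup; it does not wait for MariaDB to actually accept connections. If that ever matters, the dependency can be gated on a healthcheck — a sketch, not part of this commit, assuming the mariadb image ships its healthcheck.sh helper:

services:
  kodbox-db:
    healthcheck:
      test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
      interval: 10s
      retries: 5
  kodbox:
    depends_on:
      kodbox-db:
        condition: service_healthy # start kodbox only once the DB reports healthy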

1
nlp/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

17
nlp/compose.yaml Normal file

@@ -0,0 +1,17 @@
version: "3.8"
services:
toolbox-nlp:
image: docker.citory.tech/public/toolbox-nlp:1.0.1
container_name: toolbox-nlp
ports:
- 10584:8080
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities:
- gpu
restart: always
runtime: nvidia

1
ollama/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

22
ollama/compose.yaml Normal file

@@ -0,0 +1,22 @@
version: "3.9"
services:
ollama:
image: docker.citory.tech/mirror/ollama/ollama:latest
container_name: ollama
ports:
- 10581:11434
runtime: nvidia
volumes:
- /home/deepgeek/data/data_local/server/ollama:/root/.ollama
environment:
NVIDIA_VISIBLE_DEVICES: all
deploy:
resources:
reservations:
devices:
- capabilities:
- gpu
x-dockge:
urls:
- http://local.citory.tech:10581
networks: {}

1
open-webui/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

22
open-webui/compose.yaml Normal file

@@ -0,0 +1,22 @@
version: "3.8"
services:
open-webui:
image: docker.citory.tech/mirror/backplane/open-webui:0
container_name: open-webui
ports:
- 10503:8080
environment:
- ENABLE_RAG_WEB_SEARCH=true
- RAG_WEB_SEARCH_ENGINE=duckduckgo
- ENABLE_OLLAMA_API=false
      - OPENAI_API_KEY=1ZfFN6nICGfMAUhPKwRbpmbwnd9aYkwT8RbluK32ASpPZgglPhdmLv4zDHh7BebQ # replace with your OpenAI API key
      - WHISPER_MODEL=large
      - OPENAI_API_BASE_URL=http://host.docker.internal:10580/v1 # replace with your vLLM host and port
volumes:
- ./data/:/app/backend/data
extra_hosts:
- host.docker.internal:host-gateway
restart: always
x-dockge:
urls:
- http://local.citory.tech:10503

1
owlsam/.env Normal file

@@ -0,0 +1 @@
CUDA_VISIBLE_DEVICES=0,1

21
owlsam/compose.yaml Normal file

@@ -0,0 +1,21 @@
version: "3.8"
services:
owlsam:
image: docker.citory.tech/public/toolbox-owlsam:1.0.1
container_name: owlsam
ports:
- 10582:8080
env_file: .env
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities:
- gpu
restart: always
runtime: nvidia
x-dockge:
urls:
- http://local.citory.tech:10582

1
portainer/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

15
portainer/compose.yaml Normal file

@@ -0,0 +1,15 @@
version: "3.8"
services:
portainer:
image: docker.citory.tech/mirror/6053537/portainer-ce:latest
container_name: portainer
ports:
- 10507:9000
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
- ./data:/data
restart: always
x-dockge:
urls:
- http://local.citory.tech:10507

@@ -0,0 +1 @@
# VARIABLE=value #comment

@@ -0,0 +1,34 @@
version: "3.9"
services:
stable-diffusion-webui:
image: docker.citory.tech/public/stable-diffusion-webui:installed
container_name: stable-diffusion-webui
ports:
- 10588:7860
volumes:
- /home/deepgeek/data/data_local/server/stable-diffusion-webui-docker/data:/data
- /home/deepgeek/data/data_local/server/stable-diffusion-webui-docker/data/config/auto/localizations:/stable-diffusion-webui/localizations
- /home/deepgeek/data/data_local/server/stable-diffusion-webui-docker/output:/output
- /home/deepgeek/data/data_local/server/sd-models/checkpoints:/data/models/Stable-diffusion
- /home/deepgeek/data/data_local/server/sd-models/loras:/data/models/Lora
tty: true
environment:
- CUDA_LAUNCH_BLOCKING=1
      - COMMANDLINE_ARGS=--api --enable-insecure-extension-access # merged: a repeated key overrides the earlier value, and quotes become part of the value
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities:
- compute
- utility
- gpu
restart: always
runtime: nvidia
x-dockge:
urls:
- http://local.citory.tech:10588
networks: {}

1
sun-panel/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

18
sun-panel/compose.yaml Normal file

@@ -0,0 +1,18 @@
version: "3.8"
services:
sun-panel:
image: docker.citory.tech/mirror/hslr/sun-panel:latest
container_name: sun-panel
volumes:
- ./conf:/app/conf
      - /var/run/docker.sock:/var/run/docker.sock # mount docker.sock
      - ./runtime:/app/runtime # mount the log directory
      - /mnt/data_base:/os # disk mount point (adjust to your setup)
      - /:/system # disk mount point (adjust to your setup)
ports:
- 10500:3002
restart: always
networks: {}
x-dockge:
urls:
- http://local.citory.tech:10500

Binary files not shown.

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>vLLM</title><path d="M0 4.973h9.324V23L0 4.973z" fill="#FDB515"></path><path d="M13.986 4.351L22.378 0l-6.216 23H9.324l4.662-18.649z" fill="#30A2FF"></path></svg>

Binary files not shown.

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="640" height="640" viewBox="0 0 640 640" xml:space="preserve">
<desc>Created with Fabric.js 5.3.0</desc>
<defs>
</defs>
<g transform="matrix(0.9544918218 0 0 0.9544918218 320 325.5657767239)" id="0UAuLmXgnot4bJxVEVJCQ" >
<linearGradient id="SVGID_136_0" gradientUnits="userSpaceOnUse" gradientTransform="matrix(1 0 0 1 -236.6470440833 -213.9441386034)" x1="259.78" y1="261.15" x2="463.85" y2="456.49">
<stop offset="0%" style="stop-color:#74C2FF;stop-opacity: 1"/>
<stop offset="100%" style="stop-color:rgb(134,230,169);stop-opacity: 1"/>
</linearGradient>
<path style="stroke: rgb(242,242,242); stroke-opacity: 0.51; stroke-width: 190; stroke-dasharray: none; stroke-linecap: butt; stroke-dashoffset: 0; stroke-linejoin: miter; stroke-miterlimit: 4; fill: url(#SVGID_136_0); fill-rule: nonzero; opacity: 1;" transform=" translate(0, 0)" d="M 131.8665 -139.04883 C 159.01022 -111.20969000000001 170.12421 -99.45396000000001 203.11849999999998 -51.72057000000001 C 236.1128 -3.9871800000000093 264.44147999999996 83.98416999999998 187.33995 144.05073 C 177.72728999999998 151.53955 166.73827 158.81189999999998 154.65932999999998 165.65812999999997 C 69.85514999999998 213.72433999999998 -68.67309000000003 240.78578 -161.79279 174.28328999999997 C -268.17583 98.30862999999997 -260.10282 -68.66557000000003 -144.35093 -170.50579000000005 C -28.599040000000002 -272.34602000000007 104.72278 -166.88797000000005 131.86649999999997 -139.04883000000004 z" stroke-linecap="round" />
</g>
</svg>

Binary files not shown.

1
toolbox-ocr/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

20
toolbox-ocr/compose.yaml Normal file

@@ -0,0 +1,20 @@
version: "3.8"
services:
toolbox-ocr:
image: docker.citory.tech/public/toolbox-ocr:1.0.0
container_name: toolbox-ocr
ports:
- 10585:8080
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities:
- gpu
restart: always
runtime: nvidia
x-dockge:
urls:
- http://local.citory.tech:10585

1
vllm/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

32
vllm/compose.yaml Normal file

@@ -0,0 +1,32 @@
version: "3.8"
services:
vllm:
image: docker.citory.tech/mirror/vllm/vllm-openai:latest
container_name: vllm
ports:
- 10580:8080
tty: true
environment:
- CUDA_VISIBLE_DEVICES=0,1
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 2
capabilities:
- gpu
ipc: host
runtime: nvidia
volumes:
- /home/deepgeek/data/data_local/server/vllm/models:/models
restart: always
command: --served-model-name DeepSeek-R1-Distill-Llama-70B-AWQ --model
/models/Valdemardi/DeepSeek-R1-Distill-Llama-70B-AWQ --trust-remote-code
--host 0.0.0.0 --port 8080 --max-model-len 8192 --tensor-parallel-size 2
--gpu_memory_utilization 0.96 --enforce-eager --dtype auto --swap-space 8
--api-key
"O8Is3NSYnp5fICWFbhkbwpLWgvMLkdCSuXR5ZggLmgwTKNPEWsjx1NqUxkyU7wLX"
x-dockge:
urls:
- http://local.citory.tech:10580

1
watchtower/.env Normal file

@@ -0,0 +1 @@
# VARIABLE=value #comment

9
watchtower/compose.yaml Normal file

@@ -0,0 +1,9 @@
version: "3.8"
services:
watchtower:
container_name: watchtower
image: docker.citory.tech/mirror/containrrr/watchtower:latest
volumes:
- /var/run/docker.sock:/var/run/docker.sock
    command: --interval 3600 # check for updates every hour
networks: {}
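
With the Docker socket mounted, watchtower as configured will update every container it can see. To restrict it to stacks that opt in, watchtower's label filter could be used — a sketch, not part of this commit:

services:
  watchtower:
    image: docker.citory.tech/mirror/containrrr/watchtower:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    command: --interval 3600 --label-enable # only labeled containers are updated
  example-service: # hypothetical opted-in service
    image: alpine
    labels:
      - com.centurylinklabs.watchtower.enable=true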