refactor(docker): optimize the nginx config and fix the 1M file-upload limit (#48)

Remove the vllm module's docker-compose config and related scripts; streamline the management-frontend nginx config.
zstar 2025-04-24 21:03:38 +08:00 committed by GitHub
parent 83f4ed5390
commit 35bb70cee9
6 changed files with 39 additions and 142 deletions

View File

@@ -32,10 +32,12 @@ services:
  management-frontend:
    container_name: ragflowplus-management-frontend
    image: zstar1003/ragflowplus-management-web:v0.2.0
    build:
      context: .
      dockerfile: Dockerfile
      target: frontend
    # build:
    #   context: .
    #   dockerfile: Dockerfile
    #   target: frontend
    volumes:
      - ./nginx/management_nginx.conf:/etc/nginx/conf.d/default.conf
    ports:
      - "8888:80"
    depends_on:
@@ -44,7 +46,7 @@ services:
      - API_BASE_URL=/api
    networks:
      - ragflow
  # Backend of the admin management system
  management-backend:
    container_name: ragflowplus-management-backend

View File

@@ -32,10 +32,12 @@ services:
  management-frontend:
    container_name: ragflowplus-management-frontend
    image: zstar1003/ragflowplus-management-web:v0.2.0
    build:
      context: .
      dockerfile: Dockerfile
      target: frontend
    # build:
    #   context: .
    #   dockerfile: Dockerfile
    #   target: frontend
    volumes:
      - ./nginx/management_nginx.conf:/etc/nginx/conf.d/default.conf
    ports:
      - "8888:80"
    depends_on:

View File

@@ -0,0 +1,26 @@
server {
    listen 80;
    # Raise nginx's default 1m request-body limit, which made uploads
    # larger than 1 MB fail with HTTP 413
    client_max_body_size 500M;

    location / {
        root /usr/share/nginx/html;
        try_files $uri $uri/ /index.html;
    }

    location /v3-admin-vite/ {
        alias /usr/share/nginx/html/;
        try_files $uri $uri/ /index.html;
    }

    location /api/ {
        # Forward every request starting with /api/ to the backend service
        # (port 5000 of the management-backend container)
        proxy_pass http://management-backend:5000/api/;
        # Preserve the original request's Host header
        proxy_set_header Host $host;
        # Pass the client's real IP along
        proxy_set_header X-Real-IP $remote_addr;
        # Append to X-Forwarded-For for proxy-chain tracing
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
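
Since the new config's whole purpose is lifting nginx's default 1m body limit, a quick end-to-end check is to push a payload larger than 1 MB through the frontend proxy. A minimal sketch, assuming the "8888:80" port mapping from the compose file above; the upload path and form field name are hypothetical placeholders for whatever endpoint the management backend actually exposes:

import io

import requests

# Hypothetical upload endpoint behind the management-frontend proxy;
# port 8888 comes from the compose file, the path is a placeholder.
URL = "http://localhost:8888/api/v1/files/upload"

# 2 MB of dummy data: over the old 1m default, far under the new 500M cap
payload = io.BytesIO(b"x" * (2 * 1024 * 1024))

resp = requests.post(URL, files={"file": ("dummy.bin", payload)})
if resp.status_code == 413:
    print("Still hitting nginx's request-body limit")
else:
    print(f"Request passed the proxy (HTTP {resp.status_code})")

Before the fix, nginx itself answered 413 Request Entity Too Large without the request ever reaching the backend; any other status code means the body made it through the proxy.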

View File

@@ -1,50 +0,0 @@
services:
  vllm-bge:
    image: vllm/vllm-openai:latest
    ipc: host
    volumes:
      - ./models/bge-m3:/models
    command: [
      "--model", "/models",
      "--served-model-name", "bge-m3",
      "--dtype", "float16",
      "--gpu-memory-utilization", "0.9",
    ]
    ports:
      - "8000:8000"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
    networks:
      - ragflow

  vllm-deepseek:
    image: vllm/vllm-openai:latest
    ipc: host
    volumes:
      - ./models/DeepSeek-R1-1.5B:/models
    command: [
      "--model", "/models",
      "--served-model-name", "deepseek-r1",
      "--dtype", "float16",
      "--tensor-parallel-size", "1",
      "--max-model-len", "4096"
    ]
    ports:
      - "8001:8000"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
    networks:
      - ragflow

networks:
  ragflow:
    name: docker_ragflow
    driver: bridge

View File

@@ -1,34 +0,0 @@
import os
import time

from huggingface_hub import snapshot_download

# 1. Set a mirror endpoint (faster downloads from mainland China)
# os.environ["HF_ENDPOINT"] = "https://mirrors.tuna.tsinghua.edu.cn/hugging-face/"

# 2. Define the model list (repo name + local download path)
models_to_download = [
    {
        "repo_id": "BAAI/bge-m3",  # embedding model
        "local_dir": os.path.expanduser("./models/bge-m3"),
    },
    {
        "repo_id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",  # LLM
        "local_dir": os.path.expanduser("./models/DeepSeek-R1-1.5B"),
    },
]

# 3. Download every model in the list
for model in models_to_download:
    while True:  # retry loop so interrupted downloads can pick up where they left off
        try:
            print(f"Downloading {model['repo_id']} to {model['local_dir']}")
            snapshot_download(
                repo_id=model["repo_id"],
                local_dir=model["local_dir"],
                resume_download=True,  # deprecated in recent huggingface_hub versions; resuming is the default there
                force_download=False,  # skip files that are already present
                token=None,  # replace with your token for private models
            )
            print(f"Finished downloading {model['repo_id']}")
            break
        except Exception as e:
            print(f"Download failed: {e}, retrying...")
            time.sleep(5)  # brief pause to avoid hammering the endpoint

View File

@@ -1,49 +0,0 @@
from openai import OpenAI


# Test the embedding model (vllm-bge)
def test_embedding(model, text):
    """Test the embedding model."""
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="1")
    response = client.embeddings.create(
        model=model,  # a model that supports embeddings
        input=text,  # the text to embed
    )
    # Check the response before indexing into it
    if response and response.data:
        print(len(response.data[0].embedding))
    else:
        print("Failed to get embedding.")


# Test the text-generation model (vllm-deepseek)
def test_chat(model, prompt):
    """Test the text-generation model."""
    client = OpenAI(base_url="http://localhost:8001/v1", api_key="1")
    response = client.completions.create(
        model=model,
        prompt=prompt,
    )
    # Print the generated text
    print(f"Chat response: {response.choices[0].text}")


def main():
    # Test the text-generation model deepseek-r1
    prompt = "你好,今天的天气怎么样?"
    print("Testing vllm-deepseek model for chat...")
    test_chat("deepseek-r1", prompt)

    # Test the embedding model bge-m3
    embedding_text = "我喜欢编程尤其是做AI模型。"
    print("\nTesting vllm-bge model for embedding...")
    test_embedding("bge-m3", embedding_text)


if __name__ == "__main__":
    main()
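
The removed script reads each completion in a single response; for longer generations, a streaming call makes the smoke test more responsive. A minimal sketch under the same assumptions as the script above (vllm-deepseek reachable on localhost:8001, served model name deepseek-r1):

from openai import OpenAI

# Same endpoint assumptions as the removed test script
client = OpenAI(base_url="http://localhost:8001/v1", api_key="1")

# stream=True yields chunks as tokens are generated, instead of one final response
stream = client.completions.create(
    model="deepseek-r1",
    prompt="你好,今天的天气怎么样?",
    max_tokens=128,
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].text, end="", flush=True)
print()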