From df219c6ae659d134f0ed8c1f885bea5b0bdef9c1 Mon Sep 17 00:00:00 2001 From: wvivi2023 Date: Thu, 28 Mar 2024 10:55:52 +0800 Subject: [PATCH] fix download original issue when chatting --- configs/model_config.py | 1 + server/chat/knowledge_base_chat.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/configs/model_config.py b/configs/model_config.py index a331d14..3bb206f 100644 --- a/configs/model_config.py +++ b/configs/model_config.py @@ -1,5 +1,6 @@ import os +DOWNLOAD_BASE_URL = "http://10.138.20.199:17861" # 可以指定一个绝对路径,统一存放所有的Embedding和LLM模型。 # 每个模型可以是一个单独的目录,也可以是某个目录下的二级子目录。 # 如果模型目录名称和 MODEL_PATH 中的 key 或 value 相同,程序会自动检测加载,无需修改 MODEL_PATH 中的路径。 diff --git a/server/chat/knowledge_base_chat.py b/server/chat/knowledge_base_chat.py index 5a14c26..40d4c71 100644 --- a/server/chat/knowledge_base_chat.py +++ b/server/chat/knowledge_base_chat.py @@ -10,7 +10,8 @@ from configs import (LLM_MODELS, USE_RERANKER, RERANKER_MODEL, RERANKER_MAX_LENGTH, - MODEL_PATH) + MODEL_PATH, + DOWNLOAD_BASE_URL) from server.utils import wrap_done, get_ChatOpenAI from server.utils import BaseResponse, get_prompt_template from langchain.chains import LLMChain @@ -124,8 +125,11 @@ async def knowledge_base_chat(query: str = Body(..., description="用户输入", for inum, doc in enumerate(docs): filename = doc.metadata.get("source") parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name": filename}) - base_url = request.base_url - url = f"{base_url}knowledge_base/download_doc?" + parameters #base_url = request.base_url #base_url = "http://10.138.20.199:17861/" base_url = DOWNLOAD_BASE_URL print(f"base_url:{base_url}") url = f"{base_url}/knowledge_base/download_doc?" + parameters text = f"""出处 [{inum + 1}] [(unknown)]({url}) \n\n{doc.page_content}\n\n""" source_documents.append(text)