update file format
parent f2d917d1e4
commit c34b4136c0
```diff
@@ -48,7 +48,7 @@ PROMPT_TEMPLATES = {
         '<已知信息>{{ context }}</已知信息>\n'
         '<问题>{{ question }}</问题>\n',
 
-    "Empty":  # 搜不到知识库的时候使用
+    "empty":  # 搜不到知识库的时候使用
         '请你回答我的问题:\n'
         '{{ question }}\n\n',
     },
```
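The rename from "Empty" to "empty" matters because template names are looked up case-sensitively; the call sites are updated to match in a later hunk of this commit. A minimal sketch of the lookup this relies on (the real PROMPT_TEMPLATES and get_prompt_template live in configs/prompt_config.py and may differ in detail):

```python
# Minimal sketch, not the project's actual implementation: a plain
# case-sensitive dict lookup is enough to show why "Empty" and "empty"
# cannot be used interchangeably.
PROMPT_TEMPLATES = {
    "knowledge_base_chat": {
        "empty":  # used when the knowledge base returns no hits
            '请你回答我的问题:\n'
            '{{ question }}\n\n',
    },
}

def get_prompt_template(type: str, name: str) -> str:
    return PROMPT_TEMPLATES[type][name]

print(get_prompt_template("knowledge_base_chat", "empty"))  # OK
try:
    get_prompt_template("knowledge_base_chat", "Empty")
except KeyError:
    print('"Empty" no longer exists after this commit')
```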
```diff
@@ -18,36 +18,49 @@ from server.knowledge_base.kb_doc_api import search_docs
 async def knowledge_base_chat(query: str = Body(..., description="用户输入", examples=["你好"]),
-                              knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
-                              top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
-                              score_threshold: float = Body(SCORE_THRESHOLD, description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右", ge=0, le=2),
-                              history: List[History] = Body([],
-                                                            description="历史对话",
-                                                            examples=[[
-                                                                {"role": "user",
-                                                                 "content": "我们来玩成语接龙,我先来,生龙活虎"},
-                                                                {"role": "assistant",
-                                                                 "content": "虎头虎脑"}]]
-                                                            ),
-                              stream: bool = Body(False, description="流式输出"),
-                              model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
-                              temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
-                              max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量,默认None代表模型最大值"),
-                              prompt_name: str = Body("default", description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),
-                              request: Request = None,
-                              ):
+                              knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
+                              top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
+                              score_threshold: float = Body(
+                                  SCORE_THRESHOLD,
+                                  description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右",
+                                  ge=0,
+                                  le=2
+                              ),
+                              history: List[History] = Body(
+                                  [],
+                                  description="历史对话",
+                                  examples=[[
+                                      {"role": "user",
+                                       "content": "我们来玩成语接龙,我先来,生龙活虎"},
+                                      {"role": "assistant",
+                                       "content": "虎头虎脑"}]]
+                              ),
+                              stream: bool = Body(False, description="流式输出"),
+                              model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
+                              temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
+                              max_tokens: Optional[int] = Body(
+                                  None,
+                                  description="限制LLM生成Token数量,默认None代表模型最大值"
+                              ),
+                              prompt_name: str = Body(
+                                  "default",
+                                  description="使用的prompt模板名称(在configs/prompt_config.py中配置)"
+                              ),
+                              request: Request = None,
+                              ):
     kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
     if kb is None:
         return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
 
     history = [History.from_data(h) for h in history]
 
-    async def knowledge_base_chat_iterator(query: str,
-                                           top_k: int,
-                                           history: Optional[List[History]],
-                                           model_name: str = LLM_MODELS[0],
-                                           prompt_name: str = prompt_name,
-                                           ) -> AsyncIterable[str]:
+    async def knowledge_base_chat_iterator(
+            query: str,
+            top_k: int,
+            history: Optional[List[History]],
+            model_name: str = LLM_MODELS[0],
+            prompt_name: str = prompt_name,
+    ) -> AsyncIterable[str]:
         nonlocal max_tokens
         callback = AsyncIteratorCallbackHandler()
         if isinstance(max_tokens, int) and max_tokens <= 0:
```
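The reformatting does not change the request body, so existing clients are unaffected. For reference, a call that exercises these parameters might look like the following; the route path and host/port are assumptions, since they are configured outside this hunk:

```python
import requests

# Route path and host/port are assumptions; they are defined outside this diff.
url = "http://127.0.0.1:7861/chat/knowledge_base_chat"

payload = {
    "query": "你好",
    "knowledge_base_name": "samples",
    "top_k": 3,                # overrides VECTOR_SEARCH_TOP_K
    "score_threshold": 0.5,    # smaller score = more relevant; 1 disables filtering
    "history": [
        {"role": "user", "content": "我们来玩成语接龙,我先来,生龙活虎"},
        {"role": "assistant", "content": "虎头虎脑"},
    ],
    "stream": False,
    "prompt_name": "default",  # model_name/temperature/max_tokens fall back to server defaults
}

resp = requests.post(url, json=payload)
print(resp.json())
```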
```diff
@@ -61,8 +74,8 @@ async def knowledge_base_chat(query: str = Body(..., description="用户输入",
         )
         docs = search_docs(query, knowledge_base_name, top_k, score_threshold)
         context = "\n".join([doc.page_content for doc in docs])
-        if len(docs) == 0: ## 如果没有找到相关文档,使用Empty模板
-            prompt_template = get_prompt_template("knowledge_base_chat", "Empty")
+        if len(docs) == 0:  # 如果没有找到相关文档,使用empty模板
+            prompt_template = get_prompt_template("knowledge_base_chat", "empty")
         else:
             prompt_template = get_prompt_template("knowledge_base_chat", prompt_name)
         input_msg = History(role="user", content=prompt_template).to_msg_template(False)
```
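This fallback fires when search_docs returns nothing that clears score_threshold. A toy illustration of the threshold semantics from the parameter docs, where scores are distances and smaller means more relevant (the real filtering happens inside search_docs against the vector store):

```python
# Toy illustration only; real scores come from the vector store.
hits = [("doc_a.txt", 0.21), ("doc_b.txt", 0.48), ("doc_c.txt", 0.93)]

score_threshold = 0.5   # keep hits at distance <= 0.5
docs = [name for name, score in hits if score <= score_threshold]
print(docs)   # ['doc_a.txt', 'doc_b.txt']

# With score_threshold = 1 every hit passes (filtering disabled); with no
# surviving docs the endpoint falls back to the "empty" template above.
```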
```diff
@@ -80,14 +93,14 @@ async def knowledge_base_chat(query: str = Body(..., description="用户输入",
         source_documents = []
         for inum, doc in enumerate(docs):
             filename = doc.metadata.get("source")
-            parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name":filename})
+            parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name": filename})
             base_url = request.base_url
             url = f"{base_url}knowledge_base/download_doc?" + parameters
             text = f"""出处 [{inum + 1}] [{filename}]({url}) \n\n{doc.page_content}\n\n"""
             source_documents.append(text)
 
-        if len(source_documents) == 0: # 没有找到相关文档
-            source_documents.append(f"""<span style='color:red'>未找到相关文档,该回答为大模型自身能力解答!</span>""")
+        if len(source_documents) == 0:  # 没有找到相关文档
+            source_documents.append(f"<span style='color:red'>未找到相关文档,该回答为大模型自身能力解答!</span>")
 
         if stream:
             async for token in callback.aiter():
```
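Formatting fixes aside, this loop builds a percent-encoded download link for each source document. The construction can be reproduced standalone; the base URL below is illustrative, since at runtime it comes from request.base_url:

```python
from urllib.parse import urlencode

# Illustrative values; at runtime base_url comes from request.base_url.
base_url = "http://127.0.0.1:7861/"
parameters = urlencode({"knowledge_base_name": "samples", "file_name": "test.txt"})
url = f"{base_url}knowledge_base/download_doc?" + parameters
print(url)
# http://127.0.0.1:7861/knowledge_base/download_doc?knowledge_base_name=samples&file_name=test.txt
```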
```diff
@@ -108,4 +121,4 @@ async def knowledge_base_chat(query: str = Body(..., description="用户输入",
                                                      history=history,
                                                      model_name=model_name,
                                                      prompt_name=prompt_name),
-                             media_type="text/event-stream")
+                         media_type="text/event-stream")
```
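Since the iterator is wrapped in a StreamingResponse with media_type="text/event-stream", a streaming client reads tokens as they arrive. A sketch, assuming the same endpoint URL as in the earlier example:

```python
import requests

# Same assumed URL as the earlier example; only the payload differs.
url = "http://127.0.0.1:7861/chat/knowledge_base_chat"
payload = {"query": "你好", "knowledge_base_name": "samples", "stream": True}

with requests.post(url, json=payload, stream=True) as resp:
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)   # tokens arrive incrementally
```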