Langchain-Chatchat/webui.py

171 lines
6.3 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

import gradio as gr
import os
import shutil
from chains.local_doc_qa import LocalDocQA
from configs.model_config import *
def get_file_list():
    """Return the names of the files in the local ``content`` directory.

    Returns an empty list when the directory does not exist yet (i.e. no
    document has been uploaded so far).
    """
    if not os.path.exists("content"):
        return []
    # os.listdir already returns a fresh list; the original wrapped it in a
    # redundant identity comprehension.
    return os.listdir("content")
# Module-level state shared by the UI callbacks below: the current list of
# uploaded documents, the selectable model names, and the QA chain instance.
file_list = get_file_list()
embedding_model_dict_list = list(embedding_model_dict)
llm_model_dict_list = list(llm_model_dict)
local_doc_qa = LocalDocQA()
def upload_file(file):
    """Persist a file uploaded via the Gradio widget and refresh the dropdown.

    Args:
        file: Gradio temp-file object; its ``.name`` attribute is the path of
            the uploaded temporary file.

    Returns:
        A ``gr.Dropdown`` update whose choices include the new file, with the
        new file pre-selected.
    """
    # exist_ok avoids the check-then-create race (and a crash if another
    # upload created the directory between the check and mkdir).
    os.makedirs("content", exist_ok=True)
    filename = os.path.basename(file.name)
    shutil.move(file.name, os.path.join("content", filename))
    # Insert the newly uploaded file at the head of the choice list so it
    # appears first in the dropdown.
    file_list.insert(0, filename)
    return gr.Dropdown.update(choices=file_list, value=filename)
def get_answer(query, vs_path, history):
    """Run the knowledge-based QA chain for *query* and return the updated
    chat history twice (once for the chatbot widget, once for the history
    state)."""
    _, updated_history = local_doc_qa.get_knowledge_based_answer(
        query=query,
        vs_path=vs_path,
        chat_history=history,
    )
    return updated_history, updated_history
def update_status(history, status):
    """Append *status* as a bot-side message to *history* and echo it to
    stdout.

    The input list is not mutated; a new history list is returned.
    """
    updated = list(history)
    updated.append([None, status])
    print(status)
    return updated
def init_model():
    """Initialise the QA chain with its default configuration.

    Returns a (Chinese) status string for display in the chatbot; never
    raises, because the caller shows the message instead of handling errors.
    """
    try:
        local_doc_qa.init_cfg()
        return """模型已成功加载,请选择文件后点击"加载文件"按钮"""
    except Exception as e:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; print the cause instead of swallowing it silently.
        print(e)
        return """模型未成功加载,请重新选择后点击"加载模型"按钮"""
def reinit_model(llm_model, embedding_model, llm_history_len, top_k):
    """Reload the QA chain with user-selected models and parameters.

    Args:
        llm_model: Name of the LLM to load (key of ``llm_model_dict``).
        embedding_model: Name of the embedding model to load.
        llm_history_len: Number of past turns the LLM keeps as context.
        top_k: Number of vector-store matches to retrieve per query.

    Returns a (Chinese) status string for display in the chatbot; never
    raises.
    """
    try:
        local_doc_qa.init_cfg(llm_model=llm_model,
                              embedding_model=embedding_model,
                              llm_history_len=llm_history_len,
                              top_k=top_k)
        return """模型已成功重新加载,请选择文件后点击"加载文件"按钮"""
    except Exception as e:
        # Narrowed from a bare ``except:``; report the failure cause rather
        # than discarding it.
        print(e)
        return """模型未成功重新加载,请重新选择后点击"加载模型"按钮"""
def get_vector_store(filepath):
    """Build (or load) a vector store from ``content/<filepath>``.

    Args:
        filepath: File name relative to the ``content`` directory.

    Returns:
        ``(vs_path, status_message)`` — ``vs_path`` is falsy when the
        document could not be loaded.
    """
    # os.path.join instead of string concatenation for portable paths.
    vs_path = local_doc_qa.init_knowledge_vector_store(
        [os.path.join("content", filepath)])
    if vs_path:
        file_status = "文件已成功加载,请开始提问"
    else:
        file_status = "文件未成功加载,请重新上传文件"
    print(file_status)
    return vs_path, file_status
# CSS injected into the Blocks layout: gradient styling for buttons tagged
# with the "importantButton" class.
block_css = """.importantButton {
background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
border: none !important;
}
.importantButton:hover {
background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
border: none !important;
}"""

# Markdown title rendered at the top of the page.
webui_title = """
# 🎉langchain-ChatGLM WebUI🎉
👍 [https://github.com/imClumsyPanda/langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM)
"""

# Greeting (in Chinese) describing the three setup steps; shown as the first
# chatbot message.
init_message = """欢迎使用 langchain-ChatGLM Web UI开始提问前请依次如下 3 个步骤:
1. 选择语言模型、Embedding 模型及相关参数后点击"重新加载模型",并等待加载完成提示
2. 上传或选择已有文件作为本地知识文档输入后点击"重新加载文档",并等待加载完成提示
3. 输入要提交的问题后,点击回车提交 """

# Load the model once at import time; the resulting status string is shown
# in the chatbot on startup.
model_status = init_model()
# Assemble the Gradio UI: a chat column on the left and a settings column
# (model selection, parameters, document select/upload) on the right.
with gr.Blocks(css=block_css) as demo:
    # Per-session state: vector-store path, chat history, and status strings.
    vs_path, history, file_status, model_status = gr.State(""), gr.State([]), gr.State(""), gr.State(model_status)
    gr.Markdown(webui_title)
    with gr.Row():
        with gr.Column(scale=2):
            # Chat window pre-filled with the greeting and model-load status.
            chatbot = gr.Chatbot([[None, init_message], [None, model_status.value]],
                                 elem_id="chat-box",
                                 show_label=False).style(height=750)
            query = gr.Textbox(show_label=False,
                               placeholder="请提问",
                               lines=1,
                               value="用200字总结一下"
                               ).style(container=False)
        with gr.Column(scale=1):
            llm_model = gr.Radio(llm_model_dict_list,
                                 label="LLM 模型",
                                 value=LLM_MODEL,
                                 interactive=True)
            llm_history_len = gr.Slider(0,
                                        10,
                                        value=3,
                                        step=1,
                                        label="LLM history len",
                                        interactive=True)
            embedding_model = gr.Radio(embedding_model_dict_list,
                                       label="Embedding 模型",
                                       value=EMBEDDING_MODEL,
                                       interactive=True)
            top_k = gr.Slider(1,
                              20,
                              value=6,
                              step=1,
                              label="向量匹配 top k",
                              interactive=True)
            load_model_button = gr.Button("重新加载模型")
            # Two tabs: pick an already-uploaded document, or upload a new one.
            with gr.Tab("select"):
                selectFile = gr.Dropdown(file_list,
                                         label="content file",
                                         interactive=True,
                                         value=file_list[0] if len(file_list) > 0 else None)
            with gr.Tab("upload"):
                file = gr.File(label="content file",
                               file_types=['.txt', '.md', '.docx', '.pdf']
                               )  # .style(height=100)
            load_file_button = gr.Button("重新加载文件")
    # Reload the model with the chosen settings, then echo the resulting
    # status string into the chat window.
    load_model_button.click(reinit_model,
                            show_progress=True,
                            inputs=[llm_model, embedding_model, llm_history_len, top_k],
                            outputs=model_status
                            ).then(update_status, [chatbot, model_status], chatbot)
    # Save the uploaded file under content/ and refresh the dropdown choices.
    file.upload(upload_file,
                inputs=file,
                outputs=selectFile)
    # Build the vector store for the selected file, then echo the status.
    load_file_button.click(get_vector_store,
                           show_progress=True,
                           inputs=selectFile,
                           outputs=[vs_path, file_status],
                           ).then(
        update_status, [chatbot, file_status], chatbot
    )
    # Pressing Enter in the textbox runs the knowledge-based QA chain.
    query.submit(get_answer,
                 [query, vs_path, chatbot],
                 [chatbot, history],
                 )

# Queue up to 3 concurrent requests and serve on all interfaces without a
# public share link.
demo.queue(concurrency_count=3).launch(
    server_name='0.0.0.0', share=False, inbrowser=False)