streamlit ui: implement streaming LLM chat
parent a1a7484ef4
commit 2c5b6bb0ad
@@ -17,4 +17,5 @@ pydantic~=1.10.11
 unstructured[local-inference]
 
 streamlit>=1.25.0
 streamlit-option-menu
+streamlit-chatbox>=1.1.0
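The new streamlit-chatbox dependency (pinned >=1.1.0 above) drives the chat rendering in webui.py below. A minimal self-contained sketch of the calls this commit relies on, with a fake token generator standing in for the LLM backend:

# sketch.py (run with: streamlit run sketch.py)
# Minimal demo of the streamlit-chatbox calls used by webui.py in this commit.
import time

import streamlit as st
from streamlit_chatbox import ChatBox


chat_box = ChatBox()
chat_box.output_messages()  # re-render stored history on every Streamlit rerun


def fake_stream(prompt: str):
    # stand-in for an LLM backend that yields text chunks
    for token in f"you said: {prompt}".split():
        time.sleep(0.1)
        yield token + " "


if prompt := st.chat_input("What is up?"):
    chat_box.user_say(prompt)
    chat_box.ai_say("...")
    text = ""
    for t in fake_stream(prompt):
        text += t
        chat_box.update_msg(text)               # rewrite the last AI message in place
    chat_box.update_msg(text, streaming=False)  # final write, drop the streaming cursor

update_msg() keeps overwriting the last AI message, which is what produces the streaming effect without the st.empty() placeholder the old code used.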
webui.py (52 lines changed)
@@ -1,6 +1,10 @@
 import streamlit as st
+from streamlit_chatbox import *
+from webui_utils import *
 from streamlit_option_menu import option_menu
-import openai
 
 
+api = ApiRequest()
+
+
 def dialogue_page():
     with st.sidebar:
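ApiRequest comes from webui_utils, which is outside this diff. For reading the next hunk, only its generator contract matters; a hypothetical stand-in (names and defaults assumed, not the project's real implementation):

# Hypothetical stand-in for webui_utils.ApiRequest (assumed names/defaults),
# showing only the contract dialogue_page() depends on below.
from typing import Iterator


class ApiRequest:
    def chat_chat(self, query: str, no_remote_api: bool = False) -> Iterator[str]:
        # no_remote_api=True presumably calls the local chat pipeline directly
        # instead of the HTTP API; either way the caller just iterates text chunks.
        for chunk in ("a ", "streamed ", "answer"):  # placeholder tokens
            yield chunk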
@@ -8,37 +12,30 @@ def dialogue_page():
                                      ["LLM 对话",
                                       "知识库问答",
                                       "Bing 搜索问答"])
+        history_len = st.slider("历史对话轮数:", 1, 10, 1)
         if dialogue_mode == "知识库问答":
-            selected_kb = st.selectbox("请选择知识库:", ["知识库1", "知识库2"])
+            selected_kb = st.selectbox("请选择知识库:", get_kb_list())
             with st.expander(f"{selected_kb} 中已存储文件"):
-                st.write("123")
+                st.write(get_kb_files(selected_kb))
 
     # Display chat messages from history on app rerun
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.markdown(message["content"])
+    chat_box.output_messages()
 
     if prompt := st.chat_input("What is up?"):
-        st.session_state.messages.append({"role": "user", "content": prompt})
-        with st.chat_message("user"):
-            st.markdown(prompt)
-
-        with st.chat_message("assistant"):
-            message_placeholder = st.empty()
-            full_response = ""
-            for response in openai.ChatCompletion.create(
-                model=OPENAI_MODEL,
-                messages=[
-                    {"role": m["role"], "content": m["content"]}
-                    for m in st.session_state.messages
-                ],
-                stream=True,
-            ):
-                full_response += response.choices[0].delta.get("content", "")
-                message_placeholder.markdown(full_response + "▌")
-            message_placeholder.markdown(full_response)
-            st.session_state.messages.append({"role": "assistant", "content": full_response})
+        chat_box.user_say(prompt)
+        chat_box.ai_say("正在思考...")
+        # with api.chat_fastchat([{"role": "user", "content": "prompt"}], stream=streaming) as r:  # todo: support history len
+        text = ""
+        r = api.chat_chat(prompt, no_remote_api=True)
+        for t in r:
+            text += t
+            chat_box.update_msg(text)
+        chat_box.update_msg(text, streaming=False)
+        # with api.chat_chat(prompt) as r:
+        #     for t in r.iter_text(None):
+        #         text += t
+        #         chat_box.update_msg(text)
+        # chat_box.update_msg(text, streaming=False)
 
 
 def knowledge_base_edit_page():
     pass
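The history_len slider added above is not yet consumed (see the `# todo: support history len` comment in the hunk). One possible follow-up, sketched as an assumption: trim the stored history to the last history_len exchanges and send it in the OpenAI-style message format the removed code used. build_messages is hypothetical, not in the repo:

# Hypothetical helper for the "todo: support history len" comment above;
# `history` is a list of {"role": ..., "content": ...} dicts as in the removed code.
def build_messages(history: list, prompt: str, history_len: int) -> list:
    recent = history[-2 * history_len:] if history_len > 0 else []  # last N (user, assistant) pairs
    msgs = [{"role": m["role"], "content": m["content"]} for m in recent]
    msgs.append({"role": "user", "content": prompt})
    return msgs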
@@ -51,8 +48,7 @@ def config_page():
 if __name__ == "__main__":
     st.set_page_config("langchain-chatglm WebUI")
 
-    if "messages" not in st.session_state:
-        st.session_state.messages = []
+    chat_box = ChatBox()
 
     pages = {"对话": {"icon": "chat",
                      "func": dialogue_page,
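The pages dict is truncated by the hunk, and the tail of webui.py that consumes it is unchanged and not shown. For orientation only, an assumed wiring with streamlit-option-menu:

# Assumed wiring between the pages dict and streamlit-option-menu;
# the real tail of webui.py is outside this diff and may differ.
with st.sidebar:
    selected_page = option_menu("langchain-chatglm",
                                options=list(pages),
                                icons=[page["icon"] for page in pages.values()])
pages[selected_page]["func"]()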
||||||