update llm_api.py and webui.py

imClumsyPanda 2023-08-01 14:33:18 +08:00
parent 2c5b6bb0ad
commit c8a75ab11f
2 changed files with 3 additions and 7 deletions

llm_api.py

@@ -1,7 +1,7 @@
from multiprocessing import Process, Queue
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs.model_config import llm_model_dict, LLM_MODEL, LLM_DEVICE, LOG_PATH, logger
import asyncio
@@ -199,9 +199,6 @@ def run_openai_api(q):
    uvicorn.run(app, host=host_ip, port=openai_api_port)
if __name__ == "__main__":
    logger.info(llm_model_dict[LLM_MODEL])
    model_path = llm_model_dict[LLM_MODEL]["local_model_path"]
@@ -243,7 +240,6 @@ if __name__ == "__main__":
    # model_worker_process.join()
    openai_api_process.join()
    # 服务启动后接口调用示例:
    # import openai
    # openai.api_key = "EMPTY" # Not support yet
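The commented-out lines above gesture at how to call the service once it is up. A minimal client sketch, assuming the legacy openai (<1.0) Python package and an OpenAI-compatible endpoint started by run_openai_api(); the base URL and model name below are placeholders, not values taken from this commit:

# Hedged example: querying the OpenAI-compatible API launched by run_openai_api().
# The URL and model name are assumptions; substitute the configured
# host_ip / openai_api_port and LLM_MODEL from configs.model_config.
import openai

openai.api_key = "EMPTY"                      # key is not checked yet, per the comment above
openai.api_base = "http://127.0.0.1:8888/v1"  # placeholder for http://{host_ip}:{openai_api_port}/v1

resp = openai.ChatCompletion.create(
    model="chatglm2-6b",                      # placeholder; use the configured LLM_MODEL
    messages=[{"role": "user", "content": "Hello"}],
)
print(resp.choices[0].message.content)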

webui.py

@@ -21,7 +21,7 @@ def dialogue_page():
    # Display chat messages from history on app rerun
    chat_box.output_messages()
-   if prompt := st.chat_input("What is up?"):
+   if prompt := st.chat_input("请输入对话内容换行请使用Ctrl+Enter"):
        chat_box.user_say(prompt)
        chat_box.ai_say("正在思考...")
        # with api.chat_fastchat([{"role": "user", "content": "prompt"}], stream=streaming) as r: # todo: support history len
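For context, this hunk sits inside the Streamlit chat loop of dialogue_page(). A minimal sketch of that pattern, assuming streamlit and the streamlit-chatbox package and using only the calls visible in the diff (output_messages, user_say, ai_say); session setup done elsewhere in webui.py is omitted:

# Hedged sketch of the chat loop this hunk touches; not the full dialogue_page().
import streamlit as st
from streamlit_chatbox import ChatBox  # assumed import path for the chat_box helper

chat_box = ChatBox()

# Replay the chat history on every Streamlit rerun.
chat_box.output_messages()

# st.chat_input returns None until the user submits text.
if prompt := st.chat_input("请输入对话内容换行请使用Ctrl+Enter"):
    chat_box.user_say(prompt)        # record the user's turn
    chat_box.ai_say("正在思考...")    # placeholder reply until the model answers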