diff --git a/server/llm_api.py b/server/llm_api.py
index e8e5452..60227cf 100644
--- a/server/llm_api.py
+++ b/server/llm_api.py
@@ -1,7 +1,7 @@
-
 from multiprocessing import Process, Queue
 import sys
 import os
+sys.path.append(os.path.dirname(os.path.dirname(__file__)))
 from configs.model_config import llm_model_dict, LLM_MODEL, LLM_DEVICE, LOG_PATH, logger
 import asyncio
 
@@ -31,7 +31,7 @@ def create_controller_app(
 
     controller = Controller(dispatch_method)
     sys.modules["fastchat.serve.controller"].controller = controller
-    #todo 替换fastchat的日志文件
+    # todo 替换fastchat的日志文件
     sys.modules["fastchat.serve.controller"].logger = logger
     logger.info(f"controller dispatch method: {dispatch_method}")
     return app
@@ -199,9 +199,6 @@ def run_openai_api(q):
     uvicorn.run(app, host=host_ip, port=openai_api_port)
 
 
-
-
-
 if __name__ == "__main__":
     logger.info(llm_model_dict[LLM_MODEL])
     model_path = llm_model_dict[LLM_MODEL]["local_model_path"]
@@ -243,7 +240,6 @@ if __name__ == "__main__":
 
     # model_worker_process.join()
     openai_api_process.join()
-
     # 服务启动后接口调用示例:
     # import openai
     # openai.api_key = "EMPTY" # Not support yet
diff --git a/webui.py b/webui.py
index 1976a7f..a79f4c0 100644
--- a/webui.py
+++ b/webui.py
@@ -21,7 +21,7 @@ def dialogue_page():
     # Display chat messages from history on app rerun
     chat_box.output_messages()
 
-    if prompt := st.chat_input("What is up?"):
+    if prompt := st.chat_input("请输入对话内容,换行请使用Ctrl+Enter"):
         chat_box.user_say(prompt)
         chat_box.ai_say("正在思考...")
         # with api.chat_fastchat([{"role": "user", "content": "prompt"}], stream=streaming) as r: # todo: support history len
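For context, the comment block truncated at the end of the llm_api.py hunk ("服务启动后接口调用示例", i.e. an example API call once the services are started) hints at how the OpenAI-compatible endpoint exposed by run_openai_api is meant to be used. A minimal sketch of that call, assuming the pre-1.0 openai client of the FastChat era; the port and model name below are placeholders, not values taken from this diff:

    import openai

    # Placeholder values: substitute openai_api_port and LLM_MODEL from
    # configs/model_config.py for the port and model name.
    openai.api_key = "EMPTY"  # per the comment above, the key is not checked yet
    openai.api_base = "http://127.0.0.1:8888/v1"

    completion = openai.ChatCompletion.create(
        model="chatglm2-6b",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(completion.choices[0].message.content)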