From 348edb9a0d0a60d5ad2fcd1f8e0d2a78a9029389 Mon Sep 17 00:00:00 2001
From: hzg0601
Date: Wed, 16 Aug 2023 10:13:51 +0800
Subject: [PATCH] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E6=8F=90=E7=A4=BA?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 server/api_allinone.py   | 4 ++++
 server/llm_api_launch.py | 6 +++++-
 webui_allinone.py        | 9 +++++++--
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/server/api_allinone.py b/server/api_allinone.py
index b19b6cc..de8f716 100644
--- a/server/api_allinone.py
+++ b/server/api_allinone.py
@@ -41,6 +41,8 @@ def run_api(host, port, **kwargs):
         uvicorn.run(app, host=host, port=port)
 
 if __name__ == "__main__":
+    print("Launching api_allinone, it would take a while, please be patient...")
+    print("正在启动api_allinone,LLM服务启动约3-10分钟,请耐心等待...")
     launch_all(args=args,controller_args=controller_args,worker_args=worker_args,server_args=server_args)
     run_api(
         host=args.api_host,
@@ -48,3 +50,5 @@ if __name__ == "__main__":
         ssl_keyfile=args.ssl_keyfile,
         ssl_certfile=args.ssl_certfile,
     )
+    print("Launching api_allinone done.")
+    print("api_allinone启动完毕.")
diff --git a/server/llm_api_launch.py b/server/llm_api_launch.py
index 0fdccfc..0c92768 100644
--- a/server/llm_api_launch.py
+++ b/server/llm_api_launch.py
@@ -217,6 +217,7 @@ def launch_worker(item,args=args,worker_args=worker_args):
     args.model_path, args.worker_host, args.worker_port = item.split("@")
     args.worker_address = f"http://{args.worker_host}:{args.worker_port}"
     print("*" * 80)
+    print(f"worker启动视设备不同而不同,约需3-10分钟,如长时间未启动,请到{LOG_PATH}/{log_name}下查看日志")
     worker_str_args = string_args(args, worker_args)
     print(worker_str_args)
     worker_sh = base_launch_sh.format("model_worker", worker_str_args, LOG_PATH, f"worker_{log_name}")
@@ -230,6 +231,8 @@ def launch_all(args=args,
                worker_args=worker_args,
                server_args=server_args
                ):
+    print(f"Launching LLM service, logs are located in {LOG_PATH}...")
+    print(f"开始启动LLM服务,请到{LOG_PATH}下监控各模块日志...")
     controller_str_args = string_args(args, controller_args)
     controller_sh = base_launch_sh.format("controller", controller_str_args, LOG_PATH, "controller")
     controller_check_sh = base_check_sh.format(LOG_PATH, "controller", "controller")
@@ -248,7 +251,8 @@ def launch_all(args=args,
     server_check_sh = base_check_sh.format(LOG_PATH, "openai_api_server", "openai_api_server")
     subprocess.run(server_sh, shell=True, check=True)
     subprocess.run(server_check_sh, shell=True, check=True)
-
+    print("Launching LLM service done!")
+    print("LLM服务启动完毕。")
 
 if __name__ == "__main__":
     launch_all()
diff --git a/webui_allinone.py b/webui_allinone.py
index b347387..f710855 100644
--- a/webui_allinone.py
+++ b/webui_allinone.py
@@ -37,7 +37,8 @@ web_args = ["server.port","theme.base","theme.primaryColor","theme.secondaryBack
 args = parser.parse_args()
 
 def launch_api(args=args,args_list=api_args,log_name=None):
-    print("launch api ...")
+    print("Launching api ...")
+    print("启动API服务...")
     if not log_name:
         log_name = f"{LOG_PATH}api_{args.api_host}_{args.api_port}"
     print(f"logs on api are written in {log_name}")
@@ -46,10 +47,11 @@ def launch_api(args=args,args_list=api_args,log_name=None):
                               script="api.py",args_str=args_str,log_name=log_name)
     subprocess.run(api_sh, shell=True, check=True)
     print("launch api done!")
-
+    print("启动API服务完毕.")
 
 def launch_webui(args=args,args_list=web_args,log_name=None):
     print("Launching webui...")
+    print("启动webui服务...")
     if not log_name:
         log_name = f"{LOG_PATH}webui"
     print(f"logs on api are written in {log_name}")
@@ -62,12 +64,15 @@ def launch_webui(args=args,args_list=web_args,log_name=None):
                                 args_str=args_str)
     subprocess.run(webui_sh, shell=True, check=True)
     print("launch webui done!")
+    print("启动webui服务完毕.")
 
 
 if __name__ == "__main__":
     print("Starting webui_allineone.py, it would take a while, please be patient....")
+    print(f"开始启动webui_allinone,启动LLM服务需要约3-10分钟,请耐心等待,如长时间未启动,请到{LOG_PATH}下查看日志...")
     if not args.use_remote_api:
         launch_all(args=args,controller_args=controller_args,worker_args=worker_args,server_args=server_args)
     launch_api(args=args,args_list=api_args)
     launch_webui(args=args,args_list=web_args)
     print("Start webui_allinone.py done!")
+    print("感谢耐心等待,启动webui_allinone完毕。")
\ No newline at end of file