1. 增加api的一键启动脚本;2. 增加webui的一键启动脚本;3. 更新readme;4. 调整llm_api_launch

This commit is contained in:
hzg0601 2023-08-15 11:02:32 +08:00
parent bb8331384f
commit 5c0274efce
4 changed files with 181 additions and 5 deletions

View File

@ -257,7 +257,7 @@ $ python server/llm_api_launch.py --model-path-addresss model1@host1@port1 model
$ python server/llm_api_launch.py --gpus 0,1 --num-gpus 2 --max-gpu-memory 10GiB
```
以如上方式启动LLM服务会以nohup命令在后台运行 fastchat 服务,如需停止服务,可以运行如下命令:
以如上方式启动LLM服务会以nohup命令在后台运行 fastchat 服务,如需停止服务,可以运行如下命令,但该脚本**仅适用于linux和mac平台**
```shell
$ python server/llm_api_shutdown.py --serve all
@ -331,6 +331,55 @@ $ streamlit run webui.py --server.port 666
---
### 6 一键启动:
#### 6.1 api服务一键启动脚本
新增api一键启动脚本可一键开启fastchat后台服务及本项目提供的langchain api服务,调用示例:
调用默认模型:
```shell
$ python server/api_allinone.py
```
加载多个非默认模型:
```shell
$ python server/api_allinone.py --model-path-address model1@host1@port1 model2@host2@port2
```
多卡启动:
```shell
python server/api_allinone.py --model-path-address model@host@port --num-gpus 2 --gpus 0,1 --max-gpu-memory 10GiB
```
其他参数详见各脚本及fastchat服务说明。
#### 6.2 webui一键启动脚本
加载本地模型:
```shell
$ python webui_allinone.py
```
调用远程api服务:
```shell
$ python webui_allinone.py --use-remote-api
```
后台运行webui服务:
```shell
$ python webui_allinone.py --nohup
```
加载多个非默认模型:
```shell
$ python webui_allinone.py --model-path-address model1@host1@port1 model2@host2@port2
```
多卡启动:
```shell
python webui_allinone.py --model-path-address model@host@port --num-gpus 2 --gpus 0,1 --max-gpu-memory 10GiB
```
其他参数详见各脚本及fastchat服务说明。
## 常见问题
参见 [常见问题](docs/FAQ.md)。

50
server/api_allinone.py Normal file
View File

@ -0,0 +1,50 @@
"""Usage
调用默认模型
python server/api_allinone.py
加载多个非默认模型
python server/api_allinone.py --model-path-address model1@host1@port1 model2@host2@port2
多卡启动
python server/api_allinone.py --model-path-address model@host@port --num-gpus 2 --gpus 0,1 --max-gpu-memory 10GiB
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from llm_api_launch import launch_all,parser,controller_args,worker_args,server_args
from api import create_app
import uvicorn
parser.add_argument("--api-host", type=str, default="0.0.0.0")
parser.add_argument("--api-port", type=int, default=7861)
parser.add_argument("--ssl_keyfile", type=str)
parser.add_argument("--ssl_certfile", type=str)
# 初始化消息
args = parser.parse_args()
args_dict = vars(args)
api_args = ["api-host","api-port","ssl_keyfile","ssl_certfile"]
def run_api(host, port, **kwargs):
    """Serve the langchain API app with uvicorn on host:port.

    TLS is enabled only when BOTH ``ssl_keyfile`` and ``ssl_certfile`` are
    supplied via kwargs; otherwise the server runs over plain HTTP.
    Blocks until the server exits.
    """
    app = create_app()
    keyfile = kwargs.get("ssl_keyfile")
    certfile = kwargs.get("ssl_certfile")
    if keyfile and certfile:
        uvicorn.run(app,
                    host=host,
                    port=port,
                    ssl_keyfile=keyfile,
                    ssl_certfile=certfile)
    else:
        uvicorn.run(app, host=host, port=port)
if __name__ == "__main__":
launch_all(args=args,controller_args=controller_args,worker_args=worker_args,server_args=server_args)
run_api(
host=args.api_host,
port=args.api_port,
ssl_keyfile=args.ssl_keyfile,
ssl_certfile=args.ssl_certfile,
)

View File

@ -181,7 +181,7 @@ base_launch_sh = "nohup python3 -m fastchat.serve.{0} {1} >{2}/{3}.log 2>&1 &"
# ! 1 log的文件名必须与bash_launch_sh一致
# 2 controller, worker, openai_api_server
base_check_sh = """while [ `grep -c "Uvicorn running on" {0}/{1}.log` -eq '0' ];do
sleep 1s;
sleep 5s;
echo "wait {2} running"
done
echo '{2} running' """
@ -211,7 +211,7 @@ def string_args(args, args_list):
return args_str
def launch_worker(item):
def launch_worker(item,args=args,worker_args=worker_args):
log_name = item.split("/")[-1].split("\\")[-1].replace("-", "_").replace("@", "_").replace(".", "_")
# 先分割model-path-address,在传到string_args中分析参数
args.model_path, args.worker_host, args.worker_port = item.split("@")
@ -225,7 +225,11 @@ def launch_worker(item):
subprocess.run(worker_check_sh, shell=True, check=True)
def launch_all():
def launch_all(args=args,
controller_args=controller_args,
worker_args=worker_args,
server_args=server_args
):
controller_str_args = string_args(args, controller_args)
controller_sh = base_launch_sh.format("controller", controller_str_args, LOG_PATH, "controller")
controller_check_sh = base_check_sh.format(LOG_PATH, "controller", "controller")
@ -233,7 +237,7 @@ def launch_all():
subprocess.run(controller_check_sh, shell=True, check=True)
if isinstance(args.model_path_address, str):
launch_worker(args.model_path_address)
launch_worker(args.model_path_address,args=args,worker_args=worker_args)
else:
for idx, item in enumerate(args.model_path_address):
print(f"开始加载第{idx}个模型:{item}")

73
webui_allinone.py Normal file
View File

@ -0,0 +1,73 @@
"""Usage
加载本地模型
python webui_allinone.py
调用远程api服务
python webui_allinone.py --use-remote-api
后台运行webui服务
python webui_allinone.py --nohup
加载多个非默认模型
python webui_allinone.py --model-path-address model1@host1@port1 model2@host2@port2
多卡启动
python webui_alline.py --model-path-address model@host@port --num-gpus 2 --gpus 0,1 --max-gpu-memory 10GiB
"""
import streamlit as st
from webui_pages.utils import *
from streamlit_option_menu import option_menu
from webui_pages import *
import os
from server.llm_api_launch import string_args,launch_all,controller_args,worker_args,server_args,LOG_PATH
from server.api_allinone import parser, api_args
import subprocess
parser.add_argument("--use-remote-api",action="store_true")
parser.add_argument("--nohup",action="store_true")
parser.add_argument("--server.port",type=int,default=8501)
parser.add_argument("--theme.base",type=str,default='"light"')
parser.add_argument("--theme.primaryColor",type=str,default='"#165dff"')
parser.add_argument("--theme.secondaryBackgroundColor",type=str,default='"#f5f5f5"')
parser.add_argument("--theme.textColor",type=str,default='"#000000"')
web_args = ["server.port","theme.base","theme.primaryColor","theme.secondaryBackgroundColor","theme.textColor"]
args = parser.parse_args()
def launch_api(args=args,args_list=api_args,log_name=None):
    """Start the langchain API server (server/api.py) as a background shell job.

    Builds the CLI string from the options named in ``args_list``, derives a
    default log-file prefix from the api host/port, and launches api.py with
    its output redirected to ``<log_name>.log``.
    """
    print("launch api ...")
    log_name = log_name or f"{LOG_PATH}api_{args.api_host}_{args.api_port}"
    print(f"logs on api are written in {log_name}")
    command = "python server/{script} {args_str} >{log_name}.log 2>&1 &".format(
        script="api.py",
        args_str=string_args(args, args_list),
        log_name=log_name,
    )
    subprocess.run(command, shell=True, check=True)
    print("launch api done!")
def launch_webui(args=args,args_list=web_args,log_name=None):
    """Launch the streamlit webui, optionally detached in the background.

    Args:
        args: parsed argparse namespace holding the webui/theme options.
        args_list: names of the options forwarded to ``streamlit run``.
        log_name: log-file prefix; defaults to ``{LOG_PATH}webui``. The log
            file is only written when ``args.nohup`` is set — foreground runs
            log straight to the console.
    """
    print("Launching webui...")
    if not log_name:
        log_name = f"{LOG_PATH}webui"
    # Fix: this message previously said "api" — copy/paste from launch_api.
    print(f"logs on webui are written in {log_name}")
    args_str = string_args(args, args_list)
    if args.nohup:
        # Background: detach and redirect output to the log file.
        webui_sh = "streamlit run webui.py {args_str} >{log_name}.log 2>&1 &".format(
            args_str=args_str,log_name=log_name)
    else:
        # Foreground: blocks until streamlit exits.
        webui_sh = "streamlit run webui.py {args_str}".format(
            args_str=args_str)
    subprocess.run(webui_sh, shell=True, check=True)
    print("launch webui done!")
if __name__ == "__main__":
print("Starting webui_allineone.py, it would take a while, please be patient....")
if not args.use_remote_api:
launch_all(args=args,controller_args=controller_args,worker_args=worker_args,server_args=server_args)
launch_api(args=args,args_list=api_args)
launch_webui(args=args,args_list=web_args)
print("Start webui_allinone.py done!")