Add a Python script to stop the services

hzg0601 2023-08-09 23:16:02 +08:00
parent 1b70fb5f9b
commit 8a34c3f163
6 changed files with 27 additions and 57 deletions

View File

@@ -1,5 +1,5 @@
 """
-Usage example: python llm_api_sh.py --model-path-address THUDM/chatglm2-6b@localhost@7650 THUDM/chatglm2-6b-32k@localhost@7651
+Usage example: python llm_api_launch.py --model-path-address THUDM/chatglm2-6b@localhost@7650 THUDM/chatglm2-6b-32k@localhost@7651
 Other fastchat.serve.controller/worker/openai_api_server arguments can be passed as described in the fastchat documentation,
 but a few non-essential arguments such as --worker-address, --allowed-origins, --allowed-methods and --allowed-headers are not supported
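For reference, each --model-path-address entry packs three values into one @-separated string of the form model-path@host@port. A minimal parsing sketch; the helper name parse_model_path_address is hypothetical and not part of the project:

def parse_model_path_address(entry: str) -> dict:
    # Split "<model-path>@<host>@<port>", e.g. "THUDM/chatglm2-6b@localhost@7650".
    model_path, host, port = entry.split("@")
    return {"model_path": model_path, "host": host, "port": int(port)}

print(parse_model_path_address("THUDM/chatglm2-6b@localhost@7650"))
# {'model_path': 'THUDM/chatglm2-6b', 'host': 'localhost', 'port': 7650}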

View File

@@ -0,0 +1,26 @@
"""
Usage example:
python llm_api_shutdown.py --serve all
--serve accepts "all", "controller", "worker" or "openai_api_server"; "all" stops every service.
"""
import sys
import os

sys.path.append(os.path.dirname(os.path.dirname(__file__)))

import subprocess
import argparse

parser = argparse.ArgumentParser()
# Default to "all" (stop every service), matching the usage example above.
parser.add_argument("--serve", choices=["all", "controller", "worker", "openai_api_server"], default="all")
args = parser.parse_args()

# The awk braces are doubled so that str.format leaves a literal {print $2} in the command.
base_shell = "ps -eo user,pid,cmd|grep fastchat.serve{}|grep -v grep|awk '{{print $2}}'|xargs kill -9"

if args.serve == "all":
    shell_script = base_shell.format("")
elif args.serve == "worker":
    # The worker runs as fastchat.serve.model_worker, so match that module name.
    shell_script = base_shell.format(".model_worker")
else:
    shell_script = base_shell.format(f".{args.serve}")

subprocess.run(shell_script, shell=True, check=True)
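A quick illustration of why the awk braces above are doubled: Python's str.format treats single braces as replacement fields, so the awk program must be written as {{print $2}} to survive formatting. This snippet only demonstrates the formatting; it does not kill anything:

base_shell = "ps -eo user,pid,cmd|grep fastchat.serve{}|grep -v grep|awk '{{print $2}}'|xargs kill -9"
# Filling the placeholder with ".controller" narrows the match to that service.
print(base_shell.format(".controller"))
# ps -eo user,pid,cmd|grep fastchat.serve.controller|grep -v grep|awk '{print $2}'|xargs kill -9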

View File

@@ -1,30 +0,0 @@
[ -d "../logs/" ] && echo "log dir exists" || mkdir "../logs/"
# controller
nohup python3 -m fastchat.serve.controller >../logs/controller.log 2>&1 &
while [ `grep -c "Uvicorn running on" ../logs/controller.log` -eq '0' ];do
    sleep 1s;
    echo "wait controller running"
done
echo "controller running"
# worker
nohup python3 -m fastchat.serve.model_worker \
    --model-name 'chatglm2-6b' \
    --model-path THUDM/chatglm2-6b \
    --num-gpus 2 \
    >> ../logs/worker.log 2>&1 &
while [ `grep -c "Uvicorn running on" ../logs/worker.log` -eq '0' ];do
    sleep 3s;
    echo "wait worker running"
done
echo "worker running"
# openai_api_server
nohup python3 -m fastchat.serve.openai_api_server >> "../logs/openai_server.log" 2>&1 &
while [ `grep -c "Uvicorn running on" ../logs/openai_server.log` -eq '0' ];do
    sleep 3s;
    echo "wait openai_server running"
done
echo "openai_server running"

View File

@@ -1,7 +0,0 @@
# controller
nohup python3 -m fastchat.serve.controller >../logs/controller.log 2>&1 &
while [ `grep -c "Uvicorn running on" ../logs/controller.log` -eq '0' ];do
    sleep 1s;
    echo "wait controller running"
done
echo "controller running"

View File

@@ -1,8 +0,0 @@
# openai_api_server
nohup python3 -m fastchat.serve.openai_api_server >> "../logs/openai_server.log" 2>&1 &
while [ `grep -c "Uvicorn running on" ../logs/openai_server.log` -eq '0' ];do
    sleep 3s;
    echo "wait openai_server running"
done
echo "openai_server running"

View File

@@ -1,11 +0,0 @@
nohup python3 -m fastchat.serve.model_worker \
    --model-name 'chatglm2-6b' \
    --model-path THUDM/chatglm2-6b \
    --num-gpus 2 \
    >> ../logs/worker.log 2>&1 &
while [ `grep -c "Uvicorn running on" ../logs/worker.log` -eq '0' ];do
    sleep 3s;
    echo "wait worker running"
done
echo "worker running"