Merge branch 'dev' of github.com:chatchat-space/Langchain-Chatchat into dev
commit 7cca0e2a72

 README.md | 17 lines changed

@@ -42,7 +42,7 @@
 🚩 This project does not cover fine-tuning or training, but fine-tuning or training can be used to optimize its results.

-🌐 The code used by the `v5` version of the [AutoDL image](https://www.codewithgpu.com/i/imClumsyPanda/langchain-ChatGLM/langchain-ChatGLM) has been updated to version `0.2.0` of this project.
+🌐 The code used by the `v5` version of the [AutoDL image](https://www.codewithgpu.com/i/imClumsyPanda/langchain-ChatGLM/Langchain-Chatchat) has been updated to version `0.2.0` of this project.

 🐳 [Docker image](registry.cn-beijing.aliyuncs.com/chatchat/chatchat:0.2.0)

@ -214,6 +214,7 @@ embedding_model_dict = {
|
||||||
 ```shell
 $ python init_database.py
 ```

 - If you are running this project for the first time, the knowledge base has not yet been built, or the knowledge base type or embedding model in the configuration file has changed, initialize or rebuild the knowledge base with the following command:

 ```shell

@@ -242,8 +243,7 @@ embedding_model_dict = {
 $ python server/llm_api.py
 ```

-The project supports multi-GPU loading; the following three parameters in the create_model_worker_app function of llm_api.py need to be modified:
+The project supports multi-GPU loading; to enable it, modify the following three parameters of the create_model_worker_app function in llm_api.py:

 ```python
 gpus=None,
 num_gpus=1,
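 # Illustrative values (an editor's sketch mirroring the CLI example further
 # below in this README): gpus="0,1", num_gpus=2, max_gpu_memory="10GiB"
 # would split one model across the first two GPUs with a 10 GiB cap per GPU.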

@@ -258,14 +258,12 @@ max_gpu_memory="20GiB"
 ##### 5.1.2 Starting the LLM service with the command-line script llm_api_launch.py

-**!!! Note:**
+⚠️ **Note:**

 **1. The llm_api_launch.py script only works on Linux and macOS; on Windows, please use WSL;**

 **2. Loading a non-default model requires specifying it with the command-line argument --model-path-address; the configuration in model_config.py is not read;**

-**!!!**
-

 From the project root directory, run the [server/llm_api_launch.py](server/llm_api_launch.py) script to start the **LLM model** service:

 ```shell

@@ -284,7 +282,7 @@ $ python server/llm_api_launch.py --model-path-address model1@host1@port1 model
 $ python server/llm_api_launch.py --gpus 0,1 --num-gpus 2 --max-gpu-memory 10GiB
 ```

-Note: starting the LLM service as above runs the fastchat service in the background via the nohup command. To stop the service, run the command below; this script **only works on Linux and macOS; on Windows, please use WSL**:
+Note: starting the LLM service as above runs the FastChat service in the background via the nohup command. To stop the service, run the command below:

 ```shell
 $ python server/llm_api_shutdown.py --serve all

@@ -352,6 +350,7 @@ $ streamlit run webui.py --server.port 666

 - Web UI chat page:


|

|
||||||
|
|
||||||
 - Web UI knowledge base management page:


|

|
||||||
|
|
@@ -422,7 +421,7 @@ $ python webui_allinone.py --model-path-address model1@host1@port1 model2@host2@
 $ python webui_allinone.py --model-path-address model@host@port --num-gpus 2 --gpus 0,1 --max-gpu-memory 10GiB
 ```

-For other parameters, see the individual scripts and the fastchat service documentation.
+For other parameters, see the individual scripts and the FastChat service documentation.

 The two one-click launch scripts above run several services in the background. To stop all of them, use the `shutdown_all.sh` script:

@@ -472,6 +471,6 @@ bash shutdown_all.sh

 ## Project Discussion Group

<img src="img/qr_code_51.jpg" alt="二维码" width="300" height="300" />
|
<img src="img/qr_code_52.jpg" alt="二维码" width="300" height="300" />
|
||||||
|
|
||||||
 🎉 WeChat discussion group for the langchain-ChatGLM project. If you are also interested in this project, you are welcome to join the group chat and take part in the discussion.

img/qr_code_52.jpg: new binary image, 281 KiB (not shown)

server/api_allinone.py

@@ -11,10 +11,11 @@ python server/api_allinone.py --model-path-address model@host@port --num-gpus 2
"""
|
"""
|
||||||
 import sys
 import os

 sys.path.append(os.path.dirname(__file__))
 sys.path.append(os.path.dirname(os.path.dirname(__file__)))

-from llm_api_launch import launch_all,parser,controller_args,worker_args,server_args
+from llm_api_launch import launch_all, parser, controller_args, worker_args, server_args
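 # launch_all/parser and the *_args option groups come from llm_api_launch.py,
 # so the FastChat services and this API server share a single entry point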
 from api import create_app
 import uvicorn

@@ -23,8 +24,8 @@ parser.add_argument("--api-port", type=int, default=7861)
parser.add_argument("--ssl_keyfile", type=str)
|
parser.add_argument("--ssl_keyfile", type=str)
|
||||||
parser.add_argument("--ssl_certfile", type=str)
|
parser.add_argument("--ssl_certfile", type=str)
|
||||||

-api_args = ["api-host","api-port","ssl_keyfile","ssl_certfile"]
+api_args = ["api-host", "api-port", "ssl_keyfile", "ssl_certfile"]
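 # CLI option names belonging to the API server itself; argparse exposes the
 # dashed names as the attributes api_host/api_port used by run_api below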

 def run_api(host, port, **kwargs):
     app = create_app()

@@ -38,18 +39,19 @@ def run_api(host, port, **kwargs):
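     # no ssl_keyfile/ssl_certfile supplied -> fall back to plain HTTP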
     else:
         uvicorn.run(app, host=host, port=port)


 if __name__ == "__main__":
print("Luanching api_allinone,it would take a while, please be patient...")
|
print("Luanching api_allinone,it would take a while, please be patient...")
|
||||||
print("正在启动api_allinone,LLM服务启动约3-10分钟,请耐心等待...")
|
print("正在启动api_allinone,LLM服务启动约3-10分钟,请耐心等待...")
|
||||||
     # initialization message
     args = parser.parse_args()
     args_dict = vars(args)
-    launch_all(args=args,controller_args=controller_args,worker_args=worker_args,server_args=server_args)
+    launch_all(args=args, controller_args=controller_args, worker_args=worker_args, server_args=server_args)
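     # with the FastChat services running in the background, start the
     # project's own API server in this process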
     run_api(
         host=args.api_host,
         port=args.api_port,
         ssl_keyfile=args.ssl_keyfile,
         ssl_certfile=args.ssl_certfile,
     )
print("Luanching api_allinone done.")
|
print("Luanching api_allinone done.")
|
||||||
print("api_allinone启动完毕.")
|
print("api_allinone启动完毕.")
|
||||||

server/llm_api_launch.py

@@ -132,7 +132,7 @@ worker_args = [
"gptq-ckpt", "gptq-wbits", "gptq-groupsize",
|
"gptq-ckpt", "gptq-wbits", "gptq-groupsize",
|
||||||
"gptq-act-order", "model-names", "limit-worker-concurrency",
|
"gptq-act-order", "model-names", "limit-worker-concurrency",
|
||||||
"stream-interval", "no-register",
|
"stream-interval", "no-register",
|
||||||
"controller-address","worker-address"
|
"controller-address", "worker-address"
|
||||||
 ]
 # -----------------openai server---------------------------

@@ -159,8 +159,6 @@ server_args = ["server-host", "server-port", "allow-credentials", "api-keys",
"controller-address"
|
"controller-address"
|
||||||
                ]
-
-

 # 0, controller, model_worker, openai_api_server
 # 1, command-line options
 # 2, LOG_PATH

@@ -201,7 +199,7 @@ def string_args(args, args_list):
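     # args_str holds the selected argparse options rendered back into a
     # command-line string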
     return args_str


-def launch_worker(item,args,worker_args=worker_args):
+def launch_worker(item, args, worker_args=worker_args):
     log_name = item.split("/")[-1].split("\\")[-1].replace("-", "_").replace("@", "_").replace(".", "_")
     # split model-path-address first, then pass the pieces to string_args to build the argument string
     args.model_path, args.worker_host, args.worker_port = item.split("@")
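     # item has the form "model@host@port", e.g. model1@host1@port1 as in the README examples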

@@ -230,11 +228,11 @@ def launch_all(args,
     subprocess.run(controller_check_sh, shell=True, check=True)
print(f"worker启动时间视设备不同而不同,约需3-10分钟,请耐心等待...")
|
print(f"worker启动时间视设备不同而不同,约需3-10分钟,请耐心等待...")
|
||||||
     if isinstance(args.model_path_address, str):
-        launch_worker(args.model_path_address,args=args,worker_args=worker_args)
+        launch_worker(args.model_path_address, args=args, worker_args=worker_args)
     else:
         for idx, item in enumerate(args.model_path_address):
print(f"开始加载第{idx}个模型:{item}")
|
print(f"开始加载第{idx}个模型:{item}")
|
||||||
-            launch_worker(item,args=args,worker_args=worker_args)
+            launch_worker(item, args=args, worker_args=worker_args)

     server_str_args = string_args(args, server_args)
     server_sh = base_launch_sh.format("openai_api_server", server_str_args, LOG_PATH, "openai_api_server")

@@ -244,11 +242,12 @@ def launch_all(args,
print("Launching LLM service done!")
|
print("Launching LLM service done!")
|
||||||
print("LLM服务启动完毕。")
|
print("LLM服务启动完毕。")
|
||||||
|
|
||||||
|
|
||||||
 if __name__ == "__main__":
     args = parser.parse_args()
     # the "http://" prefix is required, otherwise requests raises InvalidSchema: No connection adapters were found
     args = argparse.Namespace(**vars(args),
**{"controller-address": f"http://{args.controller_host}:{str(args.controller_port)}"})
|
**{"controller-address": f"http://{args.controller_host}:{str(args.controller_port)}"})
|
||||||
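     # "controller-address" contains a dash, so it cannot be written as a normal
     # keyword argument; the dict unpacking above injects it anyway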

     if args.gpus:
         if len(args.gpus.split(",")) < args.num_gpus: