from configs.model_config import LLM_DEVICE
import httpx

# Default timeout (in seconds) for httpx requests. If loading a model or generating a chat reply is slow and you hit timeout errors, increase this value as needed.
HTTPX_DEFAULT_TIMEOUT = 300.0
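
# Usage sketch (illustrative only; the actual client construction lives in the
# server code, not in this config file). httpx clients accept the timeout directly:
#
#     client = httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT)
#     async_client = httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT)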

# Whether the API allows cross-origin (CORS) requests. Defaults to False; set it to True if you need to enable CORS.
OPEN_CROSS_DOMAIN = False
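
# Application sketch (illustrative only; the real wiring belongs in the API
# server code and assumes that server is a FastAPI app named `app`):
#
#     from fastapi.middleware.cors import CORSMiddleware
#     if OPEN_CROSS_DOMAIN:
#         app.add_middleware(
#             CORSMiddleware,
#             allow_origins=["*"],
#             allow_credentials=True,
#             allow_methods=["*"],
#             allow_headers=["*"],
#         )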

# Default bind host for the servers below (the XX_SERVER entries all take their host from DEFAULT_BIND_HOST). Change it to "0.0.0.0" to listen on all network interfaces.
DEFAULT_BIND_HOST = "127.0.0.1"

# webui.py server
WEBUI_SERVER = {
    "host": DEFAULT_BIND_HOST,
    "port": 8501,
}

# api.py server
API_SERVER = {
    "host": DEFAULT_BIND_HOST,
    "port": 7861,
}

# fastchat openai_api server
FSCHAT_OPENAI_API = {
    "host": DEFAULT_BIND_HOST,
    "port": 20000,
}
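
# Illustrative only: the host/port pairs above are typically combined into base
# URLs like the ones below. The names are hypothetical and not read elsewhere in
# this file; the fastchat openai_api server is assumed to expose an
# OpenAI-compatible API under the /v1 prefix.
EXAMPLE_API_BASE_URL = f"http://{API_SERVER['host']}:{API_SERVER['port']}"
EXAMPLE_OPENAI_API_BASE = f"http://{FSCHAT_OPENAI_API['host']}:{FSCHAT_OPENAI_API['port']}/v1"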

# fastchat model_worker server
# These models must be correctly configured in model_config.MODEL_PATH or ONLINE_MODEL.
# When launching startup.py, you can specify the model with `--model-worker --model-name xxxx`; if it is not specified, LLM_MODEL is used.
FSCHAT_MODEL_WORKERS = {
    # Default settings shared by all models; any of them can be overridden in a model-specific entry below.
    "default": {
        "host": DEFAULT_BIND_HOST,
        "port": 20002,
        "device": LLM_DEVICE,

        # Parameters required for multi-GPU loading
        # "gpus": None,  # which GPUs to use, given as a str such as "0,1"
        # "num_gpus": 1,  # number of GPUs to use
        # "max_gpu_memory": "20GiB",  # maximum GPU memory to use per GPU

        # The following parameters are rarely needed; configure them only if required
        # "load_8bit": False,  # enable 8-bit quantization
        # "cpu_offloading": None,
        # "gptq_ckpt": None,
        # "gptq_wbits": 16,
        # "gptq_groupsize": -1,
        # "gptq_act_order": False,
        # "awq_ckpt": None,
        # "awq_wbits": 16,
        # "awq_groupsize": -1,
        # "model_names": [LLM_MODEL],
        # "conv_template": None,
        # "limit_worker_concurrency": 5,
        # "stream_interval": 2,
        # "no_register": False,
        # "embed_in_truncate": False,
    },

    "baichuan-7b": {  # uses the IP and port from "default"
        "device": "cpu",
    },
    "zhipu-api": {  # please set a different port for each online API you want to run
        "port": 21001,
    },
    "minimax-api": {
        "port": 21002,
    },
    "xinghuo-api": {
        "port": 21003,
    },
    "qianfan-api": {
        "port": 21004,
    },
}
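
# A minimal sketch of how the entries above are meant to be combined: "default"
# applies to every worker, and a model-specific entry overrides individual keys.
# The function name is hypothetical (the project ships its own lookup helper);
# it is defined here only to illustrate the merge semantics.
def example_model_worker_config(model_name: str) -> dict:
    config = dict(FSCHAT_MODEL_WORKERS["default"])  # start from shared defaults
    config.update(FSCHAT_MODEL_WORKERS.get(model_name, {}))  # apply per-model overrides
    return config

# e.g. example_model_worker_config("zhipu-api") keeps host and device from
# "default" but uses port 21001 from the "zhipu-api" entry.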

# fastchat multi model worker server
FSCHAT_MULTI_MODEL_WORKERS = {
    # TODO:
}

# fastchat controller server
FSCHAT_CONTROLLER = {
    "host": DEFAULT_BIND_HOST,
    "port": 20001,
    "dispatch_method": "shortest_queue",
}
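
# Launch sketch (illustrative, assuming fastchat's standard controller CLI; the
# exact flags may differ between fastchat versions, so verify against the
# installed release):
#
#     python -m fastchat.serve.controller \
#         --host 127.0.0.1 --port 20001 --dispatch-method shortest_queue
#
# Besides "shortest_queue", upstream fastchat is understood to also accept
# "lottery" as a dispatch method (assumption, not confirmed by this file).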