add chatglm2-6b-32k and make m3e default embedding model

This commit is contained in:
imClumsyPanda 2023-08-01 14:12:28 +08:00
parent 9e2b411b01
commit 9f4567865c
1 changed file with 7 additions and 1 deletion

View File

@@ -26,7 +26,7 @@ embedding_model_dict = {
 }
 # 选用的 Embedding 名称
-EMBEDDING_MODEL = "text2vec"
+EMBEDDING_MODEL = "m3e-base"
 # Embedding 模型运行设备
 EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
@@ -51,6 +51,12 @@ llm_model_dict = {
         "api_key": "EMPTY"
     },
+    "chatglm2-6b-32k": {
+        "local_model_path": "THUDM/chatglm2-6b-32k",  # "THUDM/chatglm2-6b-32k",
+        "api_base_url": "http://localhost:8888/v1",  # "name"修改为fastchat服务中的"api_base_url"
+        "api_key": "EMPTY"
+    },
     "vicuna-13b-hf": {
         "local_model_path": "",
         "api_base_url": "http://localhost:8000/v1",  # "name"修改为fastchat服务中的"api_base_url"