Update model_config.py
parent 090f164950
commit 811d5a2e46
@@ -6,11 +6,10 @@ embedding_model_dict = {
     "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
     "ernie-base": "nghuyong/ernie-3.0-base-zh",
     "text2vec": "GanymedeNil/text2vec-large-chinese",
-    "local-text2vec": "./embedding/text2vec-large-chinese"
 }
 
 # Embedding model name
-EMBEDDING_MODEL = "local-text2vec"
+EMBEDDING_MODEL = "text2vec"
 
 # Embedding running device
 EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
@@ -20,11 +19,10 @@ llm_model_dict = {
     "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
     "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
     "chatglm-6b": "THUDM/chatglm-6b",
-    "local-chatglm-6b": "./llm/chatglm-6b"
 }
 
 # LLM model name
-LLM_MODEL = "local-chatglm-6b"
+LLM_MODEL = "chatglm-6b"
 
 # Use p-tuning-v2 PrefixEncoder
 USE_PTUNING_V2 = False
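For context, a minimal sketch of how these settings are typically consumed downstream, assuming the embedding model is loaded through LangChain's HuggingFaceEmbeddings and the ChatGLM LLM through Hugging Face transformers; the loading code itself is not part of this commit, and the names below simply mirror model_config.py. With this change, the dictionaries keep only Hugging Face hub identifiers rather than the local "./embedding" and "./llm" paths.

# Sketch only (not part of this commit): how the config values above are typically used.
# Assumes LangChain's HuggingFaceEmbeddings for the embedding model and
# Hugging Face transformers for ChatGLM; names mirror model_config.py.
import torch
from langchain.embeddings import HuggingFaceEmbeddings
from transformers import AutoModel, AutoTokenizer

embedding_model_dict = {"text2vec": "GanymedeNil/text2vec-large-chinese"}
EMBEDDING_MODEL = "text2vec"
EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

llm_model_dict = {"chatglm-6b": "THUDM/chatglm-6b"}
LLM_MODEL = "chatglm-6b"

# Resolve the configured name to a Hugging Face repo id and load the embedding model.
embeddings = HuggingFaceEmbeddings(
    model_name=embedding_model_dict[EMBEDDING_MODEL],
    model_kwargs={"device": EMBEDDING_DEVICE},
)

# Load the ChatGLM-6B checkpoint; trust_remote_code is required for its custom model class.
tokenizer = AutoTokenizer.from_pretrained(llm_model_dict[LLM_MODEL], trust_remote_code=True)
model = AutoModel.from_pretrained(llm_model_dict[LLM_MODEL], trust_remote_code=True).half()
model = model.to(EMBEDDING_DEVICE).eval()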