From eabef0415bb3c0882bb19469761363f83933bca0 Mon Sep 17 00:00:00 2001
From: imClumsyPanda
Date: Thu, 13 Apr 2023 23:20:45 +0800
Subject: [PATCH] update cli_demo.py

---
 cli_demo.py             | 1 -
 configs/model_config.py | 6 ++----
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/cli_demo.py b/cli_demo.py
index 0678e4e..093164d 100644
--- a/cli_demo.py
+++ b/cli_demo.py
@@ -1,5 +1,4 @@
 from configs.model_config import *
-import datetime
 from chains.local_doc_qa import LocalDocQA
 
 # return top-k text chunk from vector store
diff --git a/configs/model_config.py b/configs/model_config.py
index 640c2f5..fd309e1 100644
--- a/configs/model_config.py
+++ b/configs/model_config.py
@@ -6,11 +6,10 @@ embedding_model_dict = {
     "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
     "ernie-base": "nghuyong/ernie-3.0-base-zh",
     "text2vec": "GanymedeNil/text2vec-large-chinese",
-    "local": "/Users/liuqian/Downloads/ChatGLM-6B/text2vec-large-chinese"
 }
 
 # Embedding model name
-EMBEDDING_MODEL = "local"#"text2vec"
+EMBEDDING_MODEL = "text2vec"
 
 # Embedding running device
 EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
@@ -20,11 +19,10 @@ llm_model_dict = {
     "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
     "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
     "chatglm-6b": "THUDM/chatglm-6b",
-    "local": "/Users/liuqian/Downloads/ChatGLM-6B/chatglm-6b"
 }
 
 # LLM model name
-LLM_MODEL = "local"#"chatglm-6b"
+LLM_MODEL = "chatglm-6b"
 
 # LLM running device
 LLM_DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
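
Note: this patch removes the developer-specific "local" entries (hard-coded /Users/liuqian/... paths) so the defaults point at the Hugging Face hub names. If you run the models from a local checkout, you can still add your own override. A minimal sketch of what that would look like in configs/model_config.py; the "local" key and both paths below are placeholders for your own machine, not part of this patch:

```python
# configs/model_config.py -- sketch of a machine-local override.
# The paths are hypothetical; point them at wherever you cloned the weights.
embedding_model_dict = {
    "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
    "ernie-base": "nghuyong/ernie-3.0-base-zh",
    "text2vec": "GanymedeNil/text2vec-large-chinese",
    "local": "/path/to/text2vec-large-chinese",  # placeholder path
}
EMBEDDING_MODEL = "local"  # set back to "text2vec" to use the hub model

llm_model_dict = {
    "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
    "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
    "chatglm-6b": "THUDM/chatglm-6b",
    "local": "/path/to/chatglm-6b",  # placeholder path
}
LLM_MODEL = "local"  # set back to "chatglm-6b" to use the hub model
```

Keeping such overrides out of version control (for example, in an untracked local config file) avoids committing machine-specific paths, which is exactly what this patch cleans up.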