diff --git a/README.md b/README.md
index 8d45b0b..497f274 100644
--- a/README.md
+++ b/README.md
@@ -220,6 +220,7 @@ Web UI 可以实现如下功能:
 - [ ] Agent 实现
 - [x] 增加更多 LLM 模型支持
   - [x] [THUDM/chatglm2-6b](https://huggingface.co/THUDM/chatglm2-6b)
+  - [x] [THUDM/chatglm2-6b-32k](https://huggingface.co/THUDM/chatglm2-6b-32k)
   - [x] [THUDM/chatglm-6b](https://huggingface.co/THUDM/chatglm-6b)
   - [x] [THUDM/chatglm-6b-int8](https://huggingface.co/THUDM/chatglm-6b-int8)
   - [x] [THUDM/chatglm-6b-int4](https://huggingface.co/THUDM/chatglm-6b-int4)
@@ -235,6 +236,9 @@ Web UI 可以实现如下功能:
   - [x] [nghuyong/ernie-3.0-nano-zh](https://huggingface.co/nghuyong/ernie-3.0-nano-zh)
   - [x] [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh)
   - [x] [shibing624/text2vec-base-chinese](https://huggingface.co/shibing624/text2vec-base-chinese)
+  - [x] [shibing624/text2vec-base-multilingual](https://huggingface.co/shibing624/text2vec-base-multilingual)
+  - [x] [shibing624/text2vec-base-chinese-sentence](https://huggingface.co/shibing624/text2vec-base-chinese-sentence)
+  - [x] [shibing624/text2vec-base-chinese-paraphrase](https://huggingface.co/shibing624/text2vec-base-chinese-paraphrase)
   - [x] [GanymedeNil/text2vec-large-chinese](https://huggingface.co/GanymedeNil/text2vec-large-chinese)
   - [x] [moka-ai/m3e-small](https://huggingface.co/moka-ai/m3e-small)
   - [x] [moka-ai/m3e-base](https://huggingface.co/moka-ai/m3e-base)
diff --git a/configs/model_config.py b/configs/model_config.py
index cba72c6..f721c89 100644
--- a/configs/model_config.py
+++ b/configs/model_config.py
@@ -17,6 +17,9 @@ embedding_model_dict = {
     "ernie-base": "nghuyong/ernie-3.0-base-zh",
     "text2vec-base": "shibing624/text2vec-base-chinese",
     "text2vec": "GanymedeNil/text2vec-large-chinese",
+    "text2vec-base-multilingual": "shibing624/text2vec-base-multilingual",
+    "text2vec-base-chinese-sentence": "shibing624/text2vec-base-chinese-sentence",
+    "text2vec-base-chinese-paraphrase": "shibing624/text2vec-base-chinese-paraphrase",
     "m3e-small": "moka-ai/m3e-small",
     "m3e-base": "moka-ai/m3e-base",
 }
@@ -72,6 +75,12 @@ llm_model_dict = {
         "local_model_path": None,
         "provides": "ChatGLMLLMChain"
     },
+    "chatglm2-6b-32k": {
+        "name": "chatglm2-6b-32k",
+        "pretrained_model_name": "THUDM/chatglm2-6b-32k",
+        "local_model_path": None,
+        "provides": "ChatGLMLLMChain"
+    },
     # 注:chatglm2-cpp已在mac上测试通过,其他系统暂不支持
     "chatglm2-cpp": {
         "name": "chatglm2-cpp",
@@ -219,7 +228,7 @@ llm_model_dict = {
 }
 
 # LLM 名称
-LLM_MODEL = "chatglm-6b"
+LLM_MODEL = "chatglm2-6b-32k"
 # 量化加载8bit 模型
 LOAD_IN_8BIT = False
 # Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.
diff --git a/requirements.txt b/requirements.txt
index 4c981b2..7e37cf0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,6 @@ layoutparser[layoutmodels,tesseract]
 nltk~=3.8.1
 sentence-transformers
 beautifulsoup4
-icetk
 cpm_kernels
 faiss-cpu
 gradio==3.37.0
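
Below is a minimal, illustrative sketch (not part of the patch) of how one of the newly added `embedding_model_dict` entries could be exercised. It assumes the key is resolved through LangChain's `HuggingFaceEmbeddings`, which is how embedding models are typically loaded in this project; the `EMBEDDING_MODEL` value and the CPU device choice are assumptions made for the example.

```python
# Illustrative only — not part of the diff above. Assumes the new
# embedding_model_dict keys are consumed via LangChain's HuggingFaceEmbeddings.
from langchain.embeddings import HuggingFaceEmbeddings

# Subset of the entries added in configs/model_config.py
embedding_model_dict = {
    "text2vec-base-multilingual": "shibing624/text2vec-base-multilingual",
    "text2vec-base-chinese-sentence": "shibing624/text2vec-base-chinese-sentence",
    "text2vec-base-chinese-paraphrase": "shibing624/text2vec-base-chinese-paraphrase",
}

EMBEDDING_MODEL = "text2vec-base-multilingual"  # assumed choice for the example

embeddings = HuggingFaceEmbeddings(
    model_name=embedding_model_dict[EMBEDDING_MODEL],
    model_kwargs={"device": "cpu"},  # switch to "cuda" when a GPU is available
)

vector = embeddings.embed_query("多语言句向量测试")
print(len(vector))  # prints the embedding dimensionality
```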