依赖升级 (#2246)

大量依赖升级到最新版
测试环境:Ubuntu 22.04 LTS,kernel 6.6.3,Python 3.10.12,CUDA 12.3 Update 1
This commit is contained in:
zR 2023-12-01 11:34:41 +08:00 committed by GitHub
parent 60f8e1d55e
commit 0cc1be224d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 100 additions and 92 deletions

View File

@ -15,9 +15,9 @@ EMBEDDING_DEVICE = "auto"
EMBEDDING_KEYWORD_FILE = "keywords.txt"
EMBEDDING_MODEL_OUTPUT_PATH = "output"
# 要运行的 LLM 名称,可以包括本地模型和在线模型。
# 第一个将作为 API 和 WEBUI 的默认模型
LLM_MODELS = ["chatglm3-6b", "zhipu-api", "openai-api"]
# 要运行的 LLM 名称,可以包括本地模型和在线模型。第一个将作为 API 和 WEBUI 的默认模型
# 在这里我们使用目前主流的两个离线模型,其中 chatglm3-6b 为默认加载模型,如果你的显存不足,可使用 Qwen-1_8B-Chat,该模型 FP16 仅需 3.8G 显存。
LLM_MODELS = ["chatglm3-6b", "Qwen-1_8B-Chat", "zhipu-api", "openai-api"]
# AgentLM模型的名称 (可以不指定指定之后就锁定进入Agent之后的Chain的模型不指定就是LLM_MODELS[0])
Agent_MODEL = None

View File

@ -93,7 +93,7 @@ FSCHAT_MODEL_WORKERS = {
},
# 可以如下示例方式更改默认配置
# "Qwen-7B-Chat": { # 使用default中的IP和端口
# "Qwen-1_8B-Chat": { # 使用default中的IP和端口
# "device": "cpu",
# },
"chatglm3-6b": { # 使用default中的IP和端口

Binary file not shown.

Before

Width:  |  Height:  |  Size: 198 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 270 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 174 KiB

View File

@ -1,64 +1,69 @@
# API requirements
langchain==0.0.343
langchain==0.0.344
langchain-experimental>=0.0.42
fschat[model_worker]>=0.2.33
pydantic==1.10.13
fschat>=0.2.33
xformers>=0.0.22.post7
openai>=1.3.5
openai>=1.3.6
sentence_transformers
transformers>=4.35.2
torch==2.1.0 ##on win, install the cuda version manually if you want use gpu
torchvision #on win, install the cuda version manually if you want use gpu
torchaudio #on win, install the cuda version manually if you want use gpu
torch==2.1.0 ##on Windows system, install the cuda version manually from https://pytorch.org/
torchvision #on Windows system, install the cuda version manually from https://pytorch.org/
torchaudio #on Windows system, install the cuda version manually from https://pytorch.org/
fastapi>=0.104
nltk>=3.8.1
uvicorn~=0.23.1
uvicorn>=0.24.0.post1
starlette~=0.27.0
pydantic<2
unstructured[all-docs]==0.11.0
python-magic-bin; sys_platform == 'win32'
SQLAlchemy==2.0.19
faiss-cpu
accelerate
spacy
accelerate>=0.24.1
spacy>=3.7.2
PyMuPDF
rapidocr_onnxruntime
requests
pathlib
pytest
numexpr
strsimpy
markdownify
tiktoken
tqdm
requests>=2.31.0
pathlib>=1.0.1
pytest>=7.4.3
numexpr>=2.8.7
strsimpy>=0.2.1
markdownify>=0.11.6
tiktoken>=0.5.1
tqdm>=4.66.1
websockets
numpy~=1.24.4
pandas~=2.0.3
einops
einops>=0.7.0
transformers_stream_generator==0.0.4
vllm==0.2.2; sys_platform == "linux"
# online api libs dependencies
# Online API libs dependencies
# zhipuai>=1.0.7
# dashscope>=1.10.0
# qianfan>=0.2.0
# volcengine>=1.0.106
# uncomment libs if you want to use corresponding vector store
# pymilvus==2.1.3 # requires milvus==2.1.3
# pymilvus>=2.3.3
# psycopg2
# pgvector
# pgvector>=0.2.4
# Agent and Search Tools
arxiv>=2.0.0
youtube-search>=2.1.2
duckduckgo-search>=4.9.3
metaphor-python>=0.1.23
# WebUI requirements
streamlit~=1.28.2 # # on win, make sure write its path in environment variable
streamlit>=1.29.0
streamlit-option-menu>=0.3.6
streamlit-antd-components>=0.2.3
streamlit-chatbox>=1.1.11
streamlit-modal==0.1.0
streamlit-modal>=0.1.0
streamlit-aggrid>=0.3.4.post3
httpx[brotli,http2,socks]~=0.24.1
watchdog
httpx[brotli,http2,socks]>=0.25.2
watchdog>=3.0.0

View File

@ -1,52 +1,57 @@
# API requirements
langchain==0.0.343
langchain==0.0.344
langchain-experimental>=0.0.42
fschat[model_worker]>=0.2.33
pydantic==1.10.13
fschat>=0.2.33
xformers>=0.0.22.post7
openai>=1.3.5
openai>=1.3.6
sentence_transformers
transformers>=4.35.2
torch==2.1.0
torchvision
torchaudio
torch==2.1.0 ##on Windows system, install the cuda version manually from https://pytorch.org/
torchvision #on Windows system, install the cuda version manually from https://pytorch.org/
torchaudio #on Windows system, install the cuda version manually from https://pytorch.org/
fastapi>=0.104
nltk>=3.8.1
uvicorn~=0.23.1
uvicorn>=0.24.0.post1
starlette~=0.27.0
pydantic<2
unstructured[all-docs]==0.11.0
python-magic-bin; sys_platform == 'win32'
SQLAlchemy==2.0.19
faiss-cpu
accelerate>=0.24.1
spacy
spacy>=3.7.2
PyMuPDF
rapidocr_onnxruntime
requests
pathlib
pytest
numexpr
strsimpy
markdownify
tiktoken
tqdm
requests>=2.31.0
pathlib>=1.0.1
pytest>=7.4.3
numexpr>=2.8.7
strsimpy>=0.2.1
markdownify>=0.11.6
tiktoken>=0.5.1
tqdm>=4.66.1
websockets
numpy~=1.24.4
pandas~=2.0.3
einops
transformers_stream_generator>=0.0.4
einops>=0.7.0
transformers_stream_generator==0.0.4
vllm==0.2.2; sys_platform == "linux"
vllm>=0.2.0; sys_platform == "linux"
# online api libs
zhipuai
dashscope>=1.10.0 # qwen
qianfan
# volcengine>=1.0.106 # fangzhou
# Online api libs dependencies
# uncomment libs if you want to use corresponding vector store
# pymilvus==2.1.3 # requires milvus==2.1.3
# zhipuai>=1.0.7
# dashscope>=1.10.0
# qianfan>=0.2.0
# volcengine>=1.0.106
# pymilvus>=2.3.3
# psycopg2
# pgvector
# pgvector>=0.2.4
# Agent and Search Tools
arxiv>=2.0.0
youtube-search>=2.1.2
duckduckgo-search>=4.9.3
metaphor-python>=0.1.23

View File

@ -1,23 +1,21 @@
langchain==0.0.343
langchain==0.0.344
pydantic==1.10.13
fschat>=0.2.33
openai>=1.3.5
# sentence_transformers
# transformers>=4.35.2
# torch>=2.0.1
# torchvision
# torchaudio
openai>=1.3.6
fastapi>=0.104.1
python-multipart
nltk~=3.8.1
uvicorn~=0.23.1
uvicorn>=0.24.0.post1
starlette~=0.27.0
pydantic~=1.10.11
unstructured[docx,csv]==0.11.0 # add pdf if need
python-magic-bin; sys_platform == 'win32'
SQLAlchemy==2.0.19
numexpr>=2.8.7
strsimpy>=0.2.1
faiss-cpu
# accelerate
# spacy
# accelerate>=0.24.1
# spacy>=3.7.2
# PyMuPDF==1.22.5 # install if need pdf
# rapidocr_onnxruntime>=1.3.2 # install if need pdf
@ -26,18 +24,18 @@ pathlib
pytest
# scikit-learn
# numexpr
# vllm==0.1.7; sys_platform == "linux"
# vllm==0.2.2; sys_platform == "linux"
# online api libs
zhipuai
dashscope>=1.10.0 # qwen
# qianfan
zhipuai>=1.0.7 # zhipu
# dashscope>=1.10.0 # qwen
# volcengine>=1.0.106 # fangzhou
# uncomment libs if you want to use corresponding vector store
# pymilvus==2.1.3 # requires milvus==2.1.3
# psycopg2
# pgvector
# pgvector>=0.2.4
numpy~=1.24.4
pandas~=2.0.3
@ -50,13 +48,15 @@ httpx~=0.24.1
watchdog
tqdm
websockets
einops>=0.7.0
# tiktoken
einops
# scipy
# scipy>=1.11.4
# transformers_stream_generator==0.0.4
# search engine libs
duckduckgo-search
metaphor-python
strsimpy
markdownify
# Agent and Search Tools
arxiv>=2.0.0
youtube-search>=2.1.2
duckduckgo-search>=4.9.3
metaphor-python>=0.1.23

View File

@ -1,11 +1,10 @@
# WebUI requirements
streamlit~=1.28.2
streamlit>=1.29.0
streamlit-option-menu>=0.3.6
streamlit-antd-components>=0.2.3
streamlit-chatbox>=1.1.11
streamlit-modal==0.1.0
streamlit-modal>=0.1.0
streamlit-aggrid>=0.3.4.post3
httpx[brotli,http2,socks]~=0.24.1
watchdog
httpx[brotli,http2,socks]>=0.25.2
watchdog>=3.0.0

View File

@ -170,7 +170,6 @@ class LLMKnowledgeChain(LLMChain):
queries = [(line.split(",")[0].strip(), line.split(",")[1].strip()) for line in lines]
except:
queries = [(line.split("")[0].strip(), line.split("")[1].strip()) for line in lines]
print(queries)
run_manager.on_text("知识库查询询内容:\n\n" + str(queries) + " \n\n", color="blue", verbose=self.verbose)
output = self._evaluate_expression(queries)
run_manager.on_text("\nAnswer: ", verbose=self.verbose)