enhance

This commit is contained in:
parent c5d1ff6621
commit afa07ad208
@@ -0,0 +1,344 @@
import os


# An absolute path can be set here to keep all Embedding and LLM models in one place.
# Each model can be a standalone directory, or a second-level subdirectory under some directory.
# If a model directory name matches a key or value in MODEL_PATH, the program detects and loads
# it automatically, so the paths in MODEL_PATH need not be changed.
MODEL_ROOT_PATH = ""

# Name of the selected Embedding model
EMBEDDING_MODEL = "bge-large-zh"

# Device the Embedding model runs on. "auto" detects it automatically; it can also be set
# manually to one of "cuda", "mps" or "cpu".
EMBEDDING_DEVICE = "auto"

# Selected reranker model
RERANKER_MODEL = "bge-reranker-large"
# Whether to enable the reranker model
USE_RERANKER = False
RERANKER_MAX_LENGTH = 1024

# Whether to enable fine-grained re-ranking
USE_RANKING = False

# Configure these when adding custom keywords to EMBEDDING_MODEL
EMBEDDING_KEYWORD_FILE = "keywords.txt"
EMBEDDING_MODEL_OUTPUT_PATH = "output"

# Names of the LLMs to run; the list may mix local and online models. All local models in the
# list are loaded when the project starts.
# The first model in the list is the default model for the API and the WebUI.
# Here we use two mainstream offline models, with chatglm3-6b loaded by default.
# If you are short on GPU memory, use Qwen-1_8B-Chat, which needs only 3.8 GB of VRAM in FP16.

# For chatglm3-6b emitting role tags such as <|user|> and answering its own questions, see the
# project wiki -> FAQ -> Q20.

LLM_MODELS = ["chatglm3-6b", "zhipu-api", "openai-api"]  # "Qwen-1_8B-Chat",

# Name of the AgentLM model (optional; if set, it pins the model used by the Chain after
# entering the Agent; if unset, LLM_MODELS[0] is used)
Agent_MODEL = None

# Device the LLM runs on. "auto" detects it automatically; it can also be set manually to one
# of "cuda", "mps" or "cpu".
LLM_DEVICE = "auto"

# Number of history turns kept in a conversation
HISTORY_LEN = 3

# Maximum length the model supports. If None, the model's own default maximum is used;
# if set, the user-specified maximum applies.
MAX_TOKENS = None

# Common LLM dialogue parameters
TEMPERATURE = 0.7
# TOP_P = 0.95  # not yet supported by ChatOpenAI
ONLINE_LLM_MODEL = {
    # Online models. Set a distinct port for each online API in server_config.

    "openai-api": {
        "model_name": "gpt-3.5-turbo",
        "api_base_url": "https://api.openai.com/v1",
        "api_key": "",
        "openai_proxy": "",
    },

    # Register and obtain an api key at http://open.bigmodel.cn
    "zhipu-api": {
        "api_key": "",
        "version": "chatglm_turbo",  # available options include "chatglm_turbo"
        "provider": "ChatGLMWorker",
    },

    # Register and obtain an api key at https://api.minimax.chat/
    "minimax-api": {
        "group_id": "",
        "api_key": "",
        "is_pro": False,
        "provider": "MiniMaxWorker",
    },

    # Register and obtain an api key at https://xinghuo.xfyun.cn/
    "xinghuo-api": {
        "APPID": "",
        "APISecret": "",
        "api_key": "",
        "version": "v1.5",  # version of the iFLYTEK Spark model you use; options include "v3.0", "v1.5", "v2.0"
        "provider": "XingHuoWorker",
    },

    # Baidu Qianfan API; for how to apply, see https://cloud.baidu.com/doc/WENXINWORKSHOP/s/4lilb2lpf
    "qianfan-api": {
        "version": "ERNIE-Bot",  # case-sensitive; currently "ERNIE-Bot" or "ERNIE-Bot-turbo", more in the official docs
        "version_url": "",  # alternatively leave version empty and fill in the API URL of the model you deployed on Qianfan
        "api_key": "",
        "secret_key": "",
        "provider": "QianFanWorker",
    },

    # Volcano Ark API; docs at https://www.volcengine.com/docs/82379
    "fangzhou-api": {
        "version": "chatglm-6b-model",  # currently supports "chatglm-6b-model"; see the Ark part of the supported-model list in the docs
        "version_url": "",  # alternatively leave version empty and fill in the API URL of the model you deployed on Ark
        "api_key": "",
        "secret_key": "",
        "provider": "FangZhouWorker",
    },

    # Alibaba Cloud Tongyi Qianwen API; docs at https://help.aliyun.com/zh/dashscope/developer-reference/api-details
    "qwen-api": {
        "version": "qwen-turbo",  # options include "qwen-turbo", "qwen-plus"
        "api_key": "",  # create one on the DashScope API-KEY management page in the Alibaba Cloud console
        "provider": "QwenWorker",
        "embed_model": "text-embedding-v1",  # embedding model name
    },

    # Baichuan API; for how to apply, see https://www.baichuan-ai.com/home#api-enter
    "baichuan-api": {
        "version": "Baichuan2-53B",  # currently supports "Baichuan2-53B"; see the official docs
        "api_key": "",
        "secret_key": "",
        "provider": "BaiChuanWorker",
    },

    # Azure API
    "azure-api": {
        "deployment_name": "",  # name of the deployment
        "resource_name": "",  # fill in only the resource_name part of https://{resource_name}.openai.azure.com/openai/, nothing else
        "api_version": "",  # version of the API, not of the model
        "api_key": "",
        "provider": "AzureWorker",
    },

    # Kunlun Tiangong API, https://model-platform.tiangong.cn/
    "tiangong-api": {
        "version": "SkyChat-MegaVerse",
        "api_key": "",
        "secret_key": "",
        "provider": "TianGongWorker",
    },

}

# Edit the values in the dictionary below to point at local embedding model storage. Three ways
# of setting them are supported:
# 1. Change the value to the absolute path of the model.
# 2. Leave the value unchanged (taking text2vec as an example):
#    2.1 If any of the following subdirectories exists under {MODEL_ROOT_PATH}:
#        - text2vec
#        - GanymedeNil/text2vec-large-chinese
#        - text2vec-large-chinese
#    2.2 If none of these local paths exists, the huggingface model is used.
MODEL_PATH = {
"embed_model": {
|
||||||
|
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
|
||||||
|
"ernie-base": "nghuyong/ernie-3.0-base-zh",
|
||||||
|
"text2vec-base": "shibing624/text2vec-base-chinese",
|
||||||
|
"text2vec": "GanymedeNil/text2vec-large-chinese",
|
||||||
|
"text2vec-paraphrase": "shibing624/text2vec-base-chinese-paraphrase",
|
||||||
|
"text2vec-sentence": "shibing624/text2vec-base-chinese-sentence",
|
||||||
|
"text2vec-multilingual": "shibing624/text2vec-base-multilingual",
|
||||||
|
"text2vec-bge-large-chinese": "shibing624/text2vec-bge-large-chinese",
|
||||||
|
"m3e-small": "moka-ai/m3e-small",
|
||||||
|
"m3e-base": "moka-ai/m3e-base",
|
||||||
|
"m3e-large": "moka-ai/m3e-large",
|
||||||
|
"bge-small-zh": "BAAI/bge-small-zh",
|
||||||
|
"bge-base-zh": "BAAI/bge-base-zh",
|
||||||
|
"bge-large-zh": "BAAI/bge-large-zh",
|
||||||
|
"bge-large-zh-noinstruct": "BAAI/bge-large-zh-noinstruct",
|
||||||
|
"bge-base-zh-v1.5": "BAAI/bge-base-zh-v1.5",
|
||||||
|
"bge-large-zh-v1.5": "BAAI/bge-large-zh-v1.5",
|
||||||
|
"piccolo-base-zh": "sensenova/piccolo-base-zh",
|
||||||
|
"piccolo-large-zh": "sensenova/piccolo-large-zh",
|
||||||
|
"nlp_gte_sentence-embedding_chinese-large": "damo/nlp_gte_sentence-embedding_chinese-large",
|
||||||
|
"text-embedding-ada-002": "your OPENAI_API_KEY",
|
||||||
|
},
|
||||||
|
|
||||||
|
"llm_model": {
|
||||||
|
# 以下部分模型并未完全测试,仅根据fastchat和vllm模型的模型列表推定支持
|
||||||
|
"chatglm2-6b": "THUDM/chatglm2-6b",
|
||||||
|
"chatglm2-6b-32k": "THUDM/chatglm2-6b-32k",
|
||||||
|
|
||||||
|
"chatglm3-6b": "THUDM/chatglm3-6b",
|
||||||
|
"chatglm3-6b-32k": "THUDM/chatglm3-6b-32k",
|
||||||
|
"chatglm3-6b-base": "THUDM/chatglm3-6b-base",
|
||||||
|
|
||||||
|
"Qwen-1_8B": "Qwen/Qwen-1_8B",
|
||||||
|
"Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat",
|
||||||
|
"Qwen-1_8B-Chat-Int8": "Qwen/Qwen-1_8B-Chat-Int8",
|
||||||
|
"Qwen-1_8B-Chat-Int4": "Qwen/Qwen-1_8B-Chat-Int4",
|
||||||
|
|
||||||
|
"Qwen-7B": "Qwen/Qwen-7B",
|
||||||
|
"Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",
|
||||||
|
|
||||||
|
"Qwen-14B": "Qwen/Qwen-14B",
|
||||||
|
"Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
|
||||||
|
|
||||||
|
"Qwen-14B-Chat-Int8": "Qwen/Qwen-14B-Chat-Int8",
|
||||||
|
# 在新版的transformers下需要手动修改模型的config.json文件,在quantization_config字典中
|
||||||
|
# 增加`disable_exllama:true` 字段才能启动qwen的量化模型
|
||||||
|
"Qwen-14B-Chat-Int4": "Qwen/Qwen-14B-Chat-Int4",
|
||||||
|
|
||||||
|
"Qwen-72B": "Qwen/Qwen-72B",
|
||||||
|
"Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",
|
||||||
|
"Qwen-72B-Chat-Int8": "Qwen/Qwen-72B-Chat-Int8",
|
||||||
|
"Qwen-72B-Chat-Int4": "Qwen/Qwen-72B-Chat-Int4",
|
||||||
|
|
||||||
|
"baichuan2-13b": "baichuan-inc/Baichuan2-13B-Chat",
|
||||||
|
"baichuan2-7b": "baichuan-inc/Baichuan2-7B-Chat",
|
||||||
|
|
||||||
|
"baichuan-7b": "baichuan-inc/Baichuan-7B",
|
||||||
|
"baichuan-13b": "baichuan-inc/Baichuan-13B",
|
||||||
|
"baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",
|
||||||
|
|
||||||
|
"aquila-7b": "BAAI/Aquila-7B",
|
||||||
|
"aquilachat-7b": "BAAI/AquilaChat-7B",
|
||||||
|
|
||||||
|
"internlm-7b": "internlm/internlm-7b",
|
||||||
|
"internlm-chat-7b": "internlm/internlm-chat-7b",
|
||||||
|
|
||||||
|
"falcon-7b": "tiiuae/falcon-7b",
|
||||||
|
"falcon-40b": "tiiuae/falcon-40b",
|
||||||
|
"falcon-rw-7b": "tiiuae/falcon-rw-7b",
|
||||||
|
|
||||||
|
"gpt2": "gpt2",
|
||||||
|
"gpt2-xl": "gpt2-xl",
|
||||||
|
|
||||||
|
"gpt-j-6b": "EleutherAI/gpt-j-6b",
|
||||||
|
"gpt4all-j": "nomic-ai/gpt4all-j",
|
||||||
|
"gpt-neox-20b": "EleutherAI/gpt-neox-20b",
|
||||||
|
"pythia-12b": "EleutherAI/pythia-12b",
|
||||||
|
"oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
|
||||||
|
"dolly-v2-12b": "databricks/dolly-v2-12b",
|
||||||
|
"stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b",
|
||||||
|
|
||||||
|
"Llama-2-13b-hf": "meta-llama/Llama-2-13b-hf",
|
||||||
|
"Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
|
||||||
|
"open_llama_13b": "openlm-research/open_llama_13b",
|
||||||
|
"vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3",
|
||||||
|
"koala": "young-geng/koala",
|
||||||
|
|
||||||
|
"mpt-7b": "mosaicml/mpt-7b",
|
||||||
|
"mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter",
|
||||||
|
"mpt-30b": "mosaicml/mpt-30b",
|
||||||
|
"opt-66b": "facebook/opt-66b",
|
||||||
|
"opt-iml-max-30b": "facebook/opt-iml-max-30b",
|
||||||
|
|
||||||
|
"agentlm-7b": "THUDM/agentlm-7b",
|
||||||
|
"agentlm-13b": "THUDM/agentlm-13b",
|
||||||
|
"agentlm-70b": "THUDM/agentlm-70b",
|
||||||
|
|
||||||
|
"Yi-34B-Chat": "01-ai/Yi-34B-Chat",
|
||||||
|
},
|
||||||
|
"reranker":{
|
||||||
|
"bge-reranker-large":"BAAI/bge-reranker-large",
|
||||||
|
"bge-reranker-base":"BAAI/bge-reranker-base",
|
||||||
|
#TODO 增加在线reranker,如cohere
|
||||||
|
}
|
||||||
|
}

# You normally do not need to change anything below this line.

# nltk model storage path
NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data")

VLLM_MODEL_DICT = {
    "aquila-7b": "BAAI/Aquila-7B",
    "aquilachat-7b": "BAAI/AquilaChat-7B",

    "baichuan-7b": "baichuan-inc/Baichuan-7B",
    "baichuan-13b": "baichuan-inc/Baichuan-13B",
    "baichuan-13b-chat": "baichuan-inc/Baichuan-13B-Chat",

    "chatglm2-6b": "THUDM/chatglm2-6b",
    "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k",
    "chatglm3-6b": "THUDM/chatglm3-6b",
    "chatglm3-6b-32k": "THUDM/chatglm3-6b-32k",

    "BlueLM-7B-Chat": "vivo-ai/BlueLM-7B-Chat",
    "BlueLM-7B-Chat-32k": "vivo-ai/BlueLM-7B-Chat-32k",

    # Note: the bloom family keeps tokenizer and model separate, so although vllm supports
    # these models, they are incompatible with the fschat framework.
    # "bloom": "bigscience/bloom",
    # "bloomz": "bigscience/bloomz",
    # "bloomz-560m": "bigscience/bloomz-560m",
    # "bloomz-7b1": "bigscience/bloomz-7b1",
    # "bloomz-1b7": "bigscience/bloomz-1b7",

    "internlm-7b": "internlm/internlm-7b",
    "internlm-chat-7b": "internlm/internlm-chat-7b",
    "falcon-7b": "tiiuae/falcon-7b",
    "falcon-40b": "tiiuae/falcon-40b",
    "falcon-rw-7b": "tiiuae/falcon-rw-7b",
    "gpt2": "gpt2",
    "gpt2-xl": "gpt2-xl",
    "gpt-j-6b": "EleutherAI/gpt-j-6b",
    "gpt4all-j": "nomic-ai/gpt4all-j",
    "gpt-neox-20b": "EleutherAI/gpt-neox-20b",
    "pythia-12b": "EleutherAI/pythia-12b",
    "oasst-sft-4-pythia-12b-epoch-3.5": "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    "dolly-v2-12b": "databricks/dolly-v2-12b",
    "stablelm-tuned-alpha-7b": "stabilityai/stablelm-tuned-alpha-7b",
    "Llama-2-13b-hf": "meta-llama/Llama-2-13b-hf",
    "Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
    "open_llama_13b": "openlm-research/open_llama_13b",
    "vicuna-13b-v1.3": "lmsys/vicuna-13b-v1.3",
    "koala": "young-geng/koala",
    "mpt-7b": "mosaicml/mpt-7b",
    "mpt-7b-storywriter": "mosaicml/mpt-7b-storywriter",
    "mpt-30b": "mosaicml/mpt-30b",
    "opt-66b": "facebook/opt-66b",
    "opt-iml-max-30b": "facebook/opt-iml-max-30b",

    "Qwen-1_8B": "Qwen/Qwen-1_8B",
    "Qwen-1_8B-Chat": "Qwen/Qwen-1_8B-Chat",
    "Qwen-1_8B-Chat-Int8": "Qwen/Qwen-1_8B-Chat-Int8",
    "Qwen-1_8B-Chat-Int4": "Qwen/Qwen-1_8B-Chat-Int4",

    "Qwen-7B": "Qwen/Qwen-7B",
    "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat",

    "Qwen-14B": "Qwen/Qwen-14B",
    "Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
    "Qwen-14B-Chat-Int8": "Qwen/Qwen-14B-Chat-Int8",
    "Qwen-14B-Chat-Int4": "Qwen/Qwen-14B-Chat-Int4",

    "Qwen-72B": "Qwen/Qwen-72B",
    "Qwen-72B-Chat": "Qwen/Qwen-72B-Chat",
    "Qwen-72B-Chat-Int8": "Qwen/Qwen-72B-Chat-Int8",
    "Qwen-72B-Chat-Int4": "Qwen/Qwen-72B-Chat-Int4",

    "agentlm-7b": "THUDM/agentlm-7b",
    "agentlm-13b": "THUDM/agentlm-13b",
    "agentlm-70b": "THUDM/agentlm-70b",

}

# Models you consider capable of Agent use can be added here; once added, the WebUI no longer
# shows a warning for them.
# In our testing, only the following models natively support Agent use.
SUPPORT_AGENT_MODEL = [
    "azure-api",
    "openai-api",
    "qwen-api",
    "Qwen",
    "chatglm3",
    "xinghuo-api",
]
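
For orientation, the automatic lookup described above the MODEL_PATH dictionary can be pictured with the following minimal sketch. It is illustrative only: the function name and the exact candidate order are assumptions, not the project's actual resolver.

    import os

    def resolve_model_path(name: str, model_root: str, table: dict) -> str:
        """Return a local directory under model_root if one matches, else the hub id."""
        value = table.get(name, name)  # e.g. "text2vec" -> "GanymedeNil/text2vec-large-chinese"
        if model_root:
            for candidate in (name, value, os.path.basename(value)):
                local = os.path.join(model_root, candidate)
                if os.path.isdir(local):
                    return local  # a matching subdirectory of MODEL_ROOT_PATH wins
        return value  # otherwise fall back to the huggingface repo id

    # resolve_model_path("text2vec", MODEL_ROOT_PATH, MODEL_PATH["embed_model"])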
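The `disable_exllama` note next to Qwen-14B-Chat-Int4 could be applied with a small script like the one below. This is a sketch under the assumption that the checkpoint's config.json carries a quantization_config dict (common for AutoGPTQ checkpoints); verify the layout against your own files, and note the path in the usage comment is hypothetical.

    import json

    def patch_qwen_quant_config(config_path: str) -> None:
        """Add disable_exllama: true to quantization_config in a model's config.json."""
        with open(config_path, "r", encoding="utf-8") as f:
            cfg = json.load(f)
        cfg.setdefault("quantization_config", {})["disable_exllama"] = True
        with open(config_path, "w", encoding="utf-8") as f:
            json.dump(cfg, f, ensure_ascii=False, indent=2)

    # patch_qwen_quant_config("Qwen-14B-Chat-Int4/config.json")  # hypothetical local path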
@@ -0,0 +1,126 @@
# Prompt templates use Jinja2 syntax; in short, double curly braces take the place of the
# single curly braces of f-strings.
# This configuration file supports hot reloading: templates can be edited without restarting
# the service.

# Variables supported in LLM chat:
# - input: the user's input

# Variables supported in knowledge-base and search-engine chat:
# - context: knowledge text concatenated from the retrieval results
# - question: the question raised by the user

# Variables supported in Agent chat:
# - tools: the list of available tools
# - tool_names: the list of available tool names
# - history: the conversation history between the user and the Agent
# - input: the user's input
# - agent_scratchpad: the Agent's scratchpad of intermediate reasoning
PROMPT_TEMPLATES = {
    "llm_chat": {
        "default":
            '{{ input }}',

        "with_history":
            'The following is a friendly conversation between a human and an AI. '
            'The AI is talkative and provides lots of specific details from its context. '
            'If the AI does not know the answer to a question, it truthfully says it does not know.\n\n'
            'Current conversation:\n'
            '{history}\n'
            'Human: {input}\n'
            'AI:',

        "py":
            '你是一个聪明的代码助手,请你给我写出简单的py代码。 \n'
            '{{ input }}',
    },

    "knowledge_base_chat": {
        "default":
            '<指令>你是一个电力公司相关的专家,请完全依据已知信息的内容,先找出与问题相关的信息,然后根据这些相关信息简洁、专业地回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,'
            '不允许在答案中添加编造成分,不回答与问题无关的内容,答案请使用中文。 </指令>\n'
            '<已知信息>{{ context }}</已知信息>\n'
            '<问题>{{ question }}</问题>\n',

        "text":
            '<指令>根据已知信息,简洁和专业地来回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,答案请使用中文。 </指令>\n'
            '<已知信息>{{ context }}</已知信息>\n'
            '<问题>{{ question }}</问题>\n',

        "empty":  # used when nothing can be retrieved from the knowledge base
            '请你回答我的问题:\n'
            '{{ question }}\n\n',
    },

    "search_engine_chat": {
        "default":
            '<指令>这是我搜索到的互联网信息,请你根据这些信息进行提取并有条理、简洁地回答问题。'
            '如果无法从中得到答案,请说 “无法搜索到能回答问题的内容”。 </指令>\n'
            '<已知信息>{{ context }}</已知信息>\n'
            '<问题>{{ question }}</问题>\n',

        "search":
            '<指令>根据已知信息,简洁和专业地来回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,答案请使用中文。 </指令>\n'
            '<已知信息>{{ context }}</已知信息>\n'
            '<问题>{{ question }}</问题>\n',
    },

    "agent_chat": {
        "default":
            'Answer the following questions as best you can. You may use some tools when appropriate. '
            'You have access to the following tools:\n\n'
            '{tools}\n\n'
            'Use the following format:\n'
            'Question: the input question you must answer\n'
            'Thought: you should always think about what to do and what tools to use.\n'
            'Action: the action to take, should be one of [{tool_names}]\n'
            'Action Input: the input to the action\n'
            'Observation: the result of the action\n'
            '... (this Thought/Action/Action Input/Observation can be repeated zero or more times)\n'
            'Thought: I now know the final answer\n'
            'Final Answer: the final answer to the original input question\n'
            'Begin!\n\n'
            'history: {history}\n\n'
            'Question: {input}\n\n'
            'Thought: {agent_scratchpad}\n',

        "ChatGLM3":
            'You can answer using the tools, or answer directly using your knowledge without using the tools. '
            'Respond to the human as helpfully and accurately as possible.\n'
            'You have access to the following tools:\n'
            '{tools}\n'
            'Use a json blob to specify a tool by providing an action key (tool name) '
            'and an action_input key (tool input).\n'
            'Valid "action" values: "Final Answer" or [{tool_names}]'
            'Provide only ONE action per $JSON_BLOB, as shown:\n\n'
            '```\n'
            '{{{{\n'
            '  "action": $TOOL_NAME,\n'
            '  "action_input": $INPUT\n'
            '}}}}\n'
            '```\n\n'
            'Follow this format:\n\n'
            'Question: input question to answer\n'
            'Thought: consider previous and subsequent steps\n'
            'Action:\n'
            '```\n'
            '$JSON_BLOB\n'
            '```\n'
            'Observation: action result\n'
            '... (repeat Thought/Action/Observation N times)\n'
            'Thought: I know what to respond\n'
            'Action:\n'
            '```\n'
            '{{{{\n'
            '  "action": "Final Answer",\n'
            '  "action_input": "Final response to human"\n'
            '}}}}\n'
            'Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. '
            'Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.\n'
            'history: {history}\n\n'
            'Question: {input}\n\n'
            'Thought: {agent_scratchpad}',
    }
}
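
Since the templates above are Jinja2, a knowledge-base template can be rendered as in this minimal sketch (assuming the jinja2 package; the sample values are made up):

    from jinja2 import Template

    tmpl = Template(
        '<已知信息>{{ context }}</已知信息>\n'
        '<问题>{{ question }}</问题>\n'
    )
    # Substitute retrieved context and the user's question into the template.
    print(tmpl.render(context="retrieved passages ...", question="example question"))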
@@ -7,6 +7,8 @@ from docx.oxml.text.paragraph import CT_P
 from docx.oxml.table import CT_Tbl
 from docx.table import _Cell, Table
 from docx.text.paragraph import Paragraph
+#from langchain.document_loaders.unstructured import UnstructuredFileLoader
+#from langchain.document_loaders.word_document import Docx2txtLoader

 class RapidWordLoader(UnstructuredFileLoader):
     def _get_elements(self) -> List:

@@ -57,6 +59,8 @@ class RapidWordLoader(UnstructuredFileLoader):
         doc = docxDocument(filepath)
         for block in iter_block_items(doc):
             if isinstance(block, Paragraph):
+
+                #print(f"Paragraph:{block.text}")
                 resp += (block.text + "\n\n")
             elif isinstance(block, Table):
                 resp += read_table(block) + "\n"

@@ -68,10 +72,12 @@ class RapidWordLoader(UnstructuredFileLoader):
         text = word2text(self.file_path)
         from unstructured.partition.text import partition_text
-        return partition_text(text=text, **self.unstructured_kwargs)
+        return partition_text(text=text, paragraph_grouper=False, **self.unstructured_kwargs)


 if __name__ == "__main__":
-    loader = RapidWordLoader(file_path="/Users/wangvivi/Desktop/MySelf/AI/Test/国家电网公司供电企业组织机构规范标准.docx")
+    loader = RapidWordLoader(file_path="/Users/wangvivi/Desktop/Work/思极GPT/数字化部/设备类all/sb389/10kV带电作业用绝缘斗臂车.docx")
+    #loader = Docx2txtLoader(file_path="/Users/wangvivi/Desktop/Work/思极GPT/数字化部/设备类all/sb389/10kV带电作业用绝缘斗臂车.docx")
+    #loader = RapidWordLoader(file_path="/Users/wangvivi/Desktop/MySelf/AI/Test/这是一个测试文档_副本2.docx")
     docs = loader.load()
     print(docs)
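
The paragraph_grouper=False change above bypasses unstructured's line re-grouping, so the "\n\n" breaks that RapidWordLoader inserts between docx blocks survive partitioning. A minimal sketch of the difference (assuming the unstructured package; the sample text is made up, and exact grouping behavior varies by version):

    from unstructured.partition.text import partition_text

    sample = "line one\nline two\n\nnew paragraph"
    # Default behavior may re-group adjacent short lines into one narrative element.
    print([el.text for el in partition_text(text=sample)])
    # With paragraph_grouper=False the grouping step is skipped entirely.
    print([el.text for el in partition_text(text=sample, paragraph_grouper=False)])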
@@ -27,7 +27,7 @@ from server.agent import model_container
 from pydantic import BaseModel, Field

 ## Query the weather via the HeFeng (QWeather) API
-KEY = "ac880e5a877042809ac7ffdd19d95b0d"
+KEY = "1234567890wangweiwei"
 # A key looks like this. The one provided here is only a sample and does not work; register
 # your own HeFeng weather account, then fill in your key here.
@@ -19,7 +19,7 @@ tools = [
     Tool.from_function(
         func=weathercheck,
         name="weather_check",
-        description="",
+        description="Use this tool to answer questions about the weather",
         args_schema=WhetherSchema,
     ),
     Tool.from_function(
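
Filling in the description matters because a LangChain agent selects tools largely from their description strings. A minimal registration sketch under assumed names (WeatherInput and the stub function are hypothetical stand-ins for the project's WhetherSchema and weathercheck):

    from langchain.tools import Tool
    from pydantic import BaseModel, Field

    class WeatherInput(BaseModel):
        location: str = Field(description="name of the city to query")

    def weathercheck(location: str) -> str:
        return f"(stub) weather for {location}"

    weather_tool = Tool.from_function(
        func=weathercheck,
        name="weather_check",
        description="Use this tool to answer questions about the weather",
        args_schema=WeatherInput,
    )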
@@ -17,7 +17,7 @@ from server.chat.utils import History
 import json
 from server.agent import model_container
 from server.knowledge_base.kb_service.base import get_kb_details
+from fastapi.responses import StreamingResponse

 async def agent_chat(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                      history: List[History] = Body([],

@@ -29,7 +29,7 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
                      ),
                      stream: bool = Body(False, description="流式输出"),
                      model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
-                     temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
+                     temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=0.9),
                      max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量,默认None代表模型最大值"),
                      prompt_name: str = Body("default",
                                              description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),

@@ -59,6 +59,8 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
     kb_list = {x["kb_name"]: x for x in get_kb_details()}
     model_container.DATABASE = {name: details['kb_info'] for name, details in kb_list.items()}

+    print(f"agent_chat_iterator model_container.DATABASE:{model_container.DATABASE}")
+    print(f"agent_chat_iterator temperature:{temperature}")
     if Agent_MODEL:
         ## An Agent model has been specified for the task
         model_agent = get_ChatOpenAI(

@@ -68,8 +70,10 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
             callbacks=[callback],
         )
         model_container.MODEL = model_agent
+        print(f"111 agent_chat_iterator :model_container.MODEL:{model_container.MODEL}")
     else:
         model_container.MODEL = model
+        print(f"222 agent_chat_iterator :model_container.MODEL:{model_container.MODEL}")

     prompt_template = get_prompt_template("agent_chat", prompt_name)
     prompt_template_agent = CustomPromptTemplate(

@@ -91,6 +95,7 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
             memory.chat_memory.add_ai_message(message.content)

     if "chatglm3" in model_container.MODEL.model_name:
+        print(f"model_container.MODEL.model_name is chatglm3")
         agent_executor = initialize_glm3_agent(
             llm=model,
             tools=tools,

@@ -180,8 +185,13 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
             yield json.dumps({"answer": answer, "final_answer": final_answer}, ensure_ascii=False)
         await task

-    return EventSourceResponse(agent_chat_iterator(query=query,
+    return StreamingResponse(agent_chat_iterator(query=query,
                                                  history=history,
                                                  model_name=model_name,
                                                  prompt_name=prompt_name),
-                               )
+                             media_type="text/event-stream")
+    # return EventSourceResponse(agent_chat_iterator(query=query,
+    #                                                history=history,
+    #                                                model_name=model_name,
+    #                                                prompt_name=prompt_name),
+    #                            )
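
For reference, the switch from EventSourceResponse to StreamingResponse changes who does the SSE framing: sse-starlette's EventSourceResponse wraps each yielded item in "data: ..." lines, while FastAPI's StreamingResponse sends the iterator's output verbatim. A minimal self-contained sketch (route name and chunks are made up):

    from fastapi import FastAPI
    from fastapi.responses import StreamingResponse

    app = FastAPI()

    async def event_iterator():
        for chunk in ("one", "two", "three"):
            # With StreamingResponse the generator must emit the SSE framing itself.
            yield f"data: {chunk}\n\n"

    @app.get("/stream")
    async def stream():
        return StreamingResponse(event_iterator(), media_type="text/event-stream")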
@@ -350,7 +350,9 @@ def dialogue_page(api: ApiRequest, is_lite: bool = False):
             temperature=temperature,
         ):
             try:
+                print(f"1111自定义Agent问答, d:{d}")
                 d = json.loads(d)
+                print(f"22222自定义Agent问答, d:{d}")
             except:
                 pass
             if error_msg := check_error_msg(d):  # check whether an error occurred