* 支持glm3
This commit is contained in:
zR 2023-10-27 17:56:27 +08:00 committed by GitHub
parent d054244e55
commit bb72d9ac26
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 61 additions and 48 deletions

View File

@ -39,10 +39,10 @@ MODEL_PATH = {
# TODO: add all supported llm models # TODO: add all supported llm models
"llm_model": { "llm_model": {
# 以下部分模型并未完全测试仅根据fastchat和vllm模型的模型列表推定支持 # 以下部分模型并未完全测试仅根据fastchat和vllm模型的模型列表推定支持
"chatglm-6b": "THUDM/chatglm-6b",
"chatglm2-6b": "THUDM/chatglm2-6b", "chatglm2-6b": "THUDM/chatglm2-6b",
"chatglm2-6b-int4": "THUDM/chatglm2-6b-int4",
"chatglm2-6b-32k": "THUDM/chatglm2-6b-32k", "chatglm2-6b-32k": "THUDM/chatglm2-6b-32k",
"chatglm3-6b": "THUDM/chatglm3-6b",
"chatglm3-6b-32k": "THUDM/chatglm3-6b-32k",
"baichuan2-13b": "baichuan-inc/Baichuan2-13B-Chat", "baichuan2-13b": "baichuan-inc/Baichuan2-13B-Chat",
"baichuan2-7b":"baichuan-inc/Baichuan2-7B-Chat", "baichuan2-7b":"baichuan-inc/Baichuan2-7B-Chat",
@ -122,6 +122,13 @@ TEMPERATURE = 0.7
ONLINE_LLM_MODEL = { ONLINE_LLM_MODEL = {
# 线上模型。请在server_config中为每个在线API设置不同的端口 # 线上模型。请在server_config中为每个在线API设置不同的端口
"openai-api": {
"model_name": "gpt-3.5-turbo",
"api_base_url": "https://api.openai.com/v1",
"api_key": "",
"openai_proxy": "",
},
# 具体注册及api key获取请前往 http://open.bigmodel.cn # 具体注册及api key获取请前往 http://open.bigmodel.cn
"zhipu-api": { "zhipu-api": {
"api_key": "", "api_key": "",
@ -243,11 +250,13 @@ VLLM_MODEL_DICT = {
## 你认为支持Agent能力的模型可以在这里添加添加后不会出现可视化界面的警告 ## 你认为支持Agent能力的模型可以在这里添加添加后不会出现可视化界面的警告
SUPPORT_AGENT_MODEL = [ SUPPORT_AGENT_MODEL = [
"Azure-OpenAI", "azure-api",
"OpenAI", "openai-api",
"Anthropic", "claude-api",
"Qwen", "Qwen",
"qwen-api", "qwen-api",
"baichuan-api", "baichuan-api",
"agentlm" "agentlm",
"chatglm3-6b",
"xinghuo-api"
] ]

View File

@ -127,9 +127,11 @@ PROMPT_TEMPLATES["agent_chat"] = {
""", """,
"中文版本": "中文版本":
""" """
请请严格按照提供的思维方式来思考。你的知识不一定正确,所以你一定要用提供的工具来思考,并给出用户答案。 你的知识不一定正确,所以你一定要用提供的工具来思考,并给出用户答案。
你有以下工具可以使用: 你有以下工具可以使用:
{tools} {tools}
请严格按照提供的思维方式来思考所有的关键词都要输出例如ActionAction InputObservation等
``` ```
Question: 用户的提问或者观察到的信息, Question: 用户的提问或者观察到的信息,
Thought: 你应该思考该做什么,是根据工具的结果来回答问题,还是决定使用什么工具。 Thought: 你应该思考该做什么,是根据工具的结果来回答问题,还是决定使用什么工具。
@ -148,6 +150,5 @@ PROMPT_TEMPLATES["agent_chat"] = {
用户开始以提问: 用户开始以提问:
Question: {input} Question: {input}
Thought: {agent_scratchpad} Thought: {agent_scratchpad}
""", """,
} }

View File

@ -48,7 +48,7 @@ class CustomOutputParser(AgentOutputParser):
parts = llm_output.split("Action:") parts = llm_output.split("Action:")
if len(parts) < 2: if len(parts) < 2:
return AgentFinish( return AgentFinish(
return_values={"output": f"调用agent失败: `{llm_output}`"}, return_values={"output": f"调用agent工具失败,该回答为大模型自身能力的回答:\n\n `{llm_output}`"},
log=llm_output, log=llm_output,
) )

View File

@ -1,89 +1,90 @@
from langchain.tools import Tool from langchain.tools import Tool
from server.agent.tools import * from server.agent.tools import *
## 请注意如果你是为了使用AgentLM在这里你应该使用英文版本下面的内容是英文版本。
# tools = [ # tools = [
# Tool.from_function( # Tool.from_function(
# func=calculate, # func=calculate,
# name="计算器工具", # name="Simple Calculator Tool",
# description="进行简单的数学运算, 只是简单的, 使用Wolfram数学工具进行更复杂的运算", # description="Perform simple mathematical operations, Just simple, Use Wolfram Math Tool for more complex operations"
# ), # ),
# Tool.from_function( # Tool.from_function(
# func=translate, # func=translate,
# name="翻译工具", # name="Translation Tool",
# description="如果你无法访问互联网,并且需要翻译各种语言,应该使用这个工具" # description="Use this tool if you can't access the internet and need to translate various languages"
# ), # ),
# Tool.from_function( # Tool.from_function(
# func=weathercheck, # func=weathercheck,
# name="天气查询工具", # name="Weather Checking Tool",
# description="无需访问互联网使用这个工具查询中国各地未来24小时的天气", # description="Check the weather for various places in China for the next 24 hours without needing internet access"
# ), # ),
# Tool.from_function( # Tool.from_function(
# func=shell, # func=shell,
# name="shell工具", # name="Shell Tool",
# description="使用命令行工具输出", # description="Use command line tool output"
# ), # ),
# Tool.from_function( # Tool.from_function(
# func=knowledge_search_more, # func=knowledge_search_more,
# name="知识库查询工具", # name="Knowledge Base Tool",
# description="优先访问知识库来获取答案", # description="Prioritize accessing the knowledge base to get answers"
# ), # ),
# Tool.from_function( # Tool.from_function(
# func=search_internet, # func=search_internet,
# name="互联网查询工具", # name="Internet Tool",
# description="如果你无法访问互联网这个工具可以帮助你访问Bing互联网来解答问题", # description="If you can't access the internet, this tool can help you access Bing to answer questions"
# ), # ),
# Tool.from_function( # Tool.from_function(
# func=wolfram, # func=wolfram,
# name="Wolfram数学工具", # name="Wolfram Math Tool",
# description="高级的数学运算工具,能够完成非常复杂的数学问题" # description="Use this tool to perform more complex mathematical operations"
# ), # ),
# Tool.from_function( # Tool.from_function(
# func=youtube_search, # func=youtube_search,
# name="Youtube搜索工具", # name="Youtube Search Tool",
# description="使用这个工具在Youtube上搜索视频" # description="Use this tool to search for videos on Youtube"
# )
# ] # ]
## 请注意如果你是为了使用AgentLM在这里你应该使用英文版本下面的内容是英文版本。
tools = [ tools = [
Tool.from_function( Tool.from_function(
func=calculate, func=calculate,
name="Simple Calculator Tool", name="计算器工具",
description="Perform simple mathematical operations, Just simple, Use Wolfram Math Tool for more complex operations" description="进行简单的数学运算, 只是简单的, 使用Wolfram数学工具进行更复杂的运算",
), ),
Tool.from_function( Tool.from_function(
func=translate, func=translate,
name="Translation Tool", name="翻译工具",
description="Use this tool if you can't access the internet and need to translate various languages" description="如果你无法访问互联网,并且需要翻译各种语言,应该使用这个工具"
), ),
Tool.from_function( Tool.from_function(
func=weathercheck, func=weathercheck,
name="Weather Checking Tool", name="天气查询工具",
description="Check the weather for various places in China for the next 24 hours without needing internet access" description="无需访问互联网使用这个工具查询中国各地未来24小时的天气",
), ),
Tool.from_function( Tool.from_function(
func=shell, func=shell,
name="Shell Tool", name="shell工具",
description="Use command line tool output" description="使用命令行工具输出",
), ),
Tool.from_function( Tool.from_function(
func=knowledge_search_more, func=knowledge_search_more,
name="Knowledge Base Tool", name="知识库查询工具",
description="Prioritize accessing the knowledge base to get answers" description="优先访问知识库来获取答案",
), ),
Tool.from_function( Tool.from_function(
func=search_internet, func=search_internet,
name="Internet Tool", name="互联网查询工具",
description="If you can't access the internet, this tool can help you access Bing to answer questions" description="如果你无法访问互联网这个工具可以帮助你访问Bing互联网来解答问题",
), ),
Tool.from_function( Tool.from_function(
func=wolfram, func=wolfram,
name="Wolfram Math Tool", name="Wolfram数学工具",
description="Use this tool to perform more complex mathematical operations" description="高级的数学运算工具,能够完成非常复杂的数学问题"
), ),
Tool.from_function( Tool.from_function(
func=youtube_search, func=youtube_search,
name="Youtube Search Tool", name="Youtube搜索工具",
description="Use this tool to search for videos on Youtube" description="使用这个工具在Youtube上搜索视频"
) )
] ]

View File

@ -5,7 +5,7 @@ from langchain.agents import AgentExecutor, LLMSingleActionAgent
from server.agent.custom_template import CustomOutputParser, CustomPromptTemplate from server.agent.custom_template import CustomOutputParser, CustomPromptTemplate
from fastapi import Body from fastapi import Body
from fastapi.responses import StreamingResponse from fastapi.responses import StreamingResponse
from configs import LLM_MODEL, TEMPERATURE, HISTORY_LEN,Agent_MODEL from configs import LLM_MODEL, TEMPERATURE, HISTORY_LEN, Agent_MODEL
from server.utils import wrap_done, get_ChatOpenAI, get_prompt_template from server.utils import wrap_done, get_ChatOpenAI, get_prompt_template
from langchain.chains import LLMChain from langchain.chains import LLMChain
from typing import AsyncIterable, Optional, Dict from typing import AsyncIterable, Optional, Dict
@ -16,18 +16,21 @@ import json
from server.agent import model_container from server.agent import model_container
from server.knowledge_base.kb_service.base import get_kb_details from server.knowledge_base.kb_service.base import get_kb_details
async def agent_chat(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]), async def agent_chat(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
history: List[History] = Body([], history: List[History] = Body([],
description="历史对话", description="历史对话",
examples=[[ examples=[[
{"role": "user", "content": "请使用知识库工具查询今天北京天气"}, {"role": "user", "content": "请使用知识库工具查询今天北京天气"},
{"role": "assistant", "content": "使用天气查询工具查询到今天北京多云10-14摄氏度东北风2级易感冒"}]] {"role": "assistant",
"content": "使用天气查询工具查询到今天北京多云10-14摄氏度东北风2级易感冒"}]]
), ),
stream: bool = Body(False, description="流式输出"), stream: bool = Body(False, description="流式输出"),
model_name: str = Body(LLM_MODEL, description="LLM 模型名称。"), model_name: str = Body(LLM_MODEL, description="LLM 模型名称。"),
temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0), temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量默认None代表模型最大值"), max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量默认None代表模型最大值"),
prompt_name: str = Body("default",description="使用的prompt模板名称(在configs/prompt_config.py中配置)"), prompt_name: str = Body("default",
description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),
# top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0), # top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0),
): ):
history = [History.from_data(h) for h in history] history = [History.from_data(h) for h in history]
@ -50,7 +53,6 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
kb_list = {x["kb_name"]: x for x in get_kb_details()} kb_list = {x["kb_name"]: x for x in get_kb_details()}
model_container.DATABASE = {name: details['kb_info'] for name, details in kb_list.items()} model_container.DATABASE = {name: details['kb_info'] for name, details in kb_list.items()}
if Agent_MODEL: if Agent_MODEL:
## 如果有指定使用Agent模型来完成任务 ## 如果有指定使用Agent模型来完成任务
model_agent = get_ChatOpenAI( model_agent = get_ChatOpenAI(
@ -74,7 +76,7 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
agent = LLMSingleActionAgent( agent = LLMSingleActionAgent(
llm_chain=llm_chain, llm_chain=llm_chain,
output_parser=output_parser, output_parser=output_parser,
stop=["\nObservation:", "Observation:", "<|im_end|>"], # Qwen模型中使用这个 stop=["\nObservation:", "Observation:", "<|im_end|>", "<|observation|>"],
allowed_tools=tool_names, allowed_tools=tool_names,
) )
# 把history转成agent的memory # 把history转成agent的memory