From 1c5382d96b087d5de19d11f30e7dc0e6c5a82d2a Mon Sep 17 00:00:00 2001 From: zR <2448370773@qq.com> Date: Sat, 21 Oct 2023 22:09:53 +0800 Subject: [PATCH] =?UTF-8?q?=E6=94=AF=E6=8C=81AgentLM=E6=A8=A1=E5=9E=8B=20(?= =?UTF-8?q?#1821)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 支持了agentlm Co-authored-by: zR --- configs/model_config.py.example | 4 +++ configs/prompt_config.py.example | 28 ++++++++++++++- server/agent/tools_select.py | 59 +++++++++++++++++++++++++------- startup.py | 7 ++-- webui_pages/dialogue/dialogue.py | 2 +- 5 files changed, 83 insertions(+), 17 deletions(-) diff --git a/configs/model_config.py.example b/configs/model_config.py.example index 9c1b886..8625a3c 100644 --- a/configs/model_config.py.example +++ b/configs/model_config.py.example @@ -256,4 +256,8 @@ VLLM_MODEL_DICT = { "Qwen-7B-Chat":"Qwen/Qwen-7B-Chat", "Qwen-14B-Chat":"Qwen/Qwen-14B-Chat", + "agentlm-7b":"THUDM/agentlm-7b", + "agentlm-13b":"THUDM/agentlm-13b", + "agentlm-70b":"THUDM/agentlm-70b", + } \ No newline at end of file diff --git a/configs/prompt_config.py.example b/configs/prompt_config.py.example index a52b1f7..26ca621 100644 --- a/configs/prompt_config.py.example +++ b/configs/prompt_config.py.example @@ -86,7 +86,33 @@ PROMPT_TEMPLATES["agent_chat"] = { Question: {input} Thought: {agent_scratchpad} """, - "ChatGLM": + "AgentLM": """ + >\n + You are a helpful, respectful and honest assistant. + >\n + Answer the following questions as best you can. If it is in order, you can use some tools appropriately. You have access to the following tools: + + {tools}. + + Use the following steps and think step by step!: + Question: the input question you must answer + Thought: you should always think about what to do and what tools to use. + Action: the action to take, should be one of [{tool_names}] + Action Input: the input to the action + Observation: the result of the action + ... 
(this Thought/Action/Action Input/Observation can be repeated zero or more times) + Thought: I now know the final answer + Final Answer: the final answer to the original input question + + Begin! let's think step by step! + history: + {history} + Question: {input} + Thought: {agent_scratchpad} + + """, + "中文版本": """ 请请严格按照提供的思维方式来思考。你的知识不一定正确,所以你一定要用提供的工具来思考,并给出用户答案。 你有以下工具可以使用: diff --git a/server/agent/tools_select.py b/server/agent/tools_select.py index 40bb63f..23c99a0 100644 --- a/server/agent/tools_select.py +++ b/server/agent/tools_select.py @@ -1,35 +1,70 @@ from langchain.tools import Tool from server.agent.tools import * + +# tools = [ +# Tool.from_function( +# func=calculate, +# name="计算器工具", +# description="进行简单的数学运算" +# ), +# Tool.from_function( +# func=translate, +# name="翻译工具", +# description="如果你无法访问互联网,并且需要翻译各种语言,应该使用这个工具" +# ), +# Tool.from_function( +# func=weathercheck, +# name="天气查询工具", +# description="无需访问互联网,使用这个工具查询中国各地未来24小时的天气", +# ), +# Tool.from_function( +# func=shell, +# name="shell工具", +# description="使用命令行工具输出", +# ), +# Tool.from_function( +# func=knowledge_search_more, +# name="知识库查询工具", +# description="优先访问知识库来获取答案", +# ), +# Tool.from_function( +# func=search_internet, +# name="互联网查询工具", +# description="如果你无法访问互联网,这个工具可以帮助你访问Bing互联网来解答问题", +# ), +# ] + +## 请注意,如果你是为了使用AgentLM,在这里,你应该使用英文版本,下面的内容是英文版本。 tools = [ Tool.from_function( func=calculate, - name="计算器工具", - description="进行简单的数学运算" + name="Calculator Tool", + description="Perform simple mathematical operations" ), Tool.from_function( func=translate, - name="翻译工具", - description="如果你无法访问互联网,并且需要翻译各种语言,应该使用这个工具" + name="Translation Tool", + description="Use this tool if you can't access the internet and need to translate various languages" ), Tool.from_function( func=weathercheck, - name="天气查询工具", - description="无需访问互联网,使用这个工具查询中国各地未来24小时的天气", + name="Weather Checking Tool", + description="Check the weather for various places in China for the next 24 hours without needing 
internet access" ), Tool.from_function( func=shell, - name="shell工具", - description="使用命令行工具输出", + name="Shell Tool", + description="Use command line tool output" ), Tool.from_function( func=knowledge_search_more, - name="知识库查询工具", - description="优先访问知识库来获取答案", + name="Knowledge Base Query Tool", + description="Prioritize accessing the knowledge base to get answers" ), Tool.from_function( func=search_internet, - name="互联网查询工具", - description="如果你无法访问互联网,这个工具可以帮助你访问Bing互联网来解答问题", + name="Internet Query Tool", + description="If you can't access the internet, this tool can help you access Bing to answer questions" ), ] diff --git a/startup.py b/startup.py index 4bb3449..a51c891 100644 --- a/startup.py +++ b/startup.py @@ -121,21 +121,22 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI: args.block_size = 16 args.swap_space = 4 # GiB args.gpu_memory_utilization = 0.90 - args.max_num_batched_tokens = 16384 # 一个批次中的最大令牌(tokens)数量,这个取决于你的显卡和大模型设置,设置太大显存会不够 + args.max_num_batched_tokens = None # 一个批次中的最大令牌(tokens)数量,这个取决于你的显卡和大模型设置,设置太大显存会不够 args.max_num_seqs = 256 args.disable_log_stats = False args.conv_template = None args.limit_worker_concurrency = 5 args.no_register = False - args.num_gpus = 1 # vllm worker的切分是tensor并行,这里填写显卡的数量 + args.num_gpus = 4 # vllm worker的切分是tensor并行,这里填写显卡的数量 args.engine_use_ray = False args.disable_log_requests = False - # 0.2.0 vllm后要加的参数, 但是这里不需要 + # 0.2.1 vllm后要加的参数, 但是这里不需要 args.max_model_len = None args.revision = None args.quantization = None args.max_log_len = None + args.tokenizer_revision = None if args.model_path: args.model = args.model_path diff --git a/webui_pages/dialogue/dialogue.py b/webui_pages/dialogue/dialogue.py index ff0f30f..b49c47d 100644 --- a/webui_pages/dialogue/dialogue.py +++ b/webui_pages/dialogue/dialogue.py @@ -76,7 +76,7 @@ def dialogue_page(api: ApiRequest): "搜索引擎问答", "自定义Agent问答", ], - index=0, + index=3, on_change=on_mode_change, key="dialogue_mode", )