parent fcbd97203a
commit 1c5382d96b
@@ -256,4 +256,8 @@ VLLM_MODEL_DICT = {
 "Qwen-7B-Chat":"Qwen/Qwen-7B-Chat",
 "Qwen-14B-Chat":"Qwen/Qwen-14B-Chat",
 
+"agentlm-7b":"THUDM/agentlm-7b",
+"agentlm-13b":"THUDM/agentlm-13b",
+"agentlm-70b":"THUDM/agentlm-70b",
+
 }
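This hunk registers the three AgentLM checkpoints in VLLM_MODEL_DICT, mapping the short names used by the worker to their Hugging Face repo ids. As a rough illustration of what such an entry resolves to, here is a minimal, hedged sketch (not the repository's own loading code) that looks a name up and hands the repo id straight to vLLM; it assumes vLLM is installed and the THUDM/agentlm-7b weights are downloadable or already cached:

```python
# Minimal sketch (not the project's loading path): resolve one of the new
# entries to its Hugging Face repo id and hand it directly to vLLM.
from vllm import LLM, SamplingParams

VLLM_MODEL_DICT = {
    "agentlm-7b": "THUDM/agentlm-7b",
    "agentlm-13b": "THUDM/agentlm-13b",
    "agentlm-70b": "THUDM/agentlm-70b",
}

model_path = VLLM_MODEL_DICT["agentlm-7b"]   # -> "THUDM/agentlm-7b"
llm = LLM(model=model_path)                  # single-GPU load of the 7B variant
outputs = llm.generate(["Hello, who are you?"], SamplingParams(max_tokens=64))
print(outputs[0].outputs[0].text)
```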
@@ -86,7 +86,33 @@ PROMPT_TEMPLATES["agent_chat"] = {
 Question: {input}
 Thought: {agent_scratchpad}
 """,
-"ChatGLM":
+"AgentLM":
+"""
+<SYS>>\n
+You are a helpful, respectful and honest assistant.
+</SYS>>\n
+Answer the following questions as best you can. If it is in order, you can use some tools appropriately.You have access to the following tools:
+
+{tools}.
+
+Use the following steps and think step by step!:
+Question: the input question you must answer1
+Thought: you should always think about what to do and what tools to use.
+Action: the action to take, should be one of [{tool_names}]
+Action Input: the input to the action
+Observation: the result of the action
+... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
+Thought: I now know the final answer
+Final Answer: the final answer to the original input question
+
+Begin! let's think step by step!
+history:
+{history}
+Question: {input}
+Thought: {agent_scratchpad}
+
+""",
+"中文版本":
 """
 请请严格按照提供的思维方式来思考。你的知识不一定正确,所以你一定要用提供的工具来思考,并给出用户答案。
 你有以下工具可以使用:
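This hunk re-keys the existing Chinese template under 中文版本 ("Chinese version"; its first context line roughly translates to "Strictly follow the provided way of thinking. Your knowledge is not necessarily correct, so you must use the provided tools and then give the user an answer. You have the following tools available:") and adds a new English ReAct-style prompt under AgentLM. The sketch below shows, in a hedged and heavily abbreviated form, how the template's placeholders ({tools}, {tool_names}, {history}, {input}, {agent_scratchpad}) get filled; in practice LangChain's agent machinery performs this substitution, not hand-written format calls:

```python
# Hedged sketch of filling a ReAct-style template like the "AgentLM" entry above.
# The template here is abbreviated; the real one is the string added in the diff.
TEMPLATE = (
    "You have access to the following tools:\n"
    "{tools}\n"
    "Action: the action to take, should be one of [{tool_names}]\n"
    "history:\n{history}\n"
    "Question: {input}\n"
    "Thought: {agent_scratchpad}"
)

prompt = TEMPLATE.format(
    tools="Calculator Tool: Perform simple mathematical operations",
    tool_names="Calculator Tool",
    history="",              # prior turns, if any
    input="What is 37 * 43?",
    agent_scratchpad="",     # the agent appends its Thought/Action trace here
)
print(prompt)
```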
@@ -1,35 +1,70 @@
 from langchain.tools import Tool
 from server.agent.tools import *
 
+# tools = [
+# Tool.from_function(
+# func=calculate,
+# name="计算器工具",
+# description="进行简单的数学运算"
+# ),
+# Tool.from_function(
+# func=translate,
+# name="翻译工具",
+# description="如果你无法访问互联网,并且需要翻译各种语言,应该使用这个工具"
+# ),
+# Tool.from_function(
+# func=weathercheck,
+# name="天气查询工具",
+# description="无需访问互联网,使用这个工具查询中国各地未来24小时的天气",
+# ),
+# Tool.from_function(
+# func=shell,
+# name="shell工具",
+# description="使用命令行工具输出",
+# ),
+# Tool.from_function(
+# func=knowledge_search_more,
+# name="知识库查询工具",
+# description="优先访问知识库来获取答案",
+# ),
+# Tool.from_function(
+# func=search_internet,
+# name="互联网查询工具",
+# description="如果你无法访问互联网,这个工具可以帮助你访问Bing互联网来解答问题",
+# ),
+# ]
+
+## 请注意,如果你是为了使用AgentLM,在这里,你应该使用英文版本,下面的内容是英文版本。
 tools = [
 Tool.from_function(
 func=calculate,
-name="计算器工具",
-description="进行简单的数学运算"
+name="Calculator Tool",
+description="Perform simple mathematical operations"
 ),
 Tool.from_function(
 func=translate,
-name="翻译工具",
-description="如果你无法访问互联网,并且需要翻译各种语言,应该使用这个工具"
+name="Translation Tool",
+description="Use this tool if you can't access the internet and need to translate various languages"
 ),
 Tool.from_function(
 func=weathercheck,
-name="天气查询工具",
-description="无需访问互联网,使用这个工具查询中国各地未来24小时的天气",
+name="Weather Checking Tool",
+description="Check the weather for various places in China for the next 24 hours without needing internet access"
 ),
 Tool.from_function(
 func=shell,
-name="shell工具",
-description="使用命令行工具输出",
+name="Shell Tool",
+description="Use command line tool output"
 ),
 Tool.from_function(
 func=knowledge_search_more,
-name="知识库查询工具",
-description="优先访问知识库来获取答案",
+name="Knowledge Base Query Tool",
+description="Prioritize accessing the knowledge base to get answers"
 ),
 Tool.from_function(
 func=search_internet,
-name="互联网查询工具",
-description="如果你无法访问互联网,这个工具可以帮助你访问Bing互联网来解答问题",
+name="Internet Query Tool",
+description="If you can't access the internet, this tool can help you access Bing to answer questions"
 ),
 ]
+
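Here the active tool list switches its names and descriptions from Chinese to English, while a commented-out copy of the Chinese originals is kept above it for reference; the ## note translates to "Note: if you are using AgentLM, you should use the English version here; the content below is the English version." The following is a hedged sketch of one way such a Tool list can be consumed, using LangChain's legacy initialize_agent helper with a placeholder LLM rather than this repository's actual wiring:

```python
# Hedged sketch: feeding a Tool list like the one above to a ReAct agent.
# Illustrative only -- uses LangChain's legacy initialize_agent helper and a
# stand-in LLM, not the wiring this repository uses.
from langchain.agents import AgentType, initialize_agent
from langchain.llms.fake import FakeListLLM
from langchain.tools import Tool

def calculate(expression: str) -> str:
    """Placeholder stand-in for the project's calculate tool."""
    return str(eval(expression))  # demo only; never eval untrusted input

tools = [
    Tool.from_function(
        func=calculate,
        name="Calculator Tool",
        description="Perform simple mathematical operations",
    ),
]

llm = FakeListLLM(responses=["Final Answer: 42"])  # stand-in model for the sketch
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
print(agent.run("What is 6 * 7?"))
```

With a real chat model in place of FakeListLLM, the agent would emit Thought/Action steps in the shape of the AgentLM prompt added above.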
@@ -121,21 +121,22 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
 args.block_size = 16
 args.swap_space = 4 # GiB
 args.gpu_memory_utilization = 0.90
-args.max_num_batched_tokens = 16384 # 一个批次中的最大令牌(tokens)数量,这个取决于你的显卡和大模型设置,设置太大显存会不够
+args.max_num_batched_tokens = None # 一个批次中的最大令牌(tokens)数量,这个取决于你的显卡和大模型设置,设置太大显存会不够
 args.max_num_seqs = 256
 args.disable_log_stats = False
 args.conv_template = None
 args.limit_worker_concurrency = 5
 args.no_register = False
-args.num_gpus = 1 # vllm worker的切分是tensor并行,这里填写显卡的数量
+args.num_gpus = 4 # vllm worker的切分是tensor并行,这里填写显卡的数量
 args.engine_use_ray = False
 args.disable_log_requests = False
 
-# 0.2.0 vllm后要加的参数, 但是这里不需要
+# 0.2.1 vllm后要加的参数, 但是这里不需要
 args.max_model_len = None
 args.revision = None
 args.quantization = None
 args.max_log_len = None
+args.tokenizer_revision = None
 
 if args.model_path:
 args.model = args.model_path
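The worker configuration now leaves max_num_batched_tokens unset (the comment translates to "maximum number of tokens per batch; this depends on your GPU and model settings, and too large a value exhausts VRAM"), raises num_gpus from 1 to 4 (the comment notes that the vLLM worker is sharded via tensor parallelism, so this is the GPU count), and adds tokenizer_revision for newer vLLM releases. As a hedged sketch, assuming vLLM ~0.2.x, these settings correspond roughly to vLLM's own engine arguments, with num_gpus surfacing as tensor_parallel_size (the exact translation happens inside the FastChat vLLM worker, not here):

```python
# Hedged sketch: how the worker settings above roughly line up with vLLM's
# engine arguments (field names per vLLM ~0.2.x). Illustrative, not worker code.
from vllm.engine.arg_utils import AsyncEngineArgs

engine_args = AsyncEngineArgs(
    model="THUDM/agentlm-7b",     # args.model_path -> args.model
    tensor_parallel_size=4,       # args.num_gpus = 4: shard the model across 4 GPUs
    gpu_memory_utilization=0.90,
    swap_space=4,                 # GiB of CPU swap space per GPU
    max_num_batched_tokens=None,  # None lets vLLM pick a default for the model
    max_num_seqs=256,
    block_size=16,
)
```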
@@ -76,7 +76,7 @@ def dialogue_page(api: ApiRequest):
 "搜索引擎问答",
 "自定义Agent问答",
 ],
-index=0,
+index=3,
 on_change=on_mode_change,
 key="dialogue_mode",
 )
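The final hunk only changes the default of the dialogue-mode selector from the first option to the fourth, so 自定义Agent问答 ("custom agent chat") is pre-selected when the page loads. Below is a hedged Streamlit sketch of the pattern being tweaked; the label and the first two options are assumed for illustration, since the diff context only shows the last two:

```python
# Hedged sketch of the Streamlit pattern above: st.selectbox pre-selects
# options[index], so index=3 makes the agent mode the default.
import streamlit as st

def on_mode_change():
    # stub callback; the real page reacts to the mode switch here
    pass

dialogue_mode = st.selectbox(
    "请选择对话模式:",      # label assumed for illustration
    [
        "LLM 对话",          # first two options assumed; diff only shows the last two
        "知识库问答",
        "搜索引擎问答",
        "自定义Agent问答",
    ],
    index=3,                 # was 0: default is now 自定义Agent问答 (custom agent chat)
    on_change=on_mode_change,
    key="dialogue_mode",
)
```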