parent e78a804ec6
commit 21b079d751

@@ -17,23 +17,18 @@
 # - input: the user's input
 # - agent_scratchpad: the Agent's reasoning trace
 
-PROMPT_TEMPLATES = {
-    "completion": {
-        "default": "{input}"
-    },
+PROMPT_TEMPLATES = {}
 
-    "llm_chat": {
+PROMPT_TEMPLATES["llm_chat"] = {
     "default": "{{ input }}",
 
     "py":
     """
     你是一个聪明的代码助手,请你给我写出简单的py代码。 \n
     {{ input }}
-    """
-    ,
-    },
+    """,
+}
 
-    "knowledge_base_chat": {
+PROMPT_TEMPLATES["knowledge_base_chat"] = {
     "default":
     """
     <指令>根据已知信息,简洁和专业的来回答问题。如果无法从中得到答案,请说 “根据已知信息无法回答该问题”,不允许在答案中添加编造成分,答案请使用中文。 </指令>
 
@@ -46,18 +41,18 @@ PROMPT_TEMPLATES = {
     <已知信息>{{ context }}</已知信息>、
     <问题>{{ question }}</问题>
     """,
-    "Empty":  # used when retrieval finds nothing and there is no known information; this 'Empty' entry may be edited but must not be deleted, or the program will break
+    "Empty":  # used when the knowledge base search finds nothing
     """
-    <指令>请根据用户的问题,进行简洁明了的回答</指令>
-    <问题>{{ question }}</问题>
+    请你回答我的问题:
+    {{ question }}
+    \n
     """,
-    },
+}
 
-    "search_engine_chat": {
+PROMPT_TEMPLATES["search_engine_chat"] = {
     "default":
     """
     <指令>这是我搜索到的互联网信息,请你根据这些信息进行提取并有调理,简洁的回答问题。如果无法从中得到答案,请说 “无法搜索到能回答问题的内容”。 </指令>
-    <已知信息>{{ context }}</已知信息>、
+    <已知信息>{{ context }}</已知信息>
     <问题>{{ question }}</问题>
     """,
     "search":
 
@@ -66,14 +61,8 @@ PROMPT_TEMPLATES = {
     <已知信息>{{ context }}</已知信息>、
     <问题>{{ question }}</问题>
     """,
-    "Empty":  # used when the search finds nothing and there is no known information; this 'Empty' entry may be edited but must not be deleted, or the program will break
-    """
-    <指令>请根据用户的问题,进行简洁明了的回答</指令>
-    <问题>{{ question }}</问题>
-    """,
-    },
+}
 
-    "agent_chat": {
+PROMPT_TEMPLATES["agent_chat"] = {
     "default":
     """
     Answer the following questions as best you can. If it is in order, you can use some tools appropriately.You have access to the following tools:
 
@@ -92,21 +81,54 @@ PROMPT_TEMPLATES = {
     ... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
     Thought: I now know the final answer
     Final Answer: the final answer to the original input question
 
 
     Begin!
-    history:
-    {history}
+    history: {history}
 
     Question: {input}
 
     Thought: {agent_scratchpad}
     """,
 
-    "ChatGLM3":  # ChatGLM3 must use the official prompt; there is no room to modify it, and for now none of these parameters are passed in
+    "ChatGLM3":
     """
-    history:
-    {history}
+    You can answer using the tools, or answer directly using your knowledge without using the tools.Respond to the human as helpfully and accurately as possible.
+    You have access to the following tools:
+    {tools}
+    Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
+    Valid "action" values: "Final Answer" or [{tool_names}]
+    Provide only ONE action per $JSON_BLOB, as shown:
 
+    ```
+    {{{{
+    "action": $TOOL_NAME,
+    "action_input": $INPUT
+    }}}}
+    ```
 
+    Follow this format:
 
+    Question: input question to answer
+    Thought: consider previous and subsequent steps
+    Action:
+    ```
+    $JSON_BLOB
+    ```
+    Observation: action result
+    ... (repeat Thought/Action/Observation N times)
+    Thought: I know what to respond
+    Action:
+    ```
+    {{{{
+    "action": "Final Answer",
+    "action_input": "Final response to human"
+    }}}}
+    Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
 
+    history: {history}
 
     Question: {input}
 
     Thought: {agent_scratchpad}
     """,
-    },
 }
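
Note on the brace escaping above: the agent templates are plain str.format strings that get formatted twice, first by create_prompt in the GLM3 agent below (which fills {tools} and {tool_names} and re-injects {history}, {input}, {agent_scratchpad} as literal slots), then again when LangChain renders the resulting prompt template. A literal JSON brace therefore has to be written as {{{{. A minimal sketch of the double pass, using a shortened stand-in for the ChatGLM3 template (not code from the commit):

# Minimal sketch of the double str.format pass; `template` is a shortened
# stand-in for PROMPT_TEMPLATES["agent_chat"]["ChatGLM3"].
template = 'Valid "action" values: [{tool_names}]\n{{{{\n"action": $TOOL_NAME\n}}}}\nQuestion: {input}'

# First pass, as in create_prompt: fill the tool slots, keep the runtime slots.
once = template.format(tool_names=["calculate"], input="{input}")
assert "{{" in once                 # {{{{ has collapsed to {{

# Second pass, when the chat prompt template is rendered at call time.
twice = once.format(input="What is 2 + 2?")
assert "{{" not in twice            # {{ has collapsed to a literal {
print(twice)
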
@@ -1,11 +1,11 @@
 """
 This file is a modified version for ChatGLM3-6B the original ChatGLM3Agent.py file from the langchain repo.
 
 """
 from __future__ import annotations
 
 import yaml
 from langchain.agents.structured_chat.output_parser import StructuredChatOutputParser
+from langchain.memory import ConversationBufferWindowMemory
 from typing import Any, List, Sequence, Tuple, Optional, Union
 import os
 from langchain.agents.agent import Agent
 
@@ -13,7 +13,7 @@ from langchain.chains.llm import LLMChain
 from langchain.prompts.chat import (
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
-    SystemMessagePromptTemplate,
+    SystemMessagePromptTemplate, MessagesPlaceholder,
 )
 import json
 import logging
 
@@ -26,45 +26,6 @@ from langchain.callbacks.base import BaseCallbackManager
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.tools.base import BaseTool
 
-PREFIX = """
-You can answer using the tools, or answer directly using your knowledge without using the tools.
-Respond to the human as helpfully and accurately as possible.
-You have access to the following tools:
-"""
-FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
-
-Valid "action" values: "Final Answer" or {tool_names}
-
-Provide only ONE action per $JSON_BLOB, as shown:
-
-```
-{{{{
-  "action": $TOOL_NAME,
-  "action_input": $INPUT
-}}}}
-```
-
-Follow this format:
-
-Question: input question to answer
-Thought: consider previous and subsequent steps
-Action:
-```
-$JSON_BLOB
-```
-Observation: action result
-... (repeat Thought/Action/Observation N times)
-Thought: I know what to respond
-Action:
-```
-{{{{
-  "action": "Final Answer",
-  "action_input": "Final response to human"
-}}}}
-```"""
-SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
-Thought:"""
-
 HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}"
 logger = logging.getLogger(__name__)
 
@@ -77,9 +38,6 @@ class StructuredChatOutputParserWithRetries(AgentOutputParser):
     output_fixing_parser: Optional[OutputFixingParser] = None
     """The output fixing parser to use."""
 
-    def get_format_instructions(self) -> str:
-        return FORMAT_INSTRUCTIONS
-
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
         special_tokens = ["Action:", "<|observation|>"]
         first_index = min([text.find(token) if token in text else len(text) for token in special_tokens])
 
@@ -112,6 +70,7 @@ Action:
             return parsed_obj
         except Exception as e:
             raise OutputParserException(f"Could not parse LLM output: {text}") from e
 
     @property
     def _type(self) -> str:
         return "structured_chat_ChatGLM3_6b_with_retries"
 
@@ -168,47 +127,57 @@ class StructuredGLM3ChatAgent(Agent):
     def create_prompt(
         cls,
         tools: Sequence[BaseTool],
-        prefix: str = PREFIX,
-        suffix: str = SUFFIX,
-        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
-        format_instructions: str = FORMAT_INSTRUCTIONS,
+        prompt: str = None,
         input_variables: Optional[List[str]] = None,
         memory_prompts: Optional[List[BasePromptTemplate]] = None,
     ) -> BasePromptTemplate:
         def tool_config_from_file(tool_name, directory="server/agent/tools/"):
-            """search tool yaml and return json format"""
+            """search tool yaml and return simplified json format"""
             file_path = os.path.join(directory, f"{tool_name.lower()}.yaml")
             try:
                 with open(file_path, 'r', encoding='utf-8') as file:
-                    return yaml.safe_load(file)
+                    tool_config = yaml.safe_load(file)
+                # Simplify the structure if needed
+                simplified_config = {
+                    "name": tool_config.get("name", ""),
+                    "description": tool_config.get("description", ""),
+                    "parameters": tool_config.get("parameters", {})
+                }
+                return simplified_config
             except FileNotFoundError:
-                print(f"File not found: {file_path}")
+                logger.error(f"File not found: {file_path}")
                 return None
             except Exception as e:
-                print(f"An error occurred while reading {file_path}: {e}")
+                logger.error(f"An error occurred while reading {file_path}: {e}")
                 return None
 
         tools_json = []
-        tool_names = ""
+        tool_names = []
         for tool in tools:
             tool_config = tool_config_from_file(tool.name)
             if tool_config:
                 tools_json.append(tool_config)
-                tool_names.join(tool.name + ", ")
+                tool_names.append(tool.name)
 
+        # Format the tools for output
         formatted_tools = "\n".join([
-            json.dumps(tool, ensure_ascii=False).replace("\"", "\\\"").replace("{", "{{").replace("}", "}}")
+            f"{tool['name']}: {tool['description']}, args: {tool['parameters']}"
             for tool in tools_json
         ])
-        format_instructions = format_instructions.format(tool_names=tool_names)
-        template = "\n\n".join([prefix, formatted_tools, format_instructions, suffix])
+        formatted_tools = formatted_tools.replace("'", "\\'").replace("{", "{{").replace("}", "}}")
+        template = prompt.format(tool_names=tool_names,
+                                 tools=formatted_tools,
+                                 history="{history}",
+                                 input="{input}",
+                                 agent_scratchpad="{agent_scratchpad}")
 
         if input_variables is None:
             input_variables = ["input", "agent_scratchpad"]
         _memory_prompts = memory_prompts or []
         messages = [
             SystemMessagePromptTemplate.from_template(template),
             *_memory_prompts,
-            HumanMessagePromptTemplate.from_template(human_message_template),
         ]
         return ChatPromptTemplate(input_variables=input_variables, messages=messages)
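
For reference, a sketch of what the simplification in tool_config_from_file and the new tool-line formatting in create_prompt above produce. The tool config contents are an assumption: only name, description, and parameters survive, and anything else in a tool's YAML file is dropped.

# Sketch with an assumed tool config; mirrors the simplification and the
# tool-line formatting in create_prompt above.
tool_config = {                      # what yaml.safe_load might return for
    "name": "calculate",             # a hypothetical calculate.yaml
    "description": "useful for simple math",
    "parameters": {"a": "number", "b": "number"},
    "extra": "anything else in the YAML is dropped below",
}
simplified = {
    "name": tool_config.get("name", ""),
    "description": tool_config.get("description", ""),
    "parameters": tool_config.get("parameters", {}),
}
line = f"{simplified['name']}: {simplified['description']}, args: {simplified['parameters']}"
# Double the braces so the later str.format passes keep them as literals.
line = line.replace("'", "\\'").replace("{", "{{").replace("}", "}}")
print(line)
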
@@ -217,12 +186,10 @@ class StructuredGLM3ChatAgent(Agent):
         cls,
         llm: BaseLanguageModel,
         tools: Sequence[BaseTool],
+        prompt: str = None,
         callback_manager: Optional[BaseCallbackManager] = None,
         output_parser: Optional[AgentOutputParser] = None,
-        prefix: str = PREFIX,
-        suffix: str = SUFFIX,
         human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
-        format_instructions: str = FORMAT_INSTRUCTIONS,
         input_variables: Optional[List[str]] = None,
         memory_prompts: Optional[List[BasePromptTemplate]] = None,
         **kwargs: Any,
 
@@ -231,10 +198,7 @@ class StructuredGLM3ChatAgent(Agent):
         cls._validate_tools(tools)
         prompt = cls.create_prompt(
             tools,
-            prefix=prefix,
-            suffix=suffix,
-            human_message_template=human_message_template,
-            format_instructions=format_instructions,
+            prompt=prompt,
             input_variables=input_variables,
             memory_prompts=memory_prompts,
         )
 
@@ -260,7 +224,9 @@ class StructuredGLM3ChatAgent(Agent):
 def initialize_glm3_agent(
     tools: Sequence[BaseTool],
     llm: BaseLanguageModel,
+    prompt: str = None,
     callback_manager: Optional[BaseCallbackManager] = None,
+    memory: Optional[ConversationBufferWindowMemory] = None,
     agent_kwargs: Optional[dict] = None,
     *,
     tags: Optional[Sequence[str]] = None,
 
@@ -269,12 +235,17 @@ def initialize_glm3_agent(
     tags_ = list(tags) if tags else []
     agent_kwargs = agent_kwargs or {}
     agent_obj = StructuredGLM3ChatAgent.from_llm_and_tools(
-        llm, tools, callback_manager=callback_manager, **agent_kwargs
+        llm=llm,
+        tools=tools,
+        prompt=prompt,
+        callback_manager=callback_manager, **agent_kwargs
     )
     return AgentExecutor.from_agent_and_tools(
         agent=agent_obj,
         tools=tools,
         callback_manager=callback_manager,
+        memory=memory,
         tags=tags_,
         **kwargs,
     )
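
Putting the pieces together, the new entry point would be called roughly as follows. This is a hedged sketch, not code from the commit: llm and tools stand in for objects constructed elsewhere in the server, and the template string is the one defined in the prompt config above.

# Hedged usage sketch; `llm` and `tools` are placeholders for objects built
# elsewhere (e.g. in agent_chat.py), and PROMPT_TEMPLATES comes from the
# prompt config module shown earlier in this diff.
from langchain.memory import ConversationBufferWindowMemory

agent_executor = initialize_glm3_agent(
    tools=tools,
    llm=llm,
    prompt=PROMPT_TEMPLATES["agent_chat"]["ChatGLM3"],
    memory=ConversationBufferWindowMemory(k=5),  # keep the last 5 exchanges
    verbose=True,  # forwarded via **kwargs to AgentExecutor.from_agent_and_tools
)
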
@@ -21,7 +21,6 @@ class CustomPromptTemplate(StringPromptTemplate):
         kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
         return self.template.format(**kwargs)
 
-
 class CustomOutputParser(AgentOutputParser):
     begin: bool = False
     def __init__(self):
 
@@ -91,8 +91,11 @@ async def agent_chat(query: str = Body(..., description="用户输入", examples
             llm=model,
             tools=tools,
             callback_manager=None,
-            verbose=True,
+            # Langchain Prompt is not constructed directly here, it is constructed inside the GLM3 agent.
+            prompt=prompt_template,
+            input_variables=["input", "intermediate_steps", "history"],
             memory=memory,
+            verbose=True,
         )
     else:
         agent = LLMSingleActionAgent(
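
One plausible way the memory argument above gets built; the incoming message objects and their role/content fields are assumptions, since that part of agent_chat.py is outside this diff.

# Assumed construction of `memory`; msg.role and msg.content are hypothetical
# fields of the incoming chat history, not shown in this diff.
from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=3)  # default memory_key="history" matches the {history} slot
for msg in history:
    if msg.role == "user":
        memory.chat_memory.add_user_message(msg.content)
    else:
        memory.chat_memory.add_ai_message(msg.content)
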
@@ -9,7 +9,7 @@ from configs import (LLM_MODELS, LLM_DEVICE, EMBEDDING_DEVICE,
                      FSCHAT_MODEL_WORKERS, HTTPX_DEFAULT_TIMEOUT)
 import os
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from langchain.chat_models import ChatOpenAI, AzureChatOpenAI, ChatAnthropic
+from langchain.chat_models import ChatOpenAI
 from langchain.llms import OpenAI, AzureOpenAI, Anthropic
 import httpx
 from typing import Literal, Optional, Callable, Generator, Dict, Any, Awaitable, Union