from __future__ import annotations

from typing import List

from langchain.agents import Tool, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain.schema import AgentAction, AgentFinish

from configs import SUPPORT_AGENT_MODEL
from server.agent import model_container


class CustomPromptTemplate(StringPromptTemplate):
    """Prompt template that renders the tool list and the ReAct scratchpad."""

    # Raw prompt text; it must contain the placeholders {tools}, {tool_names},
    # {agent_scratchpad} and any caller-supplied variables such as {input}.
    template: str
    # Tools available to the agent.
    tools: List[Tool]

    def format(self, **kwargs) -> str:
        # Replay the intermediate (action, observation) pairs into the
        # scratchpad so the model can see what it has already done.
        intermediate_steps = kwargs.pop("intermediate_steps")
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\nObservation: {observation}\nThought: "
        kwargs["agent_scratchpad"] = thoughts
        # Describe the available tools and list their names for the prompt.
        kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
        kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
        return self.template.format(**kwargs)
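

# Minimal usage sketch: it shows the placeholders `format()` fills in
# ({tools}, {tool_names}, {agent_scratchpad}) next to the caller-supplied {input}.
# The template wording and the "echo" tool are illustrative placeholders only.
def _example_prompt_usage() -> str:
    example_template = """Answer the question as best you can. You have access to the following tools:

{tools}

Use the following format:
Question: the input question
Thought: what to do next
Action: one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (Thought/Action/Action Input/Observation can repeat)
Thought: I now know the final answer
Final Answer: the final answer to the original question

Question: {input}
Thought: {agent_scratchpad}"""

    prompt = CustomPromptTemplate(
        template=example_template,
        tools=[Tool(name="echo", func=lambda x: x, description="Echo the input back.")],
        input_variables=["input", "intermediate_steps"],
    )
    # With no intermediate steps yet, the scratchpad is empty.
    return prompt.format(input="What is LangChain?", intermediate_steps=[])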


class CustomOutputParser(AgentOutputParser):
    """Parse raw LLM output into an AgentAction or an AgentFinish."""

    # True while waiting for the first chunk of a new generation; used to truncate
    # hallucinated "Observation:" text from models without native agent support.
    begin: bool = False

    def __init__(self):
        super().__init__()
        self.begin = True

    def parse(self, llm_output: str) -> AgentFinish | AgentAction:
        # For models not listed in SUPPORT_AGENT_MODEL, cut the output at the
        # first stop word so the model cannot fabricate its own "Observation:".
        if not any(agent in model_container.MODEL for agent in SUPPORT_AGENT_MODEL) and self.begin:
            self.begin = False
            stop_words = ["Observation:"]
            min_index = len(llm_output)
            for stop_word in stop_words:
                index = llm_output.find(stop_word)
                if index != -1 and index < min_index:
                    min_index = index
            llm_output = llm_output[:min_index]

        # The model produced a final answer: finish the run.
        if "Final Answer:" in llm_output:
            self.begin = True
            return AgentFinish(
                return_values={"output": llm_output.split("Final Answer:", 1)[-1].strip()},
                log=llm_output,
            )

        # No "Action:" section: treat the whole output as a plain, non-tool answer.
        parts = llm_output.split("Action:")
        if len(parts) < 2:
            return AgentFinish(
                # "Failed to call an agent tool; this answer comes from the model's own capabilities."
                return_values={"output": f"调用agent工具失败,该回答为大模型自身能力的回答:\n\n `{llm_output}`"},
                log=llm_output,
            )

        try:
            # Extract the tool name and its input; this raises if "Action Input:" is missing.
            action = parts[1].split("Action Input:")[0].strip()
            action_input = parts[1].split("Action Input:")[1].strip()
            return AgentAction(
                tool=action,
                tool_input=action_input.strip(" ").strip('"'),
                log=llm_output,
            )
        except Exception:
            return AgentFinish(
                # "Agent call failed."
                return_values={"output": f"调用agent失败: `{llm_output}`"},
                log=llm_output,
            )
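

# Minimal wiring sketch, assuming the classic LangChain custom-agent API
# (LLMChain, LLMSingleActionAgent, AgentExecutor); adjust to the langchain
# version pinned by this project. The helper name and arguments are
# illustrative only and are not called anywhere in this module.
def _build_agent_executor_sketch(llm, tools: List[Tool], template: str):
    from langchain.agents import AgentExecutor, LLMSingleActionAgent
    from langchain.chains import LLMChain

    prompt = CustomPromptTemplate(
        template=template,
        tools=tools,
        input_variables=["input", "intermediate_steps"],
    )
    llm_chain = LLMChain(llm=llm, prompt=prompt)
    agent = LLMSingleActionAgent(
        llm_chain=llm_chain,
        output_parser=CustomOutputParser(),
        # Stop generation before the model writes its own observations.
        stop=["\nObservation:"],
        allowed_tools=[tool.name for tool in tools],
    )
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)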