{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "d2ff171c-f5f8-4590-9ce0-21c87e3d5b39",
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path.append('/media/gpt4-pdf-chatbot-langchain/langchain-ChatGLM/')\n",
"from langchain.llms.base import LLM\n",
"import torch\n",
"import transformers\n",
"import models.shared as shared\n",
"from abc import ABC\n",
"\n",
"import random\n",
"from transformers.generation.logits_process import LogitsProcessor\n",
"from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList\n",
"from typing import Optional, List, Dict, Any\n",
"from models.loader import LoaderCheckPoint\n",
"from models.extensions.callback import (Iteratorize, Stream, FixedLengthQueue)\n",
"from models.base import (BaseAnswer,\n",
"                         AnswerResult,\n",
"                         AnswerResultStream,\n",
"                         AnswerResultQueueSentinelTokenListenerQueue)\n",
"from langchain.callbacks.manager import (\n",
"    CallbackManagerForLLMRun\n",
")\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "68978c38-c0e9-4ae9-ba90-9c02aca335be",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Overriding torch_dtype=None with `torch_dtype=torch.float16` due to requirements of `bitsandbytes` to enable model loading in mixed int8. Either pass torch_dtype=torch.float16 or don't pass this argument at all to remove this warning.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loading vicuna-13b-hf...\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "999ea6baab394a6f9b2b4a815ead7ef4",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Loading checkpoint shards:   0%|          | 0/3 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Loaded the model in 182.64 seconds.\n"
]
}
],
"source": [
"import asyncio\n",
"from argparse import Namespace\n",
"from models.loader.args import parser\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"\n",
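"# Loader args for this recorded run: point --model-dir at the checkpoint root,\n",
"# pick the vicuna-13b-hf weights, skip remote downloads, and load in 8-bit.\n",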
"args = parser.parse_args(args=['--model-dir', '/media/checkpoint/', '--model', 'vicuna-13b-hf', '--no-remote-model', '--load-in-8bit'])\n",
"\n",
"args_dict = vars(args)\n",
"\n",
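"# Set up the shared checkpoint loader, then drop any previously loaded weights\n",
"# and cached CUDA memory before loading the model fresh.\n",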
"shared.loaderCheckPoint = LoaderCheckPoint(args_dict)\n",
"torch.cuda.empty_cache()\n",
"shared.loaderCheckPoint.unload_model()\n",
"shared.loaderCheckPoint.reload_model()\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c8e4a58d-1a3a-484a-8417-bcec0eb7170e",
"metadata": {},
"outputs": [],
"source": [
"from abc import ABC\n",
"\n",
"from langchain.llms.base import LLM\n",
"import random\n",
"import torch\n",
"import transformers\n",
"from transformers.generation.logits_process import LogitsProcessor\n",
"from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList\n",
"from typing import Optional, List, Dict, Any\n",
"from models.loader import LoaderCheckPoint\n",
"from models.extensions.callback import (Iteratorize, Stream, FixedLengthQueue)\n",
"import models.shared as shared\n",
"from models.base import (BaseAnswer,\n",
"                         AnswerResult,\n",
"                         AnswerResultStream,\n",
"                         AnswerResultQueueSentinelTokenListenerQueue)\n",
"from langchain.callbacks.manager import (\n",
"    CallbackManagerForLLMRun\n",
")\n",
"\n",
"\n",
"def _streaming_response_template() -> Dict[str, Any]:\n",
"    \"\"\"\n",
"    :return: the response structure\n",
"    \"\"\"\n",
"    return {\n",
"        \"text\": \"\"\n",
"    }\n",
"\n",
"\n",
"def _update_response(response: Dict[str, Any], stream_response: str) -> None:\n",
"    \"\"\"Update response from the stream response.\"\"\"\n",
"    response[\"text\"] += stream_response\n",
"\n",
"\n",
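"# Guard against degenerate logits: if any score is NaN or inf, zero all scores\n",
"# and put a large score on token id 5 so sampling still has a valid target.\n",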
"class InvalidScoreLogitsProcessor(LogitsProcessor):\n",
"    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n",
"        if torch.isnan(scores).any() or torch.isinf(scores).any():\n",
"            scores.zero_()\n",
"            scores[..., 5] = 5e4\n",
"        return scores\n",
"\n",
"\n",
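"# LangChain LLM wrapper around the shared LoaderCheckPoint: it keeps the generation\n",
"# hyperparameters and a short rolling conversation history (history_len rounds).\n",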
"class LLamaLLM(BaseAnswer, LLM, ABC):\n",
"    checkPoint: LoaderCheckPoint = None\n",
"    history = []\n",
"    history_len: int = 3\n",
"    max_new_tokens: int = 500\n",
"    num_beams: int = 1\n",
"    temperature: float = 0.5\n",
"    top_p: float = 0.4\n",
"    top_k: int = 10\n",
"    repetition_penalty: float = 1.12\n",
"    encoder_repetition_penalty: int = 1\n",
"    min_length: int = 0\n",
"    logits_processor: LogitsProcessorList = None\n",
"    stopping_criteria: Optional[StoppingCriteriaList] = None\n",
"\n",
"    state: object = {'max_new_tokens': 50,\n",
"                     'seed': 1,\n",
"                     'temperature': 0, 'top_p': 0.1,\n",
"                     'top_k': 40, 'typical_p': 1,\n",
"                     'repetition_penalty': 1.18,\n",
"                     'encoder_repetition_penalty': 1,\n",
"                     'no_repeat_ngram_size': 0,\n",
"                     'min_length': 0,\n",
"                     'penalty_alpha': 0,\n",
"                     'num_beams': 1,\n",
"                     'length_penalty': 1,\n",
"                     'early_stopping': False, 'add_bos_token': True, 'ban_eos_token': False,\n",
"                     'truncation_length': 2048, 'custom_stopping_strings': '',\n",
"                     'cpu_memory': 0, 'auto_devices': False, 'disk': False, 'cpu': False, 'bf16': False,\n",
"                     'load_in_8bit': False, 'wbits': 'None', 'groupsize': 'None', 'model_type': 'None',\n",
"                     'pre_layer': 0, 'gpu_memory_0': 0}\n",
"\n",
"    def __init__(self, checkPoint: LoaderCheckPoint = None):\n",
"        super().__init__()\n",
"        self.checkPoint = checkPoint\n",
"\n",
"    @property\n",
"    def _llm_type(self) -> str:\n",
"        return \"LLamaLLM\"\n",
"\n",
"    @property\n",
"    def _check_point(self) -> LoaderCheckPoint:\n",
"        return self.checkPoint\n",
"\n",
"    def encode(self, prompt, add_special_tokens=True, add_bos_token=True, truncation_length=None):\n",
"        input_ids = self.checkPoint.tokenizer.encode(str(prompt), return_tensors='pt',\n",
"                                                     add_special_tokens=add_special_tokens)\n",
"        # This is a hack for making replies more creative.\n",
"        if not add_bos_token and input_ids[0][0] == self.checkPoint.tokenizer.bos_token_id:\n",
"            input_ids = input_ids[:, 1:]\n",
"\n",
"        # Llama adds this extra token when the first character is '\\n', and this\n",
"        # compromises the stopping criteria, so we just remove it\n",
"        if type(self.checkPoint.tokenizer) is transformers.LlamaTokenizer and input_ids[0][0] == 29871:\n",
"            input_ids = input_ids[:, 1:]\n",
"\n",
"        # Handling truncation\n",
"        if truncation_length is not None:\n",
"            input_ids = input_ids[:, -truncation_length:]\n",
"\n",
"        return input_ids.cuda()\n",
"\n",
"    def decode(self, output_ids):\n",
"        reply = self.checkPoint.tokenizer.decode(output_ids, skip_special_tokens=True)\n",
"        return reply\n",
"\n",
"    def generate_with_callback(self, callback=None, **kwargs):\n",
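"        # Streaming trick: a Stream stopping-criterion pushes every new token into\n",
"        # `callback`; Iteratorize (below) turns that callback into a generator.\n",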
"        self.checkPoint.clear_torch_cache()\n",
"        kwargs['stopping_criteria'].append(Stream(callback_func=callback))\n",
"        with torch.no_grad():\n",
"            self.checkPoint.model.generate(**kwargs)\n",
"        print(\"方法结束\")  # \"method finished\"\n",
"\n",
"    def generate_with_streaming(self, **kwargs):\n",
"        return Iteratorize(self.generate_with_callback, kwargs)\n",
"\n",
"    # Convert the conversation history array into text form\n",
"    def history_to_text(self, query):\n",
"        formatted_history = ''\n",
"        history = self.history[-self.history_len:] if self.history_len > 0 else []\n",
"        for i, (old_query, response) in enumerate(history):\n",
"            formatted_history += \"[Round {}]\\n问:{}\\n答:{}\\n\".format(i, old_query, response)\n",
"        formatted_history += \"[Round {}]\\n问:{}\\n答:\".format(len(history), query)\n",
"        return formatted_history\n",
"\n",
"    def prepare_inputs_for_generation(self,\n",
"                                      input_ids: torch.LongTensor):\n",
"        \"\"\"\n",
"        Pre-generate the attention mask and the tensor of positional indices\n",
"        for the input sequence.\n",
"        # TODO no clear approach yet\n",
"        :return:\n",
"        \"\"\"\n",
"\n",
"        mask_positions = torch.zeros((1, input_ids.shape[1]), dtype=input_ids.dtype).to(self.checkPoint.model.device)\n",
"\n",
"        attention_mask = self.get_masks(input_ids, input_ids.device)\n",
"\n",
"        position_ids = self.get_position_ids(\n",
"            input_ids,\n",
"            device=input_ids.device,\n",
"            mask_positions=mask_positions\n",
"        )\n",
"\n",
"        return input_ids, position_ids, attention_mask\n",
"\n",
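"    # The two helpers below appear adapted from ChatGLM-style 2D position encoding;\n",
"    # _call currently leaves the prepare_inputs_for_generation path commented out.\n",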
"    def get_position_ids(self, input_ids: torch.LongTensor, mask_positions, device):\n",
"        \"\"\"\n",
"        Position offsets for attention\n",
"        :param input_ids:\n",
"        :param mask_positions:\n",
"        :param device:\n",
"        :return:\n",
"        \"\"\"\n",
"        batch_size, seq_length = input_ids.shape\n",
"        context_lengths = [seq.tolist().index(self.checkPoint.model_config.bos_token_id) for seq in input_ids]\n",
"        position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)\n",
"        for i, context_length in enumerate(context_lengths):\n",
"            position_ids[i, context_length:] = mask_positions[i]\n",
"        block_position_ids = [torch.cat((\n",
"            torch.zeros(context_length, dtype=torch.long, device=device),\n",
"            torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1\n",
"        )) for context_length in context_lengths]\n",
"        block_position_ids = torch.stack(block_position_ids, dim=0)\n",
"        position_ids = torch.stack((position_ids, block_position_ids), dim=1)\n",
"        return position_ids\n",
"\n",
"    def get_masks(self, input_ids, device):\n",
"        \"\"\"\n",
"        Build the attention mask\n",
"        :param input_ids:\n",
"        :param device:\n",
"        :return:\n",
"        \"\"\"\n",
"        batch_size, seq_length = input_ids.shape\n",
"        context_lengths = [seq.tolist().index(self.checkPoint.model_config.bos_token_id) for seq in input_ids]\n",
"        attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device)\n",
"        attention_mask.tril_()\n",
"        for i, context_length in enumerate(context_lengths):\n",
"            attention_mask[i, :, :context_length] = 1\n",
"        attention_mask.unsqueeze_(1)\n",
"        attention_mask = (attention_mask < 0.5).bool()\n",
"        return attention_mask\n",
"\n",
"    def generate_softprompt_history_tensors(self, query):\n",
"        \"\"\"\n",
"        Soft prompt built from the conversation history.\n",
"        history_to_text converts the self.history array into the required text\n",
"        format; the formatted history text can then be converted into a vector\n",
"        representation with self.encode and concatenated with the vector of the\n",
"        current input.\n",
"        :return:\n",
"        \"\"\"\n",
"\n",
"        # conversation content\n",
"        # process the conversation history\n",
"        formatted_history = self.history_to_text(query)\n",
"        return formatted_history\n",
"\n",
"    def _call(self,\n",
"              prompt: str,\n",
"              stop: Optional[List[str]] = None,\n",
"              run_manager: Optional[CallbackManagerForLLMRun] = None) -> str:\n",
"        print(f\"__call:{prompt}\")\n",
"        if self.logits_processor is None:\n",
"            self.logits_processor = LogitsProcessorList()\n",
"        self.logits_processor.append(InvalidScoreLogitsProcessor())\n",
"\n",
"        gen_kwargs = {\n",
"            \"max_new_tokens\": self.max_new_tokens,\n",
"            \"num_beams\": self.num_beams,\n",
"            \"top_p\": self.top_p,\n",
"            \"top_k\": self.top_k,\n",
"            \"repetition_penalty\": self.repetition_penalty,\n",
"            \"encoder_repetition_penalty\": self.encoder_repetition_penalty,\n",
"            \"min_length\": self.min_length,\n",
"            \"temperature\": self.temperature,\n",
"            \"logits_processor\": self.logits_processor}\n",
"\n",
"        # concatenate vectors\n",
"        input_ids = self.encode(prompt, add_bos_token=self.state['add_bos_token'], truncation_length=self.max_new_tokens)\n",
"        # input_ids, position_ids, attention_mask = self.prepare_inputs_for_generation(input_ids=filler_input_ids)\n",
"\n",
"        # prompt for the chat model\n",
"        gen_kwargs.update({'inputs': input_ids})\n",
"        # attention mask\n",
"        # gen_kwargs.update({'attention_mask': attention_mask})\n",
"        # gen_kwargs.update({'position_ids': position_ids})\n",
"        if self.stopping_criteria is None:\n",
"            self.stopping_criteria = transformers.StoppingCriteriaList()\n",
"        # observe the output\n",
"        gen_kwargs.update({'stopping_criteria': self.stopping_criteria})\n",
"        shared.stop_everything = False\n",
"        stopped = False\n",
"        response_template = _streaming_response_template()\n",
"\n",
"        # TODO: this streaming output method needs to be rewritten!\n",
"        # stopping_criteria cannot be controlled from here, and the iterator's variables cannot be shared\n",
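"        # Each iteration decodes the tokens generated so far, takes the new text delta,\n",
"        # pushes it into a FixedLengthQueue, and scans that queue for the stop strings.\n",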
"        with self.generate_with_streaming(**gen_kwargs) as generator:\n",
"            last_reply_len = 0\n",
"            reply_index = 0\n",
"            # Create a FixedLengthQueue with the desired stop sequence and a maximum length.\n",
"            queue = FixedLengthQueue(stop)\n",
"            for output in generator:\n",
"                new_tokens = len(output) - len(input_ids[0])\n",
"                reply = self.decode(output[-new_tokens:])\n",
"\n",
"                new_reply = len(reply) - last_reply_len\n",
"                output_reply = reply[-new_reply:]\n",
"                queue.add(reply_index, output_reply)\n",
"                queue.contains_replace_sequence()\n",
"                if stop:\n",
"                    pos = queue.contains_stop_sequence()\n",
"                    if pos != -1:\n",
"                        shared.stop_everything = True\n",
"                        stopped = True\n",
"\n",
"                # print(f\"{reply_index}:reply {output_reply}\")\n",
"                english_reply = queue.put_replace_out(reply_index)\n",
"                # print(f\"{reply_index}:english_reply {english_reply}\")\n",
"                _update_response(response_template, english_reply)\n",
"                last_reply_len = len(reply)\n",
"\n",
"                reply_index += 1\n",
"                if new_tokens == self.max_new_tokens - 1 or stopped:\n",
"                    break\n",
"\n",
"        response = response_template['text']\n",
"        print(f\"response:{response}\")\n",
"        self.history = self.history + [[None, response]]\n",
"        return response\n",
"\n",
"    def _generate_answer(self, prompt: str,\n",
"                         history: List[List[str]] = [],\n",
"                         streaming: bool = False,\n",
"                         generate_with_callback: AnswerResultStream = None) -> None:\n",
"        if history:\n",
"            self.history = history\n",
"        # Create the StoppingCriteriaList with the stopping strings\n",
"        self.stopping_criteria = transformers.StoppingCriteriaList()\n",
"        # Listener queue on the model's stopping_criteria that syncs torch.LongTensor / torch.FloatTensor into AnswerResult on every response\n",
"        listenerQueue = AnswerResultQueueSentinelTokenListenerQueue()\n",
"        self.stopping_criteria.append(listenerQueue)\n",
"        # TODO: a chat module and attention handling still need to be implemented. _call is currently the API exposed by the LangChain LLM extension and defaults to no prompt template; to work with the attention model, see the chat_glm implementation for reference.\n",
"        softprompt = self.generate_softprompt_history_tensors(prompt)\n",
"        response = self._call(prompt=softprompt, stop=['\\n###'])\n",
"        answer_result = AnswerResult()\n",
"        answer_result.history = self.history\n",
"        if len(listenerQueue.listenerQueue) > 0:\n",
"            answer_result.listenerToken = listenerQueue.listenerQueue.pop()\n",
"        answer_result.llm_output = {\"answer\": response}\n",
"        generate_with_callback(answer_result)\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "306450e6-b1fd-4c9c-9083-7419094a14f5",
"metadata": {},
"outputs": [],
"source": [
"llm_model_ins = LLamaLLM(checkPoint=shared.loaderCheckPoint)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7180ae6a-7050-48a0-b89a-82e51bec7db1",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
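"# Summarization chain: a prompt over {chat_history}/{input} backed by read-only\n",
"# shared memory, so the tool can read the conversation without mutating it.\n",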
"template = \"\"\"This is a conversation between a human and a bot:\n",
"\n",
"{chat_history}\n",
"\n",
"Write a summary of the conversation for {input}:\n",
"\"\"\"\n",
"\n",
"prompt = PromptTemplate(\n",
"    input_variables=[\"input\", \"chat_history\"],\n",
"    template=template\n",
")\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"readonlymemory = ReadOnlySharedMemory(memory=memory)\n",
"summary_chain = LLMChain(\n",
"    llm=llm_model_ins,\n",
"    prompt=prompt,\n",
"    verbose=True,\n",
"    memory=readonlymemory,  # use the read-only memory to prevent the tool from modifying the memory\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "8516d438-8f9c-4cb7-b921-a7ee216648d5",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n",
"from typing import List, Set\n",
"\n",
"\n",
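"# ZeroShotAgent subclass that records which tool names the agent may call and\n",
"# exposes them as a set via get_allowed_tools().\n",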
"class CustomLLMSingleActionAgent(ZeroShotAgent):\n",
"    allowed_tools: List[str]\n",
"\n",
"    def __init__(self, *args, **kwargs):\n",
"        super(CustomLLMSingleActionAgent, self).__init__(*args, **kwargs)\n",
"        self.allowed_tools = kwargs['allowed_tools']\n",
"\n",
"    def get_allowed_tools(self) -> Set[str]:\n",
"        return set(self.allowed_tools)\n",
"\n",
"\n",
"tools = [\n",
"    Tool(\n",
"        name=\"Summary\",\n",
"        func=summary_chain.run,\n",
"        description=\"useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\"\n",
"    )\n",
"]\n",
"\n",
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\n",
" \n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
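"# create_prompt stitches prefix + the rendered tool descriptions + suffix into\n",
"# the final agent prompt with {input} and {agent_scratchpad} slots.\n",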
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
"    tools,\n",
"    prefix=prefix,\n",
"    suffix=suffix,\n",
"    input_variables=[\"input\", \"agent_scratchpad\"]\n",
")\n",
"tool_names = [tool.name for tool in tools]\n",
"llm_chain = LLMChain(llm=llm_model_ins, prompt=prompt)\n",
"agent = CustomLLMSingleActionAgent(llm_chain=llm_chain, tools=tools, allowed_tools=tool_names)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "233c3097-66cb-48b0-8de7-d34177e60bd6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"__call:Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\n",
"\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [Summary]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin!\n",
" \n",
"Question: 你好\n",
"\n",
"response:Thought: 我必<E68891><E5BF85>hought: 我必须回<E9A1BB><E59B9E>hought: 我必须回答这个问题。\n",
"Action: 问<><E997AE>hought: 我必须回答这个问题。\n",
"Action: 问她想知道<E79FA5><E98193>hought: 我必须回答这个问题。\n",
"Action: 问她想知道什么。\n",
"Action Input: 没有需要的输入。\n",
"Observation:\n",
"__call:Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\n",
"\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [Summary]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin!\n",
" \n",
"Question: 你好\n",
"Thought: 我必<E68891><E5BF85>hought: 我必须回<E9A1BB><E59B9E>hought: 我必须回答这个问题。\n",
"Action: 问<><E997AE>hought: 我必须回答这个问题。\n",
"Action: 问她想知道<E79FA5><E98193>hought: 我必须回答这个问题。\n",
"Action: 问她想知道什么。\n",
"Action Input: 没有需要的输入。\n",
"Observation:\n",
"Observation: 问<><E997AE>hought: 我必须回答这个问题。\n",
"Action: 问她想知道<E79FA5><E98193>hought: 我必须回答这个问题。\n",
"Action: 问她想知道什么。 is not a valid tool, try another one.\n",
"Thought:\n",
"traceback.print_exc()\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/langchain-ChatGLM/models/extensions/callback.py\", line 188, in gen\n",
"    ret = self.mfunc(callback=_callback, **self.kwargs)\n",
"  File \"/tmp/ipykernel_10879/1809555145.py\", line 114, in generate_with_callback\n",
"    self.checkPoint.model.generate(**kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n",
"    return func(*args, **kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 1437, in generate\n",
"    return self.greedy_search(\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 2245, in greedy_search\n",
"    model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py\", line 737, in prepare_inputs_for_generation\n",
"    position_ids.masked_fill_(attention_mask == 0, 1)\n",
"SystemExit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"方法结束\n",
"invalid thread id\n",
"response:我不能回<E883BD><E59B9E>不能回答这个问题,因为我是一个人工智能程序。\n",
"Final Answer: 我不能回<E883BD><E59B9E>不能回答这个问题,因为我是一个人工智能程序。\n",
"Final Answer: 我不能回答这个问题,因为我是一个人工智能程序。\n"
]
},
{
"data": {
"text/plain": [
"'我不能回答这个问题,因为我是一个人工智能程序。'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_chain.run(input=\"你好\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "71a4259f-f589-4aa6-b868-3e10b4f2b22c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"__call:Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\n",
"\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [Summary]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin!\n",
" \n",
"Question: 你是谁?\n",
"\n",
"response:Thought: 我需要回<E8A681><E59B9E>hought: 我需要回答这个问题。\n",
"Action: <20><>hought: 我需要回答这个问题。\n",
"Action: 执行以下操作:\n",
"Action Input: 输入自<E585A5><E887AA>hought: 我需要回答这个问题。\n",
"Action: 执行以下操作:\n",
"Action Input: 输入自己的名字。\n",
"Observation:\n",
"__call:Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\n",
"\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [Summary]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin!\n",
" \n",
"Question: 你是谁?\n",
"Thought: 我需要回<E8A681><E59B9E>hought: 我需要回答这个问题。\n",
"Action: <20><>hought: 我需要回答这个问题。\n",
"Action: 执行以下操作:\n",
"Action Input: 输入自<E585A5><E887AA>hought: 我需要回答这个问题。\n",
"Action: 执行以下操作:\n",
"Action Input: 输入自己的名字。\n",
"Observation:\n",
"Observation: <20><>hought: 我需要回答这个问题。\n",
"Action: 执行以下操作: is not a valid tool, try another one.\n",
"Thought:\n",
"traceback.print_exc()\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/langchain-ChatGLM/models/extensions/callback.py\", line 188, in gen\n",
"    ret = self.mfunc(callback=_callback, **self.kwargs)\n",
"  File \"/tmp/ipykernel_13699/1809555145.py\", line 114, in generate_with_callback\n",
"    self.checkPoint.model.generate(**kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n",
"    return func(*args, **kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 1437, in generate\n",
"    return self.greedy_search(\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 2245, in greedy_search\n",
"    model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py\", line 737, in prepare_inputs_for_generation\n",
"    position_ids.masked_fill_(attention_mask == 0, 1)\n",
"SystemExit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"方法结束\n",
"invalid thread id\n",
"response:我不知道该如何回<E4BD95><E59B9E>不知道该如何回答这个问题。\n",
"Final Answer: 我不知道该如何回<E4BD95><E59B9E>不知道该如何回答这个问题。\n",
"Final Answer: 我不知道该如何回答这个问题。\n"
]
},
{
"data": {
"text/plain": [
"'我不知道该如何回答这个问题。'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_chain.run(input=\"你是谁?\")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "e0cd4b1d-a7c8-432f-93d4-ef78b3dd851a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"__call:Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\n",
"\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [Summary]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin!\n",
" \n",
"Question: 我们之前聊了什么?\n",
"\n",
"response:Thought: 我需要回<E8A681><E59B9E>hought: 我需要回顾我们之前的对话以确定问题的<E9A298><E79A84>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 检查我们之前的对话记录\n",
"Action Input: 无\n",
"Observation:\n",
"__call:Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\n",
"\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [Summary]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin!\n",
" \n",
"Question: 我们之前聊了什么?\n",
"Thought: 我需要回<E8A681><E59B9E>hought: 我需要回顾我们之前的对话以确定问题的<E9A298><E79A84>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 检查我们之前的对话记录\n",
"Action Input: 无\n",
"Observation:\n",
"Observation: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 检查我们之前的对话记录 is not a valid tool, try another one.\n",
"Thought:\n",
"traceback.print_exc()\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/langchain-ChatGLM/models/extensions/callback.py\", line 188, in gen\n",
"    ret = self.mfunc(callback=_callback, **self.kwargs)\n",
"  File \"/tmp/ipykernel_13699/1809555145.py\", line 114, in generate_with_callback\n",
"    self.checkPoint.model.generate(**kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n",
"    return func(*args, **kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 1437, in generate\n",
"    return self.greedy_search(\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 2245, in greedy_search\n",
"    model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py\", line 737, in prepare_inputs_for_generation\n",
"    position_ids.masked_fill_(attention_mask == 0, 1)\n",
"SystemExit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"response:我需要回<E8A681><E59B9E>需要回顾我们之前的对话以确定问题的<E9A298><E79A84>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回<E8A681><E59B9E>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的<E9A298><E79A84>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 找出我们之前<E4B98B><E5898D>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 找出我们之前聊的主题\n",
"Action Input: 无\n",
"Observation:\n",
"__call:Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\n",
"\n",
"Summary: useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: the input question you must answer\n",
"Thought: you should always think about what to do\n",
"Action: the action to take, should be one of [Summary]\n",
"Action Input: the input to the action\n",
"Observation: the result of the action\n",
"... (this Thought/Action/Action Input/Observation can repeat N times)\n",
"Thought: I now know the final answer\n",
"Final Answer: the final answer to the original input question\n",
"\n",
"Begin!\n",
" \n",
"Question: 我们之前聊了什么?\n",
"Thought: 我需要回<E8A681><E59B9E>hought: 我需要回顾我们之前的对话以确定问题的<E9A298><E79A84>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 检查我们之前的对话记录\n",
"Action Input: 无\n",
"Observation:\n",
"Observation: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 检查我们之前的对话记录 is not a valid tool, try another one.\n",
"Thought:我需要回<E8A681><E59B9E>需要回顾我们之前的对话以确定问题的<E9A298><E79A84>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回<E8A681><E59B9E>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的<E9A298><E79A84>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 找出我们之前<E4B98B><E5898D>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 找出我们之前聊的主题\n",
"Action Input: 无\n",
"Observation:\n",
"Observation: <20><>hought: 我需要回<E8A681><E59B9E>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的<E9A298><E79A84>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 找出我们之前<E4B98B><E5898D>需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: <20><>hought: 我需要回顾我们之前的对话以确定问题的具体内容。\n",
"Action: 找出我们之前聊的主题 is not a valid tool, try another one.\n",
"Thought:\n",
"traceback.print_exc()\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/langchain-ChatGLM/models/extensions/callback.py\", line 188, in gen\n",
"    ret = self.mfunc(callback=_callback, **self.kwargs)\n",
"  File \"/tmp/ipykernel_13699/1809555145.py\", line 114, in generate_with_callback\n",
"    self.checkPoint.model.generate(**kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n",
"    return func(*args, **kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 1437, in generate\n",
"    return self.greedy_search(\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 2245, in greedy_search\n",
"    model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py\", line 739, in prepare_inputs_for_generation\n",
"    position_ids = position_ids[:, -1].unsqueeze(-1)\n",
"SystemExit\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"response:[a long run of UTF-8 replacement characters elided]\n",
"traceback.print_exc()\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Traceback (most recent call last):\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/langchain-ChatGLM/models/extensions/callback.py\", line 188, in gen\n",
"    ret = self.mfunc(callback=_callback, **self.kwargs)\n",
"  File \"/tmp/ipykernel_13699/1809555145.py\", line 114, in generate_with_callback\n",
"    self.checkPoint.model.generate(**kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/torch/utils/_contextlib.py\", line 115, in decorate_context\n",
"    return func(*args, **kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 1437, in generate\n",
"    return self.greedy_search(\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/generation/utils.py\", line 2245, in greedy_search\n",
"    model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n",
"  File \"/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/transformers/models/llama/modeling_llama.py\", line 739, in prepare_inputs_for_generation\n",
"    position_ids = position_ids[:, -1].unsqueeze(-1)\n",
"SystemExit\n"
]
},
{
"data": {
"text/html": [
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #800000; text-decoration-color: #800000\">╭─────────────────────────────── </span><span style=\"color: #800000; text-decoration-color: #800000; font-weight: bold\">Traceback </span><span style=\"color: #bf7f7f; text-decoration-color: #bf7f7f; font-weight: bold\">(most recent call last)</span><span style=\"color: #800000; text-decoration-color: #800000\"> ────────────────────────────────╮</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/tmp/ipykernel_13699/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">3633147670.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">1</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\"><module></span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000; font-style: italic\">[Errno 2] No such file or directory: '/tmp/ipykernel_13699/3633147670.py'</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/chains/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">base.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">241</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">run</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">238 │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">return</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>(args[<span style=\"color: #0000ff; text-decoration-color: #0000ff\">0</span>], callbacks=callbacks)[<span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.output_keys[<span style=\"color: #0000ff; text-decoration-color: #0000ff\">0</span>]] <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">239 │ │ </span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">240 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> kwargs <span style=\"color: #ff00ff; text-decoration-color: #ff00ff\">and</span> <span style=\"color: #ff00ff; text-decoration-color: #ff00ff\">not</span> args: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>241 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">return</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>(kwargs, callbacks=callbacks)[<span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.output_keys[<span style=\"color: #0000ff; text-decoration-color: #0000ff\">0</span>]] <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">242 │ │ </span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">243 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">raise</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">ValueError</span>( <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">244 │ │ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">f\"`run` supported with either positional arguments or keyword arguments\"</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/chains/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">base.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">142</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">__call__</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">139 │ │ │ </span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">140 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">except</span> (<span style=\"color: #00ffff; text-decoration-color: #00ffff\">KeyboardInterrupt</span>, <span style=\"color: #00ffff; text-decoration-color: #00ffff\">Exception</span>) <span style=\"color: #0000ff; text-decoration-color: #0000ff\">as</span> e: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">141 │ │ │ </span>run_manager.on_chain_error(e) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>142 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">raise</span> e <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">143 │ │ </span>run_manager.on_chain_end(outputs) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">144 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">return</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.prep_outputs(inputs, outputs, return_only_outputs) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">145 </span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/chains/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">base.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">136</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">__call__</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">133 │ │ </span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">134 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">try</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">135 │ │ │ </span>outputs = ( <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>136 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ </span><span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>._call(inputs, run_manager=run_manager) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">137 │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> new_arg_supported <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">138 │ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">else</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>._call(inputs) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">139 │ │ │ </span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">agent.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">904</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">_call</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">901 │ │ </span>start_time = time.time() <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">902 │ │ # We now enter the agent loop (until it returns something).</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">903 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">while</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>._should_continue(iterations, time_elapsed): <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>904 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span>next_step_output = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>._take_next_step( <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">905 │ │ │ │ </span>name_to_tool_map, <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">906 │ │ │ │ </span>color_mapping, <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">907 │ │ │ │ </span>inputs, <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">agent.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">748</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">_take_next_step</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">745 │ │ │ </span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">746 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">except</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">Exception</span> <span style=\"color: #0000ff; text-decoration-color: #0000ff\">as</span> e: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">747 │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> <span style=\"color: #ff00ff; text-decoration-color: #ff00ff\">not</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.handle_parsing_errors: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>748 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">raise</span> e <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">749 │ │ │ </span>text = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">str</span>(e).split(<span style=\"color: #808000; text-decoration-color: #808000\">\"`\"</span>)[<span style=\"color: #0000ff; text-decoration-color: #0000ff\">1</span>] <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">750 │ │ │ </span>observation = <span style=\"color: #808000; text-decoration-color: #808000\">\"Invalid or incomplete response\"</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">751 │ │ │ </span>output = AgentAction(<span style=\"color: #808000; text-decoration-color: #808000\">\"_Exception\"</span>, observation, text) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">agent.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">741</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">_take_next_step</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">738 </span><span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">│ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">\"\"\"</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">739 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">try</span>: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">740 │ │ │ # Call the LLM to see what to do.</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>741 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span>output = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.agent.plan( <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">742 │ │ │ │ </span>intermediate_steps, <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">743 │ │ │ │ </span>callbacks=run_manager.get_child() <span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> run_manager <span style=\"color: #0000ff; text-decoration-color: #0000ff\">else</span> <span style=\"color: #0000ff; text-decoration-color: #0000ff\">None</span>, <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">744 │ │ │ │ </span>**inputs, <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">agent.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">426</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">plan</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">423 </span><span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">│ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">\"\"\"</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">424 │ │ </span>full_inputs = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.get_full_inputs(intermediate_steps, **kwargs) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">425 │ │ </span>full_output = <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.llm_chain.predict(callbacks=callbacks, **full_inputs) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>426 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">return</span> <span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>.output_parser.parse(full_output) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">427 │ </span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">428 │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">async</span> <span style=\"color: #0000ff; text-decoration-color: #0000ff\">def</span> <span style=\"color: #00ff00; text-decoration-color: #00ff00\">aplan</span>( <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">429 │ │ </span><span style=\"color: #00ffff; text-decoration-color: #00ffff\">self</span>, <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #bfbf7f; text-decoration-color: #bfbf7f\">mrkl/</span><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">output_parser.py</span>:<span style=\"color: #0000ff; text-decoration-color: #0000ff\">26</span> in <span style=\"color: #00ff00; text-decoration-color: #00ff00\">parse</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">23 │ │ </span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">24 │ │ </span>match = re.search(regex, text, re.DOTALL) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">25 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">if</span> <span style=\"color: #ff00ff; text-decoration-color: #ff00ff\">not</span> match: <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #800000; text-decoration-color: #800000\">❱ </span>26 <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">│ │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">raise</span> OutputParserException(<span style=\"color: #808000; text-decoration-color: #808000\">f\"Could not parse LLM output: `{</span>text<span style=\"color: #808000; text-decoration-color: #808000\">}`\"</span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">27 │ │ </span>action = match.group(<span style=\"color: #0000ff; text-decoration-color: #0000ff\">1</span>).strip() <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">28 │ │ </span>action_input = match.group(<span style=\"color: #0000ff; text-decoration-color: #0000ff\">2</span>) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">│</span> <span style=\"color: #7f7f7f; text-decoration-color: #7f7f7f\">29 │ │ </span><span style=\"color: #0000ff; text-decoration-color: #0000ff\">return</span> AgentAction(action, action_input.strip(<span style=\"color: #808000; text-decoration-color: #808000\">\" \"</span>).strip(<span style=\"color: #808000; text-decoration-color: #808000\">'\"'</span>), text) <span style=\"color: #800000; text-decoration-color: #800000\">│</span>\n",
"<span style=\"color: #800000; text-decoration-color: #800000\">╰──────────────────────────────────────────────────────────────────────────────────────────────────╯</span>\n",
"<span style=\"color: #ff0000; text-decoration-color: #ff0000; font-weight: bold\">OutputParserException: </span>Could not parse LLM output: \n",
"`[model output was a run of undecodable bytes (mojibake); omitted]`\n",
"</pre>\n"
],
"text/plain": [
"\u001b[31m╭─\u001b[0m\u001b[31m──────────────────────────────\u001b[0m\u001b[31m \u001b[0m\u001b[1;31mTraceback \u001b[0m\u001b[1;2;31m(most recent call last)\u001b[0m\u001b[31m \u001b[0m\u001b[31m───────────────────────────────\u001b[0m\u001b[31m─╮\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/tmp/ipykernel_13699/\u001b[0m\u001b[1;33m3633147670.py\u001b[0m:\u001b[94m1\u001b[0m in \u001b[92m<module>\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[3;31m[Errno 2] No such file or directory: '/tmp/ipykernel_13699/3633147670.py'\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/chains/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[1;33mbase.py\u001b[0m:\u001b[94m241\u001b[0m in \u001b[92mrun\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m238 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[94mreturn\u001b[0m \u001b[96mself\u001b[0m(args[\u001b[94m0\u001b[0m], callbacks=callbacks)[\u001b[96mself\u001b[0m.output_keys[\u001b[94m0\u001b[0m]] \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m239 \u001b[0m\u001b[2m│ │ \u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m240 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mif\u001b[0m kwargs \u001b[95mand\u001b[0m \u001b[95mnot\u001b[0m args: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m241 \u001b[2m│ │ │ \u001b[0m\u001b[94mreturn\u001b[0m \u001b[96mself\u001b[0m(kwargs, callbacks=callbacks)[\u001b[96mself\u001b[0m.output_keys[\u001b[94m0\u001b[0m]] \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m242 \u001b[0m\u001b[2m│ │ \u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m243 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mraise\u001b[0m \u001b[96mValueError\u001b[0m( \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m244 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[33mf\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m`run` supported with either positional arguments or keyword arguments\u001b[0m\u001b[33m\"\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/chains/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[1;33mbase.py\u001b[0m:\u001b[94m142\u001b[0m in \u001b[92m__call__\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m139 \u001b[0m\u001b[2m│ │ │ \u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m140 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mexcept\u001b[0m (\u001b[96mKeyboardInterrupt\u001b[0m, \u001b[96mException\u001b[0m) \u001b[94mas\u001b[0m e: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m141 \u001b[0m\u001b[2m│ │ │ \u001b[0mrun_manager.on_chain_error(e) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m142 \u001b[2m│ │ │ \u001b[0m\u001b[94mraise\u001b[0m e \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m143 \u001b[0m\u001b[2m│ │ \u001b[0mrun_manager.on_chain_end(outputs) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m144 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mreturn\u001b[0m \u001b[96mself\u001b[0m.prep_outputs(inputs, outputs, return_only_outputs) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m145 \u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/chains/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[1;33mbase.py\u001b[0m:\u001b[94m136\u001b[0m in \u001b[92m__call__\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m133 \u001b[0m\u001b[2m│ │ \u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m134 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mtry\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m135 \u001b[0m\u001b[2m│ │ │ \u001b[0moutputs = ( \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m136 \u001b[2m│ │ │ │ \u001b[0m\u001b[96mself\u001b[0m._call(inputs, run_manager=run_manager) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m137 \u001b[0m\u001b[2m│ │ │ │ \u001b[0m\u001b[94mif\u001b[0m new_arg_supported \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m138 \u001b[0m\u001b[2m│ │ │ │ \u001b[0m\u001b[94melse\u001b[0m \u001b[96mself\u001b[0m._call(inputs) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m139 \u001b[0m\u001b[2m│ │ │ \u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[1;33magent.py\u001b[0m:\u001b[94m904\u001b[0m in \u001b[92m_call\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m901 \u001b[0m\u001b[2m│ │ \u001b[0mstart_time = time.time() \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m902 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[2m# We now enter the agent loop (until it returns something).\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m903 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mwhile\u001b[0m \u001b[96mself\u001b[0m._should_continue(iterations, time_elapsed): \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m904 \u001b[2m│ │ │ \u001b[0mnext_step_output = \u001b[96mself\u001b[0m._take_next_step( \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m905 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mname_to_tool_map, \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m906 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mcolor_mapping, \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m907 \u001b[0m\u001b[2m│ │ │ │ \u001b[0minputs, \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[1;33magent.py\u001b[0m:\u001b[94m748\u001b[0m in \u001b[92m_take_next_step\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m745 \u001b[0m\u001b[2m│ │ │ \u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m746 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mexcept\u001b[0m \u001b[96mException\u001b[0m \u001b[94mas\u001b[0m e: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m747 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[94mif\u001b[0m \u001b[95mnot\u001b[0m \u001b[96mself\u001b[0m.handle_parsing_errors: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m748 \u001b[2m│ │ │ │ \u001b[0m\u001b[94mraise\u001b[0m e \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m749 \u001b[0m\u001b[2m│ │ │ \u001b[0mtext = \u001b[96mstr\u001b[0m(e).split(\u001b[33m\"\u001b[0m\u001b[33m`\u001b[0m\u001b[33m\"\u001b[0m)[\u001b[94m1\u001b[0m] \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m750 \u001b[0m\u001b[2m│ │ │ \u001b[0mobservation = \u001b[33m\"\u001b[0m\u001b[33mInvalid or incomplete response\u001b[0m\u001b[33m\"\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m751 \u001b[0m\u001b[2m│ │ │ \u001b[0moutput = AgentAction(\u001b[33m\"\u001b[0m\u001b[33m_Exception\u001b[0m\u001b[33m\"\u001b[0m, observation, text) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[1;33magent.py\u001b[0m:\u001b[94m741\u001b[0m in \u001b[92m_take_next_step\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m738 \u001b[0m\u001b[2;33m│ │ \u001b[0m\u001b[33m\"\"\"\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m739 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mtry\u001b[0m: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m740 \u001b[0m\u001b[2m│ │ │ \u001b[0m\u001b[2m# Call the LLM to see what to do.\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m741 \u001b[2m│ │ │ \u001b[0moutput = \u001b[96mself\u001b[0m.agent.plan( \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m742 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mintermediate_steps, \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m743 \u001b[0m\u001b[2m│ │ │ │ \u001b[0mcallbacks=run_manager.get_child() \u001b[94mif\u001b[0m run_manager \u001b[94melse\u001b[0m \u001b[94mNone\u001b[0m, \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m744 \u001b[0m\u001b[2m│ │ │ │ \u001b[0m**inputs, \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[1;33magent.py\u001b[0m:\u001b[94m426\u001b[0m in \u001b[92mplan\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m423 \u001b[0m\u001b[2;33m│ │ \u001b[0m\u001b[33m\"\"\"\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m424 \u001b[0m\u001b[2m│ │ \u001b[0mfull_inputs = \u001b[96mself\u001b[0m.get_full_inputs(intermediate_steps, **kwargs) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m425 \u001b[0m\u001b[2m│ │ \u001b[0mfull_output = \u001b[96mself\u001b[0m.llm_chain.predict(callbacks=callbacks, **full_inputs) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m426 \u001b[2m│ │ \u001b[0m\u001b[94mreturn\u001b[0m \u001b[96mself\u001b[0m.output_parser.parse(full_output) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m427 \u001b[0m\u001b[2m│ \u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m428 \u001b[0m\u001b[2m│ \u001b[0m\u001b[94masync\u001b[0m \u001b[94mdef\u001b[0m \u001b[92maplan\u001b[0m( \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m429 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[96mself\u001b[0m, \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33m/media/gpt4-pdf-chatbot-langchain/pyenv-langchain/lib/python3.10/site-packages/langchain/agents/\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2;33mmrkl/\u001b[0m\u001b[1;33moutput_parser.py\u001b[0m:\u001b[94m26\u001b[0m in \u001b[92mparse\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m23 \u001b[0m\u001b[2m│ │ \u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m24 \u001b[0m\u001b[2m│ │ \u001b[0mmatch = re.search(regex, text, re.DOTALL) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m25 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mif\u001b[0m \u001b[95mnot\u001b[0m match: \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[31m❱ \u001b[0m26 \u001b[2m│ │ │ \u001b[0m\u001b[94mraise\u001b[0m OutputParserException(\u001b[33mf\u001b[0m\u001b[33m\"\u001b[0m\u001b[33mCould not parse LLM output: `\u001b[0m\u001b[33m{\u001b[0mtext\u001b[33m}\u001b[0m\u001b[33m`\u001b[0m\u001b[33m\"\u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m27 \u001b[0m\u001b[2m│ │ \u001b[0maction = match.group(\u001b[94m1\u001b[0m).strip() \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m28 \u001b[0m\u001b[2m│ │ \u001b[0maction_input = match.group(\u001b[94m2\u001b[0m) \u001b[31m│\u001b[0m\n",
"\u001b[31m│\u001b[0m \u001b[2m29 \u001b[0m\u001b[2m│ │ \u001b[0m\u001b[94mreturn\u001b[0m AgentAction(action, action_input.strip(\u001b[33m\"\u001b[0m\u001b[33m \u001b[0m\u001b[33m\"\u001b[0m).strip(\u001b[33m'\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m'\u001b[0m), text) \u001b[31m│\u001b[0m\n",
"\u001b[31m╰──────────────────────────────────────────────────────────────────────────────────────────────────╯\u001b[0m\n",
"\u001b[1;91mOutputParserException: \u001b[0mCould not parse LLM output: \n",
"`[model output was a run of undecodable bytes (mojibake); omitted]`\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"agent_chain.run(input=\"我们之前聊了什么?\")  # i.e., What did we talk about earlier?"
]
},
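{
"cell_type": "markdown",
"id": "handle-parsing-errors-note",
"metadata": {},
"source": [
"The `OutputParserException` above is raised in `langchain/agents/mrkl/output_parser.py` when the MRKL regex finds no `Action:` / `Action Input:` block to match; here the 8-bit vicuna-13b returned undecodable bytes, so there was nothing to parse. A minimal sketch of a mitigation, grounded only in the `self.handle_parsing_errors` branch visible at `agent.py:746-751` in the traceback: enabling it converts the hard failure into an `Invalid or incomplete response` observation so the agent loop can continue. It does not fix the underlying generation quality. The cell below is illustrative and not from the original run; its cell id and the English re-phrasing of the question are assumptions."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "handle-parsing-errors-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: the AgentExecutor checks `self.handle_parsing_errors` before\n",
"# re-raising OutputParserException (agent.py:747 in the traceback above).\n",
"# With it enabled, the unparseable text is fed back to the model as the\n",
"# observation \"Invalid or incomplete response\" instead of aborting the run.\n",
"agent_chain.handle_parsing_errors = True\n",
"agent_chain.run(input=\"What did we talk about earlier?\")"
]
}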
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}