更新web-search的api接口

This commit is contained in:
GuanYuankai 2025-04-18 16:11:37 +08:00
parent 43c82ee797
commit 468b3116cd
1 changed file with 17 additions and 29 deletions

View File

@@ -116,28 +116,18 @@ def tavily_search(text, config, top_k):
def zhipu_search(text, config, top_k):
    """Search the web via Zhipu's (BigModel) web_search API.

    Args:
        text: Query string to search for.
        config: Settings dict; must contain "zhipu_api_key".
        top_k: Requested result count. The API call itself does not use it;
            results are truncated downstream (the consumer slices
            results[:top_k]), so it is accepted only for interface
            consistency with the other search backends.

    Returns:
        dict: Parsed JSON response from the API; the downstream consumer
        reads the "search_result" key from it.
    """
    api_key = config["zhipu_api_key"]
    # Bug fix: the endpoint string had a leading space (" https://..."),
    # which makes requests raise an invalid-URL error at call time.
    endpoint = "https://open.bigmodel.cn/api/paas/v4/web_search"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    payload = {
        "search_engine": "search_std",  # Zhipu's standard web-search engine
        # Bug fix: the query was hard-coded to a sample string ("哪吒2票房");
        # use the caller-supplied text instead.
        "search_query": text,
    }
    # timeout restored from the previous implementation; without it a hung
    # connection would block the caller indefinitely.
    response = requests.post(endpoint, headers=headers, json=payload, timeout=300)
    return response.json()
SEARCH_ENGINES = {
@@ -154,20 +144,18 @@ def search_result2docs(search_results, engine_name, top_k) -> List[Document]:
docs = []
if engine_name == "zhipu_search":
try:
raw_result = json.loads(search_results)
results = raw_result["choices"][0]["message"]["tool_calls"][1]["search_result"]
# search_results_json = json.loads(search_results)
results = search_results["search_result"]
except (KeyError, IndexError) as e:
print(f"结构异常: {e}")
results = []
# 遍历并处理每个结果
for idx, result in enumerate(results[:top_k], 1):
for item in results[:top_k]:
doc = Document(
page_content=result["content"],
metadata={"link": result["link"], "title": result["title"]}
page_content=item['content'],
metadata={"link": item['link'], "title": item['title']}
)
docs.append(doc)
print(f"内容:\n{result}\n")
return docs
page_contents_key = "snippet" if engine_name != "tavily" else "content"
metadata_key = "link" if engine_name != "tavily" else "url"
@@ -196,7 +184,7 @@ def search_engine(query: str, top_k: int = 0, engine_name: str = "", config: dic
docs = [x for x in search_result2docs(results, engine_name, top_k) if x.page_content and x.page_content.strip()]
print(f"len(docs): {len(docs)}")
print(f"docs: {docs}")
print(f"docs: {docs[:150]}")
return {"docs": docs, "search_engine": engine_name}