Update the web-search API interface
This commit is contained in:
parent
43c82ee797
commit
468b3116cd
@@ -116,28 +116,18 @@ def tavily_search(text, config, top_k):

 def zhipu_search(text, config, top_k):
     api_key = config["zhipu_api_key"]
-    msg = [
-        {
-            "role": "user",
-            "content": text
-        }
-    ]
-    tool = "web-search-pro"
-    url = "https://open.bigmodel.cn/api/paas/v4/tools"
-    request_id = str(uuid.uuid4())
-    data = {
-        "request_id": request_id,
-        "tool": tool,
-        "stream": False,
-        "messages": msg
-    }
-    resp = requests.post(
-        url,
-        json=data,
-        headers={'Authorization': api_key},
-        timeout=300
-    )
-    return resp.content.decode()
+    endpoint = " https://open.bigmodel.cn/api/paas/v4/web_search"
+    headers = {
+        "Authorization": f"Bearer {api_key}",
+        "Content-Type": "application/json"
+    }
+    payload = {
+        "search_engine": "search_std",  # specify the dedicated web-search model
+        "search_query": "哪吒2票房"
+    }
+    response = requests.post(endpoint, headers=headers, json=payload)
+    result = response.json()
+    return result


 SEARCH_ENGINES = {
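
Note: the new `zhipu_search` body above hard-codes `"search_query": "哪吒2票房"` (presumably a leftover test query) and keeps a stray leading space inside the endpoint string, so every call ignores the `text` argument. A minimal sketch of how the new `/web_search` endpoint would presumably be wired to the caller's query, assuming only the request fields visible in this diff (`search_engine`, `search_query`) plus standard `requests` options; nothing else about the API is assumed:

    import requests

    def zhipu_search(text, config, top_k):
        # Sketch: query ZhipuAI's web_search endpoint with the caller's text.
        api_key = config["zhipu_api_key"]
        endpoint = "https://open.bigmodel.cn/api/paas/v4/web_search"  # no leading space
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        payload = {
            "search_engine": "search_std",   # engine name taken from this commit
            "search_query": text,            # pass the user's query instead of a fixed string
        }
        response = requests.post(endpoint, headers=headers, json=payload, timeout=300)
        response.raise_for_status()          # fail loudly on HTTP errors
        return response.json()               # decoded dict, as search_result2docs now expects

`top_k` is still applied only downstream in `search_result2docs`, so it is kept in the signature purely for interface compatibility.
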
@@ -154,20 +144,18 @@ def search_result2docs(search_results, engine_name, top_k) -> List[Document]:
     docs = []
     if engine_name == "zhipu_search":
         try:
-            raw_result = json.loads(search_results)
-            results = raw_result["choices"][0]["message"]["tool_calls"][1]["search_result"]
+            # search_results_json = json.loads(search_results)
+            results = search_results["search_result"]
         except (KeyError, IndexError) as e:
             print(f"结构异常: {e}")
             results = []
         # iterate over and process each result
-        for idx, result in enumerate(results[:top_k], 1):
+        for item in results[:top_k]:
             doc = Document(
-                page_content=result["content"],
-                metadata={"link": result["link"], "title": result["title"]}
+                page_content=item['content'],
+                metadata={"link": item['link'], "title": item['title']}
             )
             docs.append(doc)
-            print(f"内容:\n{result}\n")
         return docs
     page_contents_key = "snippet" if engine_name != "tavily" else "content"
     metadata_key = "link" if engine_name != "tavily" else "url"
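
The parsing branch now indexes the decoded response directly via `search_results["search_result"]` instead of walking the old `choices[0]["message"]["tool_calls"][1]` structure, so the caller is expected to pass an already-parsed dict (hence the commented-out `json.loads`). A rough illustration of the assumed response shape and the resulting `Document` objects; the sample payload is made up, and the `Document` import is assumed to be LangChain's (match it to whatever the module already imports):

    from langchain_core.documents import Document  # assumption: same Document class the module already uses

    # Hypothetical decoded response, using only the keys this diff reads:
    # "search_result" -> list of items with "content", "link", "title".
    search_results = {
        "search_result": [
            {"title": "Example page", "link": "https://example.com", "content": "Example snippet ..."},
        ]
    }

    top_k = 3
    docs = []
    for item in search_results["search_result"][:top_k]:
        docs.append(Document(
            page_content=item["content"],
            metadata={"link": item["link"], "title": item["title"]},
        ))
    print(docs)

Note that if `search_results` is still a raw JSON string, indexing it raises `TypeError`, which the existing `except (KeyError, IndexError)` clause does not catch.
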
@@ -196,7 +184,7 @@ def search_engine(query: str, top_k: int = 0, engine_name: str = "", config: dic

     docs = [x for x in search_result2docs(results, engine_name, top_k) if x.page_content and x.page_content.strip()]
     print(f"len(docs): {len(docs)}")
-    print(f"docs: {docs}")
+    print(f"docs: {docs[:150]}")
     return {"docs": docs, "search_engine": engine_name}
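
End-to-end, the updated path would presumably be exercised roughly as below. The parameter names come from the `search_engine` signature in this hunk and the return value from its last line; the config key is the one `zhipu_search` reads, and the engine name matches the `"zhipu_search"` check above (the exact `SEARCH_ENGINES` wiring is not shown in this diff):

    # Hypothetical driver, not part of the commit; assumes the module's functions are imported.
    config = {"zhipu_api_key": "your-api-key"}

    result = search_engine(
        query="哪吒2票房",        # the test query hard-coded in this commit
        top_k=5,
        engine_name="zhipu_search",
        config=config,
    )

    print(result["search_engine"])      # -> "zhipu_search"
    for doc in result["docs"]:          # Document objects built in search_result2docs
        print(doc.metadata["title"], doc.metadata["link"])
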