fix api and webui: (#1435)
1. fix #1431: improve the knowledge base chat and search engine chat API endpoints so that docs are no longer returned with every streamed chunk
2. startup.py: control the log level according to configs.log_verbose
3. fix a bug in /llm_model/list_models: with only a single parameter, FastAPI did not treat the request body as JSON, which caused the view function to fail
parent 22ff073309
commit 6a03611750
@@ -86,9 +86,8 @@ async def knowledge_base_chat(query: str = Body(..., description="用户输入",
         if stream:
             async for token in callback.aiter():
                 # Use server-sent-events to stream the response
-                yield json.dumps({"answer": token,
-                                  "docs": source_documents},
-                                 ensure_ascii=False)
+                yield json.dumps({"answer": token}, ensure_ascii=False)
+            yield json.dumps({"docs": source_documents}, ensure_ascii=False)
         else:
             answer = ""
             async for token in callback.aiter():
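With this change a streaming client sees a sequence of {"answer": ...} chunks followed by one {"docs": [...]} chunk after the token stream ends, instead of the full document list repeated on every token. A minimal consumption sketch follows; the base URL, the payload fields, and the assumption that each streamed chunk arrives as a complete JSON object are illustrative, not taken from this diff.

import json
import httpx

# Hypothetical request against a locally running API server.
payload = {"query": "什么是知识库?", "knowledge_base_name": "samples", "stream": True}
answer, docs = "", []
with httpx.stream("POST", "http://127.0.0.1:7861/chat/knowledge_base_chat",
                  json=payload, timeout=60) as resp:
    for chunk in resp.iter_text():
        if not chunk.strip():
            continue
        data = json.loads(chunk)
        if "answer" in data:      # streamed token chunks
            answer += data["answer"]
        if "docs" in data:        # emitted once, after the token stream
            docs = data["docs"]
print(answer)
print("\n\n".join(docs))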
@@ -121,9 +121,8 @@ async def search_engine_chat(query: str = Body(..., description="用户输入",
         if stream:
             async for token in callback.aiter():
                 # Use server-sent-events to stream the response
-                yield json.dumps({"answer": token,
-                                  "docs": source_documents},
-                                 ensure_ascii=False)
+                yield json.dumps({"answer": token}, ensure_ascii=False)
+            yield json.dumps({"docs": source_documents}, ensure_ascii=False)
         else:
             answer = ""
             async for token in callback.aiter():
@@ -25,7 +25,6 @@ from server.knowledge_base.utils import (
     list_kbs_from_folder, list_files_from_folder,
 )
 from server.utils import embedding_device
-from typing import List, Union, Dict
+from typing import List, Union, Dict, Optional
-
 
 
@@ -5,7 +5,8 @@ import httpx
 
 
 def list_llm_models(
-    controller_address: str = Body(None, description="Fastchat controller服务器地址", examples=[fschat_controller_address()])
+    controller_address: str = Body(None, description="Fastchat controller服务器地址", examples=[fschat_controller_address()]),
+    placeholder: str = Body(None, description="该参数未使用,占位用"),
 ) -> BaseResponse:
     '''
     从fastchat controller获取已加载模型列表
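The unused placeholder works around FastAPI's handling of a lone Body parameter: with a single non-embedded Body argument the request body is expected to be the bare value, so a client that posts {"controller_address": ...} does not match the endpoint. Adding a second Body parameter (or using Body(..., embed=True)) makes FastAPI expect a JSON object keyed by parameter names. A minimal sketch with hypothetical demo endpoints:

from fastapi import Body, FastAPI

app = FastAPI()

@app.post("/demo/single")
def single(controller_address: str = Body(None)):
    # One non-embedded Body parameter: FastAPI expects the bare value as the
    # request body, so posting {"controller_address": "..."} fails validation.
    return {"controller_address": controller_address}

@app.post("/demo/keyed")
def keyed(controller_address: str = Body(None),
          placeholder: str = Body(None, description="unused, keeps the body keyed")):
    # Two Body parameters: the body must be a JSON object keyed by parameter
    # name, e.g. {"controller_address": "http://127.0.0.1:20001"}.
    return {"controller_address": controller_address}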
@@ -18,7 +18,7 @@ except:
 
 sys.path.append(os.path.dirname(os.path.dirname(__file__)))
 from configs.model_config import EMBEDDING_MODEL, llm_model_dict, LLM_MODEL, LOG_PATH, \
-    logger
+    logger, log_verbose
 from configs.server_config import (WEBUI_SERVER, API_SERVER, FSCHAT_CONTROLLER,
                                    FSCHAT_OPENAI_API, HTTPX_DEFAULT_TIMEOUT)
 from server.utils import (fschat_controller_address, fschat_model_worker_address,
@@ -536,7 +536,7 @@ async def start_main_server():
     def process_count():
         return len(processes) + len(processes["online_api"]) + len(processes["model_worker"]) - 2
 
-    if args.quiet:
+    if args.quiet or not log_verbose:
         log_level = "ERROR"
     else:
         log_level = "INFO"
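In effect, INFO-level output now requires both that --quiet is not passed and that log_verbose is enabled in the configs. A small sketch of the resulting behavior, with stand-in objects replacing the real configs and argparse values:

from types import SimpleNamespace

log_verbose = False                   # stand-in for the configs switch
args = SimpleNamespace(quiet=False)   # stand-in for the parsed CLI flags

log_level = "ERROR" if (args.quiet or not log_verbose) else "INFO"
print(log_level)  # "ERROR": without log_verbose, output stays quiet even without --quiet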
@@ -80,10 +80,8 @@ def dialogue_page(api: ApiRequest):
             if x in config_models:
                 config_models.remove(x)
         llm_models = running_models + config_models
-        if "prev_llm_model" not in st.session_state:
-            index = llm_models.index(LLM_MODEL)
-        else:
-            index = 0
+        cur_model = st.session_state.get("prev_llm_model", LLM_MODEL)
+        index = llm_models.index(cur_model)
         llm_model = st.selectbox("选择LLM模型:",
                                  llm_models,
                                  index,
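The replacement keeps the previously selected model selected across Streamlit reruns instead of falling back to the first entry once prev_llm_model has been recorded. A small sketch of the lookup, using a plain dict in place of st.session_state and hypothetical model names:

LLM_MODEL = "chatglm2-6b"                     # assumed configured default
llm_models = ["chatglm2-6b", "qwen-7b-chat"]  # hypothetical candidate list

session_state = {}                            # first run: nothing recorded yet
cur_model = session_state.get("prev_llm_model", LLM_MODEL)
print(llm_models.index(cur_model))            # 0 -> default model is preselected

session_state["prev_llm_model"] = "qwen-7b-chat"
cur_model = session_state.get("prev_llm_model", LLM_MODEL)
print(llm_models.index(cur_model))            # 1 -> the user's last choice stays selected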
@@ -155,10 +153,11 @@ def dialogue_page(api: ApiRequest):
             for d in api.knowledge_base_chat(prompt, selected_kb, kb_top_k, score_threshold, history, model=llm_model):
                 if error_msg := check_error_msg(d): # check whether error occured
                     st.error(error_msg)
-                text += d["answer"]
-                chat_box.update_msg(text, 0)
-                chat_box.update_msg("\n\n".join(d["docs"]), 1, streaming=False)
+                elif chunk := d.get("answer"):
+                    text += chunk
+                    chat_box.update_msg(text, 0)
             chat_box.update_msg(text, 0, streaming=False)
+            chat_box.update_msg("\n\n".join(d.get("docs", [])), 1, streaming=False)
         elif dialogue_mode == "搜索引擎问答":
             chat_box.ai_say([
                 f"正在执行 `{search_engine}` 搜索...",
@@ -168,11 +167,11 @@ def dialogue_page(api: ApiRequest):
             for d in api.search_engine_chat(prompt, search_engine, se_top_k, model=llm_model):
                 if error_msg := check_error_msg(d): # check whether error occured
                     st.error(error_msg)
-                else:
-                    text += d["answer"]
+                elif chunk := d.get("answer"):
+                    text += chunk
                     chat_box.update_msg(text, 0)
-                    chat_box.update_msg("\n\n".join(d["docs"]), 1, streaming=False)
             chat_box.update_msg(text, 0, streaming=False)
+            chat_box.update_msg("\n\n".join(d.get("docs", [])), 1, streaming=False)
 
         now = datetime.now()
         with st.sidebar: