Fix bugs (#570)

1. Fixed the model chat history keeping only a single entry by correcting the model loading code.
2. Fixed the same single-entry history issue; one spot in the webui was at fault and has been corrected.
3. Fixed the bug where adding a single record to a knowledge base failed because of the cached knowledge base name.
parent 9dee1f28c0
commit 57b4f9306d
@@ -278,7 +278,7 @@ class LocalDocQA:
             if not one_content_segmentation:
                 text_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
                 docs = text_splitter.split_documents(docs)
-            if os.path.isdir(vs_path):
+            if os.path.isdir(vs_path) and os.path.isfile(vs_path+"/index.faiss"):
                 vector_store = load_vector_store(vs_path, self.embeddings)
                 vector_store.add_documents(docs)
             else:
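This hunk is the third fix from the commit message: because the knowledge base name was cached, the store directory could already exist before any index had been written, and load_vector_store would then fail on an effectively empty directory. A minimal sketch of the stricter guard, assuming a LangChain-style FAISS store whose save_local() writes an index.faiss file into the directory (vector_store_exists is a hypothetical helper, not part of the repo):

    import os

    def vector_store_exists(vs_path: str) -> bool:
        # The directory alone does not prove a saved store exists; a FAISS
        # store persisted with save_local() also contains index.faiss.
        return os.path.isdir(vs_path) and os.path.isfile(os.path.join(vs_path, "index.faiss"))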
@@ -298,7 +298,10 @@ class LocalDocQA:
         vector_store.score_threshold = self.score_threshold
         related_docs_with_score = vector_store.similarity_search_with_score(query, k=self.top_k)
         torch_gc()
-        prompt = generate_prompt(related_docs_with_score, query)
+        if len(related_docs_with_score)>0:
+            prompt = generate_prompt(related_docs_with_score, query)
+        else:
+            prompt = query
         for answer_result in self.llm.generatorAnswer(prompt=prompt, history=chat_history,
                                                       streaming=streaming):
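This hunk makes the answer path tolerant of an empty retrieval result: with a score threshold in effect, similarity_search_with_score may return no documents, and the knowledge-base prompt would otherwise be built from empty context. The fixed control flow as a standalone sketch (build_prompt is a hypothetical name; generate_prompt is the module's own prompt builder):

    def build_prompt(related_docs_with_score, query):
        # Only wrap the query in a knowledge-base prompt when retrieval
        # actually returned context; otherwise send the raw query to the LLM.
        if len(related_docs_with_score) > 0:
            return generate_prompt(related_docs_with_score, query)
        return query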
@@ -52,7 +52,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
         for inum, (stream_resp, _) in enumerate(self.checkPoint.model.stream_chat(
                 self.checkPoint.tokenizer,
                 prompt,
-                history=history[-self.history_len:-1] if self.history_len > 0 else [],
+                history=history[-self.history_len:] if self.history_len > 0 else [],
                 max_length=self.max_token,
                 temperature=self.temperature
         )):
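This is the core of the "history keeps only one entry" bug: the -1 end index silently dropped the most recent exchange every time the history was trimmed for ChatGLM's stream_chat. A quick illustration of the difference between the two slices:

    history = [["q1", "a1"], ["q2", "a2"], ["q3", "a3"]]
    history_len = 10

    history[-history_len:-1]  # [['q1', 'a1'], ['q2', 'a2']] -- latest turn lost
    history[-history_len:]    # all three turns kept, as intended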
@@ -55,7 +55,7 @@ class MOSSLLM(BaseAnswer, LLM, ABC):
                       history: List[List[str]] = [],
                       streaming: bool = False):
         if len(history) > 0:
-            history = history[-self.history_len:-1] if self.history_len > 0 else []
+            history = history[-self.history_len:] if self.history_len > 0 else []
             prompt_w_history = str(history)
             prompt_w_history += '<|Human|>: ' + prompt + '<eoh>'
         else:
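The MOSS wrapper contained the identical off-by-one slice, fixed the same way, so both model backends now retain the full trimmed history when building the prompt.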
webui.py
@@ -87,7 +87,7 @@ def get_answer(query, vs_path, history, mode, score_threshold=VECTOR_SEARCH_SCOR
             yield history + [[query,
                               "请选择知识库后进行测试,当前未选择知识库。"]], ""
         else:
-            for answer_result in local_doc_qa.llm.generatorAnswer(prompt=query, history=history[:-1],
+            for answer_result in local_doc_qa.llm.generatorAnswer(prompt=query, history=history,
                                                                   streaming=streaming):
                 resp = answer_result.llm_output["answer"]
                 history = answer_result.history
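On the webui side, get_answer passed history[:-1] into generatorAnswer, trimming the latest exchange a second time on top of the model-side slices above; this appears to be the "one spot in the webui" the commit message refers to. Passing the full history leaves any trimming to the model wrapper.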