diff --git a/README.md b/README.md
index 6fc5538..f2dbfd7 100644
--- a/README.md
+++ b/README.md
@@ -227,6 +227,6 @@ Web UI 可以实现如下功能:
 - [x] VUE 前端
 
 ## 项目交流群
-![二维码](img/qr_code_27.jpg)
+![二维码](img/qr_code_28.jpg)
 
 🎉 langchain-ChatGLM 项目交流群,如果你也对本项目感兴趣,欢迎加入群聊参与讨论交流。
diff --git a/chains/local_doc_qa.py b/chains/local_doc_qa.py
index e0411f0..25016f7 100644
--- a/chains/local_doc_qa.py
+++ b/chains/local_doc_qa.py
@@ -278,7 +278,7 @@ class LocalDocQA:
             if not one_content_segmentation:
                 text_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
                 docs = text_splitter.split_documents(docs)
-            if os.path.isdir(vs_path):
+            if os.path.isdir(vs_path) and os.path.isfile(vs_path+"/index.faiss"):
                 vector_store = load_vector_store(vs_path, self.embeddings)
                 vector_store.add_documents(docs)
             else:
@@ -298,7 +298,10 @@ class LocalDocQA:
         vector_store.score_threshold = self.score_threshold
         related_docs_with_score = vector_store.similarity_search_with_score(query, k=self.top_k)
         torch_gc()
-        prompt = generate_prompt(related_docs_with_score, query)
+        if len(related_docs_with_score)>0:
+            prompt = generate_prompt(related_docs_with_score, query)
+        else:
+            prompt = query
 
         for answer_result in self.llm.generatorAnswer(prompt=prompt, history=chat_history,
                                                       streaming=streaming):
diff --git a/img/qr_code_28.jpg b/img/qr_code_28.jpg
new file mode 100644
index 0000000..b79bf45
Binary files /dev/null and b/img/qr_code_28.jpg differ
diff --git a/models/chatglm_llm.py b/models/chatglm_llm.py
index 6c16d25..6e84234 100644
--- a/models/chatglm_llm.py
+++ b/models/chatglm_llm.py
@@ -52,7 +52,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
             for inum, (stream_resp, _) in enumerate(self.checkPoint.model.stream_chat(
                     self.checkPoint.tokenizer,
                     prompt,
-                    history=history[-self.history_len:-1] if self.history_len > 0 else [],
+                    history=history[-self.history_len:] if self.history_len > 0 else [],
                     max_length=self.max_token,
                     temperature=self.temperature
             )):
diff --git a/models/moss_llm.py b/models/moss_llm.py
index 084e761..c608edb 100644
--- a/models/moss_llm.py
+++ b/models/moss_llm.py
@@ -55,7 +55,7 @@ class MOSSLLM(BaseAnswer, LLM, ABC):
                           history: List[List[str]] = [],
                           streaming: bool = False):
         if len(history) > 0:
-            history = history[-self.history_len:-1] if self.history_len > 0 else []
+            history = history[-self.history_len:] if self.history_len > 0 else []
             prompt_w_history = str(history)
             prompt_w_history += '<|Human|>: ' + prompt + '<eoh>'
         else:
diff --git a/webui.py b/webui.py
index 49b764b..ebcc5e1 100644
--- a/webui.py
+++ b/webui.py
@@ -87,7 +87,7 @@ def get_answer(query, vs_path, history, mode, score_threshold=VECTOR_SEARCH_SCOR
             yield history + [[query, "请选择知识库后进行测试,当前未选择知识库。"]], ""
     else:
-        for answer_result in local_doc_qa.llm.generatorAnswer(prompt=query, history=history[:-1],
+        for answer_result in local_doc_qa.llm.generatorAnswer(prompt=query, history=history,
                                                               streaming=streaming):
            resp = answer_result.llm_output["answer"]
            history = answer_result.history
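
For reference only (not part of the patch above): a minimal Python sketch of how the two history slices touched in models/chatglm_llm.py and models/moss_llm.py differ. The sample data below is invented for illustration and does not come from the repository.

    # Illustration only -- sample chat history invented for this sketch.
    history = [["q1", "a1"], ["q2", "a2"], ["q3", "a3"]]
    history_len = 3

    # Old slice: trims to the last history_len turns but also drops the final turn.
    old = history[-history_len:-1] if history_len > 0 else []
    # New slice: keeps the last history_len turns, including the most recent one.
    new = history[-history_len:] if history_len > 0 else []

    print(old)  # [['q1', 'a1'], ['q2', 'a2']]
    print(new)  # [['q1', 'a1'], ['q2', 'a2'], ['q3', 'a3']]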