diff --git a/README.md b/README.md
index 619348d..b630adc 100644
--- a/README.md
+++ b/README.md
@@ -221,6 +221,6 @@ Web UI supports the following features:
 - [x] VUE frontend
 
 ## Project discussion group
-![QR code](img/qr_code_18.jpg)
+![QR code](img/qr_code_21.jpg)
 
 🎉 langchain-ChatGLM project discussion group. If you are also interested in this project, you are welcome to join the group chat and take part in the discussion.
diff --git a/chains/local_doc_qa.py b/chains/local_doc_qa.py
index 8b69e69..ecd5e88 100644
--- a/chains/local_doc_qa.py
+++ b/chains/local_doc_qa.py
@@ -130,7 +130,7 @@ def similarity_search_with_score_by_vector(
             else:
                 _id0 = self.index_to_docstore_id[id]
                 doc0 = self.docstore.search(_id0)
-                doc.page_content += doc0.page_content
+                doc.page_content += " " + doc0.page_content
         if not isinstance(doc, Document):
             raise ValueError(f"Could not find document for id {_id}, got {doc}")
         doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
index 9808133..83e52ab 100644
--- a/docs/INSTALL.md
+++ b/docs/INSTALL.md
@@ -33,6 +33,10 @@
 $ cd langchain-ChatGLM
 
 # PDF loading in this project has been switched from detectron2 to paddleocr; if detectron2 was previously installed, uninstall it first to avoid a tools conflict
 $ pip uninstall detectron2
+# Check paddleocr dependencies: on Linux, paddleocr depends on libX11 and libXext
+$ yum install libX11
+$ yum install libXext
+
 # Install dependencies
 $ pip install -r requirements.txt
diff --git a/img/qr_code_18.jpg b/img/qr_code_18.jpg
deleted file mode 100644
index 195d89d..0000000
Binary files a/img/qr_code_18.jpg and /dev/null differ
diff --git a/img/qr_code_21.jpg b/img/qr_code_21.jpg
new file mode 100644
index 0000000..a8d374b
Binary files /dev/null and b/img/qr_code_21.jpg differ
diff --git a/models/chatglm_llm.py b/models/chatglm_llm.py
index ae5b6dd..ea53737 100644
--- a/models/chatglm_llm.py
+++ b/models/chatglm_llm.py
@@ -54,7 +54,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
             stopping_criteria_list.append(listenerQueue)
 
         if streaming:
-
+            history += [[]]
             for inum, (stream_resp, _) in enumerate(self.checkPoint.model.stream_chat(
                     self.checkPoint.tokenizer,
                     prompt,
@@ -64,10 +64,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
                     stopping_criteria=stopping_criteria_list
             )):
                 self.checkPoint.clear_torch_cache()
-                if inum == 0:
-                    history += [[prompt, stream_resp]]
-                else:
-                    history[-1] = [prompt, stream_resp]
+                history[-1] = [prompt, stream_resp]
                 answer_result = AnswerResult()
                 answer_result.history = history
                 answer_result.llm_output = {"answer": stream_resp}
diff --git a/views/src/views/chat/index.vue b/views/src/views/chat/index.vue
index 2389f59..2087357 100644
--- a/views/src/views/chat/index.vue
+++ b/views/src/views/chat/index.vue
@@ -116,6 +116,7 @@ async function handleSubmit() {
 
 async function onConversation() {
   const message = prompt.value
+  history.value = []
   if (usingContext.value) {
     for (let i = 0; i < dataSources.value.length; i = i + 2) {
       if (!i)
diff --git a/webui.py b/webui.py
index d535c9d..ea1fe03 100644
--- a/webui.py
+++ b/webui.py
@@ -466,4 +466,4 @@ with gr.Blocks(css=block_css, theme=gr.themes.Default(**default_theme_args)) as
          server_port=7860,
          show_api=False,
          share=False,
-         inbrowser=False))
+         inbrowser=False))
\ No newline at end of file
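
The chains/local_doc_qa.py hunk above joins the content of adjacent chunks with a space during context expansion, so the last word of one chunk no longer fuses with the first word of the next. A minimal sketch of the idea, using plain dicts as stand-ins for the FAISS-backed docstore and index_to_docstore_id:

    # Simplified stand-ins for the FAISS-backed objects in local_doc_qa.py.
    docstore = {"a": "PaddleOCR extracts text", "b": "from scanned PDFs."}
    index_to_docstore_id = {0: "a", 1: "b"}

    def expand_chunks(id_seq):
        content = ""
        for idx in id_seq:
            chunk = docstore[index_to_docstore_id[idx]]
            # The fix: join with a space so "text" + "from" does not
            # collapse into "textfrom" when neighbouring chunks are merged.
            content = chunk if not content else content + " " + chunk
        return content

    print(expand_chunks([0, 1]))  # PaddleOCR extracts text from scanned PDFs.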
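
The docs/INSTALL.md addition documents paddleocr's native dependencies on yum-based distributions. As a convenience that is not part of the patch, whether those shared libraries are already present can be checked from the Python standard library before installing:

    # Not part of the patch: check for the X11 libraries paddleocr needs.
    # The package names match the yum commands in docs/INSTALL.md; on
    # apt-based systems the package names would differ.
    from ctypes.util import find_library

    for lib, package in [("X11", "libX11"), ("Xext", "libXext")]:
        found = find_library(lib)
        print(f"lib{lib}: " + (f"found ({found})" if found
                               else f"missing, try `yum install {package}`"))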
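
The two models/chatglm_llm.py hunks replace the first-iteration branch with a single pattern: reserve an empty slot in history before streaming starts (history += [[]]), then overwrite that slot with [prompt, stream_resp] on every step. A self-contained sketch, where fake_stream stands in for self.checkPoint.model.stream_chat:

    # fake_stream is a stand-in for stream_chat, which yields growing
    # partial replies for the same prompt.
    def fake_stream(prompt):
        reply = "Hello! How can I help?"
        for i in range(1, len(reply) + 1):
            yield reply[:i]

    def stream_answer(prompt, history):
        history += [[]]  # reserve this turn's slot once, up front
        for stream_resp in fake_stream(prompt):
            # Overwrite the reserved slot; no `if inum == 0` branch needed.
            history[-1] = [prompt, stream_resp]
            yield stream_resp, history

    history = []
    for resp, _ in stream_answer("Hi", history):
        pass
    print(history)  # [['Hi', 'Hello! How can I help?']]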
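
The views/src/views/chat/index.vue change resets history.value at the start of onConversation, so each request rebuilds its context from dataSources instead of accumulating stale turns; this appears to pair with the backend change above, where a fresh history slot is now appended unconditionally. A loose Python analogue (hypothetical, guessing at the pairwise question/answer rebuild that the truncated loop suggests):

    def on_conversation(data_sources):
        history = []  # mirrors `history.value = []` in the Vue handler
        for i in range(0, len(data_sources), 2):
            history.append([data_sources[i], data_sources[i + 1]])
        return history

    print(on_conversation(["Q1", "A1", "Q2", "A2"]))  # [['Q1', 'A1'], ['Q2', 'A2']]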