merge master

commit e352a04cbb
@@ -221,6 +221,6 @@ The Web UI provides the following features:
 - [x] Vue front end

 ## Project discussion group

 🎉 langchain-ChatGLM project discussion group: if you are also interested in this project, you are welcome to join the group chat to discuss and exchange ideas.
@@ -130,7 +130,7 @@ def similarity_search_with_score_by_vector(
             else:
                 _id0 = self.index_to_docstore_id[id]
                 doc0 = self.docstore.search(_id0)
-                doc.page_content += doc0.page_content
+                doc.page_content += " " + doc0.page_content
             if not isinstance(doc, Document):
                 raise ValueError(f"Could not find document for id {_id}, got {doc}")
             doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
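The one-line change above joins the page content of an adjacent chunk with an explicit space, so the end of one chunk and the start of the next no longer run together. A minimal sketch of the effect, using plain strings in place of `Document.page_content` (the variable names below are illustrative, not from the project):

```python
# Minimal sketch: plain strings stand in for Document.page_content.
chunk = "the context window of the matched chunk ends here"
neighbour = "The neighbouring chunk starts a new sentence."

# Before the change: direct concatenation can glue two words together.
merged_old = chunk + neighbour        # "...ends hereThe neighbouring chunk..."

# After the change: a single space keeps the chunk boundary readable.
merged_new = chunk + " " + neighbour  # "...ends here The neighbouring chunk..."

print(merged_old)
print(merged_new)
```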
@@ -33,6 +33,10 @@ $ cd langchain-ChatGLM
 # PDF loading in this project now uses paddleocr instead of the previous detectron2; if detectron2 was installed before, uninstall it first to avoid a tools conflict
 $ pip uninstall detectron2

+# Check the paddleocr dependencies: on Linux, paddleocr requires libX11 and libXext
+$ yum install libX11
+$ yum install libXext
+
 # Install dependencies
 $ pip install -r requirements.txt
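The libX11/libXext requirement added above can be verified before installing the Python dependencies; a small sketch using only the standard library (this check is not part of the project) is:

```python
# Quick check (not part of the project) that the shared libraries paddleocr
# needs on Linux are actually installed.
from ctypes.util import find_library

for lib in ("X11", "Xext"):
    path = find_library(lib)  # e.g. "libX11.so.6", or None if missing
    if path:
        print(f"lib{lib}: found ({path})")
    else:
        print(f"lib{lib}: missing - install it, e.g. `yum install lib{lib}`")
```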
Binary image file changed (not shown). Before: 278 KiB. After: 256 KiB.
@@ -54,7 +54,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
         stopping_criteria_list.append(listenerQueue)

         if streaming:
-
+            history += [[]]
             for inum, (stream_resp, _) in enumerate(self.checkPoint.model.stream_chat(
                     self.checkPoint.tokenizer,
                     prompt,
@@ -64,10 +64,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
                     stopping_criteria=stopping_criteria_list
             )):
                 self.checkPoint.clear_torch_cache()
-                if inum == 0:
-                    history += [[prompt, stream_resp]]
-                else:
-                    history[-1] = [prompt, stream_resp]
+                history[-1] = [prompt, stream_resp]
                 answer_result = AnswerResult()
                 answer_result.history = history
                 answer_result.llm_output = {"answer": stream_resp}
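Together with the `history += [[]]` added in the previous hunk, the streaming loop no longer needs the `if inum == 0` branch: one empty placeholder entry is appended before streaming starts, and each streamed partial response simply overwrites `history[-1]`. A minimal sketch of that pattern, with a plain generator standing in for `self.checkPoint.model.stream_chat` (the generator and its tokens are made up for illustration):

```python
# Minimal sketch of the simplified streaming-history update; a plain generator
# stands in for self.checkPoint.model.stream_chat.
def fake_stream_chat(prompt):
    partial = ""
    for token in ("Hello", ", ", "world", "!"):
        partial += token
        yield partial  # each step yields the response accumulated so far

history = [["earlier question", "earlier answer"]]
prompt = "new question"

history += [[]]  # placeholder entry appended once, before the loop
for stream_resp in fake_stream_chat(prompt):
    # no first-iteration special case: the placeholder is overwritten every step
    history[-1] = [prompt, stream_resp]

print(history[-1])  # ['new question', 'Hello, world!']
```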
@@ -116,6 +116,7 @@ async function handleSubmit() {
 async function onConversation() {
   const message = prompt.value
+  history.value = []
   if (usingContext.value) {
     for (let i = 0; i < dataSources.value.length; i = i + 2) {
       if (!i)