diff --git a/chains/local_doc_qa.py b/chains/local_doc_qa.py
index d4cdcb0..6921ef8 100644
--- a/chains/local_doc_qa.py
+++ b/chains/local_doc_qa.py
@@ -121,7 +121,7 @@ def similarity_search_with_score_by_vector(
             else:
                 _id0 = self.index_to_docstore_id[id]
                 doc0 = self.docstore.search(_id0)
-                doc.page_content += doc0.page_content
+                doc.page_content += " " + doc0.page_content
         if not isinstance(doc, Document):
             raise ValueError(f"Could not find document for id {_id}, got {doc}")
         doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
diff --git a/models/chatglm_llm.py b/models/chatglm_llm.py
index 49ce97c..a63a3d1 100644
--- a/models/chatglm_llm.py
+++ b/models/chatglm_llm.py
@@ -69,19 +69,17 @@ class ChatGLM(LLM):
               history: List[List[str]] = [],
               streaming: bool = STREAMING):  # -> Tuple[str, List[List[str]]]:
         if streaming:
-            for inum, (stream_resp, _) in enumerate(self.model.stream_chat(
+            history += [[]]
+            for stream_resp, _ in self.model.stream_chat(
                     self.tokenizer,
                     prompt,
                     history=history[-self.history_len:-1] if self.history_len > 0 else [],
                     max_length=self.max_token,
                     temperature=self.temperature,
                     top_p=self.top_p,
-            )):
+            ):
                 torch_gc()
-                if inum == 0:
-                    history += [[prompt, stream_resp]]
-                else:
-                    history[-1] = [prompt, stream_resp]
+                history[-1] = [prompt, stream_resp]
                 yield stream_resp, history
             torch_gc()
         else:
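
The second hunk swaps the first-iteration branch for a seed-then-overwrite
pattern: one empty slot is appended to history before streaming starts, and
each partial reply overwrites that same slot, while the pre-existing
history[-self.history_len:-1] slice keeps the still-empty slot out of the
context sent to the model. A minimal sketch of the pattern, assuming the
stream-chat interface yields (partial_reply, state) tuples; fake_stream_chat
below is a hypothetical stand-in for self.model.stream_chat, not part of
this repo:

    from typing import Iterator, List, Tuple

    def fake_stream_chat(prompt: str) -> Iterator[Tuple[str, None]]:
        # Hypothetical stub: yields progressively longer partial replies,
        # mimicking the shape of ChatGLM's stream_chat output.
        partial = ""
        for token in ("Hello", ",", " world"):
            partial += token
            yield partial, None

    def stream(prompt: str,
               history: List[List[str]]) -> Iterator[Tuple[str, List[List[str]]]]:
        history += [[]]  # seed one empty slot up front ...
        for stream_resp, _ in fake_stream_chat(prompt):
            history[-1] = [prompt, stream_resp]  # ... and overwrite it each step
            yield stream_resp, history

    history: List[List[str]] = []
    for resp, _ in stream("hi", history):
        print(resp)   # "Hello", "Hello,", "Hello, world"
    print(history)    # [['hi', 'Hello, world']]

Either way the generator yields after every partial reply, but history now
gains exactly one new [prompt, reply] entry per prompt without the
inum == 0 bookkeeping.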