diff --git a/chains/local_doc_qa.py b/chains/local_doc_qa.py
index f9b7207..7554854 100644
--- a/chains/local_doc_qa.py
+++ b/chains/local_doc_qa.py
@@ -17,6 +17,7 @@ VECTOR_SEARCH_TOP_K = 6
 
 # LLM input history length
 LLM_HISTORY_LEN = 3
+<<<<<<< HEAD
 
 def load_file(filepath):
     if filepath.lower().endswith(".pdf"):
@@ -29,6 +30,8 @@ def load_file(filepath):
         docs = loader.load_and_split(text_splitter=textsplitter)
     return docs
 
+=======
+>>>>>>> cba44ca (修复 webui.py 中 llm_history_len 和 vector_search_top_k 显示值与启动设置默认值不一致的问题)
 
 class LocalDocQA:
     llm: object = None
diff --git a/embedding/text2vec-large-chinese b/embedding/text2vec-large-chinese
new file mode 160000
index 0000000..b23825b
--- /dev/null
+++ b/embedding/text2vec-large-chinese
@@ -0,0 +1 @@
+Subproject commit b23825b5841818578dd225b5420c4b026ff58aa3
diff --git a/llm/chatglm-6b b/llm/chatglm-6b
new file mode 160000
index 0000000..4de8efe
--- /dev/null
+++ b/llm/chatglm-6b
@@ -0,0 +1 @@
+Subproject commit 4de8efebc837788ffbfc0a15663de8553da362a2
diff --git a/webui.py b/webui.py
index 9f143b8..f4eacaa 100644
--- a/webui.py
+++ b/webui.py
@@ -15,7 +15,15 @@ LLM_HISTORY_LEN = 3
 <<<<<<< HEAD
 
 =======
+<<<<<<< HEAD
 >>>>>>> f87a5f5 (fix bug in webui.py)
+=======
+# return top-k text chunk from vector store
+VECTOR_SEARCH_TOP_K = 6
+
+# LLM input history length
+LLM_HISTORY_LEN = 3
+>>>>>>> cba44ca (修复 webui.py 中 llm_history_len 和 vector_search_top_k 显示值与启动设置默认值不一致的问题)
 
 def get_file_list():
     if not os.path.exists("content"):