From a04244cf6a21f0382021d525822948d7f32f8b8c Mon Sep 17 00:00:00 2001 From: lurenlym Date: Thu, 13 Apr 2023 14:13:58 +0800 Subject: [PATCH] incorrect arg order and support zero history --- chatglm_llm.py | 2 +- webui.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/chatglm_llm.py b/chatglm_llm.py index c074c23..aceb984 100644 --- a/chatglm_llm.py +++ b/chatglm_llm.py @@ -38,7 +38,7 @@ class ChatGLM(LLM): response, _ = self.model.chat( self.tokenizer, prompt, - history=self.history[-self.history_len:], + history=self.history[-self.history_len:] if self.history_len>0 else [], max_length=self.max_token, temperature=self.temperature, ) diff --git a/webui.py b/webui.py index a234e0c..b28d2bd 100644 --- a/webui.py +++ b/webui.py @@ -74,7 +74,7 @@ with gr.Blocks(css=""" label="llm model", value="chatglm-6b", interactive=True) - LLM_HISTORY_LEN = gr.Slider(1, + LLM_HISTORY_LEN = gr.Slider(0, 10, value=3, step=1, @@ -95,7 +95,7 @@ with gr.Blocks(css=""" kb.init_cfg(args[0], args[1], args[2], args[3]), show_progress=True, api_name="init_cfg", - inputs=[llm_model, embedding_model, LLM_HISTORY_LEN,VECTOR_SEARCH_TOP_K] + ).then( get_model_status, chatbot, chatbot )