From c4b52dda7284262d167ce13ebc8ba1a6a232b810 Mon Sep 17 00:00:00 2001
From: littlepanda0716
Date: Fri, 7 Apr 2023 10:46:02 +0800
Subject: [PATCH] add torch_gc to clear gpu cache in knowledge_based_chatglm.py

---
 chatglm_llm.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/chatglm_llm.py b/chatglm_llm.py
index 3833e4f..776a8e1 100644
--- a/chatglm_llm.py
+++ b/chatglm_llm.py
@@ -52,6 +52,7 @@ class ChatGLM(LLM):
             max_length=self.max_token,
             temperature=self.temperature,
         )
+        torch_gc()
         print("history: ", self.history)
         if stop is not None:
             response = enforce_stop_tokens(response, stop)
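
Note: torch_gc() is called here but not defined or imported within this diff, so its
body must live elsewhere in the module. A minimal sketch of what such a helper
typically looks like, assuming it wraps PyTorch's CUDA cache-clearing calls (the
DEVICE constant below is an assumption for illustration, not taken from this patch):

    import torch

    # Hypothetical device string; the actual project may select the device dynamically.
    DEVICE = "cuda:0"

    def torch_gc():
        """Release cached GPU memory held by PyTorch's caching allocator."""
        if torch.cuda.is_available():
            with torch.cuda.device(DEVICE):
                torch.cuda.empty_cache()  # return unused cached blocks to the driver
                torch.cuda.ipc_collect()  # reclaim memory from expired CUDA IPC handles

Invoking a helper like this right after model.chat() returns can keep a long-running
service from accumulating reserved-but-unused GPU memory between requests; it does not
free tensors that are still referenced, only the allocator's cached blocks.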