add torch_gc to clear gpu cache in knowledge_based_chatglm.py

littlepanda0716 2023-04-07 10:46:02 +08:00
parent 5664d1ff62
commit c4b52dda72
1 changed file with 1 addition and 0 deletions


@@ -52,6 +52,7 @@ class ChatGLM(LLM):
             max_length=self.max_token,
             temperature=self.temperature,
         )
+        torch_gc()
         print("history: ", self.history)
         if stop is not None:
             response = enforce_stop_tokens(response, stop)
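
For context, torch_gc is a small helper that releases cached GPU memory after each generation, so successive chat calls do not accumulate allocator cache. The sketch below shows what such a helper typically looks like; the DEVICE constant and the exact body are illustrative assumptions, not necessarily the repository's verbatim code. torch.cuda.empty_cache() and torch.cuda.ipc_collect() are standard PyTorch APIs.

    import torch

    DEVICE = "cuda:0"  # assumption: single CUDA device; adjust to your setup

    def torch_gc():
        # Release GPU memory held by PyTorch's caching allocator so it can
        # be reclaimed by other processes or later generations.
        if torch.cuda.is_available():
            with torch.cuda.device(DEVICE):
                torch.cuda.empty_cache()  # free unused cached memory blocks
                torch.cuda.ipc_collect()  # reclaim CUDA IPC memory from ended processes

Calling it right after model.chat(...) (as this commit does) keeps peak GPU memory lower across repeated requests, at the minor cost that freed blocks must be re-allocated on the next call.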