add torch_gc to clear gpu cache in knowledge_based_chatglm.py
commit c4b52dda72
parent 5664d1ff62
--- a/knowledge_based_chatglm.py
+++ b/knowledge_based_chatglm.py
@@ -52,6 +52,7 @@ class ChatGLM(LLM):
             max_length=self.max_token,
             temperature=self.temperature,
         )
+        torch_gc()
         print("history: ", self.history)
         if stop is not None:
             response = enforce_stop_tokens(response, stop)
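For context, torch_gc in ChatGLM-based projects is usually a thin wrapper around PyTorch's CUDA cache-management calls. Below is a minimal sketch, assuming a single CUDA device selected via a hypothetical DEVICE constant; the exact helper in this repository may differ:

import torch

DEVICE = "cuda:0"  # assumption: single-GPU deployment; adjust to your setup

def torch_gc():
    # Return unused cached GPU memory to the driver so subsequent generation
    # calls start from a lower footprint. Tensors still referenced by Python
    # objects (e.g. self.history) are not freed by this.
    if torch.cuda.is_available():
        with torch.cuda.device(DEVICE):
            torch.cuda.empty_cache()  # release cached allocator blocks
            torch.cuda.ipc_collect()  # reclaim CUDA IPC memory from exited processes

Calling it immediately after the chat call returns trades a small reallocation cost on the next request for a lower idle GPU footprint between requests.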