fix: stop the chatglm model from being duplicated and using excessive GPU memory

Declared as a class member, `model` is copied once every time the class is instantiated, so GPU memory usage doubles with each question asked. Fixed by turning `model` into a module-level global.
This commit is contained in:
parent
1851b4f074
commit
bed03a6ff1
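Why moving `model` out of the class fixes the leak: LangChain's LLM base class was, at the time of this commit, a pydantic v1 BaseModel, and pydantic v1 deep-copies mutable field defaults for every instance, so a CUDA model declared as a class-level field is duplicated on each ChatGLM() construction. Below is a minimal sketch of that failure mode under this pydantic-v1 assumption, using a small CPU tensor as a stand-in for the 6B model; the Buggy/Fixed names are illustrative, not from the repo.

    import torch
    from pydantic import BaseModel  # pydantic v1 semantics assumed

    class Buggy(BaseModel):
        # pydantic v1 deep-copies this default for every instance; with the
        # real .half().cuda() model, each copy claims its own GPU memory
        weights: torch.Tensor = torch.zeros(1024, 1024)

        class Config:
            arbitrary_types_allowed = True  # let pydantic accept torch.Tensor

    a, b = Buggy(), Buggy()
    print(a.weights is b.weights)  # False: two independent copies exist

    # The fix mirrors this commit: load once at module level and share it.
    weights = torch.zeros(1024, 1024)

    class Fixed(BaseModel):
        def norm(self) -> float:
            return float(weights.norm())  # all instances read the single copy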
@@ -5,24 +5,14 @@ from transformers import AutoTokenizer, AutoModel
 
 """ChatGLM_G is a wrapper around the ChatGLM model to fit LangChain framework. May not be an optimal implementation"""
 
+tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
 
 
 class ChatGLM(LLM):
     max_token: int = 10000
     temperature: float = 0.1
     top_p = 0.9
     history = []
-    tokenizer = AutoTokenizer.from_pretrained(
-        "THUDM/chatglm-6b",
-        trust_remote_code=True
-    )
-    model = (
-        AutoModel.from_pretrained(
-            "THUDM/chatglm-6b",
-            trust_remote_code=True)
-        .half()
-        .cuda()
-    )
-
     def __init__(self):
         super().__init__()
@@ -34,8 +24,8 @@ class ChatGLM(LLM):
     def _call(self,
               prompt: str,
               stop: Optional[List[str]] = None) -> str:
-        response, updated_history = self.model.chat(
-            self.tokenizer,
+        response, updated_history = model.chat(
+            tokenizer,
             prompt,
             history=self.history,
             max_length=self.max_token,
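With the patch applied, repeated questions reuse the single model loaded at import time. A hypothetical usage sketch follows, assuming the LangChain LLM interface of that era, where calling the instance routes through _call; the module name chatglm_llm is illustrative, not from the repo.

    from chatglm_llm import ChatGLM  # hypothetical module name for this file

    llm = ChatGLM()
    for question in ["你好", "What is LangChain?"]:
        # __call__ -> _call -> model.chat(tokenizer, ...): the same global
        # model and tokenizer serve every question, so GPU memory stays flat
        print(llm(question))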