Add llm_model_dict to choose llm and add chatglm-6b-int4 as an option
This commit is contained in:
parent 6d5b143811
commit 3dc5860cfe
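The llm_model_dict named in the title lives in a config file whose hunks are not shown here; only the ChatGLM class changes appear below. As a rough sketch of how such a mapping could tie the new chatglm-6b-int4 option to load_model (the dict contents and key names are assumptions, not taken from this diff):

# Hypothetical sketch of the config mapping the title refers to;
# keys select the LLM, values are Hugging Face checkpoint ids.
llm_model_dict = {
    "chatglm-6b": "THUDM/chatglm-6b",
    "chatglm-6b-int4": "THUDM/chatglm-6b-int4",
}

# A caller would then resolve the chosen key to a checkpoint path:
# llm.load_model(model_name_or_path=llm_model_dict["chatglm-6b-int4"])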
@@ -17,7 +17,6 @@ def torch_gc():
 
 
 class ChatGLM(LLM):
-    model_name: str
     max_token: int = 10000
     temperature: float = 0.1
     top_p = 0.9
@@ -28,20 +27,6 @@ class ChatGLM(LLM):
     def __init__(self):
         super().__init__()
 
-    def load_model(self,
-                   model_name_or_path: str = "THUDM/chatglm-6b"):
-        self.tokenizer = AutoTokenizer.from_pretrained(
-            model_name_or_path,
-            trust_remote_code=True
-        )
-        self.model = (
-            AutoModel.from_pretrained(
-                model_name_or_path,
-                trust_remote_code=True)
-            .half()
-            .cuda()
-        )
-
     @property
     def _llm_type(self) -> str:
         return "ChatGLM"
@@ -62,3 +47,17 @@ class ChatGLM(LLM):
         response = enforce_stop_tokens(response, stop)
         self.history = updated_history
         return response
+
+    def load_model(self,
+                   model_name_or_path: str = "THUDM/chatglm-6b"):
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            model_name_or_path,
+            trust_remote_code=True
+        )
+        self.model = (
+            AutoModel.from_pretrained(
+                model_name_or_path,
+                trust_remote_code=True)
+            .half()
+            .cuda()
+        )
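Note that after the move, load_model sits at the end of the class and is still not called from __init__, so a freshly constructed ChatGLM has no tokenizer or model until the caller loads them. A minimal usage sketch, assuming the int4 checkpoint id implied by the commit title:

llm = ChatGLM()
# __init__ only calls super().__init__(); weights must be loaded explicitly.
llm.load_model(model_name_or_path="THUDM/chatglm-6b-int4")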