diff --git a/models/chatglm_llm.py b/models/chatglm_llm.py
index 903d488..7da423d 100644
--- a/models/chatglm_llm.py
+++ b/models/chatglm_llm.py
@@ -34,6 +34,7 @@ class ChatGLM(BaseAnswer, LLM, ABC):
         self.history_len = history_len
 
     def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+        print(f"__call:{prompt}")
         response, _ = self.checkPoint.model.chat(
             self.checkPoint.tokenizer,
             prompt,
@@ -41,6 +42,8 @@
             max_length=self.max_token,
             temperature=self.temperature
         )
+        print(f"response:{response}")
+        print("+++++++++++++++++++++++++++++++++++")
         return response
 
     def generatorAnswer(self, prompt: str,
diff --git a/models/fastchat_openai_llm.py b/models/fastchat_openai_llm.py
index 5228c42..df66add 100644
--- a/models/fastchat_openai_llm.py
+++ b/models/fastchat_openai_llm.py
@@ -69,7 +69,25 @@ class FastChatOpenAILLM(RemoteRpcModel, LLM, ABC):
         self.model_name = model_name
 
     def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
-        pass
+        print(f"__call:{prompt}")
+        try:
+            import openai
+            # Not support yet
+            openai.api_key = "EMPTY"
+            openai.api_base = self.api_base_url
+        except ImportError as exc:
+            raise ValueError(
+                "Could not import openai python package. "
+                "Please install it with `pip install openai`."
+            ) from exc
+        # create a chat completion
+        completion = openai.ChatCompletion.create(
+            model=self.model_name,
+            messages=self.build_message_list(prompt)
+        )
+        print(f"response:{completion.choices[0].message.content}")
+        print("+++++++++++++++++++++++++++++++++++")
+        return completion.choices[0].message.content
 
     # 将历史对话数组转换为文本格式
     def build_message_list(self, query) -> Collection[Dict[str, str]]: