Langchain-Chatchat/server/chat/openai_chat.py

from fastapi.responses import StreamingResponse
from typing import List
import openai
from configs.model_config import llm_model_dict, LLM_MODEL
from pydantic import BaseModel


class OpenAiMessage(BaseModel):
    role: str = "user"
    content: str = "hello"


class OpenAiChatMsgIn(BaseModel):
    model: str = LLM_MODEL
    messages: List[OpenAiMessage]
    temperature: float = 0.7
    n: int = 1
    max_tokens: int = 1024
    stop: List[str] = []
    stream: bool = False
    presence_penalty: int = 0
    frequency_penalty: int = 0
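
# Example request body accepted by OpenAiChatMsgIn (fields and defaults as
# declared above; "<LLM_MODEL>" stands in for the configured default model):
#
#   {
#       "model": "<LLM_MODEL>",
#       "messages": [{"role": "user", "content": "hello"}],
#       "temperature": 0.7,
#       "stream": false
#   }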


async def openai_chat(msg: OpenAiChatMsgIn):
    # Point the openai client at the API server configured for the active model.
    openai.api_key = llm_model_dict[LLM_MODEL]["api_key"]
    print(f"{openai.api_key=}")
    openai.api_base = llm_model_dict[LLM_MODEL]["api_base_url"]
    print(f"{openai.api_base=}")
    print(msg)

    async def get_response(msg):
        data = msg.dict()
        # acreate forwards `stream` to the server; with stream=True it returns
        # an async iterator of incremental chunks instead of a full response.
        response = await openai.ChatCompletion.acreate(**data)
        if msg.stream:
            async for chunk in response:
                # Each chunk carries a piece of the answer in choices[0].delta.
                content = chunk.choices[0].delta.get("content", "")
                if content:
                    print(content)
                    yield content
        else:
            answer = response.choices[0].message.content
            print(answer)
            yield answer

    return StreamingResponse(
        get_response(msg),
        media_type='text/event-stream',
    )
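

if __name__ == "__main__":
    # Minimal sketch for exercising the handler locally, assuming fastapi and
    # uvicorn are installed and the configured api_base_url is reachable. The
    # route path "/chat/openai_chat" and port 7861 are illustrative
    # assumptions, not the project's actual route registration.
    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()
    app.post("/chat/openai_chat")(openai_chat)

    # Example request with an SSE-aware client:
    #   curl -N http://127.0.0.1:7861/chat/openai_chat \
    #        -H "Content-Type: application/json" \
    #        -d '{"messages": [{"role": "user", "content": "hello"}], "stream": true}'
    uvicorn.run(app, host="127.0.0.1", port=7861)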