From 179c2a9a92bfec85657007b41c8e40682f2bcdb0 Mon Sep 17 00:00:00 2001
From: liunux4odoo
Date: Sun, 30 Jul 2023 08:56:49 +0800
Subject: [PATCH] Align server.chat.openai_chat parameters with the OpenAI
 /v1/chat/completions API
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Redefine the parameters of server.chat.openai_chat so that they match the
input parameters of the OpenAI /v1/chat/completions endpoint, with default
values taken from model_config.

The endpoint in openai_chat still needs further changes: OpenAI returns
different result shapes depending on the stream parameter, and this
endpoint should match that behavior.
---
 server/chat/openai_chat.py | 36 +++++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)

diff --git a/server/chat/openai_chat.py b/server/chat/openai_chat.py
index e17dbca..e6c93f8 100644
--- a/server/chat/openai_chat.py
+++ b/server/chat/openai_chat.py
@@ -3,26 +3,40 @@ from fastapi.responses import StreamingResponse
 from typing import List, Dict
 import openai
 from configs.model_config import llm_model_dict, LLM_MODEL
+from pydantic import BaseModel
 
-async def openai_chat(messages: List[Dict] = Body(...,
-                                                  description="用户输入",
-                                                  example=[{"role": "user", "content": "你好"}])):
+
+class OpenAiMessage(BaseModel):
+    role: str = "user"
+    content: str = "hello"
+
+
+class OpenAiChatMsgIn(BaseModel):
+    model: str = LLM_MODEL
+    messages: List[OpenAiMessage]
+    temperature: float = 0.7
+    n: int = 1
+    max_tokens: int = 1024
+    stop: List[str] = []
+    stream: bool = True
+    presence_penalty: int = 0
+    frequency_penalty: int = 0
+
+
+async def openai_chat(msg: OpenAiChatMsgIn):
     openai.api_key = llm_model_dict[LLM_MODEL]["api_key"]
     print(f"{openai.api_key=}")
     openai.api_base = llm_model_dict[LLM_MODEL]["api_base_url"]
     print(f"{openai.api_base=}")
-    print(messages)
+    print(msg)
 
-    async def get_response(messages: List[Dict]):
-        response = openai.ChatCompletion.create(
-            model=LLM_MODEL,
-            messages=messages,
-        )
+    async def get_response(msg):
+        response = openai.ChatCompletion.create(**msg.dict())
         for chunk in response.choices[0].message.content:
             print(chunk)
             yield chunk
 
     return StreamingResponse(
-        get_response(msg),
+        get_response(msg),
         media_type='text/event-stream',
-    )
\ No newline at end of file
+    )
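
Note on the pending stream work mentioned in the message above: one possible
shape, sketched under stated assumptions rather than as the committed
implementation. It assumes the pre-1.0 openai SDK, where
ChatCompletion.create(stream=True) returns a generator of delta chunks and
stream=False returns a single response object, and it reuses OpenAiChatMsgIn,
llm_model_dict and LLM_MODEL as defined in this patch:

    import openai
    from fastapi.responses import JSONResponse, StreamingResponse
    from configs.model_config import llm_model_dict, LLM_MODEL


    async def openai_chat(msg: OpenAiChatMsgIn):
        openai.api_key = llm_model_dict[LLM_MODEL]["api_key"]
        openai.api_base = llm_model_dict[LLM_MODEL]["api_base_url"]

        if msg.stream:
            def get_response(msg):
                # With stream=True the SDK yields incremental chunks; the
                # text lives in choices[0].delta, not choices[0].message.
                response = openai.ChatCompletion.create(**msg.dict())
                for chunk in response:
                    content = chunk["choices"][0]["delta"].get("content", "")
                    if content:
                        yield content

            return StreamingResponse(get_response(msg),
                                     media_type="text/event-stream")
        else:
            # With stream=False the SDK returns one complete response
            # object; relay it as plain JSON, mirroring the shape of
            # /v1/chat/completions.
            response = openai.ChatCompletion.create(**msg.dict())
            return JSONResponse(response.to_dict_recursive())

A client would then consume an event stream when stream=True (the default in
OpenAiChatMsgIn) and a single JSON body when stream=False.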