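"""Model worker that forwards chat requests to Zhipu AI's ChatGLM HTTP API so the
service can be used through a FastChat controller like a locally hosted model."""
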
from server.model_workers.base import ApiModelWorker
from fastchat import conversation as conv
import sys
import json
from typing import List, Literal


class ChatGLMWorker(ApiModelWorker):
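    """API worker for Zhipu AI's chatglm_pro / chatglm_std / chatglm_lite models.

    The API key is read from the worker's configuration via ``get_config()``.
    """
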
    BASE_URL = "https://open.bigmodel.cn/api/paas/v3/model-api"
    SUPPORT_MODELS = ["chatglm_pro", "chatglm_std", "chatglm_lite"]

    def __init__(
        self,
        *,
        model_names: List[str] = ["zhipu-api"],
        version: Literal["chatglm_pro", "chatglm_std", "chatglm_lite"] = "chatglm_std",
        controller_addr: str,
        worker_addr: str,
        **kwargs,
    ):
        kwargs.update(model_names=model_names, controller_addr=controller_addr, worker_addr=worker_addr)
        kwargs.setdefault("context_len", 32768)
        super().__init__(**kwargs)
        self.version = version

        # Conversation template for the ChatGLM API; other API workers need their own conv_template.
        self.conv = conv.Conversation(
            name=self.model_names[0],
            # System prompt (kept in Chinese): "You are an intelligent, helpful AI; give useful,
            # detailed and polite answers to the questions people ask."
            system_message="你是一个聪明、对人类有帮助的人工智能,你可以对人类提出的问题给出有用、详细、礼貌的回答。",
            messages=[],
            roles=["Human", "Assistant"],
            sep="\n### ",
            stop_str="###",
        )

    def generate_stream_gate(self, params):
        # TODO: keep track of the request_id
        import zhipuai  # imported lazily so the SDK is only required when this worker is used

        super().generate_stream_gate(params)
        zhipuai.api_key = self.get_config().get("api_key")

        response = zhipuai.model_api.sse_invoke(
            model=self.version,
            prompt=[{"role": "user", "content": params["prompt"]}],
            temperature=params.get("temperature"),
            top_p=params.get("top_p"),
            incremental=False,
        )
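        # FastChat's streaming protocol: each chunk is a JSON object encoded as UTF-8
        # and terminated by a null byte; error_code 0 signals a successful partial result.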
        for e in response.events():
            if e.event == "add":
                yield json.dumps({"error_code": 0, "text": e.data}, ensure_ascii=False).encode() + b"\0"
            # TODO: more robust event handling
            # elif e.event == "finish":
            #     ...

    def get_embeddings(self, params):
        # TODO: support embeddings
        print("embedding")
        print(params)


if __name__ == "__main__":
    import uvicorn
    from server.utils import MakeFastAPIOffline
    from fastchat.serve.model_worker import app
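
    # Standalone debug launch: register with a FastChat controller assumed to be
    # running at 127.0.0.1:20001 and serve this worker on port 20003.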
    worker = ChatGLMWorker(
        controller_addr="http://127.0.0.1:20001",
        worker_addr="http://127.0.0.1:20003",
    )
    # Point FastChat's model_worker module at this instance so its FastAPI routes use it.
    sys.modules["fastchat.serve.model_worker"].worker = worker
    MakeFastAPIOffline(app)
    uvicorn.run(app, port=20003)