fix: set vllm based on platform to avoid error on Windows

liunux4odoo 2023-09-27 21:43:54 +08:00
parent d39878ff35
commit 523764e284
2 changed files with 3 additions and 2 deletions


@@ -38,7 +38,8 @@ FSCHAT_MODEL_WORKERS = {
         "host": DEFAULT_BIND_HOST,
         "port": 20002,
         "device": LLM_DEVICE,
-        "infer_turbo": False,  # Optional: [False, 'vllm']. Inference acceleration framework to use; if vllm hits HuggingFace connectivity issues, see doc/FAQ
+        # False or 'vllm': inference acceleration framework to use; if vllm hits HuggingFace connectivity issues, see doc/FAQ
+        "infer_turbo": "vllm" if sys.platform.startswith("linux") else False,
         # Parameters needed when model_worker loads a model across multiple GPUs
         # "gpus": None,  # GPUs to use, given as a str such as "0,1"; if that has no effect, set CUDA_VISIBLE_DEVICES="0,1" or similar instead


@@ -78,7 +78,6 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
     from fastchat.serve.model_worker import worker_id, logger
     import argparse
     import fastchat.serve.model_worker
-    import fastchat.serve.vllm_worker
     logger.setLevel(log_level)

     parser = argparse.ArgumentParser()
@@ -98,6 +97,7 @@ def create_model_worker_app(log_level: str = "INFO", **kwargs) -> FastAPI:
     else:
         from configs.model_config import VLLM_MODEL_DICT
         if kwargs["model_names"][0] in VLLM_MODEL_DICT and args.infer_turbo == "vllm":
+            import fastchat.serve.vllm_worker
             from fastchat.serve.vllm_worker import VLLMWorker,app
             from vllm import AsyncLLMEngine
             from vllm.engine.arg_utils import AsyncEngineArgs,EngineArgs
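
The startup change moves `import fastchat.serve.vllm_worker` from the top of `create_model_worker_app` into the branch that actually builds a vllm worker, so the Linux-only vllm package is never imported on Windows. A minimal sketch of this deferred-import pattern, assuming a hypothetical `select_worker_app` helper rather than the project's real function:

def select_worker_app(infer_turbo):
    """Return the FastAPI app for the requested worker type."""
    if infer_turbo == "vllm":
        # Deferred import: only executed when vllm is requested, so machines
        # without the vllm package never raise an ImportError at startup.
        from fastchat.serve.vllm_worker import app
    else:
        from fastchat.serve.model_worker import app
    return app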