CUDA error with multiprocessing; run model_worker in the main process instead of a forked subprocess

This commit is contained in:
liunux4odoo 2023-07-31 11:18:57 +08:00
parent 47dfb6cd8b
commit 9e2b411b01
1 changed file with 13 additions and 9 deletions

View File

@ -219,14 +219,15 @@ if __name__ == "__main__":
) )
controller_process.start() controller_process.start()
model_worker_process = Process( # cuda 没办法用在fork的多进程中
target=run_model_worker, # model_worker_process = Process(
name=f"model_worker({os.getpid()})", # target=run_model_worker,
args=(queue,), # name=f"model_worker({os.getpid()})",
# kwargs={"load_8bit": True}, # args=(queue,),
daemon=True, # # kwargs={"load_8bit": True},
) # daemon=True,
model_worker_process.start() # )
# model_worker_process.start()
openai_api_process = Process( openai_api_process = Process(
target=run_openai_api, target=run_openai_api,
@ -236,10 +237,13 @@ if __name__ == "__main__":
) )
openai_api_process.start() openai_api_process.start()
run_model_worker(queue)
controller_process.join() controller_process.join()
model_worker_process.join() # model_worker_process.join()
openai_api_process.join() openai_api_process.join()
# 服务启动后接口调用示例: # 服务启动后接口调用示例:
# import openai # import openai
# openai.api_key = "EMPTY" # Not support yet # openai.api_key = "EMPTY" # Not support yet