在model_config.py.example中增加qwen量化模型启动的说明

This commit is contained in:
huangzhiguo 2023-12-15 14:23:34 +08:00
parent 7e01e82470
commit a870076051
1 changed files with 2 additions and 0 deletions

View File

@@ -179,6 +179,8 @@ MODEL_PATH = {
"Qwen-14B": "Qwen/Qwen-14B",
"Qwen-14B-Chat": "Qwen/Qwen-14B-Chat",
# 在新版的transformers下需要手动修改模型的config.json文件，在quantization_config字典中
# 增加`"disable_exllama": true`字段，才能启动qwen的量化模型
"Qwen-14B-Chat-Int8": "Qwen/Qwen-14B-Chat-Int8",
"Qwen-14B-Chat-Int4": "Qwen/Qwen-14B-Chat-Int4",