From 30b8daecb365a979f3de0768aa557f20c25b07d0 Mon Sep 17 00:00:00 2001
From: liunux4odoo
Date: Thu, 28 Sep 2023 23:21:35 +0800
Subject: [PATCH] fix readme

---
 README.md                       | 2 +-
 README_en.md                    | 2 +-
 configs/model_config.py.example | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index bdf8cb3..c8e065e 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@ docker run -d --gpus all -p 80:8501 registry.cn-beijing.aliyuncs.com/chatchat/ch
 + LLaMA-13B minimum VRAM requirement: 11GB; recommended GPUs: RTX 2060 12GB, RTX 3060 12GB, RTX 3080, RTX A2000
 + Qwen-14B-Chat minimum VRAM requirement: 13GB; recommended GPU: RTX 3090
 + LLaMA-30B minimum VRAM requirement: 22GB; recommended GPUs: RTX A5000, RTX 3090, RTX 4090, RTX 6000, Tesla V100, Tesla P40
-+ LLaMA-65B minimum VRAM requirement: 22GB; recommended GPUs: A100, A40, A6000
++ LLaMA-65B minimum VRAM requirement: 40GB; recommended GPUs: A100, A40, A6000
 
 For int8, multiply the VRAM requirement by 1.5; for fp16, by 2.5. For example, running inference on Qwen-7B-Chat at fp16 requires 16GB of VRAM.

diff --git a/README_en.md b/README_en.md
index d6aaf0f..c7771ff 100644
--- a/README_en.md
+++ b/README_en.md
@@ -68,7 +68,7 @@ If you want to run the native model (int4 version) on the GPU without problems,
 + LLaMA-13B minimum VRAM requirement: 11GB; recommended GPUs: RTX 2060 12GB, RTX 3060 12GB, RTX 3080, RTX A2000
 + Qwen-14B-Chat minimum VRAM requirement: 13GB; recommended GPU: RTX 3090
 + LLaMA-30B minimum VRAM requirement: 22GB; recommended GPUs: RTX A5000, RTX 3090, RTX 4090, RTX 6000, Tesla V100, Tesla P40
-+ LLaMA-65B minimum VRAM requirement: 22GB; recommended GPUs: A100, A40, A6000
++ LLaMA-65B minimum VRAM requirement: 40GB; recommended GPUs: A100, A40, A6000
 
 For int8, multiply the VRAM requirement by 1.5; for fp16, by 2.5. For example, running inference on Qwen-7B-Chat at fp16 requires 16GB of VRAM.

diff --git a/configs/model_config.py.example b/configs/model_config.py.example
index cbf22b6..f5e8465 100644
--- a/configs/model_config.py.example
+++ b/configs/model_config.py.example
@@ -90,7 +90,7 @@ MODEL_PATH = {
 }
 
 # Name of the selected Embedding model
-EMBEDDING_MODEL = "piccolo-large-zh" # the latest SOTA embedding model
+EMBEDDING_MODEL = "m3e-base" # you can try the latest SOTA embedding model: piccolo-large-zh
 
 # Device the Embedding model runs on. "auto" detects automatically; it can also be set manually to one of "cuda", "mps", "cpu".
 EMBEDDING_DEVICE = "auto"
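
The corrected VRAM figures follow the rule of thumb the README states: the listed minimums assume int4 quantization, with int8 costing roughly 1.5x and fp16 roughly 2.5x that baseline. A minimal sketch of that arithmetic (the 6.4 GB int4 baseline for Qwen-7B-Chat is derived from the stated 16 GB fp16 figure, not given in the source):

```python
# Rule of thumb from the README: listed minimums are for int4;
# int8 needs ~1.5x and fp16 ~2.5x the int4 baseline.
MULTIPLIERS = {"int4": 1.0, "int8": 1.5, "fp16": 2.5}

def estimate_vram_gb(int4_baseline_gb: float, precision: str) -> float:
    """Scale an int4 VRAM baseline to another precision."""
    return int4_baseline_gb * MULTIPLIERS[precision]

# README example: Qwen-7B-Chat at fp16 needs ~16 GB (implying 16 / 2.5 = 6.4 GB at int4).
print(estimate_vram_gb(6.4, "fp16"))   # 16.0
# LLaMA-65B's corrected 40 GB minimum scales to ~100 GB at fp16.
print(estimate_vram_gb(40.0, "fp16"))  # 100.0
```

The config change reverts the default `EMBEDDING_MODEL` to `m3e-base` while keeping `piccolo-large-zh` as a suggested alternative. As a hedged illustration only (this is not the project's actual loading code, and the Hugging Face hub ids are assumptions), such a name is typically resolved through the `MODEL_PATH` table shown in the hunk and loaded with `sentence-transformers`:

```python
from sentence_transformers import SentenceTransformer

# Hypothetical resolution table; the hub ids are assumptions, not taken from the patch.
MODEL_PATH = {
    "m3e-base": "moka-ai/m3e-base",
    "piccolo-large-zh": "sensenova/piccolo-large-zh",
}
EMBEDDING_MODEL = "m3e-base"  # the default restored by this patch
EMBEDDING_DEVICE = "cpu"      # the config uses "auto"; set "cuda"/"mps"/"cpu" explicitly here

model = SentenceTransformer(MODEL_PATH[EMBEDDING_MODEL], device=EMBEDDING_DEVICE)
vectors = model.encode(["hello world", "知识库问答"])  # one embedding vector per input text
print(vectors.shape)  # (2, 768) for m3e-base
```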