From 7a406e42637cbe687bfafa190f00e7bc06ea887e Mon Sep 17 00:00:00 2001
From: imClumsyPanda
Date: Mon, 1 May 2023 23:55:37 +0800
Subject: [PATCH] Dev (#214)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* github: Add issue templates

* supports GPU usage in docker (#197)

* Revised the content of the context-based prompt template

---------

Co-authored-by: Calcitem
Co-authored-by: cocomany <124849750+cocomany@users.noreply.github.com>
Co-authored-by: glide-the <2533736852@qq.com>
---
 README.md               |  2 +-
 configs/model_config.py | 10 ++++++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index d7684cd..1f72f93 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ sudo systemctl restart docker
 ```
 安装完成后，可以使用以下命令编译镜像和启动容器：
 ```
-docker build -t chatglm-cuda:latest .
+docker build -f Dockerfile-cuda -t chatglm-cuda:latest .
 docker run --gpus all -d --name chatglm -p 7860:7860 chatglm-cuda:latest
 
 #若要使用离线模型，请配置好模型路径，然后此repo挂载到Container
diff --git a/configs/model_config.py b/configs/model_config.py
index 20bea5f..c85c1a9 100644
--- a/configs/model_config.py
+++ b/configs/model_config.py
@@ -41,8 +41,14 @@ VS_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "vector_
 UPLOAD_ROOT_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "content", "")
 
 # 基于上下文的prompt模版，请务必保留"{question}"和"{context}"
-PROMPT_TEMPLATE = """基于以下已知信息，简洁和专业的来回答用户的问题，问题是"{question}"。如果无法从中得到答案，请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"，不允许在答案中添加编造成分，答案请使用中文。已知内容如下:
-{context} """
+PROMPT_TEMPLATE = """已知信息在下方"="包裹的段落，基于以下已知信息，简洁和专业的来回答用户的问题。如果无法从中得到答案，请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"，不允许在答案中添加编造成分，答案请使用中文。
+
+====================================已知信息=====================================================
+{context}
+================================================================================================
+
+问题:"{question}"
+答案:"""
 
 # 匹配后单段上下文长度
 CHUNK_SIZE = 500
\ No newline at end of file
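
For reference, below is a minimal sketch of how the revised PROMPT_TEMPLATE might be consumed downstream, assuming the placeholders are filled with str.format (the config comment requires keeping "{question}" and "{context}" intact, presumably for this reason). The generate_prompt helper and the sample passages are hypothetical illustrations, not code from this patch or repository:

```
# Illustrative sketch only (not part of this patch): one way the revised
# PROMPT_TEMPLATE might be filled before the prompt is sent to the model.
# Assumes str.format substitution of "{context}" and "{question}";
# generate_prompt() and the sample passages below are hypothetical.
from typing import List

from configs.model_config import PROMPT_TEMPLATE, CHUNK_SIZE


def generate_prompt(related_docs: List[str], question: str) -> str:
    """Join retrieved passages into {context}, trimming each to CHUNK_SIZE."""
    context = "\n".join(doc[:CHUNK_SIZE] for doc in related_docs)
    return PROMPT_TEMPLATE.format(context=context, question=question)


if __name__ == "__main__":
    docs = ["段落一：Docker 部署相关内容……", "段落二：GPU 使用相关内容……"]
    print(generate_prompt(docs, "如何编译 CUDA 镜像？"))
```

The "=" rulers added by the new template wrap the retrieved passages, matching the new instruction wording (已知信息在下方"="包裹的段落) and separating the injected context from the trailing 问题/答案 lines.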