增加语义切分模型 (#248)

This commit is contained in:
royd 2023-05-05 23:38:53 +08:00 committed by GitHub
parent 41cd0fd8ae
commit 23a6b26f3e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 21 additions and 8 deletions

View File

@ -1,25 +1,38 @@
from langchain.text_splitter import CharacterTextSplitter from langchain.text_splitter import CharacterTextSplitter
import re import re
from typing import List from typing import List
from modelscope.pipelines import pipeline
# Semantic document-segmentation pipeline (ModelScope), loaded eagerly at
# module import time. Pinned to CPU so machines without a (capable) GPU can
# still import this module; presumably change ``device`` to a GPU id to
# accelerate — TODO confirm against modelscope pipeline docs.
p = pipeline(
    task="document-segmentation",
    model='damo/nlp_bert_document-segmentation_chinese-base',
    device="cpu")
class ChineseTextSplitter(CharacterTextSplitter):
    """Sentence-level splitter for Chinese text.

    Splits with a sentence-ending-punctuation regex by default, or with the
    ModelScope semantic document-segmentation model when requested in
    ``split_text``.
    """

    def __init__(self, pdf: bool = False, **kwargs):
        """Create the splitter.

        pdf: when True, pre-clean PDF-extraction artifacts (runs of
            newlines, stray whitespace) before splitting.
        **kwargs: forwarded to ``CharacterTextSplitter``.
        """
        super().__init__(**kwargs)
        self.pdf = pdf

    def split_text(self, text: str, use_document_segmentation: bool = False) -> List[str]:
        """Split *text* into a list of sentences/segments.

        use_document_segmentation: when True, segment semantically with the
        open-source model ``damo/nlp_bert_document-segmentation_chinese-base``
        (paper: https://arxiv.org/abs/2107.09278). That path requires
        ``pip install "modelscope[nlp]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html``.
        The pipeline is loaded on CPU at module level to stay friendly to
        low-end GPUs; edit its ``device`` argument to use a GPU instead.
        """
        if self.pdf:
            # Clean typical PDF-extraction noise: collapse 3+ newlines,
            # normalize all whitespace to single spaces, drop leftover
            # double newlines.
            text = re.sub(r"\n{3,}", "\n", text)
            # raw string: bare '\s' is an invalid escape sequence
            text = re.sub(r"\s", " ", text)
            text = text.replace("\n\n", "")
        if use_document_segmentation:
            result = p(documents=text)
            # The pipeline returns segments joined by "\n\t"; drop empties.
            sent_list = [seg for seg in result["text"].split("\n\t") if seg]
        else:
            # Split on Chinese/Western sentence-ending punctuation; a match
            # group that is itself a separator gets appended to the previous
            # sentence so closing quotes stay attached.
            sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
            sent_list = []
            for ele in sent_sep_pattern.split(text):
                if sent_sep_pattern.match(ele) and sent_list:
                    sent_list[-1] += ele
                elif ele:
                    sent_list.append(ele)
        return sent_list