Bug fixes and prompt modifications (#2230)
* Update the Langchain dependency to version 0.0.343 or later
* Adjust some prompts and dependencies; fix the OpenAI exception issue
* Comment out the print logging of the request `data`
parent c4fe3393b3
commit 40918c21de
@@ -1,7 +1,6 @@
 # Prompt templates use Jinja2 syntax; put simply, use double curly braces instead of the f-string's single braces.
 # This config file supports hot reloading: prompt template edits take effect without restarting the service.
 
-
 # Variables supported in LLM chat:
 # - input: the user's input content
 
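For context, the Jinja2 comment above means templates in this file are written with double braces. A minimal sketch of rendering such a template with Langchain 0.0.x (the template string below is illustrative, not taken from this file; jinja2 must be installed):

from langchain.prompts import PromptTemplate

# Jinja2-style template: double braces instead of f-string single braces
template = "Answer the user's question.\nQuestion: {{ input }}"
prompt = PromptTemplate.from_template(template, template_format="jinja2")
print(prompt.format(input="What is RAG?"))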
@@ -76,9 +75,6 @@ Answer the following questions as best you can. If it is in order, you can use s
 
 {tools}
 
-Please note that the "知识库查询工具" is information about the "西交利物浦大学" ,and if a question is asked about it, you must answer with the knowledge base,
-Please note that the "天气查询工具" can only be used once since Question begin.
-
 Use the following format:
 Question: the input question you must answer1
 Thought: you should always think about what to do and what tools to use.
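The {tools} placeholder in this agent prompt is filled with tool descriptions at runtime. A hypothetical, self-contained illustration of that substitution (the tool names echo the diff; the dict and plain .format call are assumptions, not the project's actual wiring):

# hypothetical illustration of how the {tools} placeholder gets filled
TEMPLATE = "Answer the following questions as best you can.\n\n{tools}\n\nUse the following format:"
tools = {"知识库查询工具": "query the knowledge base", "天气查询工具": "look up the weather"}
tools_block = "\n".join(f"{name}: {desc}" for name, desc in tools.items())
print(TEMPLATE.format(tools=tools_block))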
@@ -1,6 +1,6 @@
 # API requirements
 
-langchain>=0.0.342
+langchain>=0.0.343
 langchain-experimental>=0.0.42
 fschat[model_worker]>=0.2.33
 xformers>=0.0.22.post7
@@ -1,6 +1,6 @@
 # API requirements
 
-langchain>=0.0.342
+langchain>=0.0.343
 langchain-experimental>=0.0.42
 fschat[model_worker]>=0.2.33
 xformers>=0.0.22.post7
@@ -1,4 +1,4 @@
-langchain>=0.0.342
+langchain>=0.0.343
 fschat>=0.2.33
 openai>=1.3.5
 # sentence_transformers
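A quick standalone check that an existing environment satisfies the new floors (this snippet is not part of the commit; it assumes the common packaging library is available):

from importlib.metadata import version
from packaging.version import Version

assert Version(version("langchain")) >= Version("0.0.343"), "upgrade langchain"
assert Version(version("openai")) >= Version("1.3.5"), "upgrade openai"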
@@ -40,11 +40,9 @@ def get_ChatOpenAI(
     verbose: bool = True,
     **kwargs: Any,
 ) -> ChatOpenAI:
-    ## The following models are natively supported by Langchain and do not go through the Fschat wrapper
-    config_models = list_config_llm_models()
-
-    ## Models not natively supported by Langchain go through the Fschat wrapper
     config = get_model_worker_config(model_name)
+    if config.get("openai-api"):
+        model_name = config.get("model_name")
     model = ChatOpenAI(
         streaming=streaming,
         verbose=verbose,
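With this change the function always builds a ChatOpenAI client from the model worker config; an "openai-api" entry only swaps in the remote provider's model name first. A hypothetical call (argument values are illustrative, and max_tokens is assumed to mirror get_OpenAI's signature):

# hypothetical usage; key and base URL are resolved from the worker config
llm = get_ChatOpenAI(
    model_name="chatglm3-6b",   # any name present in the worker config
    temperature=0.7,
    max_tokens=1024,            # assumed parameter, mirroring get_OpenAI
)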
@@ -57,10 +55,7 @@ def get_ChatOpenAI(
         openai_proxy=config.get("openai_proxy"),
         **kwargs
     )
-
     return model
-
-
 
 
 def get_OpenAI(
     model_name: str,
     temperature: float,
@@ -71,67 +66,22 @@ def get_OpenAI(
     verbose: bool = True,
     **kwargs: Any,
 ) -> OpenAI:
-    ## The following models are natively supported by Langchain and do not go through the Fschat wrapper
-    config_models = list_config_llm_models()
-    if model_name in config_models.get("langchain", {}):
-        config = config_models["langchain"][model_name]
-        if model_name == "Azure-OpenAI":
-            model = AzureOpenAI(
-                streaming=streaming,
-                verbose=verbose,
-                callbacks=callbacks,
-                deployment_name=config.get("deployment_name"),
-                model_version=config.get("model_version"),
-                openai_api_type=config.get("openai_api_type"),
-                openai_api_base=config.get("api_base_url"),
-                openai_api_version=config.get("api_version"),
-                openai_api_key=config.get("api_key"),
-                openai_proxy=config.get("openai_proxy"),
-                temperature=temperature,
-                max_tokens=max_tokens,
-                echo=echo,
-            )
-
-        elif model_name == "OpenAI":
-            model = OpenAI(
-                streaming=streaming,
-                verbose=verbose,
-                callbacks=callbacks,
-                model_name=config.get("model_name"),
-                openai_api_base=config.get("api_base_url"),
-                openai_api_key=config.get("api_key"),
-                openai_proxy=config.get("openai_proxy"),
-                temperature=temperature,
-                max_tokens=max_tokens,
-                echo=echo,
-            )
-        elif model_name == "Anthropic":
-            model = Anthropic(
-                streaming=streaming,
-                verbose=verbose,
-                callbacks=callbacks,
-                model_name=config.get("model_name"),
-                anthropic_api_key=config.get("api_key"),
-                echo=echo,
-            )
-    ## TODO: support other models natively supported by Langchain
-    else:
-        ## Models not natively supported by Langchain go through the Fschat wrapper
-        config = get_model_worker_config(model_name)
-        model = OpenAI(
-            streaming=streaming,
-            verbose=verbose,
-            callbacks=callbacks,
-            openai_api_key=config.get("api_key", "EMPTY"),
-            openai_api_base=config.get("api_base_url", fschat_openai_api_address()),
-            model_name=model_name,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            openai_proxy=config.get("openai_proxy"),
-            echo=echo,
-            **kwargs
-        )
-
+    config = get_model_worker_config(model_name)
+    if config.get("openai-api"):
+        model_name = config.get("model_name")
+    model = OpenAI(
+        streaming=streaming,
+        verbose=verbose,
+        callbacks=callbacks,
+        openai_api_key=config.get("api_key", "EMPTY"),
+        openai_api_base=config.get("api_base_url", fschat_openai_api_address()),
+        model_name=model_name,
+        temperature=temperature,
+        max_tokens=max_tokens,
+        openai_proxy=config.get("openai_proxy"),
+        echo=echo,
+        **kwargs
+    )
 
     return model
 
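The rewrite drops the Langchain-native branches (AzureOpenAI, OpenAI, Anthropic) entirely: every completion model now becomes an OpenAI-compatible client pointed at either a remote "openai-api" provider or the local fschat endpoint. A distilled, standalone sketch of the new resolution logic (the default address is an assumption standing in for fschat_openai_api_address()):

# a distilled sketch of the new dispatch; names follow the diff, defaults are illustrative
def resolve_endpoint(model_name: str, config: dict) -> tuple:
    """Return (model_name, api_key, api_base) for the OpenAI-compatible client."""
    if config.get("openai-api"):              # remote OpenAI-style provider
        model_name = config.get("model_name")
    api_key = config.get("api_key", "EMPTY")
    api_base = config.get("api_base_url", "http://127.0.0.1:20000/v1")  # assumed local fschat address
    return model_name, api_key, api_base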
@@ -630,7 +580,7 @@ def get_httpx_client(
     for host in os.environ.get("no_proxy", "").split(","):
         if host := host.strip():
             # default_proxies.update({host: None}) # Origin code
             default_proxies.update({'all://' + host: None}) # PR 1838 fix, if not add 'all://', httpx will raise error
 
     # merge default proxies with user provided proxies
     if isinstance(proxies, str):
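The 'all://' + host comment refers to httpx's pattern-based proxy routing: keys in the proxies mapping must be URL patterns, and a value of None disables proxying for matching hosts. A standalone sketch of that behavior (proxy address is illustrative; this uses the proxies= parameter from the httpx 0.24/0.25-era API):

import httpx

proxies = {
    "all://": "http://127.0.0.1:7890",  # default proxy for every scheme and host (illustrative address)
    "all://localhost": None,            # bypass the proxy for this host, mirroring the no_proxy handling
}
client = httpx.Client(proxies=proxies)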
@@ -714,7 +664,7 @@ def get_temp_dir(id: str = None) -> Tuple[str, str]:
     from configs.basic_config import BASE_TEMP_DIR
     import tempfile
 
     if id is not None:  # if the specified temp directory already exists, return it directly
         path = os.path.join(BASE_TEMP_DIR, id)
         if os.path.isdir(path):
             return path, id
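Only the early-return branch is visible here; the rest of the function is cut off. Under the assumption that it falls through to creating a fresh directory, a sketch of that step using the imported tempfile module (BASE_TEMP_DIR value and the helper name are stand-ins):

import os
import tempfile

BASE_TEMP_DIR = "/tmp/chatchat"  # assumption; the real value comes from configs.basic_config

def make_temp_dir() -> tuple:
    os.makedirs(BASE_TEMP_DIR, exist_ok=True)
    path = tempfile.mkdtemp(dir=BASE_TEMP_DIR)  # fresh directory under the base dir
    return path, os.path.basename(path)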
@@ -276,8 +276,8 @@ class ApiRequest:
             "max_tokens": max_tokens,
         }
 
-        print(f"received input message:")
-        pprint(data)
+        # print(f"received input message:")
+        # pprint(data)
 
         response = self.post(
             "/chat/fastchat",
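Commenting out the prints silences the request payload dump on every chat call. If the trace is ever needed again, a logging-based variant would keep it switchable; a minimal standard-library sketch (the logger name and sample payload are illustrative):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

data = {"query": "hello", "stream": True}  # stands in for the request payload built above
logger.debug("received input message: %r", data)  # emitted only when DEBUG is enabled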
@@ -288,16 +288,16 @@ class ApiRequest:
         return self._httpx_stream2generator(response)
 
     def chat_chat(
         self,
         query: str,
         conversation_id: str = None,
         history: List[Dict] = [],
         stream: bool = True,
         model: str = LLM_MODELS[0],
         temperature: float = TEMPERATURE,
         max_tokens: int = None,
         prompt_name: str = "default",
         **kwargs,
     ):
         '''
         Corresponds to the api.py /chat/chat endpoint  # TODO: consider whether to return JSON
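A hedged usage sketch of the client method above; the ApiRequest constructor argument and server address are assumptions for illustration:

# hypothetical: stream a reply from the /chat/chat endpoint
api = ApiRequest(base_url="http://127.0.0.1:7861")
for chunk in api.chat_chat("你好", model=LLM_MODELS[0], stream=True):
    print(chunk, end="", flush=True)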