Bug fixes and prompt revisions (#2230)
* Bump the Langchain dependency to version 0.0.343 or above * Revise some prompts and dependencies, and fix the openai exception issue * Comment out the debug printing of `data`
This commit is contained in:
parent c4fe3393b3
commit 40918c21de
@@ -1,7 +1,6 @@
# Prompt templates use Jinja2 syntax; in short, double curly braces replace the single braces of an f-string.
# This config file supports hot reloading: after editing a prompt template there is no need to restart the service.


# Variables supported in LLM chat:
# - input: the user's input
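These comments describe the template syntax: double curly braces (Jinja2) instead of the f-string's single braces. A minimal sketch, assuming langchain's `PromptTemplate` with `template_format="jinja2"` and an illustrative template string:

```python
from langchain.prompts import PromptTemplate  # requires the jinja2 package for this format

# Jinja2-style template: {{ input }} instead of the f-string's {input}
template = PromptTemplate.from_template(
    "Answer the user's question: {{ input }}",
    template_format="jinja2",
)

print(template.format(input="What is Jinja2?"))
# -> Answer the user's question: What is Jinja2?
```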
@@ -76,9 +75,6 @@ Answer the following questions as best you can. If it is in order, you can use s

{tools}

-Please note that the "知识库查询工具" contains information about "西交利物浦大学"; if a question is asked about it, you must answer using the knowledge base.
-Please note that the "天气查询工具" can only be used once per Question.
-
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do and what tools to use.
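The Question/Thought lines are the standard ReAct scaffold, and {tools} is filled with one line per available tool. A minimal sketch of that rendering step; the tool registry here is hypothetical:

```python
# Hypothetical tool registry; real tools come from the agent configuration.
tools = [
    ("search", "Look up facts on the web."),
    ("calculator", "Evaluate arithmetic expressions."),
]

# Render the {tools} slot the way the prompt expects:
# one "name: description" line per tool.
rendered_tools = "\n".join(f"{name}: {desc}" for name, desc in tools)

prompt = (
    "Answer the following questions as best you can.\n\n"
    f"{rendered_tools}\n\n"
    "Use the following format:\n"
    "Question: the input question you must answer\n"
    "Thought: you should always think about what to do and what tools to use.\n"
)
```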
@@ -1,6 +1,6 @@
# API requirements

-langchain>=0.0.342
+langchain>=0.0.343
langchain-experimental>=0.0.42
fschat[model_worker]>=0.2.33
xformers>=0.0.22.post7
@@ -1,6 +1,6 @@
# API requirements

-langchain>=0.0.342
+langchain>=0.0.343
langchain-experimental>=0.0.42
fschat[model_worker]>=0.2.33
xformers>=0.0.22.post7
@@ -1,4 +1,4 @@
-langchain>=0.0.342
+langchain>=0.0.343
fschat>=0.2.33
openai>=1.3.5
# sentence_transformers
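Since only minimum pins changed, a quick way to confirm the installed langchain actually meets the new floor; a minimal stdlib-only sketch:

```python
from importlib.metadata import version  # Python 3.8+

# Compare the installed version against the new minimum pin.
installed = tuple(int(part) for part in version("langchain").split(".")[:3])
assert installed >= (0, 0, 343), f"langchain {version('langchain')} is older than 0.0.343"
```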
@@ -40,11 +40,9 @@ def get_ChatOpenAI(
        verbose: bool = True,
        **kwargs: Any,
) -> ChatOpenAI:
    ## The following models are natively supported by Langchain and do not go through the Fschat wrapper
    config_models = list_config_llm_models()

    ## Models not natively supported by Langchain go through the Fschat wrapper
    config = get_model_worker_config(model_name)
    if config.get("openai-api"):
        model_name = config.get("model_name")
    model = ChatOpenAI(
        streaming=streaming,
        verbose=verbose,
@@ -57,10 +55,7 @@ def get_ChatOpenAI(
        openai_proxy=config.get("openai_proxy"),
        **kwargs
    )

    return model


def get_OpenAI(
        model_name: str,
        temperature: float,
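A sketch of calling get_ChatOpenAI against an fschat-served model; the model name and sampling parameters are illustrative:

```python
# Hypothetical usage; "chatglm3-6b" stands in for any model served
# through the fschat openai-compatible API.
llm = get_ChatOpenAI(
    model_name="chatglm3-6b",
    temperature=0.7,
    streaming=True,
)
answer = llm.predict("Hello!")  # blocks until the model worker responds
```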
@@ -71,67 +66,22 @@ def get_OpenAI(
        verbose: bool = True,
        **kwargs: Any,
) -> OpenAI:
-    ## The following models are natively supported by Langchain and do not go through the Fschat wrapper
-    config_models = list_config_llm_models()
-    if model_name in config_models.get("langchain", {}):
-        config = config_models["langchain"][model_name]
-        if model_name == "Azure-OpenAI":
-            model = AzureOpenAI(
-                streaming=streaming,
-                verbose=verbose,
-                callbacks=callbacks,
-                deployment_name=config.get("deployment_name"),
-                model_version=config.get("model_version"),
-                openai_api_type=config.get("openai_api_type"),
-                openai_api_base=config.get("api_base_url"),
-                openai_api_version=config.get("api_version"),
-                openai_api_key=config.get("api_key"),
-                openai_proxy=config.get("openai_proxy"),
-                temperature=temperature,
-                max_tokens=max_tokens,
-                echo=echo,
-            )
-
-        elif model_name == "OpenAI":
-            model = OpenAI(
-                streaming=streaming,
-                verbose=verbose,
-                callbacks=callbacks,
-                model_name=config.get("model_name"),
-                openai_api_base=config.get("api_base_url"),
-                openai_api_key=config.get("api_key"),
-                openai_proxy=config.get("openai_proxy"),
-                temperature=temperature,
-                max_tokens=max_tokens,
-                echo=echo,
-            )
-        elif model_name == "Anthropic":
-            model = Anthropic(
-                streaming=streaming,
-                verbose=verbose,
-                callbacks=callbacks,
-                model_name=config.get("model_name"),
-                anthropic_api_key=config.get("api_key"),
-                echo=echo,
-            )
-        ## TODO: support other models natively supported by Langchain
-    else:
-        ## Models not natively supported by Langchain go through the Fschat wrapper
-        config = get_model_worker_config(model_name)
-        model = OpenAI(
-            streaming=streaming,
-            verbose=verbose,
-            callbacks=callbacks,
-            openai_api_key=config.get("api_key", "EMPTY"),
-            openai_api_base=config.get("api_base_url", fschat_openai_api_address()),
-            model_name=model_name,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            openai_proxy=config.get("openai_proxy"),
-            echo=echo,
-            **kwargs
-        )
-
+    config = get_model_worker_config(model_name)
+    if config.get("openai-api"):
+        model_name = config.get("model_name")
+    model = OpenAI(
+        streaming=streaming,
+        verbose=verbose,
+        callbacks=callbacks,
+        openai_api_key=config.get("api_key", "EMPTY"),
+        openai_api_base=config.get("api_base_url", fschat_openai_api_address()),
+        model_name=model_name,
+        temperature=temperature,
+        max_tokens=max_tokens,
+        openai_proxy=config.get("openai_proxy"),
+        echo=echo,
+        **kwargs
+    )
    return model

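With the Langchain-native branches removed, get_OpenAI always targets the fschat openai-compatible server. For reference, a minimal sketch of the equivalent raw call with the openai v1 client; the address, port, and model name are illustrative stand-ins for this deployment's config:

```python
import openai  # openai>=1.3.5, per the bumped requirements

# Hypothetical endpoint; fschat's openai-compatible server commonly listens
# on port 20000 in this project, but check your own configuration.
client = openai.OpenAI(
    base_url="http://127.0.0.1:20000/v1",
    api_key="EMPTY",  # fschat accepts any non-empty key
)
completion = client.completions.create(
    model="chatglm3-6b",  # illustrative model name
    prompt="Say hello.",
    max_tokens=32,
)
print(completion.choices[0].text)
```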
@@ -630,7 +580,7 @@ def get_httpx_client(
    for host in os.environ.get("no_proxy", "").split(","):
        if host := host.strip():
            # default_proxies.update({host: None}) # Origin code
-            default_proxies.update({'all://' + host: None})  # PR 1838 fix: without the 'all://' prefix, httpx raises an error
+            default_proxies.update({'all://' + host: None})  # PR 1838 fix: without the 'all://' prefix, httpx raises an error

    # merge default proxies with user provided proxies
    if isinstance(proxies, str):
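The 'all://' prefix is needed because httpx keys its proxy routing table by URL pattern, and a bare hostname is not a valid pattern. A minimal sketch of the mapping this loop builds, assuming an httpx version that still accepts the proxies argument (later releases deprecate it in favor of mounts):

```python
import os
import httpx

# Build proxy exclusions from no_proxy, mirroring the pattern above.
# A value of None for 'all://<host>' tells httpx to bypass the proxy for that host.
os.environ.setdefault("no_proxy", "localhost,127.0.0.1")
default_proxies = {}
for host in os.environ.get("no_proxy", "").split(","):
    if host := host.strip():
        default_proxies.update({"all://" + host: None})

client = httpx.Client(proxies=default_proxies)
```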
@@ -714,7 +664,7 @@ def get_temp_dir(id: str = None) -> Tuple[str, str]:
    from configs.basic_config import BASE_TEMP_DIR
    import tempfile

-    if id is not None:  # If the specified temp directory already exists, return it directly
+    if id is not None:  # If the specified temp directory already exists, return it directly
        path = os.path.join(BASE_TEMP_DIR, id)
        if os.path.isdir(path):
            return path, id
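Only the reuse branch is visible in this hunk. A hedged reconstruction of the whole helper, assuming the missing branch creates a fresh directory with tempfile.mkdtemp; BASE_TEMP_DIR is a stand-in for the real config value:

```python
import os
import tempfile
from typing import Tuple

BASE_TEMP_DIR = "/tmp/chatchat"  # stand-in for configs.basic_config.BASE_TEMP_DIR

def get_temp_dir(id: str = None) -> Tuple[str, str]:
    # Reuse the directory if the caller passed a known id.
    if id is not None:
        path = os.path.join(BASE_TEMP_DIR, id)
        if os.path.isdir(path):
            return path, id
    # Otherwise create a new unique directory under the base dir.
    os.makedirs(BASE_TEMP_DIR, exist_ok=True)
    path = tempfile.mkdtemp(dir=BASE_TEMP_DIR)
    return path, os.path.basename(path)
```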
@@ -276,8 +276,8 @@ class ApiRequest:
            "max_tokens": max_tokens,
        }

-        print(f"received input message:")
-        pprint(data)
+        # print(f"received input message:")
+        # pprint(data)

        response = self.post(
            "/chat/fastchat",
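Commenting out the prints silences the payload dump entirely. A possible alternative (not what this commit does) is to route it through the logging module, so it can be re-enabled at DEBUG level; a minimal sketch:

```python
import logging
from pprint import pformat

logger = logging.getLogger(__name__)

# At DEBUG level this emits the payload; at INFO and above it stays silent.
def log_request_payload(data: dict) -> None:
    logger.debug("received input message:\n%s", pformat(data))
```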
@@ -288,16 +288,16 @@ class ApiRequest:
        return self._httpx_stream2generator(response)

    def chat_chat(
-            self,
-            query: str,
-            conversation_id: str = None,
-            history: List[Dict] = [],
-            stream: bool = True,
-            model: str = LLM_MODELS[0],
-            temperature: float = TEMPERATURE,
-            max_tokens: int = None,
-            prompt_name: str = "default",
-            **kwargs,
+            self,
+            query: str,
+            conversation_id: str = None,
+            history: List[Dict] = [],
+            stream: bool = True,
+            model: str = LLM_MODELS[0],
+            temperature: float = TEMPERATURE,
+            max_tokens: int = None,
+            prompt_name: str = "default",
+            **kwargs,
    ):
        '''
        Corresponds to the api.py /chat/chat endpoint  # TODO: consider whether to return json
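For context, a hedged sketch of how a caller might consume chat_chat's streaming response; the base URL is the project's usual default, and the exact chunk type may vary by version:

```python
from webui_pages.utils import ApiRequest

api = ApiRequest(base_url="http://127.0.0.1:7861")  # default API address in this project
for chunk in api.chat_chat(query="Introduce yourself.", stream=True):
    # Depending on version, chunks may be plain text or small dicts.
    print(chunk, end="", flush=True)
```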