update model_config.py.example
parent 7291e77978
commit 3504cb5274
@@ -43,12 +43,6 @@ llm_model_dict = {
         "api_key": "EMPTY"
     },
 
-    "chatglm-6b-int4": {
-        "local_model_path": "THUDM/chatglm-6b-int4",
-        "api_base_url": "http://localhost:8888/v1",  # "name" changed to the "api_base_url" used by the fastchat service
-        "api_key": "EMPTY"
-    },
-
     "chatglm2-6b": {
         "local_model_path": "THUDM/chatglm2-6b",
         "api_base_url": "http://localhost:8888/v1",  # the URL must match server_config.FSCHAT_OPENAI_API of the running fastchat server
@@ -61,12 +55,6 @@ llm_model_dict = {
         "api_key": "EMPTY"
     },
 
-    "vicuna-13b-hf": {
-        "local_model_path": "",
-        "api_base_url": "http://localhost:8888/v1",  # "name" changed to the "api_base_url" used by the fastchat service
-        "api_key": "EMPTY"
-    },
-
     # If calling chatgpt raises: urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='api.openai.com', port=443):
     # Max retries exceeded with url: /v1/chat/completions
     # then urllib3 must be downgraded to 1.25.11
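For context, each entry kept by this commit pairs a local model path with the address of fastchat's OpenAI-compatible API. The sketch below shows one way such an entry could be consumed; the chat() helper and the use of the legacy openai 0.x SDK are illustrative assumptions, not code from this repository.

# Minimal sketch (assumption, not repository code): look up one llm_model_dict
# entry and call fastchat's OpenAI-compatible endpoint with the legacy openai 0.x SDK.
import openai

llm_model_dict = {
    "chatglm2-6b": {
        "local_model_path": "THUDM/chatglm2-6b",
        "api_base_url": "http://localhost:8888/v1",  # must match server_config.FSCHAT_OPENAI_API
        "api_key": "EMPTY",
    },
}

def chat(model_name: str, prompt: str) -> str:
    cfg = llm_model_dict[model_name]
    openai.api_base = cfg["api_base_url"]  # point the client at the fastchat server
    openai.api_key = cfg["api_key"]        # fastchat accepts the "EMPTY" placeholder
    resp = openai.ChatCompletion.create(
        model=model_name,
        messages=[{"role": "user", "content": prompt}],
    )
    return resp["choices"][0]["message"]["content"]

print(chat("chatglm2-6b", "Hello"))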