Mirror of https://github.com/agentuniverse-ai/agentUniverse.git (synced 2026-02-09 01:59:19 +08:00)

Commit: support 4.5
@@ -38,7 +38,7 @@ The LLM model integration can be accomplished with simple configuration, current
 |<img src="https://github.com/user-attachments/assets/334c7f09-7eae-4a65-a70f-2e6531964224" height="25">|Gemini| Gemini 2.5 Pro、Gemini 2.0 Flash、Gemini 2.0 Flash Thinking、Gemini 1.5 Pro、… |
 |<img src="https://github.com/user-attachments/assets/8e41c73f-3103-4305-ad1f-56116ea55523" height="25">|Llama| llama3.3-70b-instruct、llama3.2-3b-instruct、llama3.2-1b-instruct、… |
 |<img src="https://github.com/user-attachments/assets/19d264c6-e499-4913-9d6d-314d392f2246" height="25">|KIMI| moonshot-v1-128k、moonshot-v1-32k、moonshot-v1-8k、… |
-|<img src="https://github.com/user-attachments/assets/79572d9a-29d5-4c0e-a336-ce3f8018fb05" height="25">|WenXin| ERNIE 4.0、ERNIE 4.0 Turbo、ERNIE 3.5、… |
+|<img src="https://github.com/user-attachments/assets/79572d9a-29d5-4c0e-a336-ce3f8018fb05" height="25">|WenXin| ERNIE 4.5 Turbo、ERNIE 4.5、ERNIE 4.0 Turbo、ERNIE 4.0、ERNIE 3.5、… |
 |<img src="https://github.com/user-attachments/assets/abb5311e-4d70-4e9c-8fca-e5129ae912fc" height="25">|chatglm| chatglm3-6b、chatglm-6b-v2、… |
 |<img src="https://github.com/user-attachments/assets/fe265f24-4ea6-4ff2-9b50-58ab6706a5f5" height="25">|BaiChuan| baichuan2-turbo、baichuan2-13b-chat-v1、… |
 |<img src="https://github.com/user-attachments/assets/41ffe268-392f-4ab9-b42d-e30dbd70d66b" height="25">|Doubao| Doubao-pro-128k、Doubao-pro-32k、Doubao-lite-128k、… |
@@ -35,7 +35,7 @@ The LLM model integration can be accomplished with simple configuration, current
 |<img src="https://github.com/user-attachments/assets/334c7f09-7eae-4a65-a70f-2e6531964224" height="25">|Gemini| Gemini 2.5 Pro、Gemini 2.0 Flash、Gemini 2.0 Flash Thinking、Gemini 1.5 Pro、… |
 |<img src="https://github.com/user-attachments/assets/8e41c73f-3103-4305-ad1f-56116ea55523" height="25">|Llama| llama3.3-70b-instruct、llama3.2-3b-instruct、llama3.2-1b-instruct、… |
 |<img src="https://github.com/user-attachments/assets/19d264c6-e499-4913-9d6d-314d392f2246" height="25">|KIMI| moonshot-v1-128k、moonshot-v1-32k、moonshot-v1-8k、… |
-|<img src="https://github.com/user-attachments/assets/79572d9a-29d5-4c0e-a336-ce3f8018fb05" height="25">|WenXin| ERNIE 4.0、ERNIE 4.0 Turbo、ERNIE 3.5、… |
+|<img src="https://github.com/user-attachments/assets/79572d9a-29d5-4c0e-a336-ce3f8018fb05" height="25">|WenXin| ERNIE 4.5 Turbo、ERNIE 4.5、ERNIE 4.0 Turbo、ERNIE 4.0、ERNIE 3.5、… |
 |<img src="https://github.com/user-attachments/assets/abb5311e-4d70-4e9c-8fca-e5129ae912fc" height="25">|chatglm| chatglm3-6b、chatglm-6b-v2、… |
 |<img src="https://github.com/user-attachments/assets/fe265f24-4ea6-4ff2-9b50-58ab6706a5f5" height="25">|BaiChuan| baichuan2-turbo、baichuan2-13b-chat-v1、… |
 |<img src="https://github.com/user-attachments/assets/41ffe268-392f-4ab9-b42d-e30dbd70d66b" height="25">|Doubao| Doubao-pro-128k、Doubao-pro-32k、Doubao-lite-128k、… |
@@ -21,7 +21,9 @@ from agentuniverse.llm.llm_output import LLMOutput
 from agentuniverse.llm.wenxin_langchain_instance import WenXinLangChainInstance
 TokenModelList = [
     'Ernie-4.0-8k',
+    'ernie-4.5-turbo-32k',
+    'ernie-4.5-8k-preview',
     'ernie-4.0-8k',
     'ernie-3.5-8k',
     'ernie-speed-8k',
     'ernie-speed-128k',
@@ -17,10 +17,12 @@ LLM_MODEL_NAME = {
                     'qwen2.5-72b-instruct', 'qwen2.5-32b-instruct', 'qwen2.5-14b-instruct', 'qwen2.5-7b-instruct'],
     'wenxin_llm': ['ERNIE-Speed-AppBuilder-8K-0516', 'ERNIE-Lite-8K-0725', 'ERNIE-Speed-128K', 'ERNIE-3.5-128K',
                    'ERNIE-3.5-8K-0701', 'ERNIE-4.0-8K-0613', 'ERNIE-4.0-8K-Preview', 'ERNIE-3.5-8K-Preview',
-                   'ERNIE-Tiny-8K', 'ERNIE-4.0-8K-Latest', 'ERNIE-4.0-Turbo-8K'],
+                   'ERNIE-Tiny-8K', 'ERNIE-4.0-8K-Latest', 'ERNIE-4.0-Turbo-8K', 'ERNIE-4.5-8K-Preview',
+                   'ERNIE-4.5-Turbo-32K', 'ERNIE-4.5-Turbo-128K'],
     'default_wenxin_llm': ['ERNIE-Speed-AppBuilder-8K-0516', 'ERNIE-Lite-8K-0725', 'ERNIE-Speed-128K', 'ERNIE-3.5-128K',
                            'ERNIE-3.5-8K-0701', 'ERNIE-4.0-8K-0613', 'ERNIE-4.0-8K-Preview', 'ERNIE-3.5-8K-Preview',
-                           'ERNIE-Tiny-8K', 'ERNIE-4.0-8K-Latest', 'ERNIE-4.0-Turbo-8K'],
+                           'ERNIE-Tiny-8K', 'ERNIE-4.0-8K-Latest', 'ERNIE-4.0-Turbo-8K', 'ERNIE-4.5-8K-Preview',
+                           'ERNIE-4.5-Turbo-32K', 'ERNIE-4.5-Turbo-128K'],
     'kimi_llm': ['moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k'],
     'default_kimi_llm': ['moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k'],
     'deepseek_llm': ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner', 'deepseek-v3', 'deepseek-r1',
@@ -0,0 +1,36 @@
name: 'ERNIE-4.5-8K-Preview'
description: 'Baidu ERNIE-4.5-8K-Preview model'
model_name: 'ERNIE-4.5-8K-Preview'
max_tokens: 1000
streaming: true
#
# There are three ways to configure the api_key:
#
# 1. Direct String Value:
#    Directly input the API key as a string.
#    Example: api_key: 'sk-xxxxxxxxxxxxxxxx'
#
# 2. Environment Variable Placeholder:
#    Use ${VARIABLE_NAME} syntax to load from environment variables. When agentUniverse starts,
#    it will automatically read the value from environment variables during YAML configuration parsing.
#    Example: api_key: '${QIANFAN_AK}'
#
# 3. Custom Function Loading:
#    Use @FUNC annotation to dynamically load the API key at runtime through a custom function.
#    Example: api_key: '@FUNC(load_api_key(model_name="wenxin"))'
#    The function should be defined in the YamlFuncExtension class within yaml_func_extension.py
#    When agentUniverse loads this configuration, it will:
#    - Parse the @FUNC annotation
#    - Execute the load_api_key function with the given argument
#    - Replace the annotation with the function's return value
#
# The same configuration methods apply to other parameters below (api_base, organization, proxy)
#
# Note: Current configuration uses the second method (Environment Variable Placeholder)
#
api_key: '${QIANFAN_AK}'
secret_key: '${QIANFAN_SK}'
metadata:
  type: 'LLM'
  module: 'agentuniverse.llm.default.wenxin_llm'
  class: 'WenXinLLM'
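The comments above describe a third loading option via the @FUNC annotation. The sketch below assumes a YamlFuncExtension shaped the way the comment suggests (the class and file names come from the comment itself; the function body is purely illustrative, not the project's actual implementation):

```python
import os


class YamlFuncExtension:
    """Holds functions that YAML values can reference via @FUNC(...)."""

    def load_api_key(self, model_name: str) -> str:
        # Hypothetical resolver: look the provider's key up in the environment.
        # Swap this body for a secrets-manager or key-vault call as needed.
        if model_name == "wenxin":
            return os.getenv("QIANFAN_AK", "")
        return ""
```

With a definition like this in place, api_key: '@FUNC(load_api_key(model_name="wenxin"))' would be replaced by the function's return value when the configuration is loaded.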
@@ -0,0 +1,36 @@
name: 'Ernie-4.5-turbo-128k'
description: 'Baidu Ernie-4.5-turbo-128k model'
model_name: 'Ernie-4.5-turbo-128k'
max_tokens: 1000
streaming: true
#
# There are three ways to configure the api_key:
#
# 1. Direct String Value:
#    Directly input the API key as a string.
#    Example: api_key: 'sk-xxxxxxxxxxxxxxxx'
#
# 2. Environment Variable Placeholder:
#    Use ${VARIABLE_NAME} syntax to load from environment variables. When agentUniverse starts,
#    it will automatically read the value from environment variables during YAML configuration parsing.
#    Example: api_key: '${QIANFAN_AK}'
#
# 3. Custom Function Loading:
#    Use @FUNC annotation to dynamically load the API key at runtime through a custom function.
#    Example: api_key: '@FUNC(load_api_key(model_name="wenxin"))'
#    The function should be defined in the YamlFuncExtension class within yaml_func_extension.py
#    When agentUniverse loads this configuration, it will:
#    - Parse the @FUNC annotation
#    - Execute the load_api_key function with the given argument
#    - Replace the annotation with the function's return value
#
# The same configuration methods apply to other parameters below (api_base, organization, proxy)
#
# Note: Current configuration uses the second method (Environment Variable Placeholder)
#
api_key: '${QIANFAN_AK}'
secret_key: '${QIANFAN_SK}'
metadata:
  type: 'LLM'
  module: 'agentuniverse.llm.default.wenxin_llm'
  class: 'WenXinLLM'
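For the ${QIANFAN_AK} and ${QIANFAN_SK} placeholders above to resolve, both variables must exist in the process environment before agentUniverse parses the YAML. A minimal sketch of that setup; the commented bootstrap call is an assumption about the entry point and may differ per project:

```python
import os

# Make the Qianfan credentials visible to the process before configuration
# loading, so the ${QIANFAN_AK} / ${QIANFAN_SK} placeholders can be substituted
# during YAML parsing.
os.environ.setdefault("QIANFAN_AK", "<your-qianfan-access-key>")
os.environ.setdefault("QIANFAN_SK", "<your-qianfan-secret-key>")

# Assumed entry point, adjust to your project's actual bootstrap:
# from agentuniverse.base.agentuniverse import AgentUniverse
# AgentUniverse().start(config_path='config/config.toml')
```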
@@ -0,0 +1,36 @@
name: 'Ernie-4.5-turbo-32k'
description: 'Baidu Ernie-4.5-turbo-32k model'
model_name: 'Ernie-4.5-turbo-32k'
max_tokens: 1000
streaming: true
#
# There are three ways to configure the api_key:
#
# 1. Direct String Value:
#    Directly input the API key as a string.
#    Example: api_key: 'sk-xxxxxxxxxxxxxxxx'
#
# 2. Environment Variable Placeholder:
#    Use ${VARIABLE_NAME} syntax to load from environment variables. When agentUniverse starts,
#    it will automatically read the value from environment variables during YAML configuration parsing.
#    Example: api_key: '${QIANFAN_AK}'
#
# 3. Custom Function Loading:
#    Use @FUNC annotation to dynamically load the API key at runtime through a custom function.
#    Example: api_key: '@FUNC(load_api_key(model_name="wenxin"))'
#    The function should be defined in the YamlFuncExtension class within yaml_func_extension.py
#    When agentUniverse loads this configuration, it will:
#    - Parse the @FUNC annotation
#    - Execute the load_api_key function with the given argument
#    - Replace the annotation with the function's return value
#
# The same configuration methods apply to other parameters below (api_base, organization, proxy)
#
# Note: Current configuration uses the second method (Environment Variable Placeholder)
#
api_key: '${QIANFAN_AK}'
secret_key: '${QIANFAN_SK}'
metadata:
  type: 'LLM'
  module: 'agentuniverse.llm.default.wenxin_llm'
  class: 'WenXinLLM'
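Once registered, a configuration like the one above is referenced by its name field from agents or application code. A sketch of that lookup, assuming agentUniverse's component-manager pattern; the import path and attribute names may differ slightly by version:

```python
# Assumed import path and attribute names; adjust to your agentUniverse version.
from agentuniverse.llm.llm_manager import LLMManager

# Fetch the LLM component registered under the YAML 'name' field above.
llm = LLMManager().get_instance_obj("Ernie-4.5-turbo-32k")
if llm is not None:
    # model_name and max_tokens mirror the YAML fields.
    print(llm.model_name, llm.max_tokens)
```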
@@ -0,0 +1,36 @@
name: 'ERNIE-4.5-8K-Preview'
description: 'Baidu ERNIE-4.5-8K-Preview model'
model_name: 'ERNIE-4.5-8K-Preview'
max_tokens: 1000
streaming: true
#
# There are three ways to configure the api_key:
#
# 1. Direct String Value:
#    Directly input the API key as a string.
#    Example: api_key: 'sk-xxxxxxxxxxxxxxxx'
#
# 2. Environment Variable Placeholder:
#    Use ${VARIABLE_NAME} syntax to load from environment variables. When agentUniverse starts,
#    it will automatically read the value from environment variables during YAML configuration parsing.
#    Example: api_key: '${QIANFAN_AK}'
#
# 3. Custom Function Loading:
#    Use @FUNC annotation to dynamically load the API key at runtime through a custom function.
#    Example: api_key: '@FUNC(load_api_key(model_name="wenxin"))'
#    The function should be defined in the YamlFuncExtension class within yaml_func_extension.py
#    When agentUniverse loads this configuration, it will:
#    - Parse the @FUNC annotation
#    - Execute the load_api_key function with the given argument
#    - Replace the annotation with the function's return value
#
# The same configuration methods apply to other parameters below (api_base, organization, proxy)
#
# Note: Current configuration uses the second method (Environment Variable Placeholder)
#
api_key: '${QIANFAN_AK}'
secret_key: '${QIANFAN_SK}'
metadata:
  type: 'LLM'
  module: 'agentuniverse.llm.default.wenxin_llm'
  class: 'WenXinLLM'
@@ -0,0 +1,36 @@
name: 'Ernie-4.5-turbo-128k'
description: 'Baidu Ernie-4.5-turbo-128k model'
model_name: 'Ernie-4.5-turbo-128k'
max_tokens: 1000
streaming: true
#
# There are three ways to configure the api_key:
#
# 1. Direct String Value:
#    Directly input the API key as a string.
#    Example: api_key: 'sk-xxxxxxxxxxxxxxxx'
#
# 2. Environment Variable Placeholder:
#    Use ${VARIABLE_NAME} syntax to load from environment variables. When agentUniverse starts,
#    it will automatically read the value from environment variables during YAML configuration parsing.
#    Example: api_key: '${QIANFAN_AK}'
#
# 3. Custom Function Loading:
#    Use @FUNC annotation to dynamically load the API key at runtime through a custom function.
#    Example: api_key: '@FUNC(load_api_key(model_name="wenxin"))'
#    The function should be defined in the YamlFuncExtension class within yaml_func_extension.py
#    When agentUniverse loads this configuration, it will:
#    - Parse the @FUNC annotation
#    - Execute the load_api_key function with the given argument
#    - Replace the annotation with the function's return value
#
# The same configuration methods apply to other parameters below (api_base, organization, proxy)
#
# Note: Current configuration uses the second method (Environment Variable Placeholder)
#
api_key: '${QIANFAN_AK}'
secret_key: '${QIANFAN_SK}'
metadata:
  type: 'LLM'
  module: 'agentuniverse.llm.default.wenxin_llm'
  class: 'WenXinLLM'
@@ -0,0 +1,36 @@
name: 'Ernie-4.5-turbo-32k'
description: 'Baidu Ernie-4.5-turbo-32k model'
model_name: 'Ernie-4.5-turbo-32k'
max_tokens: 1000
streaming: true
#
# There are three ways to configure the api_key:
#
# 1. Direct String Value:
#    Directly input the API key as a string.
#    Example: api_key: 'sk-xxxxxxxxxxxxxxxx'
#
# 2. Environment Variable Placeholder:
#    Use ${VARIABLE_NAME} syntax to load from environment variables. When agentUniverse starts,
#    it will automatically read the value from environment variables during YAML configuration parsing.
#    Example: api_key: '${QIANFAN_AK}'
#
# 3. Custom Function Loading:
#    Use @FUNC annotation to dynamically load the API key at runtime through a custom function.
#    Example: api_key: '@FUNC(load_api_key(model_name="wenxin"))'
#    The function should be defined in the YamlFuncExtension class within yaml_func_extension.py
#    When agentUniverse loads this configuration, it will:
#    - Parse the @FUNC annotation
#    - Execute the load_api_key function with the given argument
#    - Replace the annotation with the function's return value
#
# The same configuration methods apply to other parameters below (api_base, organization, proxy)
#
# Note: Current configuration uses the second method (Environment Variable Placeholder)
#
api_key: '${QIANFAN_AK}'
secret_key: '${QIANFAN_SK}'
metadata:
  type: 'LLM'
  module: 'agentuniverse.llm.default.wenxin_llm'
  class: 'WenXinLLM'