# Cognitive Kernel-Pro - minimal configuration example
# Only the core model needs to be configured to run


[ck.model]
# Main model configuration - required
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen3-235B-A22B-Instruct-2507"

# Optional: model parameters
[ck.model.extract_body]
temperature = 0.6
max_tokens = 8192

[web]
# Web agent configuration
max_steps = 20
use_multimodal = "auto"  # off | yes | auto
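# "auto" is assumed to use the multimodal model only when
# [web.model_multimodal] is configured; set "yes" to always use it:
# use_multimodal = "yes"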

[web.model]
# Web agent model configuration
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen3-235B-A22B-Instruct-2507"
request_timeout = 600
max_retry_times = 5
max_token_num = 8192

[web.model.extract_body]
# Web model parameters
temperature = 0.0
top_p = 0.95
max_tokens = 8192

[web.model_multimodal]
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen2.5-VL-72B-Instruct"  # 或其他支持视觉的模型
request_timeout = 600
max_retry_times = 5
max_token_num = 8192

[web.model_multimodal.extract_body]
temperature = 0.0
top_p = 0.95
max_tokens = 8192

[file]
# File agent configuration
max_steps = 16
max_file_read_tokens = 3000
max_file_screenshots = 2
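# Assumed semantics: max_file_read_tokens caps how much of a file is read
# per step; max_file_screenshots caps rendered screenshots per file.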

[file.model]
# File agent model configuration
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen3-235B-A22B-Instruct-2507"
request_timeout = 600
max_retry_times = 5
max_token_num = 8192

[file.model.extract_body]
temperature = 0.3
top_p = 0.95
max_tokens = 8192

[file.model_multimodal]
# File agent multimodal model configuration
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen2.5-VL-72B-Instruct"  # same vision model as the web agent
request_timeout = 600
max_retry_times = 5
max_token_num = 8192

[file.model_multimodal.extract_body]
temperature = 0.0
top_p = 0.95
max_tokens = 8192

# Optional: search backend
[search]
backend = "duckduckgo"  # duckduckgo | google
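# The google backend typically requires search API credentials; the exact
# keys are project-specific and not shown here (see the project docs).
# backend = "google"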