@@ -18,72 +18,93 @@ class ModelConfig(BaseModel):
18
18
19
19
20
20
def _chat_model(context_size: int, max_tokens: int, features: list) -> ModelConfig:
    """Build a CHAT-mode ModelConfig with the given context/output limits and features."""
    return ModelConfig(
        properties=ModelProperties(
            context_size=context_size,
            max_tokens=max_tokens,
            mode=LLMMode.CHAT,
        ),
        features=features,
    )


# Short aliases so the table below stays readable; every model advertises
# AGENT_THOUGHT, and some add VISION or TOOL_CALL on top.
_AGENT = ModelFeature.AGENT_THOUGHT
_VISION = ModelFeature.VISION
_TOOLS = ModelFeature.TOOL_CALL

# Static registry: model display name -> its capabilities and token limits.
configs: dict[str, ModelConfig] = {
    "Doubao-1.5-vision-pro-32k": _chat_model(32768, 12288, [_AGENT, _VISION]),
    "Doubao-1.5-pro-32k": _chat_model(32768, 12288, [_AGENT]),
    "Doubao-1.5-lite-32k": _chat_model(32768, 12288, [_AGENT]),
    "Doubao-1.5-pro-256k": _chat_model(262144, 12288, [_AGENT]),
    "Doubao-vision-pro-32k": _chat_model(32768, 4096, [_AGENT, _VISION]),
    "Doubao-vision-lite-32k": _chat_model(32768, 4096, [_AGENT, _VISION]),
    "Doubao-pro-4k": _chat_model(4096, 4096, [_AGENT, _TOOLS]),
    "Doubao-lite-4k": _chat_model(4096, 4096, [_AGENT, _TOOLS]),
    "Doubao-pro-32k": _chat_model(32768, 4096, [_AGENT, _TOOLS]),
    "Doubao-lite-32k": _chat_model(32768, 4096, [_AGENT, _TOOLS]),
    "Doubao-pro-256k": _chat_model(262144, 4096, [_AGENT]),
    "Doubao-pro-128k": _chat_model(131072, 4096, [_AGENT, _TOOLS]),
    "Doubao-lite-128k": _chat_model(131072, 4096, [_AGENT]),
    "Skylark2-pro-4k": _chat_model(4096, 4096, [_AGENT]),
    "Llama3-8B": _chat_model(8192, 8192, [_AGENT]),
    "Llama3-70B": _chat_model(8192, 8192, [_AGENT]),
    "Moonshot-v1-8k": _chat_model(8192, 4096, [_AGENT, _TOOLS]),
    "Moonshot-v1-32k": _chat_model(32768, 16384, [_AGENT, _TOOLS]),
    "Moonshot-v1-128k": _chat_model(131072, 65536, [_AGENT, _TOOLS]),
    "GLM3-130B": _chat_model(8192, 4096, [_AGENT, _TOOLS]),
    "GLM3-130B-Fin": _chat_model(8192, 4096, [_AGENT, _TOOLS]),
    "Mistral-7B": _chat_model(8192, 2048, [_AGENT]),
}
89
110
0 commit comments