@@ -2,8 +2,9 @@
 import OpenAI from 'openai';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import Qwen from '@/config/modelProviders/qwen';
-import { AgentRuntimeErrorType, ModelProvider } from '@/libs/agent-runtime';
+import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
+import { ModelProvider } from '@/libs/agent-runtime';
+import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
 
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeQwenAI } from './index';
@@ -16,7 +17,7 @@ const invalidErrorType = AgentRuntimeErrorType.InvalidProviderAPIKey;
 // Mock the console.error to avoid polluting test output
 vi.spyOn(console, 'error').mockImplementation(() => {});
 
-let instance: LobeQwenAI;
+let instance: LobeOpenAICompatibleRuntime;
 
 beforeEach(() => {
   instance = new LobeQwenAI({ apiKey: 'test' });
@@ -40,183 +41,7 @@ describe('LobeQwenAI', () => {
     });
   });
 
-  describe('models', () => {
-    it('should correctly list available models', async () => {
-      const instance = new LobeQwenAI({ apiKey: 'test_api_key' });
-      vi.spyOn(instance, 'models').mockResolvedValue(Qwen.chatModels);
-
-      const models = await instance.models();
-      expect(models).toEqual(Qwen.chatModels);
-    });
-  });
-
   describe('chat', () => {
-    describe('Params', () => {
-      it('should call llms with proper options', async () => {
-        const mockStream = new ReadableStream();
-        const mockResponse = Promise.resolve(mockStream);
-
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-        const result = await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 0.6,
-          top_p: 0.7,
-        });
-
-        // Assert
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          {
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
-            temperature: 0.6,
-            stream: true,
-            top_p: 0.7,
-            result_format: 'message',
-          },
-          { headers: { Accept: '*/*' } },
-        );
-        expect(result).toBeInstanceOf(Response);
-      });
-
-      it('should call vlms with proper options', async () => {
-        const mockStream = new ReadableStream();
-        const mockResponse = Promise.resolve(mockStream);
-
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue(mockResponse);
-
-        const result = await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-vl-plus',
-          temperature: 0.6,
-          top_p: 0.7,
-        });
-
-        // Assert
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          {
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-vl-plus',
-            stream: true,
-          },
-          { headers: { Accept: '*/*' } },
-        );
-        expect(result).toBeInstanceOf(Response);
-      });
-
-      it('should transform non-streaming response to stream correctly', async () => {
-        const mockResponse = {
-          id: 'chatcmpl-fc539f49-51a8-94be-8061',
-          object: 'chat.completion',
-          created: 1719901794,
-          model: 'qwen-turbo',
-          choices: [
-            {
-              index: 0,
-              message: { role: 'assistant', content: 'Hello' },
-              finish_reason: 'stop',
-              logprobs: null,
-            },
-          ],
-        } as OpenAI.ChatCompletion;
-        vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-          mockResponse as any,
-        );
-
-        const result = await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 0.6,
-          stream: false,
-        });
-
-        const decoder = new TextDecoder();
-        const reader = result.body!.getReader();
-        const stream: string[] = [];
-
-        while (true) {
-          const { value, done } = await reader.read();
-          if (done) break;
-          stream.push(decoder.decode(value));
-        }
-
-        expect(stream).toEqual([
-          'id: chatcmpl-fc539f49-51a8-94be-8061\n',
-          'event: text\n',
-          'data: "Hello"\n\n',
-          'id: chatcmpl-fc539f49-51a8-94be-8061\n',
-          'event: stop\n',
-          'data: "stop"\n\n',
-        ]);
-
-        expect((await reader.read()).done).toBe(true);
-      });
-
-      it('should set temperature to undefined if temperature is 0 or >= 2', async () => {
-        const temperatures = [0, 2, 3];
-        const expectedTemperature = undefined;
-
-        for (const temp of temperatures) {
-          vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-            new ReadableStream() as any,
-          );
-          await instance.chat({
-            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
-            temperature: temp,
-          });
-          expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-            expect.objectContaining({
-              messages: expect.any(Array),
-              model: 'qwen-turbo',
-              temperature: expectedTemperature,
-            }),
-            expect.any(Object),
-          );
-        }
-      });
-
-      it('should set temperature to original temperature', async () => {
-        vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
-          new ReadableStream() as any,
-        );
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 1.5,
-        });
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          expect.objectContaining({
-            messages: expect.any(Array),
-            model: 'qwen-turbo',
-            temperature: 1.5,
-          }),
-          expect.any(Object),
-        );
-      });
-
-      it('should set temperature to Float', async () => {
-        const createMock = vi.fn().mockResolvedValue(new ReadableStream() as any);
-        vi.spyOn(instance['client'].chat.completions, 'create').mockImplementation(createMock);
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
-          temperature: 1,
-        });
-        expect(instance['client'].chat.completions.create).toHaveBeenCalledWith(
-          expect.objectContaining({
-            messages: expect.any(Array),
-            model: 'qwen-turbo',
-            temperature: expect.any(Number),
-          }),
-          expect.any(Object),
-        );
-        const callArgs = createMock.mock.calls[0][0];
-        expect(Number.isInteger(callArgs.temperature)).toBe(false); // Temperature is always not an integer
-      });
-    });
-
     describe('Error', () => {
       it('should return QwenBizError with an openai error response when OpenAI.APIError is thrown', async () => {
         // Arrange
@@ -238,7 +63,7 @@ describe('LobeQwenAI', () => {
         try {
           await instance.chat({
             messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
+            model: 'qwen-turbo-latest',
            temperature: 0.999,
          });
        } catch (e) {
@@ -278,7 +103,7 @@ describe('LobeQwenAI', () => {
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
+            model: 'qwen-turbo-latest',
            temperature: 0.999,
          });
        } catch (e) {
@@ -304,7 +129,8 @@ describe('LobeQwenAI', () => {
 
        instance = new LobeQwenAI({
          apiKey: 'test',
-          baseURL: defaultBaseURL,
+
+          baseURL: 'https://api.abc.com/v1',
        });
 
        vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
@@ -313,13 +139,12 @@ describe('LobeQwenAI', () => {
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
+            model: 'qwen-turbo-latest',
            temperature: 0.999,
          });
        } catch (e) {
          expect(e).toEqual({
-            /* Desensitizing is unnecessary for a public-accessible gateway endpoint. */
-            endpoint: defaultBaseURL,
+            endpoint: 'https://api.***.com/v1',
            error: {
              cause: { message: 'api is undefined' },
              stack: 'abc',
@@ -339,7 +164,7 @@ describe('LobeQwenAI', () => {
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
+            model: 'qwen-turbo-latest',
            temperature: 0.999,
          });
        } catch (e) {
@@ -362,7 +187,7 @@ describe('LobeQwenAI', () => {
        try {
          await instance.chat({
            messages: [{ content: 'Hello', role: 'user' }],
-            model: 'qwen-turbo',
+            model: 'qwen-turbo-latest',
            temperature: 0.999,
          });
        } catch (e) {
@@ -410,7 +235,7 @@ describe('LobeQwenAI', () => {
        // Hypothetical test invocation; adjust it to the actual situation as needed
        await instance.chat({
          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'qwen-turbo',
+          model: 'qwen-turbo-latest',
          stream: true,
          temperature: 0.999,
        });
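
Note on the `endpoint: 'https://api.***.com/v1'` expectation above: the updated test assumes the runtime desensitizes a user-supplied `baseURL` before echoing it in error payloads. The test pins only the output shape, not the implementation; a minimal TypeScript sketch of such masking, using a hypothetical `desensitizeUrl` helper, might look like this:

// Hypothetical sketch: mask the inner hostname labels of a base URL so that
// 'https://api.abc.com/v1' is reported as 'https://api.***.com/v1'.
const desensitizeUrl = (baseURL: string): string => {
  const url = new URL(baseURL);
  const labels = url.hostname.split('.');
  // Keep the leading label and the TLD; mask everything in between.
  const masked = labels.map((label, i) =>
    i === 0 || i === labels.length - 1 ? label : '***',
  );
  return `${url.protocol}//${masked.join('.')}${url.pathname}`;
};

desensitizeUrl('https://api.abc.com/v1'); // => 'https://api.***.com/v1'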