parent 886b1cbc54
commit dbc4bed40f
@@ -236,6 +236,47 @@ describe('builder helper functions', () => {
     })
   })
 
+  it('should return the error on status not ok', async () => {
+    const request = { body: { model: 'model1' } }
+    const mockSend = jest.fn()
+    const reply = {
+      code: jest.fn().mockReturnThis(),
+      send: jest.fn(),
+      headers: jest.fn().mockReturnValue({
+        send: mockSend,
+      }),
+      raw: {
+        writeHead: jest.fn(),
+        pipe: jest.fn(),
+      },
+    }
+
+    ;(existsSync as jest.Mock).mockReturnValue(true)
+    ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
+    ;(readFileSync as jest.Mock).mockReturnValue(
+      JSON.stringify({ id: 'model1', engine: 'openai' })
+    )
+
+    // Mock fetch
+    const fetch = require('node-fetch')
+    fetch.mockResolvedValue({
+      status: 400,
+      headers: new Map([
+        ['content-type', 'application/json'],
+        ['x-request-id', '123456'],
+      ]),
+      body: { pipe: jest.fn() },
+      text: jest.fn().mockResolvedValue({ error: 'Mock error response' }),
+    })
+    await chatCompletions(request, reply)
+    expect(reply.code).toHaveBeenCalledWith(400)
+    expect(mockSend).toHaveBeenCalledWith(
+      expect.objectContaining({
+        error: 'Mock error response',
+      })
+    )
+  })
+
   it('should return the chat completions', async () => {
     const request = { body: { model: 'model1' } }
     const reply = {
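Note on the mock setup: `require('node-fetch')` only yields a Jest mock exposing `mockResolvedValue` because the spec file mocks the module at the top level, which this hunk does not show. A minimal sketch of the assumed setup (the file name is illustrative, not taken from this commit):

    // chat-completions.spec.ts (hypothetical name)
    jest.mock('node-fetch') // auto-mock: the module's exported fetch becomes a jest.fn()
    jest.mock('fs')         // lets existsSync/readdirSync/readFileSync be cast to jest.Mock

    import { existsSync, readdirSync, readFileSync } from 'fs'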
@@ -353,8 +353,10 @@ export const chatCompletions = async (request: any, reply: any) => {
     body: JSON.stringify(request.body),
   })
   if (response.status !== 200) {
-    console.error(response)
-    reply.code(400).send(response)
+    // Forward the error response to client via reply
+    const responseBody = await response.text()
+    const responseHeaders = Object.fromEntries(response.headers)
+    reply.code(response.status).headers(responseHeaders).send(responseBody)
   } else {
     reply.raw.writeHead(200, {
       'Content-Type': request.body.stream === true ? 'text/event-stream' : 'application/json',
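This replacement works because a fetch `Headers` object is iterable over `[name, value]` pairs, so `Object.fromEntries(response.headers)` flattens it into the plain object that Fastify's `reply.headers()` expects. A standalone illustration, using a `Map` exactly as the test above mocks it:

    const headers = new Map([
      ['content-type', 'application/json'],
      ['x-request-id', '123456'],
    ])
    const plain = Object.fromEntries(headers)
    // => { 'content-type': 'application/json', 'x-request-id': '123456' }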
@@ -67,6 +67,11 @@ export const startServer = async (configs?: ServerConfig): Promise<boolean> => {
   // Initialize Fastify server with logging
   server = fastify({
     logger: new Logger(),
+    // Set body limit to 100MB - Default is 1MB
+    // According to OpenAI - a batch input file can be up to 100 MB in size
+    // Whisper endpoints accept up to 25MB
+    // Vision endpoints accept up to 4MB
+    bodyLimit: 104_857_600
   })
 
   // Register CORS if enabled
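`bodyLimit` is a standard Fastify factory option; the default is 1 MiB (1,048,576 bytes), and a request whose payload exceeds the limit is rejected with HTTP 413 before any route handler runs. A minimal sketch of the behaviour this change enables (the route and payload are illustrative):

    import fastify from 'fastify'

    const server = fastify({ bodyLimit: 104_857_600 }) // 100 MiB, up from the 1 MiB default
    server.post('/v1/chat/completions', async () => ({ ok: true }))
    // Bodies up to 100 MiB are now accepted; anything larger
    // still gets a 413 Payload Too Large response.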