Merge pull request #4059 from janhq/fix/remote-models-error-handling

chore: remote models error handling
Louis 2024-11-20 23:09:45 +07:00 committed by GitHub
commit 3602483f6c
8 changed files with 15 additions and 13 deletions

View File

@@ -38,7 +38,7 @@ export function requestInference(
       errorCode = ErrorCode.InvalidApiKey
     }
     const error = {
-      message: data.error?.message ?? 'Error occurred.',
+      message: data.error?.message ?? data.message ?? 'Error occurred.',
       code: errorCode,
     }
     subscriber.error(error)
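
The change above broadens the error-message fallback: providers disagree on where the human-readable message lives. A minimal sketch of the chain's behavior, with illustrative payloads (the actual shapes depend on each provider):

// Illustrative payloads only.
const payloads: any[] = [
  { error: { message: 'Invalid API key' } }, // OpenAI-style envelope
  { message: 'Model not found' },            // top-level message
  {},                                        // nothing usable -> generic fallback
]
for (const data of payloads) {
  console.log(data.error?.message ?? data.message ?? 'Error occurred.')
}
// Prints: "Invalid API key", "Model not found", "Error occurred."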

View File

@@ -113,6 +113,8 @@ export default class JanInferenceCohereExtension extends RemoteOAIEngine {
   }
   transformResponse = (data: any) => {
-    return typeof data === 'object' ? data.text : JSON.parse(data).text ?? ''
+    return typeof data === 'object'
+      ? data.text
+      : (JSON.parse(data.replace('data: ', '').trim()).text ?? '')
   }
 }
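
The reworked transformResponse handles the case where a Cohere chunk arrives as a raw SSE line rather than a parsed object. A sketch of why the replace/trim matters (the sample payload is made up):

// An SSE-framed chunk is prefixed with "data: " and is not valid JSON as-is.
const raw = 'data: {"text":"Hello from Cohere"}\n'
// JSON.parse(raw) would throw; strip the framing first:
const text = JSON.parse(raw.replace('data: ', '').trim()).text ?? ''
console.log(text) // "Hello from Cohere"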

View File

@@ -1,7 +1,7 @@
 {
   "name": "@janhq/inference-openai-extension",
   "productName": "OpenAI Inference Engine",
-  "version": "1.0.3",
+  "version": "1.0.4",
   "description": "This extension enables OpenAI chat completion API calls",
   "main": "dist/index.js",
   "module": "dist/module.js",

View File

@@ -99,6 +99,7 @@
     "parameters": {
       "temperature": 1,
       "top_p": 1,
+      "stream": true,
       "max_tokens": 32768,
       "frequency_penalty": 0,
       "presence_penalty": 0
@@ -126,6 +127,7 @@
       "temperature": 1,
       "top_p": 1,
       "max_tokens": 65536,
+      "stream": true,
       "frequency_penalty": 0,
       "presence_penalty": 0
     },
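
Both model entries now default to streaming. A stream: true parameter changes how the response must be consumed downstream; a hedged sketch of that difference (the endpoint and handling are illustrative, not Jan's actual request path):

// Sketch only: shows the difference the "stream" parameter makes when consuming
// the response. Requires Node 18+ for fetch and async-iterable bodies.
async function chatCompletion(body: Record<string, unknown>) {
  const res = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    },
    body: JSON.stringify(body),
  })
  if (body.stream) {
    // Streaming responses arrive as incremental SSE chunks, not one JSON body.
    const decoder = new TextDecoder()
    for await (const chunk of res.body as unknown as AsyncIterable<Uint8Array>) {
      process.stdout.write(decoder.decode(chunk))
    }
  } else {
    console.log(await res.json())
  }
}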

View File

@@ -80,7 +80,6 @@ export default class JanInferenceOpenAIExtension extends RemoteOAIEngine {
       return {
         ...params,
         max_completion_tokens: max_tokens,
-        stream: false, // o1 only support stream = false
       }
     }
     // Pass through for non-preview models
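
Removing the stream: false override means o1-family requests keep whatever stream setting the caller passed, matching the stream: true defaults added to the model list above. A hedged sketch of the surrounding transform implied by this hunk (names and shape are simplified stand-ins):

type Payload = { max_tokens?: number; stream?: boolean; [k: string]: unknown }

// Simplified stand-in for the extension's payload transform.
function transformPayload(params: Payload, isPreviewModel: boolean): Payload {
  if (isPreviewModel) {
    const { max_tokens, ...rest } = params
    // o1 models take max_completion_tokens; stream now passes through untouched.
    return { ...rest, max_completion_tokens: max_tokens }
  }
  return params // pass through for non-preview models
}

console.log(transformPayload({ max_tokens: 32768, stream: true }, true))
// -> { stream: true, max_completion_tokens: 32768 }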

View File

@@ -63,9 +63,6 @@ describe('ErrorMessage Component', () => {
     render(<ErrorMessage message={message} />)
-    expect(
-      screen.getByText('Apologies, somethings amiss!')
-    ).toBeInTheDocument()
     expect(screen.getByText('troubleshooting assistance')).toBeInTheDocument()
   })

View File

@@ -27,8 +27,6 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
   const getErrorTitle = () => {
     switch (message.error_code) {
-      case ErrorCode.Unknown:
-        return 'Apologies, somethings amiss!'
       case ErrorCode.InvalidApiKey:
       case ErrorCode.AuthenticationError:
       case ErrorCode.InvalidRequestError:
@@ -55,17 +53,17 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
         )
       default:
         return (
-          <>
+          <p>
             {message.content[0]?.text?.value && (
               <AutoLink text={message.content[0].text.value} />
             )}
-          </>
+          </p>
         )
     }
   }
   return (
-    <div className="mt-10">
+    <div className="mx-auto mt-10 max-w-[700px]">
       {message.status === MessageStatus.Error && (
         <div
           key={message.id}
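
With the ErrorCode.Unknown case gone, an unrecognized error code falls through to the default branch, so the user sees the provider's actual message (auto-linked) instead of the canned apology. A simplified sketch of that fallthrough (copy and names are stand-ins, not the component's real code):

const getErrorTitle = (errorCode: string, messageText = '') => {
  switch (errorCode) {
    case 'invalid_api_key':
    case 'authentication_error':
    case 'invalid_request_error':
      return 'Invalid API key' // stand-in for the real branch's copy
    default:
      return messageText // raw message; the component renders it via AutoLink
  }
}

console.log(getErrorTitle('unknown_error', 'Upstream returned 502'))
// -> "Upstream returned 502"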

View File

@@ -180,7 +180,11 @@ export default function EventHandler({ children }: { children: ReactNode }) {
         setIsGeneratingResponse(false)
       }
       return
-    } else if (message.status === MessageStatus.Error) {
+    } else if (
+      message.status === MessageStatus.Error &&
+      activeModelRef.current?.engine &&
+      isLocalEngine(activeModelRef.current.engine)
+    ) {
       ;(async () => {
         if (
           !(await extensionManager
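
The added guard restricts this error-recovery path to local engines, so remote-model failures surface their message directly instead of triggering local checks. A sketch of the guard's effect (isLocalEngine exists in Jan's codebase; the stand-in below is illustrative only):

const LOCAL_ENGINES = ['nitro', 'cortex'] // illustrative, not the real list
const isLocalEngine = (engine: string) => LOCAL_ENGINES.includes(engine)

const activeModel: { engine?: string } = { engine: 'openai' }

if (activeModel.engine && isLocalEngine(activeModel.engine)) {
  // local engine: run recovery (e.g. re-check the extension's settings)
} else {
  // remote engine: skip recovery; the error message is shown as-is
}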