diff --git a/extensions/inference-nitro-extension/src/index.ts b/extensions/inference-nitro-extension/src/index.ts
index cb9b88bed..979b4cfac 100644
--- a/extensions/inference-nitro-extension/src/index.ts
+++ b/extensions/inference-nitro-extension/src/index.ts
@@ -154,7 +154,10 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
     })
 
     if (nitroInitResult?.error) {
-      events.emit(ModelEvent.OnModelFail, model)
+      events.emit(ModelEvent.OnModelFail, {
+        ...model,
+        error: nitroInitResult.error,
+      })
       return
     }
 
diff --git a/extensions/inference-nitro-extension/src/node/index.ts b/extensions/inference-nitro-extension/src/node/index.ts
index 25f571c81..32a12cf8a 100644
--- a/extensions/inference-nitro-extension/src/node/index.ts
+++ b/extensions/inference-nitro-extension/src/node/index.ts
@@ -310,9 +310,15 @@ async function killSubprocess(): Promise<void> {
       subprocess?.kill()
       subprocess = undefined
     })
-    .catch(() => {})
+    .catch(() => {}) // Do nothing with this attempt
     .then(() => tcpPortUsed.waitUntilFree(PORT, 300, 5000))
     .then(() => log(`[NITRO]::Debug: Nitro process is terminated`))
+    .catch((err) => {
+      log(
+        `[NITRO]::Debug: Could not kill running process on port ${PORT}. Might be another process running on the same port? ${err}`
+      )
+      throw 'PORT_NOT_AVAILABLE'
+    })
 }
 
 /**
diff --git a/web/containers/Providers/EventHandler.tsx b/web/containers/Providers/EventHandler.tsx
index e1e0ff2a8..102fa5f1c 100644
--- a/web/containers/Providers/EventHandler.tsx
+++ b/web/containers/Providers/EventHandler.tsx
@@ -114,8 +114,8 @@ export default function EventHandler({ children }: { children: ReactNode }) {
 
   const onModelInitFailed = useCallback(
     (res: any) => {
-      const errorMessage = `${res.error}`
-      console.error('Failed to load model: ' + errorMessage)
+      const errorMessage = res?.error ?? res
+      console.error('Failed to load model: ', errorMessage)
       setStateModel(() => ({
         state: 'start',
         loading: false,
diff --git a/web/screens/Chat/ErrorMessage/index.tsx b/web/screens/Chat/ErrorMessage/index.tsx
index 56ea4847c..5aa0cd6ce 100644
--- a/web/screens/Chat/ErrorMessage/index.tsx
+++ b/web/screens/Chat/ErrorMessage/index.tsx
@@ -7,6 +7,7 @@ import ModalTroubleShooting, {
   modalTroubleShootingAtom,
 } from '@/containers/ModalTroubleShoot'
 
+import { loadModelErrorAtom } from '@/hooks/useActiveModel'
 import useSendChatMessage from '@/hooks/useSendChatMessage'
 
 import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
@@ -15,6 +16,8 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
   const messages = useAtomValue(getCurrentChatMessagesAtom)
   const { resendChatMessage } = useSendChatMessage()
   const setModalTroubleShooting = useSetAtom(modalTroubleShootingAtom)
+  const loadModelError = useAtomValue(loadModelErrorAtom)
+  const PORT_NOT_AVAILABLE = 'PORT_NOT_AVAILABLE'
 
   const regenerateMessage = async () => {
     const lastMessageIndex = messages.length - 1
@@ -23,9 +26,9 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
   }
 
   return (
-    <>
+    <div key={message.id}>
       {message.status === MessageStatus.Stopped && (
-        <div key={message.id}>
+        <div>
           <span>
             Oops! The generation was interrupted. Let's give it another
            go!
@@ -41,25 +44,47 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
         </div>
       )}
       {message.status === MessageStatus.Error && (
-        <div key={message.id}>
-          <p>
-            {`Apologies, something’s amiss!`}
-          </p>
-          <p>
-            Jan’s in beta. Access&nbsp;
-            <span onClick={() => setModalTroubleShooting(true)}>
-              troubleshooting assistance
-            </span>
-            &nbsp;now.
-          </p>
-          <ModalTroubleShooting />
-        </div>
+        <>
+          {loadModelError === PORT_NOT_AVAILABLE ? (
+            <p>
+              Port 3928 is currently unavailable. Check for conflicting apps,
+              or access&nbsp;
+              <span onClick={() => setModalTroubleShooting(true)}>
+                troubleshooting assistance
+              </span>
+              &nbsp;for further support.
+            </p>
+          ) : (
+            <>
+              <p>
+                {`Apologies, something’s amiss!`}
+              </p>
+              <p>
+                Jan’s in beta. Access&nbsp;
+                <span onClick={() => setModalTroubleShooting(true)}>
+                  troubleshooting assistance
+                </span>
+                &nbsp;now.
+              </p>
+            </>
+          )}
+          <ModalTroubleShooting />
+        </>
       )}
-    </>
+    </div>
   )
 }
 
 export default ErrorMessage
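
Note: the patch imports loadModelErrorAtom from '@/hooks/useActiveModel' and branches on the 'PORT_NOT_AVAILABLE' string thrown by killSubprocess(), but it does not show where that atom is defined or populated. A minimal sketch of the assumed wiring follows; the names and the event subscription are illustrative only, and the real hook may set the error elsewhere (for example inside the onModelInitFailed handler shown above).

    // web/hooks/useActiveModel.ts (sketch, not part of this patch)
    import { useEffect } from 'react'
    import { atom, useSetAtom } from 'jotai'
    import { events, ModelEvent } from '@janhq/core'

    // Holds the last model-load error, e.g. the 'PORT_NOT_AVAILABLE' string
    // thrown by killSubprocess() and forwarded through ModelEvent.OnModelFail.
    export const loadModelErrorAtom = atom<string | undefined>(undefined)

    // Hypothetical hook that captures the error field the extension now
    // attaches to the OnModelFail payload and stores it in the atom.
    export function useLoadModelError() {
      const setLoadModelError = useSetAtom(loadModelErrorAtom)

      useEffect(() => {
        const onModelFail = (res: { error?: string }) => {
          setLoadModelError(res?.error ? String(res.error) : undefined)
        }
        events.on(ModelEvent.OnModelFail, onModelFail)
        return () => events.off(ModelEvent.OnModelFail, onModelFail)
      }, [setLoadModelError])
    }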