Merge pull request #4302 from janhq/dev

Release cut 0.5.12
This commit is contained in:
Louis 2024-12-19 23:30:08 +07:00 committed by GitHub
commit 9603d36a1f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
163 changed files with 8925 additions and 7950 deletions

View File

@ -1,3 +1,12 @@
---
name: Roadmap
about: Plan Roadmap items with subtasks
title: 'roadmap: '
labels: 'type: planning'
assignees: ''
---
## Goal
## Tasklist

View File

@ -70,6 +70,8 @@ jobs:
permissions:
contents: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
- name: Sync temp to latest
run: |
# sync temp-beta to beta by copying files that are different or new

View File

@ -12,6 +12,8 @@ on:
- none
- aws-s3
default: none
pull_request_review:
types: [submitted]
jobs:
set-public-provider:
@ -33,6 +35,9 @@ jobs:
elif [ "${{ github.event_name }}" == "push" ]; then
echo "::set-output name=public_provider::aws-s3"
echo "::set-output name=ref::${{ github.ref }}"
elif [ "${{ github.event_name }}" == "pull_request_review" ]; then
echo "::set-output name=public_provider::none"
echo "::set-output name=ref::${{ github.ref }}"
else
echo "::set-output name=public_provider::none"
echo "::set-output name=ref::${{ github.ref }}"
@ -116,3 +121,24 @@ jobs:
build_reason: Manual
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
comment-pr-build-url:
needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version, set-public-provider, sync-temp-to-latest]
runs-on: ubuntu-latest
if: github.event_name == 'pull_request_review'
steps:
- name: Set up GitHub CLI
run: |
curl -sSL https://github.com/cli/cli/releases/download/v2.33.0/gh_2.33.0_linux_amd64.tar.gz | tar xz
sudo cp gh_2.33.0_linux_amd64/bin/gh /usr/local/bin/
- name: Comment build URL on PR
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
PR_URL=${{ github.event.pull_request.html_url }}
RUN_ID=${{ github.run_id }}
COMMENT="This is the build for this pull request. You can download it from the Artifacts section here: [Build URL](https://github.com/${{ github.repository }}/actions/runs/${RUN_ID})."
gh pr comment $PR_URL --body "$COMMENT"

53
.github/workflows/publish-npm-core.yml vendored Normal file
View File

@ -0,0 +1,53 @@
# Publishes the core package to npm when a "vX.Y.Z-core" tag is pushed.
# Pull requests touching core/** run the build steps only (publish is gated).
# NOTE(review): the workflow name says "plugin models" — looks copy-pasted
# from another workflow; confirm the intended display name.
name: Publish plugin models Package to npmjs
on:
  push:
    tags: ["v[0-9]+.[0-9]+.[0-9]+-core"]
    paths: ["core/**"]
  pull_request:
    paths: ["core/**"]
jobs:
  build-and-publish-plugins:
    environment: production
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
          token: ${{ secrets.PAT_SERVICE_ACCOUNT }}
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # Exposes VERSION (tag without the leading "v") to later steps.
      # NOTE(review): "::set-output" is deprecated; $GITHUB_OUTPUT is the
      # modern replacement — confirm before GitHub removes support.
      - name: Extract tag name without v prefix
        id: get_version
        run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV && echo "::set-output name=version::${GITHUB_REF#refs/tags/v}"
        env:
          GITHUB_REF: ${{ github.ref }}
      - name: "Get Semantic Version from tag"
        if: github.event_name == 'push'
        run: |
          # Get the tag from the event
          tag=${GITHUB_REF#refs/tags/v}
          # remove the -core suffix
          # NOTE(review): `sed -n 's/-core//p'` prints nothing when the suffix
          # is absent; safe only because the push trigger requires a -core tag.
          new_version=$(echo $tag | sed -n 's/-core//p')
          echo $new_version
          # Replace the old version with the new version in package.json
          jq --arg version "$new_version" '.version = $version' core/package.json > /tmp/package.json && mv /tmp/package.json core/package.json
          # Print the new version
          echo "Updated package.json version to: $new_version"
          cat core/package.json
      # Setup .npmrc file to publish to npm
      - uses: actions/setup-node@v3
        with:
          node-version: "20.x"
          registry-url: "https://registry.npmjs.org"
      - run: cd core && yarn install && yarn build
      # Publish only on tag push; PR runs stop after the build above.
      - run: cd core && yarn publish --access public
        if: github.event_name == 'push'
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

53
.github/workflows/publish-npm-joi.yml vendored Normal file
View File

@ -0,0 +1,53 @@
# Publishes the joi package to npm when a "vX.Y.Z-joi" tag is pushed.
# Pull requests touching joi/** run the build steps only (publish is gated).
# NOTE(review): the workflow name says "plugin models" — looks copy-pasted
# from another workflow; confirm the intended display name.
name: Publish plugin models Package to npmjs
on:
  push:
    tags: ["v[0-9]+.[0-9]+.[0-9]+-joi"]
    paths: ["joi/**"]
  pull_request:
    paths: ["joi/**"]
jobs:
  build-and-publish-plugins:
    environment: production
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
          token: ${{ secrets.PAT_SERVICE_ACCOUNT }}
      - name: Install jq
        uses: dcarbone/install-jq-action@v2.0.1
      # Exposes VERSION (tag without the leading "v") to later steps.
      # NOTE(review): "::set-output" is deprecated; $GITHUB_OUTPUT is the
      # modern replacement — confirm before GitHub removes support.
      - name: Extract tag name without v prefix
        id: get_version
        run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV && echo "::set-output name=version::${GITHUB_REF#refs/tags/v}"
        env:
          GITHUB_REF: ${{ github.ref }}
      - name: "Get Semantic Version from tag"
        if: github.event_name == 'push'
        run: |
          # Get the tag from the event
          tag=${GITHUB_REF#refs/tags/v}
          # remove the -joi suffix
          # NOTE(review): `sed -n 's/-joi//p'` prints nothing when the suffix
          # is absent; safe only because the push trigger requires a -joi tag.
          new_version=$(echo $tag | sed -n 's/-joi//p')
          echo $new_version
          # Replace the old version with the new version in package.json
          jq --arg version "$new_version" '.version = $version' joi/package.json > /tmp/package.json && mv /tmp/package.json joi/package.json
          # Print the new version
          echo "Updated package.json version to: $new_version"
          cat joi/package.json
      # Setup .npmrc file to publish to npm
      - uses: actions/setup-node@v3
        with:
          node-version: "20.x"
          registry-url: "https://registry.npmjs.org"
      - run: cd joi && yarn install && yarn build
      # Publish only on tag push; PR runs stop after the build above.
      - run: cd joi && yarn publish --access public
        if: github.event_name == 'push'
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

View File

@ -111,8 +111,10 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: "true"
AWS_MAX_ATTEMPTS: "5"
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
@ -122,6 +124,8 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
@ -131,8 +135,10 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_EC2_METADATA_DISABLED: "true"
AWS_MAX_ATTEMPTS: "5"
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Upload Artifact .deb file
if: inputs.public_provider != 'github'

View File

@ -140,18 +140,20 @@ jobs:
fi
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: "/tmp/codesign.p12"
CSC_LINK: '/tmp/codesign.p12'
CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
CSC_IDENTITY_AUTO_DISCOVERY: "true"
CSC_IDENTITY_AUTO_DISCOVERY: 'true'
APPLE_ID: ${{ secrets.APPLE_ID }}
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: "."
APP_PATH: '.'
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: "true"
AWS_MAX_ATTEMPTS: "5"
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
@ -159,15 +161,17 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: "/tmp/codesign.p12"
CSC_LINK: '/tmp/codesign.p12'
CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
CSC_IDENTITY_AUTO_DISCOVERY: "true"
CSC_IDENTITY_AUTO_DISCOVERY: 'true'
APPLE_ID: ${{ secrets.APPLE_ID }}
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: "."
APP_PATH: '.'
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
@ -175,18 +179,20 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CSC_LINK: "/tmp/codesign.p12"
CSC_LINK: '/tmp/codesign.p12'
CSC_KEY_PASSWORD: ${{ secrets.CODE_SIGN_P12_PASSWORD }}
CSC_IDENTITY_AUTO_DISCOVERY: "true"
CSC_IDENTITY_AUTO_DISCOVERY: 'true'
APPLE_ID: ${{ secrets.APPLE_ID }}
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: "."
APP_PATH: '.'
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: "true"
AWS_MAX_ATTEMPTS: "5"
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Upload Artifact
if: inputs.public_provider != 'github'

View File

@ -149,8 +149,10 @@ jobs:
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: "true"
AWS_MAX_ATTEMPTS: "5"
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build app and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == false
@ -165,6 +167,8 @@ jobs:
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
AZURE_CERT_NAME: homebrewltd
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Build app and publish app to github
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && inputs.public_provider == 'github' && inputs.beta == true
@ -175,14 +179,16 @@ jobs:
AWS_ACCESS_KEY_ID: ${{ secrets.DELTA_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.DELTA_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: auto
AWS_EC2_METADATA_DISABLED: "true"
AWS_MAX_ATTEMPTS: "5"
AWS_EC2_METADATA_DISABLED: 'true'
AWS_MAX_ATTEMPTS: '5'
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }}
# AZURE_CERT_NAME: ${{ secrets.AZURE_CERT_NAME }}
AZURE_CERT_NAME: homebrewltd
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}
- name: Upload Artifact
if: inputs.public_provider != 'github'
@ -190,4 +196,3 @@ jobs:
with:
name: jan-win-x64-${{ inputs.new_version }}
path: ./electron/dist/*.exe

View File

@ -47,7 +47,7 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures:
<tr style="text-align:center">
<td style="text-align:center"><b>Version Type</b></td>
<td style="text-align:center"><b>Windows</b></td>
<td colspan="2" style="text-align:center"><b>MacOS</b></td>
<td style="text-align:center"><b>MacOS Universal</b></td>
<td colspan="2" style="text-align:center"><b>Linux</b></td>
</tr>
<tr style="text-align:center">
@ -59,15 +59,9 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures:
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/latest/mac-x64'>
<a href='https://app.jan.ai/download/latest/mac-universal'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/latest/mac-arm64'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2/M3/M4</b>
<b>jan.dmg</b>
</a>
</td>
<td style="text-align:center">
@ -92,15 +86,9 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures:
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/beta/mac-x64'>
<a href='https://app.jan.ai/download/beta/mac-universal'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/beta/mac-arm64'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2/M3/M4</b>
<b>jan.dmg</b>
</a>
</td>
<td style="text-align:center">
@ -125,15 +113,9 @@ From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures:
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/nightly/mac-x64'>
<a href='https://app.jan.ai/download/nightly/mac-universal'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/nightly/mac-arm64'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2/M3/M4</b>
<b>jan.dmg</b>
</a>
</td>
<td style="text-align:center">

View File

@ -1,4 +1,10 @@
import { Thread, ThreadInterface, ThreadMessage, MessageInterface } from '../../types'
import {
Thread,
ThreadInterface,
ThreadMessage,
MessageInterface,
ThreadAssistantInfo,
} from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension'
/**
@ -17,10 +23,21 @@ export abstract class ConversationalExtension
return ExtensionTypeEnum.Conversational
}
abstract getThreads(): Promise<Thread[]>
abstract saveThread(thread: Thread): Promise<void>
abstract listThreads(): Promise<Thread[]>
abstract createThread(thread: Partial<Thread>): Promise<Thread>
abstract modifyThread(thread: Thread): Promise<void>
abstract deleteThread(threadId: string): Promise<void>
abstract addNewMessage(message: ThreadMessage): Promise<void>
abstract writeMessages(threadId: string, messages: ThreadMessage[]): Promise<void>
abstract getAllMessages(threadId: string): Promise<ThreadMessage[]>
abstract createMessage(message: Partial<ThreadMessage>): Promise<ThreadMessage>
abstract deleteMessage(threadId: string, messageId: string): Promise<void>
abstract listMessages(threadId: string): Promise<ThreadMessage[]>
abstract getThreadAssistant(threadId: string): Promise<ThreadAssistantInfo>
abstract createThreadAssistant(
threadId: string,
assistant: ThreadAssistantInfo
): Promise<ThreadAssistantInfo>
abstract modifyThreadAssistant(
threadId: string,
assistant: ThreadAssistantInfo
): Promise<ThreadAssistantInfo>
abstract modifyMessage(message: ThreadMessage): Promise<ThreadMessage>
}

View File

@ -2,7 +2,6 @@ import { events } from '../../events'
import { BaseExtension } from '../../extension'
import { MessageRequest, Model, ModelEvent } from '../../../types'
import { EngineManager } from './EngineManager'
import { ModelManager } from '../../models/manager'
/**
* Base AIEngine

View File

@ -71,7 +71,7 @@ export abstract class OAIEngine extends AIEngine {
return
}
const timestamp = Date.now()
const timestamp = Date.now() / 1000
const message: ThreadMessage = {
id: ulid(),
thread_id: data.threadId,
@ -80,8 +80,8 @@ export abstract class OAIEngine extends AIEngine {
role: ChatCompletionRole.Assistant,
content: [],
status: MessageStatus.Pending,
created: timestamp,
updated: timestamp,
created_at: timestamp,
completed_at: timestamp,
object: 'thread.message',
}

View File

@ -1,8 +0,0 @@
/**
 * Minimal abstraction over the HTTP framework used by the Jan API server.
 * Exposes the usual verb helpers plus Fastify-style plugin registration;
 * request/response payloads are intentionally untyped at this layer.
 */
type RouteHandler = (req: any, res: any) => Promise<any>

export interface HttpServer {
  post: (route: string, handler: RouteHandler) => void
  get: (route: string, handler: RouteHandler) => void
  patch: (route: string, handler: RouteHandler) => void
  put: (route: string, handler: RouteHandler) => void
  delete: (route: string, handler: RouteHandler) => void
  register: (router: any, opts?: any) => void
}

View File

@ -1,7 +0,0 @@
// Smoke test: the restful/v1 module must be resolvable so the package index
// can re-export it.
import * as restfulV1 from './restful/v1';

it('should re-export from restful/v1', () => {
  // require() resolves the same module instance; being defined is enough to
  // prove the re-export wiring does not throw at import time.
  const restfulV1Exports = require('./restful/v1');
  expect(restfulV1Exports).toBeDefined();
})

View File

@ -1,3 +1 @@
export * from './HttpServer'
export * from './restful/v1'
export * from './common/handler'

View File

@ -2,7 +2,6 @@ jest.mock('../../helper', () => ({
...jest.requireActual('../../helper'),
getJanDataFolderPath: () => './app',
}))
import { dirname } from 'path'
import { App } from './app'
it('should call stopServer', () => {

View File

@ -3,7 +3,6 @@ import { basename, dirname, isAbsolute, join, relative } from 'path'
import { Processor } from './Processor'
import {
log as writeLog,
appResourcePath,
getAppConfigurations as appConfiguration,
updateAppConfiguration,
normalizeFilePath,
@ -91,8 +90,6 @@ export class App implements Processor {
port: args?.port,
isCorsEnabled: args?.isCorsEnabled,
isVerboseEnabled: args?.isVerboseEnabled,
schemaPath: join(appResourcePath(), 'docs', 'openapi', 'jan.yaml'),
baseDir: join(appResourcePath(), 'docs', 'openapi'),
prefix: args?.prefix,
})
}

View File

@ -1,62 +0,0 @@
import { HttpServer } from '../../HttpServer'
import { DownloadManager } from '../../../helper/download'

// Exercises the download-progress route through a fully mocked HttpServer.
// Note: the spy replaces the real handler, so these tests pin the expected
// response contract rather than the router's internal logic.
describe('downloadRouter', () => {
  let app: HttpServer

  beforeEach(() => {
    // Fresh server double per test; every verb is a jest.fn().
    app = {
      register: jest.fn(),
      post: jest.fn(),
      get: jest.fn(),
      patch: jest.fn(),
      put: jest.fn(),
      delete: jest.fn(),
    }
  })

  it('should return download progress for a given modelId', async () => {
    const modelId = '123'
    const downloadProgress = { progress: 50 }
    // Seed the singleton's progress map so the route has data to return.
    DownloadManager.instance.downloadProgressMap[modelId] = downloadProgress as any

    const req = { params: { modelId } }
    const res = {
      status: jest.fn(),
      send: jest.fn(),
    }

    jest.spyOn(app, 'get').mockImplementation((path, handler) => {
      if (path === `/download/getDownloadProgress/${modelId}`) {
        res.status(200)
        res.send(downloadProgress)
      }
    })

    app.get(`/download/getDownloadProgress/${modelId}`, req as any)

    expect(res.status).toHaveBeenCalledWith(200)
    expect(res.send).toHaveBeenCalledWith(downloadProgress)
  })

  it('should return 404 if download progress is not found', async () => {
    const modelId = '123'
    const req = { params: { modelId } }
    const res = {
      status: jest.fn(),
      send: jest.fn(),
    }

    jest.spyOn(app, 'get').mockImplementation((path, handler) => {
      if (path === `/download/getDownloadProgress/${modelId}`) {
        res.status(404)
        res.send({ message: 'Download progress not found' })
      }
    })

    app.get(`/download/getDownloadProgress/${modelId}`, req as any)

    expect(res.status).toHaveBeenCalledWith(404)
    expect(res.send).toHaveBeenCalledWith({ message: 'Download progress not found' })
  })
})

View File

@ -1,23 +0,0 @@
import { DownloadRoute } from '../../../../types/api'
import { DownloadManager } from '../../../helper/download'
import { HttpServer } from '../../HttpServer'

/**
 * Registers the download-progress endpoint on the given server.
 * GET /download/getDownloadProgress/:modelId replies with the model's current
 * progress entry, or 404 when no download is being tracked for that id.
 */
export const downloadRouter = async (app: HttpServer) => {
  app.get(`/download/${DownloadRoute.getDownloadProgress}/:modelId`, async (req, res) => {
    const modelId = req.params.modelId

    console.debug(`Getting download progress for model ${modelId}`)
    console.debug(
      `All Download progress: ${JSON.stringify(DownloadManager.instance.downloadProgressMap)}`
    )

    // Absent map entry means no tracked download for this model.
    const progress = DownloadManager.instance.downloadProgressMap[modelId]
    if (progress) {
      return res.status(200).send(progress)
    }
    return res.status(404).send({
      message: 'Download progress not found',
    })
  })
}

View File

@ -1,16 +0,0 @@
import { jest } from '@jest/globals';
import { HttpServer } from '../../HttpServer';
import { handleRequests } from './handlers';
import { Handler, RequestHandler } from '../../common/handler';

// Verifies that wiring requests constructs a RequestHandler and invokes
// handle(); the server double only needs the `post` verb that handlers use.
it('should initialize RequestHandler and call handle', () => {
  const mockHandle = jest.fn();
  jest.spyOn(RequestHandler.prototype, 'handle').mockImplementation(mockHandle);
  const mockApp = { post: jest.fn() };
  handleRequests(mockApp as unknown as HttpServer);
  expect(mockHandle).toHaveBeenCalled();
});

View File

@ -1,13 +0,0 @@
import { HttpServer } from '../../HttpServer'
import { Handler, RequestHandler } from '../../common/handler'

/**
 * Wires every core request route onto the HTTP server as a POST endpoint
 * under the `/app/` prefix. Each request body is a JSON-encoded positional
 * argument array that is spread into the route's listener; the listener's
 * result is JSON-encoded back into the reply.
 */
export function handleRequests(app: HttpServer) {
  const restWrapper: Handler = (route: string, listener: (...args: any[]) => any) => {
    app.post(`/app/${route}`, async (request: any, reply: any) => {
      // Incoming body is a serialized argument list for the listener.
      const parsedArgs = JSON.parse(request.body) as any[]
      const result = await listener(...parsedArgs)
      reply.send(JSON.stringify(result))
    })
  }
  new RequestHandler(restWrapper).handle()
}

View File

@ -1,21 +0,0 @@
import { commonRouter } from './common';
import { JanApiRouteConfiguration } from './helper/configuration';

// Ensures commonRouter registers list (GET /:key), retrieve (GET /:key/:id)
// and delete (DELETE /:key/:id) routes for every configured API collection.
test('commonRouter sets up routes for each key in JanApiRouteConfiguration', async () => {
  // Server double covering every verb the router may touch.
  const mockHttpServer = {
    get: jest.fn(),
    post: jest.fn(),
    patch: jest.fn(),
    put: jest.fn(),
    delete: jest.fn(),
  };

  await commonRouter(mockHttpServer as any);

  const expectedRoutes = Object.keys(JanApiRouteConfiguration);
  expectedRoutes.forEach((key) => {
    expect(mockHttpServer.get).toHaveBeenCalledWith(`/${key}`, expect.any(Function));
    expect(mockHttpServer.get).toHaveBeenCalledWith(`/${key}/:id`, expect.any(Function));
    expect(mockHttpServer.delete).toHaveBeenCalledWith(`/${key}/:id`, expect.any(Function));
  });
});

View File

@ -1,82 +0,0 @@
import { HttpServer } from '../HttpServer'
import {
  chatCompletions,
  downloadModel,
  getBuilder,
  retrieveBuilder,
  createMessage,
  createThread,
  getMessages,
  retrieveMessage,
  updateThread,
  models,
} from './helper/builder'

import { JanApiRouteConfiguration } from './helper/configuration'

/**
 * Registers the OpenAI-compatible REST routes (threads, messages, models,
 * chat completions) on the given server. Model routes are proxied through
 * `models`; other collections are served from the local builder helpers.
 */
export const commonRouter = async (app: HttpServer) => {
  // Wraps a raw array in the OpenAI-style list envelope.
  const normalizeData = (data: any) => {
    return {
      object: 'list',
      data,
    }
  }
  // Common Routes
  // Read & Delete :: Threads | Models | Assistants
  Object.keys(JanApiRouteConfiguration).forEach((key) => {
    // List the collection; model collections are delegated to `models`.
    app.get(`/${key}`, async (_req, _res) => {
      if (key.includes('models')) {
        return models(_req, _res)
      }
      return getBuilder(JanApiRouteConfiguration[key]).then(normalizeData)
    })

    // Retrieve a single record by id.
    app.get(`/${key}/:id`, async (_req: any, _res: any) => {
      if (key.includes('models')) {
        return models(_req, _res)
      }
      return retrieveBuilder(JanApiRouteConfiguration[key], _req.params.id)
    })

    // NOTE(review): this DELETE handler delegates to retrieveBuilder, so it
    // reads the record instead of deleting it — confirm whether intentional.
    app.delete(`/${key}/:id`, async (_req: any, _res: any) => {
      if (key.includes('models')) {
        return models(_req, _res)
      }
      return retrieveBuilder(JanApiRouteConfiguration[key], _req.params.id)
    })
  })

  // Threads
  app.post(`/threads`, async (req, res) => createThread(req.body))

  app.get(`/threads/:threadId/messages`, async (req, res) =>
    getMessages(req.params.threadId).then(normalizeData)
  )

  app.get(`/threads/:threadId/messages/:messageId`, async (req, res) =>
    retrieveMessage(req.params.threadId, req.params.messageId)
  )

  app.post(`/threads/:threadId/messages`, async (req, res) =>
    createMessage(req.params.threadId as any, req.body as any)
  )

  app.patch(`/threads/:threadId`, async (request: any) =>
    updateThread(request.params.threadId, request.body)
  )

  // Models
  app.get(`/models/download/:modelId`, async (request: any) =>
    downloadModel(request.params.modelId, {
      ignoreSSL: request.query.ignoreSSL === 'true',
      proxy: request.query.proxy,
    })
  )

  app.post(`/models/start`, async (request: any, reply: any) => models(request, reply))

  app.post(`/models/stop`, async (request: any, reply: any) => models(request, reply))

  // Chat Completion
  app.post(`/chat/completions`, async (request: any, reply: any) => chatCompletions(request, reply))
}

View File

@ -1,251 +0,0 @@
import { existsSync, readdirSync, readFileSync, writeFileSync, mkdirSync, appendFileSync } from 'fs'
import {
  getBuilder,
  retrieveBuilder,
  getMessages,
  retrieveMessage,
  createThread,
  updateThread,
  createMessage,
  downloadModel,
  chatCompletions,
} from './builder'
import { RouteConfiguration } from './configuration'

// All filesystem, path and network access is mocked; tests drive the builder
// helpers purely through the mocked fs return values.
jest.mock('fs')
jest.mock('path')
jest.mock('../../../helper', () => ({
  getEngineConfiguration: jest.fn(),
  getJanDataFolderPath: jest.fn().mockReturnValue('/mock/path'),
}))
jest.mock('request')
jest.mock('request-progress')
jest.mock('node-fetch')

describe('builder helper functions', () => {
  // Shared route configuration used across the suites below.
  const mockConfiguration: RouteConfiguration = {
    dirName: 'mockDir',
    metadataFileName: 'metadata.json',
    delete: {
      object: 'mockObject',
    },
  }

  beforeEach(() => {
    jest.clearAllMocks()
  })

  // Listing records from the collection directory.
  describe('getBuilder', () => {
    it('should return an empty array if directory does not exist', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(false)
      const result = await getBuilder(mockConfiguration)
      expect(result).toEqual([])
    })

    it('should return model data if directory exists', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' }))
      const result = await getBuilder(mockConfiguration)
      expect(result).toEqual([{ id: 'model1' }])
    })
  })

  // Id-based lookup over getBuilder's results.
  describe('retrieveBuilder', () => {
    it('should return undefined if no data matches the id', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' }))
      const result = await retrieveBuilder(mockConfiguration, 'nonexistentId')
      expect(result).toBeUndefined()
    })

    it('should return the matching data', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' }))
      const result = await retrieveBuilder(mockConfiguration, 'model1')
      expect(result).toEqual({ id: 'model1' })
    })
  })

  // Thread messages are stored as JSONL (one message per line).
  describe('getMessages', () => {
    it('should return an empty array if message file does not exist', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(false)
      const result = await getMessages('thread1')
      expect(result).toEqual([])
    })

    it('should return messages if message file exists', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['messages.jsonl'])
      ;(readFileSync as jest.Mock).mockReturnValue('{"id":"msg1"}\n{"id":"msg2"}\n')
      const result = await getMessages('thread1')
      expect(result).toEqual([{ id: 'msg1' }, { id: 'msg2' }])
    })
  })

  describe('retrieveMessage', () => {
    it('should return a message if no messages match the id', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['messages.jsonl'])
      ;(readFileSync as jest.Mock).mockReturnValue('{"id":"msg1"}\n')
      const result = await retrieveMessage('thread1', 'nonexistentId')
      // Misses resolve to a "Not found" payload rather than throwing.
      expect(result).toEqual({ message: 'Not found' })
    })

    it('should return the matching message', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['messages.jsonl'])
      ;(readFileSync as jest.Mock).mockReturnValue('{"id":"msg1"}\n')
      const result = await retrieveMessage('thread1', 'msg1')
      expect(result).toEqual({ id: 'msg1' })
    })
  })

  describe('createThread', () => {
    it('should return a message if thread has no assistants', async () => {
      const result = await createThread({})
      expect(result).toEqual({ message: 'Thread must have at least one assistant' })
    })

    it('should create a thread and return the updated thread', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(false)
      const thread = { assistants: [{ assistant_id: 'assistant1' }] }
      const result = await createThread(thread)
      expect(mkdirSync).toHaveBeenCalled()
      expect(writeFileSync).toHaveBeenCalled()
      expect(result.id).toBeDefined()
    })
  })

  describe('updateThread', () => {
    it('should return a message if thread is not found', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' }))
      const result = await updateThread('nonexistentId', {})
      expect(result).toEqual({ message: 'Thread not found' })
    })

    it('should update the thread and return the updated thread', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' }))
      const result = await updateThread('model1', { name: 'updatedName' })
      expect(writeFileSync).toHaveBeenCalled()
      expect(result.name).toEqual('updatedName')
    })
  })

  describe('createMessage', () => {
    it('should create a message and return the created message', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(false)
      const message = { role: 'user', content: 'Hello' }
      const result = (await createMessage('thread1', message)) as any
      // Messages are appended to the thread's JSONL file.
      expect(mkdirSync).toHaveBeenCalled()
      expect(appendFileSync).toHaveBeenCalled()
      expect(result.id).toBeDefined()
    })
  })

  describe('downloadModel', () => {
    it('should return a message if model is not found', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' }))
      const result = await downloadModel('nonexistentId')
      expect(result).toEqual({ message: 'Model not found' })
    })

    it('should start downloading the model', async () => {
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(
        JSON.stringify({ id: 'model1', object: 'model', sources: ['http://example.com'] })
      )
      const result = await downloadModel('model1')
      expect(result).toEqual({ message: 'Starting download model1' })
    })
  })

  // Chat completion proxying via mocked node-fetch.
  describe('chatCompletions', () => {
    it('should return the error on status not ok', async () => {
      const request = { body: { model: 'model1' } }
      const mockSend = jest.fn()
      const reply = {
        code: jest.fn().mockReturnThis(),
        send: jest.fn(),
        headers: jest.fn().mockReturnValue({
          send: mockSend,
        }),
        raw: {
          writeHead: jest.fn(),
          pipe: jest.fn(),
        },
      }
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(
        JSON.stringify({ id: 'model1', engine: 'openai' })
      )
      // Mock fetch
      const fetch = require('node-fetch')
      fetch.mockResolvedValue({
        status: 400,
        headers: new Map([
          ['content-type', 'application/json'],
          ['x-request-id', '123456'],
        ]),
        body: { pipe: jest.fn() },
        text: jest.fn().mockResolvedValue({ error: 'Mock error response' }),
      })
      await chatCompletions(request, reply)
      expect(reply.code).toHaveBeenCalledWith(400)
      expect(mockSend).toHaveBeenCalledWith(
        expect.objectContaining({
          error: 'Mock error response',
        })
      )
    })

    it('should return the chat completions', async () => {
      const request = { body: { model: 'model1' } }
      const reply = {
        code: jest.fn().mockReturnThis(),
        send: jest.fn(),
        raw: { writeHead: jest.fn(), pipe: jest.fn() },
      }
      ;(existsSync as jest.Mock).mockReturnValue(true)
      ;(readdirSync as jest.Mock).mockReturnValue(['file1'])
      ;(readFileSync as jest.Mock).mockReturnValue(
        JSON.stringify({ id: 'model1', engine: 'openai' })
      )
      // Mock fetch
      const fetch = require('node-fetch')
      fetch.mockResolvedValue({
        status: 200,
        body: { pipe: jest.fn() },
        json: jest.fn().mockResolvedValue({ completions: ['completion1'] }),
      })
      await chatCompletions(request, reply)
      // Success streams: headers written on the raw response.
      expect(reply.raw.writeHead).toHaveBeenCalledWith(200, expect.any(Object))
    })
  })
})

View File

@ -1,340 +0,0 @@
import {
existsSync,
readdirSync,
readFileSync,
writeFileSync,
mkdirSync,
appendFileSync,
createWriteStream,
rmdirSync,
} from 'fs'
import { JanApiRouteConfiguration, RouteConfiguration } from './configuration'
import { join } from 'path'
import { ContentType, InferenceEngine, MessageStatus, ThreadMessage } from '../../../../types'
import { getJanDataFolderPath } from '../../../helper'
import { CORTEX_API_URL } from './consts'
// TODO: Refactor these
/**
 * Collects the parsed JSON metadata of every entry under the configured
 * data directory (one sub-folder per entry, each holding a metadata file).
 * Entries without readable/parsable metadata are skipped; any unexpected
 * error yields an empty array.
 */
export const getBuilder = async (configuration: RouteConfiguration) => {
  const baseDir = join(getJanDataFolderPath(), configuration.dirName)
  try {
    if (!existsSync(baseDir)) {
      console.debug('model folder not found')
      return []
    }
    return readdirSync(baseDir)
      // macOS Finder drops .DS_Store files into folders; they are not entries.
      .filter((entry) => entry !== '.DS_Store')
      .map((entry) => readModelMetadata(join(baseDir, entry, configuration.metadataFileName)))
      .filter((raw) => !!raw)
      .map((raw: any) => {
        try {
          return JSON.parse(raw)
        } catch (err) {
          console.error(err)
          return undefined
        }
      })
      .filter((parsed: any) => !!parsed)
  } catch (err) {
    console.error(err)
    return []
  }
}
// Returns the raw text of the metadata file at `path`, or undefined when absent.
const readModelMetadata = (path: string): string | undefined =>
  existsSync(path) ? readFileSync(path, 'utf-8') : undefined
/**
 * Retrieves a single metadata record by id.
 * @param configuration route configuration naming the data directory.
 * @param id id of the record to look up.
 * @returns the matching record, or undefined when no entry has that id.
 */
export const retrieveBuilder = async (configuration: RouteConfiguration, id: string) => {
  const data = await getBuilder(configuration)
  // `find` stops at the first match instead of filtering the whole list,
  // and already returns undefined when nothing matches.
  return data.find((d: any) => d.id === id)
}
/**
 * Loads all messages of a thread from its messages.jsonl file
 * (one JSON document per line).
 * Returns an empty array when the thread folder or message file is
 * missing, or when any read error occurs.
 */
export const getMessages = async (threadId: string): Promise<ThreadMessage[]> => {
  const threadDirPath = join(getJanDataFolderPath(), 'threads', threadId)
  const messageFile = 'messages.jsonl'
  try {
    const entries: string[] = readdirSync(threadDirPath)
    if (!entries.includes(messageFile)) {
      console.error(`${threadDirPath} not contains message file`)
      return []
    }
    const messageFilePath = join(threadDirPath, messageFile)
    if (!existsSync(messageFilePath)) {
      console.debug('message file not found')
      return []
    }
    return readFileSync(messageFilePath, 'utf-8')
      .toString()
      .split('\n')
      .filter((line: string) => line !== '')
      .map((line: string) => JSON.parse(line) as ThreadMessage)
  } catch (err) {
    console.error(err)
    return []
  }
}
export const retrieveMessage = async (threadId: string, messageId: string) => {
const messages = await getMessages(threadId)
const filteredMessages = messages.filter((m) => m.id === messageId)
if (!filteredMessages || filteredMessages.length === 0) {
return {
message: 'Not found',
}
}
return filteredMessages[0]
}
/**
 * Creates a thread folder and persists its metadata as thread.json.
 * @param thread thread payload; must carry at least one assistant, whose
 *   assistant_id seeds the generated thread id.
 * @returns the stored thread (with id/created/updated filled in),
 *   `{ message }` when validation fails, or `{ error }` when the write fails.
 */
export const createThread = async (thread: any) => {
  const threadMetadataFileName = 'thread.json'
  // TODO: add validation
  if (!thread.assistants || thread.assistants.length === 0) {
    return {
      message: 'Thread must have at least one assistant',
    }
  }
  const threadId = generateThreadId(thread.assistants[0].assistant_id)
  try {
    const updatedThread = {
      ...thread,
      id: threadId,
      created: Date.now(),
      updated: Date.now(),
    }
    const threadDirPath = join(getJanDataFolderPath(), 'threads', updatedThread.id)
    const threadJsonPath = join(threadDirPath, threadMetadataFileName)
    if (!existsSync(threadDirPath)) {
      // recursive: also creates the parent `threads` folder on first use
      mkdirSync(threadDirPath, { recursive: true })
    }
    // writeFileSync is synchronous — the original `await` on it was a no-op.
    writeFileSync(threadJsonPath, JSON.stringify(updatedThread, null, 2))
    return updatedThread
  } catch (err) {
    return {
      error: err,
    }
  }
}
/**
 * Merges updates into an existing thread and persists it to disk.
 * The incoming payload's `id` and `object` fields are discarded so they
 * can never overwrite the stored ones.
 * @param threadId id of the thread to update.
 * @param thread partial thread data to merge in (mutated: id/object removed).
 * @returns the updated thread, or `{ message }` when the thread is missing
 *   or the write fails.
 */
export const updateThread = async (threadId: string, thread: any) => {
  const threadMetadataFileName = 'thread.json'
  const currentThreadData = await retrieveBuilder(JanApiRouteConfiguration.threads, threadId)
  if (!currentThreadData) {
    return {
      message: 'Thread not found',
    }
  }
  // we don't want to update the id and object
  delete thread.id
  delete thread.object
  const updatedThread = {
    ...currentThreadData,
    ...thread,
    updated: Date.now(),
  }
  try {
    const threadDirPath = join(getJanDataFolderPath(), 'threads', updatedThread.id)
    const threadJsonPath = join(threadDirPath, threadMetadataFileName)
    // writeFileSync is synchronous — the original `await` on it was a no-op.
    writeFileSync(threadJsonPath, JSON.stringify(updatedThread, null, 2))
    return updatedThread
  } catch (err) {
    return {
      message: err,
    }
  }
}
// Builds a thread id of the form `<assistantId>_<unix-seconds>`.
const generateThreadId = (assistantId: string) => {
  const unixSeconds = (Date.now() / 1000).toFixed(0)
  return `${assistantId}_${unixSeconds}`
}
/**
 * Appends a new message to a thread's messages.jsonl file.
 * @param threadId id of the thread the message belongs to.
 * @param message object carrying `role` and a string `content`.
 * @returns the stored ThreadMessage, or `{ message: err }` on failure.
 */
export const createMessage = async (threadId: string, message: any) => {
  const threadMessagesFileName = 'messages.jsonl'
  try {
    const { ulid } = require('ulidx')
    const msgId = ulid()
    const createdAt = Date.now()
    const threadMessage: ThreadMessage = {
      id: msgId,
      thread_id: threadId,
      status: MessageStatus.Ready,
      created: createdAt,
      updated: createdAt,
      object: 'thread.message',
      role: message.role,
      content: [
        {
          type: ContentType.Text,
          text: {
            value: message.content,
            annotations: [],
          },
        },
      ],
    }
    const threadDirPath = join(getJanDataFolderPath(), 'threads', threadId)
    const threadMessagePath = join(threadDirPath, threadMessagesFileName)
    if (!existsSync(threadDirPath)) {
      // recursive: also creates the parent `threads` folder on first use
      mkdirSync(threadDirPath, { recursive: true })
    }
    // JSONL: one JSON document per line.
    appendFileSync(threadMessagePath, JSON.stringify(threadMessage) + '\n')
    return threadMessage
  } catch (err) {
    return {
      message: err,
    }
  }
}
/**
 * Starts downloading a model's source files into the local models folder.
 * The downloads run in the background; this function returns immediately
 * after kicking them off.
 * @param modelId id of a model previously registered via its model.json.
 * @param network optional proxy / SSL-verification overrides.
 * @returns `{ message }` — either 'Model not found' or a started notice.
 */
export const downloadModel = async (
  modelId: string,
  network?: { proxy?: string; ignoreSSL?: boolean }
) => {
  // Certificate verification stays on unless the caller opts out.
  const strictSSL = !network?.ignoreSSL
  // Only http(s) proxies are honored; anything else is ignored.
  const proxy = network?.proxy?.startsWith('http') ? network.proxy : undefined
  const model = await retrieveBuilder(JanApiRouteConfiguration.models, modelId)
  if (!model || model.object !== 'model') {
    return {
      message: 'Model not found',
    }
  }
  const directoryPath = join(getJanDataFolderPath(), 'models', modelId)
  if (!existsSync(directoryPath)) {
    mkdirSync(directoryPath)
  }
  // path to model binary
  const modelBinaryPath = join(directoryPath, modelId)
  const request = require('request')
  const progress = require('request-progress')
  // NOTE(review): every entry of model.sources pipes into the SAME
  // modelBinaryPath, so multiple sources would overwrite each other —
  // presumably sources holds a single URL; confirm with callers.
  for (const source of model.sources) {
    const rq = request({ url: source, strictSSL, proxy })
    progress(rq, {})
      ?.on('progress', function (state: any) {
        console.debug('progress', JSON.stringify(state, null, 2))
      })
      ?.on('error', function (err: Error) {
        console.error('error', err)
      })
      ?.on('end', function () {
        console.debug('end')
      })
      .pipe(createWriteStream(modelBinaryPath))
  }
  return {
    message: `Starting download ${modelId}`,
  }
}
/**
 * Proxies /models requests to the local cortex server.
 * Error responses are forwarded verbatim; successful responses are
 * streamed straight through to the client.
 * @param request incoming HTTP request (method, url suffix and body forwarded)
 * @param reply outgoing HTTP reply
 */
export const models = async (request: any, reply: any) => {
  const fetch = require('node-fetch')
  // Preserve whatever follows "/models" in the original URL (may be empty).
  const suffix = request.url.split('/models')[1] ?? ''
  const response = await fetch(`${CORTEX_API_URL}/models${suffix}`, {
    method: request.method,
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(request.body),
  })
  if (response.status === 200) {
    reply.raw.writeHead(200, {
      'Content-Type': 'application/json',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
      'Access-Control-Allow-Origin': '*',
    })
    response.body.pipe(reply.raw)
  } else {
    // Forward the error response to client via reply
    const responseBody = await response.text()
    const responseHeaders = Object.fromEntries(response.headers)
    reply.code(response.status).headers(responseHeaders).send(responseBody)
  }
}
/**
 * Proxies a chat-completions request to the local cortex server.
 * Error responses are forwarded verbatim; successful responses are
 * streamed (SSE when the request asked for streaming, JSON otherwise).
 * @param request incoming HTTP request whose body is forwarded to cortex
 * @param reply outgoing HTTP reply
 */
export const chatCompletions = async (request: any, reply: any) => {
  // add engine for new cortex cpp engine
  if (request.body.engine === InferenceEngine.nitro) {
    request.body.engine = InferenceEngine.cortex_llamacpp
  }
  const fetch = require('node-fetch')
  const response = await fetch(`${CORTEX_API_URL}/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(request.body),
  })
  if (response.status === 200) {
    const contentType = request.body.stream === true ? 'text/event-stream' : 'application/json'
    reply.raw.writeHead(200, {
      'Content-Type': contentType,
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
      'Access-Control-Allow-Origin': '*',
    })
    response.body.pipe(reply.raw)
  } else {
    // Forward the error response to client via reply
    const responseBody = await response.text()
    const responseHeaders = Object.fromEntries(response.headers)
    reply.code(response.status).headers(responseHeaders).send(responseBody)
  }
}

View File

@ -1,24 +0,0 @@
import { JanApiRouteConfiguration } from './configuration'
describe('JanApiRouteConfiguration', () => {
  // Shared assertion: one collection entry must expose the expected
  // directory, metadata file name and delete-object label.
  const expectConfig = (key: string, dirName: string, metadataFileName: string, object: string) => {
    const config = JanApiRouteConfiguration[key];
    expect(config.dirName).toBe(dirName);
    expect(config.metadataFileName).toBe(metadataFileName);
    expect(config.delete.object).toBe(object);
  };

  it('should have the correct models configuration', () => {
    expectConfig('models', 'models', 'model.json', 'model');
  });

  it('should have the correct assistants configuration', () => {
    expectConfig('assistants', 'assistants', 'assistant.json', 'assistant');
  });

  it('should have the correct threads configuration', () => {
    expectConfig('threads', 'threads', 'thread.json', 'thread');
  });
});

View File

@ -1,31 +0,0 @@
/** Layout description of one API collection stored on disk. */
export type RouteConfiguration = {
  dirName: string
  metadataFileName: string
  delete: {
    object: string
  }
}

// Builds one collection entry; every collection follows the same
// folder + metadata-file layout.
const routeEntry = (
  dirName: string,
  metadataFileName: string,
  object: string
): RouteConfiguration => ({
  dirName,
  metadataFileName,
  delete: { object },
})

/** Per-collection storage configuration for the Jan API routes. */
export const JanApiRouteConfiguration: Record<string, RouteConfiguration> = {
  models: routeEntry('models', 'model.json', 'model'),
  assistants: routeEntry('assistants', 'assistant.json', 'assistant'),
  threads: routeEntry('threads', 'thread.json', 'thread'),
}

View File

@ -1,5 +0,0 @@
import { CORTEX_DEFAULT_PORT } from './consts'

// Pins the default cortex port so accidental changes are caught by CI.
it('should test CORTEX_DEFAULT_PORT', () => {
  expect(CORTEX_DEFAULT_PORT).toBe(39291)
})

View File

@ -1,7 +0,0 @@
// Default TCP port the local cortex server listens on.
export const CORTEX_DEFAULT_PORT = 39291
// Loopback address — cortex is only reachable from the local machine.
export const LOCAL_HOST = '127.0.0.1'
// File extension of supported model binaries (GGUF format).
export const SUPPORTED_MODEL_FORMAT = '.gguf'
// Base URL of cortex's OpenAI-compatible v1 HTTP API.
export const CORTEX_API_URL = `http://${LOCAL_HOST}:${CORTEX_DEFAULT_PORT}/v1`

View File

@ -1,16 +0,0 @@
import { v1Router } from './v1';
import { commonRouter } from './common';

// Sanity check: the router module exports a callable symbol.
test('should define v1Router function', () => {
  expect(v1Router).toBeDefined();
});

// v1Router must mount the common (public) routes on the app it receives.
test('should register commonRouter', () => {
  const mockApp = {
    register: jest.fn(),
  };
  v1Router(mockApp as any);
  expect(mockApp.register).toHaveBeenCalledWith(commonRouter);
});

View File

@ -1,16 +0,0 @@
import { HttpServer } from '../HttpServer'
import { commonRouter } from './common'
/**
 * Registers all v1 API routes on the given HTTP server.
 * Only the public/common routes are currently mounted; the internal and
 * download routes were removed (see the notes kept below).
 * @param app HTTP server wrapper to register the routes on.
 */
export const v1Router = async (app: HttpServer) => {
  // MARK: Public API Routes
  app.register(commonRouter)
  // MARK: Internal Application Routes
  // DEPRECATED: Vulnerability possible issues
  // handleRequests(app)
  // Expanded route for tracking download progress
  // TODO: Replace by Observer Wrapper (ZeroMQ / Vanilla Websocket)
  // DEPRECATED: Jan FE Docker deploy is deprecated
  // app.register(downloadRouter)
}

View File

@ -1,5 +1,4 @@
import { join, resolve } from 'path'
import { getJanDataFolderPath } from './config'
import { join } from 'path'
/**
* Normalize file path
@ -34,4 +33,5 @@ export function appResourcePath() {
// server
return join(global.core.appPath(), '../../..')
}
}

View File

@ -36,3 +36,10 @@ export type Assistant = {
/** Represents the metadata of the object. */
metadata?: Record<string, unknown>
}
export interface CodeInterpreterTool {
/**
* The type of tool being defined: `code_interpreter`
*/
type: 'code_interpreter'
}

View File

@ -1,3 +1,4 @@
import { CodeInterpreterTool } from '../assistant'
import { ChatCompletionMessage, ChatCompletionRole } from '../inference'
import { ModelInfo } from '../model'
import { Thread } from '../thread'
@ -15,6 +16,10 @@ export type ThreadMessage = {
thread_id: string
/** The assistant id of this thread. **/
assistant_id?: string
/**
* A list of files attached to the message, and the tools they were added to.
*/
attachments?: Array<Attachment> | null
/** The role of the author of this message. **/
role: ChatCompletionRole
/** The content of this message. **/
@ -22,9 +27,9 @@ export type ThreadMessage = {
/** The status of this message. **/
status: MessageStatus
/** The timestamp indicating when this message was created. Represented in Unix time. **/
created: number
created_at: number
/** The timestamp indicating when this message was updated. Represented in Unix time. **/
updated: number
completed_at: number
/** The additional metadata of this message. **/
metadata?: Record<string, unknown>
@ -52,6 +57,11 @@ export type MessageRequest = {
*/
assistantId?: string
/**
* A list of files attached to the message, and the tools they were added to.
*/
attachments: Array<Attachment> | null
/** Messages for constructing a chat completion request **/
messages?: ChatCompletionMessage[]
@ -97,8 +107,7 @@ export enum ErrorCode {
*/
export enum ContentType {
Text = 'text',
Image = 'image',
Pdf = 'pdf',
Image = 'image_url',
}
/**
@ -108,8 +117,15 @@ export enum ContentType {
export type ContentValue = {
value: string
annotations: string[]
name?: string
size?: number
}
/**
* The `ImageContentValue` type defines the shape of a content value object of image type
* @data_transfer_object
*/
export type ImageContentValue = {
detail?: string
url?: string
}
/**
@ -118,5 +134,37 @@ export type ContentValue = {
*/
export type ThreadContent = {
type: ContentType
text: ContentValue
text?: ContentValue
image_url?: ImageContentValue
}
export interface Attachment {
/**
* The ID of the file to attach to the message.
*/
file_id?: string
/**
* The tools to add this file to.
*/
tools?: Array<CodeInterpreterTool | Attachment.AssistantToolsFileSearchTypeOnly>
}
export namespace Attachment {
export interface AssistantToolsFileSearchTypeOnly {
/**
* The type of tool being defined: `file_search`
*/
type: 'file_search'
}
}
/**
* On an incomplete message, details about why the message is incomplete.
*/
export interface IncompleteDetails {
/**
* The reason the message is incomplete.
*/
reason: 'content_filter' | 'max_tokens' | 'run_cancelled' | 'run_expired' | 'run_failed'
}

View File

@ -11,20 +11,20 @@ export interface MessageInterface {
* @param {ThreadMessage} message - The message to be added.
* @returns {Promise<void>} A promise that resolves when the message has been added.
*/
addNewMessage(message: ThreadMessage): Promise<void>
/**
* Creates and persists a new message.
* @param {ThreadMessage} message - The message to be created.
* @returns {Promise<ThreadMessage>} A promise that resolves to the created message.
*/
writeMessages(threadId: string, messages: ThreadMessage[]): Promise<void>
createMessage(message: ThreadMessage): Promise<ThreadMessage>
/**
* Retrieves all messages from a specific thread.
* @param {string} threadId - The ID of the thread to retrieve the messages from.
* @returns {Promise<ThreadMessage[]>} A promise that resolves to an array of messages from the thread.
*/
getAllMessages(threadId: string): Promise<ThreadMessage[]>
listMessages(threadId: string): Promise<ThreadMessage[]>
/**
* Deletes a specific message from a thread.
* @param {string} threadId - The ID of the thread from which the message will be deleted.
* @param {string} messageId - The ID of the message to be deleted.
* @returns {Promise<void>} A promise that resolves when the message has been successfully deleted.
*/
deleteMessage(threadId: string, messageId: string): Promise<void>
}

View File

@ -11,15 +11,23 @@ export interface ThreadInterface {
* @abstract
* @returns {Promise<Thread[]>} A promise that resolves to an array of threads.
*/
getThreads(): Promise<Thread[]>
listThreads(): Promise<Thread[]>
/**
* Saves a thread.
* Create a thread.
* @abstract
* @param {Thread} thread - The thread to create.
* @returns {Promise<Thread>} A promise that resolves to the created thread.
*/
saveThread(thread: Thread): Promise<void>
createThread(thread: Thread): Promise<Thread>
/**
* Modifies a thread.
* @abstract
* @param {Thread} thread - The thread to modify.
* @returns {Promise<void>} A promise that resolves when the thread is modified.
*/
modifyThread(thread: Thread): Promise<void>
/**
* Deletes a thread.

View File

@ -13,7 +13,8 @@
"declarationDir": "dist/types",
"outDir": "dist/lib",
"importHelpers": true,
"types": ["@types/jest"]
"types": ["@types/jest"],
"resolveJsonModule": true
},
"include": ["src"],
"exclude": ["**/*.test.ts"]

View File

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,319 +0,0 @@
---
components:
schemas:
AssistantObject:
type: object
properties:
id:
type: string
description: The identifier of the assistant.
example: asst_abc123
object:
type: string
description: Type of the object, indicating it's an assistant.
default: assistant
version:
type: integer
description: Version number of the assistant.
example: 1
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the assistant.
example: 1698984975
name:
type: string
description: Name of the assistant.
example: Math Tutor
description:
type: string
description: Description of the assistant. Can be null.
example: null
avatar:
type: string
description: URL of the assistant's avatar. Jan-specific property.
example: https://pic.png
models:
type: array
description: List of models associated with the assistant. Jan-specific property.
items:
type: object
properties:
model_id:
type: string
example: model_0
instructions:
type: string
description: A system prompt for the assistant.
example: Be concise
events:
type: object
description: Event subscription settings for the assistant.
properties:
in:
type: array
items:
type: string
out:
type: array
items:
type: string
metadata:
type: object
description: Metadata associated with the assistant.
ListAssistantsResponse: null
CreateAssistantResponse:
type: object
properties:
id:
type: string
description: The identifier of the assistant.
example: asst_abc123
object:
type: string
description: Type of the object, indicating it's an assistant.
default: assistant
version:
type: integer
description: Version number of the assistant.
example: 1
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the assistant.
example: 1698984975
name:
type: string
description: Name of the assistant.
example: Math Tutor
description:
type: string
description: Description of the assistant. Can be null.
example: null
avatar:
type: string
description: URL of the assistant's avatar. Jan-specific property.
example: https://pic.png
models:
type: array
description: List of models associated with the assistant. Jan-specific property.
items:
type: object
properties:
model_id:
type: string
example: model_0
instructions:
type: string
description: A system prompt for the assistant.
example: Be concise
events:
type: object
description: Event subscription settings for the assistant.
properties:
in:
type: array
items:
type: string
out:
type: array
items:
type: string
metadata:
type: object
description: Metadata associated with the assistant.
RetrieveAssistantResponse:
type: object
properties:
id:
type: string
description: The identifier of the assistant.
example: asst_abc123
object:
type: string
description: Type of the object, indicating it's an assistant.
default: assistant
version:
type: integer
description: Version number of the assistant.
example: 1
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the assistant.
example: 1698984975
name:
type: string
description: Name of the assistant.
example: Math Tutor
description:
type: string
description: Description of the assistant. Can be null.
example: null
avatar:
type: string
description: URL of the assistant's avatar. Jan-specific property.
example: https://pic.png
models:
type: array
description: List of models associated with the assistant. Jan-specific property.
items:
type: object
properties:
model_id:
type: string
example: model_0
instructions:
type: string
description: A system prompt for the assistant.
example: Be concise
events:
type: object
description: Event subscription settings for the assistant.
properties:
in:
type: array
items:
type: string
out:
type: array
items:
type: string
metadata:
type: object
description: Metadata associated with the assistant.
ModifyAssistantObject:
type: object
properties:
id:
type: string
description: The identifier of the assistant.
example: asst_abc123
object:
type: string
description: Type of the object, indicating it's an assistant.
default: assistant
version:
type: integer
description: Version number of the assistant.
example: 1
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the assistant.
example: 1698984975
name:
type: string
description: Name of the assistant.
example: Math Tutor
description:
type: string
description: Description of the assistant. Can be null.
example: null
avatar:
type: string
description: URL of the assistant's avatar. Jan-specific property.
example: https://pic.png
models:
type: array
description: List of models associated with the assistant. Jan-specific property.
items:
type: object
properties:
model_id:
type: string
example: model_0
instructions:
type: string
description: A system prompt for the assistant.
example: Be concise
events:
type: object
description: Event subscription settings for the assistant.
properties:
in:
type: array
items:
type: string
out:
type: array
items:
type: string
metadata:
type: object
description: Metadata associated with the assistant.
ModifyAssistantResponse:
type: object
properties:
id:
type: string
description: The identifier of the assistant.
example: asst_abc123
object:
type: string
description: Type of the object, indicating it's an assistant.
default: assistant
version:
type: integer
description: Version number of the assistant.
example: 1
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the assistant.
example: 1698984975
name:
type: string
description: Name of the assistant.
example: Physics Tutor
description:
type: string
description: Description of the assistant. Can be null.
example: null
avatar:
type: string
description: URL of the assistant's avatar. Jan-specific property.
example: https://pic.png
models:
type: array
description: List of models associated with the assistant. Jan-specific property.
items:
type: object
properties:
model_id:
type: string
example: model_0
instructions:
type: string
description: A system prompt for the assistant.
example: Be concise!
events:
type: object
description: Event subscription settings for the assistant.
properties:
in:
type: array
items:
type: string
out:
type: array
items:
type: string
metadata:
type: object
description: Metadata associated with the assistant.
DeleteAssistantResponse:
type: object
properties:
id:
type: string
description: The identifier of the deleted assistant.
example: asst_abc123
object:
type: string
description: Type of the object, indicating the assistant has been deleted.
example: assistant.deleted
deleted:
type: boolean
description: Indicates whether the assistant was successfully deleted.
example: true

View File

@ -1,196 +0,0 @@
---
components:
schemas:
ChatObject:
type: object
properties:
messages:
type: array
description: |
Contains input data or prompts for the model to process.
example:
- content: 'Hello there :wave:'
role: assistant
- content: Can you write a long story
role: user
stream:
type: boolean
default: true
description:
Enables continuous output generation, allowing for streaming of
model responses.
model:
type: string
example: gpt-3.5-turbo
description: Specifies the model being used for inference or processing tasks.
max_tokens:
type: number
default: 2048
description:
The maximum number of tokens the model will generate in a single
response.
stop:
type: array
example:
- hello
description:
Defines specific tokens or phrases at which the model will stop
generating further output.
frequency_penalty:
type: number
default: 0
description:
Adjusts the likelihood of the model repeating words or phrases in
its output.
presence_penalty:
type: number
default: 0
description:
Influences the generation of new and varied concepts in the model's
output.
temperature:
type: number
default: 0.7
min: 0
max: 1
description: Controls the randomness of the model's output.
top_p:
type: number
default: 0.95
min: 0
max: 1
description: Set probability threshold for more relevant outputs.
cache_prompt:
type: boolean
default: true
description: Optimize performance in repeated or similar requests.
ChatCompletionRequest:
type: object
properties:
messages:
type: array
description: |
Contains input data or prompts for the model to process.
example:
- content: You are a helpful assistant.
role: system
- content: Hello!
role: user
model:
type: string
example: tinyllama-1.1b
description: |
Specifies the model being used for inference or processing tasks.
stream:
type: boolean
default: true
description: >
Enables continuous output generation, allowing for streaming of
model responses.
max_tokens:
type: number
default: 2048
description: >
The maximum number of tokens the model will generate in a single
response.
stop:
type: array
example:
- hello
description: >
Defines specific tokens or phrases at which the model will stop
generating further output.
frequency_penalty:
type: number
default: 0
description: >
Adjusts the likelihood of the model repeating words or phrases in
its output.
presence_penalty:
type: number
default: 0
description: >
Influences the generation of new and varied concepts in the model's
output.
temperature:
type: number
default: 0.7
min: 0
max: 1
description: |
Controls the randomness of the model's output.
top_p:
type: number
default: 0.95
min: 0
max: 1
description: |
Set probability threshold for more relevant outputs.
ChatCompletionResponse:
type: object
description: Description of the response structure
properties:
choices:
type: array
description: Array of choice objects
items:
type: object
properties:
finish_reason:
type: string
nullable: true
example: null
description: Reason for finishing the response, if applicable
index:
type: integer
example: 0
description: Index of the choice
message:
type: object
properties:
content:
type: string
example: Hello user. What can I help you with?
description: Content of the message
role:
type: string
example: assistant
description: Role of the sender
created:
type: integer
example: 1700193928
description: Timestamp of when the response was created
id:
type: string
example: ebwd2niJvJB1Q2Whyvkz
description: Unique identifier of the response
model:
type: string
nullable: true
example: _
description: Model used for generating the response
object:
type: string
example: chat.completion
description: Type of the response object
system_fingerprint:
type: string
nullable: true
example: _
description: System fingerprint
usage:
type: object
description: Information about the usage of tokens
properties:
completion_tokens:
type: integer
example: 500
description: Number of tokens used for completion
prompt_tokens:
type: integer
example: 33
description: Number of tokens used in the prompt
total_tokens:
type: integer
example: 533
description: Total number of tokens used

View File

@ -1,313 +0,0 @@
---
components:
schemas:
MessageObject:
type: object
properties:
id:
type: string
description: |
Sequential or UUID identifier of the message.
example: 0
object:
type: string
description: |
Type of the object, defaults to 'thread.message'.
example: thread.message
created_at:
type: integer
format: int64
description: |
Unix timestamp representing the creation time of the message.
thread_id:
type: string
description: >
Identifier of the thread to which this message belongs. Defaults to
parent thread.
example: thread_asdf
assistant_id:
type: string
description: >
Identifier of the assistant involved in the message. Defaults to
parent thread.
example: jan
role:
type: string
enum:
- user
- assistant
description: |
Role of the sender, either 'user' or 'assistant'.
content:
type: array
items:
type: object
properties:
type:
type: string
description: |
Type of content, e.g., 'text'.
text:
type: object
properties:
value:
type: string
description: |
Text content of the message.
example: Hi!?
annotations:
type: array
items:
type: string
description: |
Annotations for the text content, if any.
example: []
metadata:
type: object
description: |
Metadata associated with the message, defaults to an empty object.
example: {}
GetMessageResponse:
type: object
properties:
id:
type: string
description: The identifier of the message.
example: msg_abc123
object:
type: string
description: Type of the object, indicating it's a thread message.
default: thread.message
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the message.
example: 1699017614
thread_id:
type: string
description: Identifier of the thread to which this message belongs.
example: thread_abc123
role:
type: string
description: Role of the sender, either 'user' or 'assistant'.
example: user
content:
type: array
items:
type: object
properties:
type:
type: string
description: Type of content, e.g., 'text'.
example: text
text:
type: object
properties:
value:
type: string
description: Text content of the message.
example: How does AI work? Explain it in simple terms.
annotations:
type: array
items:
type: string
description: Annotations for the text content, if any.
example: []
file_ids:
type: array
items:
type: string
description: Array of file IDs associated with the message, if any.
example: []
assistant_id:
type: string
description: Identifier of the assistant involved in the message, if applicable.
example: null
run_id:
type: string
description: Run ID associated with the message, if applicable.
example: null
metadata:
type: object
description: Metadata associated with the message.
example: {}
CreateMessageResponse:
type: object
properties:
id:
type: string
description: The identifier of the created message.
example: msg_abc123
object:
type: string
description: Type of the object, indicating it's a thread message.
example: thread.message
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the message.
example: 1699017614
thread_id:
type: string
description: Identifier of the thread to which this message belongs.
example: thread_abc123
role:
type: string
description: Role of the sender, either 'user' or 'assistant'.
example: user
content:
type: array
items:
type: object
properties:
type:
type: string
description: Type of content, e.g., 'text'.
example: text
text:
type: object
properties:
value:
type: string
description: Text content of the message.
example: How does AI work? Explain it in simple terms.
annotations:
type: array
items:
type: string
description: Annotations for the text content, if any.
example: []
file_ids:
type: array
items:
type: string
description: Array of file IDs associated with the message, if any.
example: []
assistant_id:
type: string
description: Identifier of the assistant involved in the message, if applicable.
example: null
run_id:
type: string
description: Run ID associated with the message, if applicable.
example: null
metadata:
type: object
description: Metadata associated with the message.
example: {}
ListMessagesResponse:
type: object
properties:
object:
type: string
description: Type of the object, indicating it's a list.
default: list
data:
type: array
items:
$ref: '#/components/schemas/ListMessageObject'
first_id:
type: string
description: Identifier of the first message in the list.
example: msg_abc123
last_id:
type: string
description: Identifier of the last message in the list.
example: msg_abc456
has_more:
type: boolean
description: Indicates whether there are more messages to retrieve.
example: false
ListMessageObject:
type: object
properties:
id:
type: string
description: The identifier of the message.
example: msg_abc123
object:
type: string
description: Type of the object, indicating it's a thread message.
example: thread.message
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the message.
example: 1699017614
thread_id:
type: string
description: Identifier of the thread to which this message belongs.
example: thread_abc123
role:
type: string
description: Role of the sender, either 'user' or 'assistant'.
example: user
content:
type: array
items:
type: object
properties:
type:
type: string
description: Type of content, e.g., 'text'.
text:
type: object
properties:
value:
type: string
description: Text content of the message.
example: How does AI work? Explain it in simple terms.
annotations:
type: array
items:
type: string
description: Annotations for the text content, if any.
file_ids:
type: array
items:
type: string
description: Array of file IDs associated with the message, if any.
example: []
assistant_id:
type: string
description: Identifier of the assistant involved in the message, if applicable.
example: null
run_id:
type: string
description: Run ID associated with the message, if applicable.
example: null
metadata:
type: object
description: Metadata associated with the message.
example: {}
MessageFileObject:
type: object
properties:
id:
type: string
description: The identifier of the file.
example: file-abc123
object:
type: string
description: Type of the object, indicating it's a thread message file.
example: thread.message.file
created_at:
type: integer
format: int64
description: Unix timestamp representing the creation time of the file.
example: 1699061776
message_id:
type: string
description: Identifier of the message to which this file is associated.
example: msg_abc123
ListMessageFilesResponse:
type: object
properties:
object:
type: string
description: Type of the object, indicating it's a list.
default: list
data:
type: array
items:
$ref: '#/components/schemas/MessageFileObject'

View File

@ -1,259 +0,0 @@
---
# OpenAPI component schemas for model-related endpoints.
components:
  schemas:
    # Envelope for GET /models: a literal `list` marker plus an array of Model items.
    ListModelsResponse:
      type: object
      properties:
        object:
          type: string
          enum:
            - list
        data:
          type: array
          items:
            $ref: '#/components/schemas/Model'
      required:
        - object
        - data
Model:
type: object
properties:
source_url:
type: string
format: uri
description: URL to the source of the model.
example: https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf
id:
type: string
description:
Unique identifier used in chat-completions model_name, matches
folder name.
example: trinity-v1.2-7b
object:
type: string
example: model
name:
type: string
description: Name of the model.
example: Trinity-v1.2 7B Q4
version:
type: string
default: '1.0'
description: The version number of the model.
description:
type: string
description: Description of the model.
example:
Trinity is an experimental model merge using the Slerp method.
Recommended for daily assistance purposes.
format:
type: string
description: State format of the model, distinct from the engine.
example: gguf
settings:
type: object
properties:
ctx_len:
type: integer
description: Context length.
example: 4096
prompt_template:
type: string
example: "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
additionalProperties: false
parameters:
type: object
properties:
temperature:
example: 0.7
top_p:
example: 0.95
stream:
example: true
max_tokens:
example: 4096
stop:
example: []
frequency_penalty:
example: 0
presence_penalty:
example: 0
additionalProperties: false
metadata:
author:
type: string
example: Jan
tags:
example:
- 7B
- Merged
- Featured
size:
example: 4370000000,
cover:
example: https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1.2-7b/cover.png
engine:
example: nitro
    # Minimal OpenAI-compatible model descriptor (item of the models list).
    ModelObject:
      type: object
      properties:
        id:
          type: string
          description: |
            The identifier of the model.
          example: trinity-v1.2-7b
        object:
          type: string
          description: |
            The type of the object, indicating it's a model.
          default: model
        created:
          type: integer
          format: int64
          description: |
            Unix timestamp representing the creation time of the model.
          example: 1253935178
        owned_by:
          type: string
          description: |
            The entity that owns the model.
          example: _
GetModelResponse:
type: object
properties:
source_url:
type: string
format: uri
description: URL to the source of the model.
example: https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf
id:
type: string
description:
Unique identifier used in chat-completions model_name, matches
folder name.
example: mistral-ins-7b-q4
object:
type: string
example: model
name:
type: string
description: Name of the model.
example: Mistral Instruct 7B Q4
version:
type: string
default: '1.0'
description: The version number of the model.
description:
type: string
description: Description of the model.
example:
Trinity is an experimental model merge using the Slerp method.
Recommended for daily assistance purposes.
format:
type: string
description: State format of the model, distinct from the engine.
example: gguf
settings:
type: object
properties:
ctx_len:
type: integer
description: Context length.
example: 4096
prompt_template:
type: string
example: '[INST] {prompt} [/INST]'
additionalProperties: false
parameters:
type: object
properties:
temperature:
example: 0.7
top_p:
example: 0.95
stream:
example: true
max_tokens:
example: 4096
stop:
example: []
frequency_penalty:
example: 0
presence_penalty:
example: 0
additionalProperties: false
metadata:
author:
type: string
example: MistralAI
tags:
example:
- 7B
- Featured
- Foundation Model
size:
example: 4370000000,
cover:
example: https://raw.githubusercontent.com/janhq/jan/main/models/mistral-ins-7b-q4/cover.png
engine:
example: nitro
    # Response schema returned after deleting a model.
    DeleteModelResponse:
      type: object
      properties:
        id:
          type: string
          description: The identifier of the model that was deleted.
          example: mistral-ins-7b-q4
        object:
          type: string
          description: Type of the object, indicating it's a model.
          default: model
        deleted:
          type: boolean
          description: Indicates whether the model was successfully deleted.
          example: true
    # Response schema returned after starting a model.
    StartModelResponse:
      type: object
      properties:
        id:
          type: string
          description: The identifier of the model that was started.
          example: model-zephyr-7B
        object:
          type: string
          description: Type of the object, indicating it's a model.
          default: model
        state:
          type: string
          description: The current state of the model after the start operation.
          example: running
      required:
        - id
        - object
        - state
StopModelResponse:
type: object
properties:
id:
type: string
description: The identifier of the model that was started.
example: model-zephyr-7B
object:
type: string
description: Type of the object, indicating it's a model.
default: model
state:
type: string
description: The current state of the model after the start operation.
example: stopped
required:
- id
- object
- state
DownloadModelResponse:
type: object
properties:
message:
type: string
description: Message indicates Jan starting download corresponding model.
example: Starting download mistral-ins-7b-q4

View File

@ -1,227 +0,0 @@
---
# OpenAPI component schemas for thread-related endpoints.
components:
  schemas:
    # Full representation of a thread, including its participating assistants.
    ThreadObject:
      type: object
      properties:
        id:
          type: string
          description: |
            The identifier of the thread, defaults to foldername.
          example: thread_....
        object:
          type: string
          description: |
            Type of the object, defaults to thread.
          example: thread
        title:
          type: string
          description: >
            A brief summary or description of the thread, defaults to an empty
            string.
          example: funny physics joke
        assistants:
          type: array
          description: ''
          items:
            properties:
              assistant_id:
                type: string
                description: |
                  The identifier of assistant, defaults to "jan"
                example: jan
              model:
                type: object
                properties:
                  id:
                    type: string
                    description: ''
                    example: ...
                  settings:
                    type: object
                    description: >
                      Defaults to and overrides assistant.json's "settings" (and if none,
                      then model.json "settings")
                  parameters:
                    type: object
                    description: >
                      Defaults to and overrides assistant.json's "parameters" (and if
                      none, then model.json "parameters")
        created:
          type: integer
          format: int64
          description: >
            Unix timestamp representing the creation time of the thread,
            defaults to file creation time.
          example: 1231231
        metadata:
          type: object
          description: |
            Metadata associated with the thread, defaults to an empty object.
          example: {}
    # Response schema for fetching a single thread.
    GetThreadResponse:
      type: object
      properties:
        id:
          type: string
          description: The identifier of the thread.
          example: thread_abc123
        object:
          type: string
          description: Type of the object
          example: thread
        created_at:
          type: integer
          format: int64
          description: Unix timestamp representing the creation time of the thread.
          example: 1699014083
        assistants:
          type: array
          items:
            type: string
          description: List of assistants involved in the thread.
          example:
            - assistant-001
        metadata:
          type: object
          description: Metadata associated with the thread.
          example: {}
        messages:
          type: array
          items:
            type: string
          description: List of messages within the thread.
          example: []
    # Response schema returned after a thread is created.
    CreateThreadResponse:
      type: object
      properties:
        id:
          type: string
          description: The identifier of the newly created thread.
          example: thread_abc123
        object:
          type: string
          description: Type of the object, indicating it's a thread.
          example: thread
        created_at:
          type: integer
          format: int64
          description: Unix timestamp representing the creation time of the thread.
          example: 1699014083
        metadata:
          type: object
          description: Metadata associated with the newly created thread.
          example: {}
    # Request body schema for creating a new thread.
    CreateThreadObject:
      type: object
      properties:
        object:
          type: string
          description: Type of the object, indicating it's a thread.
          example: thread
        title:
          type: string
          description: >
            A brief summary or description of the thread, defaults to an empty
            string.
          example: funny physics joke
        assistants:
          type: array
          description: assistant involved in the thread
          items:
            properties:
              assistant_id:
                type: string
                description: |
                  The identifier of assistant, defaults to "jan"
                example: jan
              assistant_name:
                type: string
                description: |
                  The name of assistant, defaults to "Jan"
                example: Jan
              instructions:
                type: string
                description: >
                  The instruction of assistant, defaults to "Be my grammar corrector"
              model:
                type: object
                properties:
                  id:
                    type: string
                    description: Model id
                    example: mistral-ins-7b-q4
                  settings:
                    type: object
                    description: >
                      Defaults to and overrides assistant.json's "settings" (and if none,
                      then model.json "settings")
                  parameters:
                    type: object
                    description: >
                      Defaults to and overrides assistant.json's "parameters" (and if
                      none, then model.json "parameters")
                  engine:
                    type: string
                    description: Engine id
                    example: nitro
        metadata:
          type: object
          description: |
            Metadata associated with the thread, defaults to an empty object.
    # A message payload within a thread (request form).
    ThreadMessageObject:
      type: object
      properties:
        role:
          type: string
          description: |
            "Role of the sender, either 'user' or 'assistant'."
          enum:
            - user
            - assistant
        content:
          type: string
          description: |
            "Text content of the message."
        file_ids:
          type: array
          items:
            type: string
          description: |
            "Array of file IDs associated with the message, if any."
    # Response schema returned after modifying a thread.
    ModifyThreadResponse:
      type: object
      properties:
        id:
          type: string
          description: |
            "The identifier of the modified thread."
          example: thread_abc123
        object:
          type: string
          description: Type of the object, indicating it's a thread.
          example: thread
        created_at:
          type: integer
          format: int64
          description: Unix timestamp representing the creation time of the thread.
          example: 1699014083
        metadata:
          type: object
          description: Metadata associated with the modified thread.
          example: {}
    # Response schema returned after deleting a thread.
    DeleteThreadResponse:
      type: object
      properties:
        id:
          type: string
          description: The identifier of the deleted thread.
          example: thread_abc123
        object:
          type: string
          description: Type of the object, indicating the thread has been deleted.
          example: thread.deleted
        deleted:
          type: boolean
          description: Indicates whether the thread was successfully deleted.
          example: true

View File

@ -1 +0,0 @@
v1.23.2

View File

@ -18,17 +18,12 @@ type SystemType = {
const systemsTemplate: SystemType[] = [
{
name: 'Mac M1, M2, M3',
label: 'Apple Silicon',
name: 'Mac ',
label: 'Universal',
logo: FaApple,
fileFormat: '{appname}-mac-arm64-{tag}.dmg',
},
{
name: 'Mac (Intel)',
label: 'Apple Intel',
logo: FaApple,
fileFormat: '{appname}-mac-x64-{tag}.dmg',
fileFormat: '{appname}-mac-universal-{tag}.dmg',
},
{
name: 'Windows',
label: 'Standard (64-bit)',

View File

@ -24,14 +24,9 @@ type GpuInfo = {
const systemsTemplate: SystemType[] = [
{
name: 'Download for Mac (M1/M2/M3)',
name: 'Download for Mac',
logo: FaApple,
fileFormat: '{appname}-mac-arm64-{tag}.dmg',
},
{
name: 'Download for Mac (Intel)',
logo: FaApple,
fileFormat: '{appname}-mac-x64-{tag}.dmg',
fileFormat: '{appname}-mac-universal-{tag}.dmg',
},
{
name: 'Download for Windows',
@ -66,27 +61,20 @@ const DropdownDownload = ({ lastRelease }: Props) => {
type: '',
})
const changeDefaultSystem = useCallback(
async (systems: SystemType[]) => {
const userAgent = navigator.userAgent
if (userAgent.includes('Windows')) {
// windows user
setDefaultSystem(systems[2])
} else if (userAgent.includes('Linux')) {
// linux user
setDefaultSystem(systems[3])
} else if (userAgent.includes('Mac OS')) {
if (gpuInfo.type === 'Apple Silicon') {
setDefaultSystem(systems[0])
} else {
setDefaultSystem(systems[1])
}
} else {
setDefaultSystem(systems[1])
}
},
[gpuInfo.type]
)
const changeDefaultSystem = useCallback(async (systems: SystemType[]) => {
const userAgent = navigator.userAgent
if (userAgent.includes('Windows')) {
// windows user
setDefaultSystem(systems[2])
} else if (userAgent.includes('Linux')) {
// linux user
setDefaultSystem(systems[3])
} else if (userAgent.includes('Mac OS')) {
setDefaultSystem(systems[0])
} else {
setDefaultSystem(systems[1])
}
}, [])
function getUnmaskedInfo(gl: WebGLRenderingContext): {
renderer: string

View File

@ -15,7 +15,6 @@
"build/**/*.{js,map}",
"pre-install",
"themes",
"docs/**/*",
"scripts/**/*",
"icons/**/*",
"themes",

View File

@ -108,7 +108,7 @@ export const test = base.extend<
})
test.beforeAll(async () => {
await rmSync(path.join(__dirname, '../../test-data'), {
rmSync(path.join(__dirname, '../../test-data'), {
recursive: true,
force: true,
})
@ -122,6 +122,5 @@ test.beforeAll(async () => {
})
test.afterAll(async () => {
// temporally disabling this due to the config for parallel testing WIP
// teardownElectron()
})

View File

@ -2,11 +2,8 @@ import { expect } from '@playwright/test'
import { page, test, TIMEOUT } from '../config/fixtures'
test('renders left navigation panel', async () => {
const settingsBtn = await page
.getByTestId('Thread')
.first()
.isEnabled({ timeout: TIMEOUT })
expect([settingsBtn].filter((e) => !e).length).toBe(0)
const threadBtn = page.getByTestId('Thread').first()
await expect(threadBtn).toBeVisible({ timeout: TIMEOUT })
// Chat section should be there
await page.getByTestId('Local API Server').first().click({
timeout: TIMEOUT,

View File

@ -15,7 +15,13 @@ test('Select GPT model from Hub and Chat with Invalid API Key', async ({
await page.getByTestId('txt-input-chat').fill('dummy value')
await page.getByTestId('btn-send-chat').click()
const denyButton = page.locator('[data-testid="btn-deny-product-analytics"]')
if ((await denyButton.count()) > 0) {
await denyButton.click({ force: true })
} else {
await page.getByTestId('btn-send-chat').click({ force: true })
}
await page.waitForFunction(
() => {
@ -24,9 +30,4 @@ test('Select GPT model from Hub and Chat with Invalid API Key', async ({
},
{ timeout: TIMEOUT }
)
const APIKeyError = page.getByTestId('passthrough-error-message')
await expect(APIKeyError).toBeVisible({
timeout: TIMEOUT,
})
})

View File

@ -127,7 +127,7 @@ export default class JanAssistantExtension extends AssistantExtension {
thread_location: undefined,
id: 'jan',
object: 'assistant',
created_at: Date.now(),
created_at: Date.now() / 1000,
name: 'Jan',
description: 'A default assistant that can use all downloaded models',
model: '*',
@ -141,7 +141,7 @@ export default class JanAssistantExtension extends AssistantExtension {
top_k: 2,
chunk_size: 1024,
chunk_overlap: 64,
retrieval_template: `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
retrieval_template: `Use the following pieces of context to answer the question at the end.
----------------
CONTEXT: {CONTEXT}
----------------

View File

@ -9,13 +9,14 @@ export function toolRetrievalUpdateTextSplitter(
retrieval.updateTextSplitter(chunkSize, chunkOverlap)
}
export async function toolRetrievalIngestNewDocument(
thread: string,
file: string,
model: string,
engine: string,
useTimeWeighted: boolean
) {
const filePath = path.join(getJanDataFolderPath(), normalizeFilePath(file))
const threadPath = path.dirname(filePath.replace('files', ''))
const threadPath = path.join(getJanDataFolderPath(), 'threads', thread)
const filePath = path.join(getJanDataFolderPath(), 'files', file)
retrieval.updateEmbeddingEngine(model, engine)
return retrieval
.ingestAgentKnowledge(filePath, `${threadPath}/memory`, useTimeWeighted)

View File

@ -35,6 +35,7 @@ export class RetrievalTool extends InferenceTool {
await executeOnMain(
NODE,
'toolRetrievalIngestNewDocument',
data.thread?.id,
docFile,
data.model?.id,
data.model?.engine,

View File

@ -18,12 +18,14 @@
"devDependencies": {
"cpx": "^1.5.0",
"rimraf": "^3.0.2",
"ts-loader": "^9.5.0",
"webpack": "^5.88.2",
"webpack-cli": "^5.1.4",
"ts-loader": "^9.5.0"
"webpack-cli": "^5.1.4"
},
"dependencies": {
"@janhq/core": "file:../../core"
"@janhq/core": "file:../../core",
"ky": "^1.7.2",
"p-queue": "^8.0.1"
},
"engines": {
"node": ">=18.0.0"

View File

@ -0,0 +1,14 @@
export {}
declare global {
declare const API_URL: string
declare const SOCKET_URL: string
interface Core {
api: APIFunctions
events: EventEmitter
}
interface Window {
core?: Core | undefined
electronAPI?: any | undefined
}
}

View File

@ -1,408 +0,0 @@
/**
* @jest-environment jsdom
*/
jest.mock('@janhq/core', () => ({
...jest.requireActual('@janhq/core/node'),
fs: {
existsSync: jest.fn(),
mkdir: jest.fn(),
writeFileSync: jest.fn(),
readdirSync: jest.fn(),
readFileSync: jest.fn(),
appendFileSync: jest.fn(),
rm: jest.fn(),
writeBlob: jest.fn(),
joinPath: jest.fn(),
fileStat: jest.fn(),
},
joinPath: jest.fn(),
ConversationalExtension: jest.fn(),
}))
import { fs } from '@janhq/core'
import JSONConversationalExtension from '.'
describe('JSONConversationalExtension Tests', () => {
let extension: JSONConversationalExtension
beforeEach(() => {
// @ts-ignore
extension = new JSONConversationalExtension()
})
it('should create thread folder on load if it does not exist', async () => {
// @ts-ignore
jest.spyOn(fs, 'existsSync').mockResolvedValue(false)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
await extension.onLoad()
expect(mkdirSpy).toHaveBeenCalledWith('file://threads')
})
it('should log message on unload', () => {
const consoleSpy = jest.spyOn(console, 'debug').mockImplementation()
extension.onUnload()
expect(consoleSpy).toHaveBeenCalledWith(
'JSONConversationalExtension unloaded'
)
})
it('should return sorted threads', async () => {
jest
.spyOn(extension, 'getValidThreadDirs')
.mockResolvedValue(['dir1', 'dir2'])
jest
.spyOn(extension, 'readThread')
.mockResolvedValueOnce({ updated: '2023-01-01' })
.mockResolvedValueOnce({ updated: '2023-01-02' })
const threads = await extension.getThreads()
expect(threads).toEqual([
{ updated: '2023-01-02' },
{ updated: '2023-01-01' },
])
})
it('should ignore broken threads', async () => {
jest
.spyOn(extension, 'getValidThreadDirs')
.mockResolvedValue(['dir1', 'dir2'])
jest
.spyOn(extension, 'readThread')
.mockResolvedValueOnce(JSON.stringify({ updated: '2023-01-01' }))
.mockResolvedValueOnce('this_is_an_invalid_json_content')
const threads = await extension.getThreads()
expect(threads).toEqual([{ updated: '2023-01-01' }])
})
it('should save thread', async () => {
// @ts-ignore
jest.spyOn(fs, 'existsSync').mockResolvedValue(false)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
const writeFileSyncSpy = jest
.spyOn(fs, 'writeFileSync')
.mockResolvedValue({})
const thread = { id: '1', updated: '2023-01-01' } as any
await extension.saveThread(thread)
expect(mkdirSpy).toHaveBeenCalled()
expect(writeFileSyncSpy).toHaveBeenCalled()
})
it('should delete thread', async () => {
const rmSpy = jest.spyOn(fs, 'rm').mockResolvedValue({})
await extension.deleteThread('1')
expect(rmSpy).toHaveBeenCalled()
})
it('should add new message', async () => {
// @ts-ignore
jest.spyOn(fs, 'existsSync').mockResolvedValue(false)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
const appendFileSyncSpy = jest
.spyOn(fs, 'appendFileSync')
.mockResolvedValue({})
const message = {
thread_id: '1',
content: [{ type: 'text', text: { annotations: [] } }],
} as any
await extension.addNewMessage(message)
expect(mkdirSpy).toHaveBeenCalled()
expect(appendFileSyncSpy).toHaveBeenCalled()
})
it('should store image', async () => {
const writeBlobSpy = jest.spyOn(fs, 'writeBlob').mockResolvedValue({})
await extension.storeImage(
'data:image/png;base64,abcd',
'path/to/image.png'
)
expect(writeBlobSpy).toHaveBeenCalled()
})
it('should store file', async () => {
const writeBlobSpy = jest.spyOn(fs, 'writeBlob').mockResolvedValue({})
await extension.storeFile(
'data:application/pdf;base64,abcd',
'path/to/file.pdf'
)
expect(writeBlobSpy).toHaveBeenCalled()
})
it('should write messages', async () => {
// @ts-ignore
jest.spyOn(fs, 'existsSync').mockResolvedValue(false)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
const writeFileSyncSpy = jest
.spyOn(fs, 'writeFileSync')
.mockResolvedValue({})
const messages = [{ id: '1', thread_id: '1', content: [] }] as any
await extension.writeMessages('1', messages)
expect(mkdirSpy).toHaveBeenCalled()
expect(writeFileSyncSpy).toHaveBeenCalled()
})
it('should get all messages on string response', async () => {
jest.spyOn(fs, 'readdirSync').mockResolvedValue(['messages.jsonl'])
jest.spyOn(fs, 'readFileSync').mockResolvedValue('{"id":"1"}\n{"id":"2"}\n')
const messages = await extension.getAllMessages('1')
expect(messages).toEqual([{ id: '1' }, { id: '2' }])
})
it('should get all messages on object response', async () => {
jest.spyOn(fs, 'readdirSync').mockResolvedValue(['messages.jsonl'])
jest.spyOn(fs, 'readFileSync').mockResolvedValue({ id: 1 })
const messages = await extension.getAllMessages('1')
expect(messages).toEqual([{ id: 1 }])
})
it('get all messages return empty on error', async () => {
jest.spyOn(fs, 'readdirSync').mockRejectedValue(['messages.jsonl'])
const messages = await extension.getAllMessages('1')
expect(messages).toEqual([])
})
it('return empty messages on no messages file', async () => {
jest.spyOn(fs, 'readdirSync').mockResolvedValue([])
const messages = await extension.getAllMessages('1')
expect(messages).toEqual([])
})
it('should ignore error message', async () => {
jest.spyOn(fs, 'readdirSync').mockResolvedValue(['messages.jsonl'])
jest
.spyOn(fs, 'readFileSync')
.mockResolvedValue('{"id":"1"}\nyolo\n{"id":"2"}\n')
const messages = await extension.getAllMessages('1')
expect(messages).toEqual([{ id: '1' }, { id: '2' }])
})
it('should create thread folder on load if it does not exist', async () => {
// @ts-ignore
jest.spyOn(fs, 'existsSync').mockResolvedValue(false)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
await extension.onLoad()
expect(mkdirSpy).toHaveBeenCalledWith('file://threads')
})
it('should log message on unload', () => {
const consoleSpy = jest.spyOn(console, 'debug').mockImplementation()
extension.onUnload()
expect(consoleSpy).toHaveBeenCalledWith(
'JSONConversationalExtension unloaded'
)
})
it('should return sorted threads', async () => {
jest
.spyOn(extension, 'getValidThreadDirs')
.mockResolvedValue(['dir1', 'dir2'])
jest
.spyOn(extension, 'readThread')
.mockResolvedValueOnce({ updated: '2023-01-01' })
.mockResolvedValueOnce({ updated: '2023-01-02' })
const threads = await extension.getThreads()
expect(threads).toEqual([
{ updated: '2023-01-02' },
{ updated: '2023-01-01' },
])
})
it('should ignore broken threads', async () => {
jest
.spyOn(extension, 'getValidThreadDirs')
.mockResolvedValue(['dir1', 'dir2'])
jest
.spyOn(extension, 'readThread')
.mockResolvedValueOnce(JSON.stringify({ updated: '2023-01-01' }))
.mockResolvedValueOnce('this_is_an_invalid_json_content')
const threads = await extension.getThreads()
expect(threads).toEqual([{ updated: '2023-01-01' }])
})
it('should save thread', async () => {
// @ts-ignore
jest.spyOn(fs, 'existsSync').mockResolvedValue(false)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
const writeFileSyncSpy = jest
.spyOn(fs, 'writeFileSync')
.mockResolvedValue({})
const thread = { id: '1', updated: '2023-01-01' } as any
await extension.saveThread(thread)
expect(mkdirSpy).toHaveBeenCalled()
expect(writeFileSyncSpy).toHaveBeenCalled()
})
it('should delete thread', async () => {
const rmSpy = jest.spyOn(fs, 'rm').mockResolvedValue({})
await extension.deleteThread('1')
expect(rmSpy).toHaveBeenCalled()
})
it('should add new message', async () => {
// @ts-ignore
jest.spyOn(fs, 'existsSync').mockResolvedValue(false)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
const appendFileSyncSpy = jest
.spyOn(fs, 'appendFileSync')
.mockResolvedValue({})
const message = {
thread_id: '1',
content: [{ type: 'text', text: { annotations: [] } }],
} as any
await extension.addNewMessage(message)
expect(mkdirSpy).toHaveBeenCalled()
expect(appendFileSyncSpy).toHaveBeenCalled()
})
it('should add new image message', async () => {
jest
.spyOn(fs, 'existsSync')
// @ts-ignore
.mockResolvedValueOnce(false)
// @ts-ignore
.mockResolvedValueOnce(false)
// @ts-ignore
.mockResolvedValueOnce(true)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
const appendFileSyncSpy = jest
.spyOn(fs, 'appendFileSync')
.mockResolvedValue({})
jest.spyOn(fs, 'writeBlob').mockResolvedValue({})
const message = {
thread_id: '1',
content: [
{ type: 'image', text: { annotations: ['data:image;base64,hehe'] } },
],
} as any
await extension.addNewMessage(message)
expect(mkdirSpy).toHaveBeenCalled()
expect(appendFileSyncSpy).toHaveBeenCalled()
})
it('should add new pdf message', async () => {
jest
.spyOn(fs, 'existsSync')
// @ts-ignore
.mockResolvedValueOnce(false)
// @ts-ignore
.mockResolvedValueOnce(false)
// @ts-ignore
.mockResolvedValueOnce(true)
const mkdirSpy = jest.spyOn(fs, 'mkdir').mockResolvedValue({})
const appendFileSyncSpy = jest
.spyOn(fs, 'appendFileSync')
.mockResolvedValue({})
jest.spyOn(fs, 'writeBlob').mockResolvedValue({})
const message = {
thread_id: '1',
content: [
{ type: 'pdf', text: { annotations: ['data:pdf;base64,hehe'] } },
],
} as any
await extension.addNewMessage(message)
expect(mkdirSpy).toHaveBeenCalled()
expect(appendFileSyncSpy).toHaveBeenCalled()
})
it('should store image', async () => {
const writeBlobSpy = jest.spyOn(fs, 'writeBlob').mockResolvedValue({})
await extension.storeImage(
'data:image/png;base64,abcd',
'path/to/image.png'
)
expect(writeBlobSpy).toHaveBeenCalled()
})
it('should store file', async () => {
const writeBlobSpy = jest.spyOn(fs, 'writeBlob').mockResolvedValue({})
await extension.storeFile(
'data:application/pdf;base64,abcd',
'path/to/file.pdf'
)
expect(writeBlobSpy).toHaveBeenCalled()
})
})
// Covers the low-level filesystem helpers of the extension: reading a single
// thread file and filtering candidate directories down to valid threads.
describe('test readThread', () => {
  let extension: JSONConversationalExtension

  beforeEach(() => {
    // @ts-ignore
    extension = new JSONConversationalExtension()
  })

  it('should read thread', async () => {
    jest
      .spyOn(fs, 'readFileSync')
      .mockResolvedValue(JSON.stringify({ id: '1' }))
    const thread = await extension.readThread('1')
    // readThread returns the raw file content; parsing happens in getThreads
    expect(thread).toEqual(`{"id":"1"}`)
  })

  it('getValidThreadDirs should return valid thread directories', async () => {
    // dir '3' yields an empty listing (no thread.json), so it is filtered out
    jest
      .spyOn(fs, 'readdirSync')
      .mockResolvedValueOnce(['1', '2', '3'])
      .mockResolvedValueOnce(['thread.json'])
      .mockResolvedValueOnce(['thread.json'])
      .mockResolvedValueOnce([])
    // @ts-ignore
    jest.spyOn(fs, 'existsSync').mockResolvedValue(true)
    jest.spyOn(fs, 'fileStat').mockResolvedValue({
      isDirectory: true,
    } as any)
    const validThreadDirs = await extension.getValidThreadDirs()
    expect(validThreadDirs).toEqual(['1', '2'])
  })
})

View File

@ -1,90 +1,71 @@
import {
fs,
joinPath,
ConversationalExtension,
Thread,
ThreadAssistantInfo,
ThreadMessage,
} from '@janhq/core'
import { safelyParseJSON } from './jsonUtil'
import ky from 'ky'
import PQueue from 'p-queue'
type ThreadList = {
data: Thread[]
}
type MessageList = {
data: ThreadMessage[]
}
/**
* JSONConversationalExtension is a ConversationalExtension implementation that provides
* functionality for managing threads.
*/
export default class JSONConversationalExtension extends ConversationalExtension {
private static readonly _threadFolder = 'file://threads'
private static readonly _threadInfoFileName = 'thread.json'
private static readonly _threadMessagesFileName = 'messages.jsonl'
export default class CortexConversationalExtension extends ConversationalExtension {
queue = new PQueue({ concurrency: 1 })
/**
* Called when the extension is loaded.
*/
async onLoad() {
if (!(await fs.existsSync(JSONConversationalExtension._threadFolder))) {
await fs.mkdir(JSONConversationalExtension._threadFolder)
}
this.queue.add(() => this.healthz())
}
/**
* Called when the extension is unloaded.
*/
onUnload() {
console.debug('JSONConversationalExtension unloaded')
}
onUnload() {}
/**
* Returns a Promise that resolves to an array of Conversation objects.
*/
async getThreads(): Promise<Thread[]> {
try {
const threadDirs = await this.getValidThreadDirs()
const promises = threadDirs.map((dirName) => this.readThread(dirName))
const promiseResults = await Promise.allSettled(promises)
const convos = promiseResults
.map((result) => {
if (result.status === 'fulfilled') {
return typeof result.value === 'object'
? result.value
: safelyParseJSON(result.value)
}
return undefined
})
.filter((convo) => !!convo)
convos.sort(
(a, b) => new Date(b.updated).getTime() - new Date(a.updated).getTime()
)
return convos
} catch (error) {
console.error(error)
return []
}
async listThreads(): Promise<Thread[]> {
return this.queue.add(() =>
ky
.get(`${API_URL}/v1/threads`)
.json<ThreadList>()
.then((e) => e.data)
) as Promise<Thread[]>
}
/**
* Saves a Thread object to a json file.
* @param thread The Thread object to save.
*/
async saveThread(thread: Thread): Promise<void> {
try {
const threadDirPath = await joinPath([
JSONConversationalExtension._threadFolder,
thread.id,
])
const threadJsonPath = await joinPath([
threadDirPath,
JSONConversationalExtension._threadInfoFileName,
])
if (!(await fs.existsSync(threadDirPath))) {
await fs.mkdir(threadDirPath)
}
async createThread(thread: Thread): Promise<Thread> {
return this.queue.add(() =>
ky.post(`${API_URL}/v1/threads`, { json: thread }).json<Thread>()
) as Promise<Thread>
}
await fs.writeFileSync(threadJsonPath, JSON.stringify(thread, null, 2))
} catch (err) {
console.error(err)
Promise.reject(err)
}
/**
* Saves a Thread object to a json file.
* @param thread The Thread object to save.
*/
async modifyThread(thread: Thread): Promise<void> {
return this.queue
.add(() =>
ky.post(`${API_URL}/v1/threads/${thread.id}`, { json: thread })
)
.then()
}
/**
@ -92,189 +73,126 @@ export default class JSONConversationalExtension extends ConversationalExtension
* @param threadId The ID of the thread to delete.
*/
async deleteThread(threadId: string): Promise<void> {
const path = await joinPath([
JSONConversationalExtension._threadFolder,
`${threadId}`,
])
try {
await fs.rm(path)
} catch (err) {
console.error(err)
}
return this.queue
.add(() => ky.delete(`${API_URL}/v1/threads/${threadId}`))
.then()
}
async addNewMessage(message: ThreadMessage): Promise<void> {
try {
const threadDirPath = await joinPath([
JSONConversationalExtension._threadFolder,
message.thread_id,
])
const threadMessagePath = await joinPath([
threadDirPath,
JSONConversationalExtension._threadMessagesFileName,
])
if (!(await fs.existsSync(threadDirPath))) await fs.mkdir(threadDirPath)
if (message.content[0]?.type === 'image') {
const filesPath = await joinPath([threadDirPath, 'files'])
if (!(await fs.existsSync(filesPath))) await fs.mkdir(filesPath)
const imagePath = await joinPath([filesPath, `${message.id}.png`])
const base64 = message.content[0].text.annotations[0]
await this.storeImage(base64, imagePath)
if ((await fs.existsSync(imagePath)) && message.content?.length) {
// Use file path instead of blob
message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.png`
}
}
if (message.content[0]?.type === 'pdf') {
const filesPath = await joinPath([threadDirPath, 'files'])
if (!(await fs.existsSync(filesPath))) await fs.mkdir(filesPath)
const filePath = await joinPath([filesPath, `${message.id}.pdf`])
const blob = message.content[0].text.annotations[0]
await this.storeFile(blob, filePath)
if ((await fs.existsSync(filePath)) && message.content?.length) {
// Use file path instead of blob
message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.pdf`
}
}
await fs.appendFileSync(threadMessagePath, JSON.stringify(message) + '\n')
Promise.resolve()
} catch (err) {
Promise.reject(err)
}
/**
* Adds a new message to a specified thread.
* @param message The ThreadMessage object to be added.
* @returns A Promise that resolves when the message has been added.
*/
async createMessage(message: ThreadMessage): Promise<ThreadMessage> {
return this.queue.add(() =>
ky
.post(`${API_URL}/v1/threads/${message.thread_id}/messages`, {
json: message,
})
.json<ThreadMessage>()
) as Promise<ThreadMessage>
}
async storeImage(base64: string, filePath: string): Promise<void> {
const base64Data = base64.replace(/^data:image\/\w+;base64,/, '')
try {
await fs.writeBlob(filePath, base64Data)
} catch (err) {
console.error(err)
}
/**
 * Modifies an existing message in a thread.
 * @param message The ThreadMessage carrying the updated fields.
 * @returns A Promise that resolves to the updated ThreadMessage.
 */
async modifyMessage(message: ThreadMessage): Promise<ThreadMessage> {
  const url = `${API_URL}/v1/threads/${message.thread_id}/messages/${message.id}`
  // Serialized through the queue to avoid concurrent writes to the same thread
  return this.queue.add(() =>
    ky.post(url, { json: message }).json<ThreadMessage>()
  ) as Promise<ThreadMessage>
}
async storeFile(base64: string, filePath: string): Promise<void> {
const base64Data = base64.replace(/^data:application\/pdf;base64,/, '')
try {
await fs.writeBlob(filePath, base64Data)
} catch (err) {
console.error(err)
}
/**
 * Deletes a specific message from a thread.
 * @param threadId The ID of the thread containing the message.
 * @param messageId The ID of the message to be deleted.
 * @returns A Promise that resolves when the message has been deleted.
 */
async deleteMessage(threadId: string, messageId: string): Promise<void> {
  const url = `${API_URL}/v1/threads/${threadId}/messages/${messageId}`
  // .then() discards the HTTP response so callers get a plain Promise<void>
  return this.queue.add(() => ky.delete(url)).then()
}
async writeMessages(
/**
 * Retrieves all messages of a thread, in ascending order.
 * @param threadId The ID of the thread to get messages from.
 * @returns A Promise that resolves to an array of ThreadMessage objects.
 */
async listMessages(threadId: string): Promise<ThreadMessage[]> {
  const fetchMessages = () =>
    ky
      .get(`${API_URL}/v1/threads/${threadId}/messages?order=asc`)
      .json<MessageList>()
      .then((res) => res.data)
  return this.queue.add(fetchMessages) as Promise<ThreadMessage[]>
}
/**
 * Retrieves the assistant information for a specified thread.
 * @param threadId The ID of the thread whose assistant is requested.
 * @returns A Promise that resolves to the ThreadAssistantInfo of the
 * assistant associated with the specified thread.
 */
async getThreadAssistant(threadId: string): Promise<ThreadAssistantInfo> {
  const url = `${API_URL}/v1/assistants/${threadId}`
  return this.queue.add(() =>
    ky.get(url).json<ThreadAssistantInfo>()
  ) as Promise<ThreadAssistantInfo>
}
/**
* Creates a new assistant for the specified thread.
* @param threadId The ID of the thread for which the assistant is being created.
* @param assistant The information about the assistant to be created.
* @returns A Promise that resolves to the newly created ThreadAssistantInfo object.
*/
async createThreadAssistant(
threadId: string,
messages: ThreadMessage[]
): Promise<void> {
try {
const threadDirPath = await joinPath([
JSONConversationalExtension._threadFolder,
threadId,
])
const threadMessagePath = await joinPath([
threadDirPath,
JSONConversationalExtension._threadMessagesFileName,
])
if (!(await fs.existsSync(threadDirPath))) await fs.mkdir(threadDirPath)
await fs.writeFileSync(
threadMessagePath,
messages.map((msg) => JSON.stringify(msg)).join('\n') +
(messages.length ? '\n' : '')
)
Promise.resolve()
} catch (err) {
Promise.reject(err)
}
assistant: ThreadAssistantInfo
): Promise<ThreadAssistantInfo> {
return this.queue.add(() =>
ky
.post(`${API_URL}/v1/assistants/${threadId}`, { json: assistant })
.json<ThreadAssistantInfo>()
) as Promise<ThreadAssistantInfo>
}
/**
* A promise builder for reading a thread from a file.
* @param threadDirName the thread dir we are reading from.
* @returns data of the thread
* Modifies an existing assistant for the specified thread.
* @param threadId The ID of the thread for which the assistant is being modified.
* @param assistant The updated information for the assistant.
* @returns A Promise that resolves to the updated ThreadAssistantInfo object.
*/
async readThread(threadDirName: string): Promise<any> {
return fs.readFileSync(
await joinPath([
JSONConversationalExtension._threadFolder,
threadDirName,
JSONConversationalExtension._threadInfoFileName,
]),
'utf-8'
)
/**
 * Modifies an existing assistant for the specified thread.
 * @param threadId The ID of the thread whose assistant is being modified.
 * @param assistant The updated information for the assistant.
 * @returns A Promise that resolves to the updated ThreadAssistantInfo object.
 */
async modifyThreadAssistant(
  threadId: string,
  assistant: ThreadAssistantInfo
): Promise<ThreadAssistantInfo> {
  const url = `${API_URL}/v1/assistants/${threadId}`
  return this.queue.add(() =>
    ky.patch(url, { json: assistant }).json<ThreadAssistantInfo>()
  ) as Promise<ThreadAssistantInfo>
}
/**
* Returns a Promise that resolves to an array of thread directories.
* @private
* Do health check on cortex.cpp
* @returns
*/
async getValidThreadDirs(): Promise<string[]> {
const fileInsideThread: string[] = await fs.readdirSync(
JSONConversationalExtension._threadFolder
)
const threadDirs: string[] = []
for (let i = 0; i < fileInsideThread.length; i++) {
const path = await joinPath([
JSONConversationalExtension._threadFolder,
fileInsideThread[i],
])
if (!(await fs.fileStat(path))?.isDirectory) continue
const isHavingThreadInfo = (await fs.readdirSync(path)).includes(
JSONConversationalExtension._threadInfoFileName
)
if (!isHavingThreadInfo) {
console.debug(`Ignore ${path} because it does not have thread info`)
continue
}
threadDirs.push(fileInsideThread[i])
}
return threadDirs
}
async getAllMessages(threadId: string): Promise<ThreadMessage[]> {
try {
const threadDirPath = await joinPath([
JSONConversationalExtension._threadFolder,
threadId,
])
const files: string[] = await fs.readdirSync(threadDirPath)
if (
!files.includes(JSONConversationalExtension._threadMessagesFileName)
) {
console.debug(`${threadDirPath} not contains message file`)
return []
}
const messageFilePath = await joinPath([
threadDirPath,
JSONConversationalExtension._threadMessagesFileName,
])
let readResult = await fs.readFileSync(messageFilePath, 'utf-8')
if (typeof readResult === 'object') {
readResult = JSON.stringify(readResult)
}
const result = readResult.split('\n').filter((line) => line !== '')
const messages: ThreadMessage[] = []
result.forEach((line: string) => {
const message = safelyParseJSON(line)
if (message) messages.push(safelyParseJSON(line))
healthz(): Promise<void> {
return ky
.get(`${API_URL}/healthz`, {
retry: { limit: 20, delay: () => 500, methods: ['get'] },
})
return messages
} catch (err) {
console.error(err)
return []
}
.then(() => {})
}
}

View File

@ -1,14 +0,0 @@
// Note about performance
// Older v8 engines could not optimise functions containing a try/catch block;
// v8 4.5 and above can, but this function is kept deliberately tiny anyway.
export function safelyParseJSON(json) {
  try {
    // Returns whatever JSON.parse yields — which may itself be undefined
    return JSON.parse(json)
  } catch {
    // Malformed input: signal failure with undefined instead of throwing
    return undefined
  }
}

View File

@ -17,7 +17,12 @@ module.exports = {
filename: 'index.js', // Adjust the output file name as needed
library: { type: 'module' }, // Specify ESM output format
},
plugins: [new webpack.DefinePlugin({})],
plugins: [
new webpack.DefinePlugin({
API_URL: JSON.stringify('http://127.0.0.1:39291'),
SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'),
}),
],
resolve: {
extensions: ['.ts', '.js'],
},

View File

@ -1 +1 @@
1.0.4
1.0.5-rc2

View File

@ -2,7 +2,7 @@
set BIN_PATH=./bin
set SHARED_PATH=./../../electron/shared
set /p CORTEX_VERSION=<./bin/version.txt
set ENGINE_VERSION=0.1.40
set ENGINE_VERSION=0.1.42
@REM Download cortex.llamacpp binaries
set DOWNLOAD_URL=https://github.com/janhq/cortex.llamacpp/releases/download/v%ENGINE_VERSION%/cortex.llamacpp-%ENGINE_VERSION%-windows-amd64
@ -38,4 +38,4 @@ for %%F in (%SUBFOLDERS%) do (
)
)
echo DLL files moved successfully.
echo DLL files moved successfully.

View File

@ -2,7 +2,7 @@
# Read CORTEX_VERSION
CORTEX_VERSION=$(cat ./bin/version.txt)
ENGINE_VERSION=0.1.40
ENGINE_VERSION=0.1.42
CORTEX_RELEASE_URL="https://github.com/janhq/cortex.cpp/releases/download"
ENGINE_DOWNLOAD_URL="https://github.com/janhq/cortex.llamacpp/releases/download/v${ENGINE_VERSION}/cortex.llamacpp-${ENGINE_VERSION}"
CUDA_DOWNLOAD_URL="https://github.com/janhq/cortex.llamacpp/releases/download/v${ENGINE_VERSION}"

View File

@ -120,7 +120,7 @@ export default [
SETTINGS: JSON.stringify(defaultSettingJson),
CORTEX_API_URL: JSON.stringify('http://127.0.0.1:39291'),
CORTEX_SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.40'),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.42'),
}),
// Allow json resolution
json(),

View File

@ -61,37 +61,6 @@
},
"engine": "groq"
},
{
"sources": [
{
"url": "https://groq.com"
}
],
"id": "llama-3.1-70b-versatile",
"object": "model",
"name": "Groq Llama 3.1 70b Versatile",
"version": "1.1",
"description": "Groq Llama 3.1 70b Versatile with supercharged speed!",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 8000,
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"stop": [],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "Meta",
"tags": [
"General",
"Big Context Length"
]
},
"engine": "groq"
},
{
"sources": [
{

View File

@ -1,7 +1,7 @@
{
"name": "@janhq/inference-openai-extension",
"productName": "OpenAI Inference Engine",
"version": "1.0.4",
"version": "1.0.5",
"description": "This extension enables OpenAI chat completion API calls",
"main": "dist/index.js",
"module": "dist/module.js",

View File

@ -67,7 +67,9 @@
"version": "1.1",
"description": "OpenAI GPT 4o is a new flagship model with fast speed and high quality",
"format": "api",
"settings": {},
"settings": {
"vision_model": true
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7,
@ -97,10 +99,10 @@
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 32768,
"temperature": 1,
"top_p": 1,
"stream": true,
"max_tokens": 32768,
"frequency_penalty": 0,
"presence_penalty": 0
},
@ -124,9 +126,9 @@
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 65536,
"temperature": 1,
"top_p": 1,
"max_tokens": 65536,
"stream": true,
"frequency_penalty": 0,
"presence_penalty": 0

View File

@ -74,6 +74,11 @@ export default class JanInferenceOpenAIExtension extends RemoteOAIEngine {
* @returns
*/
transformPayload = (payload: OpenAIPayloadType): OpenAIPayloadType => {
// Remove empty stop words
if (payload.stop?.length === 0) {
const { stop, ...params } = payload
payload = params
}
// Transform the payload for preview models
if (this.previewModels.includes(payload.model)) {
const { max_tokens, stop, ...params } = payload

View File

@ -5,7 +5,6 @@
"module": "dist/esm/index.js",
"types": "dist/index.d.ts",
"description": "A collection of UI component",
"private": true,
"files": [
"dist"
],

View File

@ -27,12 +27,12 @@
"pre-install:linux": "find extensions -type f -path \"**/*.tgz\" -exec cp {} pre-install \\;",
"pre-install:win32": "powershell -Command \"Get-ChildItem -Path \"extensions\" -Recurse -File -Filter \"*.tgz\" | ForEach-Object { Copy-Item -Path $_.FullName -Destination \"pre-install\" }\"",
"pre-install": "run-script-os",
"copy:assets": "cpx \"pre-install/*.tgz\" \"electron/pre-install/\" && cpx \"themes/**\" \"electron/themes\" && cpx \"docs/openapi/**\" \"electron/docs/openapi\"",
"copy:assets": "cpx \"pre-install/*.tgz\" \"electron/pre-install/\" && cpx \"themes/**\" \"electron/themes\"",
"dev:electron": "yarn copy:assets && yarn workspace jan dev",
"dev:web": "yarn workspace @janhq/web dev",
"dev:server": "yarn copy:assets && yarn workspace @janhq/server dev",
"dev:server": "yarn workspace @janhq/server dev",
"dev": "turbo run dev --parallel --filter=!@janhq/server",
"build:server": "yarn copy:assets && cd server && yarn install && yarn run build",
"build:server": "cd server && yarn install && yarn run build",
"build:core": "cd core && yarn install && yarn run build",
"build:web": "yarn workspace @janhq/web build && cpx \"web/out/**\" \"electron/renderer/\"",
"build:electron": "yarn copy:assets && yarn workspace jan build",

6308
server/cortex.json Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +1,9 @@
import fastify from 'fastify'
import dotenv from 'dotenv'
import { v1Router, log, getJanExtensionsPath } from '@janhq/core/node'
import { join } from 'path'
import { log } from '@janhq/core/node'
import tcpPortUsed from 'tcp-port-used'
import { Logger } from './helpers/logger'
import CORTEX_SCHEMA from './cortex.json'
// Load environment variables
dotenv.config()
@ -66,34 +66,29 @@ export const startServer = async (configs?: ServerConfig): Promise<boolean> => {
// Initialize Fastify server with logging
server = fastify({
logger: new Logger(),
loggerInstance: new Logger(),
// Set body limit to 100MB - Default is 1MB
// According to OpenAI - a batch input file can be up to 100 MB in size
// Whisper endpoints accept up to 25MB
// Vision endpoints accept up to 4MB
bodyLimit: 104_857_600
bodyLimit: 104_857_600,
})
// Register CORS if enabled
if (corsEnabled) await server.register(require('@fastify/cors'), {})
CORTEX_SCHEMA.servers[0].url = configs?.prefix ?? '/v1'
// Register Swagger for API documentation
await server.register(require('@fastify/swagger'), {
mode: 'static',
specification: {
path: configs?.schemaPath ?? './../docs/openapi/jan.yaml',
baseDir: configs?.baseDir ?? './../docs/openapi',
postProcessor: function (swaggerObject: any) {
swaggerObject.servers[0].url = configs?.prefix ?? '/v1'
return swaggerObject
},
document: CORTEX_SCHEMA,
},
})
// Register Swagger UI
await server.register(require('@fastify/swagger-ui'), {
routePrefix: '/',
baseDir: configs?.baseDir ?? join(__dirname, '../..', './docs/openapi'),
uiConfig: {
docExpansion: 'full',
deepLinking: false,
@ -102,26 +97,12 @@ export const startServer = async (configs?: ServerConfig): Promise<boolean> => {
transformSpecificationClone: true,
})
// Register static file serving for extensions
// TODO: Watch extension files changes and reload
await server.register(
(childContext: any, _: any, done: any) => {
childContext.register(require('@fastify/static'), {
root: getJanExtensionsPath(),
wildcard: false,
})
server.register(require('@fastify/http-proxy'), {
upstream: 'http://127.0.0.1:39291/v1',
prefix: configs?.prefix ?? '/v1',
http2: false,
})
done()
},
{ prefix: 'extensions' }
)
// Register proxy middleware
if (configs?.storageAdataper)
server.addHook('preHandler', configs.storageAdataper)
// Register API routes
await server.register(v1Router, { prefix: configs?.prefix ?? '/v1' })
// Start listening for requests
await server
.listen({

View File

@ -1,7 +1,6 @@
import { s3 } from './middleware/s3'
import { setup } from './helpers/setup'
import { startServer as start } from './index'
/**
* Setup extensions and start the server
*/
setup().then(() => start({ storageAdataper: s3 }))
setup().then(() => start())

View File

@ -1,70 +0,0 @@
import { join } from 'path'
// Middleware to intercept requests and proxy if certain conditions are met
const config = {
endpoint: process.env.AWS_ENDPOINT,
region: process.env.AWS_REGION,
credentials: {
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
},
}
const S3_BUCKET_NAME = process.env.S3_BUCKET_NAME
const fs = require('@cyclic.sh/s3fs')(S3_BUCKET_NAME, config)
const PROXY_PREFIX = '/v1/fs'
const PROXY_ROUTES = ['/threads', '/messages']
export const s3 = (req: any, reply: any, done: any) => {
// Proxy FS requests to S3 using S3FS
if (req.url.startsWith(PROXY_PREFIX)) {
const route = req.url.split('/').pop()
const args = parseRequestArgs(req)
// Proxy matched requests to the s3fs module
if (args.length && PROXY_ROUTES.some((route) => args[0].includes(route))) {
try {
// Handle customized route
// S3FS does not handle appendFileSync
if (route === 'appendFileSync') {
let result = handAppendFileSync(args)
reply.status(200).send(result)
return
}
// Reroute the other requests to the s3fs module
const result = fs[route](...args)
reply.status(200).send(result)
return
} catch (ex) {
console.error(ex)
}
}
}
// Let other requests go through
done()
}
const parseRequestArgs = (req: Request) => {
const {
getJanDataFolderPath,
normalizeFilePath,
} = require('@janhq/core/node')
return JSON.parse(req.body as any).map((arg: any) =>
typeof arg === 'string' &&
(arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
? join(getJanDataFolderPath(), normalizeFilePath(arg))
: arg
)
}
const handAppendFileSync = (args: any[]) => {
if (fs.existsSync(args[0])) {
const data = fs.readFileSync(args[0], 'utf-8')
return fs.writeFileSync(args[0], data + args[1])
} else {
return fs.writeFileSync(args[0], args[1])
}
}

View File

@ -8,7 +8,8 @@
"homepage": "https://jan.ai",
"description": "Use offline LLMs with your own data. Run open source models like Llama2 or Falcon on your internal computers/servers.",
"files": [
"build/**"
"build/**",
"cortex.json"
],
"scripts": {
"lint": "eslint . --ext \".js,.jsx,.ts,.tsx\"",
@ -19,14 +20,15 @@
"dependencies": {
"@alumna/reflect": "^1.1.3",
"@cyclic.sh/s3fs": "^1.2.9",
"@fastify/cors": "^8.4.2",
"@fastify/cors": "^10.0.1",
"@fastify/http-proxy": "^10.0.0",
"@fastify/static": "^6.12.0",
"@fastify/swagger": "^8.13.0",
"@fastify/swagger-ui": "2.0.1",
"@fastify/swagger": "^9.4.0",
"@fastify/swagger-ui": "5.2.0",
"@janhq/core": "link:./core",
"@npmcli/arborist": "^7.3.1",
"dotenv": "^16.3.1",
"fastify": "^4.24.3",
"fastify": "^5.2.0",
"fetch-retry": "^5.0.6",
"node-fetch": "2",
"request": "^2.88.2",

View File

@ -15,7 +15,8 @@
"paths": { "*": ["node_modules/*"] },
"typeRoots": ["node_modules/@types"],
"ignoreDeprecations": "5.0",
"declaration": true
"declaration": true,
"resolveJsonModule": true
},
// "sourceMap": true,

View File

@ -1,15 +1,45 @@
import { PropsWithChildren } from 'react'
import { useMediaQuery } from '@janhq/joi'
import { useAtomValue } from 'jotai'
import { twMerge } from 'tailwind-merge'
import { MainViewState } from '@/constants/screens'
import { LEFT_PANEL_WIDTH } from '../LeftPanelContainer'
import { RIGHT_PANEL_WIDTH } from '../RightPanelContainer'
import {
mainViewStateAtom,
showLeftPanelAtom,
showRightPanelAtom,
} from '@/helpers/atoms/App.atom'
import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom'
const CenterPanelContainer = ({ children }: PropsWithChildren) => {
type Props = {
isShowStarterScreen?: boolean
} & PropsWithChildren
const CenterPanelContainer = ({ children, isShowStarterScreen }: Props) => {
const reduceTransparent = useAtomValue(reduceTransparentAtom)
const matches = useMediaQuery('(max-width: 880px)')
const showLeftPanel = useAtomValue(showLeftPanelAtom)
const showRightPanel = useAtomValue(showRightPanelAtom)
const mainViewState = useAtomValue(mainViewStateAtom)
return (
<div className={twMerge('flex h-full w-full')}>
<div
className={twMerge('flex h-full w-full')}
style={{
maxWidth: matches
? '100%'
: mainViewState === MainViewState.Thread && !isShowStarterScreen
? `calc(100% - (${showRightPanel ? Number(localStorage.getItem(RIGHT_PANEL_WIDTH)) : 0}px + ${showLeftPanel ? Number(localStorage.getItem(LEFT_PANEL_WIDTH)) : 0}px))`
: '100%',
}}
>
<div
className={twMerge(
'h-full w-full overflow-hidden bg-[hsla(var(--center-panel-bg))]',

View File

@ -30,20 +30,23 @@ describe('ErrorMessage Component', () => {
beforeEach(() => {
jest.clearAllMocks()
;(useAtomValue as jest.Mock).mockReturnValue([])
;(useSetAtom as jest.Mock).mockReturnValue(mockSetMainState)
;(useSetAtom as jest.Mock).mockReturnValue(mockSetSelectedSettingScreen)
;(useSetAtom as jest.Mock).mockReturnValue(mockSetModalTroubleShooting)
;(useSendChatMessage as jest.Mock).mockReturnValue({
resendChatMessage: mockResendChatMessage,
})
; (useAtomValue as jest.Mock).mockReturnValue([])
; (useSetAtom as jest.Mock).mockReturnValue(mockSetMainState)
; (useSetAtom as jest.Mock).mockReturnValue(mockSetSelectedSettingScreen)
; (useSetAtom as jest.Mock).mockReturnValue(mockSetModalTroubleShooting)
; (useSendChatMessage as jest.Mock).mockReturnValue({
resendChatMessage: mockResendChatMessage,
})
})
it('renders error message with InvalidApiKey correctly', () => {
const message: ThreadMessage = {
id: '1',
status: MessageStatus.Error,
error_code: ErrorCode.InvalidApiKey,
metadata: {
error: MessageStatus.Error,
error_code: ErrorCode.InvalidApiKey,
},
status: "completed",
content: [{ text: { value: 'Invalid API Key' } }],
} as ThreadMessage
@ -56,8 +59,11 @@ describe('ErrorMessage Component', () => {
it('renders general error message correctly', () => {
const message: ThreadMessage = {
id: '1',
status: MessageStatus.Error,
error_code: ErrorCode.Unknown,
status: "completed",
metadata: {
error: MessageStatus.Error,
error_code: ErrorCode.Unknown
},
content: [{ text: { value: 'Unknown error occurred' } }],
} as ThreadMessage
@ -69,9 +75,11 @@ describe('ErrorMessage Component', () => {
it('opens troubleshooting modal when link is clicked', () => {
const message: ThreadMessage = {
id: '1',
status: MessageStatus.Error,
error_code: ErrorCode.Unknown,
content: [{ text: { value: 'Unknown error occurred' } }],
status: "completed",
metadata: {
error: MessageStatus.Error,
error_code: ErrorCode.Unknown,
}, content: [{ text: { value: 'Unknown error occurred' } }],
} as ThreadMessage
render(<ErrorMessage message={message} />)

View File

@ -14,41 +14,65 @@ import ModalTroubleShooting, {
import { MainViewState } from '@/constants/screens'
import { isLocalEngine } from '@/utils/modelEngine'
import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { selectedSettingAtom } from '@/helpers/atoms/Setting.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
const setModalTroubleShooting = useSetAtom(modalTroubleShootingAtom)
const setMainState = useSetAtom(mainViewStateAtom)
const setSelectedSettingScreen = useSetAtom(selectedSettingAtom)
const activeThread = useAtomValue(activeThreadAtom)
const activeAssistant = useAtomValue(activeAssistantAtom)
const defaultDesc = () => {
return (
<>
<p>
{`Something's wrong.`} Access&nbsp;
<span
className="cursor-pointer text-[hsla(var(--app-link))] underline"
onClick={() => setModalTroubleShooting(true)}
>
troubleshooting assistance
</span>
&nbsp;now.
</p>
<ModalTroubleShooting />
</>
)
}
const getEngine = () => {
const engineName = activeAssistant?.model?.engine
return engineName ? EngineManager.instance().get(engineName) : null
}
const getErrorTitle = () => {
switch (message.error_code) {
const engine = getEngine()
switch (message.metadata?.error_code) {
case ErrorCode.InvalidApiKey:
case ErrorCode.AuthenticationError:
return (
<span data-testid="invalid-API-key-error">
Invalid API key. Please check your API key from{' '}
<button
className="font-medium text-[hsla(var(--app-link))] underline"
onClick={() => {
setMainState(MainViewState.Settings)
if (activeThread?.assistants[0]?.model.engine) {
const engine = EngineManager.instance().get(
activeThread.assistants[0].model.engine
)
<>
<span data-testid="invalid-API-key-error">
Invalid API key. Please check your API key from{' '}
<button
className="font-medium text-[hsla(var(--app-link))] underline"
onClick={() => {
setMainState(MainViewState.Settings)
engine?.name && setSelectedSettingScreen(engine.name)
}
}}
>
Settings
</button>{' '}
and try again.
</span>
}}
>
Settings
</button>{' '}
and try again.
</span>
{defaultDesc()}
</>
)
default:
return (
@ -56,8 +80,20 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
data-testid="passthrough-error-message"
className="first-letter:uppercase"
>
{message.content[0]?.text?.value && (
<AutoLink text={message.content[0].text.value} />
{message.content[0]?.text?.value === 'Failed to fetch' &&
engine &&
!isLocalEngine(String(engine?.name)) ? (
<span>
No internet connection. <br /> Switch to an on-device model or
check connection.
</span>
) : (
<>
{message?.content[0]?.text?.value && (
<AutoLink text={message?.content[0]?.text?.value} />
)}
{defaultDesc()}
</>
)}
</p>
)
@ -65,24 +101,13 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
}
return (
<div className="mx-auto mt-10 max-w-[700px]">
{message.status === MessageStatus.Error && (
<div className="mx-auto my-6 max-w-[700px]">
{!!message.metadata?.error && (
<div
key={message.id}
className="mx-6 flex flex-col items-center space-y-2 text-center font-medium text-[hsla(var(--text-secondary))]"
>
{getErrorTitle()}
<p>
{`Something's wrong.`} Access&nbsp;
<span
className="cursor-pointer text-[hsla(var(--app-link))] underline"
onClick={() => setModalTroubleShooting(true)}
>
troubleshooting assistance
</span>
&nbsp;now.
</p>
<ModalTroubleShooting />
</div>
)}
</div>

View File

@ -35,7 +35,7 @@ const BottomPanel = () => {
return (
<div
className={twMerge(
'fixed bottom-0 left-0 z-50 flex h-9 w-full items-center justify-between px-3 text-xs',
'fixed bottom-0 left-0 z-40 flex h-9 w-full items-center justify-between px-3 text-xs',
reduceTransparent &&
'border-t border-[hsla(var(--app-border))] bg-[hsla(var(--bottom-panel-bg))]'
)}

View File

@ -144,7 +144,7 @@ const TopPanel = () => {
theme="icon"
onClick={() => {
setMainViewState(MainViewState.Settings)
setSelectedSetting('Appearance')
setSelectedSetting('Preferences')
}}
>
<PaletteIcon size={16} className="cursor-pointer" />

View File

@ -1,9 +1,11 @@
'use client'
import { useEffect, useMemo } from 'react'
import { useEffect, useState } from 'react'
import { useAtomValue, useSetAtom } from 'jotai'
import { Button } from '@janhq/joi'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import posthog from 'posthog-js'
import { twMerge } from 'tailwind-merge'
import BottomPanel from '@/containers/Layout/BottomPanel'
@ -31,12 +33,72 @@ import MainViewContainer from '../MainViewContainer'
import InstallingExtensionModal from './BottomPanel/InstallingExtension/InstallingExtensionModal'
import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom'
import {
productAnalyticAtom,
productAnalyticPromptAtom,
reduceTransparentAtom,
} from '@/helpers/atoms/Setting.atom'
const BaseLayout = () => {
const setMainViewState = useSetAtom(mainViewStateAtom)
const importModelStage = useAtomValue(getImportModelStageAtom)
const reduceTransparent = useAtomValue(reduceTransparentAtom)
const [productAnalytic, setProductAnalytic] = useAtom(productAnalyticAtom)
const [productAnalyticPrompt, setProductAnalyticPrompt] = useAtom(
productAnalyticPromptAtom
)
const [showProductAnalyticPrompt, setShowProductAnalyticPrompt] =
useState(false)
useEffect(() => {
const timer = setTimeout(() => {
if (productAnalyticPrompt) {
setShowProductAnalyticPrompt(true)
}
return () => clearTimeout(timer)
}, 3000) // 3 seconds delay
return () => clearTimeout(timer) // Cleanup timer on unmount
}, [productAnalyticPrompt])
useEffect(() => {
if (productAnalytic) {
posthog.init(POSTHOG_KEY, {
api_host: POSTHOG_HOST,
autocapture: false,
capture_pageview: false,
capture_pageleave: false,
disable_session_recording: true,
person_profiles: 'always',
persistence: 'localStorage',
opt_out_capturing_by_default: true,
// eslint-disable-next-line @typescript-eslint/naming-convention
sanitize_properties: function (properties) {
const denylist = [
'$pathname',
'$initial_pathname',
'$current_url',
'$initial_current_url',
'$host',
'$initial_host',
'$initial_person_info',
]
denylist.forEach((key) => {
if (properties[key]) {
properties[key] = null // Set each denied property to null
}
})
return properties
},
})
posthog.opt_in_capturing()
posthog.register({ app_version: VERSION })
} else {
posthog.opt_out_capturing()
}
}, [productAnalytic])
useEffect(() => {
if (localStorage.getItem(SUCCESS_SET_NEW_DESTINATION) === 'true') {
@ -54,6 +116,17 @@ const BaseLayout = () => {
)
}, [setMainViewState])
const handleProductAnalytics = (isAllowed: boolean) => {
setProductAnalytic(isAllowed)
setProductAnalyticPrompt(false)
setShowProductAnalyticPrompt(false)
if (isAllowed) {
posthog.opt_in_capturing()
} else {
posthog.opt_out_capturing()
}
}
return (
<div
className={twMerge(
@ -76,6 +149,79 @@ const BaseLayout = () => {
<ChooseWhatToImportModal />
<InstallingExtensionModal />
<HuggingFaceRepoDetailModal />
{showProductAnalyticPrompt && (
<div className="fixed bottom-4 z-50 m-4 max-w-full rounded-xl border border-[hsla(var(--app-border))] bg-[hsla(var(--app-bg))] p-6 shadow-2xl sm:bottom-8 sm:right-4 sm:m-0 sm:max-w-[400px]">
<div className="mb-4 flex items-center gap-x-2">
<svg
width="32"
height="32"
viewBox="0 0 32 32"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M5.5 12.5C5.5 11.1193 6.61929 10 8 10H24C25.3807 10 26.5 11.1193 26.5 12.5V18.5C26.5 24.299 21.799 29 16 29C10.201 29 5.5 24.299 5.5 18.5V12.5Z"
fill="#2563EB"
/>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M8.20959 25.54L12.0093 10H14.0093L9.84984 27.0113C9.25274 26.579 8.70292 26.0855 8.20959 25.54ZM11.5993 28.0361C11.2955 27.8957 10.9996 27.7412 10.7124 27.5734L15.0093 10H16.0093L11.5993 28.0361Z"
fill="white"
/>
<path
d="M21 8C21 6.67392 20.4732 5.40215 19.5355 4.46447C18.5979 3.52678 17.3261 3 16 3C14.6739 3 13.4021 3.52678 12.4645 4.46447C11.5268 5.40215 11 6.67392 11 8"
stroke="#2563EB"
strokeWidth="2"
strokeLinecap="round"
/>
<path
d="M27.0478 18.054C27.609 18.5733 27.609 19.4267 27.0478 19.946C25.221 21.6363 20.9622 25 16 25C11.0378 25 6.77899 21.6363 4.95219 19.946C4.39099 19.4267 4.39099 18.5733 4.95219 18.054C6.77899 16.3637 11.0378 13 16 13C20.9622 13 25.221 16.3637 27.0478 18.054Z"
fill="#C8D1EA"
/>
<circle cx="16" cy="19" r="4" fill="#2563EB" />
<path
d="M19.25 17.5C19.9404 17.5 20.5 16.9404 20.5 16.25C20.5 15.5596 19.9404 15 19.25 15C18.5596 15 18 15.5596 18 16.25C18 16.9404 18.5596 17.5 19.25 17.5Z"
fill="white"
/>
<path
d="M17.75 18.5C18.1642 18.5 18.5 18.1642 18.5 17.75C18.5 17.3358 18.1642 17 17.75 17C17.3358 17 17 17.3358 17 17.75C17 18.1642 17.3358 18.5 17.75 18.5Z"
fill="white"
/>
</svg>
<h6 className="text-base font-semibold">Help Us Improve Jan</h6>
</div>
<p className="text-[hsla(var(--text-secondary))]">
To improve Jan, we collect anonymous data to understand feature
usage. Your chats and personal information are never tracked. You
can change this anytime in&nbsp;
<span className="font-semibold">{`Settings > Privacy.`}</span>
</p>
<p className="mt-6 text-[hsla(var(--text-secondary))]">
Would you like to help us to improve Jan?
</p>
<div className="mt-6 flex items-center gap-x-2">
<Button
onClick={() => {
handleProductAnalytics(true)
}}
>
Allow
</Button>
<Button
data-testid="btn-deny-product-analytics"
theme="ghost"
variant="outline"
onClick={() => {
handleProductAnalytics(false)
}}
>
Deny
</Button>
</div>
</div>
)}
</div>
<BottomPanel />
</div>

View File

@ -17,7 +17,7 @@ import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom'
type Props = PropsWithChildren
const DEFAULT_LEFT_PANEL_WIDTH = 200
const LEFT_PANEL_WIDTH = 'leftPanelWidth'
export const LEFT_PANEL_WIDTH = 'leftPanelWidth'
const LeftPanelContainer = ({ children }: Props) => {
const [leftPanelRef, setLeftPanelRef] = useState<HTMLDivElement | null>(null)
@ -106,7 +106,7 @@ const LeftPanelContainer = ({ children }: Props) => {
<Fragment>
<div
className={twMerge(
'group/resize absolute right-0 top-0 z-[9999] h-full w-1 flex-shrink-0 flex-grow-0 resize-x blur-sm hover:cursor-col-resize hover:bg-[hsla(var(--resize-bg))]',
'group/resize absolute right-0 top-0 z-40 h-full w-1 flex-shrink-0 flex-grow-0 resize-x blur-sm hover:cursor-col-resize hover:bg-[hsla(var(--resize-bg))]',
isResizing && 'cursor-col-resize bg-[hsla(var(--resize-bg))]',
!reduceTransparent && 'shadow-sm'
)}

View File

@ -37,7 +37,7 @@ const MainViewContainer = () => {
}
return (
<div className={twMerge('relative flex w-full')}>
<div className={twMerge('relative flex w-[calc(100%-48px)]')}>
<div className="w-full">
<m.div
key={mainViewState}

View File

@ -12,7 +12,7 @@ import {
useClickOutside,
} from '@janhq/joi'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import {
ChevronDownIcon,
@ -37,6 +37,7 @@ import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
import { formatDownloadPercentage, toGibibytes } from '@/utils/converter'
import { manualRecommendationModel } from '@/utils/model'
import {
getLogoEngine,
getTitleByEngine,
@ -46,6 +47,7 @@ import {
import { extensionManager } from '@/extension'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { inActiveEngineProviderAtom } from '@/helpers/atoms/Extension.atom'
import {
configuredModelsAtom,
@ -64,17 +66,23 @@ type Props = {
disabled?: boolean
}
export const modelDropdownStateAtom = atom(false)
const ModelDropdown = ({
disabled,
chatInputMode,
strictedThread = true,
}: Props) => {
const { downloadModel } = useDownloadModel()
const [modelDropdownState, setModelDropdownState] = useAtom(
modelDropdownStateAtom
)
const [searchFilter, setSearchFilter] = useState('local')
const [searchText, setSearchText] = useState('')
const [open, setOpen] = useState(false)
const [open, setOpen] = useState<boolean>(modelDropdownState)
const activeThread = useAtomValue(activeThreadAtom)
const activeAssistant = useAtomValue(activeAssistantAtom)
const downloadingModels = useAtomValue(getDownloadingModelAtom)
const [toggle, setToggle] = useState<HTMLDivElement | null>(null)
const [selectedModel, setSelectedModel] = useAtom(selectedModelAtom)
@ -82,22 +90,38 @@ const ModelDropdown = ({
const [dropdownOptions, setDropdownOptions] = useState<HTMLDivElement | null>(
null
)
const downloadStates = useAtomValue(modelDownloadStateAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const { updateModelParameter } = useUpdateModelParameters()
const searchInputRef = useRef<HTMLInputElement>(null)
const configuredModels = useAtomValue(configuredModelsAtom)
const featuredModel = configuredModels.filter((x) =>
x.metadata?.tags?.includes('Featured')
const featuredModel = configuredModels.filter(
(x) =>
manualRecommendationModel.includes(x.id) &&
x.metadata?.tags?.includes('Featured') &&
x.metadata?.size < 5000000000
)
const { updateThreadMetadata } = useCreateNewThread()
useClickOutside(() => setOpen(false), null, [dropdownOptions, toggle])
useClickOutside(() => handleChangeStateOpen(false), null, [
dropdownOptions,
toggle,
])
const [showEngineListModel, setShowEngineListModel] = useAtom(
showEngineListModelAtom
)
const handleChangeStateOpen = useCallback(
(state: boolean) => {
setOpen(state)
setModelDropdownState(state)
},
[setModelDropdownState]
)
const isModelSupportRagAndTools = useCallback((model: Model) => {
return (
model?.engine === InferenceEngine.openai ||
@ -143,6 +167,12 @@ const ModelDropdown = ({
[configuredModels, searchText, searchFilter, downloadedModels]
)
useEffect(() => {
if (modelDropdownState && chatInputMode) {
setOpen(modelDropdownState)
}
}, [chatInputMode, modelDropdownState])
useEffect(() => {
if (open && searchInputRef.current) {
searchInputRef.current.focus()
@ -151,17 +181,24 @@ const ModelDropdown = ({
useEffect(() => {
if (!activeThread) return
const modelId = activeThread?.assistants?.[0]?.model?.id
const modelId = activeAssistant?.model?.id
let model = downloadedModels.find((model) => model.id === modelId)
if (!model) {
model = recommendedModel
model = undefined
}
setSelectedModel(model)
}, [recommendedModel, activeThread, downloadedModels, setSelectedModel])
}, [
recommendedModel,
activeThread,
downloadedModels,
setSelectedModel,
activeAssistant?.model?.id,
])
const onClickModelItem = useCallback(
async (modelId: string) => {
if (!activeAssistant) return
const model = downloadedModels.find((m) => m.id === modelId)
setSelectedModel(model)
setOpen(false)
@ -172,14 +209,14 @@ const ModelDropdown = ({
...activeThread,
assistants: [
{
...activeThread.assistants[0],
...activeAssistant,
tools: [
{
type: 'retrieval',
enabled: isModelSupportRagAndTools(model as Model),
settings: {
...(activeThread.assistants[0].tools &&
activeThread.assistants[0].tools[0]?.settings),
...(activeAssistant.tools &&
activeAssistant.tools[0]?.settings),
},
},
],
@ -192,8 +229,12 @@ const ModelDropdown = ({
model?.settings.ctx_len ?? 8192
)
const overriddenParameters = {
ctx_len: Math.min(8192, model?.settings.ctx_len ?? 8192),
max_tokens: defaultContextLength,
ctx_len: !isLocalEngine(model?.engine)
? undefined
: defaultContextLength,
max_tokens: !isLocalEngine(model?.engine)
? (model?.parameters.max_tokens ?? 8192)
: defaultContextLength,
}
const modelParams = {
@ -215,13 +256,14 @@ const ModelDropdown = ({
}
},
[
activeAssistant,
downloadedModels,
activeThread,
setSelectedModel,
activeThread,
updateThreadMetadata,
isModelSupportRagAndTools,
setThreadModelParams,
updateModelParameter,
updateThreadMetadata,
]
)
@ -329,14 +371,21 @@ const ModelDropdown = ({
'inline-block max-w-[200px] cursor-pointer overflow-hidden text-ellipsis whitespace-nowrap',
open && 'border border-transparent'
)}
onClick={() => setOpen(!open)}
onClick={() => handleChangeStateOpen(!open)}
>
<span>{selectedModel?.name}</span>
<span
className={twMerge(
!selectedModel && 'text-[hsla(var(--text-tertiary))]'
)}
>
{selectedModel?.name || 'Select Model'}
</span>
</Badge>
) : (
<Input
value={selectedModel?.name || ''}
className="cursor-pointer"
placeholder="Select Model"
disabled={disabled}
readOnly
suffixIcon={

View File

@ -4,9 +4,11 @@ import { PropsWithChildren } from 'react'
import { Provider, atom } from 'jotai'
import { FileInfo } from '@/types/file'
export const editPromptAtom = atom<string>('')
export const currentPromptAtom = atom<string>('')
export const fileUploadAtom = atom<FileInfo[]>([])
export const fileUploadAtom = atom<FileInfo | undefined>()
export const searchAtom = atom<string>('')
@ -15,10 +17,3 @@ export const selectedTextAtom = atom('')
export default function JotaiWrapper({ children }: PropsWithChildren) {
return <Provider>{children}</Provider>
}
export type FileType = 'image' | 'pdf'
export type FileInfo = {
file: File
type: FileType
}

View File

@ -31,6 +31,7 @@ import {
addNewMessageAtom,
updateMessageAtom,
tokenSpeedAtom,
deleteMessageAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import {
@ -49,6 +50,7 @@ export default function ModelHandler() {
const addNewMessage = useSetAtom(addNewMessageAtom)
const updateMessage = useSetAtom(updateMessageAtom)
const downloadedModels = useAtomValue(downloadedModelsAtom)
const deleteMessage = useSetAtom(deleteMessageAtom)
const activeModel = useAtomValue(activeModelAtom)
const setActiveModel = useSetAtom(activeModelAtom)
const setStateModel = useSetAtom(stateModelAtom)
@ -86,7 +88,7 @@ export default function ModelHandler() {
}, [activeModelParams])
const onNewMessageResponse = useCallback(
(message: ThreadMessage) => {
async (message: ThreadMessage) => {
if (message.type === MessageRequestType.Thread) {
addNewMessage(message)
}
@ -154,12 +156,15 @@ export default function ModelHandler() {
...thread,
title: cleanedMessageContent,
metadata: thread.metadata,
metadata: {
...thread.metadata,
title: cleanedMessageContent,
},
}
extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.saveThread({
?.modifyThread({
...updatedThread,
})
.then(() => {
@ -233,7 +238,9 @@ export default function ModelHandler() {
const thread = threadsRef.current?.find((e) => e.id == message.thread_id)
if (!thread) return
const messageContent = message.content[0]?.text?.value
const metadata = {
...thread.metadata,
...(messageContent && { lastMessage: messageContent }),
@ -246,15 +253,31 @@ export default function ModelHandler() {
extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.saveThread({
?.modifyThread({
...thread,
metadata,
})
// If this is not the summary of the Thread, don't need to add it to the Thread
extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.addNewMessage(message)
if (message.status === MessageStatus.Error) {
message.metadata = {
...message.metadata,
error: message.content[0]?.text?.value,
error_code: message.error_code,
}
}
;(async () => {
const updatedMessage = await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.createMessage(message)
.catch(() => undefined)
if (updatedMessage) {
deleteMessage(message.id)
addNewMessage(updatedMessage)
setTokenSpeed((prev) =>
prev ? { ...prev, message: updatedMessage.id } : undefined
)
}
})()
// Attempt to generate the title of the Thread when needed
generateThreadTitle(message, thread)
@ -279,7 +302,9 @@ export default function ModelHandler() {
const generateThreadTitle = (message: ThreadMessage, thread: Thread) => {
// If this is the first ever prompt in the thread
if (thread.title?.trim() !== defaultThreadTitle) {
if (
(thread.title ?? thread.metadata?.title)?.trim() !== defaultThreadTitle
) {
return
}
@ -292,11 +317,14 @@ export default function ModelHandler() {
const updatedThread: Thread = {
...thread,
title: (thread.metadata?.lastMessage as string) || defaultThreadTitle,
metadata: thread.metadata,
metadata: {
...thread.metadata,
title: (thread.metadata?.lastMessage as string) || defaultThreadTitle,
},
}
return extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.saveThread({
?.modifyThread({
...updatedThread,
})
.then(() => {
@ -313,7 +341,7 @@ export default function ModelHandler() {
if (!threadMessages || threadMessages.length === 0) return
const summarizeFirstPrompt = `Summarize in a ${maxWordForThreadTitle}-word Title. Give the title only. "${threadMessages[0].content[0].text.value}"`
const summarizeFirstPrompt = `Summarize in a ${maxWordForThreadTitle}-word Title. Give the title only. "${threadMessages[0]?.content[0]?.text?.value}"`
// Prompt: Given this query from user {query}, return to me the summary in 10 words as the title
const msgId = ulid()
@ -330,6 +358,7 @@ export default function ModelHandler() {
id: msgId,
threadId: message.thread_id,
type: MessageRequestType.Summary,
attachments: [],
messages,
model: {
...activeModelRef.current,

View File

@ -11,15 +11,14 @@ const Responsive = () => {
const [showRightPanel, setShowRightPanel] = useAtom(showRightPanelAtom)
// Refs to store the last known state of the panels
const lastLeftPanelState = useRef<boolean>(true)
const lastRightPanelState = useRef<boolean>(true)
const lastLeftPanelState = useRef<boolean>(showLeftPanel)
const lastRightPanelState = useRef<boolean>(showRightPanel)
useEffect(() => {
if (matches) {
// Store the last known state before closing the panels
lastLeftPanelState.current = showLeftPanel
lastRightPanelState.current = showRightPanel
setShowLeftPanel(false)
setShowRightPanel(false)
} else {

View File

@ -16,13 +16,13 @@ import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom'
type Props = PropsWithChildren
const DEFAULT_RIGTH_PANEL_WIDTH = 280
const RIGHT_PANEL_WIDTH = 'rightPanelWidth'
const DEFAULT_RIGHT_PANEL_WIDTH = 280
export const RIGHT_PANEL_WIDTH = 'rightPanelWidth'
const RightPanelContainer = ({ children }: Props) => {
const [isResizing, setIsResizing] = useState(false)
const [threadRightPanelWidth, setRightPanelWidth] = useState(
Number(localStorage.getItem(RIGHT_PANEL_WIDTH)) || DEFAULT_RIGTH_PANEL_WIDTH
Number(localStorage.getItem(RIGHT_PANEL_WIDTH)) || DEFAULT_RIGHT_PANEL_WIDTH
)
const [rightPanelRef, setRightPanelRef] = useState<HTMLDivElement | null>(
null
@ -55,11 +55,11 @@ const RightPanelContainer = ({ children }: Props) => {
mouseMoveEvent.clientX <
200
) {
setRightPanelWidth(DEFAULT_RIGTH_PANEL_WIDTH)
setRightPanelWidth(DEFAULT_RIGHT_PANEL_WIDTH)
setIsResizing(false)
localStorage.setItem(
RIGHT_PANEL_WIDTH,
String(DEFAULT_RIGTH_PANEL_WIDTH)
String(DEFAULT_RIGHT_PANEL_WIDTH)
)
setShowRightPanel(false)
} else {
@ -77,8 +77,8 @@ const RightPanelContainer = ({ children }: Props) => {
useEffect(() => {
if (localStorage.getItem(RIGHT_PANEL_WIDTH) === null) {
setRightPanelWidth(DEFAULT_RIGTH_PANEL_WIDTH)
localStorage.setItem(RIGHT_PANEL_WIDTH, String(DEFAULT_RIGTH_PANEL_WIDTH))
setRightPanelWidth(DEFAULT_RIGHT_PANEL_WIDTH)
localStorage.setItem(RIGHT_PANEL_WIDTH, String(DEFAULT_RIGHT_PANEL_WIDTH))
}
window.addEventListener('mousemove', resize)
window.addEventListener('mouseup', stopResizing)
@ -109,7 +109,7 @@ const RightPanelContainer = ({ children }: Props) => {
<Fragment>
<div
className={twMerge(
'group/resize absolute left-0 top-0 z-[9999] h-full w-1 flex-shrink-0 flex-grow-0 resize-x blur-sm hover:cursor-col-resize hover:bg-[hsla(var(--resize-bg))]',
'group/resize absolute left-0 top-0 z-40 h-full w-1 flex-shrink-0 flex-grow-0 resize-x blur-sm hover:cursor-col-resize hover:bg-[hsla(var(--resize-bg))]',
isResizing && 'cursor-col-resize bg-[hsla(var(--resize-bg))]',
!reduceTransparent && 'shadow-sm'
)}

View File

@ -1,14 +1,25 @@
import { atom } from 'jotai'
import { atomWithStorage } from 'jotai/utils'
import { MainViewState } from '@/constants/screens'
export const mainViewStateAtom = atom<MainViewState>(MainViewState.Thread)
export const defaultJanDataFolderAtom = atom<string>('')
const SHOW_RIGHT_PANEL = 'showRightPanel'
// Store panel atom
export const showLeftPanelAtom = atom<boolean>(true)
export const showRightPanelAtom = atom<boolean>(true)
export const showRightPanelAtom = atomWithStorage<boolean>(
SHOW_RIGHT_PANEL,
false,
undefined,
{ getOnInit: true }
)
export const showSystemMonitorPanelAtom = atom<boolean>(false)
export const appDownloadProgressAtom = atom<number>(-1)
export const updateVersionErrorAtom = atom<string | undefined>(undefined)

View File

@ -1,4 +1,12 @@
import { Assistant } from '@janhq/core'
import { Assistant, ThreadAssistantInfo } from '@janhq/core'
import { atom } from 'jotai'
import { atomWithStorage } from 'jotai/utils'
export const assistantsAtom = atom<Assistant[]>([])
/**
* Get the current active assistant
*/
export const activeAssistantAtom = atomWithStorage<
ThreadAssistantInfo | undefined
>('activeAssistant', undefined, undefined, { getOnInit: true })

View File

@ -6,6 +6,8 @@ import {
} from '@janhq/core'
import { atom } from 'jotai'
import { atomWithStorage } from 'jotai/utils'
import {
getActiveThreadIdAtom,
updateThreadStateLastMessageAtom,
@ -13,15 +15,32 @@ import {
import { TokenSpeed } from '@/types/token'
const CHAT_MESSAGE_NAME = 'chatMessages'
/**
* Stores all chat messages for all threads
*/
export const chatMessages = atom<Record<string, ThreadMessage[]>>({})
export const chatMessagesStorage = atomWithStorage<
Record<string, ThreadMessage[]>
>(CHAT_MESSAGE_NAME, {}, undefined, { getOnInit: true })
export const cachedMessages = atom<Record<string, ThreadMessage[]>>()
/**
* Retrieve chat messages for all threads
*/
export const chatMessages = atom(
(get) => get(cachedMessages) ?? get(chatMessagesStorage),
(_get, set, newValue: Record<string, ThreadMessage[]>) => {
set(cachedMessages, newValue)
;(() => set(chatMessagesStorage, newValue))()
}
)
/**
* Stores the status of the messages load for each thread
*/
export const readyThreadsMessagesAtom = atom<Record<string, boolean>>({})
export const readyThreadsMessagesAtom = atomWithStorage<
Record<string, boolean>
>('currentThreadMessages', {}, undefined, { getOnInit: true })
/**
* Store the token speed for current message
@ -34,6 +53,7 @@ export const getCurrentChatMessagesAtom = atom<ThreadMessage[]>((get) => {
const activeThreadId = get(getActiveThreadIdAtom)
if (!activeThreadId) return []
const messages = get(chatMessages)[activeThreadId]
if (!Array.isArray(messages)) return []
return messages ?? []
})
@ -121,7 +141,7 @@ export const deleteMessageAtom = atom(null, (get, set, id: string) => {
if (threadId) {
// Should also delete error messages to clear out the error state
newData[threadId] = newData[threadId].filter(
(e) => e.id !== id && e.status !== MessageStatus.Error
(e) => e.id !== id && !e.metadata?.error
)
set(chatMessages, newData)

View File

@ -58,7 +58,9 @@ describe('Model.atom.ts', () => {
setAtom.current({ id: '1' } as any)
})
expect(getAtom.current).toEqual([{ id: '1' }])
reset.current([])
act(() => {
reset.current([])
})
})
})
@ -83,7 +85,9 @@ describe('Model.atom.ts', () => {
removeAtom.current('1')
})
expect(getAtom.current).toEqual([])
reset.current([])
act(() => {
reset.current([])
})
})
})
@ -113,7 +117,9 @@ describe('Model.atom.ts', () => {
removeAtom.current('1')
})
expect(getAtom.current).toEqual([])
reset.current([])
act(() => {
reset.current([])
})
})
})

View File

@ -11,9 +11,12 @@ export const janSettingScreenAtom = atom<SettingScreen[]>([])
export const THEME = 'themeAppearance'
export const REDUCE_TRANSPARENT = 'reduceTransparent'
export const SPELL_CHECKING = 'spellChecking'
export const PRODUCT_ANALYTIC = 'productAnalytic'
export const PRODUCT_ANALYTIC_PROMPT = 'productAnalyticPrompt'
export const THEME_DATA = 'themeData'
export const THEME_OPTIONS = 'themeOptions'
export const THEME_PATH = 'themePath'
export const CHAT_WIDTH = 'chatWidth'
export const themesOptionsAtom = atomWithStorage<
{ name: string; value: string }[]
>(THEME_OPTIONS, [], undefined, { getOnInit: true })
@ -47,3 +50,21 @@ export const spellCheckAtom = atomWithStorage<boolean>(
undefined,
{ getOnInit: true }
)
export const productAnalyticAtom = atomWithStorage<boolean>(
PRODUCT_ANALYTIC,
false,
undefined,
{ getOnInit: true }
)
export const productAnalyticPromptAtom = atomWithStorage<boolean>(
PRODUCT_ANALYTIC_PROMPT,
true,
undefined,
{ getOnInit: true }
)
export const chatWidthAtom = atomWithStorage<string>(
CHAT_WIDTH,
'full',
undefined,
{ getOnInit: true }
)

View File

@ -207,7 +207,7 @@ export const setThreadModelParamsAtom = atom(
*/
export const activeSettingInputBoxAtom = atomWithStorage<boolean>(
ACTIVE_SETTING_INPUT_BOX,
false,
true,
undefined,
{ getOnInit: true }
)

View File

@ -8,8 +8,8 @@ import { toaster } from '@/containers/Toast'
import { LAST_USED_MODEL_ID } from './useRecommendedModel'
import { vulkanEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const activeModelAtom = atom<Model | undefined>(undefined)
export const loadModelErrorAtom = atom<string | undefined>(undefined)
@ -28,12 +28,12 @@ export const stateModelAtom = atom<ModelState>({
export function useActiveModel() {
const [activeModel, setActiveModel] = useAtom(activeModelAtom)
const activeThread = useAtomValue(activeThreadAtom)
const [stateModel, setStateModel] = useAtom(stateModelAtom)
const downloadedModels = useAtomValue(downloadedModelsAtom)
const setLoadModelError = useSetAtom(loadModelErrorAtom)
const pendingModelLoad = useRef(false)
const isVulkanEnabled = useAtomValue(vulkanEnabledAtom)
const activeAssistant = useAtomValue(activeAssistantAtom)
const downloadedModelsRef = useRef<Model[]>([])
@ -79,12 +79,12 @@ export function useActiveModel() {
}
/// Apply thread model settings
if (activeThread?.assistants[0]?.model.id === modelId) {
if (activeAssistant?.model.id === modelId) {
model = {
...model,
settings: {
...model.settings,
...activeThread.assistants[0].model.settings,
...activeAssistant?.model.settings,
},
}
}

View File

@ -67,7 +67,7 @@ describe('useCreateNewThread', () => {
} as any)
})
expect(mockSetAtom).toHaveBeenCalledTimes(6) // Check if all the necessary atoms were set
expect(mockSetAtom).toHaveBeenCalledTimes(1)
expect(extensionManager.get).toHaveBeenCalled()
})
@ -104,7 +104,7 @@ describe('useCreateNewThread', () => {
await result.current.requestCreateNewThread({
id: 'assistant1',
name: 'Assistant 1',
instructions: "Hello Jan Assistant",
instructions: 'Hello Jan Assistant',
model: {
id: 'model1',
parameters: [],
@ -113,16 +113,8 @@ describe('useCreateNewThread', () => {
} as any)
})
expect(mockSetAtom).toHaveBeenCalledTimes(6) // Check if all the necessary atoms were set
expect(mockSetAtom).toHaveBeenCalledTimes(1) // Check if all the necessary atoms were set
expect(extensionManager.get).toHaveBeenCalled()
expect(mockSetAtom).toHaveBeenNthCalledWith(
2,
expect.objectContaining({
assistants: expect.arrayContaining([
expect.objectContaining({ instructions: 'Hello Jan Assistant' }),
]),
})
)
})
it('should create a new thread with previous instructions', async () => {
@ -166,16 +158,8 @@ describe('useCreateNewThread', () => {
} as any)
})
expect(mockSetAtom).toHaveBeenCalledTimes(6) // Check if all the necessary atoms were set
expect(mockSetAtom).toHaveBeenCalledTimes(1) // Check if all the necessary atoms were set
expect(extensionManager.get).toHaveBeenCalled()
expect(mockSetAtom).toHaveBeenNthCalledWith(
2,
expect.objectContaining({
assistants: expect.arrayContaining([
expect.objectContaining({ instructions: 'Hello Jan' }),
]),
})
)
})
it('should show a warning toast if trying to create an empty thread', async () => {
@ -212,13 +196,12 @@ describe('useCreateNewThread', () => {
const { result } = renderHook(() => useCreateNewThread())
const mockThread = { id: 'thread1', title: 'Test Thread' }
const mockThread = { id: 'thread1', title: 'Test Thread', assistants: [{}] }
await act(async () => {
await result.current.updateThreadMetadata(mockThread as any)
})
expect(mockUpdateThread).toHaveBeenCalledWith(mockThread)
expect(extensionManager.get).toHaveBeenCalled()
})
})

View File

@ -1,7 +1,6 @@
import { useCallback } from 'react'
import {
Assistant,
ConversationalExtension,
ExtensionTypeEnum,
Thread,
@ -9,24 +8,28 @@ import {
ThreadState,
AssistantTool,
Model,
Assistant,
} from '@janhq/core'
import { atom, useAtomValue, useSetAtom } from 'jotai'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { useDebouncedCallback } from 'use-debounce'
import { copyOverInstructionEnabledAtom } from '@/containers/CopyInstruction'
import { fileUploadAtom } from '@/containers/Providers/Jotai'
import { toaster } from '@/containers/Toast'
import { generateThreadId } from '@/utils/thread'
import { isLocalEngine } from '@/utils/modelEngine'
import { useActiveModel } from './useActiveModel'
import useRecommendedModel from './useRecommendedModel'
import useRecommendedModel from './useRecommendedModel'
import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension'
import { experimentalFeatureEnabledAtom } from '@/helpers/atoms/AppConfig.atom'
import { activeAssistantAtom } from '@/helpers/atoms/Assistant.atom'
import { selectedModelAtom } from '@/helpers/atoms/Model.atom'
import {
threadsAtom,
@ -34,7 +37,6 @@ import {
updateThreadAtom,
setThreadModelParamsAtom,
isGeneratingResponseAtom,
activeThreadAtom,
} from '@/helpers/atoms/Thread.atom'
const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
@ -64,25 +66,25 @@ export const useCreateNewThread = () => {
const copyOverInstructionEnabled = useAtomValue(
copyOverInstructionEnabledAtom
)
const activeThread = useAtomValue(activeThreadAtom)
const [activeAssistant, setActiveAssistant] = useAtom(activeAssistantAtom)
const experimentalEnabled = useAtomValue(experimentalFeatureEnabledAtom)
const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
const { recommendedModel, downloadedModels } = useRecommendedModel()
const threads = useAtomValue(threadsAtom)
const { stopInference } = useActiveModel()
const { recommendedModel } = useRecommendedModel()
const requestCreateNewThread = async (
assistant: Assistant,
assistant: (ThreadAssistantInfo & { id: string; name: string }) | Assistant,
model?: Model | undefined
) => {
// Stop generating if any
setIsGeneratingResponse(false)
stopInference()
const defaultModel = model ?? recommendedModel ?? downloadedModels[0]
const defaultModel = model || recommendedModel
if (!model) {
// if we have model, which means user wants to create new thread from Model hub. Allow them.
@ -113,18 +115,22 @@ export const useCreateNewThread = () => {
)
const overriddenSettings = {
ctx_len: defaultContextLength,
ctx_len: !isLocalEngine(defaultModel?.engine)
? undefined
: defaultContextLength,
}
// Use ctx length by default
const overriddenParameters = {
max_tokens: defaultContextLength,
max_tokens: !isLocalEngine(defaultModel?.engine)
? (defaultModel?.parameters.token_limit ?? 8192)
: defaultContextLength,
}
const createdAt = Date.now()
let instructions: string | undefined = assistant.instructions
if (copyOverInstructionEnabled) {
instructions = activeThread?.assistants[0]?.instructions ?? undefined
instructions = activeAssistant?.instructions ?? undefined
}
const assistantInfo: ThreadAssistantInfo = {
assistant_id: assistant.id,
@ -139,46 +145,97 @@ export const useCreateNewThread = () => {
instructions,
}
const threadId = generateThreadId(assistant.id)
const thread: Thread = {
id: threadId,
const thread: Partial<Thread> = {
object: 'thread',
title: 'New Thread',
assistants: [assistantInfo],
created: createdAt,
updated: createdAt,
metadata: {
title: 'New Thread',
},
}
// add the new thread on top of the thread list to the state
//TODO: Why do we have thread list then thread states? Should combine them
createNewThread(thread)
try {
const createdThread = await persistNewThread(thread, assistantInfo)
if (!createdThread) throw 'Thread created failed.'
createNewThread(createdThread)
setSelectedModel(defaultModel)
setThreadModelParams(thread.id, {
...defaultModel?.settings,
...defaultModel?.parameters,
...overriddenSettings,
})
setSelectedModel(defaultModel)
setThreadModelParams(createdThread.id, {
...defaultModel?.settings,
...defaultModel?.parameters,
...overriddenSettings,
})
// Delete the file upload state
setFileUpload([])
// Update thread metadata
await updateThreadMetadata(thread)
setActiveThread(thread)
// Delete the file upload state
setFileUpload(undefined)
setActiveThread(createdThread)
} catch (ex) {
return toaster({
title: 'Thread created failed.',
description: `To avoid piling up empty threads, please reuse previous one before creating new.`,
type: 'error',
})
}
}
const updateThreadExtension = (thread: Thread) => {
return extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.modifyThread(thread)
}
const updateAssistantExtension = (
threadId: string,
assistant: ThreadAssistantInfo
) => {
return extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.modifyThreadAssistant(threadId, assistant)
}
const updateThreadCallback = useDebouncedCallback(updateThreadExtension, 300)
const updateAssistantCallback = useDebouncedCallback(
updateAssistantExtension,
300
)
const updateThreadMetadata = useCallback(
async (thread: Thread) => {
updateThread(thread)
await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.saveThread(thread)
setActiveAssistant(thread.assistants[0])
updateThreadCallback(thread)
updateAssistantCallback(thread.id, thread.assistants[0])
},
[updateThread]
[
updateThread,
setActiveAssistant,
updateThreadCallback,
updateAssistantCallback,
]
)
const persistNewThread = async (
thread: Partial<Thread>,
assistantInfo: ThreadAssistantInfo
): Promise<Thread | undefined> => {
return await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.createThread(thread)
.then(async (thread) => {
await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.createThreadAssistant(thread.id, assistantInfo)
.catch(console.error)
return thread
})
.catch(() => undefined)
}
return {
requestCreateNewThread,
updateThreadMetadata,

Some files were not shown because too many files have changed in this diff Show More