diff --git a/extensions/inference-anthropic-extension/README.md b/extensions/inference-anthropic-extension/README.md new file mode 100644 index 000000000..1c0dcbd3d --- /dev/null +++ b/extensions/inference-anthropic-extension/README.md @@ -0,0 +1,79 @@ +# Anthropic Engine Extension + +Created using Jan extension example + +# Create a Jan Extension using Typescript + +Use this template to bootstrap the creation of a TypeScript Jan extension. 🚀 + +## Create Your Own Extension + +To create your own extension, you can use this repository as a template! Just follow the below instructions: + +1. Click the Use this template button at the top of the repository +2. Select Create a new repository +3. Select an owner and name for your new repository +4. Click Create repository +5. Clone your new repository + +## Initial Setup + +After you've cloned the repository to your local machine or codespace, you'll need to perform some initial setup steps before you can develop your extension. + +> [!NOTE] +> +> You'll need to have a reasonably modern version of +> [Node.js](https://nodejs.org) handy. If you are using a version manager like +> [`nodenv`](https://github.com/nodenv/nodenv) or +> [`nvm`](https://github.com/nvm-sh/nvm), you can run `nodenv install` in the +> root of your repository to install the version specified in +> [`package.json`](./package.json). Otherwise, 20.x or later should work! + +1. :hammer_and_wrench: Install the dependencies + + ```bash + npm install + ``` + +1. :building_construction: Package the TypeScript for distribution + + ```bash + npm run bundle + ``` + +1. :white_check_mark: Check your artifact + + There will be a tgz file in your extension directory now + +## Update the Extension Metadata + +The [`package.json`](package.json) file defines metadata about your extension, such as +extension name, main entry, description and version. + +When you copy this repository, update `package.json` with the name, description for your extension. 
+ +## Update the Extension Code + +The [`src/`](./src/) directory is the heart of your extension! This contains the +source code that will be run when your extension functions are invoked. You can replace the +contents of this directory with your own code. + +There are a few things to keep in mind when writing your extension code: + +- Most Jan Extension functions are processed asynchronously. + In `index.ts`, you will see that the extension function will return a `Promise`. + + ```typescript + import { events, MessageEvent, MessageRequest } from '@janhq/core' + + function onStart(): Promise { + return events.on(MessageEvent.OnMessageSent, (data: MessageRequest) => + this.inference(data) + ) + } + ``` + + For more information about the Jan Extension Core module, see the + [documentation](https://github.com/janhq/jan/blob/main/core/README.md). + +So, what are you waiting for? Go ahead and start customizing your extension! diff --git a/extensions/inference-anthropic-extension/package.json b/extensions/inference-anthropic-extension/package.json new file mode 100644 index 000000000..aa3ff8b2a --- /dev/null +++ b/extensions/inference-anthropic-extension/package.json @@ -0,0 +1,43 @@ +{ + "name": "@janhq/inference-anthropic-extension", + "productName": "Anthropic Inference Engine", + "version": "1.0.0", + "description": "This extension enables Anthropic chat completion API calls", + "main": "dist/index.js", + "module": "dist/module.js", + "engine": "anthropic", + "author": "Jan ", + "license": "AGPL-3.0", + "scripts": { + "build": "tsc -b . && webpack --config webpack.config.js", + "build:publish": "rimraf *.tgz --glob && yarn build && npm pack && cpx *.tgz ../../pre-install", + "sync:core": "cd ../.. 
&& yarn build:core && cd extensions && rm yarn.lock && cd inference-anthropic-extension && yarn && yarn build:publish" + }, + "exports": { + ".": "./dist/index.js", + "./main": "./dist/module.js" + }, + "devDependencies": { + "cpx": "^1.5.0", + "rimraf": "^3.0.2", + "webpack": "^5.88.2", + "webpack-cli": "^5.1.4", + "ts-loader": "^9.5.0" + }, + "dependencies": { + "@janhq/core": "file:../../core", + "fetch-retry": "^5.0.6", + "ulidx": "^2.3.0" + }, + "engines": { + "node": ">=18.0.0" + }, + "files": [ + "dist/*", + "package.json", + "README.md" + ], + "bundleDependencies": [ + "fetch-retry" + ] +} diff --git a/extensions/inference-anthropic-extension/resources/models.json b/extensions/inference-anthropic-extension/resources/models.json new file mode 100644 index 000000000..363e0bd38 --- /dev/null +++ b/extensions/inference-anthropic-extension/resources/models.json @@ -0,0 +1,83 @@ +[ + { + "sources": [ + { + "url": "https://www.anthropic.com/" + } + ], + "id": "claude-3-opus-20240229", + "object": "model", + "name": "Claude 3 Opus", + "version": "1.0", + "description": "Claude 3 Opus is a powerful model suitable for highly complex tasks.", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 4096, + "temperature": 0.7, + "stream": false + }, + "metadata": { + "author": "Anthropic", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "anthropic" + }, + { + "sources": [ + { + "url": "https://www.anthropic.com/" + } + ], + "id": "claude-3-sonnet-20240229", + "object": "model", + "name": "Claude 3 Sonnet", + "version": "1.0", + "description": "Claude 3 Sonnet is an ideal balance of intelligence and speed for enterprise workloads.", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 4096, + "temperature": 0.7, + "stream": false + }, + "metadata": { + "author": "Anthropic", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "anthropic" + }, + { + "sources": [ + { + "url": 
"https://www.anthropic.com/" + } + ], + "id": "claude-3-haiku-20240307", + "object": "model", + "name": "Claude 3 Haiku", + "version": "1.0", + "description": "Claude 3 Haiku is the fastest model, providing near-instant responsiveness.", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 4096, + "temperature": 0.7, + "stream": false + }, + "metadata": { + "author": "Anthropic", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "anthropic" + } +] \ No newline at end of file diff --git a/extensions/inference-anthropic-extension/resources/settings.json b/extensions/inference-anthropic-extension/resources/settings.json new file mode 100644 index 000000000..bb35e6b3d --- /dev/null +++ b/extensions/inference-anthropic-extension/resources/settings.json @@ -0,0 +1,23 @@ +[ + { + "key": "chat-completions-endpoint", + "title": "Chat Completions Endpoint", + "description": "The endpoint to use for chat completions. See the [Anthropic API documentation](https://docs.anthropic.com/claude/docs/intro-to-claude) for more information.", + "controllerType": "input", + "controllerProps": { + "placeholder": "https://api.anthropic.com/v1/messages", + "value": "https://api.anthropic.com/v1/messages" + } + }, + { + "key": "anthropic-api-key", + "title": "API Key", + "description": "The Anthropic API uses API keys for authentication. 
Visit your [API Keys](https://console.anthropic.com/settings/keys) page to retrieve the API key you'll use in your requests.", + "controllerType": "input", + "controllerProps": { + "placeholder": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "value": "", + "type": "password" + } + } +] \ No newline at end of file diff --git a/extensions/inference-anthropic-extension/src/index.ts b/extensions/inference-anthropic-extension/src/index.ts new file mode 100644 index 000000000..c625d775a --- /dev/null +++ b/extensions/inference-anthropic-extension/src/index.ts @@ -0,0 +1,124 @@ +/** + * @file This file exports a class that implements the InferenceExtension interface from the @janhq/core package. + * The class provides methods for initializing and stopping a model, and for making inference requests. + * It also subscribes to events emitted by the @janhq/core package and handles new message requests. + * @version 1.0.0 + * @module inference-anthropic-extension/src/index + */ + +import { RemoteOAIEngine } from '@janhq/core' +import { PayloadType } from '@janhq/core' +import { ChatCompletionRole } from '@janhq/core' + +declare const SETTINGS: Array +declare const MODELS: Array + +enum Settings { + apiKey = 'anthropic-api-key', + chatCompletionsEndPoint = 'chat-completions-endpoint', +} + +type AnthropicPayloadType = { + model?: string + max_tokens?: number + messages?: Array<{ role: string; content: string }> +} + +/** + * A class that implements the InferenceExtension interface from the @janhq/core package. + * The class provides methods for initializing and stopping a model, and for making inference requests. + * It also subscribes to events emitted by the @janhq/core package and handles new message requests. 
+ */ +export default class JanInferenceAnthropicExtension extends RemoteOAIEngine { + inferenceUrl: string = '' + provider: string = 'anthropic' + maxTokens: number = 4096 + + override async onLoad(): Promise { + super.onLoad() + + // Register Settings + this.registerSettings(SETTINGS) + this.registerModels(MODELS) + + this.apiKey = await this.getSetting(Settings.apiKey, '') + this.inferenceUrl = await this.getSetting( + Settings.chatCompletionsEndPoint, + '' + ) + + if (this.inferenceUrl.length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } + } + + // Override the headers method to include the x-API-key in the request headers + override async headers(): Promise { + return { + 'Content-Type': 'application/json', + 'x-api-key': this.apiKey, + 'anthropic-version': '2023-06-01', + } + } + + onSettingUpdate(key: string, value: T): void { + if (key === Settings.apiKey) { + this.apiKey = value as string + } else if (key === Settings.chatCompletionsEndPoint) { + if (typeof value !== 'string') return + + if (value.trim().length === 0) { + SETTINGS.forEach((setting) => { + if (setting.key === Settings.chatCompletionsEndPoint) { + this.inferenceUrl = setting.controllerProps.value as string + } + }) + } else { + this.inferenceUrl = value + } + } + } + + // Override the transformPayload method to convert the payload to the required format + transformPayload = (payload: PayloadType): AnthropicPayloadType => { + if (!payload.messages || payload.messages.length === 0) { + return { max_tokens: this.maxTokens, messages: [], model: payload.model } + } + + const convertedData: AnthropicPayloadType = { + max_tokens: this.maxTokens, + messages: [], + model: payload.model, + } + + payload.messages.forEach((item, index) => { + if (item.role === ChatCompletionRole.User) { + convertedData.messages.push({ + role: 'user', + content: item.content as string, + }) + } 
else if (item.role === ChatCompletionRole.Assistant) { + convertedData.messages.push({ + role: 'assistant', + content: item.content as string, + }) + } + }) + + return convertedData + } + + // Override the transformResponse method to convert the response to the required format + transformResponse = (data: any): string => { + if (data.content && data.content.length > 0 && data.content[0].text) { + return data.content[0].text + } else { + console.error('Invalid response format:', data) + return '' + } + } +} diff --git a/extensions/inference-anthropic-extension/tsconfig.json b/extensions/inference-anthropic-extension/tsconfig.json new file mode 100644 index 000000000..2477d58ce --- /dev/null +++ b/extensions/inference-anthropic-extension/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "es2016", + "module": "ES6", + "moduleResolution": "node", + "outDir": "./dist", + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "strict": false, + "skipLibCheck": true, + "rootDir": "./src" + }, + "include": ["./src"] +} diff --git a/extensions/inference-anthropic-extension/webpack.config.js b/extensions/inference-anthropic-extension/webpack.config.js new file mode 100644 index 000000000..cd5e65c72 --- /dev/null +++ b/extensions/inference-anthropic-extension/webpack.config.js @@ -0,0 +1,37 @@ +const webpack = require('webpack') +const packageJson = require('./package.json') +const settingJson = require('./resources/settings.json') +const modelsJson = require('./resources/models.json') + +module.exports = { + experiments: { outputModule: true }, + entry: './src/index.ts', // Adjust the entry point to match your project's main file + mode: 'production', + module: { + rules: [ + { + test: /\.tsx?$/, + use: 'ts-loader', + exclude: /node_modules/, + }, + ], + }, + plugins: [ + new webpack.DefinePlugin({ + MODELS: JSON.stringify(modelsJson), + SETTINGS: JSON.stringify(settingJson), + ENGINE: JSON.stringify(packageJson.engine), + }), + ], + 
output: { + filename: 'index.js', // Adjust the output file name as needed + library: { type: 'module' }, // Specify ESM output format + }, + resolve: { + extensions: ['.ts', '.js'], + }, + optimization: { + minimize: false, + }, + // Add loaders and other configuration as needed for your project +} diff --git a/extensions/inference-cohere-extension/src/index.ts b/extensions/inference-cohere-extension/src/index.ts index b986a25eb..24cc5935b 100644 --- a/extensions/inference-cohere-extension/src/index.ts +++ b/extensions/inference-cohere-extension/src/index.ts @@ -3,7 +3,7 @@ * The class provides methods for initializing and stopping a model, and for making inference requests. * It also subscribes to events emitted by the @janhq/core package and handles new message requests. * @version 1.0.0 - * @module inference-openai-extension/src/index + * @module inference-cohere-extension/src/index */ import { RemoteOAIEngine } from '@janhq/core' @@ -82,7 +82,7 @@ export default class JanInferenceCohereExtension extends RemoteOAIEngine { if (payload.messages.length === 0) { return {} } - const convertedData:CoherePayloadType = { + const convertedData: CoherePayloadType = { chat_history: [], message: '', } @@ -93,7 +93,7 @@ export default class JanInferenceCohereExtension extends RemoteOAIEngine { return } if (item.role === ChatCompletionRole.User) { - convertedData.chat_history.push({ role: RoleType.user, message: item.content as string}) + convertedData.chat_history.push({ role: RoleType.user, message: item.content as string }) } else if (item.role === ChatCompletionRole.Assistant) { convertedData.chat_history.push({ role: RoleType.chatbot, diff --git a/extensions/inference-groq-extension/resources/models.json b/extensions/inference-groq-extension/resources/models.json index 32ec60add..81275f47c 100644 --- a/extensions/inference-groq-extension/resources/models.json +++ b/extensions/inference-groq-extension/resources/models.json @@ -23,7 +23,10 @@ }, "metadata": { "author": 
"Meta", - "tags": ["General", "Big Context Length"] + "tags": [ + "General", + "Big Context Length" + ] }, "engine": "groq" }, @@ -51,7 +54,10 @@ }, "metadata": { "author": "Meta", - "tags": ["General", "Big Context Length"] + "tags": [ + "General", + "Big Context Length" + ] }, "engine": "groq" }, @@ -79,7 +85,9 @@ }, "metadata": { "author": "Google", - "tags": ["General"] + "tags": [ + "General" + ] }, "engine": "groq" }, @@ -107,8 +115,11 @@ }, "metadata": { "author": "Mistral", - "tags": ["General", "Big Context Length"] + "tags": [ + "General", + "Big Context Length" + ] }, "engine": "groq" } -] +] \ No newline at end of file diff --git a/extensions/inference-openai-extension/resources/models.json b/extensions/inference-openai-extension/resources/models.json index d8aa787d9..eb2fc662c 100644 --- a/extensions/inference-openai-extension/resources/models.json +++ b/extensions/inference-openai-extension/resources/models.json @@ -23,7 +23,9 @@ }, "metadata": { "author": "OpenAI", - "tags": ["General"] + "tags": [ + "General" + ] }, "engine": "openai" }, @@ -51,7 +53,10 @@ }, "metadata": { "author": "OpenAI", - "tags": ["General", "Vision"] + "tags": [ + "General", + "Vision" + ] }, "engine": "openai" }, @@ -79,7 +84,9 @@ }, "metadata": { "author": "OpenAI", - "tags": ["General"] + "tags": [ + "General" + ] }, "engine": "openai" }