Refactor app directories and enforce ts strict mode (#201)

* refactor: move Electron app to main directory and enforce ts strict mode

* chore: add pre-install plugins

* remove duplicated initModel function

Signed-off-by: James <james@jan.ai>

* chore: correct module path

* fix: dynamic import does not work with ts

* chore: web should be able to run on target host browser

* fix: history panel should display conversations rather than just a blank state

* chore: init default model

* chore: plugins in ts

* fix: pre-pack model management

* fix: compiled core should not include plugins

* chore: refactor - invoke plugin function

* refactor download/delete file

Signed-off-by: James <james@jan.ai>

* update prebuild lib

Signed-off-by: James <james@jan.ai>

* chore: yarn workspace

* chore: update yarn workspace

* chore: yarn workspace with nohoist

* fix: llama-cpp-import

* chore: fix data-plugin wrong module path

* chore: correct build step

* chore: separate inference service (#212)

- remove base-plugin

Signed-off-by: James <james@jan.ai>
Co-authored-by: James <james@jan.ai>

* chore: update core plugins

* chore: hide installation prompt and fix model load - management plugin

* chore: remove legacy files; update readme

* fix: refreshing the page loses the download state

Signed-off-by: James <james@jan.ai>

* fix: ai prompt not passed to plugin

Signed-off-by: James <james@jan.ai>

* chore: module import fix for production

* chore: auto updater

* chore: package is public

* chore: fix yarn workspace config

* update: model management uses Q4_K_M

* chore: fix yarn scripts for publishing

* chore: app updater - progress update message

* chore: user confirms update action

* add state for changing pages
store downloaded models in the database

Signed-off-by: James <james@jan.ai>

* chore: refactor plugins into yarn workspace - a single command to publish all base plugins

* chore: update readme (#218)

Co-authored-by: Hien To <tominhhien97@gmail.com>

* change app name and app icon

Signed-off-by: James <james@jan.ai>

* remove: go-to-nowhere actions

* chore: bundle core plugins from root and scan default plugins

* fix: app crashes on different field name lookup

* chore: css fix

* chore: bind download progress to app ui

* chore: bind active model

* chore: simplify app splash screen to only a centered Jan icon

* feature: system monitoring plugin (#196)

* feat: Add function for system monitoring

* chore: register plugin functions

* chore: move to corresponding directory

* chore: bind system monitoring data to UI

---------

Co-authored-by: Louis <louis@jan.ai>

* chore: add build:plugins step to README

* chore: model searching and fix model name

* fix: plugin file selected appearance

* fix: create new conversation does not work

* fix: deleting a conversation does not update state - messages still exist

* chore: fix asset path prefix

* Add CI/CD for macOS (#221)

Co-authored-by: Hien To <tominhhien97@gmail.com>

* chore: fix production plugin path

* chore: add shell open url in external browser

---------

Signed-off-by: James <james@jan.ai>
Co-authored-by: James <james@jan.ai>
Co-authored-by: NamH <NamNh0122@gmail.com>
Co-authored-by: 0xSage <n@pragmatic.vc>
Co-authored-by: hiento09 <136591877+hiento09@users.noreply.github.com>
Co-authored-by: Hien To <tominhhien97@gmail.com>
Co-authored-by: namvuong <22463238+vuonghoainam@users.noreply.github.com>
Commit afbb94f083 (parent fe394c8bac), authored by Louis on 2023-09-28 18:15:18 +07:00 and committed via GitHub
462 changed files with 6080 additions and 8887 deletions

View File

@ -1,53 +0,0 @@
name: Jan CI Production
on:
push:
tags: ['v*.*.*']
env:
REGISTRY: ghcr.io
HASURA_WORKER_IMAGE_NAME: ${{ github.repository }}/worker
WEB_CLIENT_IMAGE_NAME: ${{ github.repository }}/web-client
jobs:
build-docker-image:
runs-on: ubuntu-latest
environment: production
permissions:
contents: read
packages: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
- name: Log in to the Container registry
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Get tag
id: tag
uses: dawidd6/action-get-tag@v1
# Build and docker image for app-backend worker
- name: Build docker image for app-backend worker
run: |
cd ./app-backend/worker
docker build -t ${{ env.REGISTRY }}/${{ env.HASURA_WORKER_IMAGE_NAME }}:${{ steps.tag.outputs.tag }} .
docker push ${{ env.REGISTRY }}/${{ env.HASURA_WORKER_IMAGE_NAME }}:${{ steps.tag.outputs.tag }}
# Get .env for FE
- name: Get .env file for build time
run: cd ./web-client && base64 -d <<< "$ENV_FILE_BASE64" > .env
shell: bash
env:
ENV_FILE_BASE64: ${{ secrets.ENV_FILE_BASE64 }}
# Build and push docker for web client
- name: Build docker image for web-client
run: |
cd ./web-client
docker build -t ${{ env.REGISTRY }}/${{ env.WEB_CLIENT_IMAGE_NAME }}:${{ steps.tag.outputs.tag }} .
docker push ${{ env.REGISTRY }}/${{ env.WEB_CLIENT_IMAGE_NAME }}:${{ steps.tag.outputs.tag }}

View File

@ -1,58 +0,0 @@
name: Jan CI Staging
on:
push:
branches:
- stag
paths:
- 'app-backend/worker/**' # hasura worker source code
- 'web-client/**' # web client source code
env:
REGISTRY: ghcr.io
HASURA_WORKER_IMAGE_NAME: ${{ github.repository }}/worker
WEB_CLIENT_IMAGE_NAME: ${{ github.repository }}/web-client
jobs:
build-docker-image:
runs-on: ubuntu-latest
environment: staging
permissions:
contents: read
packages: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
- name: Log in to the Container registry
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Get current date
id: date
run: echo "::set-output name=date::$(date +'%Y.%m.%d')"
# Build docker image for app-backend worker
- name: Build docker image for app-backend worker
if: ${{ contains(github.event.head_commit.added, 'app-backend/worker/') }}
run: |
cd ./app-backend/worker
docker build -t ${{ env.REGISTRY }}/${{ env.HASURA_WORKER_IMAGE_NAME }}:staging-${{ steps.date.outputs.date }}.${{github.run_number}} .
docker push ${{ env.REGISTRY }}/${{ env.HASURA_WORKER_IMAGE_NAME }}:staging-${{ steps.date.outputs.date }}.${{github.run_number}}
# Get .env for FE
- name: Get .env file for build time
run: cd ./web-client && base64 -d <<< "$ENV_FILE_BASE64" > .env
shell: bash
env:
ENV_FILE_BASE64: ${{ secrets.ENV_FILE_BASE64 }}
# Build and push docker for web client
- name: Build docker image for web-client
if: ${{ contains(github.event.head_commit.added, 'web-client/') }}
run: |
cd ./web-client
docker build -t ${{ env.REGISTRY }}/${{ env.WEB_CLIENT_IMAGE_NAME }}:staging-${{ steps.date.outputs.date }}.${{github.run_number}} .
docker push ${{ env.REGISTRY }}/${{ env.WEB_CLIENT_IMAGE_NAME }}:staging-${{ steps.date.outputs.date }}.${{github.run_number}}

50
.github/workflows/macos-build-app.yml vendored Normal file
View File

@ -0,0 +1,50 @@
name: Jan Build MacOS App
on:
push:
tags: ['v*.*.*']
jobs:
build-macos-app:
runs-on: macos-latest
permissions:
contents: write
steps:
- name: Getting the repo
uses: actions/checkout@v3
- name: Installing node
uses: actions/setup-node@v1
with:
node-version: 20
- name: Install jq
uses: dcarbone/install-jq-action@v2.0.1
- name: Get tag
id: tag
uses: dawidd6/action-get-tag@v1
- name: Update app version based on tag
run: |
if [[ ! "${VERSION_TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Error: Tag is not valid!"
exit 1
fi
jq --arg version "${VERSION_TAG#v}" '.version = $version' electron/package.json > /tmp/package.json
mv /tmp/package.json electron/package.json
env:
VERSION_TAG: ${{ steps.tag.outputs.tag }}
- name: Install yarn dependencies
run: |
yarn install
yarn build:plugins
- name: Build and publish app
run: |
yarn build:publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

12
.gitignore vendored
View File

@ -4,9 +4,11 @@
# Jan inference
models/**
error.log
app/electron/core/*/node_modules
app/electron/core/*/dist
app/electron/core/*/package-lock.json
node_modules
package-lock.json
*.tgz
app/yarn.lock
app/dist
yarn.lock
dist
build
.DS_Store
electron/renderer

28
KC.md
View File

@ -1,28 +0,0 @@
# Configuring Keycloak theme
Jan comes with a default theme for Keycloak, extended from [Keywind](https://github.com/lukin/keywind).
## Select keywind as theme
1. Navigate to http://localhost:8088/admin
2. Sign in with the credentials below
```
username: admin
password: admin
```
3. Select `hasura` from the top left dropdown box
![Screenshot 2023-09-06 at 15 10 53](https://github.com/janhq/jan/assets/10397206/5e3cf99b-7cd6-43ff-a003-e66aedd8c850)
4. Select `Realm settings` on left navigation bar and open tab `Themes`
![Screenshot 2023-09-06 at 15 14 05](https://github.com/janhq/jan/assets/10397206/3256b5c4-e3e7-48ef-9c5e-f720b5beeaa8)
5. On `Login theme` open the drop down box and select `keywind`
![Screenshot 2023-09-06 at 15 15 28](https://github.com/janhq/jan/assets/10397206/c52ba743-d978-4963-9311-cf84b4bb5389)
6. Save
**That's it!**
Open your web browser and navigate to `http://localhost:3000` to access the Jan web application. Proceed to `Login` at the top right.
You should see the theme below. If you do not, try clearing your browser cache.
![Screenshot 2023-09-06 at 15 29 09](https://github.com/janhq/jan/assets/10397206/a80a32e7-633f-4109-90fa-ec223c9d3b17)

283
README.md
View File

@ -20,13 +20,14 @@
> ⚠️ **Jan is currently in Development**: Expect breaking changes and bugs!
Jan lets you run AI on your own hardware, and with 1-click installs for the latest models. Easy-to-use yet powerful, with helpful tools to monitor and manage software-hardware performance.
Jan lets you run AI on your own hardware, with helpful tools to manage models and monitor your hardware performance.
Jan runs on a wide variety of hardware. We run on consumer-grade GPUs and Mac Minis, as well as datacenter-grade DGX H100 clusters.
In the background, Jan runs [Nitro](https://nitro.jan.ai), a C++ inference engine. It runs various model formats (GGUF/TensorRT) on various hardware (Mac M1/M2/Intel, Windows, Linux, and datacenter-grade Nvidia GPUs) with optional GPU acceleration.
Jan can be run as a server or cloud-native application for enterprise. We offer enterprise plugins for LDAP integration and Audit Logs. Contact us at [hello@jan.ai](mailto:hello@jan.ai) for more details.
> See the Nitro codebase at https://nitro.jan.ai.
Jan is free, [open core](https://en.wikipedia.org/wiki/Open-core_model), and Sustainable Use Licensed.
<!-- TODO: uncomment this later when we have this feature -->
<!-- Jan can be run as a server or cloud-native application for enterprise. We offer enterprise plugins for LDAP integration and Audit Logs. Contact us at [hello@jan.ai](mailto:hello@jan.ai) for more details. -->
## Demo
@ -34,208 +35,122 @@ Jan is free, [open core](https://en.wikipedia.org/wiki/Open-core_model), and Sus
<img style='border:1px solid #000000' src="https://github.com/janhq/jan/assets/69952136/1f9bb48c-2e70-4633-9f68-7881cd925972" alt="Jan Web GIF">
</p>
## Features
## Quicklinks
**Self-Hosted AI**
- [x] Self-hosted Llama2 and LLMs
- [ ] Self-hosted StableDiffusion and Controlnet
- [ ] 1-click installs for Models (coming soon)
- Developer documentation: https://jan.ai/docs (Work in Progress)
- Desktop app: Download at https://jan.ai/
- Mobile app shell: Download via [App Store](https://apps.apple.com/us/app/jan-on-device-ai-cloud-ais/id6449664703) | [Android](https://play.google.com/store/apps/details?id=com.jan.ai)
- Nitro (C++ AI Engine): https://nitro.jan.ai
**3rd-party AIs**
- [ ] Connect to ChatGPT, Claude via API Key (coming soon)
- [ ] Security policy engine for 3rd-party AIs (coming soon)
- [ ] Pre-flight PII and Sensitive Data checks (coming soon)
## Plugins
**Multi-Device**
- [x] Web App
- [ ] Jan Mobile support for custom Jan server (in progress)
- [ ] Cloud deployments (coming soon)
Jan supports core & 3rd party extensions:
**Organization Tools**
- [x] Multi-user support
- [ ] Audit and Usage logs (coming soon)
- [ ] Compliance and Audit policy (coming soon)
- [x] **LLM chat**: Self-hosted Llama2 and LLMs
- [x] **Model Manager**: 1-click to install, swap, and delete models
- [x] **Storage**: Optionally store your conversation history and other data in SQLite/your storage of choice
- [ ] **3rd-party AIs**: Connect to ChatGPT, Claude via API Key (in progress)
- [ ] **Cross device support**: Mobile & Web support for custom shared servers (in progress)
- [ ] **File retrieval**: Users can upload private files and run a vectorDB (planned)
- [ ] **Multi-user support**: Share a single server across a team/friends (planned)
- [ ] **Compliance**: Auditing and flagging features (planned)
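
The extensions above are called from the web app through the plugin execution helpers. A minimal sketch, assuming the `executeSerial` helper and `ModelManagementService` keys that appear in this commit's web app code; the wrapper function itself is illustrative, not part of the codebase:

```typescript
// Illustrative sketch only: `executeSerial` and `ModelManagementService`
// come from this commit's web app code; the wrapper function is hypothetical.
import { executeSerial } from "@/_services/pluginService";
import { ModelManagementService } from "../../shared/coreService";

// Ask the model-management plugin which models are already on disk.
async function listDownloadedModels(): Promise<string[]> {
  const modelPaths: string[] = await executeSerial(
    ModelManagementService.GET_DOWNLOADED_MODELS,
  );
  return modelPaths ?? [];
}
```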
**Hardware Support**
## Hardware Support
- [x] Nvidia GPUs
- [x] Apple Silicon (in progress)
- [x] CPU support via llama.cpp
- [ ] Nvidia GPUs using TensorRT (in progress)
Nitro provides both CPU and GPU support, via [llama.cpp](https://github.com/ggerganov/llama.cpp) and [TensorRT](https://github.com/NVIDIA/TensorRT), respectively.
## Documentation
- [x] Nvidia GPUs (accelerated)
- [x] Apple M-series (accelerated)
- [x] Linux DEB
- [x] Windows x64
👋 https://docs.jan.ai (Work in Progress)
Not supported yet: Apple Intel, Linux RPM, Windows x86|ARM64, AMD ROCm
## Installation
> See [developer docs](https://docs.jan.ai/docs/) for detailed installation instructions.
> ⚠️ **Jan is currently in Development**: Expect breaking changes and bugs!
## Installation and Usage
### Step 1: Install Docker
### Pre-requisites
- node >= 20.0.0
- yarn >= 1.22.0
Jan is currently packaged as a Docker Compose application.
### Use as complete suite (in progress)
### For interactive development
- Docker ([Installation Instructions](https://docs.docker.com/get-docker/))
- Docker Compose ([Installation Instructions](https://docs.docker.com/compose/install/))
Note: These instructions have been tested on macOS only.
### Step 2: Clone Repo
1. **Clone the Repository:**
```bash
git clone https://github.com/janhq/jan.git
cd jan
```
git clone https://github.com/janhq/jan
git checkout feature/hackathon-refactor-jan-into-electron-app
cd jan
```
2. **Install dependencies:**
```
yarn install
# Packing base plugins
yarn build:plugins
```
3. **Run in development mode and use Jan Desktop**
```
yarn dev
```
This will start the development server and open the desktop app.
In this step, there are a few notifications about installing base plugins; just click `OK` and `Next` to continue.
### For production build
```bash
# Do step 1 and 2 in previous section
git clone https://github.com/janhq/jan
cd jan
yarn install
yarn build:plugins
# Build the app
yarn build
```
This will build the macOS M1/M2 app for production (with code signing already done) and put the result in the `dist` folder.
## Contributing
Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) file
```sh
# From the root, run:
yarn install
yarn build:plugins
yarn dev
```
### Step 3: Configure `.env`
> See [developer docs]() for guidelines on how to contribute to this project.
We provide a sample `.env` file that you can use to get started.
## License
```shell
cp sample.env .env
```
Jan is free, [open core](https://en.wikipedia.org/wiki/Open-core_model), and Sustainable Use Licensed.
You will need to set the following `.env` variables
## Acknowledgements
```shell
# TODO: Document .env variables
```
### Step 4: Install Models
> Note: These steps will change soon, as we will be switching to [Nitro](https://github.com/janhq/nitro), an accelerated inference server written in C++
#### Step 4.1: Install Mamba
> For complete Mambaforge installation instructions, see [miniforge repo](https://github.com/conda-forge/miniforge)
Install Mamba to handle native Python bindings (which can yield better performance on Mac M-series / NVIDIA)
```bash
curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"
bash Mambaforge-$(uname)-$(uname -m).sh
rm Mambaforge-$(uname)-$(uname -m).sh
# Create environment
conda create -n jan python=3.9.16
conda activate jan
```
Uninstall any previous versions of `llama-cpp-python`
```bash
pip uninstall llama-cpp-python -y
```
#### Step 4.2: Install `llama-cpp-python`
> Note: This step will change soon once [Nitro](https://github.com/janhq/nitro) (our accelerated inference server written in C++) is released
- On Mac
```bash
# See https://github.com/abetlen/llama-cpp-python/blob/main/docs/install/macos.md
CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
pip install 'llama-cpp-python[server]'
```
- On Linux with NVIDIA GPU Hardware Acceleration
```bash
# See https://github.com/abetlen/llama-cpp-python#installation-with-hardware-acceleration
CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
pip install 'llama-cpp-python[server]'
```
- On Linux with an Intel/AMD CPU (with AVX2/AVX-512 support)
```bash
CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 pip install llama-cpp-python
pip install 'llama-cpp-python[server]'
```
We recommend Llama2-7B (4-bit quantized) as a basic model to get started.
You will need to download the models to the `models` folder at root level.
```shell
# Downloads the model (~4 GB)
# Download time depends on your internet connection and HuggingFace's bandwidth
# You can use any source that provides `.gguf` format models - https://huggingface.co/models?search=gguf
wget https://huggingface.co/TheBloke/Llama-2-7B-GGUF/resolve/main/llama-2-7b.Q4_0.gguf -P models
```
- Run the model on the host machine
```bash
# Change the value of the --model flag to your corresponding model path
# --n_gpu_layers 1 enables the accelerator (Metal on Mac, the NVIDIA GPU on Linux)
# This service runs at `http://localhost:8000` on the host
# The backend service inside Docker Compose connects to it via `http://host.docker.internal:8000`
python3 -m llama_cpp.server --model models/llama-2-7b.Q4_0.gguf --n_gpu_layers 1
```
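
For reference, a minimal TypeScript sketch of a client calling this server; the `/v1/chat/completions` route and payload below are assumptions based on llama-cpp-python's OpenAI-compatible defaults, not code from this repository:

```typescript
// Hypothetical client sketch: assumes llama-cpp-python's OpenAI-compatible
// server is listening on the host. From inside Docker Compose, use
// http://host.docker.internal:8000 instead of localhost.
async function chatOnce(prompt: string): Promise<string> {
  const res = await fetch("http://localhost:8000/v1/chat/completions", {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify({
      messages: [{ role: "user", content: prompt }],
      max_tokens: 500,
      stream: false,
    }),
  });
  if (!res.ok) throw new Error(`Inference server returned ${res.status}`);
  const data = await res.json();
  return data.choices[0]?.message?.content ?? "";
}
```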
### Step 5: `docker compose up`
Jan utilizes Docker Compose to run all services:
```shell
docker compose up -d # Detached mode
```
The table below summarizes the services and their respective URLs and credentials.
| Service | Container Name | URL and Port | Credentials |
| ------------------------------------------------ | -------------------- | --------------------- | ---------------------------------------------------------------------------------- |
| Jan Web | jan-web-* | http://localhost:3000 | Set in `conf/keycloak_conf/example-realm.json` <br />- Default Username / Password |
| [Hasura](https://hasura.io) (Backend) | jan-graphql-engine-* | http://localhost:8080 | Set in `conf/sample.env_app-backend` <br /> - `HASURA_GRAPHQL_ADMIN_SECRET` |
| [Keycloak](https://www.keycloak.org/) (Identity) | jan-keycloak-* | http://localhost:8088 | Set in `.env` <br />- `KEYCLOAK_ADMIN` <br />- `KEYCLOAK_ADMIN_PASSWORD` | |
| PostgresDB | jan-postgres-* | http://localhost:5432 | Set in `.env` |
### Step 6: Configure Keycloak
- [ ] Refactor [Keycloak Instructions](KC.md) into main README.md
- [ ] Changing login theme
### Step 7: Use Jan
- Launch the web application via `http://localhost:3000`.
- Log in with the default user (username: `username`, password: `password`)
### Step 8: Deploying to Production
- [ ] TODO
## About Jan
Jan is a commercial company with a [Fair Code](https://faircode.io/) business model. This means that while Jan is open source and free to use, we require commercial licenses for specific use cases (e.g. hosting Jan as a service).
We are a team of engineers passionate about AI, productivity and the future of work. We are funded through consulting contracts and enterprise licenses. Feel free to reach out to us!
### Repo Structure
Jan comprises several repositories:
| Repo | Purpose |
| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [Jan](https://github.com/janhq/jan) | AI Platform to run AI in the enterprise. Easy-to-use for users, and packed with useful organizational and compliance features. |
| [Jan Mobile](https://github.com/janhq/jan-react-native) | Mobile App that can be pointed to a custom Jan server. |
| [Nitro](https://github.com/janhq/nitro) | Inference Engine that runs AI on different types of hardware. Offers popular API formats (e.g. OpenAI, Clipdrop). Written in C++ for blazing fast performance |
### Architecture
Jan builds on top of several open-source projects:
Jan builds on top of other open-source projects:
- [llama.cpp](https://github.com/ggerganov/llama.cpp)
- [TensorRT](https://github.com/NVIDIA/TensorRT)
- [Keycloak Community](https://github.com/keycloak/keycloak) (Apache-2.0)
- [Hasura Community Edition](https://github.com/hasura/graphql-engine) (Apache-2.0)
We may re-evaluate this in the future, given different customer requirements.
## Contact
### Contributing
Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) file for guidelines on how to contribute to this project.
Please note that Jan intends to build a sustainable business that can provide high quality jobs to its contributors. If you are excited about our mission and vision, please contact us to explore opportunities.
### Contact
- For support: please file a Github ticket
- For questions: join our Discord [here](https://discord.gg/FTk2MvZwJH)
- For long form inquiries: please email hello@jan.ai
- Bugs & requests: file a Github ticket
- For discussion: join our Discord [here](https://discord.gg/FTk2MvZwJH)
- For business inquiries: email hello@jan.ai
- For jobs: please email hr@jan.ai

View File

@ -1,4 +0,0 @@
.DS_Store
.env
.env_postgresql
worker/node_modules/.mf

View File

@ -1,59 +0,0 @@
## Jan Backend
A Hasura data API platform that provides APIs for client interaction with the Language Model (LLM) through chat or for generating art with Stable Diffusion. It is encapsulated within a Docker container for easy local deployment.
## Quickstart
1. Run `docker compose up`
```bash
docker compose up
```
2. Install [HasuraCLI](https://hasura.io/docs/latest/hasura-cli/overview/)
3. Open Hasura Console
```bash
cd hasura && hasura console
```
4. Apply Migration
```bash
hasura migrate apply
```
5. Apply Metadata
```bash
hasura metadata apply
```
6. Apply seeds
```bash
hasura seed apply
```
## Hasura One Click Deploy
Use this URL to deploy this app to Hasura Cloud
[![Hasura Deploy](https://hasura.io/deploy-button.svg)](https://cloud.hasura.io/deploy?github_repo=https://github.com/janhq/app-backend/&hasura_dir=/hasura)
[One-click deploy docs](https://hasura.io/docs/latest/getting-started/getting-started-cloud/)
## Modify schema & model
[Hasura Tutorials](https://hasura.io/docs/latest/resources/tutorials/index/)
## Events & Workers
A serverless function (Cloudflare Worker) that streams the LLM message and updates it.
Read more about Hasura Events here:
> https://hasura.io/docs/latest/event-triggers/serverless/
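
A rough sketch of this pattern, assuming Hasura's standard event-trigger payload shape; the handler body is hypothetical and not the repository's actual worker code:

```typescript
// Hypothetical Cloudflare Worker sketch: receives a Hasura event-trigger
// payload for new rows in `messages` and reacts to it.
interface HasuraEvent<T> {
  event: { op: "INSERT" | "UPDATE" | "DELETE"; data: { old: T | null; new: T | null } };
  table: { schema: string; name: string };
}

export default {
  async fetch(request: Request): Promise<Response> {
    const payload = (await request.json()) as HasuraEvent<{ id: string; content: string | null }>;
    if (payload.table.name === "messages" && payload.event.op === "INSERT") {
      // e.g. kick off LLM streaming here, then update the row when done
      console.log("new message", payload.event.data.new?.id);
    }
    return new Response("ok");
  },
};
```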
## Deploy Worker
```bash
npx wrangler deploy
```
[Cloudflare Worker Guide](https://developers.cloudflare.com/workers/get-started/guide/)

View File

@ -1,52 +0,0 @@
version: "3.6"
services:
postgres:
image: postgres:13
restart: always
volumes:
- db_data:/var/lib/postgresql/data
env_file:
- .env_postgresql
graphql-engine:
image: hasura/graphql-engine:v2.31.0.cli-migrations-v3
ports:
- "8080:8080"
restart: always
env_file:
- .env
volumes:
- ./hasura/migrations:/migrations
- ./hasura/metadata:/metadata
depends_on:
data-connector-agent:
condition: service_healthy
data-connector-agent:
image: hasura/graphql-data-connector:v2.31.0
restart: always
ports:
- 8081:8081
environment:
QUARKUS_LOG_LEVEL: ERROR # FATAL, ERROR, WARN, INFO, DEBUG, TRACE
## https://quarkus.io/guides/opentelemetry#configuration-reference
QUARKUS_OPENTELEMETRY_ENABLED: "false"
## QUARKUS_OPENTELEMETRY_TRACER_EXPORTER_OTLP_ENDPOINT: http://jaeger:4317
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8081/api/v1/athena/health"]
interval: 5s
timeout: 10s
retries: 5
start_period: 5s
worker:
build:
context: ./worker
dockerfile: ./Dockerfile
restart: always
environment:
- "NODE_ENV=development"
volumes:
- ./worker:/worker
ports:
- "8787:8787"
volumes:
db_data:

View File

@ -1,7 +0,0 @@
version: 3
endpoint: http://localhost:8080
admin_secret: myadminsecretkey
metadata_directory: metadata
actions:
kind: synchronous
handler_webhook_baseurl: http://localhost:3000

View File

@ -1,20 +0,0 @@
type Mutation {
imageGeneration(
input: ImageGenerationInput!
): ImageGenerationOutput
}
input ImageGenerationInput {
prompt: String!
neg_prompt: String!
model: String!
seed: Int!
steps: Int!
width: Int!
height: Int!
}
type ImageGenerationOutput {
url: String!
}

View File

@ -1,33 +0,0 @@
actions:
- name: imageGeneration
definition:
kind: synchronous
handler: '{{HASURA_ACTION_STABLE_DIFFUSION_URL}}'
timeout: 1800
request_transform:
body:
action: transform
template: |-
{
"prompt": {{$body.input.input.prompt}},
"neg_prompt": {{$body.input.input.neg_prompt}},
"unet_model": {{$body.input.input.model}},
"seed": {{$body.input.input.seed}},
"steps": {{$body.input.input.steps}},
"width": {{$body.input.input.width}},
"height": {{$body.input.input.height}}
}
method: POST
query_params: {}
template_engine: Kriti
url: '{{$base_url}}/inferences/txt2img'
version: 2
permissions:
- role: user
custom_types:
enums: []
input_objects:
- name: ImageGenerationInput
objects:
- name: ImageGenerationOutput
scalars: []
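
For readability, the body this transform POSTs to `{{$base_url}}/inferences/txt2img` corresponds roughly to the following TypeScript shape (field names taken from the Kriti template above; types inferred from `ImageGenerationInput`):

```typescript
// Shape of the JSON body sent to the Stable Diffusion service's
// /inferences/txt2img endpoint, as produced by the request transform above.
interface Txt2ImgRequest {
  prompt: string;
  neg_prompt: string;
  unet_model: string; // mapped from ImageGenerationInput.model
  seed: number;
  steps: number;
  width: number;
  height: number;
}
```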

View File

@ -1 +0,0 @@
[]

View File

@ -1 +0,0 @@
{}

View File

@ -1,11 +0,0 @@
dataconnector:
athena:
uri: http://data-connector-agent:8081/api/v1/athena
mariadb:
uri: http://data-connector-agent:8081/api/v1/mariadb
mysql8:
uri: http://data-connector-agent:8081/api/v1/mysql
oracle:
uri: http://data-connector-agent:8081/api/v1/oracle
snowflake:
uri: http://data-connector-agent:8081/api/v1/snowflake

View File

@ -1 +0,0 @@
[]

View File

@ -1,9 +0,0 @@
- name: jandb
kind: postgres
configuration:
connection_info:
database_url:
from_env: PG_DATABASE_URL
isolation_level: read-committed
use_prepared_statements: false
tables: "!include jandb/tables/tables.yaml"

View File

@ -1,43 +0,0 @@
table:
name: collection_products
schema: public
array_relationships:
- name: collections
using:
manual_configuration:
column_mapping:
collection_id: id
insertion_order: null
remote_table:
name: collections
schema: public
- name: products
using:
manual_configuration:
column_mapping:
product_id: id
insertion_order: null
remote_table:
name: products
schema: public
select_permissions:
- role: public
permission:
columns:
- created_at
- updated_at
- collection_id
- id
- product_id
filter: {}
comment: ""
- role: user
permission:
columns:
- created_at
- updated_at
- collection_id
- id
- product_id
filter: {}
comment: ""

View File

@ -1,36 +0,0 @@
table:
name: collections
schema: public
array_relationships:
- name: collection_products
using:
manual_configuration:
column_mapping:
id: collection_id
insertion_order: null
remote_table:
name: collection_products
schema: public
select_permissions:
- role: public
permission:
columns:
- slug
- description
- name
- created_at
- updated_at
- id
filter: {}
comment: ""
- role: user
permission:
columns:
- slug
- description
- name
- created_at
- updated_at
- id
filter: {}
comment: ""

View File

@ -1,68 +0,0 @@
table:
name: conversations
schema: public
object_relationships:
- name: conversation_product
using:
manual_configuration:
column_mapping:
product_id: id
insertion_order: null
remote_table:
name: products
schema: public
array_relationships:
- name: conversation_messages
using:
manual_configuration:
column_mapping:
id: conversation_id
insertion_order: null
remote_table:
name: messages
schema: public
insert_permissions:
- role: user
permission:
check:
user_id:
_eq: X-Hasura-User-Id
columns:
- last_image_url
- last_text_message
- product_id
- user_id
comment: ""
select_permissions:
- role: user
permission:
columns:
- last_image_url
- last_text_message
- user_id
- created_at
- updated_at
- id
- product_id
filter:
user_id:
_eq: X-Hasura-User-Id
comment: ""
update_permissions:
- role: user
permission:
columns:
- last_image_url
- last_text_message
filter:
user_id:
_eq: X-Hasura-User-Id
check: null
comment: ""
delete_permissions:
- role: user
permission:
filter:
user_id:
_eq: X-Hasura-User-Id
comment: ""

View File

@ -1,68 +0,0 @@
table:
name: message_medias
schema: public
object_relationships:
- name: media_message
using:
manual_configuration:
column_mapping:
message_id: id
insertion_order: null
remote_table:
name: messages
schema: public
insert_permissions:
- role: user
permission:
check:
media_message:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
columns:
- media_url
- message_id
- mime_type
comment: ""
select_permissions:
- role: user
permission:
columns:
- mime_type
- media_url
- created_at
- updated_at
- id
- message_id
filter:
media_message:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
comment: ""
update_permissions:
- role: user
permission:
columns:
- mime_type
- media_url
- created_at
- updated_at
- id
- message_id
filter:
media_message:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
check: null
comment: ""
delete_permissions:
- role: user
permission:
filter:
media_message:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
comment: ""

View File

@ -1,86 +0,0 @@
table:
name: messages
schema: public
object_relationships:
- name: message_conversation
using:
manual_configuration:
column_mapping:
conversation_id: id
insertion_order: null
remote_table:
name: conversations
schema: public
array_relationships:
- name: message_medias
using:
manual_configuration:
column_mapping:
id: message_id
insertion_order: null
remote_table:
name: message_medias
schema: public
insert_permissions:
- role: user
permission:
check:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
columns:
- content
- conversation_id
- message_sender_type
- message_type
- prompt_cache
- sender
- sender_avatar_url
- sender_name
- status
comment: ""
select_permissions:
- role: user
permission:
columns:
- content
- conversation_id
- created_at
- id
- message_sender_type
- message_type
- sender
- sender_avatar_url
- sender_name
- status
- updated_at
filter:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
comment: ""
update_permissions:
- role: user
permission:
columns:
- content
- message_sender_type
- message_type
- sender
- sender_avatar_url
- sender_name
- status
filter:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
check: null
comment: ""
delete_permissions:
- role: user
permission:
filter:
message_conversation:
user_id:
_eq: X-Hasura-User-Id
comment: ""

View File

@ -1,43 +0,0 @@
table:
name: product_prompts
schema: public
array_relationships:
- name: products
using:
manual_configuration:
column_mapping:
product_id: id
insertion_order: null
remote_table:
name: products
schema: public
- name: prompts
using:
manual_configuration:
column_mapping:
prompt_id: id
insertion_order: null
remote_table:
name: prompts
schema: public
select_permissions:
- role: public
permission:
columns:
- created_at
- updated_at
- id
- product_id
- prompt_id
filter: {}
comment: ""
- role: user
permission:
columns:
- created_at
- updated_at
- id
- product_id
- prompt_id
filter: {}
comment: ""

View File

@ -1,65 +0,0 @@
table:
name: products
schema: public
array_relationships:
- name: product_collections
using:
manual_configuration:
column_mapping:
id: product_id
insertion_order: null
remote_table:
name: collection_products
schema: public
- name: product_prompts
using:
manual_configuration:
column_mapping:
id: product_id
insertion_order: null
remote_table:
name: product_prompts
schema: public
select_permissions:
- role: public
permission:
columns:
- nsfw
- slug
- inputs
- outputs
- author
- description
- greeting
- image_url
- long_description
- name
- source_url
- technical_description
- version
- created_at
- updated_at
- id
filter: {}
comment: ""
- role: user
permission:
columns:
- nsfw
- slug
- inputs
- outputs
- author
- description
- greeting
- image_url
- long_description
- name
- source_url
- technical_description
- version
- created_at
- updated_at
- id
filter: {}
comment: ""

View File

@ -1,36 +0,0 @@
table:
name: prompts
schema: public
array_relationships:
- name: prompt_products
using:
manual_configuration:
column_mapping:
id: prompt_id
insertion_order: null
remote_table:
name: product_prompts
schema: public
select_permissions:
- role: public
permission:
columns:
- slug
- content
- image_url
- created_at
- updated_at
- id
filter: {}
comment: ""
- role: user
permission:
columns:
- slug
- content
- image_url
- created_at
- updated_at
- id
filter: {}
comment: ""

View File

@ -1,8 +0,0 @@
- "!include public_collection_products.yaml"
- "!include public_collections.yaml"
- "!include public_conversations.yaml"
- "!include public_message_medias.yaml"
- "!include public_messages.yaml"
- "!include public_product_prompts.yaml"
- "!include public_products.yaml"
- "!include public_prompts.yaml"

View File

@ -1 +0,0 @@
disabled_for_roles: []

View File

@ -1 +0,0 @@
{}

View File

@ -1 +0,0 @@
{}

View File

@ -1 +0,0 @@
version: 3

View File

@ -1 +0,0 @@
DROP TABLE "public"."collections";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."collections" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "slug" varchar NOT NULL, "name" text NOT NULL, "description" text NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") , UNIQUE ("slug"));
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_collections_updated_at"
BEFORE UPDATE ON "public"."collections"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_collections_updated_at" ON "public"."collections"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
DROP TABLE "public"."products";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."products" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "slug" varchar NOT NULL, "name" text NOT NULL, "description" text, "image_url" text, "long_description" text, "technical_description" text, "author" text, "version" text, "source_url" text, "nsfw" boolean NOT NULL DEFAULT true, "greeting" text, "inputs" jsonb, "outputs" jsonb, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") , UNIQUE ("slug"));
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_products_updated_at"
BEFORE UPDATE ON "public"."products"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_products_updated_at" ON "public"."products"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
DROP TABLE "public"."prompts";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."prompts" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "slug" varchar NOT NULL, "content" text, "image_url" text, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") , UNIQUE ("slug"));
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_prompts_updated_at"
BEFORE UPDATE ON "public"."prompts"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_prompts_updated_at" ON "public"."prompts"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
DROP TABLE "public"."conversations";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."conversations" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "product_id" uuid NOT NULL, "user_id" Text NOT NULL, "last_image_url" text, "last_text_message" text, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_conversations_updated_at"
BEFORE UPDATE ON "public"."conversations"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_conversations_updated_at" ON "public"."conversations"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
DROP TABLE "public"."messages";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."messages" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "conversation_id" uuid NOT NULL, "message_type" varchar, "message_sender_type" varchar, "sender" text NOT NULL, "sender_name" text, "sender_avatar_url" text, "content" text, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_messages_updated_at"
BEFORE UPDATE ON "public"."messages"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_messages_updated_at" ON "public"."messages"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
DROP TABLE "public"."message_medias";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."message_medias" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "message_id" uuid NOT NULL, "media_url" text, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), "mime_type" varchar, PRIMARY KEY ("id") );
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_message_medias_updated_at"
BEFORE UPDATE ON "public"."message_medias"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_message_medias_updated_at" ON "public"."message_medias"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
DROP TABLE "public"."collection_products";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."collection_products" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "collection_id" uuid NOT NULL, "product_id" uuid NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") , UNIQUE ("collection_id", "product_id"));
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_collection_products_updated_at"
BEFORE UPDATE ON "public"."collection_products"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_collection_products_updated_at" ON "public"."collection_products"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
DROP TABLE "public"."product_prompts";

View File

@ -1,18 +0,0 @@
CREATE TABLE "public"."product_prompts" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "product_id" uuid NOT NULL, "prompt_id" uuid NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), "updated_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") , UNIQUE ("product_id", "prompt_id"));
CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"()
RETURNS TRIGGER AS $$
DECLARE
_new record;
BEGIN
_new := NEW;
_new."updated_at" = NOW();
RETURN _new;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER "set_public_product_prompts_updated_at"
BEFORE UPDATE ON "public"."product_prompts"
FOR EACH ROW
EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"();
COMMENT ON TRIGGER "set_public_product_prompts_updated_at" ON "public"."product_prompts"
IS 'trigger to set value of column "updated_at" to current timestamp on row update';
CREATE EXTENSION IF NOT EXISTS pgcrypto;

View File

@ -1 +0,0 @@
alter table "public"."collection_products" drop constraint "collection_products_collection_id_fkey";

View File

@ -1,5 +0,0 @@
alter table "public"."collection_products"
add constraint "collection_products_collection_id_fkey"
foreign key ("collection_id")
references "public"."collections"
("id") on update cascade on delete cascade;

View File

@ -1 +0,0 @@
alter table "public"."collection_products" drop constraint "collection_products_product_id_fkey";

View File

@ -1,5 +0,0 @@
alter table "public"."collection_products"
add constraint "collection_products_product_id_fkey"
foreign key ("product_id")
references "public"."products"
("id") on update cascade on delete cascade;

View File

@ -1 +0,0 @@
alter table "public"."messages" drop column "status";

View File

@ -1,2 +0,0 @@
alter table "public"."messages" add column "status" varchar
null default 'ready';

View File

@ -1 +0,0 @@
alter table "public"."messages" drop column "prompt_cache";

View File

@ -1,2 +0,0 @@
alter table "public"."messages" add column "prompt_cache" jsonb
null;

View File

@ -1 +0,0 @@
-- DO NOTHING WITH DATA MIGRATION DOWN

View File

@ -1,3 +0,0 @@
INSERT INTO public.products ("slug", "name", "nsfw", "image_url", "description", "long_description", "technical_description", "author", "version", "source_url", "inputs", "outputs", "greeting") VALUES
('llama2', 'Llama-2-7B-Chat', 't', 'https://static-assets.jan.ai/llama2.jpg','Llama 2 is Meta`s open source large language model (LLM)', 'Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model. Links to other models can be found in the index at the bottom.', 'Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.', 'Meta', 'Llama2-7B-GGML', 'https://huggingface.co/TheBloke/airoboros-13B-gpt4-1.4-GGML', '{"body": [{"name": "messages", "type": "array", "items": [{"type": "object", "properties": [{"name": "role", "type": "string", "example": "system", "description": "Defines the role of the message."}, {"name": "content", "type": "string", "example": "Hello, world!", "description": "Contains the content of the message."}]}], "description": "An array of messages, each containing a role and content. The latest message is always at the end of the array."}, {"name": "stream", "type": "boolean", "example": true, "description": "Indicates whether the client wants to keep the connection open for streaming."}, {"name": "max_tokens", "type": "integer", "example": 500, "description": "Defines the maximum number of tokens that the client wants to receive."}], "slug": "llm", "headers": {"accept": "text/event-stream", "content-type": "application/json"}}', '{"slug": "llm", "type": "object", "properties": [{"name": "id", "type": "string", "example": "chatcmpl-4c4e5eb5-bf53-4dbc-9136-1cf69fc5fd7c", "description": "The unique identifier of the chat completion chunk."}, {"name": "model", "type": "string", "example": "gpt-3.5-turbo", "description": "The name of the GPT model used to generate the completion."}, {"name": "created", "type": "integer", "example": 1692169988, "description": "The Unix timestamp representing the time when the completion was generated."}, {"name": "object", "type": "string", "example": "chat.completion.chunk", "description": "A string indicating the type of the chat completion chunk."}, {"name": "choices", "type": "array", "items": [{"type": "object", "properties": [{"name": "index", "type": "integer", "example": 0, "description": "The index of the choice made by the GPT model."}, {"name": "delta", "type": "object", "properties": [{"name": "content", "type": "string", "example": "What", "description": "The content generated by the GPT model."}], "description": "A JSON object containing the content generated by the GPT model."}, {"name": "finish_reason", "type": "string", "example": null, "description": "A string indicating why the GPT model stopped generating content."}]}], "description": "An array containing the choices made by the GPT model to generate the completion."}], "description": "A JSON object representing a chat completion chunk."}', '👋Im a versatile AI trained on a wide range of topics, here to answer your questions about the universe. What are you curious about today?')
ON CONFLICT (slug) DO NOTHING;
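
The `outputs` JSON above documents the streamed completion chunks; as a reading aid, the same shape as a TypeScript interface (field names and examples taken from the seed data; the optionality of `delta.content` is an assumption):

```typescript
// Streaming chunk shape described by the seed data's `outputs` JSON.
interface ChatCompletionChunk {
  id: string;            // e.g. "chatcmpl-4c4e5eb5-..."
  model: string;
  created: number;       // Unix timestamp
  object: "chat.completion.chunk";
  choices: Array<{
    index: number;
    delta: { content?: string };
    finish_reason: string | null;
  }>;
}
```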

View File

@ -1 +0,0 @@
-- DO NOTHING WITH DATA MIGRATION DOWN

View File

@ -1,3 +0,0 @@
INSERT INTO public.collections ("slug", "name", "description") VALUES
('conversational', 'Conversational', 'Converse with these models and get answers.')
ON CONFLICT (slug) DO NOTHING;

View File

@ -1 +0,0 @@
-- DO NOTHING WITH DATA MIGRATION DOWN

View File

@ -1,4 +0,0 @@
INSERT INTO public.collection_products (collection_id, product_id)
SELECT (SELECT id FROM public.collections WHERE slug = 'conversational') AS collection_id, id AS product_id
FROM public.products
WHERE slug IN ('llama2') ON CONFLICT (collection_id, product_id) DO NOTHING;

View File

@ -1 +0,0 @@
-- DO NOTHING WITH DATA MIGRATION DOWN

View File

@ -1,8 +0,0 @@
INSERT INTO public.prompts ("slug", "content", "image_url") VALUES
('conversational-ai-future', 'What are possible developments for AI technology in the next decade?', ''),
('conversational-managing-stress', 'What are some tips for managing stress?', ''),
('conversational-postapoc-robot', 'Let''s role play. You are a robot in a post-apocalyptic world.', ''),
('conversational-python-pytorch', 'What is the difference between Python and Pytorch?', ''),
('conversational-quadratic-equation', 'Can you explain how to solve a quadratic equation?', ''),
('conversational-roman-history', 'What is the history of the Roman Empire?', '')
ON CONFLICT (slug) DO NOTHING;

View File

@ -1 +0,0 @@
-- DO NOTHING WITH DATA MIGRATION DOWN

View File

@ -1,9 +0,0 @@
INSERT INTO public.product_prompts (product_id, prompt_id)
SELECT p.id AS product_id, r.id AS prompt_id
FROM public.products p
JOIN public.prompts r
ON (p.id
IN (SELECT x.id FROM public.products x INNER JOIN public.collection_products y ON x.id = y.product_id
INNER JOIN public.collections z ON y.collection_id = z.id
WHERE z.slug = 'conversational'))
WHERE r.image_url IS NULL OR r.image_url = '' ON CONFLICT (product_id, prompt_id) DO NOTHING;

View File

@ -1,23 +0,0 @@
## postgres database to store Hasura metadata
HASURA_GRAPHQL_METADATA_DATABASE_URL=postgres://postgres:postgrespassword@postgres:5432/postgres
## this env var can be used to add the above postgres database to Hasura as a data source. this can be removed/updated based on your needs
PG_DATABASE_URL=postgres://postgres:postgrespassword@postgres:5432/postgres
## enable the console served by server
HASURA_GRAPHQL_ENABLE_CONSOLE="true" # set to "false" to disable console
## enable debugging mode. It is recommended to disable this in production
HASURA_GRAPHQL_DEV_MODE="true"
HASURA_GRAPHQL_ENABLED_LOG_TYPES=startup, http-log, webhook-log, websocket-log, query-log
## uncomment next line to run console offline (i.e load console assets from server instead of CDN)
# HASURA_GRAPHQL_CONSOLE_ASSETS_DIR: /srv/console-assets
## uncomment next line to set an admin secret
HASURA_GRAPHQL_ADMIN_SECRET=myadminsecretkey
HASURA_GRAPHQL_UNAUTHORIZED_ROLE="public"
HASURA_GRAPHQL_METADATA_DEFAULTS='{"backend_configs":{"dataconnector":{"athena":{"uri":"http://data-connector-agent:8081/api/v1/athena"},"mariadb":{"uri":"http://data-connector-agent:8081/api/v1/mariadb"},"mysql8":{"uri":"http://data-connector-agent:8081/api/v1/mysql"},"oracle":{"uri":"http://data-connector-agent:8081/api/v1/oracle"},"snowflake":{"uri":"http://data-connector-agent:8081/api/v1/snowflake"}}}}'
HASURA_GRAPHQL_JWT_SECRET={"type": "RS256", "key": "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"}
# Environment variable for auto migrate
HASURA_GRAPHQL_MIGRATIONS_DIR=/migrations
HASURA_GRAPHQL_METADATA_DIR=/metadata
HASURA_GRAPHQL_ENABLE_CONSOLE='true'
HASURA_ACTION_STABLE_DIFFUSION_URL=http://sd:8000
HASURA_EVENTS_HOOK_URL="http://worker:8787"

View File

@ -1 +0,0 @@
POSTGRES_PASSWORD=postgrespassword

View File

@ -1,96 +0,0 @@
# App
Jan Desktop is an Electron application designed to allow users to interact with the Language Model (LLM) through chat or create art using Stable Diffusion.
## Features
- Chat with the Language Model: Engage in interactive conversations with the Language Model. Ask questions, seek information, or simply have a chat.
- Generate Art with Stable Diffusion: Utilize the power of Stable Diffusion to generate unique and captivating pieces of art. Experiment with various parameters to achieve desired results.
## Installation and Usage
### Pre-requisites
- node >= 20.0.0
- yarn >= 1.22.0
### Use as complete suite (in progress)
### For interactive development
Note: These instructions have been tested on macOS only.
1. **Clone the Repository:**
```
git clone https://github.com/janhq/jan
git checkout feature/hackathon-refactor-jan-into-electron-app
cd jan/app
```
2. **Install dependencies:**
```
yarn install
```
3. **Download Model and copy to userdata directory** (this is a hacky step and will be removed in future versions)
```
# Determine the directory where the model will be stored: /Users/<username>/Library/Application Support/jan-web/
mkdir "/Users/<username>/Library/Application Support/jan-web"
# Now download the model to the correct location:
wget -O "/Users/<username>/Library/Application Support/jan-web/llama-2-7b-chat.gguf.q4_0.bin" https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_0.gguf
```
4. **Run in development mode and use Jan Desktop**
```
yarn electron:start
```
This will start the development server and open the desktop app.
In this step, there are a few notifications about installing base plugins; just click `OK` and `Next` to continue.
![](./images/jan-desktop-dev-instruction-1.png)
![](./images/jan-desktop-dev-instruction-2.png)
![](./images/jan-desktop-dev-instruction-3.png)
After that, you can use Jan Desktop as normal.
![](./images/jan-desktop-dev-instruction-4.png)
![](./images/jan-desktop-dev-instruction-5.png)
![](./images/jan-desktop-dev-instruction-6.png)
### For production build
```bash
# Do step 1 and 2 in previous section
git clone https://github.com/janhq/jan
git checkout feature/hackathon-refactor-jan-into-electron-app
cd jan/app
yarn install
# Build the app
yarn electron:build:all
```
This will build the macOS M1/M2 app for production (with code signing already done) and put the result in the `dist` folder.
## Configuration
TO DO
## Dependencies
TO DO
## Contributing
Contributions are welcome! If you find a bug or have suggestions for improvements, feel free to open an issue or submit a pull request on the [GitHub repository](https://github.com/janhq/jan).
## License
This project is licensed under the Fair-code License - see the [License](https://faircode.io/#licenses) for more details.
---
Feel free to reach out on [Discord](https://jan.ai/discord) if you have any questions or need further assistance. Happy coding with Jan Web and exploring the capabilities of the Language Model and Stable Diffusion! 🚀🎨🤖

View File

@ -1,32 +0,0 @@
"use client";
import ModelDetailSideBar from "../ModelDetailSideBar";
import ProductOverview from "../ProductOverview";
import { useAtomValue } from "jotai";
import {
getActiveConvoIdAtom,
showingProductDetailAtom,
} from "@/_helpers/JotaiWrapper";
import { ReactNode } from "react";
import ModelManagement from "../ModelManagement";
type Props = {
children: ReactNode;
};
export default function ChatContainer({ children }: Props) {
const activeConvoId = useAtomValue(getActiveConvoIdAtom);
// const showingProductDetail = useAtomValue(showingProductDetailAtom);
if (!activeConvoId) {
// return <ProductOverview />;
return <ModelManagement />;
}
return (
<div className="flex flex-1 overflow-hidden">
{children}
{/* {showingProductDetail ? <ModelDetailSideBar /> : null} */}
</div>
);
}

View File

@ -1,81 +0,0 @@
"use client";
import DownloadModelContent from "../DownloadModelContent";
import ModelDownloadButton from "../ModelDownloadButton";
import ModelDownloadingButton from "../ModelDownloadingButton";
import ViewModelDetailButton from "../ViewModelDetailButton";
type Props = {
name: string;
type: string;
author: string;
description: string;
isRecommend: boolean;
storage: number;
installed?: boolean;
required?: string;
downloading?: boolean;
total?: number;
transferred?: number;
onInitClick?: () => void;
onDeleteClick?: () => void;
onDownloadClick?: () => void;
};
const DownloadModelCard: React.FC<Props> = ({
author,
description,
isRecommend,
name,
storage,
type,
installed = false,
required,
downloading = false,
total = 0,
transferred = 0,
onInitClick,
onDeleteClick,
onDownloadClick,
}) => {
const handleViewDetails = () => {};
let downloadButton = null;
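// Note: the caller (ModelListContainer) passes `downloading={downloadState == null}`,
// so the ternary's first branch renders the start-download button when no download
// is active, and the progress button while a download is in flight.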
if (!installed) {
downloadButton = downloading ? (
<div className="w-1/5 flex items-center justify-end">
<ModelDownloadButton callback={() => onDownloadClick?.()} />
</div>
) : (
<div className="w-1/5 flex items-start justify-end">
<ModelDownloadingButton total={total} value={transferred} />
</div>
);
} else {
downloadButton = (
<div className="flex flex-col">
<button onClick={onInitClick}>Init</button>
<button onClick={onDeleteClick}>Delete</button>
</div>
);
}
return (
<div className="border rounded-lg border-gray-200">
<div className="flex justify-between py-4 px-3 gap-[10px]">
<DownloadModelContent
required={required}
author={author}
description={description}
isRecommend={isRecommend}
name={name}
type={type}
/>
{downloadButton}
</div>
<ViewModelDetailButton callback={handleViewDetails} />
</div>
);
};
export default DownloadModelCard;

View File

@ -1,24 +0,0 @@
"use client";
import { useAtomValue } from "jotai";
import AdvancedPrompt from "../AdvancedPrompt";
import CompactSideBar from "../CompactSideBar";
import LeftSidebar from "../LeftSidebar";
import { showingAdvancedPromptAtom } from "@/_helpers/JotaiWrapper";
const LeftContainer: React.FC = () => {
const isShowingAdvPrompt = useAtomValue(showingAdvancedPromptAtom);
if (isShowingAdvPrompt) {
return (
<div className="flex h-screen">
<CompactSideBar />
<AdvancedPrompt />
</div>
);
}
return <LeftSidebar />;
};
export default LeftContainer;

View File

@ -1,136 +0,0 @@
"use client";
import { useEffect, useState } from "react";
import DownloadModelCard from "../DownloadModelCard";
import { executeSerial } from "@/_services/pluginService";
import { ModelManagementService } from "../../../shared/coreService";
import { useAtomValue } from "jotai";
import { modelDownloadStateAtom } from "@/_helpers/JotaiWrapper";
const ModelListContainer: React.FC = () => {
const [downloadedModels, setDownloadedModels] = useState<string[]>([]);
const downloadState = useAtomValue(modelDownloadStateAtom);
const DownloadedModel = {
title: "Downloaded Model",
data: [
{
name: "Llama 2 7B Chat - GGML",
type: "7B",
author: "The Bloke",
description:
"Primary intended uses The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension, understanding capabilities and limitations of current language models, and developing techniques to improve those, evaluating and mitigating biases, risks, toxic and harmful content generations, hallucinations.",
isRecommend: true,
storage: 3780,
default: true,
},
],
};
const BrowseAvailableModels = {
title: "Browse Available Models",
data: [
{
name: "Llama 2 7B Chat - GGML",
type: "7B",
author: "The Bloke",
description:
"Primary intended uses The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension, understanding capabilities and limitations of current language models, and developing techniques to improve those, evaluating and mitigating biases, risks, toxic and harmful content generations, hallucinations.",
isRecommend: true,
storage: 3780,
default: true,
},
],
};
useEffect(() => {
const getDownloadedModels = async () => {
const modelNames = await executeSerial(
ModelManagementService.GET_DOWNLOADED_MODELS,
);
setDownloadedModels(modelNames);
};
getDownloadedModels();
}, []);
const onDeleteClick = async () => {
// TODO: for now we only support 1 model
if (!downloadedModels || downloadedModels.length < 1) {
return;
}
console.log(downloadedModels[0]);
const pathArray = downloadedModels[0].split("/");
const modelName = pathArray[pathArray.length - 1];
console.log(`Prepare to delete ${modelName}`);
// setShow(true); // TODO: later
await executeSerial(ModelManagementService.DELETE_MODEL, modelName);
setDownloadedModels([]);
};
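// The download target is pinned to TheBloke's Llama 2 7B Chat Q4_0 GGUF build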
const initModel = async () => {
const product = {
name: "LLama 2 7B Chat",
fileName: "llama-2-7b-chat.gguf.q4_0.bin",
downloadUrl:
"https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_0.gguf",
};
await executeSerial(ModelManagementService.INIT_MODEL, product);
};
const onDownloadClick = async () => {
const url =
"https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_0.gguf";
await executeSerial(ModelManagementService.DOWNLOAD_MODEL, url);
};
return (
<div className="flex flex-col gap-5">
<div className="pb-5 flex flex-col gap-2">
<Title title={DownloadedModel.title} />
{downloadedModels?.length > 0 &&
DownloadedModel.data.map((item, index) => (
<DownloadModelCard
key={index}
{...item}
installed={true}
onInitClick={initModel}
onDeleteClick={onDeleteClick}
/>
))}
</div>
<div className="pb-5 flex flex-col gap-2">
{downloadedModels.length === 0 && (
<>
<Title title={BrowseAvailableModels.title} />
{BrowseAvailableModels.data.map((item, index) => (
<DownloadModelCard
key={index}
{...item}
downloading={downloadState != null}
total={downloadState?.size.total ?? 0}
transferred={downloadState?.size.transferred ?? 0}
onDownloadClick={onDownloadClick}
/>
))}
</>
)}
</div>
</div>
);
};
type Props = {
title: string;
};
const Title: React.FC<Props> = ({ title }) => {
return (
<div className="flex gap-[10px]">
<span className="font-semibold text-xl leading-[25px] tracking-[-0.4px]">
{title}
</span>
</div>
);
};
export default ModelListContainer;

@@ -1,286 +0,0 @@
"use client";
import { useEffect, useRef, useState } from "react";
import {
setup,
plugins,
extensionPoints,
activationPoints,
} from "../../electron/core/plugin-manager/execution/index";
import {
ChartPieIcon,
CommandLineIcon,
HomeIcon,
PlayIcon,
} from "@heroicons/react/24/outline";
import { MagnifyingGlassIcon } from "@heroicons/react/20/solid";
import classNames from "classnames";
import Link from "next/link";
const navigation = [
{ name: "Plugin Manager", href: "#", icon: ChartPieIcon, current: true },
];
/* eslint-disable @next/next/no-sync-scripts */
export const Preferences = () => {
const [search, setSearch] = useState<string>("");
const [activePlugins, setActivePlugins] = useState<any[]>([]);
const preferenceRef = useRef(null);
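// Plugin-provided experiment components are appended into this container once activation completes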
useEffect(() => {
async function setupPE() {
// Enable activation point management
setup({
//@ts-ignore
importer: (plugin) =>
import(/* webpackIgnore: true */ plugin).catch((err) => {
console.log(err);
}),
});
// Register all active plugins with their activation points
await plugins.registerActive();
}
const loadActivePlugins = async () => {
const plgs = await plugins.getActive();
setActivePlugins(plgs);
// Activate all plugins and mount their experiment components
setTimeout(async () => {
await activationPoints.trigger("init");
if (extensionPoints.get("experimentComponent")) {
const components = await Promise.all(
extensionPoints.execute("experimentComponent")
);
components.forEach((e) => {
if (preferenceRef.current) {
// @ts-ignore
preferenceRef.current.appendChild(e);
}
});
}
}, 500);
};
setupPE().then(() => loadActivePlugins());
}, []);
// Install a new plugin on clicking the install button
const install = async (e: any) => {
e.preventDefault();
//@ts-ignore
const pluginFile = new FormData(e.target).get("plugin-file").path;
// Send the filename of the to be installed plugin
// to the main process for installation
const installed = await plugins.install([pluginFile]);
if (typeof window !== "undefined") {
window.location.reload();
}
};
// Uninstall a plugin on clicking uninstall
const uninstall = async (name: string) => {
// Send the name of the plugin to be uninstalled
// to the main process for removal
//@ts-ignore
const res = await plugins.uninstall([name]);
console.log(
res
? "Plugin successfully uninstalled"
: "Plugin could not be uninstalled"
);
};
// Update a plugin when its update button is clicked
const update = async (plugin: string) => {
if (typeof window !== "undefined") {
// @ts-ignore
await window.pluggableElectronIpc.update([plugin], true);
}
// plugins.update(active.map((plg) => plg.name));
};
return (
<div className="w-full h-screen overflow-scroll">
{/* Static sidebar for desktop */}
<div className="fixed inset-y-0 z-50 flex w-72 flex-col">
{/* Sidebar component, swap this element with another sidebar if you like */}
<div className="flex grow flex-col gap-y-5 overflow-y-auto bg-gray-900 px-6 pb-4">
<div className="flex h-16 shrink-0 items-center">
<Link href="/">
<img
className="h-8 w-auto"
src="icons/app_icon.svg"
alt="Your Company"
/>
</Link>
</div>
<nav className="flex flex-1 flex-col">
<ul role="list" className="flex flex-1 flex-col gap-y-7">
<li>
<ul role="list" className="-mx-2 space-y-1">
{navigation.map((item) => (
<li key={item.name}>
<a
href={item.href}
className={classNames(
item.current
? "bg-gray-800 text-white"
: "text-gray-400 hover:text-white hover:bg-gray-800",
"group flex gap-x-3 rounded-md p-2 text-sm leading-6 font-semibold"
)}
>
<item.icon
className="h-6 w-6 shrink-0"
aria-hidden="true"
/>
{item.name}
</a>
</li>
))}
</ul>
</li>
<li className="mt-auto">
<a
href="/"
className="group -mx-2 flex gap-x-3 rounded-md p-2 text-sm font-semibold leading-6 text-gray-400 hover:bg-gray-800 hover:text-white"
>
<HomeIcon className="h-6 w-6 shrink-0" aria-hidden="true" />
Home
</a>
</li>
</ul>
</nav>
</div>
</div>
<div className="pl-72 w-full">
<div className="sticky top-0 z-40 flex h-16 shrink-0 items-center gap-x-4 border-b border-gray-200 bg-white shadow-sm sm:gap-x-6 sm:px-6 px-8">
{/* Separator */}
<div className="h-6 w-px bg-gray-900/10 hidden" aria-hidden="true" />
<div className="flex flex-1 self-stretch gap-x-6">
<form className="relative flex flex-1" action="#" method="GET">
<label htmlFor="search-field" className="sr-only">
Search
</label>
<MagnifyingGlassIcon
className="pointer-events-none absolute inset-y-0 left-0 h-full w-5 text-gray-400"
aria-hidden="true"
/>
<input
defaultValue={search}
onChange={(e) => setSearch(e.target.value)}
id="search-field"
className="block h-full w-full border-0 py-0 pl-8 pr-0 text-gray-900 placeholder:text-gray-400 focus:ring-0 sm:text-sm"
placeholder="Search..."
type="search"
name="search"
/>
</form>
</div>
</div>
<main className="py-5">
<div className="sm:px-6 px-8">
{/* Content */}
<div className="flex flex-row items-center my-4">
<ChartPieIcon width={30} />
Install Plugin
</div>
<form id="plugin-file" onSubmit={install}>
<div className="flex flex-row items-center space-x-10">
<div className="flex items-center justify-center w-[300px]">
<label className="flex flex-col items-center justify-center w-full border-2 border-gray-300 border-dashed rounded-lg cursor-pointer bg-gray-50 dark:hover:bg-bray-800 dark:bg-gray-700 hover:bg-gray-100 dark:border-gray-600 dark:hover:border-gray-500 dark:hover:bg-gray-600">
<div className="flex flex-col items-center justify-center pt-5 pb-6">
<p className="mb-2 text-sm text-gray-500 dark:text-gray-400">
<span className="font-semibold">Click to upload</span>{" "}
or drag and drop
</p>
<p className="text-xs text-gray-500 dark:text-gray-400">
TGZ (MAX 50MB)
</p>
</div>
<input
id="dropzone-file"
name="plugin-file"
type="file"
className="hidden"
required
/>
</label>
</div>
<button
type="submit"
className="rounded-md bg-indigo-600 px-3.5 py-2.5 text-sm font-semibold text-white shadow-sm hover:bg-indigo-500 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-indigo-600"
>
Install Plugin
</button>
</div>
</form>
<div className="flex flex-row items-center my-4">
<CommandLineIcon width={30} />
Installed Plugins
</div>
<div className="flex flex-wrap">
{activePlugins
.filter(
(e) =>
search.trim() === "" ||
e.name.toLowerCase().includes(search.toLowerCase())
)
.map((e) => (
<div key={e.name} className="m-2">
<a
href="#"
className="block max-w-sm p-6 bg-white border border-gray-200 rounded-lg shadow dark:bg-gray-800 dark:border-gray-700"
>
<h5 className="mb-2 text-2xl font-bold tracking-tight text-gray-900 dark:text-white">
{e.name}
</h5>
<p className="font-normal text-gray-700 dark:text-gray-400">
Activation: {e.activationPoints}
</p>
<p className="font-normal text-gray-700 dark:text-gray-400">
Url: {e.url}
</p>
<div className="flex flex-row space-x-5">
<button
type="submit"
onClick={() => {
uninstall(e.name);
}}
className="mt-5 rounded-md bg-red-600 px-3.5 py-2.5 text-sm font-semibold text-white shadow-sm hover:bg-red-500 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-red-600"
>
Uninstall
</button>
<button
type="submit"
onClick={() => {
update(e.name);
}}
className="mt-5 rounded-md bg-indigo-600 px-3.5 py-2.5 text-sm font-semibold text-white shadow-sm hover:bg-indigo-500 focus-visible:outline focus-visible:outline-2 focus-visible:outline-offset-2 focus-visible:outline-indigo-600"
>
Update
</button>
</div>
</a>
</div>
))}
</div>
<div className="flex flex-row items-center my-4">
<PlayIcon width={30} />
Test Plugins
</div>
<div className="h-full w-full" ref={preferenceRef}></div>
{/* Content */}
</div>
</main>
</div>
</div>
);
};

@@ -1,53 +0,0 @@
import Image from "next/image";
import Link from "next/link";
const SidebarMenu: React.FC = () => {
const menu = [
{
name: "Chat History",
icon: "ClipboardList",
url: "#",
},
{
name: "Explore Models",
icon: "Search_gray",
url: "#",
},
{
name: "My Models",
icon: "ViewGrid",
url: "#",
},
{
name: "Settings",
icon: "Cog",
url: "/settings",
},
];
return (
<div className="flex-1 flex flex-col justify-end">
<div className="text-gray-500 text-xs font-semibold py-2 pl-2 pr-3">
Your Configurations
</div>
{menu.map((item, index) => (
<div key={index} className="py-2 pl-2 pr-3">
<Link
href={item.url}
className="flex items-center gap-3 text-base text-gray-600"
>
<Image
src={`icons/${item.icon}.svg`}
width={24}
height={24}
alt=""
/>
{item.name}
</Link>
</div>
))}
</div>
);
};
export default SidebarMenu;

@@ -1,25 +0,0 @@
type Props = {
name: string;
total: number;
value: number;
};
const SystemItem: React.FC<Props> = ({ name, total, value }) => {
return (
<div className="border-l border-gray-200 flex gap-2 pl-4">
<div className="flex gap-[10px] p-1 bg-gray-100 text-gray-600 text-[11px] leading-[13px]">
{name}
</div>
<span className="text-gray-500 text-sm">
{toGigabytes(value)} / {toGigabytes(total)}{" "}
{((value / total) * 100).toFixed(2)} %
</span>
</div>
);
};
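// Values are supplied in megabytes; format as GB once the figure exceeds 1000 MB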
const toGigabytes = (input: number) => {
return input > 1000 ? input / 1000 + "GB" : input + "MB";
};
export default SystemItem;

@@ -1,60 +0,0 @@
import Image from "next/image";
import SystemStatus from "../SystemStatus";
import { SidebarButton } from "../SidebarButton";
const Welcome: React.FC = () => {
const data = {
name: "LlaMa 2 - Hermes 7B (Q4_K_M)",
type: "7B",
author: "Architecture Llama",
description:
"Primary intended uses The primary use of LLaMA is research on large language models, including: exploring potential applications such as question answering, natural language understanding or reading comprehension, understanding capabilities and limitations of current language models, and developing techniques to improve those, evaluating and mitigating biases, risks, toxic and harmful content generations, hallucinations.",
isRecommend: true,
storage: 3780,
required: "8GB+ RAM",
};
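// Hard-coded system usage figures (in MB) used as placeholder data for the status bar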
const system = [
{
name: "GPU",
value: 782.2,
total: 14000,
},
{
name: "RAM",
value: 5100,
total: 14000,
},
{
name: "STORAGE",
value: 500000,
total: 1000000,
},
];
return (
<div className="flex flex-col h-full">
<div className="px-[200px] flex-1 flex flex-col gap-5 justify-center items-start">
<Image src={"icons/App_ico.svg"} width={44} height={45} alt="" />
<span className="font-semibold text-gray-500 text-5xl">
Welcome,
<br />
let&apos;s download your first model
</span>
<SidebarButton
className="flex flex-row-reverse items-center rounded-lg gap-2 px-3 py-2 text-xs font-medium border border-gray-200"
icon="icons/arrow-right.svg"
title="Explore models"
height={16}
width={16}
/>
</div>
<div className="px-3 py-2 gap-4 flex items-center justify-center">
<span className="text-gray-500 text-sm">System status</span>
{system.map((item, index) => (
<SystemStatus key={index} {...item} />
))}
</div>
</div>
);
};
export default Welcome;

@@ -1,25 +0,0 @@
// @ts-nocheck
"use client";
import { useSetAtom } from "jotai";
import { ReactNode, useEffect } from "react";
import { modelDownloadStateAtom } from "./JotaiWrapper";
type Props = {
children: ReactNode;
};
export default function EventListenerWrapper({ children }: Props) {
const setDownloadState = useSetAtom(modelDownloadStateAtom);
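// Subscribe to download progress and error events forwarded from the main process over IPC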
useEffect(() => {
window.electronAPI.onModelDownloadUpdate((event, state) => {
setDownloadState(state);
});
window.electronAPI.onModelDownloadError(() => {
// TODO: Show error message
});
}, []);
return <div id="eventlistener">{children}</div>;
}

@@ -1,72 +0,0 @@
// Forward the prompt to the main process over IPC and resolve with the model's reply
const inference = (prompt) =>
new Promise((resolve) => {
if (window.electronAPI) {
window.electronAPI.sendInquiry(prompt).then((response) => resolve(response));
} else {
resolve(undefined);
}
});
async function testInference(e) {
e.preventDefault();
const message = new FormData(e.target).get("message");
const resp = await inference(message);
alert(resp);
}
const getButton = (text, func) => {
var element = document.createElement("button");
element.innerText = text;
// Add styles to the button element
element.style.marginTop = "5px";
element.style.marginRight = "5px";
element.style.borderRadius = "0.375rem"; // Rounded-md
element.style.backgroundColor = "rgb(79, 70, 229)"; // bg-indigo-600
element.style.paddingLeft = "0.875rem"; //
element.style.paddingRight = "0.875rem"; //
element.style.fontSize = "0.875rem"; // text-sm
element.style.fontWeight = "600"; // font-semibold
element.style.color = "white"; // text-white
element.style.height = "40px";
element.style.boxShadow = "0 2px 4px rgba(0, 0, 0, 0.1)"; // shadow-sm
element.addEventListener("click", func);
return element;
};
const experimentComponent = () => {
var parent = document.createElement("div");
const label = document.createElement("p");
label.style.marginTop = "5px";
label.innerText = "Inference Plugin";
parent.appendChild(label);
const form = document.createElement("form");
form.id = "test";
form.style.display = "flex"; // Enable Flexbox
form.style.alignItems = "center"; // Center items horizontally
form.addEventListener("submit", testInference);
const input = document.createElement("input");
input.style.borderRadius = "5px";
input.style.borderColor = "#E5E7EB";
input.style.marginTop = "5px";
input.style.marginRight = "5px";
input.name = "message";
form.appendChild(input);
const button = getButton("Test Inference", null);
button.type = "submit";
button.innerText = "Test Inference";
form.appendChild(button);
parent.appendChild(form);
return parent;
};
// Register all the above functions and objects with the relevant extension points
export function init({ register }) {
register("inference", "inference", inference);
// Experiment UI - for Preferences
register(
"experimentComponent",
"base-plugin-experiment-component",
experimentComponent
);
}

File diff suppressed because it is too large.

@@ -1,26 +0,0 @@
{
"name": "base-plugin",
"version": "2.1.0",
"description": "",
"main": "dist/bundle.js",
"author": "Igor Honhoff",
"license": "MIT",
"activationPoints": [
"init"
],
"scripts": {
"prepare": "webpack --config webpack.config.js",
"package": "rimraf ./base-plugin*.tgz && npm pack"
},
"devDependencies": {
"rimraf": "^3.0.2",
"webpack": "^5.88.2",
"webpack-cli": "^5.1.4"
},
"dependencies": {},
"files": [
"dist/*",
"package.json",
"README.md"
]
}

@@ -1,137 +0,0 @@
// Renderer-side wrappers that invoke the data plugin's main-process functions over IPC
const PLUGIN_NAME = "data-plugin";
const getConversations = () =>
new Promise((resolve) => {
if (window && window.electronAPI) {
window.electronAPI
.invokePluginFunc(PLUGIN_NAME, "getConversations")
.then((res) => resolve(res));
} else {
resolve([]);
}
});
const getConversationMessages = (id) =>
new Promise((resolve) => {
if (window && window.electronAPI) {
window.electronAPI
.invokePluginFunc(PLUGIN_NAME, "getConversationMessages", id)
.then((res) => resolve(res));
} else {
resolve([]);
}
});
const createConversation = (conversation) =>
new Promise((resolve) => {
if (window && window.electronAPI) {
window.electronAPI
.invokePluginFunc(PLUGIN_NAME, "storeConversation", conversation)
.then((res) => {
resolve(res);
});
} else {
resolve("-");
}
});
const createMessage = (message) =>
new Promise((resolve) => {
if (window && window.electronAPI) {
window.electronAPI
.invokePluginFunc(PLUGIN_NAME, "storeMessage", message)
.then((res) => resolve(res));
} else {
resolve("-");
}
});
const deleteConversation = (id) =>
new Promise((resolve) => {
if (window && window.electronAPI) {
window.electronAPI
.invokePluginFunc(PLUGIN_NAME, "deleteConversation", id)
.then((res) => {
resolve(res);
});
} else {
resolve("-");
}
});
const setupDb = () => {
if (window && window.electronAPI) {
window.electronAPI.invokePluginFunc(PLUGIN_NAME, "init");
}
};
const getButton = (text, func) => {
var element = document.createElement("button");
element.innerText = text;
// Add styles to the button element
element.style.marginTop = "5px";
element.style.marginRight = "5px";
element.style.borderRadius = "0.375rem"; // Rounded-md
element.style.backgroundColor = "rgb(79, 70, 229)"; // bg-indigo-600
element.style.padding = "0.875rem 1rem"; // px-3.5 py-2.5
element.style.fontSize = "0.875rem"; // text-sm
element.style.fontWeight = "600"; // font-semibold
element.style.color = "white"; // text-white
element.style.boxShadow = "0 2px 4px rgba(0, 0, 0, 0.1)"; // shadow-sm
element.addEventListener("click", func);
return element;
};
const experimentComponent = () => {
var parent = document.createElement("div");
const label = document.createElement("p");
label.style.marginTop = "5px";
label.innerText = "Data Plugin";
parent.appendChild(label);
const getConvs = getButton("Get Conversation", async () => {
// Define the action you want to perform when the button is clicked
alert(JSON.stringify(await getConversations()));
});
const spawnConv = getButton("Spawn Conversation", async () => {
// Define the action you want to perform when the button is clicked
const id = await createConversation({
name: "test",
model_id: "yolo",
});
alert("A new conversation is created: " + id);
});
const deleteLastConv = getButton("Delete Last Conversation", async () => {
// Define the action you want to perform when the button is clicked
const convs = await getConversations();
await deleteConversation(convs[convs.length - 1].id);
alert("Last conversation is deleted");
});
const spawnMessage = getButton("Spawn Message", async () => {
const convs = await getConversations();
await createMessage({
name: "",
conversation_id: convs[0].id,
message: "yoo",
user: "user",
});
alert("Message is created");
});
parent.appendChild(getConvs);
parent.appendChild(spawnConv);
parent.appendChild(deleteLastConv);
parent.appendChild(spawnMessage);
return parent;
};
// Register all the above functions and objects with the relevant extension points
export function init({ register }) {
setupDb();
register("getConversations", "getConv", getConversations, 1);
register("createConversation", "insertConv", createConversation);
register("deleteConversation", "deleteConv", deleteConversation);
register("createMessage", "insertMessage", createMessage);
register("getConversationMessages", "getMessages", getConversationMessages);
// Experiment UI - for Preferences
register(
"experimentComponent",
"data-plugin-experiment-component",
experimentComponent
);
}

@@ -1,152 +0,0 @@
const sqlite3 = require("sqlite3").verbose();
const path = require("path");
const { app } = require("electron");
function init() {
const db = new sqlite3.Database(path.join(app.getPath("userData"), "jan.db"));
db.serialize(() => {
db.run(
"CREATE TABLE IF NOT EXISTS models ( id INTEGER PRIMARY KEY, name TEXT, image TEXT, url TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, updated_at DATETIME DEFAULT CURRENT_TIMESTAMP);"
);
db.run(
"CREATE TABLE IF NOT EXISTS conversations ( id INTEGER PRIMARY KEY, name TEXT, model_id INTEGER, image TEXT, message TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, updated_at DATETIME DEFAULT CURRENT_TIMESTAMP);"
);
db.run(
"CREATE TABLE IF NOT EXISTS messages ( id INTEGER PRIMARY KEY, name TEXT, conversation_id INTEGER, user TEXT, message TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, updated_at DATETIME DEFAULT CURRENT_TIMESTAMP);"
);
});
db.close();
}
function getConversations() {
return new Promise((res) => {
const db = new sqlite3.Database(
path.join(app.getPath("userData"), "jan.db")
);
db.all(
"SELECT * FROM conversations ORDER BY created_at DESC",
(err, row) => {
res(row);
}
);
db.close();
});
}
function storeConversation(conversation) {
return new Promise((res) => {
const db = new sqlite3.Database(
path.join(app.getPath("userData"), "jan.db")
);
db.serialize(() => {
const stmt = db.prepare(
"INSERT INTO conversations (name, model_id, image, message) VALUES (?, ?, ?, ?)"
);
stmt.run(
conversation.name,
conversation.model_id,
conversation.image,
conversation.message,
function (err) {
if (err) {
// Handle the insertion error here
console.error(err.message);
res(undefined);
return;
}
const id = this.lastID;
console.log(`Record inserted successfully with ID ${id}`);
res(id);
return;
}
);
stmt.finalize();
});
db.close();
});
}
function storeMessage(message) {
return new Promise((res) => {
const db = new sqlite3.Database(
path.join(app.getPath("userData"), "jan.db")
);
db.serialize(() => {
const stmt = db.prepare(
"INSERT INTO messages (name, conversation_id, user, message) VALUES (?, ?, ?, ?)"
);
stmt.run(
message.name,
message.conversation_id,
message.user,
message.message,
function (err) {
if (err) {
// Handle the insertion error here
console.error(err.message);
res(undefined);
return;
}
const id = this.lastID;
console.log(`Record inserted successfully with ID ${id}`);
res(id);
return;
}
);
stmt.finalize();
});
db.close();
});
}
function deleteConversation(id) {
return new Promise((res) => {
const db = new sqlite3.Database(
path.join(app.getPath("userData"), "jan.db")
);
db.serialize(() => {
const deleteConv = db.prepare("DELETE FROM conversations WHERE id = ?");
deleteConv.run(id);
deleteConv.finalize();
const deleteMessages = db.prepare(
"DELETE FROM messages WHERE conversation_id = ?"
);
deleteMessages.run(id);
deleteMessages.finalize();
res([]);
});
db.close();
});
}
function getConversationMessages(conversation_id) {
return new Promise((res) => {
const db = new sqlite3.Database(
path.join(app.getPath("userData"), "jan.db")
);
// Parameterize the conversation id instead of interpolating it into the SQL string
const query =
"SELECT * FROM messages WHERE conversation_id = ? ORDER BY created_at DESC";
db.all(query, [conversation_id], (err, row) => {
res(row);
});
db.close();
});
}
module.exports = {
init,
getConversations,
deleteConversation,
storeConversation,
storeMessage,
getConversationMessages,
};

@@ -1,32 +0,0 @@
{
"name": "data-plugin",
"version": "2.1.0",
"description": "",
"main": "dist/bundle.js",
"author": "Igor Honhoff",
"license": "MIT",
"activationPoints": [
"init"
],
"scripts": {
"prepare": "webpack --config webpack.config.js",
"package": "rimraf ./data-plugin*.tgz && cp -f module.js ./dist/module.js && npm pack"
},
"devDependencies": {
"rimraf": "^3.0.2",
"webpack": "^5.88.2",
"webpack-cli": "^5.1.4"
},
"bundledDependencies": [
"sql.js",
"sqlite3"
],
"files": [
"dist/*",
"package.json",
"README.md"
],
"dependencies": {
"sqlite3": "^5.1.6"
}
}

@@ -1,221 +0,0 @@
// @ts-nocheck
const {
app,
BrowserWindow,
screen: electronScreen,
dialog,
ipcMain,
} = require("electron");
const isDev = require("electron-is-dev");
const path = require("path");
const pe = require("pluggable-electron/main");
const fs = require("fs");
const { mkdir, writeFile } = require("fs/promises");
const { Readable } = require("stream");
const { finished } = require("stream/promises");
const request = require("request");
const progress = require("request-progress");
let modelSession = undefined;
let modelName = "llama-2-7b-chat.gguf.q4_0.bin";
let window;
const createMainWindow = () => {
window = new BrowserWindow({
width: electronScreen.getPrimaryDisplay().workArea.width,
height: electronScreen.getPrimaryDisplay().workArea.height,
show: false,
backgroundColor: "white",
webPreferences: {
nodeIntegration: true,
enableRemoteModule: true,
preload: path.resolve(app.getAppPath(), "electron/preload.js"),
},
});
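// Load the requested plugin's compiled module from the userData plugins folder and invoke the named export with the given arguments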
ipcMain.handle("invokePluginFunc", async (event, plugin, method, ...args) => {
const plg = pe
.getStore()
.getActivePlugins()
.filter((p) => p.name === plugin)[0];
const pluginPath = path.join(
app.getPath("userData"),
"plugins",
plg.name,
"dist/module.js",
);
return await import(
/* webpackIgnore: true */
pluginPath
)
.then((plugin) => {
if (typeof plugin[method] === "function") {
return plugin[method](...args);
} else {
console.log(plugin[method]);
console.error(`Function "${method}" does not exist in the module.`);
}
})
.then((res) => {
return res;
})
.catch((err) => console.log(err));
});
const startURL = isDev
? "http://localhost:3000"
: `file://${path.join(__dirname, "../out/index.html")}`;
window.loadURL(startURL);
window.once("ready-to-show", () => window.show());
window.on("closed", () => {
if (process.platform !== "darwin") app.quit();
});
if (isDev) window.webContents.openDevTools();
};
app.whenReady().then(() => {
createMainWindow();
setupPlugins();
ipcMain.handle("userData", async (event) => {
return path.resolve(__dirname, "../");
});
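// Stream the model file into userData, forward progress events to the renderer, and relaunch the app once the download finishes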
ipcMain.handle("downloadModel", async (event, url) => {
const userDataPath = app.getPath("userData");
const destination = path.resolve(userDataPath, modelName);
progress(request(url), {})
.on("progress", function (state) {
window.webContents.send("model-download-update", {
...state,
modelId: modelName,
});
})
.on("error", function (err) {
window.webContents.send("model-download-error", err);
})
.on("end", function () {
app.relaunch();
app.exit();
// Do something after request finishes
})
.pipe(fs.createWriteStream(destination));
});
ipcMain.handle("deleteModel", async (event, modelFileName) => {
const userDataPath = app.getPath("userData");
const fullPath = path.join(userDataPath, modelFileName);
let result = "NULL";
fs.unlink(fullPath, function (err) {
if (err && err.code == "ENOENT") {
console.info("File doesn't exist, won't remove it.");
result = "FILE_NOT_EXIST";
} else if (err) {
console.error("Error occurred while trying to remove file");
result = "ERROR";
} else {
console.info(`removed`);
result = "REMOVED";
}
});
console.log(result);
return result;
});
// TODO: add options for model configuration
ipcMain.handle("initModel", async (event, product) => {
if (!product.fileName) {
await dialog.showMessageBox({
message: "Selected model does not have file name..",
});
return;
}
console.info(`Initializing model: ${product.name}..`);
import(
isDev
? "../node_modules/node-llama-cpp/dist/index.js"
: path.resolve(
app.getAppPath(),
"./../../app.asar.unpacked/node_modules/node-llama-cpp/dist/index.js",
)
)
.then(({ LlamaContext, LlamaChatSession, LlamaModel }) => {
const modelPath = path.join(app.getPath("userData"), product.fileName);
// TODO: check if file is already there
const model = new LlamaModel({
modelPath: modelPath,
});
const context = new LlamaContext({ model });
modelSession = new LlamaChatSession({ context });
console.info(`Init model ${product.name} successfully!`);
})
.catch(async (e) => {
await dialog.showMessageBox({
message: "Failed to import LLM module",
});
});
});
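// List every .bin model binary found in the userData directory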
ipcMain.handle("getDownloadedModels", async (event) => {
const userDataPath = app.getPath("userData");
const allBinariesName = [];
var files = fs.readdirSync(userDataPath);
for (var i = 0; i < files.length; i++) {
var filename = path.join(userDataPath, files[i]);
var stat = fs.lstatSync(filename);
if (stat.isDirectory()) {
// ignore
} else if (filename.endsWith(".bin")) {
allBinariesName.push(filename);
}
}
return allBinariesName;
});
ipcMain.handle("sendInquiry", async (event, question) => {
if (!modelSession) {
console.error("Model session has not been initialized!");
return;
}
return modelSession.prompt(question);
});
app.on("activate", () => {
if (!BrowserWindow.getAllWindows().length) {
createMainWindow();
}
});
});
app.on("window-all-closed", () => {
if (process.platform !== "darwin") {
app.quit();
}
});
function setupPlugins() {
pe.init({
// Function to check from the main process that user wants to install a plugin
confirmInstall: async (plugins) => {
const answer = await dialog.showMessageBox({
message: `Are you sure you want to install the plugin ${plugins.join(
", ",
)}?`,
buttons: ["Ok", "Cancel"],
cancelId: 1,
});
return answer.response == 0;
},
// Path to install plugin to
pluginsPath: path.join(app.getPath("userData"), "plugins"),
});
}

@@ -1,31 +0,0 @@
/* eslint-disable react-hooks/rules-of-hooks */
// Make Pluggable Electron's facade available to the renderer on window.plugins
const useFacade = require("pluggable-electron/facade");
useFacade();
const { contextBridge, ipcRenderer } = require("electron");
contextBridge.exposeInMainWorld("electronAPI", {
invokePluginFunc: (plugin, method, ...args) =>
ipcRenderer.invoke("invokePluginFunc", plugin, method, ...args),
userData: () => ipcRenderer.invoke("userData"),
sendInquiry: (question) => ipcRenderer.invoke("sendInquiry", question),
initModel: (product) => ipcRenderer.invoke("initModel", product),
getDownloadedModels: () => ipcRenderer.invoke("getDownloadedModels"),
getAvailableModels: () => ipcRenderer.invoke("getAvailableModels"),
deleteModel: (path) => ipcRenderer.invoke("deleteModel", path),
downloadModel: (url) => ipcRenderer.invoke("downloadModel", url),
onModelDownloadUpdate: (callback) =>
ipcRenderer.on("model-download-update", callback),
onModelDownloadError: (callback) =>
ipcRenderer.on("model-download-error", callback),
});

Binary files not shown (6 images removed; sizes: 476 KiB, 542 KiB, 478 KiB, 320 KiB, 175 KiB, 196 KiB)

@@ -1,26 +0,0 @@
CREATE DATABASE "jan-keycloak"
WITH
OWNER = postgres
ENCODING = 'UTF8'
LC_COLLATE = 'en_US.utf8'
LC_CTYPE = 'en_US.utf8'
TABLESPACE = pg_default
CONNECTION LIMIT = -1;
CREATE DATABASE "jan-hasura-data"
WITH
OWNER = postgres
ENCODING = 'UTF8'
LC_COLLATE = 'en_US.utf8'
LC_CTYPE = 'en_US.utf8'
TABLESPACE = pg_default
CONNECTION LIMIT = -1;
CREATE DATABASE "jan-hasura-metadata"
WITH
OWNER = postgres
ENCODING = 'UTF8'
LC_COLLATE = 'en_US.utf8'
LC_CTYPE = 'en_US.utf8'
TABLESPACE = pg_default
CONNECTION LIMIT = -1;

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.