Louis 942f2f51b7
chore: send chat completion with messages history (#5070)
* chore: send chat completion with messages history

* chore: handle abort controllers

* chore: change max attempts setting

* chore: handle stop running models in system monitor screen

* Update web-app/src/services/models.ts

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* chore: format time

* chore: handle stop model load action

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
2025-05-22 20:13:50 +07:00

58 lines
1.4 KiB
TypeScript

import { events } from '../../events'
import { BaseExtension } from '../../extension'
import { MessageRequest, Model, ModelEvent } from '../../../types'
import { EngineManager } from './EngineManager'
/**
 * Base AIEngine
 * Applicable to all AI Engines
 */
export abstract class AIEngine extends BaseExtension {
  // Identifier of the inference engine this extension provides; compared
  // against `model.engine` to decide whether a lifecycle event is handled here.
  abstract provider: string

  /**
   * On extension load, register this engine and subscribe to the model
   * lifecycle events it must react to.
   */
  override onLoad() {
    this.registerEngine()
    events.on(ModelEvent.OnModelInit, (model: Model) => this.loadModel(model))
    events.on(ModelEvent.OnModelStop, (model: Model) => this.unloadModel(model))
  }

  /**
   * Registers this engine instance with the global EngineManager.
   */
  registerEngine() {
    EngineManager.instance().register(this)
  }

  /**
   * Loads the model.
   *
   * The base implementation only emits `OnModelReady`; concrete engines
   * override this to actually start the model.
   *
   * @param model - The model to load. Silently ignored when its `engine`
   *   does not match this provider.
   * @param abortController - Optional controller to cancel the load; unused
   *   by the base implementation, available to overriding engines.
   * @returns A promise that resolves once the ready event has been emitted.
   */
  async loadModel(model: Partial<Model>, abortController?: AbortController): Promise<any> {
    // Another engine owns this model — do nothing.
    if (model?.engine?.toString() !== this.provider) return
    events.emit(ModelEvent.OnModelReady, model)
  }

  /**
   * Stops the model.
   *
   * @param model - The model to stop. When omitted, the stop is treated as
   *   applying to this engine (an empty object is emitted). When provided
   *   with a different engine, the event is ignored.
   * @returns A promise that resolves once the stopped event has been emitted.
   */
  async unloadModel(model?: Partial<Model>): Promise<any> {
    // Only skip when a model is given AND it belongs to another engine;
    // an undefined model deliberately falls through to the emit below.
    if (model?.engine && model.engine.toString() !== this.provider) return
    events.emit(ModelEvent.OnModelStopped, model ?? {})
  }

  /**
   * Inference request hook. No-op in the base class; engines override
   * this to perform the actual completion.
   */
  inference(data: MessageRequest) {}

  /**
   * Stop-inference hook. No-op in the base class; engines override this
   * to cancel an in-flight request.
   */
  stopInference() {}
}