mirror of https://github.com/leon-ai/leon.git synced 2024-09-11 18:27:21 +03:00

feat(server): allow custom LLM duties from skills

louistiti 2024-07-07 18:48:47 +08:00
parent db33126664
commit ac3e61d7b2
No known key found for this signature in database
GPG Key ID: 92CD6A2E497E1669
6 changed files with 140 additions and 228 deletions
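
In short: the dedicated translation and summarization LLM duties are removed, and a generic `custom` duty takes their place, so any skill can define its own duty behavior through a system prompt sent to the existing `/llm-inference` endpoint. A minimal sketch of the new skill-side flow, assuming the payload shape shown in the translator skill diff below; the `@sdk/network` import path and the `request` method name are assumptions, since the wrapping call sits outside the visible hunks:

```ts
// Sketch only: payload shape taken from the translator skill change below.
// `Network` and the env vars appear in that file; the import path and the
// `request` method name are assumptions.
import { Network } from '@sdk/network'

const network = new Network({
  baseURL: `${process.env['LEON_HOST']}:${process.env['LEON_PORT']}/api/v1`
})

await network.request({
  url: '/llm-inference',
  method: 'POST',
  data: {
    dutyType: 'custom', // the new generic duty type added by this commit
    input: 'Text to translate: "How are you?"',
    data: {
      // any behavior a skill wants, expressed as a system prompt
      systemPrompt:
        'You are an AI system that translates a given text to "French" by auto-detecting the source language. You do not add any context to your response.'
    }
  }
})
```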

View File: llm-inference POST route (postLLMInference Fastify plugin)

@@ -3,11 +3,10 @@ import type { FastifyPluginAsync } from 'fastify'
 import type { APIOptions } from '@/core/http-server/http-server'
 import { LLMDuties } from '@/core/llm-manager/types'
 import { CustomNERLLMDuty } from '@/core/llm-manager/llm-duties/custom-ner-llm-duty'
-import { SummarizationLLMDuty } from '@/core/llm-manager/llm-duties/summarization-llm-duty'
-import { TranslationLLMDuty } from '@/core/llm-manager/llm-duties/translation-llm-duty'
 import { ParaphraseLLMDuty } from '@/core/llm-manager/llm-duties/paraphrase-llm-duty'
 import { ConversationLLMDuty } from '@/core/llm-manager/llm-duties/conversation-llm-duty'
 import { ActionRecognitionLLMDuty } from '@/core/llm-manager/llm-duties/action-recognition-llm-duty'
+import { CustomLLMDuty } from '@/core/llm-manager/llm-duties/custom-llm-duty'
 import { LLM_MANAGER } from '@/core'

 interface PostLLMInferenceSchema {
@@ -22,10 +21,9 @@ interface PostLLMInferenceSchema {
 const LLM_DUTIES_MAP = {
   [LLMDuties.ActionRecognition]: ActionRecognitionLLMDuty,
   [LLMDuties.CustomNER]: CustomNERLLMDuty,
-  [LLMDuties.Summarization]: SummarizationLLMDuty,
-  [LLMDuties.Translation]: TranslationLLMDuty,
   [LLMDuties.Paraphrase]: ParaphraseLLMDuty,
-  [LLMDuties.Conversation]: ConversationLLMDuty
+  [LLMDuties.Conversation]: ConversationLLMDuty,
+  [LLMDuties.Custom]: CustomLLMDuty
 }

 export const postLLMInference: FastifyPluginAsync<APIOptions> = async (

View File: @/core/llm-manager/llm-duties/custom-llm-duty.ts (new file)

@@ -0,0 +1,129 @@
import type { LlamaChatSession, LlamaContext } from 'node-llama-cpp'
import {
type LLMDutyParams,
type LLMDutyResult,
LLMDuty
} from '@/core/llm-manager/llm-duty'
import { LogHelper } from '@/helpers/log-helper'
import { LLM_MANAGER, LLM_PROVIDER } from '@/core'
import { LLM_THREADS } from '@/core/llm-manager/llm-manager'
import { LLMProviders, LLMDuties } from '@/core/llm-manager/types'
import { LLM_PROVIDER as LLM_PROVIDER_NAME } from '@/constants'
interface CustomLLMDutyParams extends LLMDutyParams {
data: {
systemPrompt?: string | null
}
}
export class CustomLLMDuty extends LLMDuty {
private static instance: CustomLLMDuty
private static context: LlamaContext = null as unknown as LlamaContext
private static session: LlamaChatSession = null as unknown as LlamaChatSession
protected systemPrompt = ''
protected readonly name = 'Custom LLM Duty'
protected input: LLMDutyParams['input'] = null
protected data = {
systemPrompt: null
} as CustomLLMDutyParams['data']
constructor(params: CustomLLMDutyParams) {
super()
if (!CustomLLMDuty.instance) {
LogHelper.title(this.name)
LogHelper.success('New instance')
CustomLLMDuty.instance = this
}
this.input = params.input
this.data = params.data
}
public async init(): Promise<void> {
if (LLM_PROVIDER_NAME === LLMProviders.Local) {
try {
/**
* Create a new context and session if it doesn't exist or if the system prompt has changed
*/
if (
!CustomLLMDuty.context ||
!CustomLLMDuty.session ||
this.data.systemPrompt !== this.systemPrompt
) {
LogHelper.title(this.name)
LogHelper.info('Initializing...')
if (CustomLLMDuty.context) {
await CustomLLMDuty.context.dispose()
}
if (CustomLLMDuty.session) {
CustomLLMDuty.session.dispose({ disposeSequence: true })
}
this.systemPrompt = this.data.systemPrompt || ''
CustomLLMDuty.context = await LLM_MANAGER.model.createContext({
threads: LLM_THREADS
})
const { LlamaChatSession } = await Function(
'return import("node-llama-cpp")'
)()
CustomLLMDuty.session = new LlamaChatSession({
contextSequence: CustomLLMDuty.context.getSequence(),
autoDisposeSequence: true,
systemPrompt: this.systemPrompt
}) as LlamaChatSession
LogHelper.success('Initialized')
}
} catch (e) {
LogHelper.title(this.name)
LogHelper.error(`Failed to initialize: ${e}`)
}
}
}
public async execute(): Promise<LLMDutyResult | null> {
LogHelper.title(this.name)
LogHelper.info('Executing...')
try {
const prompt = this.input as string
const completionParams = {
dutyType: LLMDuties.Custom,
systemPrompt: this.systemPrompt
}
let completionResult
if (LLM_PROVIDER_NAME === LLMProviders.Local) {
completionResult = await LLM_PROVIDER.prompt(prompt, {
...completionParams,
session: CustomLLMDuty.session,
maxTokens: CustomLLMDuty.context.contextSize
})
} else {
completionResult = await LLM_PROVIDER.prompt(prompt, completionParams)
}
LogHelper.title(this.name)
LogHelper.success('Duty executed')
LogHelper.success(`System prompt — ${this.systemPrompt}`)
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}
usedInputTokens: ${completionResult?.usedInputTokens}
usedOutputTokens: ${completionResult?.usedOutputTokens}`)
return completionResult as unknown as LLMDutyResult
} catch (e) {
LogHelper.title(this.name)
LogHelper.error(`Failed to execute: ${e}`)
}
return null
}
}
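
For orientation, a hypothetical server-side driver for this new duty, assuming the route above has resolved `LLMDuties.Custom` from `LLM_DUTIES_MAP`. The constructor parameters, the `init()`/`execute()` lifecycle, and the `output` field all come from the class as committed; the prompt strings are made up:

```ts
// Hypothetical driver code; CustomLLMDuty, its params, and its lifecycle are
// as defined in the new file above. Example strings are illustrative only.
import { CustomLLMDuty } from '@/core/llm-manager/llm-duties/custom-llm-duty'

const duty = new CustomLLMDuty({
  input: 'Give me a one-line greeting.',
  data: {
    systemPrompt: 'You are a terse assistant.'
  }
})

// With the local provider, this creates (or recreates) the llama.cpp context
// and chat session; a changed system prompt triggers re-initialization
await duty.init()

const result = await duty.execute()
if (result) {
  console.log(result.output)
}
```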

View File: @/core/llm-manager/llm-duties/summarization-llm-duty.ts (deleted)

@@ -1,94 +0,0 @@
import type { LlamaChatSession, LlamaContext } from 'node-llama-cpp'
import {
type LLMDutyParams,
type LLMDutyResult,
LLMDuty
} from '@/core/llm-manager/llm-duty'
import { LogHelper } from '@/helpers/log-helper'
import { LLM_MANAGER, LLM_PROVIDER } from '@/core'
import { LLM_THREADS } from '@/core/llm-manager/llm-manager'
import { LLMProviders, LLMDuties } from '@/core/llm-manager/types'
import { LLM_PROVIDER as LLM_PROVIDER_NAME } from '@/constants'
interface SummarizationLLMDutyParams extends LLMDutyParams {}
export class SummarizationLLMDuty extends LLMDuty {
private static instance: SummarizationLLMDuty
private static context: LlamaContext = null as unknown as LlamaContext
private static session: LlamaChatSession = null as unknown as LlamaChatSession
protected readonly systemPrompt =
'You are an AI system that summarizes a given text in a few sentences. You do not add any context to your response.'
protected readonly name = 'Summarization LLM Duty'
protected input: LLMDutyParams['input'] = null
constructor(params: SummarizationLLMDutyParams) {
super()
if (!SummarizationLLMDuty.instance) {
LogHelper.title(this.name)
LogHelper.success('New instance')
SummarizationLLMDuty.instance = this
}
this.input = params.input
}
public async init(): Promise<void> {
if (LLM_PROVIDER_NAME === LLMProviders.Local) {
if (!SummarizationLLMDuty.context || !SummarizationLLMDuty.session) {
SummarizationLLMDuty.context = await LLM_MANAGER.model.createContext({
threads: LLM_THREADS
})
const { LlamaChatSession } = await Function(
'return import("node-llama-cpp")'
)()
SummarizationLLMDuty.session = new LlamaChatSession({
contextSequence: SummarizationLLMDuty.context.getSequence(),
systemPrompt: this.systemPrompt
}) as LlamaChatSession
}
}
}
public async execute(): Promise<LLMDutyResult | null> {
LogHelper.title(this.name)
LogHelper.info('Executing...')
try {
const prompt = `Summarize the following text: ${this.input}`
const completionParams = {
dutyType: LLMDuties.Summarization,
systemPrompt: this.systemPrompt
}
let completionResult
if (LLM_PROVIDER_NAME === LLMProviders.Local) {
completionResult = await LLM_PROVIDER.prompt(prompt, {
...completionParams,
session: SummarizationLLMDuty.session,
maxTokens: SummarizationLLMDuty.context.contextSize
})
} else {
completionResult = await LLM_PROVIDER.prompt(prompt, completionParams)
}
LogHelper.title(this.name)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}
usedInputTokens: ${completionResult?.usedInputTokens}
usedOutputTokens: ${completionResult?.usedOutputTokens}`)
return completionResult as unknown as LLMDutyResult
} catch (e) {
LogHelper.title(this.name)
LogHelper.error(`Failed to execute: ${e}`)
}
return null
}
}
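
With this dedicated duty gone, a skill can presumably reproduce the same behavior through the new `custom` duty. A hedged sketch, reusing the system prompt and prompt template from the deleted file above; `network` (as constructed in the translator skill below) and `textToSummarize` are stand-ins:

```ts
// Hypothetical migration: the deleted summarization duty re-expressed as a
// skill-side 'custom' duty call. The system prompt and prompt template are
// copied from the removed file; everything else is illustrative.
const textToSummarize = 'Leon is an open-source personal assistant.'

await network.request({
  url: '/llm-inference',
  method: 'POST',
  data: {
    dutyType: 'custom',
    input: `Summarize the following text: ${textToSummarize}`,
    data: {
      systemPrompt:
        'You are an AI system that summarizes a given text in a few sentences. You do not add any context to your response.'
    }
  }
})
```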

View File: @/core/llm-manager/llm-duties/translation-llm-duty.ts (deleted)

@@ -1,121 +0,0 @@
import type { LlamaChatSession, LlamaContext } from 'node-llama-cpp'
import {
type LLMDutyParams,
type LLMDutyResult,
LLMDuty
} from '@/core/llm-manager/llm-duty'
import { LogHelper } from '@/helpers/log-helper'
import { LLM_MANAGER, LLM_PROVIDER } from '@/core'
import { LLM_THREADS } from '@/core/llm-manager/llm-manager'
import { LLMProviders, LLMDuties } from '@/core/llm-manager/types'
import { LLM_PROVIDER as LLM_PROVIDER_NAME } from '@/constants'
interface TranslationLLMDutyParams extends LLMDutyParams {
data: {
source?: string | null
target: string | null
autoDetectLanguage?: boolean
}
}
export class TranslationLLMDuty extends LLMDuty {
private static instance: TranslationLLMDuty
private static context: LlamaContext = null as unknown as LlamaContext
private static session: LlamaChatSession = null as unknown as LlamaChatSession
protected readonly systemPrompt: LLMDutyParams['systemPrompt'] = `You are an AI system that does translation. You do not add any context to your response. You only provide the translation without any additional information.`
protected readonly name = 'Translation LLM Duty'
protected input: LLMDutyParams['input'] = null
protected data = {
source: null,
target: null,
autoDetectLanguage: false
} as TranslationLLMDutyParams['data']
constructor(params: TranslationLLMDutyParams) {
super()
if (!TranslationLLMDuty.instance) {
LogHelper.title(this.name)
LogHelper.success('New instance')
TranslationLLMDuty.instance = this
}
this.input = params.input
this.data = params.data
const promptSuffix = 'You do not add any context to your response.'
if (this.data.autoDetectLanguage && !this.data.source) {
this.systemPrompt = `You are an AI system that translates a given text to "${this.data.target}" by auto-detecting the source language. ${promptSuffix}`
} else {
this.systemPrompt = `You are an AI system that translates a given text from "${this.data.source}" to "${this.data.target}". ${promptSuffix}`
}
}
public async init(): Promise<void> {
if (LLM_PROVIDER_NAME === LLMProviders.Local) {
if (!TranslationLLMDuty.context || !TranslationLLMDuty.session) {
TranslationLLMDuty.context = await LLM_MANAGER.model.createContext({
threads: LLM_THREADS
})
const { LlamaChatSession } = await Function(
'return import("node-llama-cpp")'
)()
TranslationLLMDuty.session = new LlamaChatSession({
contextSequence: TranslationLLMDuty.context.getSequence(),
systemPrompt: this.systemPrompt
}) as LlamaChatSession
}
}
}
public async execute(): Promise<LLMDutyResult | null> {
LogHelper.title(this.name)
LogHelper.info('Executing...')
try {
let prompt
if (this.data.autoDetectLanguage && !this.data.source) {
prompt = `Translate the given text to "${this.data.target}" by auto-detecting the source language.`
} else {
prompt = `Translate the given text from "${this.data.source}" to "${this.data.target}".`
}
prompt += `\nText to translate: "${this.input}"`
const completionParams = {
dutyType: LLMDuties.Translation,
systemPrompt: this.systemPrompt as string
}
let completionResult
if (LLM_PROVIDER_NAME === LLMProviders.Local) {
completionResult = await LLM_PROVIDER.prompt(prompt, {
...completionParams,
session: TranslationLLMDuty.session,
maxTokens: TranslationLLMDuty.context.contextSize
})
} else {
completionResult = await LLM_PROVIDER.prompt(prompt, completionParams)
}
LogHelper.title(this.name)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}
usedInputTokens: ${completionResult?.usedInputTokens}
usedOutputTokens: ${completionResult?.usedOutputTokens}`)
return completionResult as unknown as LLMDutyResult
} catch (e) {
LogHelper.title(this.name)
LogHelper.error(`Failed to execute: ${e}`)
}
return null
}
}

View File: @/core/llm-manager/types.ts

@@ -4,11 +4,10 @@ import type { MessageLog } from '@/types'
 export enum LLMDuties {
   ActionRecognition = 'action-recognition',
-  CustomNER = 'customer-ner',
-  Translation = 'translation',
-  Summarization = 'summarization',
+  CustomNER = 'custom-ner',
   Paraphrase = 'paraphrase',
-  Conversation = 'conversation'
+  Conversation = 'conversation',
+  Custom = 'custom'
   // TODO
   /*SentimentAnalysis = 'sentiment-analysis',
   QuestionAnswering = 'question-answering',

View File: translator skill action (run)

@@ -8,6 +8,8 @@ export const run: ActionFunction = async function (params) {
   const network = new Network({
     baseURL: `${process.env['LEON_HOST']}:${process.env['LEON_PORT']}/api/v1`
   })
+  const systemPrompt = `You are an AI system that translates a given text to "${targetLanguage}" by auto-detecting the source language. You do not add any context to your response.`
+  const prompt = `Text to translate: "${textToTranslate}"`

   /**
    * TODO: create SDK methods to handle request and response for every LLM duty
@@ -16,11 +18,10 @@ export const run: ActionFunction = async function (params) {
     url: '/llm-inference',
     method: 'POST',
     data: {
-      dutyType: 'translation',
-      input: textToTranslate,
+      dutyType: 'custom',
+      input: prompt,
       data: {
-        target: targetLanguage,
-        autoDetectLanguage: true
+        systemPrompt
       }
     }
   })
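
The duties' logging shows the completion result carrying `output`, `usedInputTokens`, and `usedOutputTokens`. A sketch of skill-side handling, assuming the request above is assigned to `response` and that the endpoint echoes those fields back; the actual `/llm-inference` response envelope is not part of this diff:

```ts
// Assumed response shape, inferred from the duties' success logs; verify
// against the actual /llm-inference handler before relying on it.
const { output, usedInputTokens, usedOutputTokens } = response.data
console.log(
  `Translation: ${output} (${usedInputTokens} input / ${usedOutputTokens} output tokens)`
)
```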