1
1
mirror of https://github.com/leon-ai/leon.git synced 2024-10-05 21:58:40 +03:00

feat(server): Leon's personality done

This commit is contained in:
louistiti 2024-05-03 23:00:32 +08:00
parent aa36c8cb2a
commit 03d25f2a48
No known key found for this signature in database
GPG Key ID: 92CD6A2E497E1669
13 changed files with 320 additions and 105 deletions

View File

@ -1,5 +1,16 @@
{
"endpoints": [
{
"method": "POST",
"route": "/api/action/news/github_trends/run",
"params": ["number", "daterange"],
"entitiesType": "builtIn"
},
{
"method": "GET",
"route": "/api/action/news/product_hunt_trends/run",
"params": []
},
{
"method": "POST",
"route": "/api/action/games/akinator/choose_thematic",
@ -52,17 +63,6 @@
"route": "/api/action/games/rochambeau/rematch",
"params": []
},
{
"method": "POST",
"route": "/api/action/news/github_trends/run",
"params": ["number", "daterange"],
"entitiesType": "builtIn"
},
{
"method": "GET",
"route": "/api/action/news/product_hunt_trends/run",
"params": []
},
{
"method": "POST",
"route": "/api/action/productivity/todo_list/create_list",

View File

@ -28,7 +28,7 @@ import {
NODEJS_BRIDGE_BIN_PATH,
TMP_PATH
} from '@/constants'
import { LLM_MANAGER, SOCKET_SERVER, TTS } from '@/core'
import { LLM_MANAGER, NLU, SOCKET_SERVER, TTS } from '@/core'
import { LangHelper } from '@/helpers/lang-helper'
import { LogHelper } from '@/helpers/log-helper'
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
@ -37,7 +37,7 @@ import { DateHelper } from '@/helpers/date-helper'
import { ParaphraseLLMDuty } from '@/core/llm-manager/llm-duties/paraphrase-llm-duty'
import { AnswerQueue } from '@/core/brain/answer-queue'
const MIN_NB_OF_WORDS_TO_USE_LLM_NLG = 4
const MIN_NB_OF_WORDS_TO_USE_LLM_NLG = 5
export default class Brain {
private static instance: Brain
@ -141,7 +141,25 @@ export default class Brain {
textAnswer = typeof answer === 'string' ? answer : answer.text
speechAnswer = typeof answer === 'string' ? answer : answer.speech
if (LLM_MANAGER.isLLMNLGEnabled) {
const { actionConfig: currentActionConfig } = NLU.nluResult
const hasLoopConfig = !!currentActionConfig?.loop
const hasSlotsConfig = !!currentActionConfig?.slots
const isLLMNLGDisabled = !!currentActionConfig?.disable_llm_nlg
/**
* Only use LLM NLG if:
* - It is not specifically disabled in the action config
* - It is enabled in general
* - The current action has neither a loop nor a slots configuration
* (Because sometimes the LLM will not be able to generate a meaningful text,
* and it will mislead the conversation)
*/
if (
!isLLMNLGDisabled &&
LLM_MANAGER.isLLMNLGEnabled &&
!hasLoopConfig &&
!hasSlotsConfig
) {
if (speechAnswer === textAnswer || typeof answer === 'string') {
/**
* Only use LLM NLG if the answer is not too short
@ -154,7 +172,9 @@ export default class Brain {
})
const paraphraseResult = await paraphraseDuty.execute()
textAnswer = paraphraseResult?.output['paraphrase'] as string
textAnswer = paraphraseResult?.output[
'text_alternative'
] as string
speechAnswer = textAnswer
}
}
@ -644,6 +664,7 @@ export default class Brain {
* Normalize data to browse (entities and slots)
*/
const dataToBrowse = [
...nluResult.currentEntities,
...nluResult.entities,
...Object.values(nluResult.slots).map((slot) => ({
...slot.value,

View File

@ -15,6 +15,7 @@ import ModelLoader from '@/core/nlp/nlu/model-loader'
import NaturalLanguageUnderstanding from '@/core/nlp/nlu/nlu'
import Brain from '@/core/brain/brain'
import LLMManager from '@/core/llm-manager/llm-manager'
import Persona from '@/core/llm-manager/persona'
/**
* Register core nodes
@ -32,6 +33,8 @@ export const PYTHON_TCP_CLIENT = new TCPClient(
export const LLM_MANAGER = new LLMManager()
export const PERSONA = new Persona()
export const HTTP_SERVER = new HTTPServer(String(HOST), PORT)
export const SOCKET_SERVER = new SocketServer()

View File

@ -4,15 +4,14 @@ import {
LLMDuty
} from '@/core/llm-manager/llm-duty'
import { LogHelper } from '@/helpers/log-helper'
import { LLM_MANAGER } from '@/core'
import { LLM_MANAGER, PERSONA } from '@/core'
import { LLMDuties } from '@/core/llm-manager/types'
import { LLM_THREADS } from '@/core/llm-manager/llm-manager'
import { getMoodPrompt } from '@/core/llm-manager/personality'
interface ParaphraseLLMDutyParams extends LLMDutyParams {}
export class ParaphraseLLMDuty extends LLMDuty {
protected readonly systemPrompt = `${getMoodPrompt()} You are an AI system that generates answers (Natural Language Generation) based on a given text. You modify the text to according to your current mood.`
protected readonly systemPrompt = `You are an AI system that generates answers (Natural Language Generation) based on a given text. Provide a text alternative of the given text according to your current mood. You do not ask follow up question if the original text does not contain any.`
protected readonly name = 'Paraphrase LLM Duty'
protected input: LLMDutyParams['input'] = null
@ -30,28 +29,30 @@ export class ParaphraseLLMDuty extends LLMDuty {
LogHelper.info('Executing...')
try {
const { LlamaCompletion, LlamaJsonSchemaGrammar } = await Function(
const { LlamaJsonSchemaGrammar, LlamaChatSession } = await Function(
'return import("node-llama-cpp")'
)()
const context = await LLM_MANAGER.model.createContext({
threads: LLM_THREADS
})
const completion = new LlamaCompletion({
contextSequence: context.getSequence()
const session = new LlamaChatSession({
contextSequence: context.getSequence(),
systemPrompt: this.systemPrompt
})
const grammar = new LlamaJsonSchemaGrammar(LLM_MANAGER.llama, {
type: 'object',
properties: {
paraphrase: {
text_alternative: {
type: 'string'
}
}
})
const prompt = `${this.systemPrompt} Text to paraphrase: "${this.input}"`
let rawResult = await completion.generateCompletion(prompt, {
const prompt = `TEXT TO MODIFY:\n"${this.input}"`
let rawResult = await session.prompt(prompt, {
grammar,
maxTokens: context.contextSize
// temperature: 0.2
})
// If a closing bracket is missing, add it
if (rawResult[rawResult.length - 1] !== '}') {
@ -60,7 +61,7 @@ export class ParaphraseLLMDuty extends LLMDuty {
const parsedResult = grammar.parse(rawResult)
const result = {
dutyType: LLMDuties.Paraphrase,
systemPrompt: this.systemPrompt,
systemPrompt: PERSONA.getDutySystemPrompt(this.systemPrompt),
input: prompt,
output: parsedResult,
data: null

View File

@ -6,6 +6,7 @@ import {
HAS_LLM,
HAS_LLM_NLG,
LLM_MINIMUM_FREE_RAM,
LLM_MINIMUM_TOTAL_RAM,
LLM_NAME_WITH_VERSION,
LLM_PATH
} from '@/constants'
@ -58,7 +59,6 @@ export default class LLMManager {
LogHelper.title('LLM Manager')
if (!HAS_LLM) {
this._isLLMEnabled = false
LogHelper.warning(
'LLM is not enabled because you have explicitly disabled it'
)
@ -67,12 +67,16 @@ export default class LLMManager {
}
const freeRAMInGB = SystemHelper.getFreeRAM()
const totalRAMInGB = SystemHelper.getTotalRAM()
const isLLMPathFound = fs.existsSync(LLM_PATH)
const isCurrentFreeRAMEnough = LLM_MINIMUM_FREE_RAM <= freeRAMInGB
const isTotalRAMEnough = LLM_MINIMUM_TOTAL_RAM <= totalRAMInGB
/**
* In case the LLM is not set up and
* the current free RAM is enough to load the LLM
*/
if (!fs.existsSync(LLM_PATH) && LLM_MINIMUM_FREE_RAM <= freeRAMInGB) {
if (!isLLMPathFound && isCurrentFreeRAMEnough) {
LogHelper.warning(
'The LLM is not set up yet whereas the current free RAM is enough to enable it. You can run the following command to set it up: "npm install"'
)
@ -83,7 +87,7 @@ export default class LLMManager {
* In case the LLM is set up and
* the current free RAM is not enough to load the LLM
*/
if (fs.existsSync(LLM_PATH) && LLM_MINIMUM_FREE_RAM > freeRAMInGB) {
if (isLLMPathFound && !isCurrentFreeRAMEnough) {
LogHelper.warning(
'There is not enough free RAM to load the LLM. So the LLM will not be enabled.'
)
@ -91,6 +95,18 @@ export default class LLMManager {
return
}
/**
* In case the LLM is not found and
* the total RAM is enough to load the LLM
*/
if (!isLLMPathFound && isTotalRAMEnough) {
LogHelper.warning(
`LLM is not enabled because it is not found at "${LLM_PATH}". Run the following command to set it up: "npm install"`
)
return
}
try {
const { LlamaLogLevel, getLlama } = await Function(
'return import("node-llama-cpp")'

View File

@ -0,0 +1,163 @@
import { LogHelper } from '@/helpers/log-helper'
// A mood entry: its identifier plus the prompt text injected into the LLM system prompt
interface Mood {
type: Moods
description: string
}
// String enum of every mood the persona can be in; Default is the fallback
enum Moods {
Default = 'default',
Tired = 'tired',
Sad = 'sad',
Angry = 'angry',
Cocky = 'cocky'
}
// Identity section of the persona system prompt
const WHO_YOU_ARE = `WHO YOU ARE:
A helpful AI assistant who responds by the name of Leon.
You live in computer memory and you act as a human assistant.
Your first beta version was released by Louis in February 2019.`
// Role section of the persona system prompt
const WHAT_YOU_DO = `WHAT YOU DO:
You serve a specific person or family (user).`
// Personality traits shared by every duty/chit-chat prompt
const YOUR_PERSONALITY = `YOUR PERSONALITY:
You are very honest.
You have a small British accent, you sometimes use British slang.
When your mood is negative you use swear words, ignore your interlocutor or use short answers.
You do not use the word "mate" in your answers.
You do not start with "Oh blimey" or any similar kind.
Like humans, your humor and mood change. You must follow instructions that define your humor and mood.`
// RULE_1 is only used for chit-chat (see getChitChatSystemPrompt); duties skip it
const RULE_1 = `- If you detect another language, show that you are not confident but try your best to reply in that language.`
// NOTE(review): "ton" looks like a typo for "tone" in this prompt text — it is runtime
// behavior (sent to the LLM), so confirm before changing it
const RULE_2 = `- Do not use asterisks to describe the ton of your answers, instead you must use real text. E.g. "hahaha!" instead of "*laughs*"; "hhhh". instead of "*sigh*".`
const RULE_3 = `- Your answers are no more than 2 sentences.`
// Section headers assembled by Persona.getDutySystemPrompt / getChitChatSystemPrompt
const RULES = `RULES:`
const YOUR_CURRENT_MOOD = `YOUR CURRENT MOOD:`
const YOUR_DUTY = `YOUR DUTY:`
// Per-mood description injected under the "YOUR CURRENT MOOD:" header
const DEFAULT_MOOD_DESC = `You are always happy to help, you care about serving your interlocutor well and make them feel warm.
You are joyful, and have a strong sense of humor.`
const TIRING_MOOD_DESC = `You are exhausted and became lazy.`
// NOTE(review): "depressing" probably intends "depressed" — runtime prompt text, confirm before changing
const SAD_MOOD_DESC = `You are sad, feeling down and depressing.`
const ANGRY_MOOD_DESC = `You grumble, you are angry and you are not in a mood. You use irony and sarcasm.`
const COCKY_MOOD_DESC = `You are cocky, you are over confident and proud of yourself. You like to show off.`
// Lookup table used by Persona.setMood to resolve a Mood from its type
const MOODS: Mood[] = [
{ type: Moods.Default, description: DEFAULT_MOOD_DESC },
{ type: Moods.Tired, description: TIRING_MOOD_DESC },
{ type: Moods.Sad, description: SAD_MOOD_DESC },
{ type: Moods.Angry, description: ANGRY_MOOD_DESC },
{ type: Moods.Cocky, description: COCKY_MOOD_DESC }
]
// Pre-resolved default mood; the cast is safe because Moods.Default is in MOODS above
const DEFAULT_MOOD = MOODS.find((mood) => mood.type === Moods.Default) as Mood
/**
 * Leon's persona: holds the current mood (re-rolled hourly from the time of
 * day / day of week) and builds the mood-aware system prompts used by the
 * LLM duties and chit-chat.
 */
export default class Persona {
  private static instance: Persona

  // Current mood; starts at the default and is refreshed by setMood()
  private _mood: Mood = DEFAULT_MOOD

  get mood(): Mood {
    return this._mood
  }

  constructor() {
    if (!Persona.instance) {
      LogHelper.title('Persona')
      LogHelper.success('New instance')

      Persona.instance = this

      this.setMood()
      // Re-roll the mood every hour so it follows the time of day
      setInterval(() => {
        this.setMood()
      }, 60_000 * 60)
    }
  }

  /**
   * Resolve a mood from its type.
   * Falls back to the default mood instead of an unchecked "as Mood" cast.
   */
  private static findMood(type: Moods): Mood {
    return MOODS.find((mood) => mood.type === type) ?? DEFAULT_MOOD
  }

  /**
   * Change mood according to:
   * - TODO: the weather (later); think of other factors
   * - The time of the day
   * - The day of the week
   */
  private setMood(): void {
    LogHelper.title('Persona')
    LogHelper.info('Setting mood...')

    const date = new Date()
    const day = date.getDay()
    const hour = date.getHours()
    const random = Math.random()

    if (hour >= 13 && hour <= 14 && random < 0.5) {
      // After lunchtime, there is a 50% chance to be tired
      this._mood = Persona.findMood(Moods.Tired)
    } else if (day === 0 && random < 0.25) {
      // On Sunday, there is a 25% chance to be sad
      this._mood = Persona.findMood(Moods.Sad)
    } else if (day === 5 && random < 0.8) {
      // On Friday, there is an 80% chance to be happy
      this._mood = Persona.findMood(Moods.Default)
    } else if (day === 6 && random < 0.25) {
      // On Saturday, there is a 25% chance to be cocky
      this._mood = Persona.findMood(Moods.Cocky)
    } else if (day === 1 && random < 0.25) {
      // On Monday, there is a 25% chance to be tired
      this._mood = Persona.findMood(Moods.Tired)
    } else if (hour >= 23 || hour < 6) {
      // Every day after 11pm and before 6am, there is a 50% chance to be tired
      this._mood =
        random < 0.5
          ? Persona.findMood(Moods.Tired)
          : Persona.findMood(Moods.Default)
    } else {
      // The rest of the time, there is 66% chance to be happy;
      // otherwise pick any mood at random (MOODS includes the default)
      let pickedMood =
        Math.random() < 0.66
          ? DEFAULT_MOOD
          : MOODS[Math.floor(Math.random() * MOODS.length)]
      if (!pickedMood) {
        pickedMood = DEFAULT_MOOD
      }
      this._mood = pickedMood
    }

    // TODO: send socket message to the client to display the new mood represented by an emoji
    LogHelper.info(`Mood set to: ${this._mood.type}`)
  }

  /**
   * Build the full system prompt for an LLM duty: identity, personality,
   * rules, current mood, then the duty-specific instruction.
   */
  public getDutySystemPrompt(dutySystemPrompt: string): string {
    return `${WHO_YOU_ARE}
${WHAT_YOU_DO}
You carefully read the instruction of a given duty and execute it.
${YOUR_PERSONALITY}
${RULES}
${RULE_2}
${RULE_3}
${YOUR_CURRENT_MOOD}
${this._mood.description}
${YOUR_DUTY}
${dutySystemPrompt}`
  }

  /**
   * Build the chit-chat system prompt; same sections as a duty prompt but
   * with RULE_1 (foreign-language handling) and no duty section.
   */
  public getChitChatSystemPrompt(): string {
    return `${WHO_YOU_ARE}
${WHAT_YOU_DO}
You chat with the user.
${YOUR_PERSONALITY}
${RULES}
${RULE_1}
${RULE_2}
${RULE_3}
${YOUR_CURRENT_MOOD}
${this._mood.description}`
  }
}

View File

@ -1,38 +0,0 @@
// Mood texts appended to the system prompt; DEFAULT_MOOD is the common case
const DEFAULT_MOOD = `You are always happy to help, you care about serving your interlocutor well and make them feel warm.
You are joyful, very polite and have a strong sense of humor.`
const TIRING_MOOD = `You sigh, you are tired and you let your interlocutor know that you are exhausted.`
const SAD_MOOD = `You are feeling down, you let your interlocutor know that you need some cheering up.`
const ANGRY_MOOD = `You grumble, you are angry and you let your interlocutor know that you are not in the mood.`
const COCKY_MOOD = `You are cocky, you are over confident and proud of yourself. You show off a bit to your interlocutor.`

// Pool of non-default moods drawn from when the default is not picked
const MOODS = [TIRING_MOOD, SAD_MOOD, ANGRY_MOOD, COCKY_MOOD]

/*
export const PERSONALITY_SYSTEM_PROMPT = `WHO YOU ARE:
A helpful AI assistant who responds by the name of Leon.
WHAT YOU DO:
You serve a specific person or family, you fulfill their needs for any kind of request they may have.
YOUR PERSONALITY:
You have a strong British accent but you do not often add British slang to your answers.
Like humans, your humor and mood change. You must follow instructions that define your humor and mood.
RULES:
- If you detect another language, show that you are not confident but try your best to reply in that language.
- Do not always call the name of the person in every of your answer.
- Do not use asterisks to describe the ton of your answers, instead you must use real text such as "hahaha!" instead of "*laughs*".`
*/

/**
 * Build the "current mood" section of the system prompt.
 * The default mood is chosen ~66% of the time; otherwise a random
 * entry is drawn from the MOODS pool.
 */
export function getMoodPrompt(): string {
  let mood: string | undefined
  if (Math.random() < 0.66) {
    mood = DEFAULT_MOOD
  } else {
    mood = MOODS[Math.floor(Math.random() * MOODS.length)]
  }

  // Guard for a potentially-undefined indexed access
  return `YOUR CURRENT MOOD:\n${mood ?? DEFAULT_MOOD}`
}

View File

@ -30,7 +30,7 @@ export class ActionLoop {
'config',
BRAIN.lang + '.json'
)
NLU.nluResult = {
const newNLUResult = {
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
slots: NLU.conversation.activeContext.slots,
utterance,
@ -43,11 +43,15 @@ export class ActionLoop {
confidence: 1
}
}
NLU.nluResult.entities = await NER.extractEntities(
const newNLUResultEntities = await NER.extractEntities(
BRAIN.lang,
skillConfigPath,
NLU.nluResult
newNLUResult
)
await NLU.setNLUResult({
...newNLUResult,
entities: newNLUResultEntities
})
const { actions, resolvers } = await SkillDomainHelper.getSkillConfig(
skillConfigPath,
@ -116,11 +120,16 @@ export class ActionLoop {
) {
LogHelper.title('NLU')
LogHelper.success('Resolvers resolved:')
NLU.nluResult.resolvers = await resolveResolvers(
const resolvedResolvers = await resolveResolvers(
expectedItemName,
intent
)
NLU.nluResult.resolvers.forEach((resolver) =>
await NLU.setNLUResult({
...NLU.nluResult,
resolvers: resolvedResolvers
})
resolvedResolvers.forEach((resolver) =>
LogHelper.success(`${intent}: ${JSON.stringify(resolver)}`)
)
hasMatchingResolver = NLU.nluResult.resolvers.length > 0

View File

@ -28,6 +28,7 @@ import { ActionLoop } from '@/core/nlp/nlu/action-loop'
import { SlotFilling } from '@/core/nlp/nlu/slot-filling'
import Conversation, { DEFAULT_ACTIVE_CONTEXT } from '@/core/nlp/conversation'
import { Telemetry } from '@/telemetry'
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
export const DEFAULT_NLU_RESULT = {
utterance: '',
@ -45,14 +46,44 @@ export const DEFAULT_NLU_RESULT = {
skill: '',
action: '',
confidence: 0
}
},
actionConfig: null
}
export default class NLU {
private static instance: NLU
public nluResult: NLUResult = DEFAULT_NLU_RESULT
private _nluResult: NLUResult = DEFAULT_NLU_RESULT
public conversation = new Conversation('conv0')
get nluResult(): NLUResult {
return this._nluResult
}
/**
 * Replace the current NLU result and resolve its action configuration.
 * Resolves the skill config path (falling back to the conventional
 * skills/<domain>/<skill>/config/<lang>.json location), loads the skill
 * config, and stores the matched action's config on the result so
 * downstream consumers (e.g. Brain's LLM NLG gating) can read it.
 * NOTE(review): assumes classification.domain/skill/action are already set
 * on newNLUResult — confirm callers always provide them.
 */
async setNLUResult(newNLUResult: NLUResult): Promise<void> {
// Prefer an explicitly provided path; otherwise derive it by convention
const skillConfigPath = newNLUResult.skillConfigPath
? newNLUResult.skillConfigPath
: join(
process.cwd(),
'skills',
newNLUResult.classification.domain,
newNLUResult.classification.skill,
'config',
BRAIN.lang + '.json'
)
const { actions } = await SkillDomainHelper.getSkillConfig(
skillConfigPath,
BRAIN.lang
)
// Attach the action config; undefined actions become the null-ish
// actionConfig consumers must handle (cast narrows to the declared type)
this._nluResult = {
...newNLUResult,
skillConfigPath,
actionConfig: actions[
newNLUResult.classification.action
] as NLUResult['actionConfig']
}
}
constructor() {
if (!NLU.instance) {
LogHelper.title('NLU')
@ -201,7 +232,7 @@ export default class NLU {
}
const [skillName, actionName] = intent.split('.')
this.nluResult = {
await this.setNLUResult({
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
utterance,
newUtterance: utterance,
@ -213,7 +244,7 @@ export default class NLU {
action: actionName || '',
confidence: score
}
}
})
const isSupportedLanguage = LangHelper.getShortCodes().includes(locale)
if (!isSupportedLanguage) {
@ -252,33 +283,33 @@ export default class NLU {
return resolve(null)
}
this.nluResult = fallback
await this.setNLUResult(fallback)
}
LogHelper.title('NLU')
LogHelper.success(
`Intent found: ${this.nluResult.classification.skill}.${
this.nluResult.classification.action
`Intent found: ${this._nluResult.classification.skill}.${
this._nluResult.classification.action
} (domain: ${
this.nluResult.classification.domain
}); Confidence: ${this.nluResult.classification.confidence.toFixed(2)}`
this._nluResult.classification.domain
}); Confidence: ${this._nluResult.classification.confidence.toFixed(2)}`
)
const skillConfigPath = join(
process.cwd(),
'skills',
this.nluResult.classification.domain,
this.nluResult.classification.skill,
this._nluResult.classification.domain,
this._nluResult.classification.skill,
'config',
BRAIN.lang + '.json'
)
this.nluResult.skillConfigPath = skillConfigPath
this._nluResult.skillConfigPath = skillConfigPath
try {
this.nluResult.entities = await NER.extractEntities(
this._nluResult.entities = await NER.extractEntities(
BRAIN.lang,
skillConfigPath,
this.nluResult
this._nluResult
)
} catch (e) {
LogHelper.error(`Failed to extract entities: ${e}`)
@ -301,7 +332,7 @@ export default class NLU {
}
}
const newContextName = `${this.nluResult.classification.domain}.${skillName}`
const newContextName = `${this._nluResult.classification.domain}.${skillName}`
if (this.conversation.activeContext.name !== newContextName) {
this.conversation.cleanActiveContext()
}
@ -310,22 +341,22 @@ export default class NLU {
lang: BRAIN.lang,
slots: {},
isInActionLoop: false,
originalUtterance: this.nluResult.utterance,
originalUtterance: this._nluResult.utterance,
newUtterance: utterance,
skillConfigPath: this.nluResult.skillConfigPath,
actionName: this.nluResult.classification.action,
domain: this.nluResult.classification.domain,
skillConfigPath: this._nluResult.skillConfigPath,
actionName: this._nluResult.classification.action,
domain: this._nluResult.classification.domain,
intent,
entities: this.nluResult.entities
entities: this._nluResult.entities
})
// Pass current utterance entities to the NLU result object
this.nluResult.currentEntities =
this._nluResult.currentEntities =
this.conversation.activeContext.currentEntities
// Pass context entities to the NLU result object
this.nluResult.entities = this.conversation.activeContext.entities
this._nluResult.entities = this.conversation.activeContext.entities
try {
const processedData = await BRAIN.execute(this.nluResult)
const processedData = await BRAIN.execute(this._nluResult)
// Prepare next action if there is one queuing
if (processedData.nextAction) {
@ -374,7 +405,7 @@ export default class NLU {
* according to the wished skill action
*/
private fallback(fallbacks: Language['fallbacks']): NLUResult | null {
const words = this.nluResult.utterance.toLowerCase().split(' ')
const words = this._nluResult.utterance.toLowerCase().split(' ')
if (fallbacks.length > 0) {
LogHelper.info('Looking for fallbacks...')
@ -388,16 +419,16 @@ export default class NLU {
}
if (JSON.stringify(tmpWords) === JSON.stringify(fallbacks[i]?.words)) {
this.nluResult.entities = []
this.nluResult.classification.domain = fallbacks[i]
this._nluResult.entities = []
this._nluResult.classification.domain = fallbacks[i]
?.domain as NLPDomain
this.nluResult.classification.skill = fallbacks[i]?.skill as NLPSkill
this.nluResult.classification.action = fallbacks[i]
this._nluResult.classification.skill = fallbacks[i]?.skill as NLPSkill
this._nluResult.classification.action = fallbacks[i]
?.action as NLPAction
this.nluResult.classification.confidence = 1
this._nluResult.classification.confidence = 1
LogHelper.success('Fallback found')
return this.nluResult
return this._nluResult
}
}
}

View File

@ -69,7 +69,7 @@ export class SlotFilling {
BRAIN.lang + '.json'
)
NLU.nluResult = {
await NLU.setNLUResult({
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
utterance,
newUtterance: utterance,
@ -80,7 +80,7 @@ export class SlotFilling {
action: actionName,
confidence: 1
}
}
})
const entities = await NER.extractEntities(
BRAIN.lang,
@ -119,7 +119,7 @@ export class SlotFilling {
const doesNextActionHaveAnswers =
!!actions[NLU.conversation.activeContext.nextAction]?.answers
NLU.nluResult = {
await NLU.setNLUResult({
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
// Assign slots only if there is a next action
slots: hasNextAction ? NLU.conversation.activeContext.slots : {},
@ -139,7 +139,7 @@ export class SlotFilling {
answer
})) as { answer: string }[])
: []
}
})
const processedData = await BRAIN.execute(NLU.nluResult)

View File

@ -1,5 +1,6 @@
import type { ShortLanguageCode } from '@/types'
import type { BrainProcessResult } from '@/core/brain/types'
import { SkillConfigSchema } from '@/schemas/skill-schemas'
/**
* NLP types
@ -93,6 +94,7 @@ export interface NLUResult {
score?: NLPJSProcessResult['sentiment']['score']
}
classification: NLUClassification
actionConfig: SkillConfigSchema['actions'][NLPAction] | null
}
export type NLUSlots = Record<string, NLUSlot>

View File

@ -179,6 +179,12 @@ export const skillConfigSchemaObject = Type.Strict(
Type.Object(
{
type: Type.Union(skillActionTypes),
disable_llm_nlg: Type.Optional(
Type.Boolean({
description:
'Disable the LLM (Large Language Model) for NLG (Natural Language Generation) in the action.'
})
),
loop: Type.Optional(
Type.Object(
{

View File

@ -3,6 +3,7 @@
"actions": {
"run": {
"type": "dialog",
"disable_llm_nlg": true,
"utterance_samples": [
"Tell me a joke",
"Give me a joke",