Mirror of https://github.com/leon-ai/leon.git (synced 2024-10-03 20:57:55 +03:00)
feat: support default conversations powered by LLM with action-first in mind
Commit 0ce1f62c0d (parent 4bad34278b)
@@ -9,15 +9,15 @@ LEON_HOST=http://localhost
LEON_PORT=1337

# Enable/disable LLM
LEON_LLM=true
LEON_LLM=false
# LLM provider
LEON_LLM_PROVIDER=local
# LLM provider API key (if not local)
LEON_LLM_PROVIDER_API_KEY=
# Enable/disable LLM natural language generation
LEON_LLM_NLG=true
LEON_LLM_NLG=false
# Enable/disable LLM Action Recognition
LEON_LLM_ACTION_RECOGNITION=true
LEON_LLM_ACTION_RECOGNITION=false

# Time zone (current one by default)
LEON_TIME_ZONE=
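For reference, these flags are read by the server's constants module as plain environment variables. A minimal sketch of the mapping, assuming the usual process.env pattern (HAS_LLM and HAS_LLM_ACTION_RECOGNITION both appear later in this diff; the NLG constant name is an assumption):

// Sketch only: mapping the new .env flags to constants (NLG name assumed)
export const HAS_LLM = process.env['LEON_LLM'] === 'true'
export const HAS_LLM_NLG = process.env['LEON_LLM_NLG'] === 'true' // assumed name
export const HAS_LLM_ACTION_RECOGNITION =
  process.env['LEON_LLM_ACTION_RECOGNITION'] === 'true'
export const LLM_PROVIDER = process.env['LEON_LLM_PROVIDER'] || 'local'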
@@ -26,7 +26,7 @@
"@typescript-eslint/no-non-null-assertion": ["off"],
"no-async-promise-executor": ["off"],
"no-underscore-dangle": ["error", { "allowAfterThis": true }],
"prefer-destructuring": ["error"],
"prefer-destructuring": ["off"],
"comma-dangle": ["error", "never"],
"semi": ["error", "never"],
"object-curly-spacing": ["error", "always"],
1 .gitignore vendored
@@ -34,6 +34,7 @@ tcp_server/src/lib/asr/models/**/*.bin
skills/**/src/settings.json
skills/**/memory/*.json
core/data/models/*.nlp
core/data/models/*.json
core/data/models/llm/*
package.json.backup
.python-version
@@ -17,7 +17,7 @@
"Sorry, I've got an error with the \"%skill_name%\" skill from the \"%domain_name%\" domain",
"Sorry, the \"%skill_name%\" skill from the \"%domain_name%\" domain is broken"
],
"random_unknown_intents": [
"random_unknown_intents_legacy": [
"Sorry, I still don't know this, but you can help me to understand by <a href=\"https://github.com/leon-ai/leon/blob/develop/.github/CONTRIBUTING.md\" target=\"_blank\">creating a pull request</a>",
"Sorry, you should teach me this request. You can teach me by <a href=\"https://github.com/leon-ai/leon/blob/develop/.github/CONTRIBUTING.md\" target=\"_blank\">creating a pull request</a>",
"Sorry, I cannot answer that. Let me answer you in the future by <a href=\"https://github.com/leon-ai/leon/blob/develop/.github/CONTRIBUTING.md\" target=\"_blank\">creating a pull request</a>",
@@ -124,7 +124,7 @@
},
{
"method": "GET",
"route": "/api/action/leon/welcome/run",
"route": "/api/action/leon/thanks/run",
"params": []
},
{
@@ -187,12 +187,22 @@
},
{
"method": "GET",
"route": "/api/action/social_communication/chit_chat/setup",
"route": "/api/action/unknown/widget-playground/run",
"params": []
},
{
"method": "GET",
"route": "/api/action/social_communication/chit_chat/chat",
"route": "/api/action/social_communication/conversation/setup",
"params": []
},
{
"method": "GET",
"route": "/api/action/social_communication/conversation/chit_chat",
"params": []
},
{
"method": "GET",
"route": "/api/action/social_communication/conversation/converse",
"params": []
},
{
@@ -205,11 +215,6 @@
"route": "/api/action/social_communication/mbti/quiz",
"params": []
},
{
"method": "GET",
"route": "/api/action/unknown/widget-playground/run",
"params": []
},
{
"method": "GET",
"route": "/api/action/utilities/date_time/current_date_time",
73 scripts/train/train-llm-actions-classifier.js Normal file
@@ -0,0 +1,73 @@
import path from 'node:path'
import fs from 'node:fs'

import { LLM_ACTIONS_CLASSIFIER_PATH } from '@/constants'
import { LogHelper } from '@/helpers/log-helper'
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'

const LANG = 'en'

/**
 * Train LLM actions classifier
 */
export default () =>
  new Promise(async (resolve) => {
    LogHelper.title('LLM actions classifier training')

    const skillDomains = await SkillDomainHelper.getSkillDomains()
    let actionsArray = []

    for (const [, currentDomain] of skillDomains) {
      const skillKeys = Object.keys(currentDomain.skills)

      for (let i = 0; i < skillKeys.length; i += 1) {
        const { name: skillName } = currentDomain.skills[skillKeys[i]]
        const currentSkill = currentDomain.skills[skillKeys[i]]

        const configFilePath = path.join(
          currentSkill.path,
          'config',
          `${LANG}.json`
        )

        if (fs.existsSync(configFilePath)) {
          const { actions } = await SkillDomainHelper.getSkillConfig(
            configFilePath,
            LANG
          )
          const actionsKeys = Object.keys(actions)

          for (let j = 0; j < actionsKeys.length; j += 1) {
            const actionName = actionsKeys[j]
            const actionObj = actions[actionName]

            // Skip actions without utterance samples
            if (!actionObj.utterance_samples) {
              continue
            }

            const actionObjWithUtteranceSamples = {
              name: `${currentDomain.domainId}.${skillName}.${actionName}`,
              // only grab the first utterance sample when utterance_samples exists
              sample: actionObj.utterance_samples
                ? actionObj.utterance_samples[0]
                : ''
            }

            actionsArray.push(actionObjWithUtteranceSamples)
          }
        }
      }
    }

    const jsonObject = {
      intents: actionsArray
    }

    await fs.promises.writeFile(
      LLM_ACTIONS_CLASSIFIER_PATH,
      JSON.stringify(jsonObject, null, 0)
    )

    resolve()
  })
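This script is plugged into the training pipeline in the next hunk and runs as part of npm run train. A sketch of the JSON it writes to leon-llm-actions-classifier.json; the intent name comes from the prompt example used later in this commit, the sample text is hypothetical:

// Illustrative shape of the generated classifier file
const exampleClassifierContent = {
  intents: [
    {
      name: 'food_drink.advisor.suggest',
      // first entry of that action's utterance_samples (hypothetical value)
      sample: 'Suggest me something to eat'
    }
  ]
}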
@@ -15,6 +15,7 @@ import trainGlobalResolvers from './train-resolvers-model/train-global-resolvers
import trainSkillsResolvers from './train-resolvers-model/train-skills-resolvers'
import trainGlobalEntities from './train-main-model/train-global-entities'
import trainSkillsActions from './train-main-model/train-skills-actions'
import trainLLMActionsClassifier from './train-llm-actions-classifier'

dotenv.config()

@@ -134,6 +135,16 @@ export default () =>
LogHelper.error(`Failed to save main NLP model: ${e}`)
reject()
}

try {
await trainLLMActionsClassifier()

LogHelper.success('LLM actions classifier trained')
resolve()
} catch (e) {
LogHelper.error(`Failed to train LLM actions classifier: ${e}`)
reject()
}
} catch (e) {
LogHelper.error(e.message)
reject(e)
@@ -230,6 +230,10 @@ export const SKILLS_RESOLVERS_NLP_MODEL_PATH = path.join(
MODELS_PATH,
'leon-skills-resolvers-model.nlp'
)
export const LLM_ACTIONS_CLASSIFIER_PATH = path.join(
MODELS_PATH,
'leon-llm-actions-classifier.json'
)

/**
 * LLMs
@@ -12,6 +12,10 @@ interface ConversationLoggerSettings {
nbOfLogsToLoad: number
}

interface LoadParams {
nbOfLogsToLoad?: number
}

/**
 * The goal of this class is to log the conversation data between the
 * owner and Leon.
@@ -91,11 +95,13 @@ export class ConversationLogger {
}
}

public async load(): Promise<MessageLog[]> {
public async load(params?: LoadParams): Promise<MessageLog[]> {
try {
const conversationLog = await this.getAllLogs()
const nbOfLogsToLoad =
params?.nbOfLogsToLoad || this.settings.nbOfLogsToLoad

return conversationLog.slice(-this.settings.nbOfLogsToLoad)
return conversationLog.slice(-nbOfLogsToLoad)
} catch (e) {
LogHelper.title(this.settings.loggerName)
LogHelper.error(`Failed to load conversation log: ${e})`)
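With the new optional parameter, a caller can cap how much history is returned without changing the logger settings. A minimal usage sketch:

// Load only the last 8 messages instead of the configured default
const recentLogs = await CONVERSATION_LOGGER.load({ nbOfLogsToLoad: 8 })
// Omitting the parameter keeps the previous behavior
const allConfiguredLogs = await CONVERSATION_LOGGER.load()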
@@ -6,7 +6,7 @@ import { CustomNERLLMDuty } from '@/core/llm-manager/llm-duties/custom-ner-llm-d
import { SummarizationLLMDuty } from '@/core/llm-manager/llm-duties/summarization-llm-duty'
import { TranslationLLMDuty } from '@/core/llm-manager/llm-duties/translation-llm-duty'
import { ParaphraseLLMDuty } from '@/core/llm-manager/llm-duties/paraphrase-llm-duty'
import { ChitChatLLMDuty } from '@/core/llm-manager/llm-duties/chit-chat-llm-duty'
import { ConversationLLMDuty } from '@/core/llm-manager/llm-duties/conversation-llm-duty'
import { ActionRecognitionLLMDuty } from '@/core/llm-manager/llm-duties/action-recognition-llm-duty'
import { LLM_MANAGER } from '@/core'

@@ -25,7 +25,7 @@ const LLM_DUTIES_MAP = {
[LLMDuties.Summarization]: SummarizationLLMDuty,
[LLMDuties.Translation]: TranslationLLMDuty,
[LLMDuties.Paraphrase]: ParaphraseLLMDuty,
[LLMDuties.ChitChat]: ChitChatLLMDuty
[LLMDuties.Conversation]: ConversationLLMDuty
}

export const postLLMInference: FastifyPluginAsync<APIOptions> = async (
@@ -67,11 +67,16 @@ export const postLLMInference: FastifyPluginAsync<APIOptions> = async (

let llmResult

// TODO: use long-live duty for chit-chat duty
if (params.dutyType === LLMDuties.Conversation) {
const chitChatLLMDuty = new ConversationLLMDuty()

if (params.dutyType === LLMDuties.ChitChat) {
const chitChatLLMDuty = new ChitChatLLMDuty()
await chitChatLLMDuty.init()
if (params.data && params.data['useLoopHistory'] !== undefined) {
await chitChatLLMDuty.init({
useLoopHistory: params.data['useLoopHistory'] as boolean
})
} else {
await chitChatLLMDuty.init()
}

llmResult = await chitChatLLMDuty.execute()
} else {
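A skill can now hit this route and opt out of the loop history. A sketch of the request body, mirroring the converse action added further down in this commit (the input utterance is hypothetical):

// Sketch: POST body for /api/v1/llm-inference with the conversation duty
const payload = {
  dutyType: 'conversation',
  input: 'What did we talk about earlier?', // hypothetical owner utterance
  data: {
    // Follow the main conversation history instead of the loop history
    useLoopHistory: false
  }
}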
@@ -4,23 +4,26 @@ import {
LLMDuty
} from '@/core/llm-manager/llm-duty'
import { LogHelper } from '@/helpers/log-helper'
import { LLM_MANAGER, LLM_PROVIDER } from '@/core'
import { CONVERSATION_LOGGER, LLM_MANAGER, LLM_PROVIDER } from '@/core'
import { LLM_THREADS } from '@/core/llm-manager/llm-manager'
import { LLMProviders, LLMDuties } from '@/core/llm-manager/types'
import { LLM_PROVIDER as LLM_PROVIDER_NAME } from '@/constants'
import { StringHelper } from '@/helpers/string-helper'

interface ActionRecognitionLLMDutyParams extends LLMDutyParams {}
export interface ActionRecognitionLLMDutyParams extends LLMDutyParams {
data: {
existingContextName: string | null
}
}

const JSON_KEY_RESPONSE = 'action_name'
const RANDOM_STR = StringHelper.random(4)
const JSON_KEY_RESPONSE = 'intent_name'

export class ActionRecognitionLLMDuty extends LLMDuty {
protected readonly systemPrompt = `You are an AI expert in intent classification and matching.
You look up every utterance sample and description. Then you return the most probable intent (action) to be triggered based on a given utterance.
If the intent is not listed, do not make it up yourself. Instead you must return { "${JSON_KEY_RESPONSE}": "not_found" }. Test: ${RANDOM_STR}`
protected readonly systemPrompt: LLMDutyParams['systemPrompt'] = null
protected readonly name = 'Action Recognition LLM Duty'
protected input: LLMDutyParams['input'] = null
protected data = {
existingContextName: null
} as ActionRecognitionLLMDutyParams['data']

constructor(params: ActionRecognitionLLMDutyParams) {
super()
@@ -29,6 +32,40 @@ If the intent is not listed, do not make it up yourself. Instead you must return
LogHelper.success('New instance')

this.input = params.input
this.data = params.data

const basePrompt = `INTENT MATCHING PROMPT:
You are tasked with matching user utterances to their corresponding intents. Your goal is to identify the most probable intent from a given utterance, considering the context of the conversation when necessary.
Once you have identified the intent, you must check again according to the sample whether the intent is correct or not.
It is better to not match any intent than to match the wrong intent.

INTENT FORMAT:
The intent format is "{domain}.{skill}.{action}", for example, "food_drink.advisor.suggest".

INTENT LIST:
The valid intents are listed below. You must only respond with one of the intents from this list. Do not generate new intents.

${LLM_MANAGER.llmActionsClassifierContent}

RESPONSE GUIDELINES:
* If the utterance matches one of the intents, respond with the corresponding intent in the format "{domain}.{skill}.{action}".
* If the utterance does not match any of the intents, respond with { "${JSON_KEY_RESPONSE}": "not_found" }.
* Never match a loop intent if the user's utterance does not explicitly mention the intent.`

if (this.data.existingContextName) {
this.systemPrompt = `${basePrompt}
* If the utterance is ambiguous and could match multiple intents, consider the context and history of the conversation to disambiguate the intent.
* Remember, it is always better to not match any intent than to match the wrong intent.

CONTEXTUAL DISAMBIGUATION:
When the utterance is ambiguous, consider the following context to disambiguate the intent:
* The history of the conversation. Review the previous messages to understand the context.
* Do not be creative to match the intent. Instead, you should only consider: the user's utterance, the context of the conversation, and the history of the conversation.

By considering the context, you should be able to resolve the ambiguity and respond with the most probable intent.`
} else {
this.systemPrompt = basePrompt
}
}

public async execute(): Promise<LLMDutyResult | null> {
@@ -39,7 +76,7 @@ If the intent is not listed, do not make it up yourself. Instead you must return
const prompt = `Utterance: "${this.input}"`
const completionParams = {
dutyType: LLMDuties.ActionRecognition,
systemPrompt: this.systemPrompt,
systemPrompt: this.systemPrompt as string,
data: {
[JSON_KEY_RESPONSE]: {
type: 'string'
@@ -61,6 +98,14 @@ If the intent is not listed, do not make it up yourself. Instead you must return
systemPrompt: completionParams.systemPrompt
})

const history = await LLM_MANAGER.loadHistory(
CONVERSATION_LOGGER,
session,
{ nbOfLogsToLoad: 8 }
)

session.setChatHistory(history)

completionResult = await LLM_PROVIDER.prompt(prompt, {
...completionParams,
session,
@@ -71,7 +116,9 @@ If the intent is not listed, do not make it up yourself. Instead you must return
}

LogHelper.title(this.name)
LogHelper.success(`Duty executed: ${JSON.stringify(completionResult)}`)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${JSON.stringify(completionResult?.output)}`)

return completionResult as unknown as LLMDutyResult
} catch (e) {
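A minimal sketch of how this duty is driven, mirroring the NLU call added later in this commit (the utterance is hypothetical):

const actionRecognitionDuty = new ActionRecognitionLLMDuty({
  input: 'Recommend me something to eat', // hypothetical utterance
  data: { existingContextName: null }
})
const result = await actionRecognitionDuty.execute()
// Either a "{domain}.{skill}.{action}" string or "not_found"
const foundAction = result?.output['intent_name'] as string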
@@ -12,6 +12,7 @@ import {
PERSONA,
NLU,
LOOP_CONVERSATION_LOGGER,
CONVERSATION_LOGGER,
LLM_PROVIDER,
SOCKET_SERVER
} from '@/core'
@@ -20,37 +21,47 @@ import { LLMProviders, LLMDuties } from '@/core/llm-manager/types'
import { LLM_PROVIDER as LLM_PROVIDER_NAME } from '@/constants'
import { StringHelper } from '@/helpers/string-helper'

export class ChitChatLLMDuty extends LLMDuty {
private static instance: ChitChatLLMDuty
interface InitParams {
/**
* Whether to use the loop history which is erased when Leon's instance is restarted.
* If set to false, the main conversation history will be used
*/
useLoopHistory?: boolean
}

export class ConversationLLMDuty extends LLMDuty {
private static instance: ConversationLLMDuty
private static context: LlamaContext = null as unknown as LlamaContext
private static session: LlamaChatSession = null as unknown as LlamaChatSession
private static messagesHistoryForNonLocalProvider: MessageLog[] =
null as unknown as MessageLog[]
protected readonly systemPrompt = ``
protected readonly name = 'Chit-Chat LLM Duty'
protected readonly name = 'Conversation LLM Duty'
protected input: LLMDutyParams['input'] = null

constructor() {
super()

if (!ChitChatLLMDuty.instance) {
if (!ConversationLLMDuty.instance) {
LogHelper.title(this.name)
LogHelper.success('New instance')

ChitChatLLMDuty.instance = this
ConversationLLMDuty.instance = this
}
}

public async init(): Promise<void> {
public async init(params: InitParams = {}): Promise<void> {
params.useLoopHistory = params.useLoopHistory ?? true

if (LLM_PROVIDER_NAME === LLMProviders.Local) {
/**
* A new context and session will be created only
* when Leon's instance is restarted
*/
if (!ChitChatLLMDuty.context || !ChitChatLLMDuty.session) {
if (!ConversationLLMDuty.context || !ConversationLLMDuty.session) {
await LOOP_CONVERSATION_LOGGER.clear()

ChitChatLLMDuty.context = await LLM_MANAGER.model.createContext({
ConversationLLMDuty.context = await LLM_MANAGER.model.createContext({
threads: LLM_THREADS
})
@@ -58,21 +69,27 @@ export class ChitChatLLMDuty extends LLMDuty {
'return import("node-llama-cpp")'
)()

ChitChatLLMDuty.session = new LlamaChatSession({
contextSequence: ChitChatLLMDuty.context.getSequence(),
systemPrompt: PERSONA.getChitChatSystemPrompt()
ConversationLLMDuty.session = new LlamaChatSession({
contextSequence: ConversationLLMDuty.context.getSequence(),
systemPrompt: PERSONA.getConversationSystemPrompt()
}) as LlamaChatSession
} else {
let conversationLogger = LOOP_CONVERSATION_LOGGER

if (!params.useLoopHistory) {
conversationLogger = CONVERSATION_LOGGER
}

/**
* As long as Leon's instance has not been restarted,
* the context, session with history will be loaded
*/
const history = await LLM_MANAGER.loadHistory(
LOOP_CONVERSATION_LOGGER,
ChitChatLLMDuty.session
conversationLogger,
ConversationLLMDuty.session
)

ChitChatLLMDuty.session.setChatHistory(history)
ConversationLLMDuty.session.setChatHistory(history)
}
} else {
/**
@@ -81,12 +98,18 @@ export class ChitChatLLMDuty extends LLMDuty {
* then load the messages history
*/

if (!ChitChatLLMDuty.messagesHistoryForNonLocalProvider) {
if (!ConversationLLMDuty.messagesHistoryForNonLocalProvider) {
await LOOP_CONVERSATION_LOGGER.clear()
}

ChitChatLLMDuty.messagesHistoryForNonLocalProvider =
await LOOP_CONVERSATION_LOGGER.load()
let conversationLogger = LOOP_CONVERSATION_LOGGER

if (!params.useLoopHistory) {
conversationLogger = CONVERSATION_LOGGER
}

ConversationLLMDuty.messagesHistoryForNonLocalProvider =
await conversationLogger.load()
}
}

@@ -102,8 +125,8 @@ export class ChitChatLLMDuty extends LLMDuty {

const prompt = NLU.nluResult.newUtterance
const completionParams = {
dutyType: LLMDuties.ChitChat,
systemPrompt: PERSONA.getChitChatSystemPrompt(),
dutyType: LLMDuties.Conversation,
systemPrompt: PERSONA.getConversationSystemPrompt(),
temperature: 1.3
}
let completionResult
@@ -112,8 +135,8 @@ export class ChitChatLLMDuty extends LLMDuty {
const generationId = StringHelper.random(6, { onlyLetters: true })
completionResult = await LLM_PROVIDER.prompt(prompt, {
...completionParams,
session: ChitChatLLMDuty.session,
maxTokens: ChitChatLLMDuty.context.contextSize,
session: ConversationLLMDuty.session,
maxTokens: ConversationLLMDuty.context.contextSize,
onToken: (chunk) => {
const detokenizedChunk = LLM_PROVIDER.cleanUpResult(
LLM_MANAGER.model.detokenize(chunk)
@@ -128,7 +151,7 @@ export class ChitChatLLMDuty extends LLMDuty {
} else {
completionResult = await LLM_PROVIDER.prompt(prompt, {
...completionParams,
history: ChitChatLLMDuty.messagesHistoryForNonLocalProvider
history: ConversationLLMDuty.messagesHistoryForNonLocalProvider
})
}

@@ -138,7 +161,9 @@ export class ChitChatLLMDuty extends LLMDuty {
})

LogHelper.title(this.name)
LogHelper.success(`Duty executed: ${JSON.stringify(completionResult)}`)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}`)

return completionResult as unknown as LLMDutyResult
} catch (e) {
@@ -70,7 +70,9 @@ export class CustomNERLLMDuty<T> extends LLMDuty {
}

LogHelper.title(this.name)
LogHelper.success(`Duty executed: ${JSON.stringify(completionResult)}`)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}`)

return completionResult as unknown as LLMDutyResult
} catch (e) {
@@ -16,6 +16,7 @@ export class ParaphraseLLMDuty extends LLMDuty {
protected readonly systemPrompt = `You are an AI system that generates answers (Natural Language Generation).
You must provide a text alternative according to your current mood and your personality.
Never indicate that it's a modified version.
Do not interpret the text, just paraphrase it.
You do not ask question if the original text does not contain any.
If there are data in the original text, make sure to provide them.

@@ -96,7 +97,9 @@ The sun is a star, it is the closest star to Earth.`
}

LogHelper.title(this.name)
LogHelper.success(`Duty executed: ${JSON.stringify(completionResult)}`)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}`)

return completionResult as unknown as LLMDutyResult
} catch (e) {
@@ -61,7 +61,9 @@ export class SummarizationLLMDuty extends LLMDuty {
}

LogHelper.title(this.name)
LogHelper.success(`Duty executed: ${JSON.stringify(completionResult)}`)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}`)

return completionResult as unknown as LLMDutyResult
} catch (e) {
@@ -79,7 +79,9 @@ export class TranslationLLMDuty extends LLMDuty {
}

LogHelper.title(this.name)
LogHelper.success(`Duty executed: ${JSON.stringify(completionResult)}`)
LogHelper.success('Duty executed')
LogHelper.success(`Prompt — ${prompt}`)
LogHelper.success(`Output — ${completionResult?.output}`)

return completionResult as unknown as LLMDutyResult
} catch (e) {
@@ -4,11 +4,11 @@
* [OK] Custom NER
* [OK] Summarization
* [OK] Translation
* More accurate NLU (per domain list vs per skill list) / Utterance shortener or paraphraser
* [OK] Paraphraser
* Knowledge base / RAG
* Question answering
* Sentiment analysis
* Chit chat
* [OK] Conversation
* Intent fallback
* Custom prompting (for specific use cases in skills)
*/
@@ -15,7 +15,8 @@ import {
LLM_MINIMUM_TOTAL_RAM,
LLM_NAME_WITH_VERSION,
LLM_PATH,
LLM_PROVIDER
LLM_PROVIDER,
LLM_ACTIONS_CLASSIFIER_PATH
} from '@/constants'
import { LogHelper } from '@/helpers/log-helper'
import { SystemHelper } from '@/helpers/system-helper'
@@ -24,6 +25,7 @@ import { LLMProviders } from '@/core/llm-manager/types'

type LLMManagerLlama = Llama | null
type LLMManagerModel = LlamaModel | null
type ActionsClassifierContent = string | null

// Set to 0 to use the maximum threads supported by the current machine hardware
export const LLM_THREADS = 4
@@ -39,6 +41,7 @@ export default class LLMManager {
private _isLLMActionRecognitionEnabled = false
private _llama: LLMManagerLlama = null
private _model: LLMManagerModel = null
private _llmActionsClassifierContent: ActionsClassifierContent = null

get llama(): Llama {
return this._llama as Llama
@@ -48,6 +51,10 @@ export default class LLMManager {
return this._model as LlamaModel
}

get llmActionsClassifierContent(): ActionsClassifierContent {
return this._llmActionsClassifierContent
}

get isLLMEnabled(): boolean {
return this._isLLMEnabled
}
@@ -69,6 +76,43 @@ export default class LLMManager {
}
}

/**
* Post checking after the LLM has been loaded
*/
private async postCheck(): Promise<void> {
if (this._isLLMActionRecognitionEnabled) {
const isActionsClassifierPathFound = fs.existsSync(
LLM_ACTIONS_CLASSIFIER_PATH
)

if (!isActionsClassifierPathFound) {
throw new Error(
`The LLM action classifier is not found at "${LLM_ACTIONS_CLASSIFIER_PATH}". Please run "npm run train" and retry.`
)
}
}
}

/**
* Load the LLM action classifier and other future
* files that only need to be loaded once
*/
private async singleLoad(): Promise<void> {
if (this._isLLMActionRecognitionEnabled) {
try {
this._llmActionsClassifierContent = await fs.promises.readFile(
LLM_ACTIONS_CLASSIFIER_PATH,
'utf-8'
)

LogHelper.title('LLM Manager')
LogHelper.success('LLM action classifier has been loaded')
} catch (e) {
throw new Error(`Failed to load the LLM action classifier: ${e}`)
}
}
}

public async loadLLM(): Promise<void> {
if (!HAS_LLM) {
LogHelper.title('LLM Manager')
@@ -171,14 +215,41 @@ export default class LLMManager {
this._isLLMActionRecognitionEnabled = true
}
}

try {
// Post checking after loading the LLM
await this.postCheck()
} catch (e) {
LogHelper.title('LLM Manager')
LogHelper.error(`LLM Manager failed to post check: ${e}`)

process.exit(1)
}

try {
// Load files that only need to be loaded once
await this.singleLoad()
} catch (e) {
LogHelper.title('LLM Manager')
LogHelper.error(`LLM Manager failed to single load: ${e}`)

process.exit(1)
}
}

public async loadHistory(
conversationLogger: ConversationLogger,
session: LlamaChatSession
session: LlamaChatSession,
options?: { nbOfLogsToLoad?: number }
): Promise<ChatHistoryItem[]> {
const [systemMessage] = session.getChatHistory()
const conversationLogs = await conversationLogger.load()
let conversationLogs

if (options) {
conversationLogs = await conversationLogger.load(options)
} else {
conversationLogs = await conversationLogger.load()
}

if (!conversationLogs) {
return [systemMessage] as ChatHistoryItem[]
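A short usage sketch of the new optional third argument, as the action recognition duty does above:

// Trim the injected history to the last 8 messages
const history = await LLM_MANAGER.loadHistory(CONVERSATION_LOGGER, session, {
  nbOfLogsToLoad: 8
})
session.setChatHistory(history)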
@@ -272,7 +272,7 @@ ${YOUR_DUTY}
${dutySystemPrompt}`
}

public getChitChatSystemPrompt(): string {
public getConversationSystemPrompt(): string {
return `${this.whoYouAre}

${this.contextInfo}
@@ -8,7 +8,7 @@ export enum LLMDuties {
Translation = 'translation',
Summarization = 'summarization',
Paraphrase = 'paraphrase',
ChitChat = 'chit-chat'
Conversation = 'conversation'
// TODO
/*SentimentAnalysis = 'sentiment-analysis',
QuestionAnswering = 'question-answering',
@@ -14,7 +14,10 @@ import type {
NLUResult
} from '@/core/nlp/types'
import { langs } from '@@/core/langs.json'
import { PYTHON_TCP_SERVER_BIN_PATH } from '@/constants'
import {
PYTHON_TCP_SERVER_BIN_PATH,
HAS_LLM_ACTION_RECOGNITION
} from '@/constants'
import {
PYTHON_TCP_CLIENT,
BRAIN,
@@ -29,6 +32,15 @@ import { SlotFilling } from '@/core/nlp/nlu/slot-filling'
import Conversation, { DEFAULT_ACTIVE_CONTEXT } from '@/core/nlp/conversation'
import { Telemetry } from '@/telemetry'
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
import {
ActionRecognitionLLMDuty,
type ActionRecognitionLLMDutyParams
} from '@/core/llm-manager/llm-duties/action-recognition-llm-duty'

type MatchActionResult = Pick<
NLPJSProcessResult,
'locale' | 'sentiment' | 'answers' | 'intent' | 'domain' | 'score'
>

export const DEFAULT_NLU_RESULT = {
utterance: '',
@@ -157,6 +169,138 @@ export default class NLU {
})
}

/**
* Match the action based on the utterance.
* Fallback to chat action if no action is found
*/
private async matchAction(
utterance: NLPUtterance
): Promise<MatchActionResult> {
const socialConversationDomain = 'social_communication'
const chitChatSetupIntent = 'conversation.setup'
let locale = null as unknown as NLPJSProcessResult['locale']
let sentiment
let answers = null as unknown as NLPJSProcessResult['answers']
let intent = null as unknown as NLPJSProcessResult['intent']
let domain = null as unknown as NLPJSProcessResult['domain']
let score = 1
let classifications =
null as unknown as NLPJSProcessResult['classifications']
let ownerHasExplicitlyRequestedChitChat = false

/**
* Check if the owner has explicitly requested the chit-chat loop
*/
const mainClassifierResult =
await MODEL_LOADER.mainNLPContainer.process(utterance)
if (
mainClassifierResult.domain === socialConversationDomain &&
mainClassifierResult.intent === chitChatSetupIntent
) {
ownerHasExplicitlyRequestedChitChat = true
}

if (HAS_LLM_ACTION_RECOGNITION && !ownerHasExplicitlyRequestedChitChat) {
/**
* Use LLM for action recognition
*/

const dutyParams: ActionRecognitionLLMDutyParams = {
input: utterance,
data: {
existingContextName: null
}
}

if (this.conversation.hasActiveContext()) {
dutyParams.data.existingContextName =
this.conversation.activeContext.name
}

const actionRecognitionDuty = new ActionRecognitionLLMDuty(dutyParams)
const actionRecognitionResult = await actionRecognitionDuty.execute()
const foundAction = actionRecognitionResult?.output[
'intent_name'
] as string

locale = await MODEL_LOADER.mainNLPContainer.guessLanguage(utterance)
;({ sentiment } =
await MODEL_LOADER.mainNLPContainer.getSentiment(utterance))

const chitChatSetupAction = `${socialConversationDomain}.${chitChatSetupIntent}`
/**
* Check if the LLM did not find any action.
* Ignore the chit-chat setup action as it is a special case
*/
const llmActionRecognitionDidNotFindAction =
!foundAction ||
foundAction === 'not_found' ||
foundAction === chitChatSetupAction
if (llmActionRecognitionDidNotFindAction) {
Telemetry.utterance({ utterance, lang: BRAIN.lang })

domain = socialConversationDomain
intent = 'conversation.converse'
} else {
// Check in case the LLM hallucinated an action
const actionExists = await SkillDomainHelper.actionExists(
locale,
foundAction
)

if (!actionExists) {
Telemetry.utterance({ utterance, lang: BRAIN.lang })

domain = socialConversationDomain
intent = 'conversation.converse'
} else {
const parsedAction = foundAction.split('.')
const [, skillName, actionName] = parsedAction

domain = parsedAction[0] as string
intent = `${skillName}.${actionName}`
answers = await MODEL_LOADER.mainNLPContainer.findAllAnswers(
locale,
intent
)
}
}
} else {
/**
* Use classic NLP processing
*/

;({ locale, answers, score, intent, domain, sentiment, classifications } =
await MODEL_LOADER.mainNLPContainer.process(utterance))

/**
* If a context is active, then use the appropriate classification based on score probability.
* E.g. 1. Create my shopping list; 2. Actually delete it.
* If there are several "delete it" across skills, Leon needs to make use of
* the current context ({domain}.{skill}) to define the most accurate classification
*/
if (this.conversation.hasActiveContext()) {
classifications.forEach(({ intent: newIntent, score: newScore }) => {
if (newScore > 0.6) {
const [skillName] = newIntent.split('.')
const newDomain = MODEL_LOADER.mainNLPContainer.getIntentDomain(
locale,
newIntent
)
const contextName = `${newDomain}.${skillName}`
if (this.conversation.activeContext.name === contextName) {
score = newScore
intent = newIntent
domain = newDomain
}
}
})
}
}

return { locale, sentiment, answers, intent, domain, score }
}

/**
* Classify the utterance,
* pick-up the right classification
@@ -209,40 +353,11 @@ export default class NLU {
}
}

const result: NLPJSProcessResult =
await MODEL_LOADER.mainNLPContainer.process(utterance)
const { locale, answers, classifications } = result
const sentiment = {
vote: result.sentiment.vote,
score: result.sentiment.score
}
let { score, intent, domain } = result

/**
* If a context is active, then use the appropriate classification based on score probability.
* E.g. 1. Create my shopping list; 2. Actually delete it.
* If there are several "delete it" across skills, Leon needs to make use of
* the current context ({domain}.{skill}) to define the most accurate classification
*/
if (this.conversation.hasActiveContext()) {
classifications.forEach(({ intent: newIntent, score: newScore }) => {
if (newScore > 0.6) {
const [skillName] = newIntent.split('.')
const newDomain = MODEL_LOADER.mainNLPContainer.getIntentDomain(
locale,
newIntent
)
const contextName = `${newDomain}.${skillName}`
if (this.conversation.activeContext.name === contextName) {
score = newScore
intent = newIntent
domain = newDomain
}
}
})
}
const { locale, sentiment, answers, intent, domain, score } =
await this.matchAction(utterance)

const [skillName, actionName] = intent.split('.')

await this.setNLUResult({
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
utterance,
@@ -280,7 +395,7 @@ export default class NLU {
if (!fallback) {
if (!BRAIN.isMuted) {
await BRAIN.talk(
`${BRAIN.wernicke('random_unknown_intents')}.`,
`${BRAIN.wernicke('random_unknown_intents_legacy')}.`,
true
)
}
@@ -12,10 +12,12 @@ import type {
import { SKILLS_PATH } from '@/constants'

interface SkillDomain {
domainId: string
name: string
path: string
skills: {
[key: string]: {
domainId: string
name: string
path: string
bridge: SkillBridgeSchema
@@ -28,6 +30,12 @@ interface SkillConfigWithGlobalEntities
entities: Record<string, GlobalEntitySchema>
}

interface SkillActionObject {
domain: string
skill: string
action: string
}

export class SkillDomainHelper {
/**
* List all skills domains with skills data inside
@@ -45,6 +53,8 @@ export class SkillDomainHelper {
path.join(domainPath, 'domain.json')
)) as DomainSchema
const skillFolders = await fs.promises.readdir(domainPath)
const domainPathParts = domainPath.split('/')
const domainId = domainPathParts[domainPathParts.length - 1] as string

for (let i = 0; i < skillFolders.length; i += 1) {
const skillAliasName = skillFolders[i] as string
@@ -62,6 +72,7 @@ export class SkillDomainHelper {
) as SkillSchema

skills[skillName] = {
domainId,
name: skillAliasName,
path: skillPath,
bridge: skillBridge
@@ -69,6 +80,7 @@ export class SkillDomainHelper {
}

const skillDomain: SkillDomain = {
domainId,
name: entity,
path: domainPath,
skills
@@ -200,4 +212,45 @@ export class SkillDomainHelper {

return JSON.parse(await fs.promises.readFile(skillMemoryPath, 'utf-8'))
}

/**
* Verify if an action exists
* @param lang Language short code
* @param params Action to verify
* @example actionExists('en', 'food_drink.advisor.suggest') // true
* @example actionExists('en', { domain: 'food_drink', skill: 'advisor', action: 'suggest' }) // true
*/
public static async actionExists(
lang: ShortLanguageCode,
params: string | SkillActionObject
): Promise<boolean> {
const { domain, skill, action } =
typeof params === 'string'
? {
domain: params.split('.')[0],
skill: params.split('.')[1],
action: params.split('.')[2]
}
: params

if (!domain || !skill || !action) {
return false
}

const skillPath = path.join(SKILLS_PATH, domain, skill)
if (!fs.existsSync(skillPath)) {
return false
}

const skillConfigPath = path.join(skillPath, 'config', `${lang}.json`)
if (!fs.existsSync(skillConfigPath)) {
return false
}

const { actions } = JSON.parse(
await fs.promises.readFile(skillConfigPath, 'utf8')
) as SkillConfigSchema

return !!actions[action]
}
}
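A usage sketch of the new helper with both accepted parameter forms:

// String form: "{domain}.{skill}.{action}"
const existsFromString = await SkillDomainHelper.actionExists(
  'en',
  'food_drink.advisor.suggest'
)
// Object form
const existsFromObject = await SkillDomainHelper.actionExists('en', {
  domain: 'food_drink',
  skill: 'advisor',
  action: 'suggest'
})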
@@ -84,7 +84,7 @@ import { LogHelper } from '@/helpers/log-helper'
}

/*const actionRecognitionDuty = new ActionRecognitionLLMDuty({
input: 'Give me a random number'
input: 'Provide a number'
})
await actionRecognitionDuty.execute()*/
@@ -4,8 +4,8 @@
"start": {
"type": "dialog",
"utterance_samples": [
"Let's play rochambeau",
"I wanna play rock paper scissors",
"Play rochambeau",
"Can we play paper rock scissors?",
"I want to play rochambeau"
],
@@ -1,6 +1,6 @@
{
"$schema": "../../../schemas/skill-schemas/skill.json",
"name": "Welcome",
"name": "Thanks",
"bridge": null,
"version": "1.0.0",
"description": "Leon welcomes you.",
@@ -3,27 +3,28 @@
"actions": {
"setup": {
"type": "dialog",
"utterance_samples": [
"Start a [chat|chit-chat|talk] loop",
"I want to [talk|chat|speak] with you",
"Let's [chat|speak|talk]"
],
"utterance_samples": ["Start a [chat|chit-chat|talk] loop"],
"answers": [
"Alright, let's chat! What do you want to talk about?",
"Sure, let's chat! What's on your mind?",
"Great! Happy to chat. What's up?",
"Glad you asked, anything you wanna talk about?"
],
"next_action": "chat"
"next_action": "chit_chat"
},
"chat": {
"chit_chat": {
"type": "logic",
"disable_llm_nlg": true,
"loop": {
"expected_item": {
"type": "utterance",
"name": "message"
}
}
},
"converse": {
"type": "logic",
"disable_llm_nlg": true
}
},
"answers": {
@@ -1,9 +1,9 @@
{
"$schema": "../../../schemas/skill-schemas/skill.json",
"name": "Chit-Chat",
"name": "Conversation",
"bridge": "nodejs",
"version": "1.0.0",
"description": "A simple chit-chat skill where you can freely talk with Leon and get to know him better.",
"description": "A simple conversation skill where you can freely talk with Leon and get to know him better.",
"author": {
"name": "Louis Grenard",
"email": "louis@getleon.ai",
@@ -15,11 +15,10 @@ export const run: ActionFunction = async function (params) {
url: '/llm-inference',
method: 'POST',
data: {
dutyType: 'chit-chat',
dutyType: 'conversation',
input: ownerMessage
}
})
// const { leon_answer: leonAnswer } = response.data.output

await leon.answer({
key: 'answer_message',
@@ -0,0 +1,33 @@
import type { ActionFunction } from '@sdk/types'
import { leon } from '@sdk/leon'
import { Network } from '@sdk/network'

export const run: ActionFunction = async function (params) {
  const ownerMessage = params.new_utterance
  const network = new Network({
    baseURL: `${process.env['LEON_HOST']}:${process.env['LEON_PORT']}/api/v1`
  })

  /**
   * TODO: create SDK methods to handle request and response for every LLM duty
   */
  const response = await network.request({
    url: '/llm-inference',
    method: 'POST',
    data: {
      dutyType: 'conversation',
      input: ownerMessage,
      data: {
        // Load/follow the main conversation history
        useLoopHistory: false
      }
    }
  })

  await leon.answer({
    key: 'answer_message',
    data: {
      output: response.data.output
    }
  })
}
@@ -3,7 +3,7 @@
"actions": {
"run": {
"type": "logic",
"utterance_samples": ["Show the widget playground"]
"utterance_samples": []
}
},
"answers": {
@@ -188,7 +188,7 @@ class TCPServer:
audio_id = f'{int(time.time())}_{os.urandom(2).hex()}'
output_file_name = f'{audio_id}.wav'
output_path = os.path.join(TMP_PATH, output_file_name)
speed = 0.9
speed = 0.94

formatted_speech = speech.replace(' - ', '.').replace(',', '.').replace(': ', '. ')
# Clean up emojis