diff --git a/core/skills-endpoints.json b/core/skills-endpoints.json
index 88950cb6..d8258002 100644
--- a/core/skills-endpoints.json
+++ b/core/skills-endpoints.json
@@ -195,6 +195,11 @@
       "route": "/api/action/social_communication/mbti/quiz",
       "params": []
     },
+    {
+      "method": "GET",
+      "route": "/api/action/unknown/widget-playground/run",
+      "params": []
+    },
     {
       "method": "GET",
       "route": "/api/action/utilities/date_time/current_date_time",
@@ -242,14 +247,13 @@
       "params": []
     },
     {
-      "method": "POST",
-      "route": "/api/action/utilities/translator-poc/short_translate",
-      "params": [null],
-      "entitiesType": "trim"
+      "method": "GET",
+      "route": "/api/action/utilities/translator-poc/translate_loop_entry",
+      "params": []
     },
     {
       "method": "GET",
-      "route": "/api/action/unknown/widget-playground/run",
+      "route": "/api/action/utilities/translator-poc/translate",
       "params": []
     }
   ]
diff --git a/server/src/constants.ts b/server/src/constants.ts
index 136bfb90..d33a0300 100644
--- a/server/src/constants.ts
+++ b/server/src/constants.ts
@@ -161,12 +161,15 @@ export const LEON_FILE_PATH = path.join(process.cwd(), 'leon.json')
 /**
  * LLMs
  */
-// export const LLM_VERSION = 'v0.2.Q4_K_M'
-export const LLM_VERSION = '1.1-7b-it-Q4_K_M'
-// export const LLM_NAME = 'Mistral 7B Instruct'
-export const LLM_NAME = 'Gemma 1.1 7B (IT)'
-// export const LLM_FILE_NAME = `mistral-7b-instruct-${LLM_VERSION}.gguf`
-export const LLM_FILE_NAME = `gemma-${LLM_VERSION}.gguf`
+export const LLM_VERSION = 'v0.2.Q4_K_S'
+// export const LLM_VERSION = '1.1-7b-it-Q4_K_M'
+// export const LLM_VERSION = '8B-Instruct-Q4_K_S'
+export const LLM_NAME = 'Mistral 7B Instruct'
+// export const LLM_NAME = 'Gemma 1.1 7B (IT)'
+// export const LLM_NAME = 'Meta Llama 3 8B Instruct'
+export const LLM_FILE_NAME = `mistral-7b-instruct-${LLM_VERSION}.gguf`
+// export const LLM_FILE_NAME = `gemma-${LLM_VERSION}.gguf`
+// export const LLM_FILE_NAME = `Meta-Llama-3-${LLM_VERSION}.gguf`
 export const LLM_NAME_WITH_VERSION = `${LLM_NAME} (${LLM_VERSION})`
 export const LLM_DIR_PATH = path.join(MODELS_PATH, 'llm')
 export const LLM_PATH = path.join(LLM_DIR_PATH, LLM_FILE_NAME)
@@ -176,10 +179,14 @@ export const LLM_HF_DOWNLOAD_URL =
   'https://huggingface.co/bartowski/gemma-1.1-7b-it-GGUF/resolve/main/gemma-1.1-7b-it-Q4_K_M.gguf?download=true'
 /*export const LLM_HF_DOWNLOAD_URL =
   'https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf?download=true'*/
+/*export const LLM_HF_DOWNLOAD_URL =
+  'https://huggingface.co/bartowski/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_K_S.gguf?download=true'*/
 export const LLM_MIRROR_DOWNLOAD_URL =
   'https://hf-mirror.com/bartowski/gemma-1.1-7b-it-GGUF/resolve/main/gemma-1.1-7b-it-Q4_K_M.gguf?download=true'
 /*export const LLM_MIRROR_DOWNLOAD_URL =
   'https://hf-mirror.com/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf?download=true'*/
+/*export const LLM_MIRROR_DOWNLOAD_URL =
+  'https://hf-mirror.com/bartowski/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_K_S.gguf?download=true'*/
 /**
  * @see llama.cpp releases: https://github.com/ggerganov/llama.cpp/releases
  */
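Review note on the constants.ts hunk: the active LLM constants now select Mistral v0.2.Q4_K_S, but the uncommented LLM_HF_DOWNLOAD_URL and LLM_MIRROR_DOWNLOAD_URL context lines still point at the Gemma Q4_K_M build, so the setup step would download a different model than the one LLM_PATH expects. A table-driven layout would keep name, version, file name and URLs consistent by construction. The sketch below is only an illustration: LLMDescriptor, MISTRAL_7B_INSTRUCT and ACTIVE_LLM are hypothetical names, and the values are copied from the commented Mistral links already in the file.

// Sketch only (illustrative names, not part of this diff).
interface LLMDescriptor {
  name: string
  version: string
  fileName: string
  hfDownloadURL: string
  mirrorDownloadURL: string
}

const MISTRAL_7B_INSTRUCT: LLMDescriptor = {
  name: 'Mistral 7B Instruct',
  version: 'v0.2.Q4_K_M',
  fileName: 'mistral-7b-instruct-v0.2.Q4_K_M.gguf',
  hfDownloadURL:
    'https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf?download=true',
  mirrorDownloadURL:
    'https://hf-mirror.com/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf?download=true'
}

// Flipping the active model updates every derived constant at once,
// so the GGUF file name and its download URLs cannot drift apart.
const ACTIVE_LLM = MISTRAL_7B_INSTRUCT
export const LLM_NAME = ACTIVE_LLM.name
export const LLM_VERSION = ACTIVE_LLM.version
export const LLM_FILE_NAME = ACTIVE_LLM.fileName
export const LLM_NAME_WITH_VERSION = `${LLM_NAME} (${LLM_VERSION})`
export const LLM_HF_DOWNLOAD_URL = ACTIVE_LLM.hfDownloadURL
export const LLM_MIRROR_DOWNLOAD_URL = ACTIVE_LLM.mirrorDownloadURL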
diff --git a/server/src/core/brain/brain.ts b/server/src/core/brain/brain.ts
index 4a7b11c8..fd5cdc7c 100644
--- a/server/src/core/brain/brain.ts
+++ b/server/src/core/brain/brain.ts
@@ -332,7 +332,7 @@ export default class Brain {
   }
 
   /**
-   * Execute Python skills
+   * Execute skills
    */
   public execute(nluResult: NLUResult): Promise<Partial<BrainProcessResult>> {
     const executionTimeStart = Date.now()
@@ -373,6 +373,8 @@ export default class Brain {
       ? actions[action.next_action]
       : null
 
+    console.log('nextAction', nextAction)
+
     if (actionType === SkillActionTypes.Logic) {
       /**
        * "Logic" action skill execution
@@ -582,6 +584,8 @@ export default class Brain {
       SOCKET_SERVER.socket?.emit('suggest', nextAction.suggestions)
     }
 
+    console.log('nluResult', nluResult, action, nextAction)
+
     resolve({
       utteranceID,
       lang: this._lang,
diff --git a/server/src/core/llm-manager/llm-duties/custom-ner-llm-duty.ts b/server/src/core/llm-manager/llm-duties/custom-ner-llm-duty.ts
index 0f901a3d..6184e1fc 100644
--- a/server/src/core/llm-manager/llm-duties/custom-ner-llm-duty.ts
+++ b/server/src/core/llm-manager/llm-duties/custom-ner-llm-duty.ts
@@ -54,7 +54,7 @@ export class CustomNERLLMDuty extends LLMDuty {
         ...this.data.schema
       }
     })
-    const prompt = `${this.systemPrompt} Utterance: ${this.input}`
+    const prompt = `${this.systemPrompt} Utterance to parse: ${this.input}`
     const rawResult = await completion.generateCompletion(prompt, {
       contextShiftSize: context.contextSize / 2,
       grammar,
diff --git a/server/src/core/llm-manager/llm-duties/summarization-llm-duty.ts b/server/src/core/llm-manager/llm-duties/summarization-llm-duty.ts
index 5c169a0f..cecaa2c2 100644
--- a/server/src/core/llm-manager/llm-duties/summarization-llm-duty.ts
+++ b/server/src/core/llm-manager/llm-duties/summarization-llm-duty.ts
@@ -48,7 +48,7 @@ export class SummarizationLLMDuty extends LLMDuty {
         }
       }
     })
-    const prompt = `${this.systemPrompt} Text: ${this.input}`
+    const prompt = `${this.systemPrompt} Text to summarize: ${this.input}`
     const rawResult = await completion.generateCompletion(prompt, {
       grammar,
       maxTokens: context.contextSize
diff --git a/server/src/core/llm-manager/llm-duties/translation-llm-duty.ts b/server/src/core/llm-manager/llm-duties/translation-llm-duty.ts
index d0bc806e..f460a2f2 100644
--- a/server/src/core/llm-manager/llm-duties/translation-llm-duty.ts
+++ b/server/src/core/llm-manager/llm-duties/translation-llm-duty.ts
@@ -65,7 +65,7 @@ export class TranslationLLMDuty extends LLMDuty {
         }
       }
     })
-    const prompt = `${this.systemPrompt} Text: ${this.input}`
+    const prompt = `${this.systemPrompt} Text to translate: ${this.input}`
     const rawResult = await completion.generateCompletion(prompt, {
       contextShiftSize: context.contextSize / 2,
       grammar,
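The three prompt changes above follow one pattern: label the user payload ("Utterance to parse:", "Text to summarize:", "Text to translate:") so the model is less likely to treat the input as a continuation of the instructions. A small helper could enforce the convention in one place; this is a sketch only, and buildDutyPrompt / DUTY_INPUT_LABELS are hypothetical names, not SDK API.

// Sketch only: build "<system prompt> <label>: <input>" the same way for every duty.
type DutyType = 'customNER' | 'summarization' | 'translation'

const DUTY_INPUT_LABELS: Record<DutyType, string> = {
  customNER: 'Utterance to parse',
  summarization: 'Text to summarize',
  translation: 'Text to translate'
}

function buildDutyPrompt(
  duty: DutyType,
  systemPrompt: string,
  input: string
): string {
  return `${systemPrompt} ${DUTY_INPUT_LABELS[duty]}: ${input}`
}

// buildDutyPrompt('translation', 'You are a translator.', 'Bonjour tout le monde !')
// -> 'You are a translator. Text to translate: Bonjour tout le monde !'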
diff --git a/server/src/core/nlp/nlu/action-loop.ts b/server/src/core/nlp/nlu/action-loop.ts
index 2a6fcd2e..aea5c09a 100644
--- a/server/src/core/nlp/nlu/action-loop.ts
+++ b/server/src/core/nlp/nlu/action-loop.ts
@@ -20,6 +20,7 @@ export class ActionLoop {
   public static async handle(
     utterance: NLPUtterance
   ): Promise<Partial<BrainProcessResult> | null> {
+    console.log('here here here here here here here')
     const { domain, intent } = NLU.conversation.activeContext
     const [skillName, actionName] = intent.split('.') as [string, string]
     const skillConfigPath = join(
@@ -56,10 +57,14 @@ export class ActionLoop {
     if (action?.loop) {
       const { name: expectedItemName, type: expectedItemType } =
         action.loop.expected_item
+      let hasMatchingUtterance = false
       let hasMatchingEntity = false
       let hasMatchingResolver = false
 
-      if (expectedItemType === 'entity') {
+      if (expectedItemType === 'utterance') {
+        console.log('action.loop.expected_item', action.loop.expected_item)
+        hasMatchingUtterance = true
+      } else if (expectedItemType === 'entity') {
         hasMatchingEntity =
           NLU.nluResult.entities.filter(
             ({ entity }) => expectedItemName === entity
@@ -124,9 +129,10 @@ export class ActionLoop {
       }
 
       // Ensure expected items are in the utterance, otherwise clean context and reprocess
-      if (!hasMatchingEntity && !hasMatchingResolver) {
+      if (!hasMatchingEntity && !hasMatchingResolver && !hasMatchingUtterance) {
         BRAIN.talk(`${BRAIN.wernicke('random_context_out_of_topic')}.`)
         NLU.conversation.cleanActiveContext()
+        console.log('nlu process 2')
         await NLU.process(utterance)
         return null
       }
@@ -140,6 +146,7 @@ export class ActionLoop {
       NLU.conversation.cleanActiveContext()
 
       if (originalUtterance !== null) {
+        console.log('nlu process 3')
         await NLU.process(originalUtterance)
       }
 
@@ -160,6 +167,8 @@ export class ActionLoop {
 
     // Break the action loop and prepare for the next action if necessary
     if (processedData.core?.isInActionLoop === false) {
+      console.log('actionloop switch 1')
+
       NLU.conversation.activeContext.isInActionLoop =
         !!processedData.action?.loop
       NLU.conversation.activeContext.actionName = processedData.action
diff --git a/server/src/core/nlp/nlu/nlu.ts b/server/src/core/nlp/nlu/nlu.ts
index 9f15641c..e94fc4a2 100644
--- a/server/src/core/nlp/nlu/nlu.ts
+++ b/server/src/core/nlp/nlu/nlu.ts
@@ -102,6 +102,8 @@ export default class NLU {
     LogHelper.title('NLU')
     LogHelper.info('Processing...')
 
+    console.log('HERE EHRHE RER UEIFJ EIF EIUF E')
+
     if (!MODEL_LOADER.hasNlpModels()) {
       if (!BRAIN.isMuted) {
         BRAIN.talk(`${BRAIN.wernicke('random_errors')}!`)
@@ -119,14 +121,19 @@ export default class NLU {
 
     // Pre NLU processing according to the active context if there is one
     if (this.conversation.hasActiveContext()) {
+      console.log('has active context', this.conversation.activeContext)
       // When the active context is in an action loop, then directly trigger the action
       if (this.conversation.activeContext.isInActionLoop) {
+        console.log('in action loop handle')
        return resolve(await ActionLoop.handle(utterance))
      }
 
       // When the active context has slots filled
       if (Object.keys(this.conversation.activeContext.slots).length > 0) {
         try {
+          console.log('in slot filling handle')
+          // TODO: activate the action loop if the next action has one?
+
           return resolve(await SlotFilling.handle(utterance))
         } catch (e) {
           return reject({})
@@ -244,6 +251,7 @@ export default class NLU {
       const shouldSlotLoop = await SlotFilling.route(intent)
 
       if (shouldSlotLoop) {
+        console.log('should slot loop')
         return resolve({})
       }
 
@@ -253,6 +261,7 @@ export default class NLU {
         Object.keys(this.conversation.activeContext.slots).length > 0
       ) {
         try {
+          console.log('slot filling here')
           return resolve(await SlotFilling.handle(utterance))
         } catch (e) {
           return reject({})
@@ -282,11 +291,15 @@ export default class NLU {
       this.nluResult.entities = this.conversation.activeContext.entities
 
       try {
+        console.log('before brain execute')
         const processedData = await BRAIN.execute(this.nluResult)
 
+        console.log('processedData.nextAction', processedData.nextAction)
+
         // Prepare next action if there is one queuing
         if (processedData.nextAction) {
           this.conversation.cleanActiveContext()
 
+          console.log('actionloop switch 2')
           await this.conversation.setActiveContext({
             ...DEFAULT_ACTIVE_CONTEXT,
             lang: BRAIN.lang,
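The action-loop change above introduces a third kind of expected item: with type 'utterance', any input keeps the loop alive and the raw utterance itself is the payload, so the out-of-topic fallback is skipped. Distilled into a standalone function (simplified types and illustrative names; the real code also handles resolver matching and slot state):

// Standalone distillation of the new matching rule.
type ExpectedItemType =
  | 'skill_resolver'
  | 'global_resolver'
  | 'entity'
  | 'utterance'

interface ExpectedItem {
  type: ExpectedItemType
  name: string
}

function loopInputMatches(
  expected: ExpectedItem,
  entities: { entity: string }[]
): boolean {
  if (expected.type === 'utterance') {
    // The raw utterance itself is the payload, so any input matches
    // and the "out of topic" branch is never taken.
    return true
  }
  if (expected.type === 'entity') {
    // Same rule as before: a recognized entity must carry the expected name.
    return entities.some(({ entity }) => entity === expected.name)
  }
  // skill_resolver / global_resolver matching is handled elsewhere in action-loop.ts.
  return false
}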
diff --git a/server/src/core/nlp/nlu/slot-filling.ts b/server/src/core/nlp/nlu/slot-filling.ts
index f4a41e2a..8eb57db0 100644
--- a/server/src/core/nlp/nlu/slot-filling.ts
+++ b/server/src/core/nlp/nlu/slot-filling.ts
@@ -16,11 +16,14 @@ export class SlotFilling {
   ): Promise<Partial<BrainProcessResult> | null> {
     const processedData = await this.fillSlot(utterance)
 
+    console.log('processedData', processedData)
+
     /**
      * In case the slot filling has been interrupted. e.g. context change, etc.
      * Then reprocess with the new utterance
      */
     if (!processedData) {
+      console.log('nlu process 1')
       await NLU.process(utterance)
       return null
     }
@@ -108,6 +111,8 @@ export class SlotFilling {
     if (!NLU.conversation.areSlotsAllFilled()) {
       BRAIN.talk(`${BRAIN.wernicke('random_context_out_of_topic')}.`)
     } else {
+      console.log('slot filling active context', NLU.conversation.activeContext)
+
       NLU.nluResult = {
         ...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
         // Assign slots only if there is a next action
@@ -124,7 +129,7 @@ export class SlotFilling {
       }
     }
 
-    NLU.conversation.cleanActiveContext()
+    // NLU.conversation.cleanActiveContext()
 
     return BRAIN.execute(NLU.nluResult)
   }
diff --git a/server/src/core/socket-server.ts b/server/src/core/socket-server.ts
index f74b7c9f..0b0ec201 100644
--- a/server/src/core/socket-server.ts
+++ b/server/src/core/socket-server.ts
@@ -116,6 +116,7 @@ export default class SocketServer {
           LogHelper.time('Utterance processed in')
 
           BRAIN.isMuted = false
+          console.log('nlu process 4')
           const processedData = await NLU.process(utterance)
 
           if (processedData) {
diff --git a/server/src/schemas/skill-schemas.ts b/server/src/schemas/skill-schemas.ts
index 4557d6cf..b46dbc5b 100644
--- a/server/src/schemas/skill-schemas.ts
+++ b/server/src/schemas/skill-schemas.ts
@@ -21,7 +21,8 @@ const skillActionTypes = [
 const skillDataTypes = [
   Type.Literal('skill_resolver'),
   Type.Literal('global_resolver'),
-  Type.Literal('entity')
+  Type.Literal('entity'),
+  Type.Literal('utterance')
 ]
 const answerTypes = Type.Union([
   Type.String(),
@@ -186,7 +187,10 @@
           type: Type.Union(skillDataTypes),
           name: Type.String()
         },
-        { description: 'An item can be a entity or a resolver.' }
+        {
+          description:
+            'An item can be an entity, a resolver or an utterance.'
+        }
       )
     },
     {
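The schema change above is what lets the translator-poc config below pass validation: 'utterance' joins the accepted expected_item types. A minimal, self-contained check with TypeBox, the same library the schema file already uses (the loopSchema name here is illustrative):

// Sketch: validate a loop config against the widened expected_item union.
import { Type } from '@sinclair/typebox'
import { Value } from '@sinclair/typebox/value'

const skillDataTypes = [
  Type.Literal('skill_resolver'),
  Type.Literal('global_resolver'),
  Type.Literal('entity'),
  Type.Literal('utterance')
]

const loopSchema = Type.Object({
  expected_item: Type.Object({
    type: Type.Union(skillDataTypes),
    name: Type.String()
  })
})

console.log(
  Value.Check(loopSchema, {
    expected_item: { type: 'utterance', name: 'text_to_translate' }
  })
) // true with this change; false before 'utterance' was added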
diff --git a/skills/utilities/translator-poc/config/en.json b/skills/utilities/translator-poc/config/en.json
index a175cbac..674a410d 100644
--- a/skills/utilities/translator-poc/config/en.json
+++ b/skills/utilities/translator-poc/config/en.json
@@ -1,25 +1,32 @@
 {
   "$schema": "../../../../schemas/skill-schemas/skill-config.json",
   "actions": {
-    "short_translate": {
-      "type": "logic",
-      "utterance_samples": [
-        "[Translate|Convert] @text_to_parse [to|in|into] @language",
-        "What is @text_to_parse in @language?",
-        "How [do|can] [I|we] say @text_to_parse in @language?",
-        "Can you tell [me|us] how to say @text_to_parse in @language?",
-        "[I|We] need to know how to say @text_to_parse in @language"
-      ],
-      "entities": [
+    "translate_loop_entry": {
+      "type": "dialog",
+      "utterance_samples": ["Start a [translation|translate] loop"],
+      "slots": [
         {
-          "type": "llm",
-          "schema": {
-            "text_to_parse": {
-              "type": "string"
-            }
-          }
+          "name": "target_language",
+          "item": {
+            "type": "entity",
+            "name": "language"
+          },
+          "questions": [
+            "What language would you like to translate to?",
+            "Sure, what language are you translating to?"
+          ]
         }
-      ]
+      ],
+      "next_action": "translate"
+    },
+    "translate": {
+      "type": "logic",
+      "loop": {
+        "expected_item": {
+          "type": "utterance",
+          "name": "text_to_translate"
+        }
+      }
     }
   },
   "entities": {
diff --git a/skills/utilities/translator-poc/skill.json b/skills/utilities/translator-poc/skill.json
index b7e3e61e..545de72c 100644
--- a/skills/utilities/translator-poc/skill.json
+++ b/skills/utilities/translator-poc/skill.json
@@ -1,7 +1,7 @@
 {
   "$schema": "../../../schemas/skill-schemas/skill.json",
   "name": "Translator (PoC)",
-  "bridge": "nodejs",
+  "bridge": "python",
   "version": "1.0.0",
   "description": "Translate text from one language to another",
   "author": {
diff --git a/skills/utilities/translator-poc/src/actions/short_translate.ts b/skills/utilities/translator-poc/src/actions/short_translate.ts
index e5038d7b..a0091b67 100644
--- a/skills/utilities/translator-poc/src/actions/short_translate.ts
+++ b/skills/utilities/translator-poc/src/actions/short_translate.ts
@@ -8,7 +8,7 @@ export const run: ActionFunction = async function (params) {
   let targetLanguage = null
 
   for (const currentEntity of params.current_entities) {
-    if (currentEntity.entity === 'text_to_parse') {
+    if (currentEntity.entity === 'text_to_translate') {
       textToTranslate = currentEntity.resolution.value
     }
     if (currentEntity.entity === 'language') {
diff --git a/skills/utilities/translator-poc/src/actions/translate.py b/skills/utilities/translator-poc/src/actions/translate.py
new file mode 100644
index 00000000..9aa693dd
--- /dev/null
+++ b/skills/utilities/translator-poc/src/actions/translate.py
@@ -0,0 +1,21 @@
+import os
+from bridges.python.src.sdk.leon import leon
+from bridges.python.src.sdk.network import Network
+from bridges.python.src.sdk.types import ActionParams
+
+
+def run(params: ActionParams) -> None:
+    """Translate the utterance captured by the action loop"""
+
+    print('params', params)
+
+    target_language = 'French'  # TODO: read the target language from the filled slot instead of hardcoding it
+    text_to_translate = params['utterance']
+    network = Network({'base_url': f'{os.environ["LEON_HOST"]}:{os.environ["LEON_PORT"]}/api/v1'})
+
+    leon.answer({
+        'key': 'translate',
+        'data': {
+            'output': f'Translating "{text_to_translate}" to {target_language}'
+        }
+    })
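For reference, this is the shape of the payload a loop action receives, as the two action implementations consume it (params.utterance, params.slots, params.current_entities). The interfaces below are simplified sketches, not the SDK's actual type definitions; note that translate.py currently hardcodes the French target rather than reading the target_language slot.

// Simplified sketch of the loop-action payload (not the SDK's real types).
interface SlotValue {
  resolution: { value: string }
}

interface Entity {
  entity: string
  resolution: { value: string }
}

interface LoopActionParams {
  // Raw input captured because the loop's expected_item type is 'utterance'
  utterance: string
  // Slots filled by the translate_loop_entry action, e.g. target_language
  slots: Record<string, SlotValue>
  current_entities: Entity[]
}

// Hypothetical helper mirroring translate.ts; the fallback mirrors the
// hardcoded PoC value in translate.py.
function getTargetLanguage(params: LoopActionParams): string {
  return params.slots['target_language']?.resolution.value ?? 'French'
}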
diff --git a/skills/utilities/translator-poc/src/actions/translate.ts b/skills/utilities/translator-poc/src/actions/translate.ts
index ff70cbbf..0fd6022c 100644
--- a/skills/utilities/translator-poc/src/actions/translate.ts
+++ b/skills/utilities/translator-poc/src/actions/translate.ts
@@ -4,31 +4,43 @@ import { Network } from '@sdk/network'
 
 export const run: ActionFunction = async function (params) {
   console.log('params', params)
+
+  const targetLanguage = params.slots.target_language.resolution.value
+  const textToTranslate = params.utterance
   const network = new Network({
     baseURL: `${process.env['LEON_HOST']}:${process.env['LEON_PORT']}/api/v1`
   })
 
+  console.log('targetLanguage', targetLanguage)
+  console.log('textToTranslate', textToTranslate)
+
   /**
    * TODO: create SDK methods to handle request and response for every LLM duty
    */
-  const response = await network.request({
+  /*const response = await network.request({
     url: '/llm-inference',
     method: 'POST',
     data: {
       dutyType: 'translation',
-      // TODO: get text entity to translate
-      input: 'Bonjour tout le monde !',
+      input: textToTranslate,
       data: {
-        target: 'English',
+        target: targetLanguage,
         autoDetectLanguage: true
       }
     }
   })
 
+  console.log('response', response)*/
+
   await leon.answer({
     key: 'translate',
+    core: {
+      isInActionLoop: true,
+      restart: true
+    },
     data: {
-      output: response.data.output.o
+      output: `just a test ${targetLanguage} ${textToTranslate}`
+      // output: response.data.output.translation
     }
   })
 }
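The core flags on the answer are what keep the loop running: action-loop.ts (above) breaks the loop only when isInActionLoop === false, so answering with isInActionLoop: true routes the next utterance straight back to this action. The output stays a placeholder until the commented /llm-inference request is re-enabled. A condensed sketch of the contract (simplified, illustrative types; AnswerInput is not the SDK's real type name):

// Condensed sketch of the leon.answer() payload used above.
interface AnswerInput {
  key: string
  core?: {
    isInActionLoop?: boolean // false breaks the loop (see action-loop.ts)
    restart?: boolean // as set in translate.ts above
  }
  data?: Record<string, unknown>
}

// Keep the loop open: the next utterance is fed back into this action.
function buildLoopAnswer(output: string): AnswerInput {
  return {
    key: 'translate',
    core: { isInActionLoop: true, restart: true },
    data: { output }
  }
}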