Mirror of https://github.com/leon-ai/leon.git (synced 2024-11-30 19:07:39 +03:00)

Merge branch 'http-server-refactoring' into develop

This change is contained in commit 5061cac591.

@@ -26,14 +26,29 @@
"plugins": ["@typescript-eslint", "unicorn", "import"],
"ignorePatterns": "*.spec.js",
"rules": {
"@typescript-eslint/no-non-null-assertion": ["off"],
"no-async-promise-executor": ["off"],
"no-underscore-dangle": ["error", { "allowAfterThis": true }],
"prefer-destructuring": ["error"],
"comma-dangle": ["error", "never"],
"semi": ["error", "never"],
"object-curly-spacing": ["error", "always"],
"@typescript-eslint/explicit-function-return-type": "off",
"unicorn/prefer-node-protocol": "error",
"@typescript-eslint/member-delimiter-style": [
"error",
{
"multiline": {
"delimiter": "none",
"requireLast": true
},
"singleline": {
"delimiter": "comma",
"requireLast": false
}
}
],
"@typescript-eslint/explicit-function-return-type": "off",
"@typescript-eslint/consistent-type-definitions": "error",
"import/no-named-as-default": "off",
"import/no-named-as-default-member": "off",
"import/order": [
4 .github/CONTRIBUTING.md (vendored)
@@ -51,7 +51,7 @@ Choose the setup method you want to go for.

### Single-Click

Gitpod will automatically setup an environment and run an instance for you.
Gitpod will automatically set up an environment and run an instance for you.

[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/leon-ai/leon)

@@ -149,7 +149,7 @@ By sponsoring the project you make the project sustainable and faster to develop

The focus is not only limited to the activity you see on GitHub but also a lot of thinking about the direction of the project. Which is naturally related to the overall design, architecture, vision, learning process and so on...

## Contributing to the Python Bridge or TCP Server
## Contributing to the Python Bridge or TCP HttpServer

Leon makes use of two binaries, the Python bridge and the TCP server. These binaries are compiled from Python sources.

@@ -100,8 +100,8 @@ If you want to, Leon can communicate with you by being **offline to protect your
> - Skills
> - The web app
> - The hotword node
> - The TCP server (for inter-process communication between Leon and third-party processes such as spaCy)
> - The Python bridge (the connector between Python core and skills)
> - The TCP server (for inter-process communication between Leon and third-party nodes such as spaCy)
> - The Python bridge (the connector between the core and skills made with Python)

### What is Leon able to do?

@@ -113,7 +113,7 @@ Sounds good to you? Then let's get started!

## ☁️ Try with a Single-Click

Gitpod will automatically setup an environment and run an instance for you.
Gitpod will automatically set up an environment and run an instance for you.

[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/leon-ai/leon)
@@ -100,7 +100,7 @@ export default class Client {
}
}
}
}, 1000)
}, 1_000)
}, data.duration + 500)
}
})
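A side note on the `1000` → `1_000` changes in this hunk and the ones below: numeric separators in JavaScript/TypeScript are purely readability sugar, so no value changes anywhere in this refactor. A minimal check:

```ts
// Numeric separators do not affect the value
console.log(1_000 === 1000) // true
```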
@@ -11,7 +11,7 @@ const config = {
server_host: import.meta.env.VITE_LEON_HOST,
server_port: import.meta.env.VITE_LEON_PORT,
min_decibels: -40, // Noise detection sensitivity
max_blank_time: 1000 // Maximum time to consider a blank (ms)
max_blank_time: 1_000 // Maximum time to consider a blank (ms)
}
const serverUrl =
import.meta.env.VITE_LEON_NODE_ENV === 'production'
@@ -58,7 +58,7 @@ document.addEventListener('DOMContentLoaded', async () => {
rec.enabled = false

// Ensure there are some data
if (blob.size >= 1000) {
if (blob.size >= 1_000) {
client.socket.emit('recognize', blob)
}
})
@@ -2,8 +2,7 @@
"answers": {
"success": {},
"errors": {
"not_found": "Sorry, it seems I cannot find that",
"nlu": "It might come from my natural language understanding, the error returned is: \"%error%\""
"not_found": "Sorry, it seems I cannot find that"
},
"synchronizer": {
"syncing_direct": "I will now synchronize the downloaded content on your current device. Don't worry, I will let you know once I'm done",
@@ -12,10 +11,10 @@
"synced_google_drive": "The new content is now available on Google Drive"
},
"random_errors": [
"Sorry, there is a problem with my system",
"Sorry, I don't work correctly",
"Sorry, you need to fix me",
"Sorry, I cannot do that because I'm broken"
"Sorry, there is a problem with my system. Please check my logs for further details",
"Sorry, I don't work correctly. Please look at my logs for more information",
"Sorry, you need to fix me. Please take a look at my logs for further information",
"Sorry, I cannot do that because I'm broken. Please check my logs for further details"
],
"random_skill_errors": [
"Sorry, it seems I have a problem with the \"%skill_name%\" skill from the \"%domain_name%\" domain",
@@ -22,7 +22,14 @@
"That works",
"Go ahead",
"Why not",
"Please"
"Please",
"Absolutely",
"Precisely",
"Spot on",
"Undoubtedly",
"Certainly",
"Without a doubt",
"Definitely"
],
"value": true
},
@@ -36,7 +43,10 @@
"No thanks",
"No I'm fine",
"Hell no",
"Please do not"
"Please do not",
"I disagree",
"Negative",
"Not at all"
],
"value": false
}
@@ -12,10 +12,10 @@
"synced_google_drive": "Le nouveau contenu est maintenant disponible sur Google Drive"
},
"random_errors": [
"Désolé, il y a un problème avec mon système",
"Désolé, je ne fonctionne pas correctement",
"Désolé, vous devez me réparer",
"Désolé, je ne peux aboutir à votre demande parce que je suis cassé"
"Désolé, il y a un problème avec mon système. Veuillez consulter mes logs pour plus de détails",
"Désolé, je ne fonctionne pas correctement. Merci de regarder mes logs pour plus d'information",
"Désolé, vous devez me réparer. Veuillez vérifier mes logs pour en savoir plus",
"Désolé, je ne peux aboutir à votre demande parce que je suis cassé. Regardez mes logs pour plus de détails"
],
"random_skill_errors": [
"Désolé, il semblerait y avoir un problème avec le skill \"%skill_name%\" du domaine \"%domain_name%\"",
@ -1,5 +1,94 @@
|
||||
{
|
||||
"endpoints": [
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/news/github_trends/run",
|
||||
"params": ["number", "daterange"],
|
||||
"entitiesType": "builtIn"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/news/product_hunt_trends/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/create_list",
|
||||
"params": ["list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/productivity/todo_list/view_lists",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/view_list",
|
||||
"params": ["list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/rename_list",
|
||||
"params": ["old_list", "new_list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/delete_list",
|
||||
"params": ["list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/add_todos",
|
||||
"params": ["todos", "list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/complete_todos",
|
||||
"params": ["todos", "list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/uncheck_todos",
|
||||
"params": ["todos", "list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/social_communication/mbti/setup",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/social_communication/mbti/quiz",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/utilities/have_i_been_pwned/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/utilities/is_it_down/run",
|
||||
"params": ["url"],
|
||||
"entitiesType": "builtIn"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/utilities/speed_test/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/utilities/youtube_downloader/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/games/akinator/choose_thematic",
|
||||
@ -116,95 +205,6 @@
|
||||
"method": "GET",
|
||||
"route": "/api/action/leon/welcome/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/news/github_trends/run",
|
||||
"params": ["number", "daterange"],
|
||||
"entitiesType": "builtIn"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/news/product_hunt_trends/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/create_list",
|
||||
"params": ["list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/productivity/todo_list/view_lists",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/view_list",
|
||||
"params": ["list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/rename_list",
|
||||
"params": ["old_list", "new_list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/delete_list",
|
||||
"params": ["list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/add_todos",
|
||||
"params": ["todos", "list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/complete_todos",
|
||||
"params": ["todos", "list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/productivity/todo_list/uncheck_todos",
|
||||
"params": ["todos", "list"],
|
||||
"entitiesType": "trim"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/social_communication/mbti/setup",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/social_communication/mbti/quiz",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/utilities/have_i_been_pwned/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "POST",
|
||||
"route": "/api/action/utilities/is_it_down/run",
|
||||
"params": ["url"],
|
||||
"entitiesType": "builtIn"
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/utilities/speed_test/run",
|
||||
"params": []
|
||||
},
|
||||
{
|
||||
"method": "GET",
|
||||
"route": "/api/action/utilities/youtube_downloader/run",
|
||||
"params": []
|
||||
}
|
||||
]
|
||||
}
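The generated `skills-endpoints.json` above maps each skill action to an HTTP route plus the params it expects. As an illustration only, calling one of the listed POST routes could look like the sketch below; the host/port come from the `wake` script defaults elsewhere in this diff, while the API key header name and the JSON body shape are assumptions, not taken from this commit.

```ts
// Hypothetical request to one of the routes listed above
;(async () => {
  const response = await fetch(
    'http://localhost:1337/api/action/news/github_trends/run',
    {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        // Header name assumed; the key itself is generated into LEON_HTTP_API_KEY (see the script further down)
        'X-API-Key': process.env.LEON_HTTP_API_KEY ?? ''
      },
      // Params declared for this route: "number" and "daterange" (body format assumed)
      body: JSON.stringify({ number: 5, daterange: 'last week' })
    }
  )
  console.log(await response.json())
})()
```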
116 package.json
@@ -40,6 +40,7 @@
|
||||
"postinstall": "ts-node scripts/setup/setup.js",
|
||||
"dev:app": "vite --config app/vite.config.js",
|
||||
"dev:server": "npm run train && npm run generate:skills-endpoints && cross-env LEON_NODE_ENV=development tsc-watch --noClear --onSuccess \"nodemon\"",
|
||||
"dev:server:no-lint": "npm run train && npm run generate:skills-endpoints && cross-env LEON_NODE_ENV=development \"nodemon\"",
|
||||
"wake": "cross-env LEON_HOST=http://localhost LEON_PORT=1337 node hotword/index.js",
|
||||
"delete-dist:server": "shx rm -rf ./server/dist",
|
||||
"clean:python-deps": "shx rm -rf ./bridges/python/src/.venv && npm run postinstall",
|
||||
@ -65,73 +66,76 @@
|
||||
"docker:check": "docker run --rm --interactive leon-ai/leon npm run check"
|
||||
},
|
||||
"dependencies": {
|
||||
"@aws-sdk/client-polly": "^3.18.0",
|
||||
"@fastify/static": "^6.5.0",
|
||||
"@ffmpeg-installer/ffmpeg": "^1.1.0",
|
||||
"@ffprobe-installer/ffprobe": "^1.4.1",
|
||||
"@google-cloud/speech": "^4.2.0",
|
||||
"@google-cloud/text-to-speech": "^3.2.1",
|
||||
"@nlpjs/builtin-microsoft": "^4.22.7",
|
||||
"@nlpjs/core-loader": "^4.22.7",
|
||||
"@nlpjs/lang-all": "^4.22.12",
|
||||
"@nlpjs/nlp": "^4.22.17",
|
||||
"@aws-sdk/client-polly": "3.18.0",
|
||||
"@fastify/static": "6.9.0",
|
||||
"@ffmpeg-installer/ffmpeg": "1.1.0",
|
||||
"@ffprobe-installer/ffprobe": "1.4.1",
|
||||
"@google-cloud/speech": "4.2.0",
|
||||
"@google-cloud/text-to-speech": "3.2.1",
|
||||
"@nlpjs/builtin-microsoft": "4.22.7",
|
||||
"@nlpjs/core-loader": "4.22.7",
|
||||
"@nlpjs/lang-all": "4.22.12",
|
||||
"@nlpjs/nlp": "4.22.17",
|
||||
"@segment/ajv-human-errors": "2.1.2",
|
||||
"@sinclair/typebox": "0.25.8",
|
||||
"ajv": "8.11.0",
|
||||
"ajv-formats": "2.1.1",
|
||||
"archiver": "^5.3.1",
|
||||
"async": "^3.2.0",
|
||||
"archiver": "5.3.1",
|
||||
"async": "3.2.4",
|
||||
"axios": "1.1.2",
|
||||
"cross-env": "^7.0.3",
|
||||
"dayjs": "^1.11.5",
|
||||
"dotenv": "^10.0.0",
|
||||
"execa": "^5.0.0",
|
||||
"cross-env": "7.0.3",
|
||||
"dayjs": "1.11.5",
|
||||
"dotenv": "10.0.0",
|
||||
"execa": "5.0.0",
|
||||
"extract-zip": "2.0.1",
|
||||
"fastify": "^4.5.3",
|
||||
"fluent-ffmpeg": "^2.1.2",
|
||||
"getos": "^3.2.1",
|
||||
"googleapis": "^67.1.1",
|
||||
"ibm-watson": "^6.1.1",
|
||||
"fastify": "4.15.0",
|
||||
"fluent-ffmpeg": "2.1.2",
|
||||
"getos": "3.2.1",
|
||||
"googleapis": "67.1.1",
|
||||
"ibm-watson": "6.1.1",
|
||||
"node-wav": "0.0.2",
|
||||
"os-name": "^4.0.1",
|
||||
"pretty-bytes": "^5.6.0",
|
||||
"pretty-ms": "^7.0.1",
|
||||
"socket.io": "^4.5.2",
|
||||
"socket.io-client": "^4.5.2",
|
||||
"stt": "^1.4.0",
|
||||
"tree-kill": "^1.2.2"
|
||||
"os-name": "4.0.1",
|
||||
"pretty-bytes": "5.6.0",
|
||||
"pretty-ms": "7.0.1",
|
||||
"socket.io": "4.5.2",
|
||||
"socket.io-client": "4.5.2",
|
||||
"stt": "1.4.0",
|
||||
"tree-kill": "1.2.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@nlpjs/utils": "^4.24.1",
|
||||
"@swc/core": "^1.3.14",
|
||||
"@tsconfig/node16-strictest": "^1.0.3",
|
||||
"@nlpjs/utils": "4.24.1",
|
||||
"@swc/core": "1.3.14",
|
||||
"@tsconfig/node16-strictest": "1.0.4",
|
||||
"@types/archiver": "5.3.2",
|
||||
"@types/cli-spinner": "0.2.1",
|
||||
"@types/node": "^18.7.13",
|
||||
"@typescript-eslint/eslint-plugin": "^5.36.1",
|
||||
"@typescript-eslint/parser": "^5.36.1",
|
||||
"cli-spinner": "^0.2.10",
|
||||
"@types/fluent-ffmpeg": "2.1.20",
|
||||
"@types/node": "18.7.13",
|
||||
"@types/node-wav": "0.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "5.55.0",
|
||||
"@typescript-eslint/parser": "5.55.0",
|
||||
"cli-spinner": "0.2.10",
|
||||
"eslint": "8.22.0",
|
||||
"eslint-config-prettier": "^8.5.0",
|
||||
"eslint-import-resolver-typescript": "^3.5.1",
|
||||
"eslint-plugin-import": "^2.26.0",
|
||||
"eslint-config-prettier": "8.5.0",
|
||||
"eslint-import-resolver-typescript": "3.5.1",
|
||||
"eslint-plugin-import": "2.26.0",
|
||||
"eslint-plugin-unicorn": "43.0.2",
|
||||
"git-changelog": "^2.0.0",
|
||||
"husky": "^7.0.0",
|
||||
"inquirer": "^8.1.0",
|
||||
"jest": "^27.4.7",
|
||||
"jest-canvas-mock": "^2.3.1",
|
||||
"jest-extended": "^2.0.0",
|
||||
"json": "^10.0.0",
|
||||
"lint-staged": "^13.0.3",
|
||||
"nodemon": "^2.0.19",
|
||||
"prettier": "2.7.1",
|
||||
"resolve-tspaths": "^0.7.4",
|
||||
"semver": "^7.3.5",
|
||||
"shx": "^0.3.3",
|
||||
"ts-node": "^10.9.1",
|
||||
"tsc-watch": "^5.0.3",
|
||||
"tsconfig-paths": "^4.1.0",
|
||||
"typescript": "^4.8.2",
|
||||
"vite": "^3.0.9"
|
||||
"git-changelog": "2.0.0",
|
||||
"husky": "7.0.0",
|
||||
"inquirer": "8.1.0",
|
||||
"jest": "27.4.7",
|
||||
"jest-canvas-mock": "2.3.1",
|
||||
"jest-extended": "2.0.0",
|
||||
"json": "10.0.0",
|
||||
"lint-staged": "13.0.3",
|
||||
"nodemon": "2.0.19",
|
||||
"prettier": "2.8.7",
|
||||
"resolve-tspaths": "0.8.8",
|
||||
"semver": "7.3.5",
|
||||
"shx": "0.3.3",
|
||||
"ts-node": "10.9.1",
|
||||
"tsc-watch": "6.0.0",
|
||||
"tsconfig-paths": "4.1.2",
|
||||
"typescript": "5.0.2",
|
||||
"vite": "3.0.9"
|
||||
}
|
||||
}
|
||||
|
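Throughout the dependency hunks above, the `^` range prefixes are dropped so each package resolves to the exact version listed (for example `"fastify": "4.15.0"` instead of `"fastify": "^4.5.3"`). A small sketch of what that changes, using the `semver` package that is already in the dev dependency list; the version numbers below are taken from the diff, the snippet itself is only illustrative:

```ts
import semver from 'semver'

// "^4.5.3" accepts any 4.x release >= 4.5.3, so the installed version could drift between installs
console.log(semver.satisfies('4.15.0', '^4.5.3')) // true
// A bare "4.15.0" only ever resolves to that exact release (reproducible installs)
console.log(semver.satisfies('4.16.0', '4.15.0')) // false
```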
@ -14,9 +14,10 @@ import {
|
||||
PYTHON_BRIDGE_BIN_NAME,
|
||||
TCP_SERVER_BIN_NAME
|
||||
} from '@/constants'
|
||||
import { OSTypes } from '@/types'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { LoaderHelper } from '@/helpers/loader-helper'
|
||||
import { OSHelper, OSTypes } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
|
||||
/**
|
||||
* Build binaries for the given OS according to the given build target
|
||||
@ -69,7 +70,7 @@ BUILD_TARGETS.set('tcp-server', {
|
||||
} = BUILD_TARGETS.get(givenBuildTarget)
|
||||
const buildPath = path.join(distPath, BINARIES_FOLDER_NAME)
|
||||
|
||||
const { type: osType } = OSHelper.getInformation()
|
||||
const { type: osType } = SystemHelper.getInformation()
|
||||
|
||||
/**
|
||||
* Install requirements
|
||||
|
@ -1,7 +1,7 @@
|
||||
import execa from 'execa'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { OSHelper } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
|
||||
/**
|
||||
* Check OS environment
|
||||
@ -10,7 +10,7 @@ export default () =>
|
||||
new Promise(async (resolve, reject) => {
|
||||
LogHelper.info('Checking OS environment...')
|
||||
|
||||
const info = OSHelper.getInformation()
|
||||
const info = SystemHelper.getInformation()
|
||||
|
||||
if (info.type === 'windows') {
|
||||
LogHelper.error('Voice offline mode is not available on Windows')
|
||||
|
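Several hunks in this commit replace `OSHelper` with `SystemHelper` while keeping the call sites unchanged (`getInformation()`, `getTotalRAM()`, `getNumberOfCPUCores()`). Based only on how the results are destructured and compared in this diff, the returned shape looks roughly like the sketch below; the exact field list and union values are assumptions:

```ts
// Assumed shape, inferred from `const { type: osType, cpuArchitecture } = SystemHelper.getInformation()`
// and checks such as `info.type === 'windows'` / `'macos'` elsewhere in this diff
interface SystemInformation {
  type: 'windows' | 'macos' | 'linux' | 'unknown'
  cpuArchitecture: string
}

declare const info: SystemInformation
if (info.type === 'windows') {
  // e.g. check-os-isready reports that voice offline mode is not available on Windows
}
```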
@ -13,7 +13,7 @@ import getos from 'getos'
|
||||
|
||||
import { version } from '@@/package.json'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { OSHelper } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
import {
|
||||
PYTHON_BRIDGE_BIN_PATH,
|
||||
TCP_SERVER_BIN_PATH,
|
||||
@ -30,7 +30,7 @@ dotenv.config()
|
||||
;(async () => {
|
||||
try {
|
||||
const nodeMinRequiredVersion = '16'
|
||||
const npmMinRequiredVersion = '5'
|
||||
const npmMinRequiredVersion = '8'
|
||||
const minimumRequiredRAM = 4
|
||||
const flitePath = 'bin/flite/flite'
|
||||
const coquiLanguageModelPath = 'bin/coqui/huge-vocabulary.scorer'
|
||||
@ -142,7 +142,7 @@ dotenv.config()
|
||||
osName: osName(),
|
||||
distro: null
|
||||
}
|
||||
const totalRAMInGB = OSHelper.getTotalRAM()
|
||||
const totalRAMInGB = SystemHelper.getTotalRAM()
|
||||
|
||||
if (totalRAMInGB < minimumRequiredRAM) {
|
||||
report.can_run.v = false
|
||||
@ -215,7 +215,9 @@ dotenv.config()
|
||||
const p = await command(
|
||||
`${PYTHON_BRIDGE_BIN_PATH} "${path.join(
|
||||
process.cwd(),
|
||||
'scripts/assets/intent-object.json'
|
||||
'scripts',
|
||||
'assets',
|
||||
'intent-object.json'
|
||||
)}"`,
|
||||
{ shell: true }
|
||||
)
|
||||
@ -310,7 +312,8 @@ dotenv.config()
|
||||
|
||||
if (
|
||||
!fs.existsSync(globalResolversNlpModelPath) ||
|
||||
!Object.keys(fs.readFileSync(globalResolversNlpModelPath)).length
|
||||
!Object.keys(await fs.promises.readFile(globalResolversNlpModelPath))
|
||||
.length
|
||||
) {
|
||||
const state = 'Global resolvers NLP model not found or broken'
|
||||
|
||||
@ -338,7 +341,8 @@ dotenv.config()
|
||||
|
||||
if (
|
||||
!fs.existsSync(skillsResolversNlpModelPath) ||
|
||||
!Object.keys(fs.readFileSync(skillsResolversNlpModelPath)).length
|
||||
!Object.keys(await fs.promises.readFile(skillsResolversNlpModelPath))
|
||||
.length
|
||||
) {
|
||||
const state = 'Skills resolvers NLP model not found or broken'
|
||||
|
||||
@ -366,7 +370,7 @@ dotenv.config()
|
||||
|
||||
if (
|
||||
!fs.existsSync(mainNlpModelPath) ||
|
||||
!Object.keys(fs.readFileSync(mainNlpModelPath)).length
|
||||
!Object.keys(await fs.promises.readFile(mainNlpModelPath)).length
|
||||
) {
|
||||
const state = 'Main NLP model not found or broken'
|
||||
|
||||
@ -393,7 +397,7 @@ dotenv.config()
|
||||
LogHelper.info('Amazon Polly TTS')
|
||||
|
||||
try {
|
||||
const json = JSON.parse(fs.readFileSync(amazonPath))
|
||||
const json = JSON.parse(await fs.promises.readFile(amazonPath))
|
||||
if (
|
||||
json.credentials.accessKeyId === '' ||
|
||||
json.credentials.secretAccessKey === ''
|
||||
@ -411,7 +415,7 @@ dotenv.config()
|
||||
LogHelper.info('Google Cloud TTS/STT')
|
||||
|
||||
try {
|
||||
const json = JSON.parse(fs.readFileSync(googleCloudPath))
|
||||
const json = JSON.parse(await fs.promises.readFile(googleCloudPath))
|
||||
const results = []
|
||||
Object.keys(json).forEach((item) => {
|
||||
if (json[item] === '') results.push(false)
|
||||
@ -432,7 +436,7 @@ dotenv.config()
|
||||
LogHelper.info('Watson TTS')
|
||||
|
||||
try {
|
||||
const json = JSON.parse(fs.readFileSync(watsonTtsPath))
|
||||
const json = JSON.parse(await fs.promises.readFile(watsonTtsPath))
|
||||
const results = []
|
||||
Object.keys(json).forEach((item) => {
|
||||
if (json[item] === '') results.push(false)
|
||||
@ -453,7 +457,7 @@ dotenv.config()
|
||||
if (!fs.existsSync(flitePath)) {
|
||||
report.can_offline_tts.v = false
|
||||
LogHelper.warning(
|
||||
`Cannot find ${flitePath}. You can setup the offline TTS by running: "npm run setup:offline-tts"\n`
|
||||
`Cannot find ${flitePath}. You can set up the offline TTS by running: "npm run setup:offline-tts"\n`
|
||||
)
|
||||
} else {
|
||||
LogHelper.success(`Found Flite at ${flitePath}\n`)
|
||||
@ -462,7 +466,7 @@ dotenv.config()
|
||||
LogHelper.info('Watson STT')
|
||||
|
||||
try {
|
||||
const json = JSON.parse(fs.readFileSync(watsonSttPath))
|
||||
const json = JSON.parse(await fs.promises.readFile(watsonSttPath))
|
||||
const results = []
|
||||
Object.keys(json).forEach((item) => {
|
||||
if (json[item] === '') results.push(false)
|
||||
|
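The hunks above (and several below) repeatedly swap synchronous `fs` calls for their `fs.promises` counterparts, so the check script no longer blocks the event loop while reading model and credential files. A condensed sketch of that before/after pattern; the file path is illustrative, not taken verbatim from the script:

```ts
import fs from 'node:fs'

// Illustrative path; in the script above it points at a voice credentials file
const configPath = 'core/config/voice/amazon.json'

;(async () => {
  // Before (blocking):
  // const json = JSON.parse(fs.readFileSync(configPath))

  // After, as applied throughout this commit (awaited, non-blocking):
  const json = JSON.parse(await fs.promises.readFile(configPath, 'utf8'))
  console.log(Object.keys(json))
})()
```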
@ -22,13 +22,13 @@ export default () =>
|
||||
try {
|
||||
// TODO: handle case where the memory folder contain multiple DB nodes
|
||||
const dbFolder = join(currentSkill.path, 'memory')
|
||||
const dbTestFiles = fs
|
||||
.readdirSync(dbFolder)
|
||||
.filter((entity) => entity.indexOf('.spec.json') !== -1)
|
||||
const dbTestFiles = (await fs.promises.readdir(dbFolder)).filter(
|
||||
(entity) => entity.indexOf('.spec.json') !== -1
|
||||
)
|
||||
|
||||
if (dbTestFiles.length > 0) {
|
||||
LogHelper.info(`Deleting ${dbTestFiles[0]}...`)
|
||||
fs.unlinkSync(join(dbFolder, dbTestFiles[0]))
|
||||
await fs.promises.unlink(join(dbFolder, dbTestFiles[0]))
|
||||
LogHelper.success(`${dbTestFiles[0]} deleted`)
|
||||
}
|
||||
} catch (e) {
|
||||
|
@@ -6,24 +6,29 @@ import { LogHelper } from '@/helpers/log-helper'
* This script is executed after "git commit" or "git merge" (Git hook https://git-scm.com/docs/githooks#_commit_msg)
* it ensures the authenticity of commit messages
*/
LogHelper.info('Checking commit message...')
;(async () => {
LogHelper.info('Checking commit message...')

const commitEditMsgFile = '.git/COMMIT_EDITMSG'
const commitEditMsgFile = '.git/COMMIT_EDITMSG'

if (fs.existsSync(commitEditMsgFile)) {
try {
const commitMessage = fs.readFileSync(commitEditMsgFile, 'utf8')
const regex =
'(build|BREAKING|chore|ci|docs|feat|fix|perf|refactor|style|test)(\\((web app|docker|server|hotword|tcp server|python bridge|skill\\/([\\w-]+)))?\\)?: .{1,50}'
if (fs.existsSync(commitEditMsgFile)) {
try {
const commitMessage = await fs.promises.readFile(
commitEditMsgFile,
'utf8'
)
const regex =
'(build|BREAKING|chore|ci|docs|feat|fix|perf|refactor|style|test)(\\((web app|docker|server|hotword|tcp server|python bridge|skill\\/([\\w-]+)))?\\)?: .{1,50}'

if (commitMessage.match(regex) !== null) {
LogHelper.success('Commit message validated')
} else {
LogHelper.error(`Commit message does not match the format: ${regex}`)
if (commitMessage.match(regex) !== null) {
LogHelper.success('Commit message validated')
} else {
LogHelper.error(`Commit message does not match the format: ${regex}`)
process.exit(1)
}
} catch (e) {
LogHelper.error(e.message)
process.exit(1)
}
} catch (e) {
LogHelper.error(e.message)
process.exit(1)
}
}
})()
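To make the validated format concrete, here are messages checked the same way the hook above does; the example commit messages themselves are made up for illustration:

```ts
const regex =
  '(build|BREAKING|chore|ci|docs|feat|fix|perf|refactor|style|test)(\\((web app|docker|server|hotword|tcp server|python bridge|skill\\/([\\w-]+)))?\\)?: .{1,50}'

console.log('feat(server): refactor HTTP server'.match(regex) !== null) // true
console.log('fix(skill/todo_list): handle empty list name'.match(regex) !== null) // true
console.log('update readme'.match(regex) !== null) // false — missing type prefix
```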
@ -23,7 +23,7 @@ const generateHttpApiKey = () =>
|
||||
const str = StringHelper.random(11)
|
||||
const dotEnvPath = path.join(process.cwd(), '.env')
|
||||
const envVarKey = 'LEON_HTTP_API_KEY'
|
||||
let content = fs.readFileSync(dotEnvPath, 'utf8')
|
||||
let content = await fs.promises.readFile(dotEnvPath, 'utf8')
|
||||
|
||||
shasum.update(str)
|
||||
const sha1 = shasum.digest('hex')
|
||||
@ -39,7 +39,7 @@ const generateHttpApiKey = () =>
|
||||
|
||||
content = lines.join('\n')
|
||||
|
||||
fs.writeFileSync(dotEnvPath, content)
|
||||
await fs.promises.writeFile(dotEnvPath, content)
|
||||
LogHelper.success('HTTP API key generated')
|
||||
|
||||
resolve()
|
||||
|
@ -39,7 +39,9 @@ export default () =>
|
||||
|
||||
// Check if a new routing generation is necessary
|
||||
if (fs.existsSync(outputFilePath)) {
|
||||
const mtimeEndpoints = fs.statSync(outputFilePath).mtime.getTime()
|
||||
const mtimeEndpoints = (
|
||||
await fs.promises.stat(outputFilePath)
|
||||
).mtime.getTime()
|
||||
|
||||
let i = 0
|
||||
for (const currentDomain of skillDomains.values()) {
|
||||
@ -49,7 +51,7 @@ export default () =>
|
||||
for (let j = 0; j < skillKeys.length; j += 1) {
|
||||
const skillFriendlyName = skillKeys[j]
|
||||
const currentSkill = currentDomain.skills[skillFriendlyName]
|
||||
const fileInfo = fs.statSync(
|
||||
const fileInfo = await fs.promises.stat(
|
||||
path.join(currentSkill.path, 'config', `${lang}.json`)
|
||||
)
|
||||
const mtime = fileInfo.mtime.getTime()
|
||||
@ -91,7 +93,7 @@ export default () =>
|
||||
`${lang}.json`
|
||||
)
|
||||
const { actions } = JSON.parse(
|
||||
fs.readFileSync(configFilePath, 'utf8')
|
||||
await fs.promises.readFile(configFilePath, 'utf8')
|
||||
)
|
||||
const actionsKeys = Object.keys(actions)
|
||||
|
||||
@ -145,7 +147,10 @@ export default () =>
|
||||
|
||||
LogHelper.info(`Writing ${outputFile} file...`)
|
||||
try {
|
||||
fs.writeFileSync(outputFilePath, JSON.stringify(finalObj, null, 2))
|
||||
await fs.promises.writeFile(
|
||||
outputFilePath,
|
||||
JSON.stringify(finalObj, null, 2)
|
||||
)
|
||||
LogHelper.success(`${outputFile} file generated`)
|
||||
resolve()
|
||||
} catch (e) {
|
||||
|
@ -35,9 +35,12 @@ export default (version) =>
|
||||
|
||||
const repoUrl = sh.stdout.substr(0, sh.stdout.lastIndexOf('.git'))
|
||||
const previousTag = sh.stdout.substr(sh.stdout.indexOf('\n') + 1).trim()
|
||||
const changelogData = fs.readFileSync(changelog, 'utf8')
|
||||
const changelogData = await fs.promises.readFile(changelog, 'utf8')
|
||||
const compareUrl = `${repoUrl}/compare/${previousTag}...v${version}`
|
||||
let tmpData = fs.readFileSync(`scripts/tmp/${tmpChangelog}`, 'utf8')
|
||||
let tmpData = await fs.promises.readFile(
|
||||
`scripts/tmp/${tmpChangelog}`,
|
||||
'utf8'
|
||||
)
|
||||
|
||||
LogHelper.success(`Remote origin URL gotten: ${repoUrl}.git`)
|
||||
LogHelper.success(`Previous tag gotten: ${previousTag}`)
|
||||
@ -46,14 +49,14 @@ export default (version) =>
|
||||
tmpData = tmpData.replace(version, `[${version}](${compareUrl})`)
|
||||
}
|
||||
|
||||
fs.writeFile(changelog, `${tmpData}${changelogData}`, (err) => {
|
||||
if (err) LogHelper.error(`Failed to write into file: ${err}`)
|
||||
else {
|
||||
fs.unlinkSync(`scripts/tmp/${tmpChangelog}`)
|
||||
LogHelper.success(`${changelog} generated`)
|
||||
resolve()
|
||||
}
|
||||
})
|
||||
try {
|
||||
await fs.promises.writeFile(changelog, `${tmpData}${changelogData}`)
|
||||
await fs.promises.unlink(`scripts/tmp/${tmpChangelog}`)
|
||||
LogHelper.success(`${changelog} generated`)
|
||||
resolve()
|
||||
} catch (error) {
|
||||
LogHelper.error(`Failed to write into file: ${error}`)
|
||||
}
|
||||
} catch (e) {
|
||||
LogHelper.error(`Error during git commands: ${e}`)
|
||||
reject(e)
|
||||
|
@ -9,6 +9,6 @@ import setupHotword from './setup-hotword'
|
||||
try {
|
||||
await setupHotword()
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to setup offline hotword: ${e}`)
|
||||
LogHelper.error(`Failed to set up offline hotword: ${e}`)
|
||||
}
|
||||
})()
|
||||
|
@ -9,6 +9,6 @@ import setupStt from './setup-stt'
|
||||
try {
|
||||
await setupStt()
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to setup offline STT: ${e}`)
|
||||
LogHelper.error(`Failed to set up offline STT: ${e}`)
|
||||
}
|
||||
})()
|
||||
|
@ -9,6 +9,6 @@ import setupTts from './setup-tts'
|
||||
try {
|
||||
await setupTts()
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to setup offline TTS: ${e}`)
|
||||
LogHelper.error(`Failed to set up offline TTS: ${e}`)
|
||||
}
|
||||
})()
|
||||
|
@ -1,7 +1,7 @@
|
||||
import { command } from 'execa'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { OSHelper } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
|
||||
/**
|
||||
* Setup offline hotword detection
|
||||
@ -10,7 +10,7 @@ export default () =>
|
||||
new Promise(async (resolve, reject) => {
|
||||
LogHelper.info('Setting up offline hotword detection...')
|
||||
|
||||
const info = OSHelper.getInformation()
|
||||
const info = SystemHelper.getInformation()
|
||||
let pkgm = 'apt-get install'
|
||||
if (info.type === 'macos') {
|
||||
pkgm = 'brew'
|
||||
|
@ -3,10 +3,10 @@ import fs from 'node:fs'
|
||||
import { command } from 'execa'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { OSHelper } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
|
||||
/**
|
||||
* Setup offline speech-to-text
|
||||
* Set up offline speech-to-text
|
||||
*/
|
||||
export default () =>
|
||||
new Promise(async (resolve, reject) => {
|
||||
@ -17,7 +17,7 @@ export default () =>
|
||||
// check this repo for updates: https://github.com/coqui-ai/STT-models/tree/main/english/coqui
|
||||
const coquiModelVersion = '1.0.0'
|
||||
let downloader = 'wget'
|
||||
if (OSHelper.getInformation().type === 'macos') {
|
||||
if (SystemHelper.getInformation().type === 'macos') {
|
||||
downloader = 'curl -L -O'
|
||||
}
|
||||
|
||||
|
@ -3,10 +3,10 @@ import fs from 'node:fs'
|
||||
import { command } from 'execa'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { OSHelper } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
|
||||
/**
|
||||
* Setup offline text-to-speech
|
||||
* Set up offline text-to-speech
|
||||
*/
|
||||
export default () =>
|
||||
new Promise(async (resolve, reject) => {
|
||||
@ -15,11 +15,11 @@ export default () =>
|
||||
const destFliteFolder = 'bin/flite'
|
||||
const tmpDir = 'scripts/tmp'
|
||||
let makeCores = ''
|
||||
if (OSHelper.getNumberOfCPUCores() > 2) {
|
||||
makeCores = `-j ${OSHelper.getNumberOfCPUCores() - 2}`
|
||||
if (SystemHelper.getNumberOfCPUCores() > 2) {
|
||||
makeCores = `-j ${SystemHelper.getNumberOfCPUCores() - 2}`
|
||||
}
|
||||
let downloader = 'wget'
|
||||
if (OSHelper.getInformation().type === 'macos') {
|
||||
if (SystemHelper.getInformation().type === 'macos') {
|
||||
downloader = 'curl -L -O'
|
||||
}
|
||||
|
||||
|
@ -4,23 +4,23 @@ import path from 'node:path'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
/**
|
||||
* Setup Leon's core configuration
|
||||
* Set up Leon's core configuration
|
||||
*/
|
||||
export default () =>
|
||||
new Promise((resolve) => {
|
||||
new Promise(async (resolve) => {
|
||||
LogHelper.info('Configuring core...')
|
||||
|
||||
const dir = 'core/config'
|
||||
const list = (dir) => {
|
||||
const entities = fs.readdirSync(dir)
|
||||
const list = async (dir) => {
|
||||
const entities = await fs.promises.readdir(dir)
|
||||
|
||||
// Browse core config entities
|
||||
for (let i = 0; i < entities.length; i += 1) {
|
||||
const file = `${entities[i].replace('.sample.json', '.json')}`
|
||||
// Recursive if the entity is a directory
|
||||
const way = path.join(dir, entities[i])
|
||||
if (fs.statSync(way).isDirectory()) {
|
||||
list(way)
|
||||
if ((await fs.promises.stat(way)).isDirectory()) {
|
||||
await list(way)
|
||||
} else if (
|
||||
entities[i].indexOf('.sample.json') !== -1 &&
|
||||
!fs.existsSync(`${dir}/${file}`)
|
||||
@ -40,6 +40,6 @@ export default () =>
|
||||
}
|
||||
}
|
||||
|
||||
list(dir)
|
||||
await list(dir)
|
||||
resolve()
|
||||
})
|
||||
|
@ -11,9 +11,10 @@ import {
|
||||
PYTHON_BRIDGE_SRC_PATH,
|
||||
TCP_SERVER_SRC_PATH
|
||||
} from '@/constants'
|
||||
import { CPUArchitectures, OSTypes } from '@/types'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { LoaderHelper } from '@/helpers/loader-helper'
|
||||
import { CPUArchitectures, OSHelper, OSTypes } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
|
||||
/**
|
||||
* Set up development environment according to the given setup target
|
||||
@ -128,19 +129,19 @@ SPACY_MODELS.set('fr', {
|
||||
|
||||
const pipfileMtime = fs.statSync(pipfilePath).mtime
|
||||
const hasDotVenv = fs.existsSync(dotVenvPath)
|
||||
const { type: osType, cpuArchitecture } = OSHelper.getInformation()
|
||||
const { type: osType, cpuArchitecture } = SystemHelper.getInformation()
|
||||
const installPythonPackages = async () => {
|
||||
LogHelper.info(`Installing Python packages from ${pipfilePath}.lock...`)
|
||||
|
||||
// Delete .venv directory to reset the development environment
|
||||
if (hasDotVenv) {
|
||||
LogHelper.info(`Deleting ${dotVenvPath}...`)
|
||||
fs.rmSync(dotVenvPath, { recursive: true, force: true })
|
||||
await fs.promises.rm(dotVenvPath, { recursive: true, force: true })
|
||||
LogHelper.success(`${dotVenvPath} deleted`)
|
||||
}
|
||||
|
||||
try {
|
||||
await command(`pipenv install --verbose --site-packages`, {
|
||||
await command('pipenv install --verbose --site-packages', {
|
||||
shell: true,
|
||||
stdio: 'inherit'
|
||||
})
|
||||
@ -154,21 +155,21 @@ SPACY_MODELS.set('fr', {
|
||||
LogHelper.info(
|
||||
'Installing Rust installer as it is needed for the "tokenizers" package for macOS ARM64 architecture...'
|
||||
)
|
||||
await command(`curl https://sh.rustup.rs -sSf | sh -s -- -y`, {
|
||||
await command('curl https://sh.rustup.rs -sSf | sh -s -- -y', {
|
||||
shell: true,
|
||||
stdio: 'inherit'
|
||||
})
|
||||
LogHelper.success('Rust installer installed')
|
||||
|
||||
LogHelper.info('Reloading configuration from "$HOME/.cargo/env"...')
|
||||
await command(`source "$HOME/.cargo/env"`, {
|
||||
await command('source "$HOME/.cargo/env"', {
|
||||
shell: true,
|
||||
stdio: 'inherit'
|
||||
})
|
||||
LogHelper.success('Configuration reloaded')
|
||||
|
||||
LogHelper.info('Checking Rust compiler version...')
|
||||
await command(`rustc --version`, {
|
||||
await command('rustc --version', {
|
||||
shell: true,
|
||||
stdio: 'inherit'
|
||||
})
|
||||
@ -210,7 +211,7 @@ SPACY_MODELS.set('fr', {
|
||||
await installPythonPackages()
|
||||
} else {
|
||||
if (fs.existsSync(dotProjectPath)) {
|
||||
const dotProjectMtime = fs.statSync(dotProjectPath).mtime
|
||||
const dotProjectMtime = (await fs.promises.stat(dotProjectPath)).mtime
|
||||
|
||||
// Check if Python deps tree has been modified since the initial setup
|
||||
if (pipfileMtime > dotProjectMtime) {
|
||||
|
@ -7,7 +7,7 @@ import { LogHelper } from '@/helpers/log-helper'
|
||||
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
|
||||
|
||||
/**
|
||||
* Setup skills configuration
|
||||
* Set up skills configuration
|
||||
*/
|
||||
export default () =>
|
||||
new Promise(async (resolve, reject) => {
|
||||
@ -31,10 +31,10 @@ export default () =>
|
||||
// Check if the config and config.sample file exist
|
||||
if (fs.existsSync(configFile) && fs.existsSync(configSampleFile)) {
|
||||
const config = JSON.parse(
|
||||
fs.readFileSync(configFile, 'utf8')
|
||||
await fs.promises.readFile(configFile, 'utf8')
|
||||
)?.configurations
|
||||
const configSample = JSON.parse(
|
||||
fs.readFileSync(configSampleFile, 'utf8')
|
||||
await fs.promises.readFile(configSampleFile, 'utf8')
|
||||
)?.configurations
|
||||
const configKeys = Object.keys(config)
|
||||
const configSampleKeys = Object.keys(configSample)
|
||||
|
@ -8,16 +8,17 @@ import { LogHelper } from '@/helpers/log-helper'
|
||||
* Add global entities annotations (@...)
|
||||
*/
|
||||
export default (lang, nlp) =>
|
||||
new Promise((resolve) => {
|
||||
new Promise(async (resolve) => {
|
||||
LogHelper.title('Global entities training')
|
||||
|
||||
const globalEntitiesPath = path.join(
|
||||
process.cwd(),
|
||||
'core/data',
|
||||
'core',
|
||||
'data',
|
||||
lang,
|
||||
'global-entities'
|
||||
)
|
||||
const globalEntityFiles = fs.readdirSync(globalEntitiesPath)
|
||||
const globalEntityFiles = await fs.promises.readdir(globalEntitiesPath)
|
||||
const newEntitiesObj = {}
|
||||
|
||||
for (let i = 0; i < globalEntityFiles.length; i += 1) {
|
||||
@ -27,7 +28,9 @@ export default (lang, nlp) =>
|
||||
globalEntitiesPath,
|
||||
globalEntityFileName
|
||||
)
|
||||
const { options } = JSON.parse(fs.readFileSync(globalEntityPath, 'utf8'))
|
||||
const { options } = JSON.parse(
|
||||
await fs.promises.readFile(globalEntityPath, 'utf8')
|
||||
)
|
||||
const optionKeys = Object.keys(options)
|
||||
const optionsObj = {}
|
||||
|
||||
|
@ -9,22 +9,23 @@ import { LogHelper } from '@/helpers/log-helper'
|
||||
* Train global resolvers
|
||||
*/
|
||||
export default (lang, nlp) =>
|
||||
new Promise((resolve) => {
|
||||
new Promise(async (resolve) => {
|
||||
LogHelper.title('Global resolvers training')
|
||||
|
||||
const resolversPath = path.join(
|
||||
process.cwd(),
|
||||
'core/data',
|
||||
'core',
|
||||
'data',
|
||||
lang,
|
||||
'global-resolvers'
|
||||
)
|
||||
const resolverFiles = fs.readdirSync(resolversPath)
|
||||
const resolverFiles = await fs.promises.readdir(resolversPath)
|
||||
|
||||
for (let i = 0; i < resolverFiles.length; i += 1) {
|
||||
const resolverFileName = resolverFiles[i]
|
||||
const resolverPath = path.join(resolversPath, resolverFileName)
|
||||
const { name: resolverName, intents: resolverIntents } = JSON.parse(
|
||||
fs.readFileSync(resolverPath, 'utf8')
|
||||
await fs.promises.readFile(resolverPath, 'utf8')
|
||||
)
|
||||
const intentKeys = Object.keys(resolverIntents)
|
||||
|
||||
|
@@ -3,8 +3,8 @@ import fs from 'node:fs'

import dotenv from 'dotenv'

import type { LongLanguageCode } from '@/helpers/lang-helper'
import { OSHelper } from '@/helpers/os-helper'
import type { LongLanguageCode } from '@/types'
import { SystemHelper } from '@/helpers/system-helper'

dotenv.config()

@@ -17,7 +17,7 @@ export const GITHUB_URL = 'https://github.com/leon-ai/leon'
/**
* Binaries / distribution
*/
export const BINARIES_FOLDER_NAME = OSHelper.getBinariesFolderName()
export const BINARIES_FOLDER_NAME = SystemHelper.getBinariesFolderName()
export const PYTHON_BRIDGE_DIST_PATH = path.join('bridges', 'python', 'dist')
export const TCP_SERVER_DIST_PATH = path.join('tcp_server', 'dist')

@@ -99,5 +99,12 @@ export const TCP_SERVER_PORT = Number(process.env['LEON_PY_TCP_SERVER_PORT'])
/**
* Paths
*/
export const BIN_PATH = path.join('bin')
export const GLOBAL_DATA_PATH = path.join('core', 'data')
export const MODELS_PATH = path.join(GLOBAL_DATA_PATH, 'models')
export const VOICE_CONFIG_PATH = path.join('core', 'config', 'voice')
export const SERVER_PATH = path.join(
'server',
IS_PRODUCTION_ENV ? 'dist' : 'src'
)
export const TMP_PATH = path.join(SERVER_PATH, 'tmp')
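For clarity, the new path constants above resolve relative to the repository root, and `TMP_PATH` is where the new ASR class further down writes its speech files. A small sketch of how the values work out in both environments (POSIX separators shown for readability):

```ts
import path from 'node:path'

// Mirrors the SERVER_PATH / TMP_PATH definitions above
const serverPath = (isProd: boolean): string =>
  path.join('server', isProd ? 'dist' : 'src')

console.log(serverPath(false), path.join(serverPath(false), 'tmp')) // server/src  server/src/tmp
console.log(serverPath(true), path.join(serverPath(true), 'tmp'))   // server/dist server/dist/tmp
```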
@@ -1,77 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
|
||||
import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg'
|
||||
import Ffmpeg from 'fluent-ffmpeg'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
const audios = {
|
||||
webm: `${__dirname}/../tmp/speech.webm`,
|
||||
wav: `${__dirname}/../tmp/speech.wav`
|
||||
}
|
||||
|
||||
class Asr {
|
||||
constructor() {
|
||||
this.blob = {}
|
||||
|
||||
LogHelper.title('ASR')
|
||||
LogHelper.success('New instance')
|
||||
}
|
||||
|
||||
static get audios() {
|
||||
return audios
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode audio blob to WAVE file
|
||||
* and forward the WAVE file to the STT parser
|
||||
*/
|
||||
run(blob, stt) {
|
||||
return new Promise((resolve, reject) => {
|
||||
LogHelper.title('ASR')
|
||||
|
||||
this.blob = blob
|
||||
|
||||
fs.writeFile(audios.webm, Buffer.from(this.blob), 'binary', (err) => {
|
||||
if (err) {
|
||||
reject({ type: 'error', obj: err })
|
||||
return
|
||||
}
|
||||
|
||||
const ffmpeg = new Ffmpeg()
|
||||
ffmpeg.setFfmpegPath(ffmpegPath)
|
||||
|
||||
/**
|
||||
* Encode WebM file to WAVE file
|
||||
* ffmpeg -i speech.webm -acodec pcm_s16le -ar 16000 -ac 1 speech.wav
|
||||
*/
|
||||
ffmpeg
|
||||
.addInput(audios.webm)
|
||||
.on('start', () => {
|
||||
LogHelper.info('Encoding WebM file to WAVE file...')
|
||||
})
|
||||
.on('end', () => {
|
||||
LogHelper.success('Encoding done')
|
||||
|
||||
if (Object.keys(stt).length === 0) {
|
||||
reject({
|
||||
type: 'warning',
|
||||
obj: new Error('The speech recognition is not ready yet')
|
||||
})
|
||||
} else {
|
||||
stt.parse(audios.wav)
|
||||
resolve()
|
||||
}
|
||||
})
|
||||
.on('error', (err) => {
|
||||
reject({ type: 'error', obj: new Error(`Encoding error ${err}`) })
|
||||
})
|
||||
.outputOptions(['-acodec pcm_s16le', '-ar 16000', '-ac 1'])
|
||||
.output(audios.wav)
|
||||
.run()
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export default Asr
|
77 server/src/core/asr/asr.ts (new file)
@@ -0,0 +1,77 @@
|
||||
import path from 'node:path'
|
||||
import fs from 'node:fs'
|
||||
|
||||
import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg'
|
||||
import ffmpeg from 'fluent-ffmpeg'
|
||||
|
||||
import { TMP_PATH } from '@/constants'
|
||||
import { STT } from '@/core'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
export default class ASR {
|
||||
private static instance: ASR
|
||||
|
||||
public audioPaths = {
|
||||
webm: path.join(TMP_PATH, 'speech.webm'),
|
||||
wav: path.join(TMP_PATH, 'speech.wav')
|
||||
}
|
||||
|
||||
constructor() {
|
||||
if (!ASR.instance) {
|
||||
LogHelper.title('ASR')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
ASR.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode audio blob to WAVE file
|
||||
* and forward the WAVE file to the STT parser
|
||||
*/
|
||||
public encode(blob: Buffer): Promise<void> {
|
||||
return new Promise((resolve, reject) => {
|
||||
LogHelper.title('ASR')
|
||||
|
||||
fs.writeFile(
|
||||
this.audioPaths.webm,
|
||||
Buffer.from(blob),
|
||||
'binary',
|
||||
async (err) => {
|
||||
if (err) {
|
||||
reject(new Error(`${err}`))
|
||||
return
|
||||
}
|
||||
|
||||
ffmpeg.setFfmpegPath(ffmpegPath)
|
||||
|
||||
/**
|
||||
* Encode WebM file to WAVE file
|
||||
* ffmpeg -i speech.webm -acodec pcm_s16le -ar 16000 -ac 1 speech.wav
|
||||
*/
|
||||
ffmpeg()
|
||||
.addInput(this.audioPaths.webm)
|
||||
.on('start', () => {
|
||||
LogHelper.info('Encoding WebM file to WAVE file...')
|
||||
})
|
||||
.on('end', () => {
|
||||
LogHelper.success('Encoding done')
|
||||
|
||||
if (!STT.isParserReady) {
|
||||
reject(new Error('The speech recognition is not ready yet'))
|
||||
} else {
|
||||
STT.transcribe(this.audioPaths.wav)
|
||||
resolve()
|
||||
}
|
||||
})
|
||||
.on('error', (err) => {
|
||||
reject(new Error(`Encoding error ${err}`))
|
||||
})
|
||||
.outputOptions(['-acodec pcm_s16le', '-ar 16000', '-ac 1'])
|
||||
.output(this.audioPaths.wav)
|
||||
.run()
|
||||
}
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
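A rough usage sketch for the new `ASR` class added above: the web app records audio and emits it over the socket (see the `client.socket.emit('recognize', blob)` hunk earlier in this diff), and the server side can then feed that buffer to `encode()`, which converts it to WAVE and hands it to the STT parser. How the instance is actually wired into `@/core` is not shown in this excerpt, so treat the wiring below as an assumption:

```ts
import ASR from '@/core/asr/asr'

// Assumed wiring: a socket handler receives the recorded audio as a Buffer
const asr = new ASR()

async function onRecognize(audioBlob: Buffer): Promise<void> {
  try {
    // Writes speech.webm into TMP_PATH, converts it to speech.wav and forwards it to STT
    await asr.encode(audioBlob)
  } catch (e) {
    console.error('ASR encoding failed:', e)
  }
}
```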
1 server/src/core/asr/types.ts (new file)
@@ -0,0 +1 @@
export type ASRAudioFormat = 'wav' | 'webm'
@@ -1,510 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import { spawn } from 'node:child_process'
|
||||
|
||||
import { langs } from '@@/core/langs.json'
|
||||
import { HAS_TTS, PYTHON_BRIDGE_BIN_PATH } from '@/constants'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
import Synchronizer from '@/core/synchronizer'
|
||||
|
||||
class Brain {
|
||||
constructor() {
|
||||
this._lang = 'en'
|
||||
this.broca = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(process.cwd(), 'core/data', this._lang, 'answers.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
this.process = {}
|
||||
this.interOutput = {}
|
||||
this.finalOutput = {}
|
||||
this._socket = {}
|
||||
this._stt = {}
|
||||
this._tts = {}
|
||||
|
||||
LogHelper.title('Brain')
|
||||
LogHelper.success('New instance')
|
||||
}
|
||||
|
||||
get socket() {
|
||||
return this._socket
|
||||
}
|
||||
|
||||
set socket(newSocket) {
|
||||
this._socket = newSocket
|
||||
}
|
||||
|
||||
get stt() {
|
||||
return this._stt
|
||||
}
|
||||
|
||||
set stt(newStt) {
|
||||
this._stt = newStt
|
||||
}
|
||||
|
||||
get tts() {
|
||||
return this._tts
|
||||
}
|
||||
|
||||
set tts(newTts) {
|
||||
this._tts = newTts
|
||||
}
|
||||
|
||||
get lang() {
|
||||
return this._lang
|
||||
}
|
||||
|
||||
set lang(newLang) {
|
||||
this._lang = newLang
|
||||
// Update broca
|
||||
this.broca = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(process.cwd(), 'core/data', this._lang, 'answers.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
|
||||
if (HAS_TTS) {
|
||||
this._tts.init(this._lang, () => {
|
||||
LogHelper.title('Brain')
|
||||
LogHelper.info('Language has changed')
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete intent object file
|
||||
*/
|
||||
static deleteIntentObjFile(intentObjectPath) {
|
||||
try {
|
||||
if (fs.existsSync(intentObjectPath)) {
|
||||
fs.unlinkSync(intentObjectPath)
|
||||
}
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to delete intent object file: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Make Leon talk
|
||||
*/
|
||||
talk(rawSpeech, end = false) {
|
||||
LogHelper.title('Leon')
|
||||
LogHelper.info('Talking...')
|
||||
|
||||
if (rawSpeech !== '') {
|
||||
if (HAS_TTS) {
|
||||
// Stripe HTML to a whitespace. Whitespace to let the TTS respects punctuation
|
||||
const speech = rawSpeech.replace(/<(?:.|\n)*?>/gm, ' ')
|
||||
|
||||
this._tts.add(speech, end)
|
||||
}
|
||||
|
||||
this._socket.emit('answer', rawSpeech)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pickup speech info we need to return
|
||||
*/
|
||||
wernicke(type, key, obj) {
|
||||
let answer = ''
|
||||
|
||||
// Choose a random answer or a specific one
|
||||
const property = this.broca.answers[type]
|
||||
if (property.constructor === [].constructor) {
|
||||
answer = property[Math.floor(Math.random() * property.length)]
|
||||
} else {
|
||||
answer = property
|
||||
}
|
||||
|
||||
// Select a specific key
|
||||
if (key !== '' && typeof key !== 'undefined') {
|
||||
answer = answer[key]
|
||||
}
|
||||
|
||||
// Parse sentence's value(s) and replace with the given object
|
||||
if (typeof obj !== 'undefined' && Object.keys(obj).length > 0) {
|
||||
answer = StringHelper.findAndMap(answer, obj)
|
||||
}
|
||||
|
||||
return answer
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute Python skills
|
||||
* TODO: split into several methods
|
||||
*/
|
||||
execute(obj, opts) {
|
||||
const executionTimeStart = Date.now()
|
||||
opts = opts || {
|
||||
mute: false // Close Leon mouth e.g. over HTTP
|
||||
}
|
||||
|
||||
return new Promise(async (resolve, reject) => {
|
||||
const utteranceId = `${Date.now()}-${StringHelper.random(4)}`
|
||||
const intentObjectPath = path.join(
|
||||
__dirname,
|
||||
`../tmp/${utteranceId}.json`
|
||||
)
|
||||
const speeches = []
|
||||
|
||||
// Ask to repeat if Leon is not sure about the request
|
||||
if (
|
||||
obj.classification.confidence <
|
||||
langs[LangHelper.getLongCode(this._lang)].min_confidence
|
||||
) {
|
||||
if (!opts.mute) {
|
||||
const speech = `${this.wernicke('random_not_sure')}.`
|
||||
|
||||
speeches.push(speech)
|
||||
this.talk(speech, true)
|
||||
this._socket.emit('is-typing', false)
|
||||
}
|
||||
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
|
||||
resolve({
|
||||
speeches,
|
||||
executionTime
|
||||
})
|
||||
} else {
|
||||
const {
|
||||
configDataFilePath,
|
||||
classification: { action: actionName }
|
||||
} = obj
|
||||
const { actions } = JSON.parse(
|
||||
fs.readFileSync(configDataFilePath, 'utf8')
|
||||
)
|
||||
const action = actions[actionName]
|
||||
const { type: actionType } = action
|
||||
const nextAction = action.next_action
|
||||
? actions[action.next_action]
|
||||
: null
|
||||
|
||||
if (actionType === 'logic') {
|
||||
/**
|
||||
* "Logic" action skill execution
|
||||
*/
|
||||
|
||||
// Ensure the process is empty (to be able to execute other processes outside of Brain)
|
||||
if (Object.keys(this.process).length === 0) {
|
||||
/**
|
||||
* Execute a skill in a standalone way (CLI):
|
||||
*
|
||||
* 1. Need to be at the root of the project
|
||||
* 2. Edit: server/src/intent-object.sample.json
|
||||
* 3. Run: npm run python-bridge
|
||||
*/
|
||||
const slots = {}
|
||||
if (obj.slots) {
|
||||
Object.keys(obj.slots)?.forEach((slotName) => {
|
||||
slots[slotName] = obj.slots[slotName].value
|
||||
})
|
||||
}
|
||||
const intentObj = {
|
||||
id: utteranceId,
|
||||
lang: this._lang,
|
||||
domain: obj.classification.domain,
|
||||
skill: obj.classification.skill,
|
||||
action: obj.classification.action,
|
||||
utterance: obj.utterance,
|
||||
current_entities: obj.currentEntities,
|
||||
entities: obj.entities,
|
||||
current_resolvers: obj.currentResolvers,
|
||||
resolvers: obj.resolvers,
|
||||
slots
|
||||
}
|
||||
|
||||
try {
|
||||
fs.writeFileSync(intentObjectPath, JSON.stringify(intentObj))
|
||||
this.process = spawn(
|
||||
`${PYTHON_BRIDGE_BIN_PATH} "${intentObjectPath}"`,
|
||||
{ shell: true }
|
||||
)
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to save intent object: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
const domainName = obj.classification.domain
|
||||
const skillName = obj.classification.skill
|
||||
const { name: domainFriendlyName } =
|
||||
SkillDomainHelper.getSkillDomainInfo(domainName)
|
||||
const { name: skillFriendlyName } = SkillDomainHelper.getSkillInfo(
|
||||
domainName,
|
||||
skillName
|
||||
)
|
||||
let output = ''
|
||||
|
||||
// Read output
|
||||
this.process.stdout.on('data', (data) => {
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
|
||||
try {
|
||||
const obj = JSON.parse(data.toString())
|
||||
|
||||
if (typeof obj === 'object') {
|
||||
if (obj.output.type === 'inter') {
|
||||
LogHelper.title(`${skillFriendlyName} skill`)
|
||||
LogHelper.info(data.toString())
|
||||
|
||||
this.interOutput = obj.output
|
||||
|
||||
const speech = obj.output.speech.toString()
|
||||
if (!opts.mute) {
|
||||
this.talk(speech)
|
||||
}
|
||||
speeches.push(speech)
|
||||
} else {
|
||||
output += data
|
||||
}
|
||||
} else {
|
||||
/* istanbul ignore next */
|
||||
reject({
|
||||
type: 'warning',
|
||||
obj: new Error(
|
||||
`The "${skillFriendlyName}" skill from the "${domainFriendlyName}" domain is not well configured. Check the configuration file.`
|
||||
),
|
||||
speeches,
|
||||
executionTime
|
||||
})
|
||||
}
|
||||
} catch (e) {
|
||||
LogHelper.title('Brain')
|
||||
LogHelper.debug(`process.stdout: ${String(data)}`)
|
||||
|
||||
/* istanbul ignore next */
|
||||
reject({
|
||||
type: 'error',
|
||||
obj: new Error(
|
||||
`The "${skillFriendlyName}" skill from the "${domainFriendlyName}" domain isn't returning JSON format.`
|
||||
),
|
||||
speeches,
|
||||
executionTime
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
// Handle error
|
||||
this.process.stderr.on('data', (data) => {
|
||||
const speech = `${this.wernicke('random_skill_errors', '', {
|
||||
'%skill_name%': skillFriendlyName,
|
||||
'%domain_name%': domainFriendlyName
|
||||
})}!`
|
||||
if (!opts.mute) {
|
||||
this.talk(speech)
|
||||
this._socket.emit('is-typing', false)
|
||||
}
|
||||
speeches.push(speech)
|
||||
|
||||
Brain.deleteIntentObjFile(intentObjectPath)
|
||||
|
||||
LogHelper.title(`${skillFriendlyName} skill`)
|
||||
LogHelper.error(data.toString())
|
||||
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
reject({
|
||||
type: 'error',
|
||||
obj: new Error(data),
|
||||
speeches,
|
||||
executionTime
|
||||
})
|
||||
})
|
||||
|
||||
// Catch the end of the skill execution
|
||||
this.process.stdout.on('end', () => {
|
||||
LogHelper.title(`${skillFriendlyName} skill`)
|
||||
LogHelper.info(output)
|
||||
|
||||
this.finalOutput = output
|
||||
|
||||
// Check if there is an output (no skill error)
|
||||
if (this.finalOutput !== '') {
|
||||
this.finalOutput = JSON.parse(this.finalOutput).output
|
||||
|
||||
let { speech } = this.finalOutput
|
||||
if (speech) {
|
||||
speech = speech.toString()
|
||||
if (!opts.mute) {
|
||||
this.talk(speech, true)
|
||||
}
|
||||
speeches.push(speech)
|
||||
|
||||
/* istanbul ignore next */
|
||||
// Synchronize the downloaded content if enabled
|
||||
if (
|
||||
this.finalOutput.type === 'end' &&
|
||||
this.finalOutput.options.synchronization &&
|
||||
this.finalOutput.options.synchronization.enabled &&
|
||||
this.finalOutput.options.synchronization.enabled === true
|
||||
) {
|
||||
const sync = new Synchronizer(
|
||||
this,
|
||||
obj.classification,
|
||||
this.finalOutput.options.synchronization
|
||||
)
|
||||
|
||||
// When the synchronization is finished
|
||||
sync.synchronize((speech) => {
|
||||
if (!opts.mute) {
|
||||
this.talk(speech)
|
||||
}
|
||||
speeches.push(speech)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Brain.deleteIntentObjFile(intentObjectPath)
|
||||
|
||||
if (!opts.mute) {
|
||||
this._socket.emit('is-typing', false)
|
||||
}
|
||||
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
|
||||
// Send suggestions to the client
|
||||
if (
|
||||
nextAction?.suggestions &&
|
||||
this.finalOutput.core?.showNextActionSuggestions
|
||||
) {
|
||||
this._socket.emit('suggest', nextAction.suggestions)
|
||||
}
|
||||
if (action?.suggestions && this.finalOutput.core?.showSuggestions) {
|
||||
this._socket.emit('suggest', action.suggestions)
|
||||
}
|
||||
|
||||
resolve({
|
||||
utteranceId,
|
||||
lang: this._lang,
|
||||
...obj,
|
||||
speeches,
|
||||
core: this.finalOutput.core,
|
||||
action,
|
||||
nextAction,
|
||||
executionTime // In ms, skill execution time only
|
||||
})
|
||||
})
|
||||
|
||||
// Reset the child process
|
||||
this.process = {}
|
||||
} else {
|
||||
/**
|
||||
* "Dialog" action skill execution
|
||||
*/
|
||||
|
||||
const configFilePath = path.join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
obj.classification.domain,
|
||||
obj.classification.skill,
|
||||
'config',
|
||||
`${this._lang}.json`
|
||||
)
|
||||
const { actions, entities } = await SkillDomainHelper.getSkillConfig(
|
||||
configFilePath,
|
||||
this._lang
|
||||
)
|
||||
const utteranceHasEntities = obj.entities.length > 0
|
||||
const { answers: rawAnswers } = obj
|
||||
let answers = rawAnswers
|
||||
let answer = ''
|
||||
|
||||
if (!utteranceHasEntities) {
|
||||
answers = answers.filter(
|
||||
({ answer }) => answer.indexOf('{{') === -1
|
||||
)
|
||||
} else {
|
||||
answers = answers.filter(
|
||||
({ answer }) => answer.indexOf('{{') !== -1
|
||||
)
|
||||
}
|
||||
|
||||
// When answers are simple without required entity
|
||||
if (answers.length === 0) {
|
||||
answer =
|
||||
rawAnswers[Math.floor(Math.random() * rawAnswers.length)]?.answer
|
||||
|
||||
// In case the expected answer requires a known entity
|
||||
if (answer.indexOf('{{') !== -1) {
|
||||
// TODO
|
||||
answers = actions[obj.classification.action]?.unknown_answers
|
||||
answer = answers[Math.floor(Math.random() * answers.length)]
|
||||
}
|
||||
} else {
|
||||
answer = answers[Math.floor(Math.random() * answers.length)]?.answer
|
||||
|
||||
/**
|
||||
* In case the utterance contains entities, and the picked up answer too,
|
||||
* then map them (utterance <-> answer)
|
||||
*/
|
||||
if (utteranceHasEntities && answer.indexOf('{{') !== -1) {
|
||||
obj.currentEntities.forEach((entityObj) => {
|
||||
answer = StringHelper.findAndMap(answer, {
|
||||
[`{{ ${entityObj.entity} }}`]: entityObj.resolution.value
|
||||
})
|
||||
|
||||
// Find matches and map deeper data from the NLU file (global entities)
|
||||
const matches = answer.match(/{{.+?}}/g)
|
||||
|
||||
matches?.forEach((match) => {
|
||||
let newStr = match.substring(3)
|
||||
|
||||
newStr = newStr.substring(0, newStr.indexOf('}}') - 1)
|
||||
|
||||
const [entity, dataKey] = newStr.split('.')
|
||||
|
||||
if (entity === entityObj.entity) {
|
||||
// e.g. entities.color.options.red.data.usage
|
||||
const valuesArr =
|
||||
entities[entity].options[entityObj.option].data[dataKey]
|
||||
|
||||
answer = StringHelper.findAndMap(answer, {
|
||||
[match]:
|
||||
valuesArr[Math.floor(Math.random() * valuesArr.length)]
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
|
||||
if (!opts.mute) {
|
||||
this.talk(answer, true)
|
||||
this._socket.emit('is-typing', false)
|
||||
}
|
||||
|
||||
// Send suggestions to the client
|
||||
if (nextAction?.suggestions) {
|
||||
this._socket.emit('suggest', nextAction.suggestions)
|
||||
}
|
||||
|
||||
resolve({
|
||||
utteranceId,
|
||||
lang: this._lang,
|
||||
...obj,
|
||||
speeches: [answer],
|
||||
core: this.finalOutput.core,
|
||||
action,
|
||||
nextAction,
|
||||
executionTime // In ms, skill execution time only
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export default Brain
|
server/src/core/brain/brain.ts (new file, 583 lines)
@ -0,0 +1,583 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import { ChildProcessWithoutNullStreams, spawn } from 'node:child_process'
|
||||
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
import type { GlobalAnswersSchema } from '@/schemas/global-data-schemas'
|
||||
import type {
|
||||
CustomEnumEntity,
|
||||
NERCustomEntity,
|
||||
NLUResult
|
||||
} from '@/core/nlp/types'
|
||||
import type { SkillConfigSchema } from '@/schemas/skill-schemas'
|
||||
import type {
|
||||
BrainProcessResult,
|
||||
IntentObject,
|
||||
SkillResult
|
||||
} from '@/core/brain/types'
|
||||
import { SkillActionType, SkillOutputType } from '@/core/brain/types'
|
||||
import { langs } from '@@/core/langs.json'
|
||||
import { HAS_TTS, PYTHON_BRIDGE_BIN_PATH, TMP_PATH } from '@/constants'
|
||||
import { SOCKET_SERVER, TTS } from '@/core'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
import Synchronizer from '@/core/synchronizer'
|
||||
|
||||
export default class Brain {
|
||||
private static instance: Brain
|
||||
private _lang: ShortLanguageCode = 'en'
|
||||
private broca: GlobalAnswersSchema = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(process.cwd(), 'core', 'data', this._lang, 'answers.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
private skillProcess: ChildProcessWithoutNullStreams | undefined = undefined
|
||||
private domainFriendlyName = ''
|
||||
private skillFriendlyName = ''
|
||||
private skillOutput = ''
|
||||
private speeches: string[] = []
|
||||
public isMuted = false // Close Leon's mouth if true; e.g. when answering over HTTP
|
||||
|
||||
constructor() {
|
||||
if (!Brain.instance) {
|
||||
LogHelper.title('Brain')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
Brain.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
public get lang(): ShortLanguageCode {
|
||||
return this._lang
|
||||
}
|
||||
|
||||
public set lang(newLang: ShortLanguageCode) {
|
||||
this._lang = newLang
|
||||
// Update broca
|
||||
this.broca = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(process.cwd(), 'core', 'data', this._lang, 'answers.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
|
||||
if (HAS_TTS) {
|
||||
this.updateTTSLang(this._lang)
|
||||
}
|
||||
}
|
||||
|
||||
private async updateTTSLang(newLang: ShortLanguageCode): Promise<void> {
|
||||
await TTS.init(newLang)
|
||||
|
||||
LogHelper.title('Brain')
|
||||
LogHelper.info('Language has changed')
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete intent object file
|
||||
*/
|
||||
private static deleteIntentObjFile(intentObjectPath: string): void {
|
||||
try {
|
||||
if (fs.existsSync(intentObjectPath)) {
|
||||
fs.unlinkSync(intentObjectPath)
|
||||
}
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to delete intent object file: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Make Leon talk
|
||||
*/
|
||||
public talk(rawSpeech: string, end = false): void {
|
||||
LogHelper.title('Brain')
|
||||
LogHelper.info('Talking...')
|
||||
|
||||
if (rawSpeech !== '') {
|
||||
if (HAS_TTS) {
|
||||
// Strip HTML to a whitespace. The whitespace lets the TTS respect punctuation
|
||||
const speech = rawSpeech.replace(/<(?:.|\n)*?>/gm, ' ')
|
||||
|
||||
TTS.add(speech, end)
|
||||
}
|
||||
|
||||
SOCKET_SERVER.socket?.emit('answer', rawSpeech)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pick up the speech info we need to return
|
||||
*/
|
||||
public wernicke(
|
||||
type: string,
|
||||
key?: string,
|
||||
obj?: Record<string, unknown>
|
||||
): string {
|
||||
let answerObject: Record<string, string> = {}
|
||||
let answer = ''
|
||||
|
||||
// Choose a random answer or a specific one
|
||||
let property = this.broca.answers[type]
|
||||
if (property?.constructor === [].constructor) {
|
||||
property = property as string[]
|
||||
answer = property[Math.floor(Math.random() * property.length)] as string
|
||||
} else {
|
||||
answerObject = property as Record<string, string>
|
||||
}
|
||||
|
||||
// Select a specific key
|
||||
if (key !== '' && typeof key !== 'undefined') {
|
||||
answer = answerObject[key] as string
|
||||
}
|
||||
|
||||
// Parse sentence's value(s) and replace with the given object
|
||||
if (typeof obj !== 'undefined' && Object.keys(obj).length > 0) {
|
||||
answer = StringHelper.findAndMap(answer, obj)
|
||||
}
|
||||
|
||||
return answer
|
||||
}
|
||||
|
||||
private shouldAskToRepeat(nluResult: NLUResult): boolean {
|
||||
return (
|
||||
nluResult.classification.confidence <
|
||||
langs[LangHelper.getLongCode(this._lang)].min_confidence
|
||||
)
|
||||
}
|
||||
|
||||
private handleAskToRepeat(nluResult: NLUResult): void {
|
||||
if (!this.isMuted) {
|
||||
const speech = `${this.wernicke('random_not_sure')}.`
|
||||
|
||||
this.talk(speech, true)
|
||||
SOCKET_SERVER.socket?.emit('ask-to-repeat', nluResult)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create the intent object that will be passed to the skill
|
||||
*/
|
||||
private createIntentObject(
|
||||
nluResult: NLUResult,
|
||||
utteranceId: string,
|
||||
slots: IntentObject['slots']
|
||||
): IntentObject {
|
||||
return {
|
||||
id: utteranceId,
|
||||
lang: this._lang,
|
||||
domain: nluResult.classification.domain,
|
||||
skill: nluResult.classification.skill,
|
||||
action: nluResult.classification.action,
|
||||
utterance: nluResult.utterance,
|
||||
current_entities: nluResult.currentEntities,
|
||||
entities: nluResult.entities,
|
||||
current_resolvers: nluResult.currentResolvers,
|
||||
resolvers: nluResult.resolvers,
|
||||
slots
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the skill process output
|
||||
*/
|
||||
private handleLogicActionSkillProcessOutput(
|
||||
data: Buffer
|
||||
): Promise<Error | null> | void {
|
||||
try {
|
||||
const obj = JSON.parse(data.toString())
|
||||
|
||||
if (typeof obj === 'object') {
|
||||
if (obj.output.type === SkillOutputType.Intermediate) {
|
||||
LogHelper.title(`${this.skillFriendlyName} skill`)
|
||||
LogHelper.info(data.toString())
|
||||
|
||||
const speech = obj.output.speech.toString()
|
||||
if (!this.isMuted) {
|
||||
this.talk(speech)
|
||||
}
|
||||
this.speeches.push(speech)
|
||||
} else {
|
||||
this.skillOutput = data.toString()
|
||||
}
|
||||
|
||||
return Promise.resolve(null)
|
||||
} else {
|
||||
return Promise.reject(
|
||||
new Error(
|
||||
`The "${this.skillFriendlyName}" skill from the "${this.domainFriendlyName}" domain is not well configured. Check the configuration file.`
|
||||
)
|
||||
)
|
||||
}
|
||||
} catch (e) {
|
||||
LogHelper.title('Brain')
|
||||
LogHelper.debug(`process.stdout: ${String(data)}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Speak about an error that happened regarding a specific skill
|
||||
*/
|
||||
private speakSkillError(): void {
|
||||
const speech = `${this.wernicke('random_skill_errors', '', {
|
||||
'%skill_name%': this.skillFriendlyName,
|
||||
'%domain_name%': this.domainFriendlyName
|
||||
})}!`
|
||||
if (!this.isMuted) {
|
||||
this.talk(speech)
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
}
|
||||
this.speeches.push(speech)
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the skill process error
|
||||
*/
|
||||
private handleLogicActionSkillProcessError(
|
||||
data: Buffer,
|
||||
intentObjectPath: string
|
||||
): Error {
|
||||
this.speakSkillError()
|
||||
|
||||
Brain.deleteIntentObjFile(intentObjectPath)
|
||||
|
||||
LogHelper.title(`${this.skillFriendlyName} skill`)
|
||||
LogHelper.error(data.toString())
|
||||
|
||||
return new Error(data.toString())
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute an action logic skill in a standalone way (CLI):
|
||||
*
|
||||
* 1. Need to be at the root of the project
|
||||
* 2. Edit: server/src/intent-object.sample.json
|
||||
* 3. Run: npm run python-bridge
|
||||
*/
|
||||
private async executeLogicActionSkill(
|
||||
nluResult: NLUResult,
|
||||
utteranceId: string,
|
||||
intentObjectPath: string
|
||||
): Promise<void> {
|
||||
// Ensure the process is empty (to be able to execute other processes outside of Brain)
|
||||
if (!this.skillProcess) {
|
||||
const slots: IntentObject['slots'] = {}
|
||||
|
||||
if (nluResult.slots) {
|
||||
Object.keys(nluResult.slots)?.forEach((slotName) => {
|
||||
slots[slotName] = nluResult.slots[slotName]?.value
|
||||
})
|
||||
}
|
||||
|
||||
const intentObject = this.createIntentObject(
|
||||
nluResult,
|
||||
utteranceId,
|
||||
slots
|
||||
)
|
||||
|
||||
try {
|
||||
await fs.promises.writeFile(
|
||||
intentObjectPath,
|
||||
JSON.stringify(intentObject)
|
||||
)
|
||||
this.skillProcess = spawn(
|
||||
`${PYTHON_BRIDGE_BIN_PATH} "${intentObjectPath}"`,
|
||||
{ shell: true }
|
||||
)
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to save intent object: ${e}`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute Python skills
|
||||
*/
|
||||
public execute(nluResult: NLUResult): Promise<Partial<BrainProcessResult>> {
|
||||
const executionTimeStart = Date.now()
|
||||
|
||||
return new Promise(async (resolve) => {
|
||||
const utteranceId = `${Date.now()}-${StringHelper.random(4)}`
|
||||
const intentObjectPath = path.join(TMP_PATH, `${utteranceId}.json`)
|
||||
const speeches: string[] = []
|
||||
|
||||
// Reset skill output
|
||||
this.skillOutput = ''
|
||||
|
||||
// Ask to repeat if Leon is not sure about the request
|
||||
if (this.shouldAskToRepeat(nluResult)) {
|
||||
this.handleAskToRepeat(nluResult)
|
||||
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
|
||||
resolve({
|
||||
speeches,
|
||||
executionTime
|
||||
})
|
||||
} else {
|
||||
const {
|
||||
skillConfigPath,
|
||||
classification: { action: actionName }
|
||||
} = nluResult
|
||||
const { actions } = await SkillDomainHelper.getSkillConfig(
|
||||
skillConfigPath,
|
||||
this._lang
|
||||
)
|
||||
const action = actions[
|
||||
actionName
|
||||
] as SkillConfigSchema['actions'][string]
|
||||
const { type: actionType } = action
|
||||
const nextAction = action.next_action
|
||||
? actions[action.next_action]
|
||||
: null
|
||||
|
||||
if (actionType === SkillActionType.Logic) {
|
||||
/**
|
||||
* "Logic" action skill execution
|
||||
*/
|
||||
|
||||
this.executeLogicActionSkill(nluResult, utteranceId, intentObjectPath)
|
||||
|
||||
const domainName = nluResult.classification.domain
|
||||
const skillName = nluResult.classification.skill
|
||||
const { name: domainFriendlyName } =
|
||||
await SkillDomainHelper.getSkillDomainInfo(domainName)
|
||||
const { name: skillFriendlyName } =
|
||||
await SkillDomainHelper.getSkillInfo(domainName, skillName)
|
||||
|
||||
this.domainFriendlyName = domainFriendlyName
|
||||
this.skillFriendlyName = skillFriendlyName
|
||||
|
||||
// Read skill output
|
||||
this.skillProcess?.stdout.on('data', (data: Buffer) => {
|
||||
this.handleLogicActionSkillProcessOutput(data)
|
||||
})
|
||||
|
||||
// Handle error
|
||||
this.skillProcess?.stderr.on('data', (data: Buffer) => {
|
||||
this.handleLogicActionSkillProcessError(data, intentObjectPath)
|
||||
})
|
||||
|
||||
// Catch the end of the skill execution
|
||||
this.skillProcess?.stdout.on('end', () => {
|
||||
LogHelper.title(`${this.skillFriendlyName} skill`)
|
||||
LogHelper.info(this.skillOutput)
|
||||
|
||||
let skillResult: SkillResult | undefined = undefined
|
||||
|
||||
// Check if there is an output (no skill error)
|
||||
if (this.skillOutput !== '') {
|
||||
try {
|
||||
skillResult = JSON.parse(this.skillOutput)
|
||||
|
||||
if (skillResult?.output.speech) {
|
||||
skillResult.output.speech =
|
||||
skillResult.output.speech.toString()
|
||||
if (!this.isMuted) {
|
||||
this.talk(skillResult.output.speech, true)
|
||||
}
|
||||
speeches.push(skillResult.output.speech)
|
||||
|
||||
// Synchronize the downloaded content if enabled
|
||||
if (
|
||||
skillResult.output.type === SkillOutputType.End &&
|
||||
skillResult.output.options['synchronization'] &&
|
||||
skillResult.output.options['synchronization'].enabled &&
|
||||
skillResult.output.options['synchronization'].enabled ===
|
||||
true
|
||||
) {
|
||||
const sync = new Synchronizer(
|
||||
this,
|
||||
nluResult.classification,
|
||||
skillResult.output.options['synchronization']
|
||||
)
|
||||
|
||||
// When the synchronization is finished
|
||||
sync.synchronize((speech: string) => {
|
||||
if (!this.isMuted) {
|
||||
this.talk(speech)
|
||||
}
|
||||
speeches.push(speech)
|
||||
})
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
LogHelper.title(`${this.skillFriendlyName} skill`)
|
||||
LogHelper.error(
|
||||
`There is an error on the final output: ${String(e)}`
|
||||
)
|
||||
|
||||
this.speakSkillError()
|
||||
}
|
||||
}
|
||||
|
||||
Brain.deleteIntentObjFile(intentObjectPath)
|
||||
|
||||
if (!this.isMuted) {
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
}
|
||||
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
|
||||
// Send suggestions to the client
|
||||
if (
|
||||
nextAction?.suggestions &&
|
||||
skillResult?.output.core?.showNextActionSuggestions
|
||||
) {
|
||||
SOCKET_SERVER.socket?.emit('suggest', nextAction.suggestions)
|
||||
}
|
||||
if (
|
||||
action?.suggestions &&
|
||||
skillResult?.output.core?.showSuggestions
|
||||
) {
|
||||
SOCKET_SERVER.socket?.emit('suggest', action.suggestions)
|
||||
}
|
||||
|
||||
resolve({
|
||||
utteranceId,
|
||||
lang: this._lang,
|
||||
...nluResult,
|
||||
speeches,
|
||||
core: skillResult?.output.core,
|
||||
action,
|
||||
nextAction,
|
||||
executionTime // In ms, skill execution time only
|
||||
})
|
||||
})
|
||||
|
||||
// Reset the child process
|
||||
this.skillProcess = undefined
|
||||
} else {
|
||||
/**
|
||||
* "Dialog" action skill execution
|
||||
*/
|
||||
|
||||
const configFilePath = path.join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
nluResult.classification.domain,
|
||||
nluResult.classification.skill,
|
||||
'config',
|
||||
this._lang + '.json'
|
||||
)
|
||||
const { actions, entities: skillConfigEntities } =
|
||||
await SkillDomainHelper.getSkillConfig(configFilePath, this._lang)
|
||||
const utteranceHasEntities = nluResult.entities.length > 0
|
||||
const { answers: rawAnswers } = nluResult
|
||||
let answers = rawAnswers
|
||||
let answer: string | undefined = ''
|
||||
|
||||
if (!utteranceHasEntities) {
|
||||
answers = answers.filter(
|
||||
({ answer }) => answer.indexOf('{{') === -1
|
||||
)
|
||||
} else {
|
||||
answers = answers.filter(
|
||||
({ answer }) => answer.indexOf('{{') !== -1
|
||||
)
|
||||
}
|
||||
|
||||
// When answers are simple without required entity
|
||||
if (answers.length === 0) {
|
||||
answer =
|
||||
rawAnswers[Math.floor(Math.random() * rawAnswers.length)]?.answer
|
||||
|
||||
// In case the expected answer requires a known entity
|
||||
if (answer?.indexOf('{{') !== -1) {
|
||||
// TODO
|
||||
const unknownAnswers =
|
||||
actions[nluResult.classification.action]?.unknown_answers
|
||||
|
||||
if (unknownAnswers) {
|
||||
answer =
|
||||
unknownAnswers[
|
||||
Math.floor(Math.random() * unknownAnswers.length)
|
||||
]
|
||||
}
|
||||
}
|
||||
} else {
|
||||
answer = answers[Math.floor(Math.random() * answers.length)]?.answer
|
||||
|
||||
/**
|
||||
* In case the utterance contains entities, and the picked up answer too,
|
||||
* then map them (utterance <-> answer)
|
||||
*/
|
||||
if (utteranceHasEntities && answer?.indexOf('{{') !== -1) {
|
||||
nluResult.currentEntities.forEach((entityObj) => {
|
||||
answer = StringHelper.findAndMap(answer as string, {
|
||||
[`{{ ${entityObj.entity} }}`]: (entityObj as NERCustomEntity)
|
||||
.resolution.value
|
||||
})
|
||||
|
||||
/**
|
||||
* Find matches and map deeper data from the NLU file (global entities)
|
||||
* TODO: handle more entity types, not only enums for global entities?
|
||||
*/
|
||||
const matches = answer.match(/{{.+?}}/g)
|
||||
|
||||
matches?.forEach((match) => {
|
||||
let newStr = match.substring(3)
|
||||
|
||||
newStr = newStr.substring(0, newStr.indexOf('}}') - 1)
|
||||
|
||||
const [entity, dataKey] = newStr.split('.')
|
||||
|
||||
if (entity && dataKey && entity === entityObj.entity) {
|
||||
const { option } = entityObj as CustomEnumEntity
|
||||
|
||||
const entityOption =
|
||||
skillConfigEntities[entity]?.options[option]
|
||||
const entityOptionData = entityOption?.data
|
||||
let valuesArr: string[] = []
|
||||
|
||||
if (entityOptionData) {
|
||||
// e.g. entities.color.options.red.data.hexa[]
|
||||
valuesArr = entityOptionData[dataKey] as string[]
|
||||
}
|
||||
|
||||
if (valuesArr.length > 0) {
|
||||
answer = StringHelper.findAndMap(answer as string, {
|
||||
[match]:
|
||||
valuesArr[
|
||||
Math.floor(Math.random() * valuesArr.length)
|
||||
]
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const executionTimeEnd = Date.now()
|
||||
const executionTime = executionTimeEnd - executionTimeStart
|
||||
|
||||
if (!this.isMuted) {
|
||||
this.talk(answer as string, true)
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
}
|
||||
|
||||
// Send suggestions to the client
|
||||
if (nextAction?.suggestions) {
|
||||
SOCKET_SERVER.socket?.emit('suggest', nextAction.suggestions)
|
||||
}
|
||||
|
||||
resolve({
|
||||
utteranceId,
|
||||
lang: this._lang,
|
||||
...nluResult,
|
||||
speeches: [answer as string],
|
||||
core: {},
|
||||
action,
|
||||
nextAction,
|
||||
executionTime // In ms, skill execution time only
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
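A minimal consumption sketch of the new Brain API (hypothetical caller code, not part of the diff): execute() resolves with a Partial<BrainProcessResult>, so every field should be treated as optional.

import { BRAIN } from '@/core'
import type { NLUResult } from '@/core/nlp/types'

// Hypothetical helper; the nluResult is assumed to come from the NLU pipeline
async function runSkill(nluResult: NLUResult): Promise<void> {
  // Resolves once the skill process (or dialog answer) has finished
  const result = await BRAIN.execute(nluResult)

  // All fields are optional on Partial<BrainProcessResult>
  result.speeches?.forEach((speech) => console.log('speech:', speech))
  console.log(`executed in ${result.executionTime ?? 0} ms`)
}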
server/src/core/brain/types.ts (new file, 71 lines)
@ -0,0 +1,71 @@
|
||||
import type {
|
||||
NEREntity,
|
||||
NLPAction,
|
||||
NLPDomain,
|
||||
NLPSkill,
|
||||
NLPUtterance,
|
||||
NLUResolver,
|
||||
NLUResult,
|
||||
NLUSlot,
|
||||
NLUSlots
|
||||
} from '@/core/nlp/types'
|
||||
import type { SkillConfigSchema } from '@/schemas/skill-schemas'
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
|
||||
interface SkillCoreData {
|
||||
restart?: boolean
|
||||
isInActionLoop?: boolean
|
||||
showNextActionSuggestions?: boolean
|
||||
showSuggestions?: boolean
|
||||
}
|
||||
|
||||
export interface SkillResult {
|
||||
domain: NLPDomain
|
||||
skill: NLPSkill
|
||||
action: NLPAction
|
||||
lang: ShortLanguageCode
|
||||
utterance: NLPUtterance
|
||||
entities: NEREntity[]
|
||||
slots: NLUSlots
|
||||
output: {
|
||||
type: SkillOutputType
|
||||
codes: string[]
|
||||
speech: string
|
||||
core: SkillCoreData | undefined
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
options: Record<string, any>
|
||||
}
|
||||
}
|
||||
|
||||
export enum SkillOutputType {
|
||||
Intermediate = 'inter',
|
||||
End = 'end'
|
||||
}
|
||||
export enum SkillActionType {
|
||||
Logic = 'logic',
|
||||
Dialog = 'dialog'
|
||||
}
|
||||
|
||||
export interface IntentObject {
|
||||
id: string
|
||||
lang: ShortLanguageCode
|
||||
domain: NLPDomain
|
||||
skill: NLPSkill
|
||||
action: NLPAction
|
||||
utterance: NLPUtterance
|
||||
current_entities: NEREntity[]
|
||||
entities: NEREntity[]
|
||||
current_resolvers: NLUResolver[]
|
||||
resolvers: NLUResolver[]
|
||||
slots: { [key: string]: NLUSlot['value'] | undefined }
|
||||
}
|
||||
|
||||
export interface BrainProcessResult extends NLUResult {
|
||||
speeches: string[]
|
||||
executionTime: number
|
||||
utteranceId?: string
|
||||
lang?: ShortLanguageCode
|
||||
core?: SkillCoreData | undefined
|
||||
action?: SkillConfigSchema['actions'][string]
|
||||
nextAction?: SkillConfigSchema['actions'][string] | null | undefined
|
||||
}
|
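For illustration (all values below are placeholders, not taken from the diff), an IntentObject as it would be serialized to the temporary JSON file handed to the Python bridge:

import type { IntentObject } from '@/core/brain/types'

// Illustrative values only; the real object is built by Brain.createIntentObject()
const exampleIntentObject: IntentObject = {
  id: '1671123456789-ab12', // `${Date.now()}-${StringHelper.random(4)}`
  lang: 'en',
  domain: 'leon',
  skill: 'greeting',
  action: 'run',
  utterance: 'Hello Leon',
  current_entities: [],
  entities: [],
  current_resolvers: [],
  resolvers: [],
  slots: {}
}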
@ -1,134 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import archiver from 'archiver'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
const getDownloads = async (fastify, options) => {
|
||||
fastify.get(`/api/${options.apiVersion}/downloads`, (request, reply) => {
|
||||
LogHelper.title('GET /downloads')
|
||||
|
||||
const clean = (dir, files) => {
|
||||
LogHelper.info('Cleaning skill download directory...')
|
||||
for (let i = 0; i < files.length; i += 1) {
|
||||
fs.unlinkSync(`${dir}/${files[i]}`)
|
||||
}
|
||||
fs.rmdirSync(dir)
|
||||
LogHelper.success('Downloads directory cleaned')
|
||||
}
|
||||
let message = ''
|
||||
|
||||
if (request.query.domain && request.query.skill) {
|
||||
const dlDomainDir = path.join(
|
||||
process.cwd(),
|
||||
'downloads',
|
||||
request.query.domain
|
||||
)
|
||||
const skill = path.join(dlDomainDir, `${request.query.skill}.py`)
|
||||
|
||||
LogHelper.info(
|
||||
`Checking existence of the ${StringHelper.ucFirst(
|
||||
request.query.skill
|
||||
)} skill...`
|
||||
)
|
||||
if (fs.existsSync(skill)) {
|
||||
LogHelper.success(
|
||||
`${StringHelper.ucFirst(request.query.skill)} skill exists`
|
||||
)
|
||||
const downloadsDir = `${dlDomainDir}/${request.query.skill}`
|
||||
|
||||
LogHelper.info('Reading downloads directory...')
|
||||
fs.readdir(downloadsDir, (err, files) => {
|
||||
if (err && err.code === 'ENOENT') {
|
||||
message = 'There is no content to download for this skill.'
|
||||
LogHelper.error(message)
|
||||
reply.code(404).send({
|
||||
success: false,
|
||||
status: 404,
|
||||
code: 'skill_dir_not_found',
|
||||
message
|
||||
})
|
||||
} else {
|
||||
if (err) LogHelper.error(err)
|
||||
|
||||
// Download the file if there is only one
|
||||
if (files.length === 1) {
|
||||
LogHelper.info(`${files[0]} is downloading...`)
|
||||
reply.download(`${downloadsDir}/${files[0]}`)
|
||||
LogHelper.success(`${files[0]} downloaded`)
|
||||
clean(downloadsDir, files)
|
||||
} else {
|
||||
LogHelper.info('Deleting previous archives...')
|
||||
const zipSlug = `leon-${request.query.domain}-${request.query.skill}`
|
||||
const domainsFiles = fs.readdirSync(dlDomainDir)
|
||||
|
||||
for (let i = 0; i < domainsFiles.length; i += 1) {
|
||||
if (
|
||||
domainsFiles[i].indexOf('.zip') !== -1 &&
|
||||
domainsFiles[i].indexOf(zipSlug) !== -1
|
||||
) {
|
||||
fs.unlinkSync(`${dlDomainDir}/${domainsFiles[i]}`)
|
||||
LogHelper.success(`${domainsFiles[i]} archive deleted`)
|
||||
}
|
||||
}
|
||||
|
||||
LogHelper.info('Preparing new archive...')
|
||||
const zipName = `${zipSlug}-${Date.now()}.zip`
|
||||
const zipFile = `${dlDomainDir}/${zipName}`
|
||||
const output = fs.createWriteStream(zipFile)
|
||||
const archive = archiver('zip', { zlib: { level: 9 } })
|
||||
|
||||
// When the archive is ready
|
||||
output.on('close', () => {
|
||||
LogHelper.info(`${zipName} is downloading...`)
|
||||
reply.download(zipFile, (err) => {
|
||||
if (err) LogHelper.error(err)
|
||||
|
||||
LogHelper.success(`${zipName} downloaded`)
|
||||
|
||||
clean(downloadsDir, files)
|
||||
})
|
||||
})
|
||||
archive.on('error', (err) => {
|
||||
LogHelper.error(err)
|
||||
})
|
||||
|
||||
// Add the content to the archive
|
||||
LogHelper.info('Adding content...')
|
||||
archive.directory(downloadsDir, false)
|
||||
|
||||
// Inject stream data to the archive
|
||||
LogHelper.info('Injecting stream data...')
|
||||
archive.pipe(output)
|
||||
|
||||
LogHelper.info('Finalizing...')
|
||||
archive.finalize()
|
||||
}
|
||||
}
|
||||
})
|
||||
} else {
|
||||
message = 'This skill does not exist.'
|
||||
LogHelper.error(message)
|
||||
reply.code(404).send({
|
||||
success: false,
|
||||
status: 404,
|
||||
code: 'skill_not_found',
|
||||
message
|
||||
})
|
||||
}
|
||||
} else {
|
||||
message = 'Bad request.'
|
||||
LogHelper.error(message)
|
||||
reply.code(400).send({
|
||||
success: false,
|
||||
status: 400,
|
||||
code: 'bad_request',
|
||||
message
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
export default getDownloads
|
server/src/core/http-server/api/downloads/get.ts (new file, 152 lines)
@ -0,0 +1,152 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import type { FastifyPluginAsync, FastifySchema } from 'fastify'
|
||||
import archiver from 'archiver'
|
||||
import { Type } from '@sinclair/typebox'
|
||||
import type { Static } from '@sinclair/typebox'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
import type { APIOptions } from '@/core/http-server/http-server'
|
||||
|
||||
const getDownloadsSchema = {
|
||||
querystring: Type.Object({
|
||||
domain: Type.String(),
|
||||
skill: Type.String()
|
||||
})
|
||||
} satisfies FastifySchema
|
||||
|
||||
interface GetDownloadsSchema {
|
||||
querystring: Static<typeof getDownloadsSchema.querystring>
|
||||
}
|
||||
|
||||
export const getDownloads: FastifyPluginAsync<APIOptions> = async (
|
||||
fastify,
|
||||
options
|
||||
) => {
|
||||
fastify.route<{
|
||||
Querystring: GetDownloadsSchema['querystring']
|
||||
}>({
|
||||
method: 'GET',
|
||||
url: `/api/${options.apiVersion}/downloads`,
|
||||
schema: getDownloadsSchema,
|
||||
handler: async (request, reply) => {
|
||||
LogHelper.title('GET /downloads')
|
||||
|
||||
const clean = async (dir: string, files: string[]): Promise<void> => {
|
||||
LogHelper.info('Cleaning skill download directory...')
|
||||
for (let i = 0; i < files.length; i += 1) {
|
||||
await fs.promises.unlink(`${dir}/${files[i]}`)
|
||||
}
|
||||
await fs.promises.rmdir(dir)
|
||||
LogHelper.success('Downloads directory cleaned')
|
||||
}
|
||||
let message = ''
|
||||
|
||||
if (request.query.domain && request.query.skill) {
|
||||
const dlDomainDir = path.join(
|
||||
process.cwd(),
|
||||
'downloads',
|
||||
request.query.domain
|
||||
)
|
||||
const skill = path.join(dlDomainDir, `${request.query.skill}.py`)
|
||||
|
||||
LogHelper.info(
|
||||
`Checking existence of the ${StringHelper.ucFirst(
|
||||
request.query.skill
|
||||
)} skill...`
|
||||
)
|
||||
if (fs.existsSync(skill)) {
|
||||
LogHelper.success(
|
||||
`${StringHelper.ucFirst(request.query.skill)} skill exists`
|
||||
)
|
||||
const downloadsDir = `${dlDomainDir}/${request.query.skill}`
|
||||
|
||||
LogHelper.info('Reading downloads directory...')
|
||||
try {
|
||||
const files = await fs.promises.readdir(downloadsDir)
|
||||
// Download the file if there is only one
|
||||
if (files.length === 1) {
|
||||
LogHelper.info(`${files[0]} is downloading...`)
|
||||
reply.download(`${downloadsDir}/${files[0]}`)
|
||||
LogHelper.success(`${files[0]} downloaded`)
|
||||
await clean(downloadsDir, files)
|
||||
} else {
|
||||
LogHelper.info('Deleting previous archives...')
|
||||
const zipSlug = `leon-${request.query.domain}-${request.query.skill}`
|
||||
const domainsFiles = await fs.promises.readdir(dlDomainDir)
|
||||
|
||||
for (let i = 0; i < domainsFiles.length; i += 1) {
|
||||
if (
|
||||
domainsFiles[i]?.indexOf('.zip') !== -1 &&
|
||||
domainsFiles[i]?.indexOf(zipSlug) !== -1
|
||||
) {
|
||||
await fs.promises.unlink(`${dlDomainDir}/${domainsFiles[i]}`)
|
||||
LogHelper.success(`${domainsFiles[i]} archive deleted`)
|
||||
}
|
||||
}
|
||||
|
||||
LogHelper.info('Preparing new archive...')
|
||||
const zipName = `${zipSlug}-${Date.now()}.zip`
|
||||
const zipFile = `${dlDomainDir}/${zipName}`
|
||||
const output = fs.createWriteStream(zipFile)
|
||||
const archive = archiver('zip', { zlib: { level: 9 } })
|
||||
|
||||
// When the archive is ready
|
||||
output.on('close', async () => {
|
||||
LogHelper.info(`${zipName} is downloading...`)
|
||||
reply.download(zipFile)
|
||||
LogHelper.success(`${zipName} downloaded`)
|
||||
await clean(downloadsDir, files)
|
||||
})
|
||||
archive.on('error', (err) => {
|
||||
LogHelper.error(err.message)
|
||||
})
|
||||
|
||||
// Add the content to the archive
|
||||
LogHelper.info('Adding content...')
|
||||
archive.directory(downloadsDir, false)
|
||||
|
||||
// Inject stream data to the archive
|
||||
LogHelper.info('Injecting stream data...')
|
||||
archive.pipe(output)
|
||||
|
||||
LogHelper.info('Finalizing...')
|
||||
archive.finalize()
|
||||
}
|
||||
} catch (error) {
|
||||
message = 'There is no content to download for this skill.'
|
||||
LogHelper.error(message)
|
||||
reply.code(404).send({
|
||||
success: false,
|
||||
status: 404,
|
||||
code: 'skill_dir_not_found',
|
||||
message
|
||||
})
|
||||
|
||||
LogHelper.error(message)
|
||||
}
|
||||
} else {
|
||||
message = 'This skill does not exist.'
|
||||
LogHelper.error(message)
|
||||
reply.code(404).send({
|
||||
success: false,
|
||||
status: 404,
|
||||
code: 'skill_not_found',
|
||||
message
|
||||
})
|
||||
}
|
||||
} else {
|
||||
message = 'Bad request.'
|
||||
LogHelper.error(message)
|
||||
reply.code(400).send({
|
||||
success: false,
|
||||
status: 400,
|
||||
code: 'bad_request',
|
||||
message
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
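A hypothetical client call against the new typed downloads route (host, port and skill identifiers are placeholders; only the path and query parameters come from the route above):

// Placeholders: adjust the origin, domain and skill to your own instance
async function downloadSkillContent(): Promise<void> {
  const response = await fetch(
    'http://localhost:1337/api/v1/downloads?domain=games&skill=akinator'
  )

  if (response.ok) {
    // The handler streams back a single file or a freshly built .zip archive
    const content = await response.arrayBuffer()
    console.log(`Downloaded ${content.byteLength} bytes`)
  }
}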
@ -1,8 +0,0 @@
|
||||
import getDownloads from '@/core/http-server/api/downloads/get'
|
||||
|
||||
const downloadsPlugin = async (fastify, options) => {
|
||||
// Get downloads to download skill content
|
||||
fastify.register(getDownloads, options)
|
||||
}
|
||||
|
||||
export default downloadsPlugin
|
server/src/core/http-server/api/downloads/index.ts (new file, 12 lines)
@ -0,0 +1,12 @@
|
||||
import type { FastifyPluginAsync } from 'fastify'
|
||||
|
||||
import type { APIOptions } from '@/core/http-server/http-server'
|
||||
import { getDownloads } from '@/core/http-server/api/downloads/get'
|
||||
|
||||
export const downloadsPlugin: FastifyPluginAsync<APIOptions> = async (
|
||||
fastify,
|
||||
options
|
||||
) => {
|
||||
// Get downloads to download skill content
|
||||
await fastify.register(getDownloads, options)
|
||||
}
|
@ -1,40 +0,0 @@
|
||||
import { version } from '@@/package.json'
|
||||
import {
|
||||
HAS_AFTER_SPEECH,
|
||||
HAS_LOGGER,
|
||||
HAS_STT,
|
||||
HAS_TTS,
|
||||
STT_PROVIDER,
|
||||
TTS_PROVIDER
|
||||
} from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
const getInfo = async (fastify, options) => {
|
||||
fastify.get(`/api/${options.apiVersion}/info`, (request, reply) => {
|
||||
LogHelper.title('GET /info')
|
||||
|
||||
const message = 'Information pulled.'
|
||||
|
||||
LogHelper.success(message)
|
||||
|
||||
reply.send({
|
||||
success: true,
|
||||
status: 200,
|
||||
code: 'info_pulled',
|
||||
message,
|
||||
after_speech: HAS_AFTER_SPEECH,
|
||||
logger: HAS_LOGGER,
|
||||
stt: {
|
||||
enabled: HAS_STT,
|
||||
provider: STT_PROVIDER
|
||||
},
|
||||
tts: {
|
||||
enabled: HAS_TTS,
|
||||
provider: TTS_PROVIDER
|
||||
},
|
||||
version
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
export default getInfo
|
server/src/core/http-server/api/info/get.ts (new file, 46 lines)
@ -0,0 +1,46 @@
|
||||
import type { FastifyPluginAsync } from 'fastify'
|
||||
|
||||
import { version } from '@@/package.json'
|
||||
import {
|
||||
HAS_AFTER_SPEECH,
|
||||
HAS_LOGGER,
|
||||
HAS_STT,
|
||||
HAS_TTS,
|
||||
STT_PROVIDER,
|
||||
TTS_PROVIDER
|
||||
} from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import type { APIOptions } from '@/core/http-server/http-server'
|
||||
|
||||
export const getInfo: FastifyPluginAsync<APIOptions> = async (
|
||||
fastify,
|
||||
options
|
||||
) => {
|
||||
fastify.route({
|
||||
method: 'GET',
|
||||
url: `/api/${options.apiVersion}/info`,
|
||||
handler: async (_request, reply) => {
|
||||
LogHelper.title('GET /info')
|
||||
const message = 'Information pulled.'
|
||||
LogHelper.success(message)
|
||||
|
||||
reply.send({
|
||||
success: true,
|
||||
status: 200,
|
||||
code: 'info_pulled',
|
||||
message,
|
||||
after_speech: HAS_AFTER_SPEECH,
|
||||
logger: HAS_LOGGER,
|
||||
stt: {
|
||||
enabled: HAS_STT,
|
||||
provider: STT_PROVIDER
|
||||
},
|
||||
tts: {
|
||||
enabled: HAS_TTS,
|
||||
provider: TTS_PROVIDER
|
||||
},
|
||||
version
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
@ -1,8 +0,0 @@
|
||||
import getInfo from '@/core/http-server/api/info/get'
|
||||
|
||||
const infoPlugin = async (fastify, options) => {
|
||||
// Get information to init client
|
||||
fastify.register(getInfo, options)
|
||||
}
|
||||
|
||||
export default infoPlugin
|
server/src/core/http-server/api/info/index.ts (new file, 12 lines)
@ -0,0 +1,12 @@
|
||||
import type { FastifyPluginAsync } from 'fastify'
|
||||
|
||||
import { getInfo } from '@/core/http-server/api/info/get'
|
||||
import type { APIOptions } from '@/core/http-server/http-server'
|
||||
|
||||
export const infoPlugin: FastifyPluginAsync<APIOptions> = async (
|
||||
fastify,
|
||||
options
|
||||
) => {
|
||||
// Get information to init client
|
||||
await fastify.register(getInfo, options)
|
||||
}
|
server/src/core/http-server/http-server.ts (new file, 150 lines)
@ -0,0 +1,150 @@
|
||||
import { join } from 'node:path'
|
||||
|
||||
import Fastify, { FastifySchema } from 'fastify'
|
||||
import fastifyStatic from '@fastify/static'
|
||||
import { Type } from '@sinclair/typebox'
|
||||
import type { Static } from '@sinclair/typebox'
|
||||
|
||||
import { version } from '@@/package.json'
|
||||
import { LEON_NODE_ENV, HAS_LOGGER, HAS_OVER_HTTP } from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { DateHelper } from '@/helpers/date-helper'
|
||||
import { corsMidd } from '@/core/http-server/plugins/cors'
|
||||
import { otherMidd } from '@/core/http-server/plugins/other'
|
||||
import { infoPlugin } from '@/core/http-server/api/info'
|
||||
import { downloadsPlugin } from '@/core/http-server/api/downloads'
|
||||
import { keyMidd } from '@/core/http-server/plugins/key'
|
||||
import { NLU, BRAIN } from '@/core'
|
||||
|
||||
const API_VERSION = 'v1'
|
||||
|
||||
export interface APIOptions {
|
||||
apiVersion: string
|
||||
}
|
||||
|
||||
const postQuerySchema = {
|
||||
body: Type.Object({
|
||||
utterance: Type.String()
|
||||
})
|
||||
} satisfies FastifySchema
|
||||
|
||||
interface PostQuerySchema {
|
||||
body: Static<typeof postQuerySchema.body>
|
||||
}
|
||||
|
||||
export default class HTTPServer {
|
||||
private static instance: HTTPServer
|
||||
|
||||
private fastify = Fastify()
|
||||
|
||||
public httpServer = this.fastify.server
|
||||
|
||||
constructor(public readonly host: string, public readonly port: number) {
|
||||
if (!HTTPServer.instance) {
|
||||
LogHelper.title('HTTP Server')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
HTTPServer.instance = this
|
||||
}
|
||||
|
||||
this.host = host
|
||||
this.port = port
|
||||
}
|
||||
|
||||
/**
|
||||
* Server entry point
|
||||
*/
|
||||
public async init(): Promise<void> {
|
||||
this.fastify.addHook('onRequest', corsMidd)
|
||||
this.fastify.addHook('preValidation', otherMidd)
|
||||
|
||||
LogHelper.title('Initialization')
|
||||
LogHelper.info(`The current env is ${LEON_NODE_ENV}`)
|
||||
LogHelper.info(`The current version is ${version}`)
|
||||
|
||||
LogHelper.info(`The current time zone is ${DateHelper.getTimeZone()}`)
|
||||
|
||||
const sLogger = !HAS_LOGGER ? 'disabled' : 'enabled'
|
||||
LogHelper.info(`Collaborative logger ${sLogger}`)
|
||||
|
||||
await this.bootstrap()
|
||||
}
|
||||
|
||||
/**
|
||||
* Bootstrap API
|
||||
*/
|
||||
private async bootstrap(): Promise<void> {
|
||||
// Render the web app
|
||||
this.fastify.register(fastifyStatic, {
|
||||
root: join(process.cwd(), 'app', 'dist'),
|
||||
prefix: '/'
|
||||
})
|
||||
this.fastify.get('/', (_request, reply) => {
|
||||
reply.sendFile('index.html')
|
||||
})
|
||||
|
||||
this.fastify.register(infoPlugin, { apiVersion: API_VERSION })
|
||||
this.fastify.register(downloadsPlugin, { apiVersion: API_VERSION })
|
||||
|
||||
if (HAS_OVER_HTTP) {
|
||||
this.fastify.register((instance, _opts, next) => {
|
||||
instance.addHook('preHandler', keyMidd)
|
||||
|
||||
instance.route<{
|
||||
Body: PostQuerySchema['body']
|
||||
}>({
|
||||
method: 'POST',
|
||||
url: '/api/query',
|
||||
schema: postQuerySchema,
|
||||
handler: async (request, reply) => {
|
||||
const { utterance } = request.body
|
||||
|
||||
try {
|
||||
BRAIN.isMuted = true
|
||||
const data = await NLU.process(utterance)
|
||||
|
||||
reply.send({
|
||||
...data,
|
||||
success: true
|
||||
})
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : error
|
||||
reply.statusCode = 500
|
||||
reply.send({
|
||||
message,
|
||||
success: false
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// TODO: reimplement skills routes once the new core is ready
|
||||
// server.generateSkillsRoutes(instance)
|
||||
|
||||
next()
|
||||
})
|
||||
}
|
||||
|
||||
try {
|
||||
await this.listen()
|
||||
} catch (e) {
|
||||
LogHelper.error((e as Error).message)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Launch server
|
||||
*/
|
||||
private async listen(): Promise<void> {
|
||||
this.fastify.listen(
|
||||
{
|
||||
port: this.port,
|
||||
host: '0.0.0.0'
|
||||
},
|
||||
() => {
|
||||
LogHelper.title('Initialization')
|
||||
LogHelper.success(`Server is available at ${this.host}:${this.port}`)
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
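When HAS_OVER_HTTP is enabled, the query route registered above can be exercised as follows (hypothetical host, port and API key; the path, body shape and x-api-key header come from the code above):

// Placeholders: origin and key must match your instance's HOST/PORT and HTTP_API_KEY
async function queryLeonOverHTTP(utterance: string): Promise<unknown> {
  const response = await fetch('http://localhost:1337/api/query', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': 'YOUR_HTTP_API_KEY'
    },
    body: JSON.stringify({ utterance })
  })

  // On success the server replies with the NLU result plus { success: true }
  return response.json()
}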
@ -1,3 +1,7 @@
|
||||
/* eslint-disable */
|
||||
|
||||
// TODO: delete this file once multi clients are reimplemented
|
||||
|
||||
import { join } from 'node:path'
|
||||
|
||||
import Fastify from 'fastify'
|
||||
@ -21,7 +25,7 @@ import {
|
||||
import { TCP_CLIENT } from '@/core'
|
||||
import Nlu from '@/core/nlu'
|
||||
import Brain from '@/core/brain'
|
||||
import Asr from '@/core/asr'
|
||||
import Asr from '@/core/asr/asr'
|
||||
import Stt from '@/stt/stt'
|
||||
import Tts from '@/tts/tts'
|
||||
import corsMidd from '@/core/http-server/plugins/cors'
|
@ -1,6 +1,8 @@
|
||||
import type { onRequestHookHandler } from 'fastify'
|
||||
|
||||
import { HOST, IS_PRODUCTION_ENV } from '@/constants'
|
||||
|
||||
const corsMidd = async (request, reply) => {
|
||||
export const corsMidd: onRequestHookHandler = async (_request, reply) => {
|
||||
// Only allow a specific client to request the API (depending on the env)
|
||||
if (!IS_PRODUCTION_ENV) {
|
||||
reply.header('Access-Control-Allow-Origin', `${HOST}:3000`)
|
||||
@ -14,5 +16,3 @@ const corsMidd = async (request, reply) => {
|
||||
|
||||
reply.header('Access-Control-Allow-Credentials', true)
|
||||
}
|
||||
|
||||
export default corsMidd
|
@ -1,6 +1,8 @@
|
||||
import type { preHandlerHookHandler } from 'fastify'
|
||||
|
||||
import { HTTP_API_KEY } from '@/constants'
|
||||
|
||||
const keyMidd = async (request, reply) => {
|
||||
export const keyMidd: preHandlerHookHandler = async (request, reply) => {
|
||||
const apiKey = request.headers['x-api-key']
|
||||
if (!apiKey || apiKey !== HTTP_API_KEY) {
|
||||
reply.statusCode = 401
|
||||
@ -10,5 +12,3 @@ const keyMidd = async (request, reply) => {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export default keyMidd
|
@ -1,10 +1,10 @@
|
||||
import type { preValidationHookHandler } from 'fastify'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
const otherMidd = async (request, reply) => {
|
||||
export const otherMidd: preValidationHookHandler = async (request, reply) => {
|
||||
// Remove the X-Powered-By header, otherwise it makes a hacker's life easier by revealing more about the system
|
||||
reply.removeHeader('X-Powered-By')
|
||||
LogHelper.title('Requesting')
|
||||
LogHelper.info(`${request.method} ${request.url}`)
|
||||
}
|
||||
|
||||
export default otherMidd
|
@ -1,5 +1,14 @@
|
||||
import { TCP_SERVER_HOST, TCP_SERVER_PORT } from '@/constants'
|
||||
import { HOST, PORT, TCP_SERVER_HOST, TCP_SERVER_PORT } from '@/constants'
|
||||
import TCPClient from '@/core/tcp-client'
|
||||
import HTTPServer from '@/core/http-server/http-server'
|
||||
import SocketServer from '@/core/socket-server'
|
||||
import SpeechToText from '@/core/stt/stt'
|
||||
import TextToSpeech from '@/core/tts/tts'
|
||||
import AutomaticSpeechRecognition from '@/core/asr/asr'
|
||||
import NamedEntityRecognition from '@/core/nlp/nlu/ner'
|
||||
import ModelLoader from '@/core/nlp/nlu/model-loader'
|
||||
import NaturalLanguageUnderstanding from '@/core/nlp/nlu/nlu'
|
||||
import Brain from '@/core/brain/brain'
|
||||
|
||||
/**
|
||||
* Register core singletons
|
||||
@ -9,3 +18,21 @@ export const TCP_CLIENT = new TCPClient(
|
||||
String(TCP_SERVER_HOST),
|
||||
TCP_SERVER_PORT
|
||||
)
|
||||
|
||||
export const HTTP_SERVER = new HTTPServer(String(HOST), PORT)
|
||||
|
||||
export const SOCKET_SERVER = new SocketServer()
|
||||
|
||||
export const STT = new SpeechToText()
|
||||
|
||||
export const TTS = new TextToSpeech()
|
||||
|
||||
export const ASR = new AutomaticSpeechRecognition()
|
||||
|
||||
export const NER = new NamedEntityRecognition()
|
||||
|
||||
export const MODEL_LOADER = new ModelLoader()
|
||||
|
||||
export const NLU = new NaturalLanguageUnderstanding()
|
||||
|
||||
export const BRAIN = new Brain()
|
||||
|
@ -1,203 +0,0 @@
|
||||
/**
|
||||
* @nlpjs/core is dedicated to web (browsers)
|
||||
* @nlpjs/core-loader can make use of file system
|
||||
* https://github.com/axa-group/nlp.js/issues/766#issuecomment-750315909
|
||||
*/
|
||||
import fs from 'node:fs'
|
||||
|
||||
import { TCP_CLIENT } from '@/core'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
class Ner {
|
||||
constructor(ner) {
|
||||
this.ner = ner
|
||||
|
||||
LogHelper.title('NER')
|
||||
LogHelper.success('New instance')
|
||||
}
|
||||
|
||||
static logExtraction(entities) {
|
||||
LogHelper.title('NER')
|
||||
LogHelper.success('Entities found:')
|
||||
entities.forEach((ent) =>
|
||||
LogHelper.success(`{ value: ${ent.sourceText}, entity: ${ent.entity} }`)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Grab entities and match them with the utterance
|
||||
*/
|
||||
extractEntities(lang, utteranceSamplesFilePath, obj) {
|
||||
return new Promise(async (resolve) => {
|
||||
LogHelper.title('NER')
|
||||
LogHelper.info('Searching for entities...')
|
||||
|
||||
const { classification } = obj
|
||||
// Remove end-punctuation and add an end-whitespace
|
||||
const utterance = `${StringHelper.removeEndPunctuation(obj.utterance)} `
|
||||
const { actions } = JSON.parse(
|
||||
fs.readFileSync(utteranceSamplesFilePath, 'utf8')
|
||||
)
|
||||
const { action } = classification
|
||||
const promises = []
|
||||
const actionEntities = actions[action].entities || []
|
||||
|
||||
/**
|
||||
* Browse action entities
|
||||
* Dynamic injection of the action entities depending on the entity type
|
||||
*/
|
||||
for (let i = 0; i < actionEntities.length; i += 1) {
|
||||
const entity = actionEntities[i]
|
||||
|
||||
if (entity.type === 'regex') {
|
||||
promises.push(this.injectRegexEntity(lang, entity))
|
||||
} else if (entity.type === 'trim') {
|
||||
promises.push(this.injectTrimEntity(lang, entity))
|
||||
} else if (entity.type === 'enum') {
|
||||
promises.push(this.injectEnumEntity(lang, entity))
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(promises)
|
||||
|
||||
const { entities } = await this.ner.process({
|
||||
locale: lang,
|
||||
text: utterance
|
||||
})
|
||||
|
||||
// Normalize entities
|
||||
entities.map((entity) => {
|
||||
// Trim whitespace at the beginning and the end of the entity value
|
||||
entity.sourceText = entity.sourceText.trim()
|
||||
entity.utteranceText = entity.utteranceText.trim()
|
||||
|
||||
// Add resolution property to stay consistent with all entities
|
||||
if (!entity.resolution) {
|
||||
entity.resolution = { value: entity.sourceText }
|
||||
}
|
||||
|
||||
return entity
|
||||
})
|
||||
|
||||
if (entities.length > 0) {
|
||||
Ner.logExtraction(entities)
|
||||
return resolve(entities)
|
||||
}
|
||||
|
||||
LogHelper.title('NER')
|
||||
LogHelper.info('No entity found')
|
||||
return resolve([])
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Get spaCy entities from the TCP server
|
||||
*/
|
||||
static getSpacyEntities(utterance) {
|
||||
return new Promise((resolve) => {
|
||||
const spacyEntitiesReceivedHandler = async ({ spacyEntities }) => {
|
||||
resolve(spacyEntities)
|
||||
}
|
||||
|
||||
TCP_CLIENT.ee.removeAllListeners()
|
||||
TCP_CLIENT.ee.on('spacy-entities-received', spacyEntitiesReceivedHandler)
|
||||
|
||||
TCP_CLIENT.emit('get-spacy-entities', utterance)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Inject trim type entities
|
||||
*/
|
||||
injectTrimEntity(lang, entity) {
|
||||
return new Promise((resolve) => {
|
||||
for (let j = 0; j < entity.conditions.length; j += 1) {
|
||||
const condition = entity.conditions[j]
|
||||
const conditionMethod = `add${StringHelper.snakeToPascalCase(
|
||||
condition.type
|
||||
)}Condition`
|
||||
|
||||
if (condition.type === 'between') {
|
||||
/**
|
||||
* Conditions: https://github.com/axa-group/nlp.js/blob/master/docs/v3/ner-manager.md#trim-named-entities
|
||||
* e.g. list.addBetweenCondition('en', 'list', 'create a', 'list')
|
||||
*/
|
||||
this.ner[conditionMethod](
|
||||
lang,
|
||||
entity.name,
|
||||
condition.from,
|
||||
condition.to
|
||||
)
|
||||
} else if (condition.type.indexOf('after') !== -1) {
|
||||
const rule = {
|
||||
type: 'afterLast',
|
||||
words: condition.from,
|
||||
options: {}
|
||||
}
|
||||
this.ner.addRule(lang, entity.name, 'trim', rule)
|
||||
this.ner[conditionMethod](lang, entity.name, condition.from)
|
||||
} else if (condition.type.indexOf('before') !== -1) {
|
||||
this.ner[conditionMethod](lang, entity.name, condition.to)
|
||||
}
|
||||
}
|
||||
|
||||
resolve()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Inject regex type entities
|
||||
*/
|
||||
injectRegexEntity(lang, entity) {
|
||||
return new Promise((resolve) => {
|
||||
this.ner.addRegexRule(lang, entity.name, new RegExp(entity.regex, 'g'))
|
||||
|
||||
resolve()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Inject enum type entities
|
||||
*/
|
||||
injectEnumEntity(lang, entity) {
|
||||
return new Promise((resolve) => {
|
||||
const { name: entityName, options } = entity
|
||||
const optionKeys = Object.keys(options)
|
||||
|
||||
optionKeys.forEach((optionName) => {
|
||||
const { synonyms } = options[optionName]
|
||||
|
||||
this.ner.addRuleOptionTexts(lang, entityName, optionName, synonyms)
|
||||
})
|
||||
|
||||
resolve()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Microsoft builtin entities
|
||||
* https://github.com/axa-group/nlp.js/blob/master/packages/builtin-microsoft/src/builtin-microsoft.js
|
||||
*/
|
||||
static getMicrosoftBuiltinEntities() {
|
||||
return [
|
||||
'Number',
|
||||
'Ordinal',
|
||||
'Percentage',
|
||||
'Age',
|
||||
'Currency',
|
||||
'Dimension',
|
||||
'Temperature',
|
||||
'DateTime',
|
||||
'PhoneNumber',
|
||||
'IpAddress',
|
||||
// Disable booleans to handle them ourselves
|
||||
// 'Boolean',
|
||||
'Email',
|
||||
'Hashtag',
|
||||
'URL'
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
export default Ner
|
@ -1,60 +1,94 @@
|
||||
import fs from 'node:fs'
|
||||
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
import type {
|
||||
NEREntity,
|
||||
NLPAction,
|
||||
NLPDomain,
|
||||
NLPUtterance,
|
||||
NLUSlot,
|
||||
NLUSlots
|
||||
} from '@/core/nlp/types'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
|
||||
|
||||
const maxContextHistory = 5
|
||||
const defaultActiveContext = {
|
||||
interface ConversationContext {
|
||||
name: string | null
|
||||
domain: NLPDomain
|
||||
intent: string
|
||||
currentEntities: NEREntity[]
|
||||
entities: NEREntity[]
|
||||
slots: NLUSlots
|
||||
isInActionLoop: boolean
|
||||
nextAction: NLPAction | null
|
||||
originalUtterance: NLPUtterance | null
|
||||
activatedAt: number
|
||||
skillConfigPath: string
|
||||
actionName: NLPAction
|
||||
lang: ShortLanguageCode
|
||||
}
|
||||
|
||||
type ConversationPreviousContext = Record<string, ConversationContext> | null
|
||||
|
||||
const MAX_CONTEXT_HISTORY = 5
|
||||
export const DEFAULT_ACTIVE_CONTEXT = {
|
||||
name: null,
|
||||
domain: null,
|
||||
intent: null,
|
||||
domain: '',
|
||||
intent: '',
|
||||
currentEntities: [],
|
||||
entities: [],
|
||||
slots: {},
|
||||
isInActionLoop: false,
|
||||
nextAction: null,
|
||||
originalUtterance: null,
|
||||
activatedAt: 0
|
||||
activatedAt: 0,
|
||||
skillConfigPath: '',
|
||||
actionName: '',
|
||||
lang: 'en'
|
||||
}
|
||||
|
||||
class Conversation {
|
||||
export default class Conversation {
|
||||
// Identify conversations to allow more features in the future (multiple speakers, etc.)
|
||||
public id: string
|
||||
private _previousContexts: ConversationPreviousContext = {}
|
||||
private _activeContext: ConversationContext = DEFAULT_ACTIVE_CONTEXT
|
||||
|
||||
constructor(id = 'conv0') {
|
||||
// Identify conversations to allow more features in the future (multiple speakers, etc.)
|
||||
this._id = id
|
||||
this._activeContext = defaultActiveContext
|
||||
this._previousContexts = {}
|
||||
this.id = id
|
||||
|
||||
LogHelper.title('Conversation')
|
||||
LogHelper.success('New instance')
|
||||
}
|
||||
|
||||
get id() {
|
||||
return this._id
|
||||
}
|
||||
|
||||
get activeContext() {
|
||||
public get activeContext(): ConversationContext {
|
||||
return this._activeContext
|
||||
}
|
||||
|
||||
/**
|
||||
* Activate context according to the triggered action
|
||||
*/
|
||||
set activeContext(contextObj) {
|
||||
public async setActiveContext(
|
||||
nluContext: ConversationContext
|
||||
): Promise<void> {
|
||||
const {
|
||||
slots,
|
||||
isInActionLoop,
|
||||
configDataFilePath,
|
||||
skillConfigPath,
|
||||
actionName,
|
||||
lang,
|
||||
domain,
|
||||
intent,
|
||||
entities
|
||||
} = contextObj
|
||||
} = nluContext
|
||||
const slotKeys = Object.keys(slots)
|
||||
const [skillName] = intent.split('.')
|
||||
const newContextName = `${domain}.${skillName}`
|
||||
const { actions } = JSON.parse(fs.readFileSync(configDataFilePath, 'utf8'))
|
||||
const { actions } = await SkillDomainHelper.getSkillConfig(
|
||||
skillConfigPath,
|
||||
lang
|
||||
)
|
||||
// Grab next action from the NLU data file
|
||||
const { next_action: nextAction } = actions[actionName]
|
||||
const { next_action: nextAction } = actions[actionName] as {
|
||||
next_action: string
|
||||
}
|
||||
|
||||
// If slots are required to trigger next actions, then go through the context activation
|
||||
if (slotKeys.length > 0) {
|
||||
@ -66,6 +100,7 @@ class Conversation {
|
||||
this.pushToPreviousContextsStack()
|
||||
// Activate new context
|
||||
this._activeContext = {
|
||||
...DEFAULT_ACTIVE_CONTEXT,
|
||||
name: newContextName,
|
||||
domain,
|
||||
intent,
|
||||
@ -74,7 +109,7 @@ class Conversation {
|
||||
slots: {},
|
||||
isInActionLoop,
|
||||
nextAction,
|
||||
originalUtterance: contextObj.originalUtterance,
|
||||
originalUtterance: nluContext.originalUtterance,
|
||||
activatedAt: Date.now()
|
||||
}
|
||||
|
||||
@ -101,6 +136,7 @@ class Conversation {
|
||||
if (this._activeContext.name !== newContextName) {
|
||||
// Activate new context
|
||||
this._activeContext = {
|
||||
...DEFAULT_ACTIVE_CONTEXT,
|
||||
name: newContextName,
|
||||
domain,
|
||||
intent,
|
||||
@ -109,7 +145,7 @@ class Conversation {
|
||||
slots: {},
|
||||
isInActionLoop,
|
||||
nextAction,
|
||||
originalUtterance: contextObj.originalUtterance,
|
||||
originalUtterance: nluContext.originalUtterance,
|
||||
activatedAt: Date.now()
|
||||
}
|
||||
|
||||
@ -123,26 +159,30 @@ class Conversation {
|
||||
}
|
||||
}
|
||||
|
||||
get previousContexts() {
|
||||
public get previousContexts(): ConversationPreviousContext {
|
||||
return this._previousContexts
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether there is an active context
|
||||
*/
|
||||
hasActiveContext() {
|
||||
public hasActiveContext(): boolean {
|
||||
return !!this._activeContext.name
|
||||
}
|
||||
|
||||
/**
|
||||
* Set slots in active context
|
||||
*/
|
||||
setSlots(lang, entities, slots = this._activeContext.slots) {
|
||||
public setSlots(
|
||||
lang: ShortLanguageCode,
|
||||
entities: NEREntity[],
|
||||
slots = this._activeContext.slots
|
||||
): void {
|
||||
const slotKeys = Object.keys(slots)
|
||||
|
||||
for (let i = 0; i < slotKeys.length; i += 1) {
|
||||
const key = slotKeys[i]
|
||||
const slotObj = slots[key]
|
||||
const key = slotKeys[i] as string
|
||||
const slotObj = slots[key] as NLUSlot
|
||||
const isFirstSet = key.includes('#')
|
||||
let slotName = slotObj.name
|
||||
let slotEntity = slotObj.expectedEntity
|
||||
@ -150,16 +190,17 @@ class Conversation {
|
||||
|
||||
// Whether this is the first slot setting grabbed from the model or not
|
||||
if (isFirstSet) {
|
||||
;[slotName, slotEntity] = key.split('#')
|
||||
questions = slotObj.locales[lang]
|
||||
;[slotName, slotEntity] = key.split('#') as [string, string]
|
||||
questions = slotObj.locales?.[lang] as string[]
|
||||
}
|
||||
|
||||
// Match the slot with the submitted entity and ensure the slot hasn't been filled yet
|
||||
const [foundEntity] = entities.filter(
|
||||
({ entity }) => entity === slotEntity && !slotObj.isFilled
|
||||
)
|
||||
const pickedQuestion =
|
||||
questions[Math.floor(Math.random() * questions.length)]
|
||||
const pickedQuestion = questions[
|
||||
Math.floor(Math.random() * questions.length)
|
||||
] as string
|
||||
const slot = this._activeContext.slots[slotName]
|
||||
const newSlot = {
|
||||
name: slotName,
|
||||
@ -181,6 +222,8 @@ class Conversation {
|
||||
!slot.isFilled ||
|
||||
(slot.isFilled &&
|
||||
newSlot.isFilled &&
|
||||
'value' in slot.value.resolution &&
|
||||
'value' in newSlot.value.resolution &&
|
||||
slot.value.resolution.value !== newSlot.value.resolution.value)
|
||||
) {
|
||||
if (newSlot?.isFilled) {
|
||||
@ -200,48 +243,54 @@ class Conversation {
|
||||
/**
|
||||
* Get the first not-yet-filled slot, if any
|
||||
*/
|
||||
getNotFilledSlot() {
|
||||
public getNotFilledSlot(): NLUSlot | null {
|
||||
const slotsKeys = Object.keys(this._activeContext.slots)
|
||||
const [notFilledSlotKey] = slotsKeys.filter(
|
||||
(slotKey) => !this._activeContext.slots[slotKey].isFilled
|
||||
(slotKey) => !this._activeContext.slots[slotKey]?.isFilled
|
||||
)
|
||||
|
||||
return this._activeContext.slots[notFilledSlotKey]
|
||||
if (notFilledSlotKey !== undefined) {
|
||||
return this._activeContext.slots[notFilledSlotKey] as NLUSlot
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether slots are all filled
|
||||
*/
|
||||
areSlotsAllFilled() {
|
||||
public areSlotsAllFilled(): boolean {
|
||||
return !this.getNotFilledSlot()
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up active context
|
||||
*/
|
||||
cleanActiveContext() {
|
||||
public cleanActiveContext(): void {
|
||||
LogHelper.title('Conversation')
|
||||
LogHelper.info('Clean active context')
|
||||
|
||||
this.pushToPreviousContextsStack()
|
||||
this._activeContext = defaultActiveContext
|
||||
this._activeContext = DEFAULT_ACTIVE_CONTEXT
|
||||
}
|
||||
|
||||
/**
|
||||
* Push active context to the previous contexts stack
|
||||
*/
|
||||
pushToPreviousContextsStack() {
|
||||
const previousContextsKeys = Object.keys(this._previousContexts)
|
||||
private pushToPreviousContextsStack(): void {
|
||||
if (this._previousContexts) {
|
||||
const previousContextsKeys = Object.keys(this._previousContexts)
|
||||
|
||||
// Remove the oldest context from the history stack if it reaches the maximum limit
|
||||
if (previousContextsKeys.length >= maxContextHistory) {
|
||||
delete this._previousContexts[previousContextsKeys[0]]
|
||||
}
|
||||
// Remove the oldest context from the history stack if it reaches the maximum limit
|
||||
if (previousContextsKeys.length >= MAX_CONTEXT_HISTORY) {
|
||||
delete this._previousContexts[previousContextsKeys[0] as string]
|
||||
}
|
||||
|
||||
if (this._activeContext.name) {
|
||||
this._previousContexts[this._activeContext.name] = this._activeContext
|
||||
if (this._activeContext.name) {
|
||||
this._previousContexts[this._activeContext.name] = this._activeContext
|
||||
}
|
||||
} else {
|
||||
LogHelper.warning('No previous context found')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export default Conversation
|
178
server/src/core/nlp/nlu/action-loop.ts
Normal file
@ -0,0 +1,178 @@
|
||||
import fs from 'node:fs'
|
||||
import { join } from 'node:path'
|
||||
|
||||
import type { NLPUtterance } from '@/core/nlp/types'
|
||||
import type { BrainProcessResult } from '@/core/brain/types'
|
||||
import { BRAIN, MODEL_LOADER, NER, NLU } from '@/core'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
|
||||
import { DEFAULT_NLU_RESULT } from '@/core/nlp/nlu/nlu'
|
||||
|
||||
interface ResolveResolversResult {
|
||||
name: string
|
||||
value: string
|
||||
}
|
||||
|
||||
export class ActionLoop {
|
||||
/**
|
||||
* Handle action loop logic before NLU processing
|
||||
*/
|
||||
public static async handle(
|
||||
utterance: NLPUtterance
|
||||
): Promise<Partial<BrainProcessResult> | null> {
|
||||
const { domain, intent } = NLU.conversation.activeContext
|
||||
const [skillName, actionName] = intent.split('.') as [string, string]
|
||||
const skillConfigPath = join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
domain,
|
||||
skillName,
|
||||
'config',
|
||||
BRAIN.lang + '.json'
|
||||
)
|
||||
NLU.nluResult = {
|
||||
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
|
||||
slots: NLU.conversation.activeContext.slots,
|
||||
utterance,
|
||||
skillConfigPath,
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName,
|
||||
action: actionName,
|
||||
confidence: 1
|
||||
}
|
||||
}
|
||||
NLU.nluResult.entities = await NER.extractEntities(
|
||||
BRAIN.lang,
|
||||
skillConfigPath,
|
||||
NLU.nluResult
|
||||
)
|
||||
|
||||
const { actions, resolvers } = await SkillDomainHelper.getSkillConfig(
|
||||
skillConfigPath,
|
||||
BRAIN.lang
|
||||
)
|
||||
const action = actions[NLU.nluResult.classification.action]
|
||||
if (action?.loop) {
|
||||
const { name: expectedItemName, type: expectedItemType } =
|
||||
action.loop.expected_item
|
||||
let hasMatchingEntity = false
|
||||
let hasMatchingResolver = false
|
||||
|
||||
if (expectedItemType === 'entity') {
|
||||
hasMatchingEntity =
|
||||
NLU.nluResult.entities.filter(
|
||||
({ entity }) => expectedItemName === entity
|
||||
).length > 0
|
||||
} else if (expectedItemType.indexOf('resolver') !== -1) {
|
||||
const nlpObjs = {
|
||||
global_resolver: MODEL_LOADER.globalResolversNLPContainer,
|
||||
skill_resolver: MODEL_LOADER.skillsResolversNLPContainer
|
||||
}
|
||||
const result = await nlpObjs[expectedItemType].process(utterance)
|
||||
const { intent } = result
|
||||
|
||||
const resolveResolvers = async (
|
||||
resolver: string,
|
||||
intent: string
|
||||
): Promise<[ResolveResolversResult]> => {
|
||||
const resolversPath = join(
|
||||
process.cwd(),
|
||||
'core',
|
||||
'data',
|
||||
BRAIN.lang,
|
||||
'global-resolvers'
|
||||
)
|
||||
// Load the skill resolver or the global resolver
|
||||
const resolvedIntents = !intent.includes('resolver.global')
|
||||
? resolvers && resolvers[resolver]
|
||||
: JSON.parse(
|
||||
await fs.promises.readFile(
|
||||
join(resolversPath, `${resolver}.json`),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
|
||||
// E.g. resolver.global.denial -> denial
|
||||
intent = intent.substring(intent.lastIndexOf('.') + 1)
|
||||
|
||||
return [
|
||||
{
|
||||
name: expectedItemName,
|
||||
value: resolvedIntents.intents[intent].value
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
// Resolve resolver if global resolver or skill resolver has been found
|
||||
if (
|
||||
intent &&
|
||||
(intent.includes('resolver.global') ||
|
||||
intent.includes(`resolver.${skillName}`))
|
||||
) {
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.success('Resolvers resolved:')
|
||||
NLU.nluResult.resolvers = await resolveResolvers(
|
||||
expectedItemName,
|
||||
intent
|
||||
)
|
||||
NLU.nluResult.resolvers.forEach((resolver) =>
|
||||
LogHelper.success(`${intent}: ${JSON.stringify(resolver)}`)
|
||||
)
|
||||
hasMatchingResolver = NLU.nluResult.resolvers.length > 0
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure expected items are in the utterance, otherwise clean context and reprocess
|
||||
if (!hasMatchingEntity && !hasMatchingResolver) {
|
||||
BRAIN.talk(`${BRAIN.wernicke('random_context_out_of_topic')}.`)
|
||||
NLU.conversation.cleanActiveContext()
|
||||
await NLU.process(utterance)
|
||||
return null
|
||||
}
|
||||
|
||||
try {
|
||||
const processedData = await BRAIN.execute(NLU.nluResult)
|
||||
// Reprocess with the original utterance that triggered the context at first
|
||||
if (processedData.core?.restart === true) {
|
||||
const { originalUtterance } = NLU.conversation.activeContext
|
||||
|
||||
NLU.conversation.cleanActiveContext()
|
||||
|
||||
if (originalUtterance !== null) {
|
||||
await NLU.process(originalUtterance)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* In case there is no next action to prepare anymore
|
||||
* and there is an explicit stop of the loop from the skill
|
||||
*/
|
||||
if (
|
||||
!processedData.action?.next_action &&
|
||||
processedData.core?.isInActionLoop === false
|
||||
) {
|
||||
NLU.conversation.cleanActiveContext()
|
||||
return null
|
||||
}
|
||||
|
||||
// Break the action loop and prepare for the next action if necessary
|
||||
if (processedData.core?.isInActionLoop === false) {
|
||||
NLU.conversation.activeContext.isInActionLoop =
|
||||
!!processedData.action?.loop
|
||||
NLU.conversation.activeContext.actionName = processedData.action
|
||||
?.next_action as string
|
||||
NLU.conversation.activeContext.intent = `${processedData.classification?.skill}.${processedData.action?.next_action}`
|
||||
}
|
||||
|
||||
return processedData
|
||||
} catch (e) {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
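For orientation: a minimal sketch of how this new ActionLoop class is consumed. It mirrors the call site that appears in the nlu.ts diff further down in this commit; the preProcess wrapper name and its guard layout are illustrative only.

import { ActionLoop } from '@/core/nlp/nlu/action-loop'
import { NLU } from '@/core'
import type { NLPUtterance } from '@/core/nlp/types'

// Sketch: when the active context is already in an action loop, the utterance
// is handed straight to ActionLoop.handle() instead of being classified again
const preProcess = async (utterance: NLPUtterance): Promise<void> => {
  if (
    NLU.conversation.hasActiveContext() &&
    NLU.conversation.activeContext.isInActionLoop
  ) {
    // Resolves to null when the loop is broken and the utterance is reprocessed
    await ActionLoop.handle(utterance)
  }
}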
185
server/src/core/nlp/nlu/model-loader.ts
Normal file
@ -0,0 +1,185 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import { containerBootstrap } from '@nlpjs/core-loader'
|
||||
import { Nlp } from '@nlpjs/nlp'
|
||||
import { BuiltinMicrosoft } from '@nlpjs/builtin-microsoft'
|
||||
import { LangAll } from '@nlpjs/lang-all'
|
||||
|
||||
import { MODELS_PATH } from '@/constants'
|
||||
import { NER } from '@/core'
|
||||
import { MICROSOFT_BUILT_IN_ENTITIES } from '@/core/nlp/nlu/ner'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
type NLPContainer = undefined | any
|
||||
|
||||
export default class ModelLoader {
|
||||
private static instance: ModelLoader
|
||||
public mainNLPContainer: NLPContainer
|
||||
public globalResolversNLPContainer: NLPContainer
|
||||
public skillsResolversNLPContainer: NLPContainer
|
||||
|
||||
constructor() {
|
||||
if (!ModelLoader.instance) {
|
||||
LogHelper.title('Model Loader')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
ModelLoader.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if NLP models exist
|
||||
*/
|
||||
public hasNlpModels(): boolean {
|
||||
return (
|
||||
!!this.globalResolversNLPContainer &&
|
||||
!!this.skillsResolversNLPContainer &&
|
||||
!!this.mainNLPContainer
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Load all NLP models at once
|
||||
*/
|
||||
public loadNLPModels(): Promise<[void, void, void]> {
|
||||
return Promise.all([
|
||||
this.loadGlobalResolversModel(
|
||||
path.join(MODELS_PATH, 'leon-global-resolvers-model.nlp')
|
||||
),
|
||||
this.loadSkillsResolversModel(
|
||||
path.join(MODELS_PATH, 'leon-skills-resolvers-model.nlp')
|
||||
),
|
||||
this.loadMainModel(path.join(MODELS_PATH, 'leon-main-model.nlp'))
|
||||
])
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the global resolvers NLP model from the latest training
|
||||
*/
|
||||
private loadGlobalResolversModel(modelPath: string): Promise<void> {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
if (!fs.existsSync(modelPath)) {
|
||||
LogHelper.title('Model Loader')
|
||||
|
||||
reject(
|
||||
new Error(
|
||||
'The global resolvers NLP model does not exist, please run: npm run train'
|
||||
)
|
||||
)
|
||||
} else {
|
||||
LogHelper.title('Model Loader')
|
||||
|
||||
try {
|
||||
const container = await containerBootstrap()
|
||||
|
||||
container.use(Nlp)
|
||||
container.use(LangAll)
|
||||
|
||||
this.globalResolversNLPContainer = container.get('nlp')
|
||||
const nluManager = container.get('nlu-manager')
|
||||
nluManager.settings.spellCheck = true
|
||||
|
||||
await this.globalResolversNLPContainer.load(modelPath)
|
||||
LogHelper.success('Global resolvers NLP model loaded')
|
||||
|
||||
resolve()
|
||||
} catch (e) {
|
||||
reject(
|
||||
new Error(
|
||||
'An error occurred while loading the global resolvers NLP model'
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the skills resolvers NLP model from the latest training
|
||||
*/
|
||||
private loadSkillsResolversModel(modelPath: string): Promise<void> {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
if (!fs.existsSync(modelPath)) {
|
||||
LogHelper.title('Model Loader')
|
||||
|
||||
reject({
|
||||
type: 'warning',
|
||||
obj: new Error(
|
||||
'The skills resolvers NLP model does not exist, please run: npm run train'
|
||||
)
|
||||
})
|
||||
} else {
|
||||
try {
|
||||
const container = await containerBootstrap()
|
||||
|
||||
container.use(Nlp)
|
||||
container.use(LangAll)
|
||||
|
||||
this.skillsResolversNLPContainer = container.get('nlp')
|
||||
const nluManager = container.get('nlu-manager')
|
||||
nluManager.settings.spellCheck = true
|
||||
|
||||
await this.skillsResolversNLPContainer.load(modelPath)
|
||||
LogHelper.success('Skills resolvers NLP model loaded')
|
||||
|
||||
resolve()
|
||||
} catch (e) {
|
||||
reject(
|
||||
new Error(
|
||||
'An error occurred while loading the skills resolvers NLP model'
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the main NLP model from the latest training
|
||||
*/
|
||||
private loadMainModel(modelPath: string): Promise<void> {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
if (!fs.existsSync(modelPath)) {
|
||||
LogHelper.title('Model Loader')
|
||||
|
||||
reject({
|
||||
type: 'warning',
|
||||
obj: new Error(
|
||||
'The main NLP model does not exist, please run: npm run train'
|
||||
)
|
||||
})
|
||||
} else {
|
||||
try {
|
||||
const container = await containerBootstrap()
|
||||
|
||||
container.register(
|
||||
'extract-builtin-??',
|
||||
new BuiltinMicrosoft({
|
||||
builtins: MICROSOFT_BUILT_IN_ENTITIES
|
||||
}),
|
||||
true
|
||||
)
|
||||
container.use(Nlp)
|
||||
container.use(LangAll)
|
||||
|
||||
this.mainNLPContainer = container.get('nlp')
|
||||
const nluManager = container.get('nlu-manager')
|
||||
nluManager.settings.spellCheck = true
|
||||
|
||||
await this.mainNLPContainer.load(modelPath)
|
||||
LogHelper.success('Main NLP model loaded')
|
||||
|
||||
NER.manager = this.mainNLPContainer.ner
|
||||
|
||||
resolve()
|
||||
} catch (e) {
|
||||
reject(
|
||||
new Error('An error occurred while loading the main NLP model')
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
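A minimal sketch of how this loader is typically driven at boot time; the loadModels wrapper and its error handling are illustrative, the actual boot sequence is outside this diff.

import { MODEL_LOADER } from '@/core'
import { LogHelper } from '@/helpers/log-helper'

// Sketch: load the global resolvers, skills resolvers and main models in
// parallel; each loader rejects if its .nlp file is missing (npm run train)
const loadModels = async (): Promise<void> => {
  try {
    await MODEL_LOADER.loadNLPModels()
    LogHelper.success('All NLP models loaded')
  } catch (e) {
    LogHelper.error(`Failed to load NLP models: ${e}`)
  }
}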
263
server/src/core/nlp/nlu/ner.ts
Normal file
@ -0,0 +1,263 @@
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
import type {
|
||||
NEREntity,
|
||||
NERSpacyEntity,
|
||||
NLPUtterance,
|
||||
NLUResult
|
||||
} from '@/core/nlp/types'
|
||||
import type {
|
||||
SkillCustomEnumEntityTypeSchema,
|
||||
SkillCustomRegexEntityTypeSchema,
|
||||
SkillCustomTrimEntityTypeSchema
|
||||
} from '@/schemas/skill-schemas'
|
||||
import { BRAIN, MODEL_LOADER, TCP_CLIENT } from '@/core'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
type NERManager = undefined | any
|
||||
|
||||
// https://github.com/axa-group/nlp.js/blob/master/packages/builtin-microsoft/src/builtin-microsoft.js
|
||||
export const MICROSOFT_BUILT_IN_ENTITIES = [
|
||||
'Number',
|
||||
'Ordinal',
|
||||
'Percentage',
|
||||
'Age',
|
||||
'Currency',
|
||||
'Dimension',
|
||||
'Temperature',
|
||||
'DateTime',
|
||||
'PhoneNumber',
|
||||
'IpAddress',
|
||||
// Disable booleans to handle them ourselves
|
||||
// 'Boolean',
|
||||
'Email',
|
||||
'Hashtag',
|
||||
'URL'
|
||||
]
|
||||
|
||||
export default class NER {
|
||||
private static instance: NER
|
||||
public manager: NERManager
|
||||
|
||||
constructor() {
|
||||
if (!NER.instance) {
|
||||
LogHelper.title('NER')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
NER.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
private static logExtraction(entities: NEREntity[]): void {
|
||||
LogHelper.title('NER')
|
||||
LogHelper.success('Entities found:')
|
||||
|
||||
entities.forEach((entity) =>
|
||||
LogHelper.success(
|
||||
`{ value: ${entity.sourceText}, entity: ${entity.entity} }`
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Grab entities and match them with the utterance
|
||||
*/
|
||||
public extractEntities(
|
||||
lang: ShortLanguageCode,
|
||||
skillConfigPath: string,
|
||||
nluResult: NLUResult
|
||||
): Promise<NEREntity[]> {
|
||||
return new Promise(async (resolve) => {
|
||||
LogHelper.title('NER')
|
||||
LogHelper.info('Looking for entities...')
|
||||
|
||||
const { classification } = nluResult
|
||||
// Remove end-punctuation and add an end-whitespace
|
||||
const utterance = `${StringHelper.removeEndPunctuation(
|
||||
nluResult.utterance
|
||||
)} `
|
||||
const { actions } = await SkillDomainHelper.getSkillConfig(
|
||||
skillConfigPath,
|
||||
lang
|
||||
)
|
||||
const { action } = classification
|
||||
const promises = []
|
||||
const actionEntities = actions[action]?.entities || []
|
||||
|
||||
/**
|
||||
* Browse action entities
|
||||
* Dynamic injection of the action entities depending on the entity type
|
||||
*/
|
||||
for (let i = 0; i < actionEntities.length; i += 1) {
|
||||
const entity = actionEntities[i]
|
||||
|
||||
if (entity?.type === 'regex') {
|
||||
promises.push(this.injectRegexEntity(lang, entity))
|
||||
} else if (entity?.type === 'trim') {
|
||||
promises.push(this.injectTrimEntity(lang, entity))
|
||||
} else if (entity?.type === 'enum') {
|
||||
promises.push(this.injectEnumEntity(lang, entity))
|
||||
}
|
||||
}
|
||||
|
||||
await Promise.all(promises)
|
||||
|
||||
const { entities }: { entities: NEREntity[] } =
|
||||
await this.manager.process({
|
||||
locale: lang,
|
||||
text: utterance
|
||||
})
|
||||
|
||||
// Normalize entities
|
||||
entities.map((entity) => {
|
||||
// Trim whitespace at the beginning and the end of the entity value
|
||||
entity.sourceText = entity.sourceText.trim()
|
||||
entity.utteranceText = entity.utteranceText.trim()
|
||||
|
||||
// Add resolution property to stay consistent with all entities
|
||||
if (!entity.resolution) {
|
||||
entity.resolution = { value: entity.sourceText }
|
||||
}
|
||||
|
||||
return entity
|
||||
})
|
||||
|
||||
if (entities.length > 0) {
|
||||
NER.logExtraction(entities)
|
||||
return resolve(entities)
|
||||
}
|
||||
|
||||
LogHelper.title('NER')
|
||||
LogHelper.info('No entity found')
|
||||
return resolve([])
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge spaCy entities with the NER instance
|
||||
*/
|
||||
public async mergeSpacyEntities(utterance: NLPUtterance): Promise<void> {
|
||||
const spacyEntities = await this.getSpacyEntities(utterance)
|
||||
|
||||
if (spacyEntities.length > 0) {
|
||||
spacyEntities.forEach(({ entity, resolution }) => {
|
||||
const spacyEntity = {
|
||||
[entity]: {
|
||||
options: {
|
||||
[resolution.value]: [StringHelper.ucFirst(resolution.value)]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MODEL_LOADER.mainNLPContainer.addEntities(spacyEntity, BRAIN.lang)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get spaCy entities from the TCP server
|
||||
*/
|
||||
private getSpacyEntities(utterance: NLPUtterance): Promise<NERSpacyEntity[]> {
|
||||
return new Promise((resolve) => {
|
||||
const spacyEntitiesReceivedHandler = async ({
|
||||
spacyEntities
|
||||
}: {
|
||||
spacyEntities: NERSpacyEntity[]
|
||||
}): Promise<void> => {
|
||||
resolve(spacyEntities)
|
||||
}
|
||||
|
||||
TCP_CLIENT.ee.removeAllListeners()
|
||||
TCP_CLIENT.ee.on('spacy-entities-received', spacyEntitiesReceivedHandler)
|
||||
|
||||
TCP_CLIENT.emit('get-spacy-entities', utterance)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Inject trim type entities
|
||||
*/
|
||||
private injectTrimEntity(
|
||||
lang: ShortLanguageCode,
|
||||
entityConfig: SkillCustomTrimEntityTypeSchema
|
||||
): Promise<void> {
|
||||
return new Promise((resolve) => {
|
||||
for (let j = 0; j < entityConfig.conditions.length; j += 1) {
|
||||
const condition = entityConfig.conditions[j]
|
||||
const conditionMethod = `add${StringHelper.snakeToPascalCase(
|
||||
condition?.type || ''
|
||||
)}Condition`
|
||||
|
||||
if (condition?.type === 'between') {
|
||||
/**
|
||||
* Conditions: https://github.com/axa-group/nlp.js/blob/master/docs/v3/ner-manager.md#trim-named-entities
|
||||
* e.g. list.addBetweenCondition('en', 'list', 'create a', 'list')
|
||||
*/
|
||||
this.manager[conditionMethod](
|
||||
lang,
|
||||
entityConfig.name,
|
||||
condition?.from,
|
||||
condition?.to
|
||||
)
|
||||
} else if (condition?.type.indexOf('after') !== -1) {
|
||||
const rule = {
|
||||
type: 'afterLast',
|
||||
words: condition?.from,
|
||||
options: {}
|
||||
}
|
||||
this.manager.addRule(lang, entityConfig.name, 'trim', rule)
|
||||
this.manager[conditionMethod](
|
||||
lang,
|
||||
entityConfig.name,
|
||||
condition?.from
|
||||
)
|
||||
} else if (condition.type.indexOf('before') !== -1) {
|
||||
this.manager[conditionMethod](lang, entityConfig.name, condition.to)
|
||||
}
|
||||
}
|
||||
|
||||
resolve()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Inject regex type entities
|
||||
*/
|
||||
private injectRegexEntity(
|
||||
lang: ShortLanguageCode,
|
||||
entityConfig: SkillCustomRegexEntityTypeSchema
|
||||
): Promise<void> {
|
||||
return new Promise((resolve) => {
|
||||
this.manager.addRegexRule(
|
||||
lang,
|
||||
entityConfig.name,
|
||||
new RegExp(entityConfig.regex, 'g')
|
||||
)
|
||||
|
||||
resolve()
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Inject enum type entities
|
||||
*/
|
||||
private injectEnumEntity(
|
||||
lang: ShortLanguageCode,
|
||||
entityConfig: SkillCustomEnumEntityTypeSchema
|
||||
): Promise<void> {
|
||||
return new Promise((resolve) => {
|
||||
const { name: entityName, options } = entityConfig
|
||||
const optionKeys = Object.keys(options)
|
||||
|
||||
optionKeys.forEach((optionName) => {
|
||||
const { synonyms } = options[optionName] as { synonyms: string[] }
|
||||
|
||||
this.manager.addRuleOptionTexts(lang, entityName, optionName, synonyms)
|
||||
})
|
||||
|
||||
resolve()
|
||||
})
|
||||
}
|
||||
}
|
353
server/src/core/nlp/nlu/nlu.ts
Normal file
@ -0,0 +1,353 @@
|
||||
import { join } from 'node:path'
|
||||
import { spawn } from 'node:child_process'
|
||||
|
||||
import kill from 'tree-kill'
|
||||
|
||||
import type { Language, ShortLanguageCode } from '@/types'
|
||||
import type {
|
||||
NLPAction,
|
||||
NLPDomain,
|
||||
NLPJSProcessResult,
|
||||
NLPSkill,
|
||||
NLPUtterance,
|
||||
NLUResult
|
||||
} from '@/core/nlp/types'
|
||||
import type { BrainProcessResult } from '@/core/brain/types'
|
||||
import { langs } from '@@/core/langs.json'
|
||||
import { TCP_SERVER_BIN_PATH } from '@/constants'
|
||||
import { TCP_CLIENT, BRAIN, SOCKET_SERVER, MODEL_LOADER, NER } from '@/core'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
import { ActionLoop } from '@/core/nlp/nlu/action-loop'
|
||||
import { SlotFilling } from '@/core/nlp/nlu/slot-filling'
|
||||
import Conversation, { DEFAULT_ACTIVE_CONTEXT } from '@/core/nlp/conversation'
|
||||
|
||||
type NLUProcessResult = Promise<Partial<
|
||||
BrainProcessResult & {
|
||||
processingTime: number
|
||||
nluProcessingTime: number
|
||||
}
|
||||
> | null>
|
||||
|
||||
export const DEFAULT_NLU_RESULT = {
|
||||
utterance: '',
|
||||
currentEntities: [],
|
||||
entities: [],
|
||||
currentResolvers: [],
|
||||
resolvers: [],
|
||||
slots: {},
|
||||
skillConfigPath: '',
|
||||
answers: [], // For dialog action type
|
||||
classification: {
|
||||
domain: '',
|
||||
skill: '',
|
||||
action: '',
|
||||
confidence: 0
|
||||
}
|
||||
}
|
||||
|
||||
export default class NLU {
|
||||
private static instance: NLU
|
||||
public nluResult: NLUResult = DEFAULT_NLU_RESULT
|
||||
public conversation = new Conversation('conv0')
|
||||
|
||||
constructor() {
|
||||
if (!NLU.instance) {
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
NLU.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set new language; recreate a new TCP server with new language; and reprocess understanding
|
||||
*/
|
||||
private switchLanguage(
|
||||
utterance: NLPUtterance,
|
||||
locale: ShortLanguageCode
|
||||
): void {
|
||||
const connectedHandler = async (): Promise<void> => {
|
||||
await this.process(utterance)
|
||||
}
|
||||
|
||||
BRAIN.lang = locale
|
||||
BRAIN.talk(`${BRAIN.wernicke('random_language_switch')}.`, true)
|
||||
|
||||
// Recreate a new TCP server process and reconnect the TCP client
|
||||
kill(global.tcpServerProcess.pid as number, () => {
|
||||
global.tcpServerProcess = spawn(`${TCP_SERVER_BIN_PATH} ${locale}`, {
|
||||
shell: true
|
||||
})
|
||||
|
||||
TCP_CLIENT.connect()
|
||||
TCP_CLIENT.ee.removeListener('connected', connectedHandler)
|
||||
TCP_CLIENT.ee.on('connected', connectedHandler)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Classify the utterance,
|
||||
* pick up the right classification
|
||||
* and extract entities
|
||||
*/
|
||||
public process(utterance: NLPUtterance): NLUProcessResult {
|
||||
const processingTimeStart = Date.now()
|
||||
|
||||
return new Promise(async (resolve, reject) => {
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.info('Processing...')
|
||||
|
||||
if (!MODEL_LOADER.hasNlpModels()) {
|
||||
if (!BRAIN.isMuted) {
|
||||
BRAIN.talk(`${BRAIN.wernicke('random_errors')}!`)
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
}
|
||||
|
||||
const msg =
|
||||
'An NLP model is missing, please rebuild the project or if you are in dev run: npm run train'
|
||||
LogHelper.error(msg)
|
||||
return reject(msg)
|
||||
}
|
||||
|
||||
// Add spaCy entities
|
||||
await NER.mergeSpacyEntities(utterance)
|
||||
|
||||
// Pre NLU processing according to the active context if there is one
|
||||
if (this.conversation.hasActiveContext()) {
|
||||
// When the active context is in an action loop, then directly trigger the action
|
||||
if (this.conversation.activeContext.isInActionLoop) {
|
||||
return resolve(await ActionLoop.handle(utterance))
|
||||
}
|
||||
|
||||
// When the active context has slots filled
|
||||
if (Object.keys(this.conversation.activeContext.slots).length > 0) {
|
||||
try {
|
||||
return resolve(await SlotFilling.handle(utterance))
|
||||
} catch (e) {
|
||||
return reject({})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const result: NLPJSProcessResult =
|
||||
await MODEL_LOADER.mainNLPContainer.process(utterance)
|
||||
const { locale, answers, classifications } = result
|
||||
let { score, intent, domain } = result
|
||||
|
||||
/**
|
||||
* If a context is active, then use the appropriate classification based on score probability.
|
||||
* E.g. 1. Create my shopping list; 2. Actually delete it.
|
||||
* If there are several "delete it" across skills, Leon needs to make use of
|
||||
* the current context ({domain}.{skill}) to define the most accurate classification
|
||||
*/
|
||||
if (this.conversation.hasActiveContext()) {
|
||||
classifications.forEach(({ intent: newIntent, score: newScore }) => {
|
||||
if (newScore > 0.6) {
|
||||
const [skillName] = newIntent.split('.')
|
||||
const newDomain = MODEL_LOADER.mainNLPContainer.getIntentDomain(
|
||||
locale,
|
||||
newIntent
|
||||
)
|
||||
const contextName = `${newDomain}.${skillName}`
|
||||
if (this.conversation.activeContext.name === contextName) {
|
||||
score = newScore
|
||||
intent = newIntent
|
||||
domain = newDomain
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const [skillName, actionName] = intent.split('.')
|
||||
this.nluResult = {
|
||||
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
|
||||
utterance,
|
||||
answers, // For dialog action type
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName || '',
|
||||
action: actionName || '',
|
||||
confidence: score
|
||||
}
|
||||
}
|
||||
|
||||
const isSupportedLanguage = LangHelper.getShortCodes().includes(locale)
|
||||
if (!isSupportedLanguage) {
|
||||
BRAIN.talk(`${BRAIN.wernicke('random_language_not_supported')}.`, true)
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
return resolve({})
|
||||
}
|
||||
|
||||
// Trigger language switching
|
||||
if (BRAIN.lang !== locale) {
|
||||
this.switchLanguage(utterance, locale)
|
||||
return resolve(null)
|
||||
}
|
||||
|
||||
// this.sendLog()
|
||||
|
||||
if (intent === 'None') {
|
||||
const fallback = this.fallback(
|
||||
langs[LangHelper.getLongCode(locale)].fallbacks
|
||||
)
|
||||
|
||||
if (!fallback) {
|
||||
if (!BRAIN.isMuted) {
|
||||
BRAIN.talk(`${BRAIN.wernicke('random_unknown_intents')}.`, true)
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
}
|
||||
|
||||
LogHelper.title('NLU')
|
||||
const msg = 'Intent not found'
|
||||
LogHelper.warning(msg)
|
||||
|
||||
return resolve(null)
|
||||
}
|
||||
|
||||
this.nluResult = fallback
|
||||
}
|
||||
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.success(
|
||||
`Intent found: ${this.nluResult.classification.skill}.${this.nluResult.classification.action} (domain: ${this.nluResult.classification.domain})`
|
||||
)
|
||||
|
||||
const skillConfigPath = join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
this.nluResult.classification.domain,
|
||||
this.nluResult.classification.skill,
|
||||
'config',
|
||||
BRAIN.lang + '.json'
|
||||
)
|
||||
this.nluResult.skillConfigPath = skillConfigPath
|
||||
|
||||
try {
|
||||
this.nluResult.entities = await NER.extractEntities(
|
||||
BRAIN.lang,
|
||||
skillConfigPath,
|
||||
this.nluResult
|
||||
)
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to extract entities: ${e}`)
|
||||
}
|
||||
|
||||
const shouldSlotLoop = await SlotFilling.route(intent)
|
||||
if (shouldSlotLoop) {
|
||||
return resolve({})
|
||||
}
|
||||
|
||||
// In case all slots have been filled in the first utterance
|
||||
if (
|
||||
this.conversation.hasActiveContext() &&
|
||||
Object.keys(this.conversation.activeContext.slots).length > 0
|
||||
) {
|
||||
try {
|
||||
return resolve(await SlotFilling.handle(utterance))
|
||||
} catch (e) {
|
||||
return reject({})
|
||||
}
|
||||
}
|
||||
|
||||
const newContextName = `${this.nluResult.classification.domain}.${skillName}`
|
||||
if (this.conversation.activeContext.name !== newContextName) {
|
||||
this.conversation.cleanActiveContext()
|
||||
}
|
||||
await this.conversation.setActiveContext({
|
||||
...DEFAULT_ACTIVE_CONTEXT,
|
||||
lang: BRAIN.lang,
|
||||
slots: {},
|
||||
isInActionLoop: false,
|
||||
originalUtterance: this.nluResult.utterance,
|
||||
skillConfigPath: this.nluResult.skillConfigPath,
|
||||
actionName: this.nluResult.classification.action,
|
||||
domain: this.nluResult.classification.domain,
|
||||
intent,
|
||||
entities: this.nluResult.entities
|
||||
})
|
||||
// Pass current utterance entities to the NLU result object
|
||||
this.nluResult.currentEntities =
|
||||
this.conversation.activeContext.currentEntities
|
||||
// Pass context entities to the NLU result object
|
||||
this.nluResult.entities = this.conversation.activeContext.entities
|
||||
|
||||
try {
|
||||
const processedData = await BRAIN.execute(this.nluResult)
|
||||
|
||||
// Prepare next action if there is one queuing
|
||||
if (processedData.nextAction) {
|
||||
this.conversation.cleanActiveContext()
|
||||
await this.conversation.setActiveContext({
|
||||
...DEFAULT_ACTIVE_CONTEXT,
|
||||
lang: BRAIN.lang,
|
||||
slots: {},
|
||||
isInActionLoop: !!processedData.nextAction.loop,
|
||||
originalUtterance: processedData.utterance ?? '',
|
||||
skillConfigPath: processedData.skillConfigPath || '',
|
||||
actionName: processedData.action?.next_action || '',
|
||||
domain: processedData.classification?.domain || '',
|
||||
intent: `${processedData.classification?.skill}.${processedData.action?.next_action}`,
|
||||
entities: []
|
||||
})
|
||||
}
|
||||
|
||||
const processingTimeEnd = Date.now()
|
||||
const processingTime = processingTimeEnd - processingTimeStart
|
||||
|
||||
return resolve({
|
||||
processingTime, // In ms, total time
|
||||
...processedData,
|
||||
nluProcessingTime:
|
||||
processingTime - (processedData?.executionTime || 0) // In ms, NLU processing time only
|
||||
})
|
||||
} catch (e) {
|
||||
const errorMessage = `Failed to execute action: ${e}`
|
||||
|
||||
LogHelper.error(errorMessage)
|
||||
|
||||
if (!BRAIN.isMuted) {
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
}
|
||||
|
||||
return reject(new Error(errorMessage))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Pick up and compare the right fallback
|
||||
* according to the desired skill action
|
||||
*/
|
||||
private fallback(fallbacks: Language['fallbacks']): NLUResult | null {
|
||||
const words = this.nluResult.utterance.toLowerCase().split(' ')
|
||||
|
||||
if (fallbacks.length > 0) {
|
||||
LogHelper.info('Looking for fallbacks...')
|
||||
const tmpWords = []
|
||||
|
||||
for (let i = 0; i < fallbacks.length; i += 1) {
|
||||
for (let j = 0; j < fallbacks[i]!.words.length; j += 1) {
|
||||
if (words.includes(fallbacks[i]!.words[j] as string)) {
|
||||
tmpWords.push(fallbacks[i]?.words[j])
|
||||
}
|
||||
}
|
||||
|
||||
if (JSON.stringify(tmpWords) === JSON.stringify(fallbacks[i]?.words)) {
|
||||
this.nluResult.entities = []
|
||||
this.nluResult.classification.domain = fallbacks[i]
|
||||
?.domain as NLPDomain
|
||||
this.nluResult.classification.skill = fallbacks[i]?.skill as NLPSkill
|
||||
this.nluResult.classification.action = fallbacks[i]
|
||||
?.action as NLPAction
|
||||
this.nluResult.classification.confidence = 1
|
||||
|
||||
LogHelper.success('Fallback found')
|
||||
return this.nluResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
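A minimal sketch of calling the new NLU facade from elsewhere in the server; the understand wrapper is illustrative, only NLU.process() and the processingTime field come from this diff.

import { NLU } from '@/core'
import { LogHelper } from '@/helpers/log-helper'

// Sketch: hand a typed or transcribed utterance to the NLU pipeline and log
// the timing metadata attached to the resolved brain result
const understand = async (utterance: string): Promise<void> => {
  try {
    const result = await NLU.process(utterance)

    LogHelper.info(`NLU processing time: ${result?.processingTime}ms`)
  } catch (e) {
    LogHelper.error(`NLU processing failed: ${e}`)
  }
}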
183
server/src/core/nlp/nlu/slot-filling.ts
Normal file
@ -0,0 +1,183 @@
|
||||
import { join } from 'node:path'
|
||||
|
||||
import type { NLPUtterance } from '@/core/nlp/types'
|
||||
import type { BrainProcessResult } from '@/core/brain/types'
|
||||
import { BRAIN, MODEL_LOADER, NER, NLU, SOCKET_SERVER } from '@/core'
|
||||
import { DEFAULT_NLU_RESULT } from '@/core/nlp/nlu/nlu'
|
||||
import { SkillDomainHelper } from '@/helpers/skill-domain-helper'
|
||||
import { DEFAULT_ACTIVE_CONTEXT } from '@/core/nlp/conversation'
|
||||
|
||||
export class SlotFilling {
|
||||
/**
|
||||
* Handle slot filling
|
||||
*/
|
||||
public static async handle(
|
||||
utterance: NLPUtterance
|
||||
): Promise<Partial<BrainProcessResult> | null> {
|
||||
const processedData = await this.fillSlot(utterance)
|
||||
|
||||
/**
|
||||
* In case the slot filling has been interrupted, e.g. by a context change.
|
||||
* Then reprocess with the new utterance
|
||||
*/
|
||||
if (!processedData) {
|
||||
await NLU.process(utterance)
|
||||
return null
|
||||
}
|
||||
|
||||
if (processedData && Object.keys(processedData).length > 0) {
|
||||
// Set new context with the next action if there is one
|
||||
if (processedData.action?.next_action) {
|
||||
await NLU.conversation.setActiveContext({
|
||||
...DEFAULT_ACTIVE_CONTEXT,
|
||||
lang: BRAIN.lang,
|
||||
slots: processedData.slots || {},
|
||||
isInActionLoop: !!processedData.nextAction?.loop,
|
||||
originalUtterance: processedData.utterance ?? null,
|
||||
skillConfigPath: processedData.skillConfigPath || '',
|
||||
actionName: processedData.action.next_action,
|
||||
domain: processedData.classification?.domain || '',
|
||||
intent: `${processedData.classification?.skill}.${processedData.action.next_action}`,
|
||||
entities: []
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return processedData
|
||||
}
|
||||
|
||||
/**
|
||||
* Build NLU data result object based on slots
|
||||
* and ask for more entities if necessary
|
||||
*/
|
||||
public static async fillSlot(
|
||||
utterance: NLPUtterance
|
||||
): Promise<Partial<BrainProcessResult> | null> {
|
||||
if (!NLU.conversation.activeContext.nextAction) {
|
||||
return null
|
||||
}
|
||||
|
||||
const { domain, intent } = NLU.conversation.activeContext
|
||||
const [skillName, actionName] = intent.split('.') as [string, string]
|
||||
const skillConfigPath = join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
domain,
|
||||
skillName,
|
||||
'config',
|
||||
BRAIN.lang + '.json'
|
||||
)
|
||||
|
||||
NLU.nluResult = {
|
||||
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
|
||||
utterance,
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName,
|
||||
action: actionName,
|
||||
confidence: 1
|
||||
}
|
||||
}
|
||||
|
||||
const entities = await NER.extractEntities(
|
||||
BRAIN.lang,
|
||||
skillConfigPath,
|
||||
NLU.nluResult
|
||||
)
|
||||
|
||||
// Continue to loop for questions if a slot has been filled correctly
|
||||
let notFilledSlot = NLU.conversation.getNotFilledSlot()
|
||||
if (notFilledSlot && entities.length > 0) {
|
||||
const hasMatch = entities.some(
|
||||
({ entity }) => entity === notFilledSlot?.expectedEntity
|
||||
)
|
||||
|
||||
if (hasMatch) {
|
||||
NLU.conversation.setSlots(BRAIN.lang, entities)
|
||||
|
||||
notFilledSlot = NLU.conversation.getNotFilledSlot()
|
||||
if (notFilledSlot) {
|
||||
BRAIN.talk(notFilledSlot.pickedQuestion)
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
|
||||
return {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!NLU.conversation.areSlotsAllFilled()) {
|
||||
BRAIN.talk(`${BRAIN.wernicke('random_context_out_of_topic')}.`)
|
||||
} else {
|
||||
NLU.nluResult = {
|
||||
...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
|
||||
// Assign slots only if there is a next action
|
||||
slots: NLU.conversation.activeContext.nextAction
|
||||
? NLU.conversation.activeContext.slots
|
||||
: {},
|
||||
utterance: NLU.conversation.activeContext.originalUtterance ?? '',
|
||||
skillConfigPath,
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName,
|
||||
action: NLU.conversation.activeContext.nextAction,
|
||||
confidence: 1
|
||||
}
|
||||
}
|
||||
|
||||
NLU.conversation.cleanActiveContext()
|
||||
|
||||
return BRAIN.execute(NLU.nluResult)
|
||||
}
|
||||
|
||||
NLU.conversation.cleanActiveContext()
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Decide what to do with slot filling.
|
||||
* 1. Activate context
|
||||
* 2. If the context is expecting slots, then loop over questions to slot fill
|
||||
* 3. Or go to the brain executor if all slots have been filled in one shot
|
||||
*/
|
||||
public static async route(intent: string): Promise<boolean> {
|
||||
const slots =
|
||||
await MODEL_LOADER.mainNLPContainer.slotManager.getMandatorySlots(intent)
|
||||
const hasMandatorySlots = Object.keys(slots)?.length > 0
|
||||
|
||||
if (hasMandatorySlots) {
|
||||
await NLU.conversation.setActiveContext({
|
||||
...DEFAULT_ACTIVE_CONTEXT,
|
||||
lang: BRAIN.lang,
|
||||
slots,
|
||||
isInActionLoop: false,
|
||||
originalUtterance: NLU.nluResult.utterance,
|
||||
skillConfigPath: NLU.nluResult.skillConfigPath,
|
||||
actionName: NLU.nluResult.classification.action,
|
||||
domain: NLU.nluResult.classification.domain,
|
||||
intent,
|
||||
entities: NLU.nluResult.entities
|
||||
})
|
||||
|
||||
const notFilledSlot = NLU.conversation.getNotFilledSlot()
|
||||
// Loop for questions if a slot hasn't been filled
|
||||
if (notFilledSlot) {
|
||||
const { actions } = await SkillDomainHelper.getSkillConfig(
|
||||
NLU.nluResult.skillConfigPath,
|
||||
BRAIN.lang
|
||||
)
|
||||
const [currentSlot] =
|
||||
actions[NLU.nluResult.classification.action]?.slots?.filter(
|
||||
({ name }) => name === notFilledSlot.name
|
||||
) ?? []
|
||||
|
||||
SOCKET_SERVER.socket?.emit('suggest', currentSlot?.suggestions)
|
||||
BRAIN.talk(notFilledSlot.pickedQuestion)
|
||||
SOCKET_SERVER.socket?.emit('is-typing', false)
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
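A minimal sketch of how the two SlotFilling entry points above fit together; both call sites exist in the nlu.ts diff earlier in this commit, and the handleFollowUp wrapper is illustrative only.

import { SlotFilling } from '@/core/nlp/nlu/slot-filling'
import { NLU } from '@/core'
import type { NLPUtterance } from '@/core/nlp/types'

// Sketch: route() activates a context and asks the first question when the
// classified intent declares mandatory slots; on the follow-up utterance,
// handle() fills matching slots, asks the next question or executes the brain
const handleFollowUp = async (utterance: NLPUtterance): Promise<void> => {
  const { slots } = NLU.conversation.activeContext

  if (Object.keys(slots).length > 0) {
    await SlotFilling.handle(utterance)
  }
}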
345
server/src/core/nlp/types.ts
Normal file
@ -0,0 +1,345 @@
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
|
||||
/**
|
||||
* NLP types
|
||||
*/
|
||||
|
||||
export type NLPDomain = string
|
||||
export type NLPSkill = string
|
||||
export type NLPAction = string
|
||||
export type NLPUtterance = string
|
||||
|
||||
export interface NLPJSProcessResult {
|
||||
locale: ShortLanguageCode
|
||||
utterance: NLPUtterance
|
||||
settings: unknown
|
||||
languageGuessed: boolean
|
||||
localeIso2: ShortLanguageCode
|
||||
language: string
|
||||
explanation: []
|
||||
classifications: {
|
||||
intent: string
|
||||
score: number
|
||||
}[]
|
||||
intent: string // E.g. "greeting.run"
|
||||
score: number
|
||||
domain: NLPDomain
|
||||
sourceEntities: unknown[]
|
||||
entities: NEREntity[]
|
||||
answers: {
|
||||
answer: string
|
||||
}[]
|
||||
answer: string | undefined
|
||||
actions: NLPAction[]
|
||||
sentiment: {
|
||||
score: number
|
||||
numWords: number
|
||||
numHits: number
|
||||
average: number
|
||||
type: string
|
||||
locale: ShortLanguageCode
|
||||
vote: string
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* NLU types
|
||||
*/
|
||||
|
||||
export interface NLUSlot {
|
||||
name: string
|
||||
expectedEntity: string
|
||||
value: NEREntity
|
||||
isFilled: boolean
|
||||
questions: string[]
|
||||
pickedQuestion: string
|
||||
locales?: Record<string, string[]> // From NLP.js
|
||||
}
|
||||
|
||||
export interface NLUClassification {
|
||||
domain: NLPDomain
|
||||
skill: NLPSkill
|
||||
action: NLPAction
|
||||
confidence: number
|
||||
}
|
||||
|
||||
export interface NLUResolver {
|
||||
name: string
|
||||
value: string
|
||||
}
|
||||
|
||||
export interface NLUResult {
|
||||
currentEntities: NEREntity[]
|
||||
entities: NEREntity[]
|
||||
currentResolvers: NLUResolver[]
|
||||
resolvers: NLUResolver[]
|
||||
slots: NLUSlots
|
||||
utterance: NLPUtterance
|
||||
skillConfigPath: string
|
||||
answers: { answer: string }[]
|
||||
classification: NLUClassification
|
||||
}
|
||||
|
||||
export type NLUSlots = Record<string, NLUSlot>
|
||||
|
||||
/**
|
||||
* NER types
|
||||
*/
|
||||
|
||||
/* eslint-disable @typescript-eslint/no-empty-interface */
|
||||
|
||||
interface Entity {
|
||||
start: number
|
||||
end: number
|
||||
len: number
|
||||
accuracy: number
|
||||
sourceText: string
|
||||
utteranceText: string
|
||||
entity: unknown
|
||||
resolution: unknown
|
||||
}
|
||||
|
||||
/**
|
||||
* Built-in entity types
|
||||
*/
|
||||
|
||||
interface BuiltInEntity extends Entity {}
|
||||
|
||||
interface BuiltInNumberEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
strValue: string
|
||||
value: number
|
||||
subtype: string
|
||||
}
|
||||
}
|
||||
interface BuiltInIPEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
value: string
|
||||
type: string
|
||||
}
|
||||
}
|
||||
interface BuiltInHashtagEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
value: string
|
||||
}
|
||||
}
|
||||
interface BuiltInPhoneNumberEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
value: string
|
||||
score: string
|
||||
}
|
||||
}
|
||||
interface BuiltInCurrencyEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
strValue: string
|
||||
value: number
|
||||
unit: string
|
||||
localeUnit: string
|
||||
}
|
||||
}
|
||||
interface BuiltInPercentageEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
strValue: string
|
||||
value: number
|
||||
subtype: string
|
||||
}
|
||||
}
|
||||
interface BuiltInDateEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
type: string
|
||||
timex: string
|
||||
strPastValue: string
|
||||
pastDate: Date
|
||||
strFutureValue: string
|
||||
futureDate: Date
|
||||
}
|
||||
}
|
||||
interface BuiltInTimeEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
values: {
|
||||
timex: string
|
||||
type: string
|
||||
value: string
|
||||
}[]
|
||||
}
|
||||
}
|
||||
interface BuiltInTimeRangeEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
values: {
|
||||
timex: string
|
||||
type: string
|
||||
start: string
|
||||
end: string
|
||||
}[]
|
||||
}
|
||||
}
|
||||
interface BuiltInDateRangeEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
type: string
|
||||
timex: string
|
||||
strPastStartValue: string
|
||||
pastStartDate: Date
|
||||
strPastEndValue: string
|
||||
pastEndDate: Date
|
||||
strFutureStartValue: string
|
||||
futureStartDate: Date
|
||||
strFutureEndValue: string
|
||||
futureEndDate: Date
|
||||
}
|
||||
}
|
||||
interface BuiltInDateTimeRangeEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
type: string
|
||||
timex: string
|
||||
strPastStartValue: string
|
||||
pastStartDate: Date
|
||||
strPastEndValue: string
|
||||
pastEndDate: Date
|
||||
strFutureStartValue: string
|
||||
futureStartDate: Date
|
||||
strFutureEndValue: string
|
||||
futureEndDate: Date
|
||||
}
|
||||
}
|
||||
interface BuiltInDurationEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
values: {
|
||||
timex: string
|
||||
type: string
|
||||
value: string
|
||||
}[]
|
||||
}
|
||||
}
|
||||
interface BuiltInDimensionEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
strValue: string
|
||||
value: number
|
||||
unit: string
|
||||
localeUnit: string
|
||||
}
|
||||
}
|
||||
interface BuiltInEmailEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
value: string
|
||||
}
|
||||
}
|
||||
interface BuiltInOrdinalEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
strValue: string
|
||||
value: number
|
||||
subtype: string
|
||||
}
|
||||
}
|
||||
interface BuiltInAgeEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
strValue: string
|
||||
value: number
|
||||
unit: string
|
||||
localeUnit: string
|
||||
}
|
||||
}
|
||||
interface BuiltInURLEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
value: string
|
||||
}
|
||||
}
|
||||
interface BuiltInTemperatureEntity extends BuiltInEntity {
|
||||
resolution: {
|
||||
strValue: string
|
||||
value: number
|
||||
unit: string
|
||||
localeUnit: string
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom entity types
|
||||
*/
|
||||
|
||||
interface CustomEntity<T> extends Entity {
|
||||
type: T
|
||||
}
|
||||
|
||||
export interface CustomEnumEntity extends CustomEntity<'enum'> {
|
||||
levenshtein: number
|
||||
option: string
|
||||
resolution: {
|
||||
value: string
|
||||
}
|
||||
alias?: string // E.g. "location:country_0"; "location:country_1"
|
||||
}
|
||||
type GlobalEntity = CustomEnumEntity
|
||||
export interface CustomRegexEntity extends CustomEntity<'regex'> {
|
||||
resolution: {
|
||||
value: string
|
||||
}
|
||||
}
|
||||
interface CustomTrimEntity extends CustomEntity<'trim'> {
|
||||
subtype:
|
||||
| 'between'
|
||||
| 'after'
|
||||
| 'afterFirst'
|
||||
| 'afterLast'
|
||||
| 'before'
|
||||
| 'beforeFirst'
|
||||
| 'beforeLast'
|
||||
resolution: {
|
||||
value: string
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* spaCy's entity types
|
||||
*/
|
||||
|
||||
interface SpacyEntity<T> extends CustomEnumEntity {
|
||||
entity: T
|
||||
}
|
||||
|
||||
interface SpacyLocationCountryEntity extends SpacyEntity<'location:country'> {}
|
||||
interface SpacyLocationCityEntity extends SpacyEntity<'location:city'> {}
|
||||
interface SpacyPersonEntity extends SpacyEntity<'person'> {}
|
||||
interface SpacyOrganizationEntity extends SpacyEntity<'organization'> {}
|
||||
|
||||
/**
|
||||
* Exported entity types
|
||||
*/
|
||||
|
||||
export type NERBuiltInEntity =
|
||||
| BuiltInNumberEntity
|
||||
| BuiltInIPEntity
|
||||
| BuiltInHashtagEntity
|
||||
| BuiltInPhoneNumberEntity
|
||||
| BuiltInCurrencyEntity
|
||||
| BuiltInPercentageEntity
|
||||
| BuiltInDateEntity
|
||||
| BuiltInTimeEntity
|
||||
| BuiltInTimeRangeEntity
|
||||
| BuiltInDateRangeEntity
|
||||
| BuiltInDateTimeRangeEntity
|
||||
| BuiltInDurationEntity
|
||||
| BuiltInDimensionEntity
|
||||
| BuiltInEmailEntity
|
||||
| BuiltInOrdinalEntity
|
||||
| BuiltInAgeEntity
|
||||
| BuiltInURLEntity
|
||||
| BuiltInTemperatureEntity
|
||||
|
||||
export type NERCustomEntity =
|
||||
| CustomEnumEntity
|
||||
| CustomRegexEntity
|
||||
| CustomTrimEntity
|
||||
|
||||
export type NERGlobalEntity = GlobalEntity
|
||||
|
||||
export type NERSpacyEntity =
|
||||
| SpacyLocationCountryEntity
|
||||
| SpacyLocationCityEntity
|
||||
| SpacyPersonEntity
|
||||
| SpacyOrganizationEntity
|
||||
|
||||
export type NEREntity =
|
||||
| NERBuiltInEntity
|
||||
| NERCustomEntity
|
||||
| NERGlobalEntity
|
||||
| NERSpacyEntity
|
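Because the resolution shape differs per entity type, consumers in this commit guard reads with a 'value' in ... check (see the slot comparison in conversation.ts above). A small illustrative helper built on these types, not part of the commit:

import type { NEREntity } from '@/core/nlp/types'

// Sketch: only some entity resolutions carry a plain `value` field
// (enum, regex, spaCy and most built-ins do; date/time ranges do not)
const getResolvedValue = (entity: NEREntity): string | number | null => {
  const { resolution } = entity

  if ('value' in resolution) {
    return resolution.value
  }

  return null
}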
@ -1,848 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import { join } from 'node:path'
|
||||
import { spawn } from 'node:child_process'
|
||||
|
||||
import { containerBootstrap } from '@nlpjs/core-loader'
|
||||
import { Nlp } from '@nlpjs/nlp'
|
||||
import { BuiltinMicrosoft } from '@nlpjs/builtin-microsoft'
|
||||
import { LangAll } from '@nlpjs/lang-all'
|
||||
import axios from 'axios'
|
||||
import kill from 'tree-kill'
|
||||
|
||||
import { langs } from '@@/core/langs.json'
|
||||
import { version } from '@@/package.json'
|
||||
import { HAS_LOGGER, IS_TESTING_ENV, TCP_SERVER_BIN_PATH } from '@/constants'
|
||||
import { TCP_CLIENT } from '@/core'
|
||||
import Ner from '@/core/ner'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
import Conversation from '@/core/conversation'
|
||||
|
||||
const defaultNluResultObj = {
|
||||
utterance: null,
|
||||
currentEntities: [],
|
||||
entities: [],
|
||||
currentResolvers: [],
|
||||
resolvers: [],
|
||||
slots: null,
|
||||
configDataFilePath: null,
|
||||
answers: [], // For dialog action type
|
||||
classification: {
|
||||
domain: null,
|
||||
skill: null,
|
||||
action: null,
|
||||
confidence: 0
|
||||
}
|
||||
}
|
||||
|
||||
class Nlu {
|
||||
constructor(brain) {
|
||||
this.brain = brain
|
||||
this.globalResolversNlp = {}
|
||||
this.skillsResolversNlp = {}
|
||||
this.mainNlp = {}
|
||||
this.ner = {}
|
||||
this.conv = new Conversation('conv0')
|
||||
this.nluResultObj = defaultNluResultObj // TODO
|
||||
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.success('New instance')
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the global resolvers NLP model from the latest training
|
||||
*/
|
||||
loadGlobalResolversModel(nlpModel) {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
if (!fs.existsSync(nlpModel)) {
|
||||
LogHelper.title('NLU')
|
||||
reject({
|
||||
type: 'warning',
|
||||
obj: new Error(
|
||||
'The global resolvers NLP model does not exist, please run: npm run train'
|
||||
)
|
||||
})
|
||||
} else {
|
||||
LogHelper.title('NLU')
|
||||
|
||||
try {
|
||||
const container = await containerBootstrap()
|
||||
|
||||
container.use(Nlp)
|
||||
container.use(LangAll)
|
||||
|
||||
this.globalResolversNlp = container.get('nlp')
|
||||
const nluManager = container.get('nlu-manager')
|
||||
nluManager.settings.spellCheck = true
|
||||
|
||||
await this.globalResolversNlp.load(nlpModel)
|
||||
LogHelper.success('Global resolvers NLP model loaded')
|
||||
|
||||
resolve()
|
||||
} catch (err) {
|
||||
this.brain.talk(
|
||||
`${this.brain.wernicke('random_errors')}! ${this.brain.wernicke(
|
||||
'errors',
|
||||
'nlu',
|
||||
{ '%error%': err.message }
|
||||
)}.`
|
||||
)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
|
||||
reject({ type: 'error', obj: err })
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the skills resolvers NLP model from the latest training
|
||||
*/
|
||||
loadSkillsResolversModel(nlpModel) {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
if (!fs.existsSync(nlpModel)) {
|
||||
LogHelper.title('NLU')
|
||||
reject({
|
||||
type: 'warning',
|
||||
obj: new Error(
|
||||
'The skills resolvers NLP model does not exist, please run: npm run train'
|
||||
)
|
||||
})
|
||||
} else {
|
||||
try {
|
||||
const container = await containerBootstrap()
|
||||
|
||||
container.use(Nlp)
|
||||
container.use(LangAll)
|
||||
|
||||
this.skillsResolversNlp = container.get('nlp')
|
||||
const nluManager = container.get('nlu-manager')
|
||||
nluManager.settings.spellCheck = true
|
||||
|
||||
await this.skillsResolversNlp.load(nlpModel)
|
||||
LogHelper.success('Skills resolvers NLP model loaded')
|
||||
|
||||
resolve()
|
||||
} catch (err) {
|
||||
this.brain.talk(
|
||||
`${this.brain.wernicke('random_errors')}! ${this.brain.wernicke(
|
||||
'errors',
|
||||
'nlu',
|
||||
{ '%error%': err.message }
|
||||
)}.`
|
||||
)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
|
||||
reject({ type: 'error', obj: err })
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Load the main NLP model from the latest training
|
||||
*/
|
||||
loadMainModel(nlpModel) {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
if (!fs.existsSync(nlpModel)) {
|
||||
LogHelper.title('NLU')
|
||||
reject({
|
||||
type: 'warning',
|
||||
obj: new Error(
|
||||
'The main NLP model does not exist, please run: npm run train'
|
||||
)
|
||||
})
|
||||
} else {
|
||||
try {
|
||||
const container = await containerBootstrap()
|
||||
|
||||
container.register(
|
||||
'extract-builtin-??',
|
||||
new BuiltinMicrosoft({
|
||||
builtins: Ner.getMicrosoftBuiltinEntities()
|
||||
}),
|
||||
true
|
||||
)
|
||||
container.use(Nlp)
|
||||
container.use(LangAll)
|
||||
|
||||
this.mainNlp = container.get('nlp')
|
||||
const nluManager = container.get('nlu-manager')
|
||||
nluManager.settings.spellCheck = true
|
||||
|
||||
await this.mainNlp.load(nlpModel)
|
||||
LogHelper.success('Main NLP model loaded')
|
||||
|
||||
this.ner = new Ner(this.mainNlp.ner)
|
||||
|
||||
resolve()
|
||||
} catch (err) {
|
||||
this.brain.talk(
|
||||
`${this.brain.wernicke('random_errors')}! ${this.brain.wernicke(
|
||||
'errors',
|
||||
'nlu',
|
||||
{ '%error%': err.message }
|
||||
)}.`
|
||||
)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
|
||||
reject({ type: 'error', obj: err })
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if NLP models exists
|
||||
*/
|
||||
hasNlpModels() {
|
||||
return (
|
||||
Object.keys(this.globalResolversNlp).length > 0 &&
|
||||
Object.keys(this.skillsResolversNlp).length > 0 &&
|
||||
Object.keys(this.mainNlp).length > 0
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* Set new language; recreate a new TCP server with new language; and reprocess understanding
|
||||
*/
|
||||
switchLanguage(utterance, locale, opts) {
|
||||
const connectedHandler = async () => {
|
||||
await this.process(utterance, opts)
|
||||
}
|
||||
|
||||
this.brain.lang = locale
|
||||
this.brain.talk(`${this.brain.wernicke('random_language_switch')}.`, true)
|
||||
|
||||
// Recreate a new TCP server process and reconnect the TCP client
|
||||
kill(global.tcpServerProcess.pid, () => {
|
||||
global.tcpServerProcess = spawn(`${TCP_SERVER_BIN_PATH} ${locale}`, {
|
||||
shell: true
|
||||
})
|
||||
|
||||
TCP_CLIENT.connect()
|
||||
TCP_CLIENT.ee.removeListener('connected', connectedHandler)
|
||||
TCP_CLIENT.ee.on('connected', connectedHandler)
|
||||
})
|
||||
|
||||
return {}
|
||||
}
|
||||
|
||||
/**
|
||||
* Collaborative logger request
|
||||
*/
|
||||
sendLog(utterance) {
|
||||
/* istanbul ignore next */
|
||||
if (HAS_LOGGER && !IS_TESTING_ENV) {
|
||||
axios.request({
|
||||
method: 'POST',
|
||||
url: 'https://logger.getleon.ai/v1/expressions',
|
||||
headers: { 'X-Origin': 'leon-core' },
|
||||
data: {
|
||||
version,
|
||||
utterance,
|
||||
lang: this.brain.lang,
|
||||
classification: this.nluResultObj.classification
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge spaCy entities with the current NER instance
|
||||
*/
|
||||
async mergeSpacyEntities(utterance) {
|
||||
const spacyEntities = await Ner.getSpacyEntities(utterance)
|
||||
if (spacyEntities.length > 0) {
|
||||
spacyEntities.forEach(({ entity, resolution }) => {
|
||||
const spacyEntity = {
|
||||
[entity]: {
|
||||
options: {
|
||||
[resolution.value]: [StringHelper.ucFirst(resolution.value)]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
this.mainNlp.addEntities(spacyEntity, this.brain.lang)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle in action loop logic before NLU processing
|
||||
*/
|
||||
async handleActionLoop(utterance, opts) {
|
||||
const { domain, intent } = this.conv.activeContext
|
||||
const [skillName, actionName] = intent.split('.')
|
||||
const configDataFilePath = join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
domain,
|
||||
skillName,
|
||||
`config/${this.brain.lang}.json`
|
||||
)
|
||||
this.nluResultObj = {
|
||||
...defaultNluResultObj, // Reset entities, slots, etc.
|
||||
slots: this.conv.activeContext.slots,
|
||||
utterance,
|
||||
configDataFilePath,
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName,
|
||||
action: actionName,
|
||||
confidence: 1
|
||||
}
|
||||
}
|
||||
this.nluResultObj.entities = await this.ner.extractEntities(
|
||||
this.brain.lang,
|
||||
configDataFilePath,
|
||||
this.nluResultObj
|
||||
)
|
||||
|
||||
const { actions, resolvers } = JSON.parse(
|
||||
fs.readFileSync(configDataFilePath, 'utf8')
|
||||
)
|
||||
const action = actions[this.nluResultObj.classification.action]
|
||||
const { name: expectedItemName, type: expectedItemType } =
|
||||
action.loop.expected_item
|
||||
let hasMatchingEntity = false
|
||||
let hasMatchingResolver = false
|
||||
|
||||
if (expectedItemType === 'entity') {
|
||||
hasMatchingEntity =
|
||||
this.nluResultObj.entities.filter(
|
||||
({ entity }) => expectedItemName === entity
|
||||
).length > 0
|
||||
} else if (expectedItemType.indexOf('resolver') !== -1) {
|
||||
const nlpObjs = {
|
||||
global_resolver: this.globalResolversNlp,
|
||||
skill_resolver: this.skillsResolversNlp
|
||||
}
|
||||
const result = await nlpObjs[expectedItemType].process(utterance)
|
||||
const { intent } = result
|
||||
|
||||
const resolveResolvers = (resolver, intent) => {
|
||||
const resolversPath = join(
|
||||
process.cwd(),
|
||||
'core/data',
|
||||
this.brain.lang,
|
||||
'global-resolvers'
|
||||
)
|
||||
// Load the skill resolver or the global resolver
|
||||
const resolvedIntents = !intent.includes('resolver.global')
|
||||
? resolvers[resolver]
|
||||
: JSON.parse(fs.readFileSync(join(resolversPath, `${resolver}.json`)))
|
||||
|
||||
// E.g. resolver.global.denial -> denial
|
||||
intent = intent.substring(intent.lastIndexOf('.') + 1)
|
||||
|
||||
return [
|
||||
{
|
||||
name: expectedItemName,
|
||||
value: resolvedIntents.intents[intent].value
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
// Resolve the resolver if a global or skill resolver has been found
|
||||
if (
|
||||
intent &&
|
||||
(intent.includes('resolver.global') ||
|
||||
intent.includes(`resolver.${skillName}`))
|
||||
) {
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.success('Resolvers resolved:')
|
||||
this.nluResultObj.resolvers = resolveResolvers(expectedItemName, intent)
|
||||
this.nluResultObj.resolvers.forEach((resolver) =>
|
||||
LogHelper.success(`${intent}: ${JSON.stringify(resolver)}`)
|
||||
)
|
||||
hasMatchingResolver = this.nluResultObj.resolvers.length > 0
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure expected items are in the utterance, otherwise clean context and reprocess
|
||||
if (!hasMatchingEntity && !hasMatchingResolver) {
|
||||
this.brain.talk(`${this.brain.wernicke('random_context_out_of_topic')}.`)
|
||||
this.conv.cleanActiveContext()
|
||||
await this.process(utterance, opts)
|
||||
return null
|
||||
}
|
||||
|
||||
try {
|
||||
const processedData = await this.brain.execute(this.nluResultObj, {
|
||||
mute: opts.mute
|
||||
})
|
||||
// Reprocess with the original utterance that triggered the context at first
|
||||
if (processedData.core?.restart === true) {
|
||||
const { originalUtterance } = this.conv.activeContext
|
||||
|
||||
this.conv.cleanActiveContext()
|
||||
await this.process(originalUtterance, opts)
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* In case there is no next action to prepare anymore
|
||||
* and there is an explicit stop of the loop from the skill
|
||||
*/
|
||||
if (
|
||||
!processedData.action.next_action &&
|
||||
processedData.core?.isInActionLoop === false
|
||||
) {
|
||||
this.conv.cleanActiveContext()
|
||||
return null
|
||||
}
|
||||
|
||||
// Break the action loop and prepare for the next action if necessary
|
||||
if (processedData.core?.isInActionLoop === false) {
|
||||
this.conv.activeContext.isInActionLoop = !!processedData.action.loop
|
||||
this.conv.activeContext.actionName = processedData.action.next_action
|
||||
this.conv.activeContext.intent = `${processedData.classification.skill}.${processedData.action.next_action}`
|
||||
}
|
||||
|
||||
return processedData
|
||||
} catch (e) /* istanbul ignore next */ {
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle slot filling
|
||||
*/
|
||||
async handleSlotFilling(utterance, opts) {
|
||||
const processedData = await this.slotFill(utterance, opts)
|
||||
|
||||
/**
|
||||
* In case the slot filling has been interrupted, e.g. by a context change,
|
||||
* then reprocess with the new utterance
|
||||
*/
|
||||
if (!processedData) {
|
||||
await this.process(utterance, opts)
|
||||
return null
|
||||
}
|
||||
|
||||
if (processedData && Object.keys(processedData).length > 0) {
|
||||
// Set new context with the next action if there is one
|
||||
if (processedData.action.next_action) {
|
||||
this.conv.activeContext = {
|
||||
lang: this.brain.lang,
|
||||
slots: processedData.slots,
|
||||
isInActionLoop: !!processedData.nextAction.loop,
|
||||
originalUtterance: processedData.utterance,
|
||||
configDataFilePath: processedData.configDataFilePath,
|
||||
actionName: processedData.action.next_action,
|
||||
domain: processedData.classification.domain,
|
||||
intent: `${processedData.classification.skill}.${processedData.action.next_action}`,
|
||||
entities: []
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return processedData
|
||||
}
|
||||
|
||||
/**
|
||||
* Classify the utterance,
|
||||
* pick up the right classification
|
||||
* and extract entities
|
||||
*/
|
||||
process(utterance, opts) {
|
||||
const processingTimeStart = Date.now()
|
||||
|
||||
return new Promise(async (resolve, reject) => {
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.info('Processing...')
|
||||
|
||||
opts = opts || {
|
||||
mute: false // Close Leon's mouth, e.g. over HTTP
|
||||
}
|
||||
|
||||
if (!this.hasNlpModels()) {
|
||||
if (!opts.mute) {
|
||||
this.brain.talk(`${this.brain.wernicke('random_errors')}!`)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
}
|
||||
|
||||
const msg =
|
||||
'The NLP model is missing, please rebuild the project or, if you are in dev, run: npm run train'
|
||||
LogHelper.error(msg)
|
||||
return reject(msg)
|
||||
}
|
||||
|
||||
// Add spaCy entities
|
||||
await this.mergeSpacyEntities(utterance)
|
||||
|
||||
// Pre NLU processing according to the active context if there is one
|
||||
if (this.conv.hasActiveContext()) {
|
||||
// When the active context is in an action loop, then directly trigger the action
|
||||
if (this.conv.activeContext.isInActionLoop) {
|
||||
return resolve(await this.handleActionLoop(utterance, opts))
|
||||
}
|
||||
|
||||
// When the active context has slots filled
|
||||
if (Object.keys(this.conv.activeContext.slots).length > 0) {
|
||||
try {
|
||||
return resolve(await this.handleSlotFilling(utterance, opts))
|
||||
} catch (e) {
|
||||
return reject({})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const result = await this.mainNlp.process(utterance)
|
||||
const { locale, answers, classifications } = result
|
||||
let { score, intent, domain } = result
|
||||
|
||||
/**
|
||||
* If a context is active, then use the appropriate classification based on score probability.
|
||||
* E.g. 1. Create my shopping list; 2. Actually delete it.
|
||||
* If there are several "delete it" across skills, Leon needs to make use of
|
||||
* the current context ({domain}.{skill}) to define the most accurate classification
|
||||
*/
|
||||
if (this.conv.hasActiveContext()) {
|
||||
classifications.forEach(({ intent: newIntent, score: newScore }) => {
|
||||
if (newScore > 0.6) {
|
||||
const [skillName] = newIntent.split('.')
|
||||
const newDomain = this.mainNlp.getIntentDomain(locale, newIntent)
|
||||
const contextName = `${newDomain}.${skillName}`
|
||||
if (this.conv.activeContext.name === contextName) {
|
||||
score = newScore
|
||||
intent = newIntent
|
||||
domain = newDomain
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const [skillName, actionName] = intent.split('.')
|
||||
this.nluResultObj = {
|
||||
...defaultNluResultObj, // Reset entities, slots, etc.
|
||||
utterance,
|
||||
answers, // For dialog action type
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName,
|
||||
action: actionName,
|
||||
confidence: score
|
||||
}
|
||||
}
|
||||
|
||||
const isSupportedLanguage = LangHelper.getShortCodes().includes(locale)
|
||||
if (!isSupportedLanguage) {
|
||||
this.brain.talk(
|
||||
`${this.brain.wernicke('random_language_not_supported')}.`,
|
||||
true
|
||||
)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
return resolve({})
|
||||
}
|
||||
|
||||
// Trigger language switching
|
||||
if (this.brain.lang !== locale) {
|
||||
return resolve(this.switchLanguage(utterance, locale, opts))
|
||||
}
|
||||
|
||||
// this.sendLog()
|
||||
|
||||
if (intent === 'None') {
|
||||
const fallback = this.fallback(
|
||||
langs[LangHelper.getLongCode(locale)].fallbacks
|
||||
)
|
||||
|
||||
if (fallback === false) {
|
||||
if (!opts.mute) {
|
||||
this.brain.talk(
|
||||
`${this.brain.wernicke('random_unknown_intents')}.`,
|
||||
true
|
||||
)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
}
|
||||
|
||||
LogHelper.title('NLU')
|
||||
const msg = 'Intent not found'
|
||||
LogHelper.warning(msg)
|
||||
|
||||
const processingTimeEnd = Date.now()
|
||||
const processingTime = processingTimeEnd - processingTimeStart
|
||||
|
||||
return resolve({
|
||||
processingTime,
|
||||
message: msg
|
||||
})
|
||||
}
|
||||
|
||||
this.nluResultObj = fallback
|
||||
}
|
||||
|
||||
LogHelper.title('NLU')
|
||||
LogHelper.success(
|
||||
`Intent found: ${this.nluResultObj.classification.skill}.${this.nluResultObj.classification.action} (domain: ${this.nluResultObj.classification.domain})`
|
||||
)
|
||||
|
||||
const configDataFilePath = join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
this.nluResultObj.classification.domain,
|
||||
this.nluResultObj.classification.skill,
|
||||
`config/${this.brain.lang}.json`
|
||||
)
|
||||
this.nluResultObj.configDataFilePath = configDataFilePath
|
||||
|
||||
try {
|
||||
this.nluResultObj.entities = await this.ner.extractEntities(
|
||||
this.brain.lang,
|
||||
configDataFilePath,
|
||||
this.nluResultObj
|
||||
)
|
||||
} catch (e) /* istanbul ignore next */ {
|
||||
if (LogHelper[e.type]) {
|
||||
LogHelper[e.type](e.obj.message)
|
||||
}
|
||||
|
||||
if (!opts.mute) {
|
||||
this.brain.talk(`${this.brain.wernicke(e.code, '', e.data)}!`)
|
||||
}
|
||||
}
|
||||
|
||||
const shouldSlotLoop = await this.routeSlotFilling(intent)
|
||||
if (shouldSlotLoop) {
|
||||
return resolve({})
|
||||
}
|
||||
|
||||
// In case all slots have been filled in the first utterance
|
||||
if (
|
||||
this.conv.hasActiveContext() &&
|
||||
Object.keys(this.conv.activeContext.slots).length > 0
|
||||
) {
|
||||
try {
|
||||
return resolve(await this.handleSlotFilling(utterance, opts))
|
||||
} catch (e) {
|
||||
return reject({})
|
||||
}
|
||||
}
|
||||
|
||||
const newContextName = `${this.nluResultObj.classification.domain}.${skillName}`
|
||||
if (this.conv.activeContext.name !== newContextName) {
|
||||
this.conv.cleanActiveContext()
|
||||
}
|
||||
this.conv.activeContext = {
|
||||
lang: this.brain.lang,
|
||||
slots: {},
|
||||
isInActionLoop: false,
|
||||
originalUtterance: this.nluResultObj.utterance,
|
||||
configDataFilePath: this.nluResultObj.configDataFilePath,
|
||||
actionName: this.nluResultObj.classification.action,
|
||||
domain: this.nluResultObj.classification.domain,
|
||||
intent,
|
||||
entities: this.nluResultObj.entities
|
||||
}
|
||||
// Pass current utterance entities to the NLU result object
|
||||
this.nluResultObj.currentEntities =
|
||||
this.conv.activeContext.currentEntities
|
||||
// Pass context entities to the NLU result object
|
||||
this.nluResultObj.entities = this.conv.activeContext.entities
|
||||
|
||||
try {
|
||||
const processedData = await this.brain.execute(this.nluResultObj, {
|
||||
mute: opts.mute
|
||||
})
|
||||
|
||||
// Prepare next action if there is one queuing
|
||||
if (processedData.nextAction) {
|
||||
this.conv.cleanActiveContext()
|
||||
this.conv.activeContext = {
|
||||
lang: this.brain.lang,
|
||||
slots: {},
|
||||
isInActionLoop: !!processedData.nextAction.loop,
|
||||
originalUtterance: processedData.utterance,
|
||||
configDataFilePath: processedData.configDataFilePath,
|
||||
actionName: processedData.action.next_action,
|
||||
domain: processedData.classification.domain,
|
||||
intent: `${processedData.classification.skill}.${processedData.action.next_action}`,
|
||||
entities: []
|
||||
}
|
||||
}
|
||||
|
||||
const processingTimeEnd = Date.now()
|
||||
const processingTime = processingTimeEnd - processingTimeStart
|
||||
|
||||
return resolve({
|
||||
processingTime, // In ms, total time
|
||||
...processedData,
|
||||
nluProcessingTime: processingTime - processedData?.executionTime // In ms, NLU processing time only
|
||||
})
|
||||
} catch (e) /* istanbul ignore next */ {
|
||||
LogHelper[e.type](e.obj.message)
|
||||
|
||||
if (!opts.mute) {
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
}
|
||||
|
||||
return reject(e.obj)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Build NLU data result object based on slots
|
||||
* and ask for more entities if necessary
|
||||
*/
|
||||
async slotFill(utterance, opts) {
|
||||
if (!this.conv.activeContext.nextAction) {
|
||||
return null
|
||||
}
|
||||
|
||||
const { domain, intent } = this.conv.activeContext
|
||||
const [skillName, actionName] = intent.split('.')
|
||||
const configDataFilePath = join(
|
||||
process.cwd(),
|
||||
'skills',
|
||||
domain,
|
||||
skillName,
|
||||
`config/${this.brain.lang}.json`
|
||||
)
|
||||
|
||||
this.nluResultObj = {
|
||||
...defaultNluResultObj, // Reset entities, slots, etc.
|
||||
utterance,
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName,
|
||||
action: actionName
|
||||
}
|
||||
}
|
||||
const entities = await this.ner.extractEntities(
|
||||
this.brain.lang,
|
||||
configDataFilePath,
|
||||
this.nluResultObj
|
||||
)
|
||||
|
||||
// Continue to loop for questions if a slot has been filled correctly
|
||||
let notFilledSlot = this.conv.getNotFilledSlot()
|
||||
if (notFilledSlot && entities.length > 0) {
|
||||
const hasMatch = entities.some(
|
||||
({ entity }) => entity === notFilledSlot.expectedEntity
|
||||
)
|
||||
|
||||
if (hasMatch) {
|
||||
this.conv.setSlots(this.brain.lang, entities)
|
||||
|
||||
notFilledSlot = this.conv.getNotFilledSlot()
|
||||
if (notFilledSlot) {
|
||||
this.brain.talk(notFilledSlot.pickedQuestion)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
|
||||
return {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!this.conv.areSlotsAllFilled()) {
|
||||
this.brain.talk(`${this.brain.wernicke('random_context_out_of_topic')}.`)
|
||||
} else {
|
||||
this.nluResultObj = {
|
||||
...defaultNluResultObj, // Reset entities, slots, etc.
|
||||
// Assign slots only if there is a next action
|
||||
slots: this.conv.activeContext.nextAction
|
||||
? this.conv.activeContext.slots
|
||||
: {},
|
||||
utterance: this.conv.activeContext.originalUtterance,
|
||||
configDataFilePath,
|
||||
classification: {
|
||||
domain,
|
||||
skill: skillName,
|
||||
action: this.conv.activeContext.nextAction,
|
||||
confidence: 1
|
||||
}
|
||||
}
|
||||
|
||||
this.conv.cleanActiveContext()
|
||||
|
||||
return this.brain.execute(this.nluResultObj, { mute: opts.mute })
|
||||
}
|
||||
|
||||
this.conv.cleanActiveContext()
|
||||
return null
|
||||
}
|
||||
|
||||
/**
|
||||
* Decide what to do with slot filling.
|
||||
* 1. Activate context
|
||||
* 2. If the context is expecting slots, then loop over questions to slot fill
|
||||
* 3. Or go to the brain executor if all slots have been filled in one shot
|
||||
*/
|
||||
async routeSlotFilling(intent) {
|
||||
const slots = await this.mainNlp.slotManager.getMandatorySlots(intent)
|
||||
const hasMandatorySlots = Object.keys(slots)?.length > 0
|
||||
|
||||
if (hasMandatorySlots) {
|
||||
this.conv.activeContext = {
|
||||
lang: this.brain.lang,
|
||||
slots,
|
||||
isInActionLoop: false,
|
||||
originalUtterance: this.nluResultObj.utterance,
|
||||
configDataFilePath: this.nluResultObj.configDataFilePath,
|
||||
actionName: this.nluResultObj.classification.action,
|
||||
domain: this.nluResultObj.classification.domain,
|
||||
intent,
|
||||
entities: this.nluResultObj.entities
|
||||
}
|
||||
|
||||
const notFilledSlot = this.conv.getNotFilledSlot()
|
||||
// Loop for questions if a slot hasn't been filled
|
||||
if (notFilledSlot) {
|
||||
const { actions } = JSON.parse(
|
||||
fs.readFileSync(this.nluResultObj.configDataFilePath, 'utf8')
|
||||
)
|
||||
const [currentSlot] = actions[
|
||||
this.nluResultObj.classification.action
|
||||
].slots.filter(({ name }) => name === notFilledSlot.name)
|
||||
|
||||
this.brain.socket.emit('suggest', currentSlot.suggestions)
|
||||
this.brain.talk(notFilledSlot.pickedQuestion)
|
||||
this.brain.socket.emit('is-typing', false)
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
/**
|
||||
* Pick up and compare the right fallback
|
||||
* according to the desired skill action
|
||||
*/
|
||||
fallback(fallbacks) {
|
||||
const words = this.nluResultObj.utterance.toLowerCase().split(' ')
|
||||
|
||||
if (fallbacks.length > 0) {
|
||||
LogHelper.info('Looking for fallbacks...')
|
||||
const tmpWords = []
|
||||
|
||||
for (let i = 0; i < fallbacks.length; i += 1) {
|
||||
for (let j = 0; j < fallbacks[i].words.length; j += 1) {
|
||||
if (words.includes(fallbacks[i].words[j]) === true) {
|
||||
tmpWords.push(fallbacks[i].words[j])
|
||||
}
|
||||
}
|
||||
|
||||
if (JSON.stringify(tmpWords) === JSON.stringify(fallbacks[i].words)) {
|
||||
this.nluResultObj.entities = []
|
||||
this.nluResultObj.classification.domain = fallbacks[i].domain
|
||||
this.nluResultObj.classification.skill = fallbacks[i].skill
|
||||
this.nluResultObj.classification.action = fallbacks[i].action
|
||||
this.nluResultObj.classification.confidence = 1
|
||||
|
||||
LogHelper.success('Fallback found')
|
||||
return this.nluResultObj
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
export default Nlu
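For orientation, here is a minimal usage sketch of the NLU `process()` method. It assumes the `NLU` singleton exported from `@/core` (as the socket server below uses it) wraps the class above; the utterance is a placeholder and only the resolved shape (`processingTime`, `nluProcessingTime`, `classification`) comes from the `resolve()` calls in `process()`.

import { NLU } from '@/core'

// Illustrative only: the utterance is a placeholder
const logClassification = async (utterance: string): Promise<void> => {
  const result = await NLU.process(utterance)

  // Shape based on the resolve() calls in process() above
  console.log(`Total processing time: ${result.processingTime}ms`)
  console.log(`NLU-only processing time: ${result.nluProcessingTime}ms`)
  console.log(result.classification) // { domain, skill, action, confidence }
}

logClassification('What time is it?').catch((e) => console.error(e))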
|
146
server/src/core/socket-server.ts
Normal file
@ -0,0 +1,146 @@
|
||||
import type { DefaultEventsMap } from 'socket.io/dist/typed-events'
|
||||
import { Server as SocketIOServer, Socket } from 'socket.io'
|
||||
|
||||
import { LANG, HAS_STT, HAS_TTS, IS_DEVELOPMENT_ENV } from '@/constants'
|
||||
import {
|
||||
HTTP_SERVER,
|
||||
TCP_CLIENT,
|
||||
ASR,
|
||||
STT,
|
||||
TTS,
|
||||
NLU,
|
||||
BRAIN,
|
||||
MODEL_LOADER
|
||||
} from '@/core'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
|
||||
interface HotwordDataEvent {
|
||||
hotword: string
|
||||
buffer: Buffer
|
||||
}
|
||||
|
||||
interface UtteranceDataEvent {
|
||||
client: string
|
||||
value: string
|
||||
}
|
||||
|
||||
export default class SocketServer {
|
||||
private static instance: SocketServer
|
||||
|
||||
public socket: Socket<DefaultEventsMap, DefaultEventsMap> | undefined =
|
||||
undefined
|
||||
|
||||
constructor() {
|
||||
if (!SocketServer.instance) {
|
||||
LogHelper.title('Socket Server')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
SocketServer.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
public async init(): Promise<void> {
|
||||
const io = IS_DEVELOPMENT_ENV
|
||||
? new SocketIOServer(HTTP_SERVER.httpServer, {
|
||||
cors: { origin: `${HTTP_SERVER.host}:3000` }
|
||||
})
|
||||
: new SocketIOServer(HTTP_SERVER.httpServer)
|
||||
|
||||
let sttState = 'disabled'
|
||||
let ttsState = 'disabled'
|
||||
|
||||
if (HAS_STT) {
|
||||
sttState = 'enabled'
|
||||
|
||||
await STT.init()
|
||||
}
|
||||
if (HAS_TTS) {
|
||||
ttsState = 'enabled'
|
||||
|
||||
await TTS.init(LangHelper.getShortCode(LANG))
|
||||
}
|
||||
|
||||
LogHelper.title('Initialization')
|
||||
LogHelper.success(`STT ${sttState}`)
|
||||
LogHelper.success(`TTS ${ttsState}`)
|
||||
|
||||
try {
|
||||
await MODEL_LOADER.loadNLPModels()
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to load NLP models: ${e}`)
|
||||
}
|
||||
|
||||
io.on('connection', (socket) => {
|
||||
LogHelper.title('Client')
|
||||
LogHelper.success('Connected')
|
||||
|
||||
this.socket = socket
|
||||
|
||||
// Init
|
||||
this.socket.on('init', (data: string) => {
|
||||
LogHelper.info(`Type: ${data}`)
|
||||
LogHelper.info(`Socket ID: ${this.socket?.id}`)
|
||||
|
||||
// TODO
|
||||
// const provider = await addProvider(socket.id)
|
||||
|
||||
// Check whether the TCP client is connected to the TCP server
|
||||
if (TCP_CLIENT.isConnected) {
|
||||
this.socket?.emit('ready')
|
||||
} else {
|
||||
TCP_CLIENT.ee.on('connected', () => {
|
||||
this.socket?.emit('ready')
|
||||
})
|
||||
}
|
||||
|
||||
if (data === 'hotword-node') {
|
||||
// Hotword triggered
|
||||
this.socket?.on('hotword-detected', (data: HotwordDataEvent) => {
|
||||
LogHelper.title('Socket')
|
||||
LogHelper.success(`Hotword ${data.hotword} detected`)
|
||||
|
||||
this.socket?.broadcast.emit('enable-record')
|
||||
})
|
||||
} else {
|
||||
// Listen for new utterance
|
||||
this.socket?.on('utterance', async (data: UtteranceDataEvent) => {
|
||||
LogHelper.title('Socket')
|
||||
LogHelper.info(`${data.client} emitted: ${data.value}`)
|
||||
|
||||
this.socket?.emit('is-typing', true)
|
||||
|
||||
const { value: utterance } = data
|
||||
try {
|
||||
LogHelper.time('Utterance processed in')
|
||||
|
||||
BRAIN.isMuted = false
|
||||
await NLU.process(utterance)
|
||||
|
||||
LogHelper.title('Execution Time')
|
||||
LogHelper.timeEnd('Utterance processed in')
|
||||
} catch (e) {
|
||||
LogHelper.error(`Failed to process utterance: ${e}`)
|
||||
}
|
||||
})
|
||||
|
||||
// Handle automatic speech recognition
|
||||
this.socket?.on('recognize', async (data: Buffer) => {
|
||||
try {
|
||||
await ASR.encode(data)
|
||||
} catch (e) {
|
||||
LogHelper.error(
|
||||
`ASR - Failed to encode audio blob to WAVE file: ${e}`
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
this.socket.once('disconnect', () => {
|
||||
// TODO
|
||||
// deleteProvider(this.socket.id)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
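As a hint at the other side of this socket contract, here is a hypothetical web client sketch based on the events the server listens for (`init`, `utterance`) and emits (`ready`, `is-typing`); the URL, client name and `socket.io-client` wiring are assumptions, not part of this commit.

import { io } from 'socket.io-client'

// Hypothetical web client; the URL and client name are placeholders
const socket = io('http://localhost:1337')

// The server emits "ready" once its TCP client is connected to the TCP server
socket.on('ready', () => {
  socket.emit('utterance', { client: 'example-client', value: 'What time is it?' })
})

socket.on('is-typing', (isTyping: boolean) => {
  console.log(`Leon is ${isTyping ? '' : 'no longer '}typing`)
})

// Identify this connection; anything other than "hotword-node" is treated as a regular client
socket.emit('init', 'example-client')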
|
75
server/src/core/stt/parsers/coqui-stt-parser.ts
Normal file
@ -0,0 +1,75 @@
|
||||
import path from 'node:path'
|
||||
import fs from 'node:fs'
|
||||
|
||||
import wav from 'node-wav'
|
||||
import { Model } from 'stt'
|
||||
|
||||
import { STTParserBase } from '@/core/stt/stt-parser-base'
|
||||
import { BIN_PATH } from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
export default class CoquiSTTParser extends STTParserBase {
|
||||
protected readonly name = 'Coqui STT Parser'
|
||||
private readonly model: Model | undefined = undefined
|
||||
private readonly desiredSampleRate: number = 16_000
|
||||
|
||||
constructor() {
|
||||
super()
|
||||
|
||||
LogHelper.title(this.name)
|
||||
LogHelper.success('New instance')
|
||||
|
||||
const modelPath = path.join(BIN_PATH, 'coqui', 'model.tflite')
|
||||
const scorerPath = path.join(BIN_PATH, 'coqui', 'huge-vocabulary.scorer')
|
||||
|
||||
LogHelper.info(`Loading model from file ${modelPath}...`)
|
||||
|
||||
if (!fs.existsSync(modelPath)) {
|
||||
LogHelper.error(
|
||||
`Cannot find ${modelPath}. You can set up the offline STT by running: "npm run setup:offline-stt"`
|
||||
)
|
||||
}
|
||||
|
||||
if (!fs.existsSync(scorerPath)) {
|
||||
LogHelper.error(
|
||||
`Cannot find ${scorerPath}. You can set up the offline STT by running: "npm run setup:offline-stt"`
|
||||
)
|
||||
}
|
||||
|
||||
try {
|
||||
this.model = new Model(modelPath)
|
||||
} catch (e) {
|
||||
throw Error(`${this.name} - Failed to load the model: ${e}`)
|
||||
}
|
||||
|
||||
this.desiredSampleRate = this.model.sampleRate()
|
||||
|
||||
try {
|
||||
this.model.enableExternalScorer(scorerPath)
|
||||
} catch (e) {
|
||||
throw Error(`${this.name} - Failed to enable external scorer: ${e}`)
|
||||
}
|
||||
|
||||
LogHelper.success('Parser initialized')
|
||||
}
|
||||
|
||||
/**
|
||||
* Read audio buffer and return the transcript (decoded string)
|
||||
*/
|
||||
public async parse(buffer: Buffer): Promise<string | null> {
|
||||
const wavDecode = wav.decode(buffer)
|
||||
|
||||
if (this.model) {
|
||||
if (wavDecode.sampleRate < this.desiredSampleRate) {
|
||||
LogHelper.warning(
|
||||
`Original sample rate (${wavDecode.sampleRate}) is lower than ${this.desiredSampleRate}Hz. Up-sampling might produce erratic speech recognition`
|
||||
)
|
||||
}
|
||||
|
||||
// Decoded string
|
||||
return this.model.stt(buffer)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
70
server/src/core/stt/parsers/google-cloud-stt-parser.ts
Normal file
@ -0,0 +1,70 @@
|
||||
import path from 'node:path'
|
||||
|
||||
import type { SpeechClient } from '@google-cloud/speech'
|
||||
import stt from '@google-cloud/speech'
|
||||
|
||||
import { STTParserBase } from '@/core/stt/stt-parser-base'
|
||||
import { LANG, VOICE_CONFIG_PATH } from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
export default class GoogleCloudSTTParser extends STTParserBase {
|
||||
protected readonly name = 'Google Cloud STT Parser'
|
||||
private readonly client: SpeechClient | undefined = undefined
|
||||
|
||||
constructor() {
|
||||
super()
|
||||
|
||||
LogHelper.title(this.name)
|
||||
LogHelper.success('New instance')
|
||||
|
||||
/**
|
||||
* Initialize Google Cloud Speech-to-Text based on the credentials in the JSON file
|
||||
* The env variable "GOOGLE_APPLICATION_CREDENTIALS" provides the JSON file path
|
||||
*/
|
||||
|
||||
process.env['GOOGLE_APPLICATION_CREDENTIALS'] = path.join(
|
||||
VOICE_CONFIG_PATH,
|
||||
'google-cloud.json'
|
||||
)
|
||||
|
||||
try {
|
||||
this.client = new stt.SpeechClient()
|
||||
|
||||
LogHelper.success('Parser initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to initialize: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read audio buffer and return the transcript (decoded string)
|
||||
*/
|
||||
public async parse(buffer: Buffer): Promise<string | null> {
|
||||
if (this.client) {
|
||||
const audioBytes = buffer.toString('base64')
|
||||
const audio = { content: audioBytes }
|
||||
|
||||
try {
|
||||
const [res] = await this.client.recognize({
|
||||
audio,
|
||||
config: {
|
||||
languageCode: LANG,
|
||||
encoding: 'LINEAR16',
|
||||
sampleRateHertz: 16000
|
||||
}
|
||||
})
|
||||
|
||||
// Decoded string
|
||||
return (res.results || [])
|
||||
.map((data) => data.alternatives && data.alternatives[0]?.transcript)
|
||||
.join('\n')
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to parse: ${e}`)
|
||||
}
|
||||
} else {
|
||||
LogHelper.error(`${this.name} - Not initialized`)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
67
server/src/core/stt/parsers/watson-stt-parser.ts
Normal file
@ -0,0 +1,67 @@
|
||||
import path from 'node:path'
|
||||
import fs from 'node:fs'
|
||||
import { Duplex } from 'node:stream'
|
||||
|
||||
import Stt from 'ibm-watson/speech-to-text/v1'
|
||||
import { IamAuthenticator } from 'ibm-watson/auth'
|
||||
|
||||
import type { WatsonVoiceConfigurationSchema } from '@/schemas/voice-config-schemas'
|
||||
import { STTParserBase } from '@/core/stt/stt-parser-base'
|
||||
import { LANG, VOICE_CONFIG_PATH } from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
export default class WatsonSTTParser extends STTParserBase {
|
||||
protected readonly name = 'Watson STT Parser'
|
||||
private readonly client: Stt | undefined = undefined
|
||||
|
||||
constructor() {
|
||||
super()
|
||||
|
||||
LogHelper.title(this.name)
|
||||
LogHelper.success('New instance')
|
||||
|
||||
const config: WatsonVoiceConfigurationSchema = JSON.parse(
|
||||
fs.readFileSync(path.join(VOICE_CONFIG_PATH, 'watson-stt.json'), 'utf8')
|
||||
)
|
||||
|
||||
try {
|
||||
this.client = new Stt({
|
||||
authenticator: new IamAuthenticator({ apikey: config.apikey }),
|
||||
serviceUrl: config.url
|
||||
})
|
||||
|
||||
LogHelper.success('Parser initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to initialize: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read audio buffer and return the transcript (decoded string)
|
||||
*/
|
||||
public async parse(buffer: Buffer): Promise<string | null> {
|
||||
if (this.client) {
|
||||
const stream = new Duplex()
|
||||
|
||||
stream.push(buffer)
|
||||
stream.push(null)
|
||||
|
||||
try {
|
||||
const { result } = await this.client.recognize({
|
||||
contentType: 'audio/wav',
|
||||
model: `${LANG}_BroadbandModel`,
|
||||
audio: stream
|
||||
})
|
||||
|
||||
// Decoded string
|
||||
return (result.results || [])
|
||||
.map((data) => data.alternatives && data.alternatives[0]?.transcript)
|
||||
.join('\n')
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to parse: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
5
server/src/core/stt/stt-parser-base.ts
Normal file
@ -0,0 +1,5 @@
|
||||
export abstract class STTParserBase {
|
||||
protected abstract name: string
|
||||
|
||||
protected abstract parse(buffer: Buffer): Promise<string | null>
|
||||
}
|
139
server/src/core/stt/stt.ts
Normal file
@ -0,0 +1,139 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import type { ASRAudioFormat } from '@/core/asr/types'
|
||||
import type { STTParser } from '@/core/stt/types'
|
||||
import { STT_PROVIDER, VOICE_CONFIG_PATH } from '@/constants'
|
||||
import { SOCKET_SERVER, ASR } from '@/core'
|
||||
import { STTParserNames, STTProviders } from '@/core/stt/types'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
const PROVIDERS_MAP = {
|
||||
[STTProviders.GoogleCloudSTT]: STTParserNames.GoogleCloudSTT,
|
||||
[STTProviders.WatsonSTT]: STTParserNames.WatsonSTT,
|
||||
[STTProviders.CoquiSTT]: STTParserNames.CoquiSTT
|
||||
}
|
||||
|
||||
export default class STT {
|
||||
private static instance: STT
|
||||
|
||||
private parser: STTParser = undefined
|
||||
|
||||
constructor() {
|
||||
if (!STT.instance) {
|
||||
LogHelper.title('STT')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
STT.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
public get isParserReady(): boolean {
|
||||
return !!this.parser
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the STT provider
|
||||
*/
|
||||
public async init(): Promise<boolean> {
|
||||
LogHelper.title('STT')
|
||||
LogHelper.info('Initializing STT...')
|
||||
|
||||
if (!Object.values(STTProviders).includes(STT_PROVIDER as STTProviders)) {
|
||||
LogHelper.error(
|
||||
`The STT provider "${STT_PROVIDER}" does not exist or is not yet supported`
|
||||
)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
if (
|
||||
STT_PROVIDER === STTProviders.GoogleCloudSTT &&
|
||||
typeof process.env['GOOGLE_APPLICATION_CREDENTIALS'] === 'undefined'
|
||||
) {
|
||||
process.env['GOOGLE_APPLICATION_CREDENTIALS'] = path.join(
|
||||
VOICE_CONFIG_PATH,
|
||||
'google-cloud.json'
|
||||
)
|
||||
} else if (
|
||||
typeof process.env['GOOGLE_APPLICATION_CREDENTIALS'] !== 'undefined' &&
|
||||
process.env['GOOGLE_APPLICATION_CREDENTIALS'].indexOf(
|
||||
'google-cloud.json'
|
||||
) === -1
|
||||
) {
|
||||
LogHelper.warning(
|
||||
`The "GOOGLE_APPLICATION_CREDENTIALS" env variable is already set with the following value: "${process.env['GOOGLE_APPLICATION_CREDENTIALS']}"`
|
||||
)
|
||||
}
|
||||
|
||||
// Dynamically attribute the parser
|
||||
const { default: parser } = await import(
|
||||
path.join(
|
||||
__dirname,
|
||||
'parsers',
|
||||
PROVIDERS_MAP[STT_PROVIDER as STTProviders]
|
||||
)
|
||||
)
|
||||
this.parser = new parser() as STTParser
|
||||
|
||||
LogHelper.title('STT')
|
||||
LogHelper.success('STT initialized')
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the speech file and transcribe
|
||||
*/
|
||||
public async transcribe(audioFilePath: string): Promise<boolean> {
|
||||
LogHelper.info('Parsing WAVE file...')
|
||||
|
||||
if (!fs.existsSync(audioFilePath)) {
|
||||
LogHelper.error(`The WAVE file "${audioFilePath}" does not exist`)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
const buffer = fs.readFileSync(audioFilePath)
|
||||
const transcript = await this.parser?.parse(buffer)
|
||||
|
||||
if (transcript && transcript !== '') {
|
||||
// Forward the string to the client
|
||||
this.forward(transcript)
|
||||
} else {
|
||||
this.deleteAudios()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Forward string output to the client
|
||||
* and delete audio files once it has been forwarded
|
||||
*/
|
||||
private forward(str: string): void {
|
||||
SOCKET_SERVER.socket?.emit('recognized', str, (confirmation: string) => {
|
||||
if (confirmation === 'string-received') {
|
||||
this.deleteAudios()
|
||||
}
|
||||
})
|
||||
|
||||
LogHelper.success(`Parsing result: ${str}`)
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete audio files
|
||||
*/
|
||||
private deleteAudios(): void {
|
||||
const audioPaths = Object.keys(ASR.audioPaths)
|
||||
|
||||
for (let i = 0; i < audioPaths.length; i += 1) {
|
||||
const audioType = audioPaths[i] as ASRAudioFormat
|
||||
const audioPath = ASR.audioPaths[audioType]
|
||||
|
||||
if (fs.existsSync(audioPath)) {
|
||||
fs.unlinkSync(audioPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
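A rough usage sketch, assuming the `STT` singleton exported from `@/core` (as used by the socket server above); the audio file path is a placeholder and only `init()`, `isParserReady` and `transcribe()` come from the class itself.

import { STT } from '@/core'

const transcribeSample = async (): Promise<void> => {
  const isInitialized = await STT.init()

  if (isInitialized && STT.isParserReady) {
    // On success the transcript is forwarded to the client through the socket server
    await STT.transcribe('/tmp/sample.wav')
  }
}

transcribeSample().catch((e) => console.error(e))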
|
21
server/src/core/stt/types.ts
Normal file
@ -0,0 +1,21 @@
|
||||
import type CoquiSTTParser from '@/core/stt/parsers/coqui-stt-parser'
|
||||
import type GoogleCloudSTTParser from '@/core/stt/parsers/google-cloud-stt-parser'
|
||||
import type WatsonSTTParser from '@/core/stt/parsers/watson-stt-parser'
|
||||
|
||||
export enum STTProviders {
|
||||
GoogleCloudSTT = 'google-cloud-stt',
|
||||
WatsonSTT = 'watson-stt',
|
||||
CoquiSTT = 'coqui-stt'
|
||||
}
|
||||
|
||||
export enum STTParserNames {
|
||||
GoogleCloudSTT = 'google-cloud-stt-parser',
|
||||
WatsonSTT = 'watson-stt-parser',
|
||||
CoquiSTT = 'coqui-stt-parser'
|
||||
}
|
||||
|
||||
export type STTParser =
|
||||
| GoogleCloudSTTParser
|
||||
| WatsonSTTParser
|
||||
| CoquiSTTParser
|
||||
| undefined
|
@ -1,3 +1,7 @@
|
||||
/* eslint-disable */
|
||||
|
||||
// TODO: remove the synchronization capability
|
||||
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
@ -60,13 +64,12 @@ class Synchronizer {
|
||||
* Google Drive synchronization method
|
||||
*/
|
||||
googleDrive() {
|
||||
/* istanbul ignore next */
|
||||
return new Promise((resolve, reject) => {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
const driveFolderName = `leon-${this.classification.domain}-${this.classification.skill}`
|
||||
const folderMimeType = 'application/vnd.google-apps.folder'
|
||||
const entities = fs.readdirSync(this.downloadDir)
|
||||
const entities = await fs.promises.readdir(this.downloadDir)
|
||||
const key = JSON.parse(
|
||||
fs.readFileSync(
|
||||
await fs.promises.readFile(
|
||||
path.join(
|
||||
process.cwd(),
|
||||
'core/config/synchronizer/google-drive.json'
|
||||
|
@ -2,14 +2,20 @@ import Net from 'node:net'
|
||||
import { EventEmitter } from 'node:events'
|
||||
|
||||
import { IS_PRODUCTION_ENV } from '@/constants'
|
||||
import { OSTypes } from '@/types'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { OSHelper, OSTypes } from '@/helpers/os-helper'
|
||||
import { SystemHelper } from '@/helpers/system-helper'
|
||||
|
||||
// Time interval between each try (in ms)
|
||||
const INTERVAL = IS_PRODUCTION_ENV ? 3000 : 500
|
||||
// Number of retries to connect to the TCP server
|
||||
const RETRIES_NB = IS_PRODUCTION_ENV ? 8 : 30
|
||||
|
||||
interface ChunkData {
|
||||
topic: string
|
||||
data: unknown
|
||||
}
|
||||
|
||||
export default class TCPClient {
|
||||
private static instance: TCPClient
|
||||
|
||||
@ -49,7 +55,7 @@ export default class TCPClient {
|
||||
this.ee.emit('connected', null)
|
||||
})
|
||||
|
||||
this.tcpSocket.on('data', (chunk: { topic: string; data: unknown }) => {
|
||||
this.tcpSocket.on('data', (chunk: ChunkData) => {
|
||||
LogHelper.title('TCP Client')
|
||||
LogHelper.info(`Received data: ${String(chunk)}`)
|
||||
|
||||
@ -63,7 +69,7 @@ export default class TCPClient {
|
||||
if (err.code === 'ECONNREFUSED') {
|
||||
this.reconnectCounter += 1
|
||||
|
||||
const { type: osType } = OSHelper.getInformation()
|
||||
const { type: osType } = SystemHelper.getInformation()
|
||||
|
||||
if (this.reconnectCounter >= RETRIES_NB) {
|
||||
LogHelper.error('Failed to connect to the TCP server')
|
||||
@ -76,7 +82,7 @@ export default class TCPClient {
|
||||
if (this.reconnectCounter >= 5) {
|
||||
if (osType === OSTypes.MacOS) {
|
||||
LogHelper.warning(
|
||||
'The cold start of the TCP server can take a few more seconds on macOS. It should be a one time think, no worries'
|
||||
'The cold start of the TCP server can take a few more seconds on macOS. It should be a one-time thing, no worries'
|
||||
)
|
||||
}
|
||||
}
|
||||
|
99
server/src/core/tts/synthesizers/amazon-polly-synthesizer.ts
Normal file
@ -0,0 +1,99 @@
|
||||
import type { Stream } from 'node:stream'
|
||||
import path from 'node:path'
|
||||
import fs from 'node:fs'
|
||||
|
||||
import { Polly, SynthesizeSpeechCommand } from '@aws-sdk/client-polly'
|
||||
|
||||
import type { LongLanguageCode } from '@/types'
|
||||
import type { SynthesizeResult } from '@/core/tts/types'
|
||||
import type { AmazonVoiceConfigurationSchema } from '@/schemas/voice-config-schemas'
|
||||
import { LANG, VOICE_CONFIG_PATH, TMP_PATH } from '@/constants'
|
||||
import { TTS } from '@/core'
|
||||
import { TTSSynthesizerBase } from '@/core/tts/tts-synthesizer-base'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
const VOICES = {
|
||||
'en-US': {
|
||||
VoiceId: 'Matthew'
|
||||
},
|
||||
'fr-FR': {
|
||||
VoiceId: 'Mathieu'
|
||||
}
|
||||
}
|
||||
|
||||
export default class AmazonPollySynthesizer extends TTSSynthesizerBase {
|
||||
protected readonly name = 'Amazon Polly TTS Synthesizer'
|
||||
protected readonly lang = LANG as LongLanguageCode
|
||||
private readonly client: Polly | undefined = undefined
|
||||
|
||||
constructor(lang: LongLanguageCode) {
|
||||
super()
|
||||
|
||||
LogHelper.title(this.name)
|
||||
LogHelper.success('New instance')
|
||||
|
||||
const config: AmazonVoiceConfigurationSchema = JSON.parse(
|
||||
fs.readFileSync(path.join(VOICE_CONFIG_PATH, 'amazon.json'), 'utf8')
|
||||
)
|
||||
|
||||
try {
|
||||
this.lang = lang
|
||||
this.client = new Polly(config)
|
||||
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to initialize: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
public async synthesize(speech: string): Promise<SynthesizeResult | null> {
|
||||
const audioFilePath = path.join(
|
||||
TMP_PATH,
|
||||
`${Date.now()}-${StringHelper.random(4)}.mp3`
|
||||
)
|
||||
|
||||
try {
|
||||
if (this.client) {
|
||||
const result = await this.client.send(
|
||||
new SynthesizeSpeechCommand({
|
||||
OutputFormat: 'mp3',
|
||||
VoiceId: VOICES[this.lang].VoiceId,
|
||||
Text: speech
|
||||
})
|
||||
)
|
||||
// Cast to Node.js stream as the SDK returns a custom type that does not have a pipe method
|
||||
const AudioStream = result.AudioStream as Stream
|
||||
|
||||
if (!AudioStream) {
|
||||
LogHelper.error(`${this.name} - AudioStream is undefined`)
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
const wStream = fs.createWriteStream(audioFilePath)
|
||||
AudioStream.pipe(wStream)
|
||||
|
||||
await new Promise((resolve, reject) => {
|
||||
wStream.on('finish', resolve)
|
||||
wStream.on('error', reject)
|
||||
})
|
||||
|
||||
const duration = await this.getAudioDuration(audioFilePath)
|
||||
|
||||
TTS.em.emit('saved', duration)
|
||||
|
||||
return {
|
||||
audioFilePath,
|
||||
duration
|
||||
}
|
||||
}
|
||||
|
||||
LogHelper.error(`${this.name} - Client is not defined yet`)
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to synthesize speech: ${e}`)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
81
server/src/core/tts/synthesizers/flite-synthesizer.ts
Normal file
@ -0,0 +1,81 @@
|
||||
import path from 'node:path'
|
||||
import fs from 'node:fs'
|
||||
import { spawn } from 'node:child_process'
|
||||
|
||||
import type { LongLanguageCode } from '@/types'
|
||||
import type { SynthesizeResult } from '@/core/tts/types'
|
||||
import { LANG, TMP_PATH, BIN_PATH } from '@/constants'
|
||||
import { TTS } from '@/core'
|
||||
import { TTSSynthesizerBase } from '@/core/tts/tts-synthesizer-base'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
const FLITE_CONFIG = {
|
||||
int_f0_target_mean: 115.0, // Intonation (85-180 Hz men; 165-255 Hz women)
|
||||
f0_shift: 1.0, // Low or high
|
||||
duration_stretch: 1.0, // Speed (lower = faster)
|
||||
int_f0_target_stddev: 15.0 // Pitch variability (lower = more flat)
|
||||
}
|
||||
|
||||
export default class FliteSynthesizer extends TTSSynthesizerBase {
|
||||
protected readonly name = 'Flite TTS Synthesizer'
|
||||
protected readonly lang = LANG as LongLanguageCode
|
||||
private readonly binPath = path.join(BIN_PATH, 'flite', 'flite')
|
||||
|
||||
constructor(lang: LongLanguageCode) {
|
||||
super()
|
||||
|
||||
LogHelper.title(this.name)
|
||||
LogHelper.success('New instance')
|
||||
|
||||
this.lang = lang
|
||||
|
||||
if (this.lang !== 'en-US') {
|
||||
LogHelper.warning(
|
||||
'The Flite synthesizer only accepts the "en-US" language at the moment'
|
||||
)
|
||||
}
|
||||
|
||||
if (!fs.existsSync(this.binPath)) {
|
||||
LogHelper.error(
|
||||
`Cannot find ${this.binPath}. You can set up the offline TTS by running: "npm run setup:offline-tts"`
|
||||
)
|
||||
} else {
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
}
|
||||
}
|
||||
|
||||
public async synthesize(speech: string): Promise<SynthesizeResult | null> {
|
||||
const audioFilePath = path.join(
|
||||
TMP_PATH,
|
||||
`${Date.now()}-${StringHelper.random(4)}.wav`
|
||||
)
|
||||
const process = spawn(this.binPath, [
|
||||
speech,
|
||||
'--setf',
|
||||
`int_f0_target_mean=${FLITE_CONFIG.int_f0_target_mean}`,
|
||||
'--setf',
|
||||
`f0_shift=${FLITE_CONFIG.f0_shift}`,
|
||||
'--setf',
|
||||
`duration_stretch=${FLITE_CONFIG.duration_stretch}`,
|
||||
'--setf',
|
||||
`int_f0_target_stddev=${FLITE_CONFIG.int_f0_target_stddev}`,
|
||||
'-o',
|
||||
audioFilePath
|
||||
])
|
||||
|
||||
await new Promise((resolve, reject) => {
|
||||
process.stdout.on('end', resolve)
|
||||
process.stderr.on('data', reject)
|
||||
})
|
||||
|
||||
const duration = await this.getAudioDuration(audioFilePath)
|
||||
|
||||
TTS.em.emit('saved', duration)
|
||||
|
||||
return {
|
||||
audioFilePath,
|
||||
duration
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,99 @@
|
||||
import path from 'node:path'
|
||||
import fs from 'node:fs'
|
||||
|
||||
import type { TextToSpeechClient } from '@google-cloud/text-to-speech'
|
||||
import tts from '@google-cloud/text-to-speech'
|
||||
import { google } from '@google-cloud/text-to-speech/build/protos/protos'
|
||||
|
||||
import type { LongLanguageCode } from '@/types'
|
||||
import type { SynthesizeResult } from '@/core/tts/types'
|
||||
import { LANG, VOICE_CONFIG_PATH, TMP_PATH } from '@/constants'
|
||||
import { TTS } from '@/core'
|
||||
import { TTSSynthesizerBase } from '@/core/tts/tts-synthesizer-base'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
import SsmlVoiceGender = google.cloud.texttospeech.v1.SsmlVoiceGender
|
||||
|
||||
const VOICES = {
|
||||
'en-US': {
|
||||
languageCode: 'en-US',
|
||||
name: 'en-US-Wavenet-A',
|
||||
// name: 'en-GB-Standard-B', // Standard
|
||||
ssmlGender: SsmlVoiceGender.MALE
|
||||
},
|
||||
'fr-FR': {
|
||||
languageCode: 'fr-FR',
|
||||
name: 'fr-FR-Wavenet-B',
|
||||
ssmlGender: SsmlVoiceGender.MALE
|
||||
}
|
||||
}
|
||||
|
||||
export default class GoogleCloudTTSSynthesizer extends TTSSynthesizerBase {
|
||||
protected readonly name = 'Google Cloud TTS Synthesizer'
|
||||
protected readonly lang = LANG as LongLanguageCode
|
||||
private readonly client: TextToSpeechClient | undefined = undefined
|
||||
|
||||
constructor(lang: LongLanguageCode) {
|
||||
super()
|
||||
|
||||
LogHelper.title(this.name)
|
||||
LogHelper.success('New instance')
|
||||
|
||||
process.env['GOOGLE_APPLICATION_CREDENTIALS'] = path.join(
|
||||
VOICE_CONFIG_PATH,
|
||||
'google-cloud.json'
|
||||
)
|
||||
|
||||
try {
|
||||
this.lang = lang
|
||||
this.client = new tts.TextToSpeechClient()
|
||||
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to initialize: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
public async synthesize(speech: string): Promise<SynthesizeResult | null> {
|
||||
const audioFilePath = path.join(
|
||||
TMP_PATH,
|
||||
`${Date.now()}-${StringHelper.random(4)}.mp3`
|
||||
)
|
||||
|
||||
try {
|
||||
if (this.client) {
|
||||
const [response] = await this.client.synthesizeSpeech({
|
||||
input: {
|
||||
text: speech
|
||||
},
|
||||
voice: VOICES[this.lang],
|
||||
audioConfig: {
|
||||
audioEncoding: 'MP3'
|
||||
}
|
||||
})
|
||||
|
||||
await fs.promises.writeFile(
|
||||
audioFilePath,
|
||||
response.audioContent as Uint8Array | string,
|
||||
'binary'
|
||||
)
|
||||
|
||||
const duration = await this.getAudioDuration(audioFilePath)
|
||||
|
||||
TTS.em.emit('saved', duration)
|
||||
|
||||
return {
|
||||
audioFilePath,
|
||||
duration
|
||||
}
|
||||
}
|
||||
|
||||
LogHelper.error(`${this.name} - Client is not defined yet`)
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to synthesize speech: ${e}`)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
94
server/src/core/tts/synthesizers/watson-tts-synthesizer.ts
Normal file
@ -0,0 +1,94 @@
|
||||
import type { Stream } from 'node:stream'
|
||||
import path from 'node:path'
|
||||
import fs from 'node:fs'
|
||||
|
||||
import Tts from 'ibm-watson/text-to-speech/v1'
|
||||
import { IamAuthenticator } from 'ibm-watson/auth'
|
||||
|
||||
import type { WatsonVoiceConfigurationSchema } from '@/schemas/voice-config-schemas'
|
||||
import type { LongLanguageCode } from '@/types'
|
||||
import type { SynthesizeResult } from '@/core/tts/types'
|
||||
import { LANG, VOICE_CONFIG_PATH, TMP_PATH } from '@/constants'
|
||||
import { TTS } from '@/core'
|
||||
import { TTSSynthesizerBase } from '@/core/tts/tts-synthesizer-base'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
const VOICES = {
|
||||
'en-US': {
|
||||
voice: 'en-US_MichaelV3Voice'
|
||||
},
|
||||
'fr-FR': {
|
||||
voice: 'fr-FR_NicolasV3Voice'
|
||||
}
|
||||
}
|
||||
|
||||
export default class WatsonTTSSynthesizer extends TTSSynthesizerBase {
|
||||
protected readonly name = 'Watson TTS Synthesizer'
|
||||
protected readonly lang: LongLanguageCode = LANG as LongLanguageCode
|
||||
private readonly client: Tts | undefined = undefined
|
||||
|
||||
constructor(lang: LongLanguageCode) {
|
||||
super()
|
||||
|
||||
LogHelper.title(this.name)
|
||||
LogHelper.success('New instance')
|
||||
|
||||
const config: WatsonVoiceConfigurationSchema = JSON.parse(
|
||||
fs.readFileSync(path.join(VOICE_CONFIG_PATH, 'watson-stt.json'), 'utf8')
|
||||
)
|
||||
|
||||
try {
|
||||
this.lang = lang
|
||||
this.client = new Tts({
|
||||
authenticator: new IamAuthenticator({ apikey: config.apikey }),
|
||||
serviceUrl: config.url
|
||||
})
|
||||
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to initialize: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
public async synthesize(speech: string): Promise<SynthesizeResult | null> {
|
||||
const audioFilePath = path.join(
|
||||
TMP_PATH,
|
||||
`${Date.now()}-${StringHelper.random(4)}.mp3`
|
||||
)
|
||||
|
||||
try {
|
||||
if (this.client) {
|
||||
const response = await this.client.synthesize({
|
||||
voice: VOICES[this.lang].voice,
|
||||
text: speech,
|
||||
accept: 'audio/wav'
|
||||
})
|
||||
const result = response.result as Stream
|
||||
|
||||
const wStream = fs.createWriteStream(audioFilePath)
|
||||
result.pipe(wStream)
|
||||
|
||||
await new Promise((resolve, reject) => {
|
||||
wStream.on('finish', resolve)
|
||||
wStream.on('error', reject)
|
||||
})
|
||||
|
||||
const duration = await this.getAudioDuration(audioFilePath)
|
||||
|
||||
TTS.em.emit('saved', duration)
|
||||
|
||||
return {
|
||||
audioFilePath,
|
||||
duration
|
||||
}
|
||||
}
|
||||
|
||||
LogHelper.error(`${this.name} - Client is not defined yet`)
|
||||
} catch (e) {
|
||||
LogHelper.error(`${this.name} - Failed to synthesize speech: ${e}`)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
42
server/src/core/tts/tts-synthesizer-base.ts
Normal file
@ -0,0 +1,42 @@
|
||||
import ffmpeg from 'fluent-ffmpeg'
|
||||
import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg'
|
||||
import { path as ffprobePath } from '@ffprobe-installer/ffprobe'
|
||||
|
||||
import type { LongLanguageCode } from '@/types'
|
||||
import type { SynthesizeResult } from '@/core/tts/types'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
export abstract class TTSSynthesizerBase {
|
||||
protected abstract name: string
|
||||
protected abstract lang: LongLanguageCode
|
||||
|
||||
protected abstract synthesize(
|
||||
speech: string
|
||||
): Promise<SynthesizeResult | null>
|
||||
|
||||
protected async getAudioDuration(audioFilePath: string): Promise<number> {
|
||||
ffmpeg.setFfmpegPath(ffmpegPath)
|
||||
ffmpeg.setFfprobePath(ffprobePath)
|
||||
|
||||
// Use ffprobe to get the duration of the audio file and return the duration in milliseconds
|
||||
return new Promise((resolve, reject) => {
|
||||
ffmpeg.ffprobe(audioFilePath, (err, data) => {
|
||||
if (err) {
|
||||
LogHelper.error(`${this.name} - Failed to get audio duration: ${err}`)
|
||||
|
||||
return reject(0)
|
||||
}
|
||||
|
||||
const { duration } = data.format
|
||||
|
||||
if (!duration) {
|
||||
LogHelper.error(`${this.name} - Audio duration is undefined`)
|
||||
|
||||
return reject(0)
|
||||
}
|
||||
|
||||
return resolve(duration * 1_000)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
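To make the contract explicit, a hypothetical skeleton of a synthesizer built on this base class; everything about the "example" provider is an assumption, only the abstract members, the `SynthesizeResult` shape and the emitted "saved" event mirror the real synthesizers above.

import path from 'node:path'

import type { LongLanguageCode } from '@/types'
import type { SynthesizeResult } from '@/core/tts/types'
import { TMP_PATH } from '@/constants'
import { TTS } from '@/core'
import { TTSSynthesizerBase } from '@/core/tts/tts-synthesizer-base'

// Hypothetical synthesizer showing the expected shape; it does not generate real audio
export default class ExampleSynthesizer extends TTSSynthesizerBase {
  protected readonly name = 'Example TTS Synthesizer'
  protected readonly lang: LongLanguageCode = 'en-US'

  public async synthesize(speech: string): Promise<SynthesizeResult | null> {
    const audioFilePath = path.join(TMP_PATH, `${Date.now()}-example.wav`)

    console.log(`Would synthesize "${speech}" to ${audioFilePath}`)

    // A real synthesizer would write the generated audio to audioFilePath here,
    // then measure it with this.getAudioDuration(audioFilePath)
    const duration = 0

    TTS.em.emit('saved', duration)

    return { audioFilePath, duration }
  }
}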
|
176
server/src/core/tts/tts.ts
Normal file
@ -0,0 +1,176 @@
|
||||
import path from 'node:path'
|
||||
import events from 'node:events'
|
||||
import fs from 'node:fs'
|
||||
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
import type { TTSSynthesizer } from '@/core/tts/types'
|
||||
import { SOCKET_SERVER } from '@/core'
|
||||
import { TTS_PROVIDER, VOICE_CONFIG_PATH } from '@/constants'
|
||||
import { TTSSynthesizers, TTSProviders } from '@/core/tts/types'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
|
||||
interface Speech {
|
||||
text: string
|
||||
isFinalAnswer: boolean
|
||||
}
|
||||
|
||||
const PROVIDERS_MAP = {
|
||||
[TTSProviders.GoogleCloudTTS]: TTSSynthesizers.GoogleCloudTTS,
|
||||
[TTSProviders.WatsonTTS]: TTSSynthesizers.WatsonTTS,
|
||||
[TTSProviders.AmazonPolly]: TTSSynthesizers.AmazonPolly,
|
||||
[TTSProviders.Flite]: TTSSynthesizers.Flite
|
||||
}
|
||||
|
||||
export default class TTS {
|
||||
private static instance: TTS
|
||||
|
||||
private synthesizer: TTSSynthesizer = undefined
|
||||
private speeches: Speech[] = []
|
||||
|
||||
public lang: ShortLanguageCode = 'en'
|
||||
public em = new events.EventEmitter()
|
||||
|
||||
constructor() {
|
||||
if (!TTS.instance) {
|
||||
LogHelper.title('TTS')
|
||||
LogHelper.success('New instance')
|
||||
|
||||
TTS.instance = this
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the TTS provider
|
||||
*/
|
||||
public async init(newLang: ShortLanguageCode): Promise<boolean> {
|
||||
LogHelper.title('TTS')
|
||||
LogHelper.info('Initializing TTS...')
|
||||
|
||||
this.lang = newLang || this.lang
|
||||
|
||||
if (!Object.values(TTSProviders).includes(TTS_PROVIDER as TTSProviders)) {
|
||||
LogHelper.error(
|
||||
`The TTS provider "${TTS_PROVIDER}" does not exist or is not yet supported`
|
||||
)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
if (
|
||||
TTS_PROVIDER === TTSProviders.GoogleCloudTTS &&
|
||||
typeof process.env['GOOGLE_APPLICATION_CREDENTIALS'] === 'undefined'
|
||||
) {
|
||||
process.env['GOOGLE_APPLICATION_CREDENTIALS'] = path.join(
|
||||
VOICE_CONFIG_PATH,
|
||||
'google-cloud.json'
|
||||
)
|
||||
} else if (
|
||||
typeof process.env['GOOGLE_APPLICATION_CREDENTIALS'] !== 'undefined' &&
|
||||
process.env['GOOGLE_APPLICATION_CREDENTIALS'].indexOf(
|
||||
'google-cloud.json'
|
||||
) === -1
|
||||
) {
|
||||
LogHelper.warning(
|
||||
`The "GOOGLE_APPLICATION_CREDENTIALS" env variable is already set with the following value: "${process.env['GOOGLE_APPLICATION_CREDENTIALS']}"`
|
||||
)
|
||||
}
|
||||
|
||||
// Dynamically attribute the synthesizer
|
||||
const { default: synthesizer } = await import(
|
||||
path.join(
|
||||
__dirname,
|
||||
'synthesizers',
|
||||
PROVIDERS_MAP[TTS_PROVIDER as TTSProviders]
|
||||
)
|
||||
)
|
||||
this.synthesizer = new synthesizer(
|
||||
LangHelper.getLongCode(this.lang)
|
||||
) as TTSSynthesizer
|
||||
|
||||
this.onSaved()
|
||||
|
||||
LogHelper.title('TTS')
|
||||
LogHelper.success('TTS initialized')
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Forward the audio file buffer and duration to the client
|
||||
* and delete the audio file once it has been forwarded
|
||||
*/
|
||||
private async forward(speech: Speech): Promise<void> {
|
||||
if (this.synthesizer) {
|
||||
const result = await this.synthesizer.synthesize(speech.text)
|
||||
|
||||
if (!result) {
|
||||
LogHelper.error(
|
||||
'The TTS synthesizer failed to synthesize the speech as the result is null'
|
||||
)
|
||||
} else {
|
||||
const { audioFilePath, duration } = result
|
||||
const bitmap = await fs.promises.readFile(audioFilePath)
|
||||
|
||||
SOCKET_SERVER.socket?.emit(
|
||||
'audio-forwarded',
|
||||
{
|
||||
buffer: Buffer.from(bitmap),
|
||||
is_final_answer: speech.isFinalAnswer,
|
||||
duration
|
||||
},
|
||||
(confirmation: string) => {
|
||||
if (confirmation === 'audio-received') {
|
||||
fs.unlinkSync(audioFilePath)
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
} else {
|
||||
LogHelper.error('The TTS synthesizer is not initialized yet')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* When the synthesizer has saved a new audio file,
|
||||
* then shift the queue according to the audio file duration
|
||||
*/
|
||||
private onSaved(): void {
|
||||
this.em.on('saved', (duration) => {
|
||||
setTimeout(async () => {
|
||||
this.speeches.shift()
|
||||
|
||||
if (this.speeches[0]) {
|
||||
await this.forward(this.speeches[0])
|
||||
}
|
||||
}, duration)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Add speeches to the queue
|
||||
*/
|
||||
public async add(
|
||||
text: Speech['text'],
|
||||
isFinalAnswer: Speech['isFinalAnswer']
|
||||
): Promise<Speech[]> {
|
||||
/**
|
||||
* Flite fix. When the string is only one word,
|
||||
* Flite cannot save to a file. So we add a space at the end of the string
|
||||
*/
|
||||
if (TTS_PROVIDER === TTSProviders.Flite && text.indexOf(' ') === -1) {
|
||||
text += ' '
|
||||
}
|
||||
|
||||
const speech = { text, isFinalAnswer }
|
||||
|
||||
if (this.speeches.length > 0) {
|
||||
this.speeches.push(speech)
|
||||
} else {
|
||||
this.speeches.push(speech)
|
||||
await this.forward(speech)
|
||||
}
|
||||
|
||||
return this.speeches
|
||||
}
|
||||
}
|
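A brief usage sketch of the speech queue above, assuming a `tts` instance of this class has already been constructed and initialized elsewhere in the core; the spoken text is purely illustrative. The first speech added to an empty queue is forwarded right away, while later ones wait until the synthesizer's 'saved' event shifts the queue.

// Hypothetical call site: `tts` is an already-initialized instance of the class above
await tts.add('Hello, I am Leon.', false)
await tts.add('What can I do for you?', true)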
30
server/src/core/tts/types.ts
Normal file
30
server/src/core/tts/types.ts
Normal file
@ -0,0 +1,30 @@
|
||||
import type AmazonPollySynthesizer from '@/core/tts/synthesizers/amazon-polly-synthesizer'
|
||||
import type FliteSynthesizer from '@/core/tts/synthesizers/flite-synthesizer'
|
||||
import type GoogleCloudTTSSynthesizer from '@/core/tts/synthesizers/google-cloud-tts-synthesizer'
|
||||
import type WatsonTTSSynthesizer from '@/core/tts/synthesizers/watson-tts-synthesizer'
|
||||
|
||||
export enum TTSProviders {
|
||||
AmazonPolly = 'amazon-polly',
|
||||
GoogleCloudTTS = 'google-cloud-tts',
|
||||
WatsonTTS = 'watson-tts',
|
||||
Flite = 'flite'
|
||||
}
|
||||
|
||||
export enum TTSSynthesizers {
|
||||
AmazonPolly = 'amazon-polly-synthesizer',
|
||||
GoogleCloudTTS = 'google-cloud-tts-synthesizer',
|
||||
WatsonTTS = 'watson-tts-synthesizer',
|
||||
Flite = 'flite-synthesizer'
|
||||
}
|
||||
|
||||
export interface SynthesizeResult {
|
||||
audioFilePath: string
|
||||
duration: number
|
||||
}
|
||||
|
||||
export type TTSSynthesizer =
|
||||
| AmazonPollySynthesizer
|
||||
| FliteSynthesizer
|
||||
| GoogleCloudTTSSynthesizer
|
||||
| WatsonTTSSynthesizer
|
||||
| undefined
|
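The TTS initializer earlier in this diff imports its synthesizer through a PROVIDERS_MAP keyed by TTSProviders. That map is not part of the lines shown here, so the following is only a plausible sketch of how the two enums could be paired:

import { TTSProviders, TTSSynthesizers } from '@/core/tts/types'

// Illustrative mapping from provider identifiers to synthesizer module names;
// the core's actual PROVIDERS_MAP may differ
const PROVIDERS_MAP: Record<TTSProviders, TTSSynthesizers> = {
  [TTSProviders.AmazonPolly]: TTSSynthesizers.AmazonPolly,
  [TTSProviders.GoogleCloudTTS]: TTSSynthesizers.GoogleCloudTTS,
  [TTSProviders.WatsonTTS]: TTSSynthesizers.WatsonTTS,
  [TTSProviders.Flite]: TTSSynthesizers.Flite
}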
32
server/src/declarations.d.ts
vendored
Normal file
32
server/src/declarations.d.ts
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
/* eslint-disable @typescript-eslint/ban-types */
|
||||
|
||||
declare module '@ffprobe-installer/ffprobe' {
|
||||
export const path: string
|
||||
}
|
||||
|
||||
/**
|
||||
* NLP.js type definitions
|
||||
* @see https://github.com/axa-group/nlp.js/tree/master/packages
|
||||
*/
|
||||
interface BuiltinMicrosoft<T> {
|
||||
new (settings: unknown, container: unknown): T
|
||||
}
|
||||
interface Nlp<T> {
|
||||
new (settings: unknown, container: unknown): T
|
||||
}
|
||||
interface LangAll {
|
||||
register(container: unknown)
|
||||
}
|
||||
|
||||
declare module '@nlpjs/core-loader' {
|
||||
export const containerBootstrap: Function
|
||||
}
|
||||
declare module '@nlpjs/nlp' {
|
||||
export const Nlp: Nlp
|
||||
}
|
||||
declare module '@nlpjs/builtin-microsoft' {
|
||||
export const BuiltinMicrosoft: BuiltinMicrosoft
|
||||
}
|
||||
declare module '@nlpjs/lang-all' {
|
||||
export const LangAll: LangAll
|
||||
}
|
@ -1,15 +1,5 @@
|
||||
import { langs } from '@@/core/langs.json'
|
||||
|
||||
/**
|
||||
* ISO 639-1 (Language codes) - ISO 3166-1 (Country Codes)
|
||||
* @see https://www.iso.org/iso-639-language-codes.html
|
||||
* @see https://www.iso.org/iso-3166-country-codes.html
|
||||
*/
|
||||
|
||||
type Languages = typeof langs
|
||||
export type LongLanguageCode = keyof Languages
|
||||
type Language = Languages[LongLanguageCode]
|
||||
export type ShortLanguageCode = Language['short']
|
||||
import type { LongLanguageCode, ShortLanguageCode } from '@/types'
|
||||
|
||||
export class LangHelper {
|
||||
/**
|
||||
@ -27,9 +17,7 @@ export class LangHelper {
|
||||
* @param shortCode The short language code of the language
|
||||
* @example getLongCode('en') // en-US
|
||||
*/
|
||||
public static getLongCode(
|
||||
shortCode: ShortLanguageCode
|
||||
): LongLanguageCode | null {
|
||||
public static getLongCode(shortCode: ShortLanguageCode): LongLanguageCode {
|
||||
for (const longLanguage in langs) {
|
||||
const longLanguageType = longLanguage as LongLanguageCode
|
||||
const lang = langs[longLanguageType]
|
||||
@ -39,7 +27,7 @@ export class LangHelper {
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
return 'en-US'
|
||||
}
|
||||
|
||||
/**
|
||||
|
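Note the behavioral change above: getLongCode() no longer returns null for an unknown short code, it falls back to 'en-US'. A minimal sketch of what that means for callers such as the TTS synthesizer loader:

import { LangHelper } from '@/helpers/lang-helper'

// No null check is needed anymore: an unmatched short code resolves to 'en-US'
const longCode = LangHelper.getLongCode('fr') // 'fr-FR'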
@ -1,7 +1,6 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import { IS_TESTING_ENV } from '@/constants'
|
||||
import { DateHelper } from '@/helpers/date-helper'
|
||||
|
||||
export class LogHelper {
|
||||
@ -48,12 +47,10 @@ export class LogHelper {
|
||||
public static error(value: string): void {
|
||||
const data = `${DateHelper.getDateTime()} - ${value}`
|
||||
|
||||
if (!IS_TESTING_ENV) {
|
||||
if (fs.existsSync(LogHelper.ERRORS_PATH)) {
|
||||
fs.appendFileSync(LogHelper.ERRORS_PATH, `\n${data}`)
|
||||
} else {
|
||||
fs.writeFileSync(LogHelper.ERRORS_PATH, data, { flag: 'wx' })
|
||||
}
|
||||
if (fs.existsSync(LogHelper.ERRORS_PATH)) {
|
||||
fs.appendFileSync(LogHelper.ERRORS_PATH, `\n${data}`)
|
||||
} else {
|
||||
fs.writeFileSync(LogHelper.ERRORS_PATH, data, { flag: 'wx' })
|
||||
}
|
||||
|
||||
console.error('\x1b[31m🚨 %s\x1b[0m', value)
|
||||
|
@ -1,13 +1,13 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import type { ShortLanguageCode } from '@/helpers/lang-helper'
|
||||
import type { GlobalEntity } from '@/schemas/global-data-schemas'
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
import type { GlobalEntitySchema } from '@/schemas/global-data-schemas'
|
||||
import type {
|
||||
Domain,
|
||||
Skill,
|
||||
SkillConfig,
|
||||
SkillBridge
|
||||
DomainSchema,
|
||||
SkillSchema,
|
||||
SkillConfigSchema,
|
||||
SkillBridgeSchema
|
||||
} from '@/schemas/skill-schemas'
|
||||
|
||||
interface SkillDomain {
|
||||
@ -17,13 +17,14 @@ interface SkillDomain {
|
||||
[key: string]: {
|
||||
name: string
|
||||
path: string
|
||||
bridge: SkillBridge
|
||||
bridge: SkillBridgeSchema
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
interface SkillConfigWithGlobalEntities extends Omit<SkillConfig, 'entities'> {
|
||||
entities: Record<string, GlobalEntity>
|
||||
interface SkillConfigWithGlobalEntities
|
||||
extends Omit<SkillConfigSchema, 'entities'> {
|
||||
entities: Record<string, GlobalEntitySchema>
|
||||
}
|
||||
|
||||
const DOMAINS_DIR = path.join(process.cwd(), 'skills')
|
||||
@ -36,24 +37,29 @@ export class SkillDomainHelper {
|
||||
const skillDomains = new Map<string, SkillDomain>()
|
||||
|
||||
await Promise.all(
|
||||
fs.readdirSync(DOMAINS_DIR).map(async (entity) => {
|
||||
(
|
||||
await fs.promises.readdir(DOMAINS_DIR)
|
||||
).map(async (entity) => {
|
||||
const domainPath = path.join(DOMAINS_DIR, entity)
|
||||
|
||||
if (fs.statSync(domainPath).isDirectory()) {
|
||||
if ((await fs.promises.stat(domainPath)).isDirectory()) {
|
||||
const skills: SkillDomain['skills'] = {}
|
||||
const { name: domainName } = (await import(
|
||||
path.join(domainPath, 'domain.json')
|
||||
)) as Domain
|
||||
const skillFolders = fs.readdirSync(domainPath)
|
||||
)) as DomainSchema
|
||||
const skillFolders = await fs.promises.readdir(domainPath)
|
||||
|
||||
for (let i = 0; i < skillFolders.length; i += 1) {
|
||||
const skillAliasName = skillFolders[i] as string
|
||||
const skillPath = path.join(domainPath, skillAliasName)
|
||||
|
||||
if (fs.statSync(skillPath).isDirectory()) {
|
||||
if ((await fs.promises.stat(skillPath)).isDirectory()) {
|
||||
const { name: skillName, bridge: skillBridge } = JSON.parse(
|
||||
fs.readFileSync(path.join(skillPath, 'skill.json'), 'utf8')
|
||||
) as Skill
|
||||
await fs.promises.readFile(
|
||||
path.join(skillPath, 'skill.json'),
|
||||
'utf8'
|
||||
)
|
||||
) as SkillSchema
|
||||
|
||||
skills[skillName] = {
|
||||
name: skillAliasName,
|
||||
@ -82,9 +88,14 @@ export class SkillDomainHelper {
|
||||
* Get information of a specific domain
|
||||
* @param domain Domain to get info from
|
||||
*/
|
||||
public static getSkillDomainInfo(domain: SkillDomain['name']): Domain {
|
||||
public static async getSkillDomainInfo(
|
||||
domain: SkillDomain['name']
|
||||
): Promise<DomainSchema> {
|
||||
return JSON.parse(
|
||||
fs.readFileSync(path.join(DOMAINS_DIR, domain, 'domain.json'), 'utf8')
|
||||
await fs.promises.readFile(
|
||||
path.join(DOMAINS_DIR, domain, 'domain.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
@ -93,12 +104,12 @@ export class SkillDomainHelper {
|
||||
* @param domain Domain where the skill belongs
|
||||
* @param skill Skill to get info from
|
||||
*/
|
||||
public static getSkillInfo(
|
||||
public static async getSkillInfo(
|
||||
domain: SkillDomain['name'],
|
||||
skill: Skill['name']
|
||||
): Skill {
|
||||
skill: SkillSchema['name']
|
||||
): Promise<SkillSchema> {
|
||||
return JSON.parse(
|
||||
fs.readFileSync(
|
||||
await fs.promises.readFile(
|
||||
path.join(DOMAINS_DIR, domain, skill, 'skill.json'),
|
||||
'utf8'
|
||||
)
|
||||
@ -110,14 +121,14 @@ export class SkillDomainHelper {
|
||||
* @param configFilePath Path of the skill config file
|
||||
* @param lang Language short code
|
||||
*/
|
||||
public static getSkillConfig(
|
||||
public static async getSkillConfig(
|
||||
configFilePath: string,
|
||||
lang: ShortLanguageCode
|
||||
): SkillConfigWithGlobalEntities {
|
||||
): Promise<SkillConfigWithGlobalEntities> {
|
||||
const sharedDataPath = path.join(process.cwd(), 'core', 'data', lang)
|
||||
const configData = JSON.parse(
|
||||
fs.readFileSync(configFilePath, 'utf8')
|
||||
) as SkillConfig
|
||||
await fs.promises.readFile(configFilePath, 'utf8')
|
||||
) as SkillConfigSchema
|
||||
const result: SkillConfigWithGlobalEntities = {
|
||||
...configData,
|
||||
entities: {}
|
||||
@ -128,19 +139,23 @@ export class SkillDomainHelper {
|
||||
if (entities) {
|
||||
const entitiesKeys = Object.keys(entities)
|
||||
|
||||
entitiesKeys.forEach((entity) => {
|
||||
if (typeof entities[entity] === 'string') {
|
||||
const entityFilePath = path.join(
|
||||
sharedDataPath,
|
||||
entities[entity] as string
|
||||
)
|
||||
const entityRawData = fs.readFileSync(entityFilePath, {
|
||||
encoding: 'utf8'
|
||||
})
|
||||
await Promise.all(
|
||||
entitiesKeys.map(async (entity) => {
|
||||
if (typeof entities[entity] === 'string') {
|
||||
const entityFilePath = path.join(
|
||||
sharedDataPath,
|
||||
entities[entity] as string
|
||||
)
|
||||
const entityRawData = await fs.promises.readFile(entityFilePath, {
|
||||
encoding: 'utf8'
|
||||
})
|
||||
|
||||
result.entities[entity] = JSON.parse(entityRawData) as GlobalEntity
|
||||
}
|
||||
})
|
||||
result.entities[entity] = JSON.parse(
|
||||
entityRawData
|
||||
) as GlobalEntitySchema
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
configData.entities = entities
|
||||
}
|
||||
|
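Since getSkillDomainInfo(), getSkillInfo() and getSkillConfig() are now asynchronous, every call site has to await them. A minimal sketch under that assumption; the import path, domain and skill names below are only illustrative:

import path from 'node:path'

import { SkillDomainHelper } from '@/helpers/skill-domain-helper'

// Hypothetical call site: each helper now returns a Promise
;(async (): Promise<void> => {
  const domainInfo = await SkillDomainHelper.getSkillDomainInfo('leon')
  const skillInfo = await SkillDomainHelper.getSkillInfo('leon', 'greeting')
  const skillConfig = await SkillDomainHelper.getSkillConfig(
    path.join(process.cwd(), 'skills', 'leon', 'greeting', 'config', 'en.json'),
    'en'
  )

  console.log(domainInfo.name, skillInfo.name, Object.keys(skillConfig.entities))
})()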
@ -1,15 +1,7 @@
|
||||
import os from 'node:os'
|
||||
|
||||
export enum OSTypes {
|
||||
Windows = 'windows',
|
||||
MacOS = 'macos',
|
||||
Linux = 'linux',
|
||||
Unknown = 'unknown'
|
||||
}
|
||||
export enum CPUArchitectures {
|
||||
X64 = 'x64',
|
||||
ARM64 = 'arm64'
|
||||
}
|
||||
import { OSTypes, CPUArchitectures } from '@/types'
|
||||
|
||||
enum OSNames {
|
||||
Windows = 'Windows',
|
||||
MacOS = 'macOS',
|
||||
@ -39,7 +31,7 @@ type PartialInformation = {
|
||||
}
|
||||
}
|
||||
|
||||
export class OSHelper {
|
||||
export class SystemHelper {
|
||||
/**
|
||||
* Get information about your OS
|
||||
* N.B. Node.js returns info based on the compiled binary we are running on, not based on our machine hardware
|
@ -5,9 +5,8 @@ import {
|
||||
LANG as LEON_LANG,
|
||||
TCP_SERVER_BIN_PATH
|
||||
} from '@/constants'
|
||||
import { TCP_CLIENT } from '@/core'
|
||||
import { TCP_CLIENT, HTTP_SERVER, SOCKET_SERVER } from '@/core'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
import server from '@/core/http-server/server'
|
||||
;(async (): Promise<void> => {
|
||||
process.title = 'leon'
|
||||
|
||||
@ -23,6 +22,13 @@ import server from '@/core/http-server/server'
|
||||
// Connect the TCP client to the TCP server
|
||||
TCP_CLIENT.connect()
|
||||
|
||||
// Start the core server
|
||||
await server.init()
|
||||
// Start the HTTP server
|
||||
await HTTP_SERVER.init()
|
||||
|
||||
// TODO
|
||||
// Register HTTP API endpoints
|
||||
// await HTTP_API.register()
|
||||
|
||||
// Start the socket server
|
||||
SOCKET_SERVER.init()
|
||||
})()
|
||||
|
@ -8,23 +8,23 @@ import {
|
||||
amazonVoiceConfiguration,
|
||||
googleCloudVoiceConfiguration,
|
||||
watsonVoiceConfiguration,
|
||||
VoiceConfiguration
|
||||
VoiceConfigurationSchema
|
||||
} from '@/schemas/voice-config-schemas'
|
||||
import {
|
||||
globalAnswersSchemaObject,
|
||||
globalEntitySchemaObject,
|
||||
globalResolverSchemaObject,
|
||||
GlobalEntity,
|
||||
GlobalResolver,
|
||||
GlobalAnswers
|
||||
GlobalEntitySchema,
|
||||
GlobalResolverSchema,
|
||||
GlobalAnswersSchema
|
||||
} from '@/schemas/global-data-schemas'
|
||||
import {
|
||||
domainSchemaObject,
|
||||
skillSchemaObject,
|
||||
skillConfigSchemaObject,
|
||||
Domain,
|
||||
Skill,
|
||||
SkillConfig
|
||||
DomainSchema,
|
||||
SkillSchema,
|
||||
SkillConfigSchema
|
||||
} from '@/schemas/skill-schemas'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
@ -39,12 +39,12 @@ interface ObjectUnknown {
|
||||
const validateSchema = (
|
||||
schema: ObjectUnknown,
|
||||
contentToValidate: ObjectUnknown,
|
||||
customErrorMesage: string
|
||||
customErrorMessage: string
|
||||
): void => {
|
||||
const validate = ajv.compile(schema)
|
||||
const isValid = validate(contentToValidate)
|
||||
if (!isValid) {
|
||||
LogHelper.error(customErrorMesage)
|
||||
LogHelper.error(customErrorMessage)
|
||||
const errors = new AggregateAjvError(validate.errors ?? [])
|
||||
for (const error of errors) {
|
||||
LogHelper.error(error.message)
|
||||
@ -83,7 +83,7 @@ const GLOBAL_DATA_SCHEMAS = {
|
||||
|
||||
for (const file of voiceConfigFiles) {
|
||||
const voiceConfigPath = path.join(VOICE_CONFIG_PATH, file)
|
||||
const config: VoiceConfiguration = JSON.parse(
|
||||
const config: VoiceConfigurationSchema = JSON.parse(
|
||||
await fs.promises.readFile(voiceConfigPath, 'utf8')
|
||||
)
|
||||
const [configName] = file.split('.') as [keyof typeof VOICE_CONFIG_SCHEMAS]
|
||||
@ -112,7 +112,7 @@ const GLOBAL_DATA_SCHEMAS = {
|
||||
|
||||
for (const file of globalEntityFiles) {
|
||||
const globalEntityPath = path.join(globalEntitiesPath, file)
|
||||
const globalEntity: GlobalEntity = JSON.parse(
|
||||
const globalEntity: GlobalEntitySchema = JSON.parse(
|
||||
await fs.promises.readFile(globalEntityPath, 'utf8')
|
||||
)
|
||||
validateSchema(
|
||||
@ -132,7 +132,7 @@ const GLOBAL_DATA_SCHEMAS = {
|
||||
|
||||
for (const file of globalResolverFiles) {
|
||||
const globalResolverPath = path.join(globalResolversPath, file)
|
||||
const globalResolver: GlobalResolver = JSON.parse(
|
||||
const globalResolver: GlobalResolverSchema = JSON.parse(
|
||||
await fs.promises.readFile(globalResolverPath, 'utf8')
|
||||
)
|
||||
validateSchema(
|
||||
@ -146,11 +146,8 @@ const GLOBAL_DATA_SCHEMAS = {
|
||||
* Global answers checking
|
||||
*/
|
||||
const globalAnswersPath = path.join(GLOBAL_DATA_PATH, lang, 'answers.json')
|
||||
const answers: GlobalAnswers = JSON.parse(
|
||||
await fs.promises.readFile(
|
||||
globalAnswersPath,
|
||||
'utf8'
|
||||
)
|
||||
const answers: GlobalAnswersSchema = JSON.parse(
|
||||
await fs.promises.readFile(globalAnswersPath, 'utf8')
|
||||
)
|
||||
validateSchema(
|
||||
GLOBAL_DATA_SCHEMAS.answers,
|
||||
@ -172,7 +169,7 @@ const GLOBAL_DATA_SCHEMAS = {
|
||||
* Domain checking
|
||||
*/
|
||||
const pathToDomain = path.join(currentDomain.path, 'domain.json')
|
||||
const domainObject: Domain = JSON.parse(
|
||||
const domainObject: DomainSchema = JSON.parse(
|
||||
await fs.promises.readFile(pathToDomain, 'utf8')
|
||||
)
|
||||
validateSchema(
|
||||
@ -191,7 +188,7 @@ const GLOBAL_DATA_SCHEMAS = {
|
||||
*/
|
||||
if (currentSkill) {
|
||||
const pathToSkill = path.join(currentSkill.path, 'skill.json')
|
||||
const skillObject: Skill = JSON.parse(
|
||||
const skillObject: SkillSchema = JSON.parse(
|
||||
await fs.promises.readFile(pathToSkill, 'utf8')
|
||||
)
|
||||
validateSchema(
|
||||
@ -210,7 +207,7 @@ const GLOBAL_DATA_SCHEMAS = {
|
||||
|
||||
for (const file of skillConfigFiles) {
|
||||
const skillConfigPath = path.join(pathToSkillConfig, file)
|
||||
const skillConfig: SkillConfig = JSON.parse(
|
||||
const skillConfig: SkillConfigSchema = JSON.parse(
|
||||
await fs.promises.readFile(skillConfigPath, 'utf8')
|
||||
)
|
||||
validateSchema(
|
||||
|
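For clarity, a sketch of a validateSchema() call as it might appear inside this checker script, with a deliberately invalid payload; the schema and error message are made up for the example:

import { Type } from '@sinclair/typebox'

// Illustrative only: an empty name violates minLength, so validateSchema()
// logs the custom message followed by each aggregated Ajv error
const personSchemaObject = Type.Strict(
  Type.Object(
    { name: Type.String({ minLength: 1 }) },
    { additionalProperties: false }
  )
)

validateSchema(
  personSchemaObject,
  { name: '' },
  'The person object is not well configured'
)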
@ -53,6 +53,6 @@ export const globalAnswersSchemaObject = Type.Strict(
|
||||
)
|
||||
)
|
||||
|
||||
export type GlobalEntity = Static<typeof globalEntitySchemaObject>
|
||||
export type GlobalResolver = Static<typeof globalResolverSchemaObject>
|
||||
export type GlobalAnswers = Static<typeof globalAnswersSchemaObject>
|
||||
export type GlobalEntitySchema = Static<typeof globalEntitySchemaObject>
|
||||
export type GlobalResolverSchema = Static<typeof globalResolverSchemaObject>
|
||||
export type GlobalAnswersSchema = Static<typeof globalAnswersSchemaObject>
|
||||
|
@ -10,69 +10,66 @@ const skillDataTypes = [
|
||||
Type.Literal('global_resolver'),
|
||||
Type.Literal('entity')
|
||||
]
|
||||
const skillCustomEntityTypes = [
|
||||
Type.Array(
|
||||
Type.Object(
|
||||
{
|
||||
type: Type.Literal('trim'),
|
||||
name: Type.String({ minLength: 1 }),
|
||||
conditions: Type.Array(
|
||||
Type.Object(
|
||||
{
|
||||
type: Type.Union([
|
||||
Type.Literal('between'),
|
||||
Type.Literal('after'),
|
||||
Type.Literal('after_first'),
|
||||
Type.Literal('after_last'),
|
||||
Type.Literal('before'),
|
||||
Type.Literal('before_first'),
|
||||
Type.Literal('before_last')
|
||||
]),
|
||||
from: Type.Optional(
|
||||
Type.Union([
|
||||
Type.Array(Type.String({ minLength: 1 })),
|
||||
Type.String({ minLength: 1 })
|
||||
])
|
||||
),
|
||||
to: Type.Optional(
|
||||
Type.Union([
|
||||
Type.Array(Type.String({ minLength: 1 })),
|
||||
Type.String({ minLength: 1 })
|
||||
])
|
||||
)
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
const skillCustomEnumEntityType = Type.Object(
|
||||
{
|
||||
type: Type.Literal('enum'),
|
||||
name: Type.String(),
|
||||
options: Type.Record(
|
||||
Type.String({ minLength: 1 }),
|
||||
Type.Object({
|
||||
synonyms: Type.Array(Type.String({ minLength: 1 }))
|
||||
})
|
||||
)
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
)
|
||||
const skillCustomRegexEntityType = Type.Object(
|
||||
{
|
||||
type: Type.Literal('regex'),
|
||||
name: Type.String({ minLength: 1 }),
|
||||
regex: Type.String({ minLength: 1 })
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
)
|
||||
const skillCustomTrimEntityType = Type.Object(
|
||||
{
|
||||
type: Type.Literal('trim'),
|
||||
name: Type.String({ minLength: 1 }),
|
||||
conditions: Type.Array(
|
||||
Type.Object(
|
||||
{
|
||||
type: Type.Union([
|
||||
Type.Literal('between'),
|
||||
Type.Literal('after'),
|
||||
Type.Literal('after_first'),
|
||||
Type.Literal('after_last'),
|
||||
Type.Literal('before'),
|
||||
Type.Literal('before_first'),
|
||||
Type.Literal('before_last')
|
||||
]),
|
||||
from: Type.Optional(
|
||||
Type.Union([
|
||||
Type.Array(Type.String({ minLength: 1 })),
|
||||
Type.String({ minLength: 1 })
|
||||
])
|
||||
),
|
||||
to: Type.Optional(
|
||||
Type.Union([
|
||||
Type.Array(Type.String({ minLength: 1 })),
|
||||
Type.String({ minLength: 1 })
|
||||
])
|
||||
)
|
||||
)
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
)
|
||||
)
|
||||
),
|
||||
Type.Array(
|
||||
Type.Object(
|
||||
{
|
||||
type: Type.Literal('regex'),
|
||||
name: Type.String({ minLength: 1 }),
|
||||
regex: Type.String({ minLength: 1 })
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
)
|
||||
),
|
||||
Type.Array(
|
||||
Type.Object(
|
||||
{
|
||||
type: Type.Literal('enum'),
|
||||
name: Type.String(),
|
||||
options: Type.Record(
|
||||
Type.String({ minLength: 1 }),
|
||||
Type.Object({
|
||||
synonyms: Type.Array(Type.String({ minLength: 1 }))
|
||||
})
|
||||
)
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
)
|
||||
)
|
||||
},
|
||||
{ additionalProperties: false }
|
||||
)
|
||||
const skillCustomEntityTypes = [
|
||||
Type.Array(skillCustomTrimEntityType),
|
||||
Type.Array(skillCustomRegexEntityType),
|
||||
Type.Array(skillCustomEnumEntityType)
|
||||
]
|
||||
|
||||
export const domainSchemaObject = Type.Strict(
|
||||
@ -191,7 +188,16 @@ export const skillConfigSchemaObject = Type.Strict(
|
||||
)
|
||||
)
|
||||
|
||||
export type Domain = Static<typeof domainSchemaObject>
|
||||
export type Skill = Static<typeof skillSchemaObject>
|
||||
export type SkillConfig = Static<typeof skillConfigSchemaObject>
|
||||
export type SkillBridge = Static<typeof skillSchemaObject.bridge>
|
||||
export type DomainSchema = Static<typeof domainSchemaObject>
|
||||
export type SkillSchema = Static<typeof skillSchemaObject>
|
||||
export type SkillConfigSchema = Static<typeof skillConfigSchemaObject>
|
||||
export type SkillBridgeSchema = Static<typeof skillSchemaObject.bridge>
|
||||
export type SkillCustomTrimEntityTypeSchema = Static<
|
||||
typeof skillCustomTrimEntityType
|
||||
>
|
||||
export type SkillCustomRegexEntityTypeSchema = Static<
|
||||
typeof skillCustomRegexEntityType
|
||||
>
|
||||
export type SkillCustomEnumEntityTypeSchema = Static<
|
||||
typeof skillCustomEnumEntityType
|
||||
>
|
||||
|
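The custom entity schemas are now split into named trim/regex/enum definitions with exported Static types. As an illustration, a value that satisfies the new SkillCustomEnumEntityTypeSchema; the entity name and synonyms are invented for the example:

import type { SkillCustomEnumEntityTypeSchema } from '@/schemas/skill-schemas'

// Illustrative enum entity as it could appear in a skill config file
const colorEntity: SkillCustomEnumEntityTypeSchema = {
  type: 'enum',
  name: 'color',
  options: {
    red: { synonyms: ['red', 'crimson'] },
    blue: { synonyms: ['blue', 'navy'] }
  }
}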
@ -40,12 +40,16 @@ export const watsonVoiceConfiguration = Type.Strict(
|
||||
)
|
||||
)
|
||||
|
||||
export type AmazonVoiceConfiguration = Static<typeof amazonVoiceConfiguration>
|
||||
export type GoogleCloudVoiceConfiguration = Static<
|
||||
export type AmazonVoiceConfigurationSchema = Static<
|
||||
typeof amazonVoiceConfiguration
|
||||
>
|
||||
export type GoogleCloudVoiceConfigurationSchema = Static<
|
||||
typeof googleCloudVoiceConfiguration
|
||||
>
|
||||
export type WatsonVoiceConfiguration = Static<typeof watsonVoiceConfiguration>
|
||||
export type VoiceConfiguration =
|
||||
| AmazonVoiceConfiguration
|
||||
| GoogleCloudVoiceConfiguration
|
||||
| WatsonVoiceConfiguration
|
||||
export type WatsonVoiceConfigurationSchema = Static<
|
||||
typeof watsonVoiceConfiguration
|
||||
>
|
||||
export type VoiceConfigurationSchema =
|
||||
| AmazonVoiceConfigurationSchema
|
||||
| GoogleCloudVoiceConfigurationSchema
|
||||
| WatsonVoiceConfigurationSchema
|
||||
|
@ -1,88 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
|
||||
import wav from 'node-wav'
|
||||
import { Model } from 'stt'
|
||||
|
||||
import { IS_TESTING_ENV } from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
LogHelper.title('Coqui STT Parser')
|
||||
|
||||
const parser = {}
|
||||
let model = {}
|
||||
let desiredSampleRate = 16000
|
||||
|
||||
/**
|
||||
* Model and language model paths
|
||||
*/
|
||||
parser.conf = {
|
||||
model: 'bin/coqui/model.tflite',
|
||||
scorer: 'bin/coqui/huge-vocabulary.scorer'
|
||||
}
|
||||
|
||||
/**
|
||||
* Load models
|
||||
*/
|
||||
parser.init = (args) => {
|
||||
LogHelper.info(`Loading model from file ${args.model}...`)
|
||||
|
||||
if (!fs.existsSync(args.model)) {
|
||||
LogHelper.error(
|
||||
`Cannot find ${args.model}. You can setup the offline STT by running: "npm run setup:offline-stt"`
|
||||
)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
if (!fs.existsSync(args.scorer)) {
|
||||
LogHelper.error(
|
||||
`Cannot find ${args.scorer}. You can setup the offline STT by running: "npm run setup:offline-stt"`
|
||||
)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
/* istanbul ignore if */
|
||||
if (!IS_TESTING_ENV) {
|
||||
try {
|
||||
model = new Model(args.model)
|
||||
} catch (error) {
|
||||
throw Error(`model.stt: ${error}`)
|
||||
}
|
||||
desiredSampleRate = model.sampleRate()
|
||||
|
||||
try {
|
||||
model.enableExternalScorer(args.scorer)
|
||||
} catch (error) {
|
||||
throw Error(`model.enableExternalScorer: ${error}`)
|
||||
}
|
||||
}
|
||||
|
||||
LogHelper.success('Model loaded')
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse file and infer
|
||||
*/
|
||||
parser.parse = (buffer, cb) => {
|
||||
const wavDecode = wav.decode(buffer)
|
||||
|
||||
if (wavDecode.sampleRate < desiredSampleRate) {
|
||||
LogHelper.warning(
|
||||
`Original sample rate (${wavDecode.sampleRate}) is lower than ${desiredSampleRate}Hz. Up-sampling might produce erratic speech recognition`
|
||||
)
|
||||
}
|
||||
|
||||
/* istanbul ignore if */
|
||||
if (!IS_TESTING_ENV) {
|
||||
const string = model.stt(buffer)
|
||||
|
||||
cb({ string })
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
export default parser
|
@ -1,60 +0,0 @@
|
||||
import path from 'node:path'
|
||||
|
||||
import stt from '@google-cloud/speech'
|
||||
|
||||
import { LANG } from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
LogHelper.title('Google Cloud STT Parser')
|
||||
|
||||
const parser = {}
|
||||
let client = {}
|
||||
|
||||
parser.conf = {
|
||||
languageCode: LANG,
|
||||
encoding: 'LINEAR16',
|
||||
sampleRateHertz: 16000
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize Google Cloud Speech-to-Text based on the credentials in the JSON file
|
||||
* the env variable "GOOGLE_APPLICATION_CREDENTIALS" provides the JSON file path
|
||||
*/
|
||||
parser.init = () => {
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = path.join(
|
||||
process.cwd(),
|
||||
'core/config/voice/google-cloud.json'
|
||||
)
|
||||
|
||||
try {
|
||||
client = new stt.SpeechClient()
|
||||
|
||||
LogHelper.success('Parser initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`Google Cloud STT: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read buffer and give back a string
|
||||
*/
|
||||
parser.parse = async (buffer, cb) => {
|
||||
const audioBytes = buffer.toString('base64')
|
||||
const audio = { content: audioBytes }
|
||||
|
||||
try {
|
||||
const res = await client.recognize({
|
||||
audio,
|
||||
config: parser.conf
|
||||
})
|
||||
const string = res[0].results
|
||||
.map((data) => data.alternatives[0].transcript)
|
||||
.join('\n')
|
||||
|
||||
cb({ string })
|
||||
} catch (e) {
|
||||
LogHelper.error(`Google Cloud STT: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
export default parser
|
@ -1,133 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import { IS_TESTING_ENV } from '@/constants'
|
||||
import Asr from '@/core/asr'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
class Stt {
|
||||
constructor(socket, provider) {
|
||||
this.socket = socket
|
||||
this.provider = provider
|
||||
this.providers = ['google-cloud-stt', 'watson-stt', 'coqui-stt']
|
||||
this.parser = {}
|
||||
|
||||
LogHelper.title('STT')
|
||||
LogHelper.success('New instance')
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the STT provider
|
||||
*/
|
||||
init(cb) {
|
||||
LogHelper.info('Initializing STT...')
|
||||
|
||||
if (!this.providers.includes(this.provider)) {
|
||||
LogHelper.error(
|
||||
`The STT provider "${this.provider}" does not exist or is not yet supported`
|
||||
)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
/* istanbul ignore next */
|
||||
if (
|
||||
this.provider === 'google-cloud-stt' &&
|
||||
typeof process.env.GOOGLE_APPLICATION_CREDENTIALS === 'undefined'
|
||||
) {
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = path.join(
|
||||
process.cwd(),
|
||||
'core/config/voice/google-cloud.json'
|
||||
)
|
||||
} else if (
|
||||
typeof process.env.GOOGLE_APPLICATION_CREDENTIALS !== 'undefined' &&
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS.indexOf(
|
||||
'google-cloud.json'
|
||||
) === -1
|
||||
) {
|
||||
LogHelper.warning(
|
||||
`The "GOOGLE_APPLICATION_CREDENTIALS" env variable is already settled with the following value: "${process.env.GOOGLE_APPLICATION_CREDENTIALS}"`
|
||||
)
|
||||
}
|
||||
|
||||
/* istanbul ignore if */
|
||||
if (!IS_TESTING_ENV) {
|
||||
// Dynamically attribute the parser
|
||||
this.parser = require(`${__dirname}/${this.provider}/parser`)
|
||||
this.parser.default.init(this.parser.default.conf)
|
||||
}
|
||||
|
||||
LogHelper.title('STT')
|
||||
LogHelper.success('STT initialized')
|
||||
|
||||
cb(this)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Forward string output to the client
|
||||
* and delete audio files once it has been forwarded
|
||||
*/
|
||||
forward(string) {
|
||||
this.socket.emit('recognized', string, (confirmation) => {
|
||||
/* istanbul ignore next */
|
||||
if (confirmation === 'string-received') {
|
||||
Stt.deleteAudios()
|
||||
}
|
||||
})
|
||||
|
||||
LogHelper.success(`Parsing result: ${string}`)
|
||||
}
|
||||
|
||||
/**
|
||||
* Read the speech file and parse
|
||||
*/
|
||||
parse(file) {
|
||||
LogHelper.info('Parsing WAVE file...')
|
||||
|
||||
if (!fs.existsSync(file)) {
|
||||
LogHelper.error(`The WAVE file "${file}" does not exist`)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
const buffer = fs.readFileSync(file)
|
||||
/* istanbul ignore if */
|
||||
if (!IS_TESTING_ENV) {
|
||||
this.parser.default.parse(buffer, (data) => {
|
||||
if (data.string !== '') {
|
||||
// Forward the string to the client
|
||||
this.forward(data.string)
|
||||
} else {
|
||||
Stt.deleteAudios()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Delete audio files
|
||||
*/
|
||||
static deleteAudios() {
|
||||
return new Promise((resolve) => {
|
||||
const audios = Object.keys(Asr.audios)
|
||||
|
||||
for (let i = 0; i < audios.length; i += 1) {
|
||||
const audio = Asr.audios[audios[i]]
|
||||
|
||||
if (fs.existsSync(audio)) {
|
||||
fs.unlinkSync(Asr.audios[audios[i]])
|
||||
}
|
||||
|
||||
if (i + 1 === audios.length) {
|
||||
resolve()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export default Stt
|
@ -1,79 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import { Duplex } from 'node:stream'
|
||||
|
||||
import Stt from 'ibm-watson/speech-to-text/v1'
|
||||
import { IamAuthenticator } from 'ibm-watson/auth'
|
||||
|
||||
import { LANG } from '@/constants'
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
|
||||
LogHelper.title('Watson STT Parser')
|
||||
|
||||
const parser = {}
|
||||
let client = {}
|
||||
|
||||
parser.conf = {
|
||||
contentType: 'audio/wav',
|
||||
model: `${LANG}_BroadbandModel`
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize Watson Speech-to-Text based on credentials in the JSON file
|
||||
*/
|
||||
parser.init = () => {
|
||||
const config = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(process.cwd(), 'core/config/voice/watson-stt.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
|
||||
try {
|
||||
client = new Stt({
|
||||
authenticator: new IamAuthenticator({ apikey: config.apikey }),
|
||||
serviceUrl: config.url
|
||||
})
|
||||
|
||||
LogHelper.success('Parser initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`Watson STT: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read buffer and give back a string
|
||||
*/
|
||||
parser.parse = async (buffer, cb) => {
|
||||
const stream = new Duplex()
|
||||
stream.push(buffer)
|
||||
stream.push(null)
|
||||
parser.conf.audio = stream
|
||||
|
||||
client
|
||||
.recognize(parser.conf)
|
||||
.then(({ result }) => {
|
||||
const string = result.results
|
||||
.map((data) => data.alternatives[0].transcript)
|
||||
.join('\n')
|
||||
|
||||
cb({ string })
|
||||
})
|
||||
.catch((err) => {
|
||||
LogHelper.error(`Watson STT: ${err}`)
|
||||
})
|
||||
|
||||
client.recognize(parser.conf, (err, res) => {
|
||||
if (err) {
|
||||
LogHelper.error(`Watson STT: ${err}`)
|
||||
} else {
|
||||
const string = res.results
|
||||
.map((data) => data.alternatives[0].transcript)
|
||||
.join('\n')
|
||||
|
||||
cb({ string })
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
export default parser
|
@ -1,99 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import { Polly, SynthesizeSpeechCommand } from '@aws-sdk/client-polly'
|
||||
import Ffmpeg from 'fluent-ffmpeg'
|
||||
import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg'
|
||||
import { path as ffprobePath } from '@ffprobe-installer/ffprobe'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
LogHelper.title('Amazon Polly Synthesizer')
|
||||
|
||||
const synthesizer = {}
|
||||
const voices = {
|
||||
'en-US': {
|
||||
VoiceId: 'Matthew'
|
||||
},
|
||||
'fr-FR': {
|
||||
VoiceId: 'Mathieu'
|
||||
}
|
||||
}
|
||||
let client = {}
|
||||
|
||||
synthesizer.conf = {
|
||||
OutputFormat: 'mp3',
|
||||
VoiceId: ''
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize Amazon Polly based on credentials in the JSON file
|
||||
*/
|
||||
synthesizer.init = (lang) => {
|
||||
const config = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(process.cwd(), 'core/config/voice/amazon.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
synthesizer.conf.VoiceId = voices[lang].VoiceId
|
||||
|
||||
try {
|
||||
client = new Polly(config)
|
||||
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`Amazon Polly: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save string to audio file
|
||||
*/
|
||||
synthesizer.save = (speech, em, cb) => {
|
||||
const file = `${__dirname}/../../tmp/${Date.now()}-${StringHelper.random(
|
||||
4
|
||||
)}.mp3`
|
||||
|
||||
synthesizer.conf.Text = speech
|
||||
|
||||
client
|
||||
.send(new SynthesizeSpeechCommand(synthesizer.conf))
|
||||
.then(({ AudioStream }) => {
|
||||
const wStream = fs.createWriteStream(file)
|
||||
|
||||
AudioStream.pipe(wStream)
|
||||
|
||||
wStream.on('finish', () => {
|
||||
const ffmpeg = new Ffmpeg()
|
||||
ffmpeg.setFfmpegPath(ffmpegPath)
|
||||
ffmpeg.setFfprobePath(ffprobePath)
|
||||
|
||||
// Get file duration thanks to ffprobe
|
||||
ffmpeg.input(file).ffprobe((err, data) => {
|
||||
if (err) LogHelper.error(err)
|
||||
else {
|
||||
const duration = data.streams[0].duration * 1000
|
||||
em.emit('saved', duration)
|
||||
cb(file, duration)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
wStream.on('error', (err) => {
|
||||
LogHelper.error(`Amazon Polly: ${err}`)
|
||||
})
|
||||
})
|
||||
.catch((err) => {
|
||||
if (err.code === 'UnknownEndpoint') {
|
||||
LogHelper.error(
|
||||
`Amazon Polly: the region "${err.region}" does not exist or does not support the Polly service`
|
||||
)
|
||||
} else {
|
||||
LogHelper.error(`Amazon Polly: ${err.message}`)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
export default synthesizer
|
@ -1,93 +0,0 @@
|
||||
import { spawn } from 'node:child_process'
|
||||
import fs from 'node:fs'
|
||||
|
||||
import Ffmpeg from 'fluent-ffmpeg'
|
||||
import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg'
|
||||
import { path as ffprobePath } from '@ffprobe-installer/ffprobe'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
LogHelper.title('Flite Synthesizer')
|
||||
|
||||
const synthesizer = {}
|
||||
|
||||
synthesizer.conf = {
|
||||
int_f0_target_mean: 115.0, // Intonation (85-180 Hz men; 165-255 Hz women)
|
||||
f0_shift: 1.0, // Low or high
|
||||
duration_stretch: 1.0, // Speed (lower = faster)
|
||||
int_f0_target_stddev: 15.0 // Pitch variability (lower = more flat)
|
||||
}
|
||||
|
||||
/**
|
||||
* There is nothing to initialize for this synthesizer
|
||||
*/
|
||||
synthesizer.init = (lang) => {
|
||||
const flitePath = 'bin/flite/flite'
|
||||
|
||||
/* istanbul ignore if */
|
||||
if (lang !== 'en-US') {
|
||||
LogHelper.warning(
|
||||
'The Flite synthesizer only accepts the "en-US" language for the moment'
|
||||
)
|
||||
}
|
||||
|
||||
/* istanbul ignore if */
|
||||
if (!fs.existsSync(flitePath)) {
|
||||
LogHelper.error(
|
||||
`Cannot find ${flitePath} You can setup the offline TTS by running: "npm run setup:offline-tts"`
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Save string to audio file
|
||||
*/
|
||||
synthesizer.save = (speech, em, cb) => {
|
||||
const file = `${__dirname}/../../tmp/${Date.now()}-${StringHelper.random(
|
||||
4
|
||||
)}.wav`
|
||||
const process = spawn('bin/flite/flite', [
|
||||
speech,
|
||||
'--setf',
|
||||
`int_f0_target_mean=${synthesizer.conf.int_f0_target_mean}`,
|
||||
'--setf',
|
||||
`f0_shift=${synthesizer.conf.f0_shift}`,
|
||||
'--setf',
|
||||
`duration_stretch=${synthesizer.conf.duration_stretch}`,
|
||||
'--setf',
|
||||
`int_f0_target_stddev=${synthesizer.conf.int_f0_target_stddev}`,
|
||||
'-o',
|
||||
file
|
||||
])
|
||||
|
||||
/* istanbul ignore next */
|
||||
// Handle error
|
||||
process.stderr.on('data', (data) => {
|
||||
LogHelper.error(data.toString())
|
||||
})
|
||||
|
||||
process.stdout.on('end', () => {
|
||||
const ffmpeg = new Ffmpeg()
|
||||
ffmpeg.setFfmpegPath(ffmpegPath)
|
||||
ffmpeg.setFfprobePath(ffprobePath)
|
||||
|
||||
// Get file duration thanks to ffprobe
|
||||
ffmpeg.input(file).ffprobe((err, data) => {
|
||||
/* istanbul ignore if */
|
||||
if (err) LogHelper.error(err)
|
||||
else {
|
||||
const duration = data.streams[0].duration * 1000
|
||||
em.emit('saved', duration)
|
||||
cb(file, duration)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
export default synthesizer
|
@ -1,96 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import tts from '@google-cloud/text-to-speech'
|
||||
import Ffmpeg from 'fluent-ffmpeg'
|
||||
import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg'
|
||||
import { path as ffprobePath } from '@ffprobe-installer/ffprobe'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
LogHelper.title('Google Cloud TTS Synthesizer')
|
||||
|
||||
const synthesizer = {}
|
||||
const voices = {
|
||||
'en-US': {
|
||||
languageCode: 'en-US',
|
||||
name: 'en-US-Wavenet-A',
|
||||
// name: 'en-GB-Standard-B', // Standard
|
||||
ssmlGender: 'MALE'
|
||||
},
|
||||
'fr-FR': {
|
||||
languageCode: 'fr-FR',
|
||||
name: 'fr-FR-Wavenet-B',
|
||||
ssmlGender: 'MALE'
|
||||
}
|
||||
}
|
||||
let client = {}
|
||||
|
||||
synthesizer.conf = {
|
||||
voice: '',
|
||||
audioConfig: {
|
||||
audioEncoding: 'MP3'
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize Google Cloud Text-to-Speech based on credentials in the JSON file
|
||||
* The env variable "GOOGLE_APPLICATION_CREDENTIALS" provides the JSON file path
|
||||
*/
|
||||
synthesizer.init = (lang) => {
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = path.join(
|
||||
process.cwd(),
|
||||
'core/config/voice/google-cloud.json'
|
||||
)
|
||||
synthesizer.conf.voice = voices[lang]
|
||||
|
||||
try {
|
||||
client = new tts.TextToSpeechClient()
|
||||
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`Google Cloud TTS: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save string to audio file
|
||||
*/
|
||||
synthesizer.save = (speech, em, cb) => {
|
||||
const file = `${__dirname}/../../tmp/${Date.now()}-${StringHelper.random(
|
||||
4
|
||||
)}.mp3`
|
||||
|
||||
synthesizer.conf.input = { text: speech }
|
||||
|
||||
client.synthesizeSpeech(synthesizer.conf, (err, res) => {
|
||||
if (err) {
|
||||
LogHelper.error(`Google Cloud TTS: ${err}`)
|
||||
return
|
||||
}
|
||||
|
||||
fs.writeFile(file, res.audioContent, 'binary', (err) => {
|
||||
if (err) {
|
||||
LogHelper.error(`Google Cloud TTS: ${err}`)
|
||||
return
|
||||
}
|
||||
|
||||
const ffmpeg = new Ffmpeg()
|
||||
ffmpeg.setFfmpegPath(ffmpegPath)
|
||||
ffmpeg.setFfprobePath(ffprobePath)
|
||||
|
||||
// Get file duration thanks to ffprobe
|
||||
ffmpeg.input(file).ffprobe((err, data) => {
|
||||
if (err) LogHelper.error(err)
|
||||
else {
|
||||
const duration = data.streams[0].duration * 1000
|
||||
em.emit('saved', duration)
|
||||
cb(file, duration)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
export default synthesizer
|
@ -1,142 +0,0 @@
|
||||
import events from 'node:events'
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { LangHelper } from '@/helpers/lang-helper'
|
||||
|
||||
class Tts {
|
||||
constructor(socket, provider) {
|
||||
this.socket = socket
|
||||
this.provider = provider
|
||||
this.providers = ['flite', 'google-cloud-tts', 'amazon-polly', 'watson-tts']
|
||||
this.synthesizer = {}
|
||||
this.em = new events.EventEmitter()
|
||||
this.speeches = []
|
||||
this.lang = 'en'
|
||||
|
||||
LogHelper.title('TTS')
|
||||
LogHelper.success('New instance')
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the TTS provider
|
||||
*/
|
||||
init(newLang, cb) {
|
||||
LogHelper.info('Initializing TTS...')
|
||||
|
||||
this.lang = newLang || this.lang
|
||||
|
||||
if (!this.providers.includes(this.provider)) {
|
||||
LogHelper.error(
|
||||
`The TTS provider "${this.provider}" does not exist or is not yet supported`
|
||||
)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
/* istanbul ignore next */
|
||||
if (
|
||||
this.provider === 'google-cloud-tts' &&
|
||||
typeof process.env.GOOGLE_APPLICATION_CREDENTIALS === 'undefined'
|
||||
) {
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS = path.join(
|
||||
process.cwd(),
|
||||
'core/config/voice/google-cloud.json'
|
||||
)
|
||||
} else if (
|
||||
typeof process.env.GOOGLE_APPLICATION_CREDENTIALS !== 'undefined' &&
|
||||
process.env.GOOGLE_APPLICATION_CREDENTIALS.indexOf(
|
||||
'google-cloud.json'
|
||||
) === -1
|
||||
) {
|
||||
LogHelper.warning(
|
||||
`The "GOOGLE_APPLICATION_CREDENTIALS" env variable is already settled with the following value: "${process.env.GOOGLE_APPLICATION_CREDENTIALS}"`
|
||||
)
|
||||
}
|
||||
|
||||
// Dynamically attribute the synthesizer
|
||||
this.synthesizer = require(`${__dirname}/${this.provider}/synthesizer`)
|
||||
this.synthesizer.default.init(LangHelper.getLongCode(this.lang))
|
||||
|
||||
this.onSaved()
|
||||
|
||||
LogHelper.title('TTS')
|
||||
LogHelper.success('TTS initialized')
|
||||
|
||||
cb(this)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Forward buffer audio file and duration to the client
|
||||
* and delete audio file once it has been forwarded
|
||||
*/
|
||||
forward(speech) {
|
||||
this.synthesizer.default.save(speech.text, this.em, (file, duration) => {
|
||||
/* istanbul ignore next */
|
||||
const bitmap = fs.readFileSync(file)
|
||||
/* istanbul ignore next */
|
||||
this.socket.emit(
|
||||
'audio-forwarded',
|
||||
{
|
||||
buffer: Buffer.from(bitmap),
|
||||
is_final_answer: speech.isFinalAnswer,
|
||||
duration
|
||||
},
|
||||
(confirmation) => {
|
||||
if (confirmation === 'audio-received') {
|
||||
fs.unlinkSync(file)
|
||||
}
|
||||
}
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* When the synthesizer saved a new audio file
|
||||
* then shift the queue according to the audio file duration
|
||||
*/
|
||||
onSaved() {
|
||||
return new Promise((resolve) => {
|
||||
this.em.on('saved', (duration) => {
|
||||
setTimeout(() => {
|
||||
this.speeches.shift()
|
||||
|
||||
if (this.speeches[0]) {
|
||||
this.forward(this.speeches[0])
|
||||
}
|
||||
|
||||
resolve()
|
||||
}, duration)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Add speeches to the queue
|
||||
*/
|
||||
add(text, isFinalAnswer) {
|
||||
/**
|
||||
* Flite fix. When the string is only one word,
|
||||
* Flite cannot save to a file. So we add a space at the end of the string
|
||||
*/
|
||||
if (this.provider === 'flite' && text.indexOf(' ') === -1) {
|
||||
text += ' '
|
||||
}
|
||||
|
||||
const speech = { text, isFinalAnswer }
|
||||
|
||||
if (this.speeches.length > 0) {
|
||||
this.speeches.push(speech)
|
||||
} else {
|
||||
this.speeches.push(speech)
|
||||
this.forward(speech)
|
||||
}
|
||||
|
||||
return this.speeches
|
||||
}
|
||||
}
|
||||
|
||||
export default Tts
|
@ -1,97 +0,0 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
|
||||
import Tts from 'ibm-watson/text-to-speech/v1'
|
||||
import { IamAuthenticator } from 'ibm-watson/auth'
|
||||
import Ffmpeg from 'fluent-ffmpeg'
|
||||
import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg'
|
||||
import { path as ffprobePath } from '@ffprobe-installer/ffprobe'
|
||||
|
||||
import { LogHelper } from '@/helpers/log-helper'
|
||||
import { StringHelper } from '@/helpers/string-helper'
|
||||
|
||||
LogHelper.title('Watson TTS Synthesizer')
|
||||
|
||||
const synthesizer = {}
|
||||
const voices = {
|
||||
'en-US': {
|
||||
voice: 'en-US_MichaelV3Voice'
|
||||
},
|
||||
'fr-FR': {
|
||||
voice: 'fr-FR_NicolasV3Voice'
|
||||
}
|
||||
}
|
||||
let client = {}
|
||||
|
||||
synthesizer.conf = {
|
||||
voice: '',
|
||||
accept: 'audio/wav'
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize Watson Text-to-Speech based on credentials in the JSON file
|
||||
*/
|
||||
synthesizer.init = (lang) => {
|
||||
const config = JSON.parse(
|
||||
fs.readFileSync(
|
||||
path.join(process.cwd(), 'core/config/voice/watson-tts.json'),
|
||||
'utf8'
|
||||
)
|
||||
)
|
||||
synthesizer.conf.voice = voices[lang].voice
|
||||
|
||||
try {
|
||||
client = new Tts({
|
||||
authenticator: new IamAuthenticator({ apikey: config.apikey }),
|
||||
serviceUrl: config.url
|
||||
})
|
||||
|
||||
LogHelper.success('Synthesizer initialized')
|
||||
} catch (e) {
|
||||
LogHelper.error(`Watson TTS: ${e}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save string to audio file
|
||||
*/
|
||||
synthesizer.save = (speech, em, cb) => {
|
||||
const file = `${__dirname}/../../tmp/${Date.now()}-${StringHelper.random(
|
||||
4
|
||||
)}.wav`
|
||||
|
||||
synthesizer.conf.text = speech
|
||||
|
||||
client
|
||||
.synthesize(synthesizer.conf)
|
||||
.then(({ result }) => {
|
||||
const wStream = fs.createWriteStream(file)
|
||||
|
||||
result.pipe(wStream)
|
||||
|
||||
wStream.on('finish', () => {
|
||||
const ffmpeg = new Ffmpeg()
|
||||
ffmpeg.setFfmpegPath(ffmpegPath)
|
||||
ffmpeg.setFfprobePath(ffprobePath)
|
||||
|
||||
// Get file duration thanks to ffprobe
|
||||
ffmpeg.input(file).ffprobe((err, data) => {
|
||||
if (err) LogHelper.error(err)
|
||||
else {
|
||||
const duration = data.streams[0].duration * 1000
|
||||
em.emit('saved', duration)
|
||||
cb(file, duration)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
wStream.on('error', (err) => {
|
||||
LogHelper.error(`Watson TTS: ${err}`)
|
||||
})
|
||||
})
|
||||
.catch((err) => {
|
||||
LogHelper.error(`Watson TTS: ${err}`)
|
||||
})
|
||||
}
|
||||
|
||||
export default synthesizer
|
36
server/src/types.ts
Normal file
36
server/src/types.ts
Normal file
@ -0,0 +1,36 @@
|
||||
import type { langs } from '@@/core/langs.json'
|
||||
|
||||
/**
|
||||
* Contains common/shared types that are universal across the project
|
||||
* and cannot be placed in the respective core nodes
|
||||
*/
|
||||
|
||||
/**
|
||||
* Language
|
||||
*/
|
||||
|
||||
/**
|
||||
* ISO 639-1 (Language codes) - ISO 3166-1 (Country Codes)
|
||||
* @see https://www.iso.org/iso-639-language-codes.html
|
||||
* @see https://www.iso.org/iso-3166-country-codes.html
|
||||
*/
|
||||
|
||||
export type Languages = typeof langs
|
||||
export type LongLanguageCode = keyof Languages
|
||||
export type Language = Languages[LongLanguageCode]
|
||||
export type ShortLanguageCode = Language['short']
|
||||
|
||||
/**
|
||||
* System
|
||||
*/
|
||||
|
||||
export enum OSTypes {
|
||||
Windows = 'windows',
|
||||
MacOS = 'macos',
|
||||
Linux = 'linux',
|
||||
Unknown = 'unknown'
|
||||
}
|
||||
export enum CPUArchitectures {
|
||||
X64 = 'x64',
|
||||
ARM64 = 'arm64'
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
import path from 'node:path'
|
||||
|
||||
import type { ShortLanguageCode } from '@/helpers/lang-helper'
|
||||
import type { ShortLanguageCode } from '@/types'
|
||||
import { GLOBAL_DATA_PATH } from '@/constants'
|
||||
|
||||
/**
|
||||
|
@ -68,11 +68,16 @@
|
||||
"answer": {
|
||||
"intents": {
|
||||
"yes": {
|
||||
"utterance_samples": ["[Yes|Yep|Yup|Yeah]", "Sure", "Correct"],
|
||||
"utterance_samples": [
|
||||
"[Yes|Yep|Yup|Yeah]",
|
||||
"Of course",
|
||||
"Sure",
|
||||
"Correct"
|
||||
],
|
||||
"value": "y"
|
||||
},
|
||||
"no": {
|
||||
"utterance_samples": ["[No|Nope|Nah]"],
|
||||
"utterance_samples": ["[No|Nope|Nah]", "Not at all", "Of course not"],
|
||||
"value": "n"
|
||||
},
|
||||
"idk": {
|
||||
|
@ -34,7 +34,7 @@ def guess(params):
|
||||
aki.question_filter = session['question_filter']
|
||||
|
||||
resp = aki._parse_response(response)
|
||||
aki._update(resp, '"step":"0"' in response)
|
||||
aki._update(resp, '"step": "0"' in response)
|
||||
|
||||
if session['progression'] > 80:
|
||||
aki.win()
|
||||
|
@ -121,4 +121,4 @@ def raise_connection_error(response):
|
||||
elif response == "KO - ELEM LIST IS EMPTY" or response == "WARN - NO QUESTION":
|
||||
raise AkiNoQuestions("\"Akinator.step\" reached 79. No more questions")
|
||||
else:
|
||||
raise AkiConnectionFailure("An unknown error has occured. Server response: {}".format(response))
|
||||
raise AkiConnectionFailure("An unknown error has occured. HttpServer response: {}".format(response))
|
||||
|
Some files were not shown because too many files have changed in this diff.