1
1
mirror of https://github.com/leon-ai/leon.git synced 2024-11-23 20:12:08 +03:00

refactor: check script according to binaries changes

This commit is contained in:
louistiti 2022-10-19 00:22:37 +08:00
parent 02c28b85f6
commit 2463759717
No known key found for this signature in database
GPG Key ID: 0A1C3B043E70C77D
4 changed files with 270 additions and 159 deletions

View File

@@ -57,7 +57,7 @@
"prepare-release": "ts-node scripts/release/prepare-release.js",
"pre-release:python-bridge": "ts-node scripts/release/pre-release-binaries.js python-bridge",
"pre-release:tcp-server": "ts-node scripts/release/pre-release-binaries.js tcp-server",
"check": "ts-node scripts/run-check.js",
"check": "ts-node scripts/check.js",
"docker:build": "docker build -t leon-ai/leon .",
"docker:run": "docker compose up",
"docker:dev": "docker compose --file=docker-compose.dev.yml up",

View File

@@ -1,157 +1,256 @@
import fs from 'node:fs'
import os from 'node:os'
import { spawn } from 'node:child_process'
import dotenv from 'dotenv'
import { command } from 'execa'
import semver from 'semver'
import kill from 'tree-kill'
import { version } from '@@/package.json'
import { LogHelper } from '@/helpers/log-helper'
import { PYTHON_BRIDGE_BIN_PATH } from '@/constants'
import { PYTHON_BRIDGE_BIN_PATH, TCP_SERVER_BIN_PATH } from '@/constants'
dotenv.config()
/**
* Checking script
* Help to figure out what is installed or not
* Help to figure out the setup state
*/
export default () =>
new Promise(async (resolve, reject) => {
try {
const nodeMinRequiredVersion = '10'
const npmMinRequiredVersion = '5'
const flitePath = 'bin/flite/flite'
const coquiLanguageModelPath = 'bin/coqui/huge-vocabulary.scorer'
const amazonPath = 'core/config/voice/amazon.json'
const googleCloudPath = 'core/config/voice/google-cloud.json'
const watsonSttPath = 'core/config/voice/watson-stt.json'
const watsonTtsPath = 'core/config/voice/watson-tts.json'
const globalResolversNlpModelPath =
'core/data/models/leon-global-resolvers-model.nlp'
const skillsResolversNlpModelPath =
'core/data/models/leon-skills-resolvers-model.nlp'
const mainNlpModelPath = 'core/data/models/leon-main-model.nlp'
const report = {
can_run: { title: 'Run', type: 'error', v: true },
can_run_skill: { title: 'Run skills', type: 'error', v: true },
can_text: { title: 'Reply you by texting', type: 'error', v: true },
can_amazon_polly_tts: {
title: 'Amazon Polly text-to-speech',
type: 'warning',
v: true
},
can_google_cloud_tts: {
title: 'Google Cloud text-to-speech',
type: 'warning',
v: true
},
can_watson_tts: {
title: 'Watson text-to-speech',
type: 'warning',
v: true
},
can_offline_tts: {
title: 'Offline text-to-speech',
type: 'warning',
v: true
},
can_google_cloud_stt: {
title: 'Google Cloud speech-to-text',
type: 'warning',
v: true
},
can_watson_stt: {
title: 'Watson speech-to-text',
type: 'warning',
v: true
},
can_offline_stt: {
title: 'Offline speech-to-text',
type: 'warning',
v: true
}
;(async () => {
try {
const nodeMinRequiredVersion = '16'
const npmMinRequiredVersion = '5'
const flitePath = 'bin/flite/flite'
const coquiLanguageModelPath = 'bin/coqui/huge-vocabulary.scorer'
const amazonPath = 'core/config/voice/amazon.json'
const googleCloudPath = 'core/config/voice/google-cloud.json'
const watsonSttPath = 'core/config/voice/watson-stt.json'
const watsonTtsPath = 'core/config/voice/watson-tts.json'
const globalResolversNlpModelPath =
'core/data/models/leon-global-resolvers-model.nlp'
const skillsResolversNlpModelPath =
'core/data/models/leon-skills-resolvers-model.nlp'
const mainNlpModelPath = 'core/data/models/leon-main-model.nlp'
const pastebinData = {
leonVersion: null,
environment: {
osDetails: null,
nodeVersion: null,
npmVersion: null
},
nlpModels: {
globalResolversModelState: null,
skillsResolversModelState: null,
mainModelState: null
},
skillExecution: {
executionTime: null,
command: null,
output: null,
error: null
},
tcpServer: {
startTime: null,
command: null,
output: null,
error: null
},
report: null
}
const report = {
can_run: { title: 'Run', type: 'error', v: true },
can_run_skill: { title: 'Run skills', type: 'error', v: true },
can_text: { title: 'Reply you by texting', type: 'error', v: true },
can_start_tcp_server: {
title: 'Start the TCP server',
type: 'error',
v: true
},
can_amazon_polly_tts: {
title: 'Amazon Polly text-to-speech',
type: 'warning',
v: true
},
can_google_cloud_tts: {
title: 'Google Cloud text-to-speech',
type: 'warning',
v: true
},
can_watson_tts: {
title: 'Watson text-to-speech',
type: 'warning',
v: true
},
can_offline_tts: {
title: 'Offline text-to-speech',
type: 'warning',
v: true
},
can_google_cloud_stt: {
title: 'Google Cloud speech-to-text',
type: 'warning',
v: true
},
can_watson_stt: {
title: 'Watson speech-to-text',
type: 'warning',
v: true
},
can_offline_stt: {
title: 'Offline speech-to-text',
type: 'warning',
v: true
}
}
LogHelper.title('Checking')
LogHelper.title('Checking')
/**
* Leon version checking
*/
/**
* Leon version checking
*/
LogHelper.info('Leon version')
LogHelper.success(`${version}\n`)
LogHelper.info('Leon version')
LogHelper.success(`${version}\n`)
pastebinData.leonVersion = version
/**
* Environment checking
*/
/**
* Environment checking
*/
LogHelper.info('OS')
LogHelper.info('OS')
const osInfo = {
type: os.type(),
platform: os.platform(),
arch: os.arch(),
cpus: os.cpus().length,
release: os.release()
}
LogHelper.success(`${JSON.stringify(osInfo)}\n`)
;(
await Promise.all([
command('node --version', { shell: true }),
command('npm --version', { shell: true })
])
).forEach((p) => {
LogHelper.info(p.command)
const osInfo = {
type: os.type(),
platform: os.platform(),
arch: os.arch(),
cpus: os.cpus().length,
release: os.release()
}
LogHelper.success(`${JSON.stringify(osInfo)}\n`)
pastebinData.environment.osDetails = osInfo
;(
await Promise.all([
command('node --version', { shell: true }),
command('npm --version', { shell: true })
])
).forEach((p) => {
LogHelper.info(p.command)
if (
p.command.indexOf('node --version') !== -1 &&
!semver.satisfies(
semver.clean(p.stdout),
`>=${nodeMinRequiredVersion}`
)
) {
Object.keys(report).forEach((item) => {
if (report[item].type === 'error') report[item].v = false
})
LogHelper.error(
`${p.stdout}\nThe Node.js version must be >=${nodeMinRequiredVersion}. Please install it: https://nodejs.org (or use nvm)\n`
)
} else if (
p.command.indexOf('npm --version') !== -1 &&
!semver.satisfies(
semver.clean(p.stdout),
`>=${npmMinRequiredVersion}`
)
) {
Object.keys(report).forEach((item) => {
if (report[item].type === 'error') report[item].v = false
})
LogHelper.error(
`${p.stdout}\nThe npm version must be >=${npmMinRequiredVersion}. Please install it: https://www.npmjs.com/get-npm (or use nvm)\n`
)
} else {
LogHelper.success(`${p.stdout}\n`)
}
})
/**
* Skill execution checking
*/
try {
LogHelper.time('Skill execution time')
const p = await command(
`${PYTHON_BRIDGE_BIN_PATH} scripts/assets/intent-object.json`,
{ shell: true }
if (
p.command.indexOf('node --version') !== -1 &&
!semver.satisfies(semver.clean(p.stdout), `>=${nodeMinRequiredVersion}`)
) {
Object.keys(report).forEach((item) => {
if (report[item].type === 'error') report[item].v = false
})
LogHelper.error(
`${p.stdout}\nThe Node.js version must be >=${nodeMinRequiredVersion}. Please install it: https://nodejs.org (or use nvm)\n`
)
LogHelper.timeEnd('Skill execution time')
LogHelper.info(p.command)
} else if (
p.command.indexOf('npm --version') !== -1 &&
!semver.satisfies(semver.clean(p.stdout), `>=${npmMinRequiredVersion}`)
) {
Object.keys(report).forEach((item) => {
if (report[item].type === 'error') report[item].v = false
})
LogHelper.error(
`${p.stdout}\nThe npm version must be >=${npmMinRequiredVersion}. Please install it: https://www.npmjs.com/get-npm (or use nvm)\n`
)
} else {
LogHelper.success(`${p.stdout}\n`)
} catch (e) {
LogHelper.info(e.command)
report.can_run_skill.v = false
LogHelper.error(`${e}\n`)
if (p.command.includes('node --version')) {
pastebinData.environment.nodeVersion = p.stdout
} else if (p.command.includes('npm --version')) {
pastebinData.environment.npmVersion = p.stdout
}
}
})
/**
* Skill execution checking
*/
LogHelper.info('Executing a skill...')
try {
const executionStart = Date.now()
const p = await command(
`${PYTHON_BRIDGE_BIN_PATH} scripts/assets/intent-object.json`,
{ shell: true }
)
const executionEnd = Date.now()
const executionTime = executionEnd - executionStart
LogHelper.info(p.command)
pastebinData.skillExecution.command = p.command
LogHelper.success(p.stdout)
pastebinData.skillExecution.output = p.stdout
LogHelper.info(`Skill execution time: ${executionTime}ms\n`)
pastebinData.skillExecution.executionTime = `${executionTime}ms`
} catch (e) {
LogHelper.info(e.command)
report.can_run_skill.v = false
LogHelper.error(`${e}\n`)
pastebinData.skillExecution.error = JSON.stringify(e)
}
/**
* TCP server startup checking
*/
LogHelper.info('Starting the TCP server...')
const tcpServerCommand = `${TCP_SERVER_BIN_PATH} en`
const tcpServerStart = Date.now()
const p = spawn(tcpServerCommand, { shell: true })
LogHelper.info(tcpServerCommand)
pastebinData.tcpServer.command = tcpServerCommand
if (osInfo.platform === 'darwin') {
LogHelper.info(
'For the first start, it may take a few minutes to cold start the TCP server on macOS. No worries it is a one-time thing'
)
}
let tcpServerOutput = ''
p.stdout.on('data', (data) => {
const newData = data.toString()
tcpServerOutput += newData
if (newData?.toLowerCase().includes('waiting for')) {
kill(p.pid)
LogHelper.success('The TCP server can successfully start')
}
})
p.stderr.on('data', (data) => {
const newData = data.toString()
tcpServerOutput += newData
report.can_start_tcp_server.v = false
pastebinData.tcpServer.error = newData
LogHelper.error(`Cannot start the TCP server: ${newData}`)
})
const timeout = 3 * 60_000
// In case it takes too long, force kill
setTimeout(() => {
kill(p.pid)
const error = `The TCP server timed out after ${timeout}ms`
LogHelper.error(error)
pastebinData.tcpServer.error = error
report.can_start_tcp_server.v = false
}, timeout)
p.stdout.on('end', () => {
const tcpServerEnd = Date.now()
pastebinData.tcpServer.output = tcpServerOutput
pastebinData.tcpServer.startTime = `${tcpServerEnd - tcpServerStart}ms`
LogHelper.info(
`TCP server startup time: ${pastebinData.tcpServer.startTime}ms\n`
)
/**
* Global resolvers NLP model checking
@@ -163,16 +262,22 @@ export default () =>
!fs.existsSync(globalResolversNlpModelPath) ||
!Object.keys(fs.readFileSync(globalResolversNlpModelPath)).length
) {
const state = 'Global resolvers NLP model not found or broken'
report.can_text.v = false
Object.keys(report).forEach((item) => {
if (item.indexOf('stt') !== -1 || item.indexOf('tts') !== -1)
report[item].v = false
})
LogHelper.error(
'Global resolvers NLP model not found or broken. Try to generate a new one: "npm run train"\n'
`${state}. Try to generate a new one: "npm run train"\n`
)
pastebinData.nlpModels.globalResolversModelState = state
} else {
LogHelper.success('Found and valid\n')
const state = 'Found and valid'
LogHelper.success(`${state}\n`)
pastebinData.nlpModels.globalResolversModelState = state
}
/**
@@ -185,16 +290,22 @@ export default () =>
!fs.existsSync(skillsResolversNlpModelPath) ||
!Object.keys(fs.readFileSync(skillsResolversNlpModelPath)).length
) {
const state = 'Skills resolvers NLP model not found or broken'
report.can_text.v = false
Object.keys(report).forEach((item) => {
if (item.indexOf('stt') !== -1 || item.indexOf('tts') !== -1)
report[item].v = false
})
LogHelper.error(
'Skills resolvers NLP model not found or broken. Try to generate a new one: "npm run train"\n'
`${state}. Try to generate a new one: "npm run train"\n`
)
pastebinData.nlpModels.skillsResolversModelState = state
} else {
LogHelper.success('Found and valid\n')
const state = 'Found and valid'
LogHelper.success(`${state}\n`)
pastebinData.nlpModels.skillsResolversModelState = state
}
/**
@@ -207,16 +318,22 @@ export default () =>
!fs.existsSync(mainNlpModelPath) ||
!Object.keys(fs.readFileSync(mainNlpModelPath)).length
) {
const state = 'Main NLP model not found or broken'
report.can_text.v = false
Object.keys(report).forEach((item) => {
if (item.indexOf('stt') !== -1 || item.indexOf('tts') !== -1)
report[item].v = false
})
LogHelper.error(
'Main NLP model not found or broken. Try to generate a new one: "npm run train"\n'
`${state}. Try to generate a new one: "npm run train"\n`
)
pastebinData.nlpModels.mainModelState = state
} else {
LogHelper.success('Found and valid\n')
const state = 'Found and valid'
LogHelper.success(`${state}\n`)
pastebinData.nlpModels.mainModelState = state
}
/**
@@ -340,7 +457,12 @@ export default () =>
})
LogHelper.default('')
if (report.can_run.v && report.can_run_skill.v && report.can_text.v) {
if (
report.can_run.v &&
report.can_run_skill.v &&
report.can_text.v &&
report.can_start_tcp_server.v
) {
LogHelper.success('Hooray! Leon can run correctly')
LogHelper.info(
'If you have some yellow warnings, it is all good. It means some entities are not yet configured'
@@ -349,9 +471,13 @@ export default () =>
LogHelper.error('Please fix the errors above')
}
resolve()
} catch (e) {
LogHelper.error(e)
reject()
}
})
pastebinData.report = report
console.log('pastebinData', pastebinData)
process.exit(0)
})
} catch (e) {
LogHelper.error(e)
}
})()

View File

@@ -1,16 +0,0 @@
import { LoaderHelper } from '@/helpers/loader-helper'
import check from './check'
/**
 * Run the checking script with a terminal loader/spinner around it.
 * Errors are intentionally swallowed here: the check script itself
 * reports its failures, this wrapper only manages the loader lifecycle.
 */
;(async () => {
  LoaderHelper.start()
  try {
    await check()
  } catch (e) {
    // The check script logs its own errors; nothing to report here
  } finally {
    LoaderHelper.stop()
  }
})()

View File

@@ -21,7 +21,8 @@ class TCPServer:
self.tcp_socket.listen()
while True:
print('Waiting for connection...')
# Flush buffered output to make it IPC friendly (readable on stdout)
print('Waiting for connection...', flush=True)
# Our TCP server only needs to support one connection
self.conn, self.addr = self.tcp_socket.accept()