Mirror of https://github.com/leon-ai/leon.git (synced 2025-01-04 15:55:58 +03:00)

refactor(server): action loop

This commit is contained in:
parent d41e960373
commit 99af936803
@@ -27,6 +27,7 @@
     "ignorePatterns": "*.spec.js",
     "rules": {
       "quotes": ["error", "single"],
+      "@typescript-eslint/no-non-null-assertion": ["off"],
       "no-async-promise-executor": ["off"],
       "no-underscore-dangle": ["error", { "allowAfterThis": true }],
       "prefer-destructuring": ["error"],
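Context for this new rule: turning off @typescript-eslint/no-non-null-assertion allows the postfix "!" assertion, which the fallback loop further down in this commit starts using (fallbacks[i]!). A minimal, hypothetical illustration of what ESLint stops reporting, assuming noUncheckedIndexedAccess-style strictness (the array below is made up for the example):

// Hypothetical example, not part of the commit.
const fallbackWords: string[] = ['yes', 'no']
// Under strict index access, fallbackWords[0] is string | undefined;
// with the rule off, the "!" assertion below is no longer flagged by ESLint.
const first: string = fallbackWords[0]!
console.log(first.toUpperCase())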
@@ -1 +1,149 @@
-// TODO
+import fs from 'node:fs'
+import { join } from 'node:path'
+
+import type { NLPUtterance } from '@/core/nlp/types'
+import type { BrainProcessResult } from '@/core/brain/types'
+import { BRAIN, MODEL_LOADER, NER, NLU } from '@/core'
+import { LogHelper } from '@/helpers/log-helper'
+import { DEFAULT_NLU_RESULT } from '@/core/nlp/nlu/nlu'
+
+export class ActionLoop {
+  /**
+   * Handle action loop logic before NLU processing
+   */
+  public static async handle(
+    utterance: NLPUtterance
+  ): Promise<Partial<BrainProcessResult> | null> {
+    const { domain, intent } = NLU.conversation.activeContext
+    const [skillName, actionName] = intent.split('.')
+    const skillConfigPath = join(
+      process.cwd(),
+      'skills',
+      domain,
+      skillName,
+      `config/${BRAIN.lang}.json`
+    )
+    NLU.nluResult = {
+      ...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
+      slots: NLU.conversation.activeContext.slots,
+      utterance,
+      skillConfigPath,
+      classification: {
+        domain,
+        skill: skillName,
+        action: actionName,
+        confidence: 1
+      }
+    }
+    NLU.nluResult.entities = await NER.extractEntities(
+      BRAIN.lang,
+      skillConfigPath,
+      NLU.nluResult
+    )
+
+    // TODO: type
+    const { actions, resolvers } = JSON.parse(
+      fs.readFileSync(skillConfigPath, 'utf8')
+    )
+    const action = actions[NLU.nluResult.classification.action]
+    const { name: expectedItemName, type: expectedItemType } =
+      action.loop.expected_item
+    let hasMatchingEntity = false
+    let hasMatchingResolver = false
+
+    if (expectedItemType === 'entity') {
+      hasMatchingEntity =
+        NLU.nluResult.entities.filter(
+          ({ entity }) => expectedItemName === entity
+        ).length > 0
+    } else if (expectedItemType.indexOf('resolver') !== -1) {
+      const nlpObjs = {
+        global_resolver: MODEL_LOADER.globalResolversNLPContainer,
+        skill_resolver: MODEL_LOADER.skillsResolversNLPContainer
+      }
+      const result = await nlpObjs[expectedItemType].process(utterance)
+      const { intent } = result
+
+      const resolveResolvers = (resolver, intent) => {
+        const resolversPath = join(
+          process.cwd(),
+          'core/data',
+          BRAIN.lang,
+          'global-resolvers'
+        )
+        // Load the skill resolver or the global resolver
+        const resolvedIntents = !intent.includes('resolver.global')
+          ? resolvers[resolver]
+          : JSON.parse(fs.readFileSync(join(resolversPath, `${resolver}.json`)))
+
+        // E.g. resolver.global.denial -> denial
+        intent = intent.substring(intent.lastIndexOf('.') + 1)
+
+        return [
+          {
+            name: expectedItemName,
+            value: resolvedIntents.intents[intent].value
+          }
+        ]
+      }
+
+      // Resolve resolver if global resolver or skill resolver has been found
+      if (
+        intent &&
+        (intent.includes('resolver.global') ||
+          intent.includes(`resolver.${skillName}`))
+      ) {
+        LogHelper.title('NLU')
+        LogHelper.success('Resolvers resolved:')
+        NLU.nluResult.resolvers = resolveResolvers(expectedItemName, intent)
+        NLU.nluResult.resolvers.forEach((resolver) =>
+          LogHelper.success(`${intent}: ${JSON.stringify(resolver)}`)
+        )
+        hasMatchingResolver = NLU.nluResult.resolvers.length > 0
+      }
+    }
+
+    // Ensure expected items are in the utterance, otherwise clean context and reprocess
+    if (!hasMatchingEntity && !hasMatchingResolver) {
+      BRAIN.talk(`${BRAIN.wernicke('random_context_out_of_topic')}.`)
+      NLU.conversation.cleanActiveContext()
+      await NLU.process(utterance)
+      return null
+    }
+
+    try {
+      const processedData = await BRAIN.execute(NLU.nluResult)
+      // Reprocess with the original utterance that triggered the context at first
+      if (processedData.core?.restart === true) {
+        const { originalUtterance } = NLU.conversation.activeContext
+
+        NLU.conversation.cleanActiveContext()
+        await NLU.process(originalUtterance)
+        return null
+      }

+      /**
+       * In case there is no next action to prepare anymore
+       * and there is an explicit stop of the loop from the skill
+       */
+      if (
+        !processedData.action.next_action &&
+        processedData.core?.isInActionLoop === false
+      ) {
+        NLU.conversation.cleanActiveContext()
+        return null
+      }
+
+      // Break the action loop and prepare for the next action if necessary
+      if (processedData.core?.isInActionLoop === false) {
+        NLU.conversation.activeContext.isInActionLoop = !!processedData.action.loop
+        NLU.conversation.activeContext.actionName = processedData.action.next_action
+        NLU.conversation.activeContext.intent = `${processedData.classification.skill}.${processedData.action.next_action}`
+      }
+
+      return processedData
+    } catch (e) {
+      return null
+    }
+  }
+}
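For readers of the new ActionLoop.handle method, the shape of the skill config it loads from config/${BRAIN.lang}.json can be inferred from the property accesses above. The TypeScript sketch below is an assumption derived from this diff alone; only the fields actually read here are listed, and the interface name is made up:

// Assumed shape of the skill config consumed by ActionLoop.handle (sketch only).
interface SkillConfigSketch {
  actions: {
    [actionName: string]: {
      loop?: {
        expected_item: {
          // 'entity' is matched against NER entities;
          // '*_resolver' routes the utterance to an NLP resolver container
          type: 'entity' | 'global_resolver' | 'skill_resolver'
          name: string
        }
      }
    }
  }
  resolvers?: {
    [resolverName: string]: {
      intents: {
        // keyed by the last intent segment, e.g. 'denial' from 'resolver.global.denial'
        [intentKey: string]: { value: unknown }
      }
    }
  }
}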
@@ -4,17 +4,18 @@ import { spawn } from 'node:child_process'
 import kill from 'tree-kill'
 
-import type { ShortLanguageCode } from '@/types'
-import type { NLPUtterance, NLUResult } from '@/core/nlp/types'
+import type { Language, ShortLanguageCode } from '@/types'
+import type { NLPAction, NLPDomain, NLPSkill, NLPUtterance, NLUResult } from '@/core/nlp/types'
 import type { BrainProcessResult } from '@/core/brain/types'
 import { langs } from '@@/core/langs.json'
 import { TCP_SERVER_BIN_PATH } from '@/constants'
 import { TCP_CLIENT, BRAIN, SOCKET_SERVER, MODEL_LOADER, NER } from '@/core'
 import { LogHelper } from '@/helpers/log-helper'
 import { LangHelper } from '@/helpers/lang-helper'
+import { ActionLoop } from '@/core/nlp/action-loop'
 import Conversation from '@/core/nlp/conversation'
 
-const DEFAULT_NLU_RESULT = {
+export const DEFAULT_NLU_RESULT = {
   utterance: '',
   currentEntities: [],
   entities: [],
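DEFAULT_NLU_RESULT is only partially visible in this hunk; pulling together the fields touched across the diff, its consumers rely on at least the shape sketched below. This is an assumption for orientation, not the real NLUResult type:

// Fields of NLUResult used in this commit, gathered from the hunks above and below (sketch).
interface NLUResultSketch {
  utterance: string
  currentEntities: unknown[]
  entities: { entity: string }[]
  slots: Record<string, unknown>
  skillConfigPath: string
  resolvers: { name: string; value: unknown }[]
  classification: {
    domain: string
    skill: string
    action: string
    confidence: number
  }
}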
@@ -33,8 +34,8 @@ const DEFAULT_NLU_RESULT = {
 
 export default class NLU {
   private static instance: NLU
-  private conversation = new Conversation('conv0')
-  private nluResult: NLUResult = DEFAULT_NLU_RESULT
+  public nluResult: NLUResult = DEFAULT_NLU_RESULT
+  public conversation = new Conversation('conv0')
 
   constructor() {
     if (!NLU.instance) {
@@ -48,7 +49,10 @@ export default class NLU {
   /**
    * Set new language; recreate a new TCP server with new language; and reprocess understanding
    */
-  private switchLanguage(utterance: NLPUtterance, locale: ShortLanguageCode): unknown {
+  private switchLanguage(
+    utterance: NLPUtterance,
+    locale: ShortLanguageCode
+  ): unknown {
     const connectedHandler = async (): Promise<void> => {
       await this.process(utterance)
     }
@@ -70,147 +74,10 @@ export default class NLU {
     return {}
   }
 
-  /**
-   * Handle in action loop logic before NLU processing
-   */
-  private async handleActionLoop(utterance: NLPUtterance): Promise<Partial<BrainProcessResult> | null> {
-    const { domain, intent } = this.conversation.activeContext
-    const [skillName, actionName] = intent.split('.')
-    const skillConfigPath = join(
-      process.cwd(),
-      'skills',
-      domain,
-      skillName,
-      `config/${BRAIN.lang}.json`
-    )
-    this.nluResult = {
-      ...DEFAULT_NLU_RESULT, // Reset entities, slots, etc.
-      slots: this.conversation.activeContext.slots,
-      utterance,
-      skillConfigPath,
-      classification: {
-        domain,
-        skill: skillName,
-        action: actionName,
-        confidence: 1
-      }
-    }
-    this.nluResult.entities = await NER.extractEntities(
-      BRAIN.lang,
-      skillConfigPath,
-      this.nluResult
-    )
-
-    // TODO: type
-    const { actions, resolvers } = JSON.parse(
-      fs.readFileSync(skillConfigPath, 'utf8')
-    )
-    const action = actions[this.nluResult.classification.action]
-    const { name: expectedItemName, type: expectedItemType } =
-      action.loop.expected_item
-    let hasMatchingEntity = false
-    let hasMatchingResolver = false
-
-    if (expectedItemType === 'entity') {
-      hasMatchingEntity =
-        this.nluResult.entities.filter(
-          ({ entity }) => expectedItemName === entity
-        ).length > 0
-    } else if (expectedItemType.indexOf('resolver') !== -1) {
-      const nlpObjs = {
-        global_resolver: MODEL_LOADER.globalResolversNLPContainer,
-        skill_resolver: MODEL_LOADER.skillsResolversNLPContainer
-      }
-      const result = await nlpObjs[expectedItemType].process(utterance)
-      const { intent } = result
-
-      const resolveResolvers = (resolver, intent) => {
-        const resolversPath = join(
-          process.cwd(),
-          'core/data',
-          BRAIN.lang,
-          'global-resolvers'
-        )
-        // Load the skill resolver or the global resolver
-        const resolvedIntents = !intent.includes('resolver.global')
-          ? resolvers[resolver]
-          : JSON.parse(fs.readFileSync(join(resolversPath, `${resolver}.json`)))
-
-        // E.g. resolver.global.denial -> denial
-        intent = intent.substring(intent.lastIndexOf('.') + 1)
-
-        return [
-          {
-            name: expectedItemName,
-            value: resolvedIntents.intents[intent].value
-          }
-        ]
-      }
-
-      // Resolve resolver if global resolver or skill resolver has been found
-      if (
-        intent &&
-        (intent.includes('resolver.global') ||
-          intent.includes(`resolver.${skillName}`))
-      ) {
-        LogHelper.title('NLU')
-        LogHelper.success('Resolvers resolved:')
-        this.nluResult.resolvers = resolveResolvers(expectedItemName, intent)
-        this.nluResult.resolvers.forEach((resolver) =>
-          LogHelper.success(`${intent}: ${JSON.stringify(resolver)}`)
-        )
-        hasMatchingResolver = this.nluResult.resolvers.length > 0
-      }
-    }
-
-    // Ensure expected items are in the utterance, otherwise clean context and reprocess
-    if (!hasMatchingEntity && !hasMatchingResolver) {
-      BRAIN.talk(`${BRAIN.wernicke('random_context_out_of_topic')}.`)
-      this.conversation.cleanActiveContext()
-      await this.process(utterance)
-      return null
-    }
-
-    try {
-      const processedData = await BRAIN.execute(this.nluResult)
-      // Reprocess with the original utterance that triggered the context at first
-      if (processedData.core?.restart === true) {
-        const { originalUtterance } = this.conversation.activeContext
-
-        this.conversation.cleanActiveContext()
-        await this.process(originalUtterance)
-        return null
-      }
-
-      /**
-       * In case there is no next action to prepare anymore
-       * and there is an explicit stop of the loop from the skill
-       */
-      if (
-        !processedData.action.next_action &&
-        processedData.core?.isInActionLoop === false
-      ) {
-        this.conversation.cleanActiveContext()
-        return null
-      }
-
-      // Break the action loop and prepare for the next action if necessary
-      if (processedData.core?.isInActionLoop === false) {
-        this.conversation.activeContext.isInActionLoop = !!processedData.action.loop
-        this.conversation.activeContext.actionName = processedData.action.next_action
-        this.conversation.activeContext.intent = `${processedData.classification.skill}.${processedData.action.next_action}`
-      }
-
-      return processedData
-    } catch (e) {
-      return null
-    }
-  }
-
   /**
    * Handle slot filling
    */
-  private async handleSlotFilling(utterance: NLPUtterance) {
+  private async handleSlotFilling(utterance: NLPUtterance): Promise<Partial<BrainProcessResult> | null> {
     const processedData = await this.slotFill(utterance)
 
     /**
@@ -273,7 +140,7 @@ export default class NLU {
       if (this.conversation.hasActiveContext()) {
         // When the active context is in an action loop, then directly trigger the action
        if (this.conversation.activeContext.isInActionLoop) {
-          return resolve(await this.handleActionLoop(utterance))
+          return resolve(await ActionLoop.handle(utterance))
         }
 
         // When the active context has slots filled
@@ -346,7 +213,7 @@ export default class NLU {
         langs[LangHelper.getLongCode(locale)].fallbacks
       )
 
-      if (fallback === false) {
+      if (!fallback) {
         if (!BRAIN.isMuted) {
           BRAIN.talk(
             `${BRAIN.wernicke('random_unknown_intents')}.`,
@@ -569,7 +436,7 @@ export default class NLU {
    * 2. If the context is expecting slots, then loop over questions to slot fill
    * 3. Or go to the brain executor if all slots have been filled in one shot
    */
-  private async routeSlotFilling(intent) {
+  private async routeSlotFilling(intent: string): Promise<boolean> {
     const slots = await MODEL_LOADER.mainNLPContainer.slotManager.getMandatorySlots(intent)
     const hasMandatorySlots = Object.keys(slots)?.length > 0
 
@@ -611,7 +478,7 @@ export default class NLU {
    * Pickup and compare the right fallback
    * according to the wished skill action
    */
-  private fallback(fallbacks) {
+  private fallback(fallbacks: Language['fallbacks']): NLUResult | null {
     const words = this.nluResult.utterance.toLowerCase().split(' ')
 
     if (fallbacks.length > 0) {
@@ -619,17 +486,17 @@ export default class NLU {
       const tmpWords = []
 
       for (let i = 0; i < fallbacks.length; i += 1) {
-        for (let j = 0; j < fallbacks[i].words.length; j += 1) {
-          if (words.includes(fallbacks[i].words[j]) === true) {
-            tmpWords.push(fallbacks[i].words[j])
+        for (let j = 0; j < fallbacks[i]!.words.length; j += 1) {
+          if (words.includes(fallbacks[i]!.words[j] as string)) {
+            tmpWords.push(fallbacks[i]?.words[j])
           }
         }
 
-        if (JSON.stringify(tmpWords) === JSON.stringify(fallbacks[i].words)) {
+        if (JSON.stringify(tmpWords) === JSON.stringify(fallbacks[i]?.words)) {
           this.nluResult.entities = []
-          this.nluResult.classification.domain = fallbacks[i].domain
-          this.nluResult.classification.skill = fallbacks[i].skill
-          this.nluResult.classification.action = fallbacks[i].action
+          this.nluResult.classification.domain = fallbacks[i]?.domain as NLPDomain
+          this.nluResult.classification.skill = fallbacks[i]?.skill as NLPSkill
+          this.nluResult.classification.action = fallbacks[i]?.action as NLPAction
           this.nluResult.classification.confidence = 1
 
           LogHelper.success('Fallback found')
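The new Language['fallbacks'] parameter type points at the fallbacks arrays defined in core/langs.json; from the accesses in this loop, each entry carries at least the fields below. This is a sketch inferred from this hunk, not the actual JSON schema:

// Assumed per-entry shape of a language's fallbacks, inferred from the fallbacks[i] accesses above.
interface FallbackEntrySketch {
  words: string[] // every word must appear in the utterance for the fallback to match
  domain: string  // copied into nluResult.classification.domain
  skill: string   // copied into nluResult.classification.skill
  action: string  // copied into nluResult.classification.action
}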
@@ -638,6 +505,6 @@ export default class NLU {
         }
       }
 
-    return false
+    return null
   }
 }
@@ -1 +1,22 @@
-// TODO
+export class SlotFilling {
+  /**
+   * TODO
+   */
+  public static handle() {
+    // TODO
+  }
+
+  /**
+   * TODO
+   */
+  public static fillSlot() {
+    // TODO
+  }
+
+  /**
+   * TODO
+   */
+  public static route() {
+    // TODO
+  }
+}
@@ -15,9 +15,9 @@ import type { langs } from '@@/core/langs.json'
  * @see https://www.iso.org/iso-3166-country-codes.html
  */
 
-type Languages = typeof langs
+export type Languages = typeof langs
 export type LongLanguageCode = keyof Languages
-type Language = Languages[LongLanguageCode]
+export type Language = Languages[LongLanguageCode]
 export type ShortLanguageCode = Language['short']
 
 /**
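Exporting Languages and Language is what lets nlu.ts above type its fallback parameter as Language['fallbacks']. A minimal usage sketch; the import path mirrors the one already used in this diff, and the 'en' literal assumes English is declared in core/langs.json:

import type { Language, ShortLanguageCode } from '@/types'

// The array type NLU.fallback() now accepts in this commit
type Fallbacks = Language['fallbacks']

// Assumes 'en' is one of the short codes declared in core/langs.json
const locale: ShortLanguageCode = 'en'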