# -*-makefile-*-
#
# create sentence piece models
#
# - create models from each part of a bitext
# - individual models for each language in each language pair
# - do not create new models if the data changes
# ---> models need to use the same segmentation/vocab
#
# TODO: should we do that for monolingual files instead
#       of creating them from the bilingual data only?
# ---> could use more data
# ---> don't need to re-create models for each language pair
#
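#
# example invocation (hypothetical; assumes SRCLANGS/TRGLANGS and the data
# extraction targets are defined elsewhere in this makefile collection):
#
#   make SRCLANGS=de TRGLANGS=en spm-models
#
# this should build ${SPMSRCMODEL} and ${SPMTRGMODEL} from the local training data
#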

.INTERMEDIATE: ${LOCAL_MONO_DATA}.${PRE}.charfreq
.INTERMEDIATE: ${LOCAL_TRAIN_SRC}.charfreq ${LOCAL_TRAIN_TRG}.charfreq


##----------------------------------------------
## sentence piece
##----------------------------------------------

spm-models: ${SPMSRCMODEL} ${SPMTRGMODEL}

# SPMEXTRA = --split_by_whitespace=false
SPMEXTRA =

## set to 1 if you want to generate SPM vocab file
GENERATE_SPM_VOC = 0
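## e.g. (hypothetical command-line override):
##   make GENERATE_SPM_VOC=1 spm-models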

# SPM_INPUT_SIZE = 10000000
SPM_INPUT_SIZE    = 2000000
SPM_SHUFFLE_INPUT = 0

ifneq (${DATA_IS_SHUFFLED},1)
  SPM_PREPROCESS = grep . | ${SHUFFLE}
else
  SPM_PREPROCESS = grep .
endif
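
## the preprocessing pipe drops empty lines (grep .) and, unless the data is
## already shuffled, shuffles it before 'head' takes the first SPM_INPUT_SIZE
## lines; e.g. (hypothetical) skip the extra shuffling for pre-shuffled data:
##   make DATA_IS_SHUFFLED=1 spm-models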


##-------------------------------------------
## simple trick to use a joint subword model:
## just duplicate the model to work for
## source and target language texts
##-------------------------------------------
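## e.g. (hypothetical): train a single joint model and link it for both sides
##   make USE_JOINT_SUBWORD_MODEL=1 spm-models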

ifeq ($(USE_JOINT_SUBWORD_MODEL),1)

${SPMSRCMODEL}: ${SPM_MODEL}
	ln -s $< $@
	ln -s $<.vocab $@.vocab

${SPMTRGMODEL}: ${SPM_MODEL}
	ln -s $< $@
	ln -s $<.vocab $@.vocab

else

##-------------------------------------------
## source and target side specific subword models:
##
## we keep the dependency on LOCAL_TRAIN_SRC
## to make multi-threaded make calls behave properly
## --> otherwise there can be multiple threads writing to the same file!
##-------------------------------------------
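## e.g. (hypothetical): with the shared data dependency in place, both models
## can be built in one parallel call
##   make -j2 spm-models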

${SPMSRCMODEL}: ${LOCAL_TRAIN_SRC}
ifneq (${wildcard ${SPMSRCMODEL}},)
	@echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
	@echo "!!!!!!!! $@ already exists!"
	@echo "!!!!!!!! re-use the old one even if there is new training data"
	@echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
	@echo "!!!!!!!! back-date $<"
	touch -r $@ $<
else
	mkdir -p ${dir $@}
ifeq (${USE_TARGET_LABELS},1)
	cut -f2- -d ' ' ${LOCAL_TRAIN_SRC} | ${SPM_PREPROCESS} | head -${SPM_INPUT_SIZE} > ${LOCAL_TRAIN_SRC}.text
else
	cat ${LOCAL_TRAIN_SRC} | ${SPM_PREPROCESS} | head -${SPM_INPUT_SIZE} > ${LOCAL_TRAIN_SRC}.text
endif
	${MAKE} ${LOCAL_TRAIN_SRC}.charfreq
	if [ `cat ${LOCAL_TRAIN_SRC}.charfreq | wc -l` -gt 1000 ]; then \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_SRCVOCAB_SIZE) --input=${LOCAL_TRAIN_SRC}.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=0.9995 --hard_vocab_limit=false; \
	else \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_SRCVOCAB_SIZE) --input=${LOCAL_TRAIN_SRC}.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=1.0 --hard_vocab_limit=false; \
	fi
	mv $@.model $@
ifeq (${GENERATE_SPM_VOC},1)
	${SPM_ENCODE} --model=$@ --generate_vocabulary < ${LOCAL_TRAIN_SRC}.text > $@.voc
endif
	rm -f ${LOCAL_TRAIN_SRC}.text
endif


## no labels on the target language side
${SPMTRGMODEL}: ${LOCAL_TRAIN_TRG}
ifneq (${wildcard ${SPMTRGMODEL}},)
	@echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
	@echo "!!!!!!!! $@ already exists!"
	@echo "!!!!!!!! re-use the old one even if there is new training data"
	@echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
	@echo "!!!!!!!! back-date $<"
	touch -r $@ $<
else
	mkdir -p ${dir $@}
	cat ${LOCAL_TRAIN_TRG} | ${SPM_PREPROCESS} | head -${SPM_INPUT_SIZE} > ${LOCAL_TRAIN_TRG}.text
	${MAKE} ${LOCAL_TRAIN_TRG}.charfreq
	if [ `cat ${LOCAL_TRAIN_TRG}.charfreq | wc -l` -gt 1000 ]; then \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_TRGVOCAB_SIZE) --input=${LOCAL_TRAIN_TRG}.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=0.9995 --hard_vocab_limit=false; \
	else \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_TRGVOCAB_SIZE) --input=${LOCAL_TRAIN_TRG}.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=1.0 --hard_vocab_limit=false; \
	fi
	mv $@.model $@
ifeq (${GENERATE_SPM_VOC},1)
	${SPM_ENCODE} --model=$@ --generate_vocabulary < ${LOCAL_TRAIN_TRG}.text > $@.voc
endif
	rm -f ${LOCAL_TRAIN_TRG}.text
endif

endif



##-------------------------------------------
## joint sentence piece model
## (concatenate source and target language texts)
##-------------------------------------------

${SPM_MODEL}: ${LOCAL_TRAIN_SRC} ${LOCAL_TRAIN_TRG}
ifneq (${wildcard ${SPM_MODEL}},)
	@echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
	@echo "!!!!!!!! $@ already exists!"
	@echo "!!!!!!!! re-use the old one even if there is new training data"
	@echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
	@echo "!!!!!!!! back-date $^"
	touch -r $@ $^
else
	mkdir -p ${dir $@}
	cat ${LOCAL_TRAIN_SRC} | ${SPM_PREPROCESS} | head -$$((${SPM_INPUT_SIZE}/2)) > ${LOCAL_TRAIN}.tmp
	cat ${LOCAL_TRAIN_TRG} | ${SPM_PREPROCESS} | head -$$((${SPM_INPUT_SIZE}/2)) >> ${LOCAL_TRAIN}.tmp
	${SHUFFLE} < ${LOCAL_TRAIN}.tmp > ${LOCAL_TRAIN}.text
	rm -f ${LOCAL_TRAIN}.tmp
	${MAKE} ${LOCAL_TRAIN}.text.charfreq
	if [ `cat ${LOCAL_TRAIN}.text.charfreq | wc -l` -gt 1000 ]; then \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_TRGVOCAB_SIZE) --input=${LOCAL_TRAIN}.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=0.9995 --hard_vocab_limit=false; \
	else \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_TRGVOCAB_SIZE) --input=${LOCAL_TRAIN}.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=1.0 --hard_vocab_limit=false; \
	fi
	mv $@.model $@
ifeq (${GENERATE_SPM_VOC},1)
	${SPM_ENCODE} --model=$@ --generate_vocabulary < ${LOCAL_TRAIN}.text > $@.voc
endif
	rm -f ${LOCAL_TRAIN}.text
endif




## sentence piece model trained on monolingual data

SPM_MONO    = ${SPMDIR}/${LANGSTR}/${SUBWORD_MODEL_NAME}.${SUBWORDS}${BPESIZE:000=}k-model
SPM_SRCMONO = ${SPMDIR}/${LANGSRCSTR}/${SUBWORD_MODEL_NAME}.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k-model
SPM_TRGMONO = ${SPMDIR}/${LANGTRGSTR}/${SUBWORD_MODEL_NAME}.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k-model
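## e.g. (illustration only, assuming SUBWORDS=spm and BPESIZE=32000): the
## ':000=' substitution strips the trailing '000', so SPM_MONO expands to
## something like ${SPMDIR}/${LANGSTR}/${SUBWORD_MODEL_NAME}.spm32k-model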

## vocabulary files created from monolingual data

SPMVOCAB    = ${SPMDIR}/${LANGSTR}/${SUBWORD_MODEL_NAME}.${SUBWORDS}${BPESIZE:000=}k.vocab.yml
SPMSRCVOCAB = ${SPMDIR}/${LANGSRCSTR}/${SUBWORD_MODEL_NAME}.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k.vocab.yml
SPMTRGVOCAB = ${SPMDIR}/${LANGTRGSTR}/${SUBWORD_MODEL_NAME}.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.vocab.yml

.PRECIOUS: ${SPM_MONO} ${SPM_SRCMONO} ${SPM_TRGMONO} ${SPMVOCAB}


mono-spm-vocab: ${SPMVOCAB}


ifneq (${SPMVOCAB},${SPMSRCVOCAB})
${SPMSRCVOCAB}:
	${MAKE} LANGS="${SRCLANGS}" BPESIZE=${SUBWORD_SRCVOCAB_SIZE} mono-spm-vocab
endif

ifneq (${SPMSRCVOCAB},${SPMTRGVOCAB})
ifneq (${SPMVOCAB},${SPMTRGVOCAB})
${SPMTRGVOCAB}:
	${MAKE} LANGS="${TRGLANGS}" BPESIZE=${SUBWORD_TRGVOCAB_SIZE} mono-spm-vocab
endif
endif


${SPMVOCAB}: ${LOCAL_MONO_DATA}.${PRE} ${SPM_MONO}
ifeq ($(wildcard ${SPMVOCAB}),)
	mkdir -p ${dir $@}
	${SPM_ENCODE} --model ${SPM_MONO} < $< |\
	${MARIAN_VOCAB} --max-size ${VOCABSIZE} > $@
else
	@echo "$@ already exists!"
	@echo "WARNING! No new vocabulary is created even though the data has changed!"
	@echo "WARNING! Delete the file if you want to start from scratch!"
	touch $@
endif




## sentence piece model trained on monolingual data

mono-spm-model: ${SPM_MONO}

ifneq (${SPM_MONO},${SPM_SRCMONO})
${SPM_SRCMONO}:
	${MAKE} LANGS="${SRCLANGS}" BPESIZE=${SUBWORD_SRCVOCAB_SIZE} mono-spm-model
endif

ifneq (${SPM_SRCMONO},${SPM_TRGMONO})
ifneq (${SPM_MONO},${SPM_TRGMONO})
${SPM_TRGMONO}:
	${MAKE} LANGS="${TRGLANGS}" BPESIZE=${SUBWORD_TRGVOCAB_SIZE} mono-spm-model
endif
endif


${SPM_MONO}: ${LOCAL_MONO_DATA}.${PRE}
ifeq ($(wildcard ${SPM_MONO}),)
	mkdir -p ${dir $@}
	cat $< | ${SPM_PREPROCESS} | head -${SPM_INPUT_SIZE} > $<.text
	${MAKE} ${LOCAL_MONO_DATA}.${PRE}.charfreq
	if [ `cat ${LOCAL_MONO_DATA}.${PRE}.charfreq | wc -l` -gt 1000 ]; then \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_TRGVOCAB_SIZE) --input=$<.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=0.9995 --hard_vocab_limit=false; \
	else \
	  ${SPM_TRAIN} ${SPMEXTRA} \
		--model_prefix=$@ --vocab_size=$(SUBWORD_TRGVOCAB_SIZE) --input=$<.text \
		--input_sentence_size ${SPM_INPUT_SIZE} --shuffle_input_sentence ${SPM_SHUFFLE_INPUT} \
		--character_coverage=1.0 --hard_vocab_limit=false; \
	fi
	mv $@.model $@
	${SPM_ENCODE} --model=$@ --generate_vocabulary < $<.text > $@.voc
	rm -f $<.text
else
	@echo "$@ already exists!"
	@echo "WARNING! No new SPM model created!"
	@echo "WARNING! Delete the file if you want to start from scratch!"
endif


## SentencePiece parameters:
##
# --input_sentence_size (maximum size of sentences the trainer loads)  type: int32  default: 10000000
# --hard_vocab_limit (If set to false, --vocab_size is considered as a soft limit.)  type: bool  default: true
# --training_sentence_size (maximum size of sentences to train sentence pieces)  type: int32  default: 10000000
# --vocab_size (vocabulary size)  type: int32  default: 8000
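#
# a rough sketch of how the training calls above expand (example values only;
# SPM_TRAIN is assumed to point to the spm_train binary):
#
#   spm_train --model_prefix=model.src --vocab_size=32000 \
#             --input=train.src.text --input_sentence_size 2000000 \
#             --shuffle_input_sentence 0 --character_coverage=0.9995 \
#             --hard_vocab_limit=false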




## character frequency table
## --> used to decide on the character coverage level
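## (in the recipes above: more than 1000 distinct characters in the .charfreq
## file selects --character_coverage=0.9995, otherwise 1.0 is used)
## e.g. (hypothetical file name):
##   [ `wc -l < corpus.charfreq` -gt 1000 ] && echo 0.9995 || echo 1.0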

## awk-based char-counter
#%.charfreq: %
#	sed 's/./& /g' < $< | tr ' ' "\n" | grep . |\
#	awk '!/^$$/{a[$$0]++}END{for (i in a)print i,a[i];}' > $@


## python-based char-counter (seems to be the fastest version)
## restrict to 1 million lines
%.charfreq: %
	head -1000000 $< > $<.1m
	-python -c "import collections, pprint; pprint.pprint(dict(collections.Counter(open('$<.1m', 'r').read())))" > $@
	rm -f $<.1m

%.charfreq: %.gz
	${GZIP} -cd < $< | head -1000000 > $<.1m
	-python -c "import collections, pprint; pprint.pprint(dict(collections.Counter(open('$<.1m', 'r').read())))" > $@
	rm -f $<.1m



## slow version
%.charfreq2: %
	head -10000000 $< |\
	sed 's/./& /g' | \
	tr ' ' "\n" | grep . |\
	sort | uniq -c > $@




## TODO: should we have vocab limits?
## --vocabulary={vocab_file}.L1 --vocabulary_threshold=50
## see https://github.com/google/sentencepiece#c-from-source
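## a possible sketch (not wired into the rules below; vocab.L1 is a
## hypothetical vocabulary created with 'spm_encode --generate_vocabulary'):
##   ${SPM_ENCODE} --model=model.L1 --vocabulary=vocab.L1 --vocabulary_threshold=50 < input.L1 > output.L1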


%.src.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k: %.src ${SUBWORD_SRC_MODEL}
ifeq (${USE_TARGET_LABELS},1)
	cut -f1 -d ' ' $< > $<.labels
	cut -f2- -d ' ' $< > $<.txt
	${SPM_ENCODE} --model $(word 2,$^) < $<.txt > $@.txt
	paste -d ' ' $<.labels $@.txt > $@
	rm -f $<.labels $<.txt $@.txt
else
	${SPM_ENCODE} --model $(word 2,$^) < $< > $@
endif


%.trg.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k: %.trg ${SUBWORD_TRG_MODEL}
	${SPM_ENCODE} --model $(word 2,$^) < $< > $@




## document-level models (with guided alignment)

%.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.gz:
	${MAKE} PRE_SRC=spm${SUBWORD_SRCVOCAB_SIZE:000=}k PRE_TRG=spm${SUBWORD_TRGVOCAB_SIZE:000=}k wordalign
	${SCRIPTDIR}/large-context.pl -l ${CONTEXT_SIZE} \
		${patsubst %.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.gz,%.src.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k.gz,$@} \
		${patsubst %.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.gz,%.trg.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.gz,$@} \
		${patsubst %.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.gz,%.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k-spm${SUBWORD_TRGVOCAB_SIZE:000=}k.src-trg.alg.gz,$@} \
	| ${GZIP} > $@.tmp.gz
	${GZIP} -cd < $@.tmp.gz | cut -f1 | ${GZIP} -c > $@
	${GZIP} -cd < $@.tmp.gz | cut -f2 | ${GZIP} -c > ${subst .src.,.trg.,$@}
	${GZIP} -cd < $@.tmp.gz | cut -f3 | \
	${GZIP} -c > ${patsubst %.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.gz,%.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}-spm${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.src-trg.alg.gz,$@}
	rm -f $@.tmp.gz


%.trg.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.gz: %.src.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}.gz
	@echo "done!"




## for validation and test data:

%.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}:
	${MAKE} PRE_SRC=spm${SUBWORD_SRCVOCAB_SIZE:000=}k PRE_TRG=spm${SUBWORD_TRGVOCAB_SIZE:000=}k devdata
	${MAKE} PRE_SRC=spm${SUBWORD_SRCVOCAB_SIZE:000=}k PRE_TRG=spm${SUBWORD_TRGVOCAB_SIZE:000=}k testdata
	${SCRIPTDIR}/large-context.pl -l ${CONTEXT_SIZE} \
		${patsubst %.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE},%.src.${SUBWORDS}${SUBWORD_SRCVOCAB_SIZE:000=}k,$@} \
		${patsubst %.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE},%.trg.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k,$@} \
	| ${GZIP} > $@.tmp.gz
	${GZIP} -cd < $@.tmp.gz | cut -f1 > $@
	${GZIP} -cd < $@.tmp.gz | cut -f2 > ${subst .src.,.trg.,$@}
	rm -f $@.tmp.gz


%.trg.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}: %.src.${SUBWORDS}${SUBWORD_TRGVOCAB_SIZE:000=}k.doc${CONTEXT_SIZE}
	@echo "done!"