fix issubclass() call on python 3.7+ (#1462)
Summary: Fixes #2897. Also updates readmes to use --config-dir instead of --config-path for hydra runs, and adds __init__.py to the config dir.

Pull Request resolved: https://github.com/fairinternal/fairseq-py/pull/1462
Reviewed By: myleott
Differential Revision: D25163789
Pulled By: alexeib
fbshipit-source-id: f45f432174771c5c458480f984aedf12130b8522
Parent 168480c9f1 · Commit f13f299093
@@ -211,7 +211,7 @@ works for migrated tasks and models.

 ```shell script
 $ fairseq-hydra-train \
-    --config-path /path/to/external/configs \
+    --config-dir /path/to/external/configs \
     --config-name wiki103
 ```
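Context for the rename: in Hydra 1.0, `--config-path` overrides the primary config path compiled into the application, whereas `--config-dir` adds an extra directory to the config search path, which is what pointing at an external config directory requires.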
@@ -58,11 +58,11 @@ Note that the input is expected to be single channel, sampled at 16 kHz

 ```shell script
 $ fairseq-hydra-train \
     task.data=/path/to/data \
-    --config-path /path/to/fairseq-py/examples/wav2vec/config/pretraining \
+    --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \
     --config-name wav2vec2_base_librispeech
 ```

-Note: you can simulate 64 GPUs by using k GPUs and adding command line parameters (before --config-path)
+Note: you can simulate 64 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
 `distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 64/k

 ### Train a wav2vec 2.0 large model:
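To make the arithmetic in that note concrete (k = 8 is an illustrative choice, not from the commit): running on 8 GPUs, x = 64/8 = 8, so you would add `distributed_training.distributed_world_size=8 +optimization.update_freq='[8]'` before `--config-dir`. The same pattern applies to the 128-GPU and 24-GPU notes below.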
@@ -72,11 +72,11 @@ This configuration was used for the large model trained on the Libri-light dataset

 ```shell script
 $ fairseq-hydra-train \
     task.data=/path/to/data \
-    --config-path /path/to/fairseq-py/examples/wav2vec/config/pretraining \
+    --config-dir /path/to/fairseq-py/examples/wav2vec/config/pretraining \
     --config-name wav2vec2_large_librivox
 ```

-Note: you can simulate 128 GPUs by using k GPUs and adding command line parameters (before --config-path)
+Note: you can simulate 128 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
 `distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 128/k

 ### Fine-tune a pre-trained model with CTC:
@@ -96,14 +96,14 @@ $ fairseq-hydra-train \
     distributed_training.distributed_port=$PORT \
     task.data=/path/to/data \
     model.w2v_path=/path/to/model.pt \
-    --config-path /path/to/fairseq-py/examples/wav2vec/config/finetuning \
+    --config-dir /path/to/fairseq-py/examples/wav2vec/config/finetuning \
     --config-name base_100h
 ```

 There are other config files in the config/finetuning directory that can be used to fine-tune on other splits.
-You can specify the right config via the --config-name parameter.
+You can specify the right config via the `--config-name` parameter.

-Note: you can simulate 24 GPUs by using k GPUs and adding command line parameters (before --config-path)
+Note: you can simulate 24 GPUs by using k GPUs and adding command line parameters (before `--config-dir`)
 `distributed_training.distributed_world_size=k` `+optimization.update_freq='[x]'` where x = 24/k

 Decoding with a language model during training requires wav2letter [python bindings](https://github.com/facebookresearch/wav2letter/wiki/Building-Python-bindings).
@@ -47,6 +47,9 @@ def get_parser():
 def main(args):
     assert args.valid_percent >= 0 and args.valid_percent <= 1.0

+    if not os.path.exists(args.dest):
+        os.makedirs(args.dest)
+
     dir_path = os.path.realpath(args.root)
     search_path = os.path.join(dir_path, "**/*." + args.ext)
     rand = random.Random(args.seed)
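As an aside on the guard added in this hunk: on Python 3.2+, the exists-check and `makedirs` pair can be collapsed into a single call. A minimal sketch (the path is illustrative, not part of the commit):

```python
import os

# exist_ok=True makes makedirs a no-op when the directory already
# exists, replacing the explicit os.path.exists() check above.
os.makedirs("/path/to/dest", exist_ok=True)
```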
fairseq/config/__init__.py (new file, +4)

@@ -0,0 +1,4 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
@@ -1,4 +1,9 @@
 # @package _group_

+hydra:
+  run:
+    dir: .
+
 defaults:
   - task: null
   - model: null
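The added `hydra.run.dir: .` keeps each run's working directory where it was launched; by default, Hydra 1.0 creates a timestamped `outputs/` subdirectory per run and changes into it, which would otherwise relocate fairseq's checkpoints and logs.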
@@ -218,8 +218,7 @@ def _override_attr(
             isinstance(val, str)
             and not val.startswith("${")  # not interpolation
             and field_type != str
-            and inspect.isclass(field_type)
-            and not issubclass(field_type, Enum)  # not choices enum
+            and (not inspect.isclass(field_type) or not issubclass(field_type, Enum))  # not choices enum
         ):
             # upgrade old models that stored complex parameters as string
             val = ast.literal_eval(val)
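To illustrate the failure mode behind this change: since Python 3.7, typing generics such as `List[int]` are no longer classes, so passing one straight to `issubclass()` raises a `TypeError`, which is why the condition must short-circuit on `inspect.isclass()` first. A minimal sketch (the field type is illustrative):

```python
import inspect
from enum import Enum
from typing import List

field_type = List[int]  # a typing generic alias, not a class

# Unguarded: on Python 3.7+ this raises
# TypeError: issubclass() arg 1 must be a class
try:
    issubclass(field_type, Enum)
except TypeError as e:
    print(f"unguarded issubclass fails: {e}")

# Guarded form used in the fix: short-circuits for non-classes, so
# the "not a choices enum" test passes and literal_eval can run.
not_choices_enum = not inspect.isclass(field_type) or not issubclass(field_type, Enum)
print(not_choices_enum)  # True
```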