diff --git a/fairseq/optim/adadelta.py b/fairseq/optim/adadelta.py
new file mode 100644
index 000000000..f9d82047c
--- /dev/null
+++ b/fairseq/optim/adadelta.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the LICENSE file in
+# the root directory of this source tree. An additional grant of patent rights
+# can be found in the PATENTS file in the same directory.
+
+import torch.optim
+
+from . import FairseqOptimizer, register_optimizer
+
+
+@register_optimizer('adadelta')
+class Adadelta(FairseqOptimizer):
+    def __init__(self, args, params):
+        super().__init__(args, params)
+        self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)
+
+    @staticmethod
+    def add_args(parser):
+        """Add optimizer-specific arguments to the parser."""
+        parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO',
+                            help='coefficient used for computing a running average of squared gradients')
+        parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS',
+                            help='term added to the denominator to improve numerical stability')
+
+    @property
+    def optimizer_config(self):
+        """
+        Return a kwarg dictionary that will be used to override optimizer
+        args stored in checkpoints. This allows us to load a checkpoint and
+        resume training using a different set of optimizer args, e.g., with a
+        different learning rate.
+        """
+        return {
+            'lr': self.args.lr[0],
+            'rho': self.args.adadelta_rho,
+            'eps': self.args.adadelta_eps,
+            'weight_decay': self.args.weight_decay,
+        }
diff --git a/tests/test_binaries.py b/tests/test_binaries.py
index feff2f282..69276e498 100644
--- a/tests/test_binaries.py
+++ b/tests/test_binaries.py
@@ -220,6 +220,28 @@ class TestLanguageModeling(unittest.TestCase):
         eval_lm_main(data_dir)
 
 
+class TestCommonOptions(unittest.TestCase):
+
+    def test_optimizers(self):
+        with contextlib.redirect_stdout(StringIO()):
+            with tempfile.TemporaryDirectory('test_optimizers') as data_dir:
+                # Use just a bit of data and a tiny model to keep this test's runtime reasonable
+                create_dummy_data(data_dir, num_examples=10, maxlen=5)
+                preprocess_translation_data(data_dir)
+                optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta']
+                last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt')
+                for optimizer in optimizers:
+                    if os.path.exists(last_checkpoint):
+                        os.remove(last_checkpoint)
+                    train_translation_model(data_dir, 'lstm', [
+                        '--encoder-layers', '1',
+                        '--encoder-hidden-size', '32',
+                        '--decoder-layers', '1',
+                        '--optimizer', optimizer,
+                    ])
+                    generate_main(data_dir)
+
+
 def create_dummy_data(data_dir, num_examples=1000, maxlen=20):
 
     def _create_dummy_data(filename):
@@ -267,7 +289,6 @@ def train_translation_model(data_dir, arch, extra_flags=None):
             data_dir,
             '--save-dir', data_dir,
            '--arch', arch,
-            '--optimizer', 'nag',
            '--lr', '0.05',
            '--max-tokens', '500',
            '--max-epoch', '1',
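
As a sanity check outside the diff, here is a minimal sketch of how the optimizer_config kwargs above feed torch.optim.Adadelta. The argparse.Namespace values are illustrative stand-ins for fairseq's parsed command-line arguments (fairseq stores --lr as a list, hence lr[0]); nothing below is part of the change itself.

    import argparse

    import torch
    import torch.nn as nn
    import torch.optim

    # Illustrative stand-in for the parsed fairseq args; values mirror the
    # add_args() defaults registered in the new adadelta.py.
    args = argparse.Namespace(lr=[0.05], adadelta_rho=0.9, adadelta_eps=1e-6, weight_decay=0.0)

    # A dummy parameter to optimize, in place of a real fairseq model's parameters.
    params = [nn.Parameter(torch.zeros(8, 8))]

    # The same kwargs that the wrapper's optimizer_config property returns.
    optimizer = torch.optim.Adadelta(
        params,
        lr=args.lr[0],
        rho=args.adadelta_rho,
        eps=args.adadelta_eps,
        weight_decay=args.weight_decay,
    )

    # One throwaway update step to confirm the wiring.
    loss = params[0].sum()
    loss.backward()
    optimizer.step()

On the command line, the equivalent configuration would be selected with --optimizer adadelta plus the new --adadelta-rho/--adadelta-eps flags; the added test_optimizers test exercises that path (with the default values) alongside the other registered optimizers.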