v0.7.1: fix PyPI setup and tests

Summary: Pull Request resolved: https://github.com/pytorch/fairseq/pull/818

Differential Revision: D15916265

Pulled By: myleott

fbshipit-source-id: c66c0bd988d3472c4150226952f34ee8d4c3db86
This commit is contained in:
Myle Ott 2019-06-20 06:23:49 -07:00 committed by Facebook Github Bot
parent 9462a819e4
commit 881381cfc7
6 changed files with 34 additions and 24 deletions

View File

@@ -60,9 +60,9 @@ github_doc_root = 'https://github.com/pytorch/fairseq/tree/master/docs/'
# built documents. # built documents.
# #
# The short X.Y version. # The short X.Y version.
version = '0.7.0' version = '0.7.1'
# The full version, including alpha/beta/rc tags. # The full version, including alpha/beta/rc tags.
release = '0.7.0' release = '0.7.1'
# The language for content autogenerated by Sphinx. Refer to documentation # The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages. # for a list of supported languages.

View File

@@ -6,7 +6,7 @@
# can be found in the PATENTS file in the same directory. # can be found in the PATENTS file in the same directory.
__all__ = ['pdb'] __all__ = ['pdb']
__version__ = '0.7.0' __version__ = '0.7.1'
import fairseq.criterions import fairseq.criterions
import fairseq.models import fairseq.models

View File

View File

@@ -233,11 +233,11 @@ class Trainer(object):
# forward and backward pass # forward and backward pass
logging_outputs, sample_sizes, ooms = [], [], 0 logging_outputs, sample_sizes, ooms = [], [], 0
for i, sample in enumerate(samples): for i, sample in enumerate(samples):
sample = self._prepare_sample(sample, self.args.fp16) sample = self._prepare_sample(sample)
if sample is None: if sample is None:
# when sample is None, run forward/backward on a dummy batch # when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients # and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch, self.args.fp16) sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True ignore_grad = True
else: else:
ignore_grad = False ignore_grad = False
@@ -381,9 +381,9 @@ class Trainer(object):
self.model.eval() self.model.eval()
self.criterion.eval() self.criterion.eval()
sample = self._prepare_sample(sample, self.args.fp16) sample = self._prepare_sample(sample)
if sample is None: if sample is None:
sample = self._prepare_sample(self._dummy_batch, self.args.fp16) sample = self._prepare_sample(self._dummy_batch)
ignore_results = True ignore_results = True
else: else:
ignore_results = False ignore_results = False
@@ -488,7 +488,7 @@ class Trainer(object):
self._num_updates = num_updates self._num_updates = num_updates
self.lr_step_update() self.lr_step_update()
def _prepare_sample(self, sample, fp16): def _prepare_sample(self, sample):
if sample is None or len(sample) == 0: if sample is None or len(sample) == 0:
return None return None
@@ -500,7 +500,10 @@
return t.half() return t.half()
return t return t
return utils.apply(apply_half, sample) if fp16 else sample if self.args.fp16:
sample = utils.apply_to_sample(apply_half, sample)
return sample
def _set_seed(self): def _set_seed(self):
# Set seed based on args.seed and the update number so that we get # Set seed based on args.seed and the update number so that we get

View File

@@ -31,26 +31,32 @@ def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
) )
def apply(f, sample): def apply_to_sample(f, sample):
if len(sample) == 0: if len(sample) == 0:
return {} return {}
if torch.is_tensor(sample):
return f(sample) def _apply(x):
elif isinstance(sample, dict): if torch.is_tensor(x):
return { return f(x)
key: apply(f, value) elif isinstance(x, dict):
for key, value in sample.items() return {
} key: _apply(value)
elif isinstance(sample, list): for key, value in x.items()
return [apply(f, x) for x in sample] }
else: elif isinstance(x, list):
return sample return [_apply(x) for x in x]
else:
return x
return _apply(sample)
def move_to_cuda(sample): def move_to_cuda(sample):
def _move_to_cuda(tensor): def _move_to_cuda(tensor):
return tensor.cuda() return tensor.cuda()
return apply(_move_to_cuda, sample)
return apply_to_sample(_move_to_cuda, sample)
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0) INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)

View File

@@ -29,7 +29,7 @@ bleu = Extension(
setup( setup(
name='fairseq', name='fairseq',
version='0.7.0', version='0.7.1',
description='Facebook AI Research Sequence-to-Sequence Toolkit', description='Facebook AI Research Sequence-to-Sequence Toolkit',
url='https://github.com/pytorch/fairseq', url='https://github.com/pytorch/fairseq',
classifiers=[ classifiers=[
@@ -40,6 +40,7 @@ setup(
'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Scientific/Engineering :: Artificial Intelligence',
], ],
long_description=readme, long_description=readme,
long_description_content_type='text/markdown',
install_requires=[ install_requires=[
'cffi', 'cffi',
'numpy', 'numpy',