v0.7.1: fix PyPI setup and tests

Summary: Pull Request resolved: https://github.com/pytorch/fairseq/pull/818

Differential Revision: D15916265

Pulled By: myleott

fbshipit-source-id: c66c0bd988d3472c4150226952f34ee8d4c3db86
This commit is contained in:
Myle Ott 2019-06-20 06:23:49 -07:00 committed by Facebook Github Bot
parent 9462a819e4
commit 881381cfc7
6 changed files with 34 additions and 24 deletions

View File

@@ -60,9 +60,9 @@ github_doc_root = 'https://github.com/pytorch/fairseq/tree/master/docs/'
 # built documents.
 #
 # The short X.Y version.
-version = '0.7.0'
+version = '0.7.1'
 # The full version, including alpha/beta/rc tags.
-release = '0.7.0'
+release = '0.7.1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

View File

@@ -6,7 +6,7 @@
 # can be found in the PATENTS file in the same directory.
 
 __all__ = ['pdb']
-__version__ = '0.7.0'
+__version__ = '0.7.1'
 
 import fairseq.criterions
 import fairseq.models

View File

View File

@@ -233,11 +233,11 @@ class Trainer(object):
         # forward and backward pass
         logging_outputs, sample_sizes, ooms = [], [], 0
         for i, sample in enumerate(samples):
-            sample = self._prepare_sample(sample, self.args.fp16)
+            sample = self._prepare_sample(sample)
             if sample is None:
                 # when sample is None, run forward/backward on a dummy batch
                 # and ignore the resulting gradients
-                sample = self._prepare_sample(self._dummy_batch, self.args.fp16)
+                sample = self._prepare_sample(self._dummy_batch)
                 ignore_grad = True
             else:
                 ignore_grad = False
@@ -381,9 +381,9 @@ class Trainer(object):
         self.model.eval()
         self.criterion.eval()
 
-        sample = self._prepare_sample(sample, self.args.fp16)
+        sample = self._prepare_sample(sample)
         if sample is None:
-            sample = self._prepare_sample(self._dummy_batch, self.args.fp16)
+            sample = self._prepare_sample(self._dummy_batch)
             ignore_results = True
         else:
             ignore_results = False
@@ -488,7 +488,7 @@ class Trainer(object):
         self._num_updates = num_updates
         self.lr_step_update()
 
-    def _prepare_sample(self, sample, fp16):
+    def _prepare_sample(self, sample):
         if sample is None or len(sample) == 0:
             return None
 
@@ -500,7 +500,10 @@ class Trainer(object):
                 return t.half()
             return t
 
-        return utils.apply(apply_half, sample) if fp16 else sample
+        if self.args.fp16:
+            sample = utils.apply_to_sample(apply_half, sample)
+
+        return sample
 
     def _set_seed(self):
         # Set seed based on args.seed and the update number so that we get

View File

@@ -31,26 +31,32 @@ def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
     )
 
 
-def apply(f, sample):
+def apply_to_sample(f, sample):
     if len(sample) == 0:
         return {}
-    if torch.is_tensor(sample):
-        return f(sample)
-    elif isinstance(sample, dict):
-        return {
-            key: apply(f, value)
-            for key, value in sample.items()
-        }
-    elif isinstance(sample, list):
-        return [apply(f, x) for x in sample]
-    else:
-        return sample
+
+    def _apply(x):
+        if torch.is_tensor(x):
+            return f(x)
+        elif isinstance(x, dict):
+            return {
+                key: _apply(value)
+                for key, value in x.items()
+            }
+        elif isinstance(x, list):
+            return [_apply(x) for x in x]
+        else:
+            return x
+
+    return _apply(sample)
 
 
 def move_to_cuda(sample):
     def _move_to_cuda(tensor):
-        return tensor.cuda()
-    return apply(_move_to_cuda, sample)
+        return tensor.cuda()
+    return apply_to_sample(_move_to_cuda, sample)
 
 
 INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)

View File

@@ -29,7 +29,7 @@ bleu = Extension(
 
 setup(
     name='fairseq',
-    version='0.7.0',
+    version='0.7.1',
     description='Facebook AI Research Sequence-to-Sequence Toolkit',
     url='https://github.com/pytorch/fairseq',
     classifiers=[
@@ -40,6 +40,7 @@ setup(
         'Topic :: Scientific/Engineering :: Artificial Intelligence',
     ],
     long_description=readme,
+    long_description_content_type='text/markdown',
    install_requires=[
         'cffi',
         'numpy',