Fix inconsistency with recent PyTorch CUDA device logic

Summary: Pull Request resolved: https://github.com/fairinternal/fairseq-py/pull/892

Differential Revision: D18109685

Pulled By: jma127

fbshipit-source-id: f96e1080a5577b8ee0748dfdd956bf72bed47474
Commit: d0358bb38e (parent: 39faa0a419)
Author: Jerry Ma (2019-10-23 20:50:42 -07:00)
Committed by: Facebook Github Bot


@@ -330,7 +330,7 @@ class Trainer(object):
                 print(msg, file=sys.stderr)
                 if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
                     for device_idx in range(torch.cuda.device_count()):
-                        print(torch.cuda.memory_summary(device=torch.cuda.device(device_idx)),
+                        print(torch.cuda.memory_summary(device=device_idx),
                               file=sys.stderr)
                 sys.stderr.flush()
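
For context: torch.cuda.memory_summary expects a device index (int) or a torch.device, while torch.cuda.device(device_idx) constructs a context manager for switching the active device, which recent PyTorch no longer accepts as a device argument. A minimal sketch of the corrected pattern, assuming a PyTorch build that provides torch.cuda.memory_summary (hence the hasattr guard in the diff):

import sys
import torch

# Dump a per-device memory report to stderr, e.g. after an OOM error.
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
    for device_idx in range(torch.cuda.device_count()):
        # Pass the integer index directly; memory_summary resolves it
        # to the corresponding CUDA device.
        print(torch.cuda.memory_summary(device=device_idx), file=sys.stderr)
    sys.stderr.flush()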