mirror of
https://github.com/facebookresearch/fairseq.git
synced 2024-09-19 05:09:20 +03:00
Fix inconsistency with recent PyTorch CUDA device logic
Summary: Pull Request resolved: https://github.com/fairinternal/fairseq-py/pull/892

Differential Revision: D18109685

Pulled By: jma127

fbshipit-source-id: f96e1080a5577b8ee0748dfdd956bf72bed47474
This commit is contained in:
parent
39faa0a419
commit
d0358bb38e
@@ -330,7 +330,7 @@ class Trainer(object):
             print(msg, file=sys.stderr)
             if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
                 for device_idx in range(torch.cuda.device_count()):
-                    print(torch.cuda.memory_summary(device=torch.cuda.device(device_idx)),
+                    print(torch.cuda.memory_summary(device=device_idx),
                           file=sys.stderr)
             sys.stderr.flush()
Loading…
Reference in New Issue
Block a user