Check -gpu_ranks option to ensure saving a model (OpenNMT#1407)
* Check -gpu_ranks option to ensure saving a model
* Split condition to check -gpu_ranks inconsistency
dalgarak authored and vince62s committed Apr 27, 2019
1 parent dcaeb23 commit 624a0b3
Showing 1 changed file with 12 additions and 3 deletions.
15 changes: 12 additions & 3 deletions onmt/utils/parse.py
@@ -81,15 +81,24 @@ def ckpt_model_opts(cls, ckpt_opt):
     def validate_train_opts(cls, opt):
         if opt.epochs:
             raise AssertionError(
-                "-epochs is deprecated please use -train_steps.")
+                  "-epochs is deprecated please use -train_steps.")
         if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
             raise AssertionError("BPTT is not compatible with -accum > 1")
         if opt.gpuid:
-            raise AssertionError("gpuid is deprecated \
-                see world_size and gpu_ranks")
+            raise AssertionError(
+                  "gpuid is deprecated see world_size and gpu_ranks")
         if torch.cuda.is_available() and not opt.gpu_ranks:
             logger.info("WARNING: You have a CUDA device, \
                         should run with -gpu_ranks")
+        if opt.world_size < len(opt.gpu_ranks):
+            raise AssertionError(
+                  "parameter counts of -gpu_ranks must be less or equal "
+                  "than -world_size.")
+        if opt.world_size == len(opt.gpu_ranks) and \
+                min(opt.gpu_ranks) > 0:
+            raise AssertionError(
+                  "-gpu_ranks should have master(=0) rank "
+                  "unless -world_size is greater than len(gpu_ranks).")
 
     @classmethod
     def validate_translate_opts(cls, opt):
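The two added checks enforce a consistent multi-GPU configuration before training starts: -world_size must be at least as large as the number of entries in -gpu_ranks, and when the two are equal, the master rank 0 must appear in -gpu_ranks; per the commit title, a run without the master rank would otherwise never save a model. A minimal sketch of that logic, assuming a hypothetical check_gpu_ranks helper and an argparse.Namespace standing in for OpenNMT's parsed options:

```python
from argparse import Namespace


def check_gpu_ranks(opt):
    # Standalone copy of the two validations this commit adds to
    # validate_train_opts in onmt/utils/parse.py (the helper name
    # and the Namespace harness are illustrative, not from the repo).
    if opt.world_size < len(opt.gpu_ranks):
        raise AssertionError(
            "parameter counts of -gpu_ranks must be less or equal "
            "than -world_size.")
    if opt.world_size == len(opt.gpu_ranks) and min(opt.gpu_ranks) > 0:
        raise AssertionError(
            "-gpu_ranks should have master(=0) rank "
            "unless -world_size is greater than len(gpu_ranks).")


# Exercise the three interesting cases.
for ws, ranks in [(2, [0, 1]),   # valid: master rank 0 is present
                  (1, [0, 1]),   # rejected: more gpu_ranks than world_size
                  (2, [1, 2])]:  # rejected: sizes match but rank 0 missing
    try:
        check_gpu_ranks(Namespace(world_size=ws, gpu_ranks=ranks))
        print(f"world_size={ws}, gpu_ranks={ranks}: ok")
    except AssertionError as err:
        print(f"world_size={ws}, gpu_ranks={ranks}: {err}")
```

In CLI terms: -world_size 2 -gpu_ranks 0 1 passes validation, while -world_size 2 -gpu_ranks 1 2 is rejected, since none of the listed ranks is the master rank that the commit title's "ensure saving a model" refers to.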
