Skip to content
This repository was archived by the owner on Jul 7, 2023. It is now read-only.

Use xrange from six to fix Python 3 support #1468

Merged
merged 1 commit into from
Mar 1, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion tensor2tensor/data_generators/all_problems.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from __future__ import print_function

import importlib
from six.moves import range # pylint: disable=redefined-builtin

MODULES = [
"tensor2tensor.data_generators.algorithmic",
Expand Down Expand Up @@ -97,7 +98,7 @@

def _is_import_err_msg(err_str, module):
parts = module.split(".")
suffixes = [".".join(parts[i:]) for i in xrange(len(parts))]
suffixes = [".".join(parts[i:]) for i in range(len(parts))]
return err_str in (
["No module named %s" % suffix for suffix in suffixes] +
["No module named '%s'" % suffix for suffix in suffixes])
Expand Down
4 changes: 2 additions & 2 deletions tensor2tensor/data_generators/generator_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -969,11 +969,11 @@ def random_deinterleave(text, separator_symbol="X"):
cut = [False] * n
cut[0] = True
num_cuts = int(math.exp(random.uniform(0, math.log(n))))
for _ in xrange(num_cuts):
for _ in range(num_cuts):
cut[random.randint(1, n -1)] = True
out = [[], []]
part = random.randint(0, 1)
for i in xrange(n):
for i in range(n):
if cut[i]:
out[part].append(separator_symbol)
part = 1 - part
Expand Down
6 changes: 3 additions & 3 deletions tensor2tensor/data_generators/transduction_problems.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
import os
import random

from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves import range # pylint: disable=redefined-builtin

from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
Expand Down Expand Up @@ -123,7 +123,7 @@ def sequence_length(self, dataset_split):
self.max_sequence_length(dataset_split))

def build_vocab(self):
  """Build the symbol vocabulary for this transduction problem.

  Returns:
    list of str: tokens "sym_1" .. "sym_<num_symbols>", one per symbol.
    Uses the builtin `range` so the code runs on both Python 2 and 3
    (bare `xrange` does not exist on Python 3).
  """
  return ["sym_%d" % i for i in range(1, self.num_symbols + 1)]

def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
vocab_filename = os.path.join(data_dir, self.vocab_filename)
Expand All @@ -144,7 +144,7 @@ def transpose_sequence(self, input_sequence):
raise NotImplementedError()

def generate_samples(self, data_dir, tmp_dir, dataset_split):
for _ in xrange(self.num_samples(dataset_split)):
for _ in range(self.num_samples(dataset_split)):
source = self.generate_random_sequence(dataset_split)
target = self.transpose_sequence(source)
yield {
Expand Down
4 changes: 2 additions & 2 deletions tensor2tensor/models/research/vqa_self_attention.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from __future__ import division
from __future__ import print_function

from six.moves import xrange
from six.moves import range # pylint: disable=redefined-builtin

from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
Expand Down Expand Up @@ -657,7 +657,7 @@ def iterative_encoder_decoder(encoder_input,
query,
hparams):
"""Iterative encoder decoder."""
for _ in xrange(hparams.num_rec_steps):
for _ in range(hparams.num_rec_steps):
with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
encoder_output = image_question_encoder(
encoder_input,
Expand Down