Revert "isort"
This reverts commit 3f03344.
sshleifer committed Feb 10, 2020
1 parent 3f03344 commit 808bbd5
Showing 135 changed files with 281 additions and 1,210 deletions.
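
Both import styles that this revert toggles between appear verbatim in the hunks below; as a reading aid, the sketch pairs them with the isort options that plausibly produced them. The option names (multi_line_output, force_grid_wrap, include_trailing_comma, line_length) are real isort settings, but which values the reverted commit 3f03344 actually used is an assumption, not something recorded on this page.

# Reverted style: isort "vertical hanging indent" wrapping, as produced by
# settings along the lines of multi_line_output = 3 with include_trailing_comma
# and a non-zero force_grid_wrap (assumed values, for illustration only).
from transformers import (
    AutoConfig,
    AutoTokenizer,
    is_tf_available,
    is_torch_available,
)

# Restored style: one import statement per module on a single line, wrapped
# only when it exceeds the configured line_length.
from transformers import AutoConfig, AutoTokenizer, is_tf_available, is_torch_available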
9 changes: 2 additions & 7 deletions examples/benchmarks.py
@@ -22,15 +22,10 @@
 import csv
 import timeit
 from time import time
-
-from transformers import (
-    AutoConfig,
-    AutoTokenizer,
-    is_tf_available,
-    is_torch_available,
-)
 from typing import List
+
+from transformers import AutoConfig, AutoTokenizer, is_tf_available, is_torch_available
 
 
 if is_tf_available():
     import tensorflow as tf
12 changes: 2 additions & 10 deletions examples/contrib/run_openai_gpt.py
@@ -35,17 +35,9 @@
 
 import numpy as np
 import torch
-from torch.utils.data import (
-    DataLoader,
-    RandomSampler,
-    SequentialSampler,
-    TensorDataset,
-)
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
+from tqdm import tqdm, trange
 
-from tqdm import (
-    tqdm,
-    trange,
-)
 from transformers import (
     CONFIG_NAME,
     WEIGHTS_NAME,
12 changes: 2 additions & 10 deletions examples/contrib/run_swag.py
@@ -27,18 +27,10 @@
 
 import numpy as np
 import torch
-from torch.utils.data import (
-    DataLoader,
-    RandomSampler,
-    SequentialSampler,
-    TensorDataset,
-)
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
 from torch.utils.data.distributed import DistributedSampler
+from tqdm import tqdm, trange
 
-from tqdm import (
-    tqdm,
-    trange,
-)
 from transformers import (
     WEIGHTS_NAME,
     AdamW,
5 changes: 1 addition & 4 deletions examples/contrib/run_transfo_xl.py
@@ -28,10 +28,7 @@
 
 import torch
 
-from transformers import (
-    TransfoXLCorpus,
-    TransfoXLLMHeadModel,
-)
+from transformers import TransfoXLCorpus, TransfoXLLMHeadModel
 
 
 logging.basicConfig(
13 changes: 3 additions & 10 deletions examples/distillation/distiller.py
@@ -24,19 +24,12 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.optim import AdamW
-from torch.utils.data import (
-    BatchSampler,
-    DataLoader,
-    RandomSampler,
-)
+from torch.utils.data import BatchSampler, DataLoader, RandomSampler
 from torch.utils.data.distributed import DistributedSampler
+from tqdm import tqdm
 
-from grouped_batch_sampler import (
-    GroupedBatchSampler,
-    create_lengths_groups,
-)
+from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups
 from lm_seqs_dataset import LmSeqsDataset
-from tqdm import tqdm
 from transformers import get_linear_schedule_with_warmup
 from utils import logger
 
5 changes: 1 addition & 4 deletions examples/distillation/grouped_batch_sampler.py
@@ -19,10 +19,7 @@
 from collections import defaultdict
 
 import numpy as np
-from torch.utils.data.sampler import (
-    BatchSampler,
-    Sampler,
-)
+from torch.utils.data.sampler import BatchSampler, Sampler
 
 from utils import logger
 
17 changes: 3 additions & 14 deletions examples/distillation/run_squad_w_distillation.py
@@ -26,17 +26,10 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from torch.utils.data import (
-    DataLoader,
-    RandomSampler,
-    SequentialSampler,
-)
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
 from torch.utils.data.distributed import DistributedSampler
+from tqdm import tqdm, trange
 
-from tqdm import (
-    tqdm,
-    trange,
-)
 from transformers import (
     WEIGHTS_NAME,
     AdamW,
@@ -60,11 +53,7 @@
     compute_predictions_logits,
     squad_evaluate,
 )
-from transformers.data.processors.squad import (
-    SquadResult,
-    SquadV1Processor,
-    SquadV2Processor,
-)
+from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor
 
 
 try:
6 changes: 1 addition & 5 deletions examples/distillation/scripts/binarized_data.py
@@ -23,11 +23,7 @@
 
 import numpy as np
 
-from transformers import (
-    BertTokenizer,
-    GPT2Tokenizer,
-    RobertaTokenizer,
-)
+from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
 
 
 logging.basicConfig(
5 changes: 1 addition & 4 deletions examples/distillation/scripts/extract.py
@@ -20,10 +20,7 @@
 
 import torch
 
-from transformers import (
-    GPT2LMHeadModel,
-    RobertaForMaskedLM,
-)
+from transformers import GPT2LMHeadModel, RobertaForMaskedLM
 
 
 if __name__ == "__main__":
7 changes: 1 addition & 6 deletions examples/distillation/train.py
@@ -41,12 +41,7 @@
     RobertaForMaskedLM,
     RobertaTokenizer,
 )
-from utils import (
-    git_log,
-    init_gpu_params,
-    logger,
-    set_seed,
-)
+from utils import git_log, init_gpu_params, logger, set_seed
 
 
 MODEL_CLASSES = {
6 changes: 1 addition & 5 deletions examples/hans/hans_processors.py
@@ -19,11 +19,7 @@
 import os
 
 from transformers.file_utils import is_tf_available
-from utils_hans import (
-    DataProcessor,
-    InputExample,
-    InputFeatures,
-)
+from utils_hans import DataProcessor, InputExample, InputFeatures
 
 
 if is_tf_available():
18 changes: 3 additions & 15 deletions examples/hans/test_hans.py
@@ -15,11 +15,7 @@
 # limitations under the License.
 """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
 
-from __future__ import (
-    absolute_import,
-    division,
-    print_function,
-)
+from __future__ import absolute_import, division, print_function
 
 import argparse
 import glob
@@ -29,21 +25,13 @@
 
 import numpy as np
 import torch
-from torch.utils.data import (
-    DataLoader,
-    RandomSampler,
-    SequentialSampler,
-    TensorDataset,
-)
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
 from torch.utils.data.distributed import DistributedSampler
+from tqdm import tqdm, trange
 
 from hans_processors import glue_output_modes as output_modes
 from hans_processors import glue_processors as processors
 from hans_processors import hans_convert_examples_to_features as convert_examples_to_features
-from tqdm import (
-    tqdm,
-    trange,
-)
 from transformers import (
     WEIGHTS_NAME,
     AdamW,
19 changes: 3 additions & 16 deletions examples/mm-imdb/run_mmimdb.py
@@ -27,17 +27,10 @@
 import torch
 import torch.nn as nn
 from sklearn.metrics import f1_score
-from torch.utils.data import (
-    DataLoader,
-    RandomSampler,
-    SequentialSampler,
-)
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
 from torch.utils.data.distributed import DistributedSampler
+from tqdm import tqdm, trange
 
-from tqdm import (
-    tqdm,
-    trange,
-)
 from transformers import (
     WEIGHTS_NAME,
     AdamW,
@@ -63,13 +56,7 @@
     XLNetTokenizer,
     get_linear_schedule_with_warmup,
 )
-from utils_mmimdb import (
-    ImageEncoder,
-    JsonlDataset,
-    collate_fn,
-    get_image_transforms,
-    get_mmimdb_labels,
-)
+from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels
 
 
 try:
9 changes: 2 additions & 7 deletions examples/pplm/run_pplm.py
@@ -26,23 +26,18 @@
 import argparse
 import json
 from operator import add
+from typing import List, Optional, Tuple, Union
 
 import numpy as np
 import torch
 import torch.nn.functional as F
 from torch.autograd import Variable
+from tqdm import trange
 
 from pplm_classification_head import ClassificationHead
-from tqdm import trange
 from transformers import GPT2Tokenizer
 from transformers.file_utils import cached_path
 from transformers.modeling_gpt2 import GPT2LMHeadModel
-from typing import (
-    List,
-    Optional,
-    Tuple,
-    Union,
-)
 
 
 PPLM_BOW = 1
10 changes: 2 additions & 8 deletions examples/pplm/run_pplm_discrim_train.py
@@ -29,16 +29,10 @@
 from nltk.tokenize.treebank import TreebankWordDetokenizer
 from torchtext import data as torchtext_data
 from torchtext import datasets
+from tqdm import tqdm, trange
 
 from pplm_classification_head import ClassificationHead
-from tqdm import (
-    tqdm,
-    trange,
-)
-from transformers import (
-    GPT2LMHeadModel,
-    GPT2Tokenizer,
-)
+from transformers import GPT2LMHeadModel, GPT2Tokenizer
 
 
 torch.manual_seed(0)
15 changes: 3 additions & 12 deletions examples/run_bertology.py
@@ -26,20 +26,11 @@
 
 import numpy as np
 import torch
-from torch.utils.data import (
-    DataLoader,
-    SequentialSampler,
-    Subset,
-)
+from torch.utils.data import DataLoader, SequentialSampler, Subset
 from torch.utils.data.distributed import DistributedSampler
 
-from run_glue import (
-    ALL_MODELS,
-    MODEL_CLASSES,
-    load_and_cache_examples,
-    set_seed,
-)
 from tqdm import tqdm
 
+from run_glue import ALL_MODELS, MODEL_CLASSES, load_and_cache_examples, set_seed
 from transformers import glue_compute_metrics as compute_metrics
 from transformers import glue_output_modes as output_modes
 from transformers import glue_processors as processors
20 changes: 6 additions & 14 deletions examples/run_glue.py
@@ -25,22 +25,10 @@
 
 import numpy as np
 import torch
-from torch.utils.data import (
-    DataLoader,
-    RandomSampler,
-    SequentialSampler,
-    TensorDataset,
-)
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
 from torch.utils.data.distributed import DistributedSampler
+from tqdm import tqdm, trange
 
-from tqdm import (
-    tqdm,
-    trange,
-)
-from transformers import glue_compute_metrics as compute_metrics
-from transformers import glue_convert_examples_to_features as convert_examples_to_features
-from transformers import glue_output_modes as output_modes
-from transformers import glue_processors as processors
 from transformers import (
     WEIGHTS_NAME,
     AdamW,
@@ -70,6 +58,10 @@
     XLNetTokenizer,
     get_linear_schedule_with_warmup,
 )
+from transformers import glue_compute_metrics as compute_metrics
+from transformers import glue_convert_examples_to_features as convert_examples_to_features
+from transformers import glue_output_modes as output_modes
+from transformers import glue_processors as processors
 
 
 try: