Commit 01bfb22

[CI] Try introducing isort. (vllm-project#3495)
1 parent e67c295 commit 01bfb22

144 files changed: +506 -459 lines changed


.github/workflows/ruff.yml (+5 -2)

@@ -25,10 +25,13 @@ jobs:
     - name: Install dependencies
       run: |
        python -m pip install --upgrade pip
-        pip install ruff==0.1.5 codespell==2.2.6 tomli==2.0.1
+        pip install ruff==0.1.5 codespell==2.2.6 tomli==2.0.1 isort==5.13.2
     - name: Analysing the code with ruff
       run: |
        ruff .
     - name: Spelling check with codespell
       run: |
-        codespell --toml pyproject.toml
+        codespell --toml pyproject.toml
+    - name: Run isort
+      run: |
+        isort . --check-only
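
For reviewers new to the tool: `isort . --check-only` only reports files whose imports are out of order (failing the job); it rewrites nothing. The ordering it enforces is the one visible throughout the hunks below, sketched roughly here with imports borrowed from the repo, grouped into standard-library, third-party, and first-party sections, with plain `import` statements ahead of `from` imports in each group:

# Standard library, alphabetized.
import argparse
import time
from typing import List

# Third-party packages.
import torch
from tqdm import tqdm

# First-party code (vllm itself).
from vllm import LLM, SamplingParams

Running `isort .` without the flag (or `./format.sh`, updated below) applies the same ordering in place.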

benchmarks/benchmark_prefix_caching.py (+1 -2)

@@ -1,8 +1,7 @@
 import argparse
 import time
 
-from vllm import LLM
-from vllm import SamplingParams
+from vllm import LLM, SamplingParams
 
PROMPT = "You are a helpful assistant in recognizes the content of tables in markdown format. Here is a table as fellows. You need to answer my question about the table.\n# Table\n|Opening|Opening|Sl. No.|Film|Cast|Director|Music Director|Notes|\n|----|----|----|----|----|----|----|----|\n|J A N|9|1|Agni Pushpam|Jayabharathi, Kamalahasan|Jeassy|M. K. Arjunan||\n|J A N|16|2|Priyamvada|Mohan Sharma, Lakshmi, KPAC Lalitha|K. S. Sethumadhavan|V. Dakshinamoorthy||\n|J A N|23|3|Yakshagaanam|Madhu, Sheela|Sheela|M. S. Viswanathan||\n|J A N|30|4|Paalkkadal|Sheela, Sharada|T. K. Prasad|A. T. Ummer||\n|F E B|5|5|Amma|Madhu, Srividya|M. Krishnan Nair|M. K. Arjunan||\n|F E B|13|6|Appooppan|Thikkurissi Sukumaran Nair, Kamal Haasan|P. Bhaskaran|M. S. Baburaj||\n|F E B|20|7|Srishti|Chowalloor Krishnankutty, Ravi Alummoodu|K. T. Muhammad|M. S. Baburaj||\n|F E B|20|8|Vanadevatha|Prem Nazir, Madhubala|Yusufali Kechery|G. Devarajan||\n|F E B|27|9|Samasya|Madhu, Kamalahaasan|K. Thankappan|Shyam||\n|F E B|27|10|Yudhabhoomi|K. P. Ummer, Vidhubala|Crossbelt Mani|R. K. Shekhar||\n|M A R|5|11|Seemantha Puthran|Prem Nazir, Jayabharathi|A. B. Raj|M. K. Arjunan||\n|M A R|12|12|Swapnadanam|Rani Chandra, Dr. Mohandas|K. G. George|Bhaskar Chandavarkar||\n|M A R|19|13|Thulavarsham|Prem Nazir, sreedevi, Sudheer|N. Sankaran Nair|V. Dakshinamoorthy||\n|M A R|20|14|Aruthu|Kaviyoor Ponnamma, Kamalahasan|Ravi|G. Devarajan||\n|M A R|26|15|Swimming Pool|Kamal Haasan, M. G. Soman|J. Sasikumar|M. K. Arjunan||\n\n# Question\nWhat' s the content in the (1,1) cells\n" # noqa: E501

benchmarks/benchmark_serving.py (+3 -6)

@@ -25,15 +25,12 @@
 from typing import AsyncGenerator, List, Tuple
 
 import numpy as np
+from backend_request_func import (ASYNC_REQUEST_FUNCS, RequestFuncInput,
+                                  RequestFuncOutput)
 from tqdm.asyncio import tqdm
 from transformers import PreTrainedTokenizerBase
-from vllm.transformers_utils.tokenizer import get_tokenizer
 
-from backend_request_func import (
-    ASYNC_REQUEST_FUNCS,
-    RequestFuncInput,
-    RequestFuncOutput,
-)
+from vllm.transformers_utils.tokenizer import get_tokenizer
 
 
 @dataclass
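
A side note on the hunk above: `backend_request_func` is a helper module that sits next to this benchmark script, but nothing in the isort configuration marks it as first-party, so it is sorted into the third-party group alongside numpy and tqdm. If that grouping ever needs correcting, isort's `known_first_party` setting could pin it; a hedged sketch of the resulting order, not something this commit configures:

import numpy as np
from tqdm.asyncio import tqdm

# Hypothetical: with backend_request_func declared first-party, isort would
# move it down into the local group next to the vllm imports instead.
from backend_request_func import (ASYNC_REQUEST_FUNCS, RequestFuncInput,
                                  RequestFuncOutput)
from vllm.transformers_utils.tokenizer import get_tokenizer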

benchmarks/benchmark_throughput.py (+1 -1)

@@ -6,9 +6,9 @@
 from typing import List, Optional, Tuple
 
 import torch
+from tqdm import tqdm
 from transformers import (AutoModelForCausalLM, AutoTokenizer,
                           PreTrainedTokenizerBase)
-from tqdm import tqdm
 
 
 def sample_requests(

benchmarks/kernels/benchmark_mixtral_moe.py (+3 -1)

@@ -2,11 +2,13 @@
 import os
 import sys
 
-from vllm.model_executor.layers.fused_moe import fused_moe, get_config_file_name
 import torch
 import torch.nn.functional as F
 import triton
 
+from vllm.model_executor.layers.fused_moe import (fused_moe,
+                                                  get_config_file_name)
+
 os.environ['CUDA_VISIBLE_DEVICES'] = '0'

benchmarks/kernels/benchmark_paged_attention.py (+2 -2)

@@ -1,12 +1,12 @@
-from typing import Optional
 import argparse
 import random
 import time
+from typing import Optional
 
 import torch
 
-from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, create_kv_caches_with_random
 from vllm._C import ops
+from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, create_kv_caches_with_random
 
 NUM_BLOCKS = 1024
 PARTITION_SIZE = 512

benchmarks/kernels/benchmark_rope.py (+4 -3)

@@ -1,9 +1,10 @@
+import argparse
+from itertools import accumulate
 from typing import Optional
 
-import argparse
-import torch
 import nvtx
-from itertools import accumulate
+import torch
+
 from vllm.model_executor.layers.rotary_embedding import get_rope

cmake/hipify.py (+1 -1)

@@ -9,8 +9,8 @@
 #
 
 import argparse
-import shutil
 import os
+import shutil
 
 from torch.utils.hipify.hipify_python import hipify

collect_env.py (+1 -1)

@@ -6,10 +6,10 @@
 # Run it with `python collect_env.py` or `python -m torch.utils.collect_env`
 import datetime
 import locale
+import os
 import re
 import subprocess
 import sys
-import os
 from collections import namedtuple
 
 try:

docs/source/conf.py (+2 -1)

@@ -10,10 +10,11 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 
+import logging
 import os
 import sys
+
 from sphinx.ext import autodoc
-import logging
 
 sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))

examples/gradio_openai_chatbot_webserver.py (+2 -1)

@@ -1,6 +1,7 @@
 import argparse
-from openai import OpenAI
+
 import gradio as gr
+from openai import OpenAI
 
 # Argument parser setup
 parser = argparse.ArgumentParser(

examples/llm_engine_example.py (+1 -1)

@@ -1,7 +1,7 @@
 import argparse
 from typing import List, Tuple
 
-from vllm import EngineArgs, LLMEngine, SamplingParams, RequestOutput
+from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams
 
 
 def create_test_prompts() -> List[Tuple[str, SamplingParams]]:

examples/multilora_inference.py (+2 -2)

@@ -5,11 +5,11 @@
 Requires HuggingFace credentials for access to Llama2.
 """
 
-from typing import Optional, List, Tuple
+from typing import List, Optional, Tuple
 
 from huggingface_hub import snapshot_download
 
-from vllm import EngineArgs, LLMEngine, SamplingParams, RequestOutput
+from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams
 from vllm.lora.request import LoRARequest

examples/offline_inference_distributed.py (+3 -1)

@@ -5,11 +5,13 @@
 Learn more about Ray Data in https://docs.ray.io/en/latest/data/data.html
 """
 
-from vllm import LLM, SamplingParams
 from typing import Dict
+
 import numpy as np
 import ray
 
+from vllm import LLM, SamplingParams
+
 # Create a sampling params object.
 sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

format.sh (+42)

@@ -25,6 +25,7 @@ YAPF_VERSION=$(yapf --version | awk '{print $2}')
 RUFF_VERSION=$(ruff --version | awk '{print $2}')
 MYPY_VERSION=$(mypy --version | awk '{print $2}')
 CODESPELL_VERSION=$(codespell --version)
+ISORT_VERSION=$(isort --vn)
 
 # # params: tool name, tool version, required version
 tool_version_check() {
@@ -37,6 +38,7 @@ tool_version_check() {
 tool_version_check "yapf" $YAPF_VERSION "$(grep yapf requirements-dev.txt | cut -d'=' -f3)"
 tool_version_check "ruff" $RUFF_VERSION "$(grep "ruff==" requirements-dev.txt | cut -d'=' -f3)"
 tool_version_check "mypy" "$MYPY_VERSION" "$(grep mypy requirements-dev.txt | cut -d'=' -f3)"
+tool_version_check "isort" "$ISORT_VERSION" "$(grep isort requirements-dev.txt | cut -d'=' -f3)"
 tool_version_check "codespell" "$CODESPELL_VERSION" "$(grep codespell requirements-dev.txt | cut -d'=' -f3)"
 
 YAPF_FLAGS=(
@@ -178,6 +180,46 @@ else
     lint_changed
 fi
 
+# Sort imports in the specified files.
+isort_check() {
+    isort "$@"
+}
+
+isort_check_all(){
+    isort .
+}
+
+# Sort imports in the files that differ from main branch.
+isort_check_changed() {
+    # The `if` guard ensures that the list of filenames is not empty, which
+    # could cause isort to receive 0 positional arguments, making it hang
+    # waiting for STDIN.
+    #
+    # `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that
+    # exist on both branches.
+    MERGEBASE="$(git merge-base origin/main HEAD)"
+
+    if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then
+        git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \
+            isort
+    fi
+}
+
+# Run isort.
+# This flag sorts imports in individual files. --files *must* be the first command line
+# arg to use this option.
+if [[ "$1" == '--files' ]]; then
+    isort_check "${@:2}"
+# If `--all` is passed, then any further arguments are ignored and the
+# entire python directory is linted.
+elif [[ "$1" == '--all' ]]; then
+    isort_check_all
+else
+    # Check only the files that changed in the last commit.
+    isort_check_changed
+fi
+echo 'vLLM isort: Done'
+
 if ! git diff --quiet &>/dev/null; then
     echo 'Reformatted files. Please review and stage the changes.'
     echo 'Changes not staged for commit:'
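
Taken together, the new functions give format.sh the same three modes for isort that it already has for the other tools: `./format.sh --files <paths...>` sorts imports only in the named files, `./format.sh --all` runs `isort .` over the whole tree, and a plain `./format.sh` touches only Python files that differ from origin/main. Any files isort rewrites are left unstaged, so the `git diff` check at the end of the script will flag them for review.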

pyproject.toml (+4)

@@ -51,3 +51,7 @@ exclude = "vllm/model_executor/parallel_utils/|vllm/model_executor/models/"
 [tool.codespell]
 ignore-words-list = "dout, te, indicies"
 skip = "./tests/prompts"
+
+[tool.isort]
+use_parentheses = true
+skip_gitignore = true
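
For reference, the two options do the following: `use_parentheses` makes isort wrap imports that exceed the line-length limit with parentheses instead of backslash continuations, and `skip_gitignore` makes it skip anything excluded by .gitignore. A minimal sketch of the wrapping difference, using an import from this commit and assuming it needs to wrap:

# use_parentheses = false: isort breaks long imports with a backslash.
from vllm.model_executor.layers.fused_moe import fused_moe, \
    get_config_file_name

# use_parentheses = true (as configured here): parentheses, matching the
# style seen throughout this commit.
from vllm.model_executor.layers.fused_moe import (fused_moe,
                                                  get_config_file_name)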

requirements-dev.txt (+1)

@@ -4,6 +4,7 @@ toml==0.10.2
 tomli==2.0.1
 ruff==0.1.5
 codespell==2.2.6
+isort==5.13.2
 
 # type checking
 mypy==0.991

setup.py (+5 -5)

@@ -1,16 +1,16 @@
 import io
+import logging
 import os
 import re
-import logging
 import subprocess
 import sys
+from shutil import which
 from typing import List
 
-from packaging.version import parse, Version
-from setuptools import setup, find_packages, Extension
-from setuptools.command.build_ext import build_ext
-from shutil import which
 import torch
+from packaging.version import Version, parse
+from setuptools import Extension, find_packages, setup
+from setuptools.command.build_ext import build_ext
 from torch.utils.cpp_extension import CUDA_HOME
 
 ROOT_DIR = os.path.dirname(__file__)

tests/async_engine/test_chat_template.py (+3 -3)

@@ -1,12 +1,12 @@
-from dataclasses import dataclass
 import os
 import pathlib
+from dataclasses import dataclass
 
 import pytest
 
-from vllm.transformers_utils.tokenizer import get_tokenizer
-from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
 from vllm.entrypoints.openai.protocol import ChatCompletionRequest
+from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
+from vllm.transformers_utils.tokenizer import get_tokenizer
 
 chatml_jinja_path = pathlib.Path(os.path.dirname(os.path.abspath(
     __file__))).parent.parent / "examples/template_chatml.jinja"

tests/conftest.py (+1 -1)

@@ -6,8 +6,8 @@
 from transformers import AutoModelForCausalLM
 
 from vllm import LLM, SamplingParams
-from vllm.transformers_utils.tokenizer import get_tokenizer
 from vllm.config import TokenizerPoolConfig
+from vllm.transformers_utils.tokenizer import get_tokenizer
 
 _TEST_DIR = os.path.dirname(__file__)
 _TEST_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "example.txt")]

tests/core/test_block_manager.py (+5 -4)

@@ -1,13 +1,14 @@
-import pytest
 import time
 from typing import List
 
+import pytest
+
 from vllm import SamplingParams
 from vllm.block import PhysicalTokenBlock
-from vllm.core.block_manager import (UncachedBlockAllocator, BlockSpaceManager,
-                                     AllocStatus)
+from vllm.core.block_manager import (AllocStatus, BlockSpaceManager,
+                                     UncachedBlockAllocator)
+from vllm.sequence import Logprob, Sequence, SequenceGroup, SequenceStatus
 from vllm.utils import Device
-from vllm.sequence import Sequence, SequenceGroup, SequenceStatus, Logprob
 
 from .utils import create_dummy_prompt

tests/core/test_scheduler.py (+3 -2)

@@ -1,10 +1,11 @@
+import time
 from typing import List
+
 import pytest  # noqa
-import time
 
 from vllm.config import CacheConfig, SchedulerConfig
 from vllm.core.scheduler import Scheduler
-from vllm.sequence import SequenceGroup, Logprob
+from vllm.sequence import Logprob, SequenceGroup
 
 from .utils import create_dummy_prompt
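
Worth noting: the `# noqa` on `import pytest` above is a ruff/flake8 marker and, by default, isort ignores it. When an import's position is genuinely load-bearing, isort has its own action comments; a small sketch with a hypothetical plugin path, not taken from this repository:

import sys

sys.path.insert(0, "/opt/hypothetical/plugins")  # hypothetical path
# This import must stay below the sys.path tweak, so exempt it from sorting:
import hypothetical_plugin  # isort: skip

A module-level `# isort: skip_file` comment exempts an entire file.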

tests/distributed/test_comm_ops.py (+3 -5)

@@ -3,14 +3,12 @@
 Run `pytest tests/distributed/test_comm_ops.py --forked`.
 """
 import pytest
-import torch
 import ray
+import torch
 
 from vllm.model_executor.parallel_utils.communication_op import (
-    tensor_model_parallel_all_reduce,
-    tensor_model_parallel_all_gather,
-    broadcast_tensor_dict,
-)
+    broadcast_tensor_dict, tensor_model_parallel_all_gather,
+    tensor_model_parallel_all_reduce)
 from vllm.test_utils import (init_test_distributed_environment,
                              multi_process_tensor_parallel)

tests/distributed/test_custom_all_reduce.py (+1 -1)

@@ -1,6 +1,6 @@
+import os
 import random
 
-import os
 import pytest
 import ray
 import torch

tests/entrypoints/test_guided_processors.py (+3 -3)

@@ -1,11 +1,11 @@
 # This unit test should be moved to a new
 # tests/test_guided_decoding directory.
 
-from transformers import AutoTokenizer
 import torch
+from transformers import AutoTokenizer
 
-from vllm.model_executor.guided_logits_processors import (RegexLogitsProcessor,
-                                                          JSONLogitsProcessor)
+from vllm.model_executor.guided_logits_processors import (JSONLogitsProcessor,
+                                                          RegexLogitsProcessor)
 
 TEST_SCHEMA = {
     "type": "object",
