# setup.py
import subprocess
import os
from packaging.version import parse, Version
from pathlib import Path
from setuptools import setup, find_packages
from torch.utils.cpp_extension import (
BuildExtension,
CUDAExtension,
CUDA_HOME,
)
# package name managed by pip; the install can be removed with `pip uninstall tiny_attention_cutlass`
PACKAGE_NAME = "tiny_attention_cutlass"
ext_modules = []
generator_flag = []
cc_flag = []
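# always generate device code for Ampere (compute capability 8.0, e.g. A100)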
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
# helper function to get cuda version
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_version = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_version
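# For example, `nvcc -V` output containing "Cuda compilation tools, release 12.1, V12.1.105"
# yields bare_metal_version == Version("12.1").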
if CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version >= Version("11.8"):
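        # CUDA 11.8 is the first toolkit release that can target Hopper (compute capability 9.0, e.g. H100)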
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
# ninja build does not work unless include_dirs are absolute paths
this_dir = os.path.dirname(os.path.abspath(__file__))
# cuda module
ext_modules.append(
CUDAExtension(
# package name for import
name="attention_cutlass",
sources=[
"csrc/attention_api.cpp",
"csrc/flash_attention.cu",
"csrc/flash_api.cpp",
],
extra_compile_args={
            # add C++ (host) compile flags
"cxx": ["-O3", "-std=c++17"] + generator_flag,
# add nvcc compile flags
"nvcc": [
"-O3",
"-std=c++17",
"-U__CUDA_NO_HALF_OPERATORS__",
"--use_fast_math",
"-lineinfo",
"--ptxas-options=-v",
"--ptxas-options=-O2",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_HALF2_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
]
+ generator_flag
+ cc_flag,
},
include_dirs=[
Path(this_dir) / "csrc",
Path(this_dir) / "include",
Path(this_dir) / "deps/cutlass/include",
Path(this_dir) / "deps/cutlass/tools/utils/include" ,
Path(this_dir) / "deps/cutlass/examples/common" ,
# Path(this_dir) / "some" / "thing" / "more",
],
)
)
setup(
name=PACKAGE_NAME,
packages=find_packages(
exclude=(
"build",
"csrc",
"include",
"tests",
"dist",
"docs",
"benchmarks",
)
),
description="Attention mechanism implement by CUDA",
ext_modules=ext_modules,
cmdclass={ "build_ext": BuildExtension},
python_requires=">=3.7",
install_requires=[
"torch",
"einops",
"packaging",
"ninja",
],
)
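
# A minimal sketch of a typical build (assumes deps/cutlass has already been
# populated, e.g. via a git submodule checkout, and that a CUDA toolchain
# compatible with the installed PyTorch is available):
#
#   pip install -e .    # editable/development install
#   pip install .       # regular install
#
# Afterwards the compiled CUDA module is importable as `attention_cutlass`.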