
Commit ec275e8: lint
Parent: dfca0a5

3 files changed: 8 additions, 6 deletions

python/tvm/runtime/ndarray.py
Lines changed: 3 additions & 1 deletion

@@ -147,7 +147,9 @@ def copyfrom(self, source_array):
                         source_array.shape, shape
                     )
                 )
-        source_array = np.ascontiguousarray(source_array, dtype="uint16" if dtype == "bfloat16" else dtype)
+        source_array = np.ascontiguousarray(
+            source_array, dtype="uint16" if dtype == "bfloat16" else dtype
+        )
         assert source_array.flags["C_CONTIGUOUS"]
         data = source_array.ctypes.data_as(ctypes.c_void_p)
         nbytes = ctypes.c_size_t(source_array.size * source_array.dtype.itemsize)
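For context on the dtype remapping this hunk wraps: NumPy has no native bfloat16 dtype, so TVM moves bfloat16 payloads through uint16 containers of the same bit width, which is why copyfrom substitutes "uint16" when dtype == "bfloat16". A minimal sketch of that bit-level reinterpretation (the helper names are illustrative, not TVM APIs, and this uses plain truncation rather than round-to-nearest-even):

import numpy as np

def f32_to_bf16_bits(x):
    # Reinterpret float32 as uint32 and keep the high 16 bits, which is
    # exactly the bfloat16 sign/exponent/mantissa layout.
    x = np.ascontiguousarray(x, dtype="float32")
    return (x.view("uint32") >> 16).astype("uint16")

def bf16_bits_to_f32(bits):
    # Widen the stored 16 bits back into the high half of a float32.
    return (bits.astype("uint32") << 16).view("float32")

a = np.random.uniform(size=(4, 8)).astype("float32")
roundtrip = bf16_bits_to_f32(f32_to_bf16_bits(a))
# bfloat16 keeps only 7 mantissa bits, so expect about 2 decimal digits.
np.testing.assert_allclose(a, roundtrip, rtol=1e-2)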

src/target/source/codegen_cuda.cc
Lines changed: 2 additions & 2 deletions

@@ -378,8 +378,8 @@ void CodeGenCUDA::PrintVecElemStore(const std::string& vec, DataType t, int i,
     stream << "((half2*)(&(" << vec << "." << access[i / 2] << ")))->" << access[i % 2] << " = "
            << value << ";\n";
   } else if (t.is_bfloat16()) {
-    stream << "((nv_bfloat162*)(&(" << vec << "." << access[i / 2] << ")))->" << access[i % 2] << " = "
-           << value << ";\n";
+    stream << "((nv_bfloat162*)(&(" << vec << "." << access[i / 2] << ")))->" << access[i % 2]
+           << " = " << value << ";\n";
   } else {
     stream << vec << "." << access[i] << " = " << value << ";\n";
   }
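The reflowed stream statements above change only how the emitted CUDA text is laid out in the C++ source, not the text itself. To check what actually gets emitted, one option (a sketch, assuming a schedule s and arguments A, B as in the test below) is to dump the generated CUDA source from the built module:

# After tvm.build(..., "cuda"), the device code is an imported module.
fun = tvm.build(s, [A, B], "cuda")
cuda_src = fun.imported_modules[0].get_source()
assert "nv_bfloat162" in cuda_src  # the vectorized bf16 stores from above
print(cuda_src)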

tests/python/unittest/test_target_codegen_cuda.py
Lines changed: 3 additions & 3 deletions

@@ -95,9 +95,9 @@ def check_cuda(n, lanes):
         xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
         s[B].bind(xo, bx)
         s[B].bind(xi, tx)
-        with tvm.transform.PassContext(disabled_pass=["tir.BF16Promote",
-                                                      "tir.BF16CastElimination",
-                                                      "tir.BF16TypeLowering"]):
+        with tvm.transform.PassContext(
+            disabled_pass=["tir.BF16Promote", "tir.BF16CastElimination", "tir.BF16TypeLowering"]
+        ):
             fun = tvm.build(s, [A, B], "cuda")
         ctx = tvm.gpu(0)
         np_a = np.random.uniform(size=(n, lanes)).astype("float32")
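Disabling tir.BF16Promote, tir.BF16CastElimination, and tir.BF16TypeLowering keeps TVM's software bfloat16 legalization out of the pipeline, so the CUDA backend handles the type natively. Since NumPy cannot express bfloat16 directly, inputs are fed as uint16 bit patterns, which is exactly the path the ndarray.py hunk above touches. A sketch of the hand-off (reusing f32_to_bf16_bits from the first note; n, lanes, ctx, np_a, and fun as in the test):

a = tvm.nd.empty((n, lanes), "bfloat16", ctx)
a.copyfrom(f32_to_bf16_bits(np_a))  # copyfrom accepts the uint16 payload
b = tvm.nd.empty((n, lanes), "bfloat16", ctx)
fun(a, b)

Results come back the same way: b.asnumpy() should yield uint16 bit patterns that bf16_bits_to_f32 can widen for a float32 comparison.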
