
Commit 2722bd1

[SYCL][Matrix] Update recent tests to use the new API and remove deprecated bfloat16::from_bits (intel#1494)

- Update recently added tests to use the new API (col-major and row-major tests, VNNI/transpose)
- Fix the make_fp32 function so it does not do an extra conversion
- Remove deprecated bfloat16::from_bits
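
For illustration, a minimal before/after sketch of the API change; the make_bf16 helper here is a stand-in for the tests' float-to-bits utility and is assumed to return the raw unsigned short bit pattern:

#include <sycl/sycl.hpp>
using bfloat16 = sycl::ext::oneapi::bfloat16;

// Assumed helper: extract the bf16 bit pattern by truncating an fp32 value.
unsigned short make_bf16(float x) {
  return (unsigned short)(*reinterpret_cast<unsigned int *>(&x) >> 16);
}

int main() {
  // Before: construct from a raw bit pattern via the deprecated factory.
  // bfloat16 a = bfloat16::from_bits(make_bf16(1.0f));

  // After: the float constructor converts directly; host-side float-to-
  // bfloat16 conversion support is what makes the from_bits workaround
  // unnecessary.
  bfloat16 a = bfloat16(1.0f);
  return (float(a) == 1.0f) ? 0 : 1;
}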
dkhaldi authored Jan 7, 2023
1 parent 770c8ab commit 2722bd1
Showing 18 changed files with 547 additions and 126 deletions.
4 changes: 2 additions & 2 deletions SYCL/Matrix/Legacy/joint_matrix_bfloat16_32x64_impl.hpp
@@ -120,13 +120,13 @@ int main() {
     for (int j = 0; j < MATRIX_K; j++) {
       // bfloat16 is created using unsigned short since conversion from float to
       // bfloat16 is not supported on the host side yet
-      A[i][j] = bfloat16::from_bits(make_bf16(1.0f * (i + j)));
+      A[i][j] = bfloat16(1.0f * (i + j));
       Aref[i][j] = make_bf16(1.0f * (i + j));
     }
   }
   for (int i = 0; i < MATRIX_K / 2; i++) {
     for (int j = 0; j < MATRIX_N * 2; j++) {
-      B[i][j] = bfloat16::from_bits((make_bf16(2.0f * i + 3.0f * j)));
+      B[i][j] = bfloat16(2.0f * i + 3.0f * j);
       Bref[i][j] = make_bf16(2.0f * i + 3.0f * j);
     }
   }
28 changes: 28 additions & 0 deletions SYCL/Matrix/Legacy/joint_matrix_bfloat16_colmajorA_colmajorB.cpp
@@ -0,0 +1,28 @@
//==-- joint_matrix_bfloat16_colmajorA_colmajorB.cpp - DPC++ joint_matrix--==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// REQUIRES: matrix

// RUN: %clangxx -fsycl %s -o %t.out -DSYCL_EXT_ONEAPI_MATRIX_VERSION=1
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out

// This tests support of the col-major layout for matrix B, which requires a
// transpose followed by a VNNI transform. This is currently only available
// on AMX.

// XFAIL: gpu

#include <iostream>
#include <sycl/sycl.hpp>

using namespace sycl;
using namespace sycl::ext::oneapi::experimental::matrix;
using bfloat16 = sycl::ext::oneapi::bfloat16;

#define SG_SZ 16

#include "joint_matrix_bfloat16_colmajorA_colmajorB_impl.hpp"
129 changes: 129 additions & 0 deletions SYCL/Matrix/Legacy/joint_matrix_bfloat16_colmajorA_colmajorB_impl.hpp
@@ -0,0 +1,129 @@
#define TM 8
#define TN SG_SZ
#define TK 16
// bfloat16 machine epsilon: 2^-7 = 0.0078125 (8-bit significand)
#define BF16_EPSILON 0.00781250

template <typename T, size_t NUM_ROWS, size_t NUM_COLS> struct big_matrix {
private:
  T *mat;

public:
  T *get_data() { return mat; }
  void set_data(T *data) { mat = data; }
  big_matrix(T *data) : mat(data) {}
};

template <typename T1, typename T2, size_t M, size_t N, size_t K>
void matrix_multiply(big_matrix<T1, M, N> &C, big_matrix<T2, M, K> &A,
                     big_matrix<T2, K, N> &B) {
  size_t NDRangeM = M / TM;
  size_t NDRangeN = N / TN;
  buffer<bfloat16, 2> bufA(A.get_data(), range<2>(M, K));
  buffer<bfloat16, 2> bufB(B.get_data(), range<2>(K, N));
  buffer<float, 2> bufC((float *)C.get_data(), range<2>(M, N));

  queue q;
  q.submit([&](handler &cgh) {
     auto accC = bufC.get_access<access::mode::read_write>(cgh);
     auto accA = bufA.get_access<access::mode::read_write>(cgh);
     auto accB = bufB.get_access<access::mode::read_write>(cgh);

     cgh.parallel_for<class imatrix>(
         nd_range<2>({NDRangeM, NDRangeN * SG_SZ}, {1, 1 * SG_SZ}),
         [=](nd_item<2> spmd_item) [[intel::reqd_sub_group_size(SG_SZ)]] {
           // The submatrix API has to be accessed by all the work-items in a
           // sub-group; these functions are called once per sub-group, so
           // there is no code divergence between the work-items.
           const auto global_idx = spmd_item.get_global_id(0);
           const auto global_idy = spmd_item.get_global_id(1);
           const auto sg_startx = global_idx - spmd_item.get_local_id(0);
           const auto sg_starty = global_idy - spmd_item.get_local_id(1);

           ext::oneapi::sub_group sg = spmd_item.get_sub_group();
           joint_matrix<bfloat16, TM, TK> sub_a(sg);
           joint_matrix<bfloat16, TK, TN, matrix_layout::packed_b> sub_b(sg);
           joint_matrix<float, TM, TN> sub_c(sg);

           joint_matrix_load(sg, sub_c,
                             accC.get_pointer() + (sg_startx * TM) * N +
                                 sg_starty / SG_SZ * TN,
                             N, matrix_layout::row_major);
           for (int k = 0; k < K / TK; k += 1) {
             // A and B are stored column major, so the tiles are addressed
             // with strides M and K respectively.
             joint_matrix_load(
                 sg, sub_a, accA.get_pointer() + (k * TK) * M + sg_startx * TM,
                 M, matrix_layout::col_major);
             joint_matrix_load(sg, sub_b,
                               accB.get_pointer() +
                                   (sg_starty / SG_SZ * TN) * K + k * TK,
                               K, matrix_layout::col_major);
             sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c);
           }
           joint_matrix_store(sg, sub_c,
                              accC.get_pointer() + (sg_startx * TM) * N +
                                  sg_starty / SG_SZ * TN,
                              N, matrix_layout::row_major);
         }); // parallel for
   }).wait();
}
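
// Dispatch geometry, for reference: the nd_range covers M/TM x N/TN tiles;
// dimension 1 is scaled by SG_SZ because all SG_SZ work-items of a sub-group
// cooperate on the same TM x TN tile (hence the sg_starty / SG_SZ above).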

static constexpr size_t MATRIX_M = TM * 2;
static constexpr size_t MATRIX_N = TN * 2;
static constexpr size_t MATRIX_K = TK * 2;
// A and B are stored transposed (column major): A is K x M, B is N x K.
bfloat16 A[MATRIX_K][MATRIX_M];
bfloat16 B[MATRIX_N][MATRIX_K];
float C[MATRIX_M][MATRIX_N];
float D[MATRIX_M][MATRIX_N];

float make_fp32(bfloat16 x) {
  // Place the 16-bit bfloat16 pattern in the upper half of a 32-bit word;
  // the lower 16 bits of the resulting fp32 significand are zero.
  unsigned int y = *((int *)&x);
  y = y << 16;
  float *res = reinterpret_cast<float *>(&y);
  return *res;
}
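
// For reference, a hypothetical sketch of the opposite direction (fp32 ->
// bf16 by truncation), mirroring the bit manipulation above:
//   unsigned short to_bf16_bits(float f) {
//     return (unsigned short)(*reinterpret_cast<unsigned int *>(&f) >> 16);
//   }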

void matrix_multiply_ref(int M, int N, int K) {
  for (int m = 0; m < M; m++)
    for (int n = 0; n < N; n++) {
      for (int k = 0; k < K; k++) {
        // A and B are stored transposed, hence the swapped indices.
        D[m][n] += make_fp32(A[k][m]) * make_fp32(B[n][k]);
      }
    }
}

int main() {
  for (int i = 0; i < MATRIX_K; i++) {
    for (int j = 0; j < MATRIX_M; j++) {
      A[i][j] = bfloat16(1.0f * (i + j));
    }
  }
  for (int i = 0; i < MATRIX_N; i++) {
    for (int j = 0; j < MATRIX_K; j++) {
      B[i][j] = bfloat16(2.0f * i + 3.0f * j);
    }
  }
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      C[i][j] = 1.0;
      D[i][j] = 1.0;
    }
  }

  big_matrix<float, MATRIX_M, MATRIX_N> MC((float *)&C);
  big_matrix<float, MATRIX_M, MATRIX_N> MD((float *)&D);
  big_matrix<bfloat16, MATRIX_M, MATRIX_K> MA((bfloat16 *)&A);
  big_matrix<bfloat16, MATRIX_K, MATRIX_N> MB((bfloat16 *)&B);
  matrix_multiply(MC, MA, MB);
  matrix_multiply_ref(MATRIX_M, MATRIX_N, MATRIX_K);

  bool res = true;
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      // Compare the magnitude of the difference (not the difference of
      // magnitudes) against the bfloat16 epsilon.
      if (fabs(C[i][j] - D[i][j]) > BF16_EPSILON)
        res = false;
    }
  }
  std::cout << (res ? "passed" : "failed") << std::endl;
  return !res;
}
10 changes: 4 additions & 6 deletions SYCL/Matrix/Legacy/joint_matrix_bfloat16_impl.hpp
@@ -120,13 +120,13 @@ int main() {
     for (int j = 0; j < MATRIX_K; j++) {
       // bfloat16 is created using unsigned short since conversion from float to
       // bfloat16 is not supported on the host side yet
-      A[i][j] = bfloat16::from_bits(make_bf16(1.0f * (i + j)));
+      A[i][j] = bfloat16(1.0f * (i + j));
       Aref[i][j] = make_bf16(1.0f * (i + j));
     }
   }
   for (int i = 0; i < MATRIX_K / 2; i++) {
     for (int j = 0; j < MATRIX_N * 2; j++) {
-      B[i][j] = bfloat16::from_bits((make_bf16(2.0f * i + 3.0f * j)));
+      B[i][j] = bfloat16(2.0f * i + 3.0f * j);
       Bref[i][j] = make_bf16(2.0f * i + 3.0f * j);
     }
   }
@@ -152,8 +152,6 @@
         res = false;
     }
   }
-  if (res)
-    std::cout << "passed\n";
-  else
-    std::cout << "failed\n";
+  std::cout << (res ? "passed" : "failed") << std::endl;
   return !res;
 }
28 changes: 28 additions & 0 deletions SYCL/Matrix/Legacy/joint_matrix_bfloat16_rowmajorA_rowmajorB.cpp
@@ -0,0 +1,28 @@
//==--joint_matrix_bfloat16_rowmajorA_rowmajorB.cpp - DPC++ joint_matrix---==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// REQUIRES: matrix

// RUN: %clangxx -fsycl %s -o %t.out -DSYCL_EXT_ONEAPI_MATRIX_VERSION=1
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out

// This tests support of the row-major layout for matrix B, which triggers an
// automatic VNNI transform. This is currently only available on AMX.

// XFAIL: gpu

#include <iostream>
#include <sycl/sycl.hpp>

using namespace sycl;
using namespace sycl::ext::oneapi::experimental::matrix;
using bfloat16 = sycl::ext::oneapi::bfloat16;

#define SG_SZ 16

#include "joint_matrix_bfloat16_rowmajorA_rowmajorB_impl.hpp"
129 changes: 129 additions & 0 deletions SYCL/Matrix/Legacy/joint_matrix_bfloat16_rowmajorA_rowmajorB_impl.hpp
@@ -0,0 +1,129 @@
#define TM 8
#define TN SG_SZ
#define TK 16
// bfloat16 machine epsilon: 2^-7 = 0.0078125 (8-bit significand)
#define BF16_EPSILON 0.00781250

template <typename T, size_t NUM_ROWS, size_t NUM_COLS> struct big_matrix {
private:
  T *mat;

public:
  T *get_data() { return mat; }
  void set_data(T *data) { mat = data; }
  big_matrix(T *data) : mat(data) {}
};

template <typename T1, typename T2, size_t M, size_t N, size_t K>
void matrix_multiply(big_matrix<T1, M, N> &C, big_matrix<T2, M, K> &A,
                     big_matrix<T2, K, N> &B) {
  size_t NDRangeM = M / TM;
  size_t NDRangeN = N / TN;
  buffer<bfloat16, 2> bufA(A.get_data(), range<2>(M, K));
  buffer<bfloat16, 2> bufB(B.get_data(), range<2>(K, N));
  buffer<float, 2> bufC((float *)C.get_data(), range<2>(M, N));

  queue q;
  q.submit([&](handler &cgh) {
     auto accC = bufC.get_access<access::mode::read_write>(cgh);
     auto accA = bufA.get_access<access::mode::read_write>(cgh);
     auto accB = bufB.get_access<access::mode::read_write>(cgh);

     cgh.parallel_for<class imatrix>(
         nd_range<2>({NDRangeM, NDRangeN * SG_SZ}, {1, 1 * SG_SZ}),
         [=](nd_item<2> spmd_item) [[intel::reqd_sub_group_size(SG_SZ)]] {
           // The submatrix API has to be accessed by all the work-items in a
           // sub-group; these functions are called once per sub-group, so
           // there is no code divergence between the work-items.
           const auto global_idx = spmd_item.get_global_id(0);
           const auto global_idy = spmd_item.get_global_id(1);
           const auto sg_startx = global_idx - spmd_item.get_local_id(0);
           const auto sg_starty = global_idy - spmd_item.get_local_id(1);

           ext::oneapi::sub_group sg = spmd_item.get_sub_group();
           joint_matrix<bfloat16, TM, TK> sub_a(sg);
           joint_matrix<bfloat16, TK, TN, matrix_layout::packed_b> sub_b(sg);
           joint_matrix<float, TM, TN> sub_c(sg);

           joint_matrix_load(sg, sub_c,
                             accC.get_pointer() + (sg_startx * TM) * N +
                                 sg_starty / SG_SZ * TN,
                             N, matrix_layout::row_major);
           for (int k = 0; k < K / TK; k += 1) {
             joint_matrix_load(
                 sg, sub_a, accA.get_pointer() + (sg_startx * TM) * K + k * TK,
                 K, matrix_layout::row_major);
             // B stays in its natural row-major K x N form; the
             // implementation performs the VNNI repacking automatically.
             joint_matrix_load(sg, sub_b,
                               accB.get_pointer() + (k * TK) * (N) +
                                   sg_starty / SG_SZ * TN,
                               N, matrix_layout::row_major);
             sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c);
           }
           joint_matrix_store(sg, sub_c,
                              accC.get_pointer() + (sg_startx * TM) * N +
                                  sg_starty / SG_SZ * TN,
                              N, matrix_layout::row_major);
         }); // parallel for
   }).wait();
}

static constexpr size_t MATRIX_M = TM * 2;
static constexpr size_t MATRIX_N = TN * 2;
static constexpr size_t MATRIX_K = TK * 2;
bfloat16 A[MATRIX_M][MATRIX_K];
bfloat16 B[MATRIX_K][MATRIX_N];
float C[MATRIX_M][MATRIX_N];
float D[MATRIX_M][MATRIX_N];

float make_fp32(bfloat16 x) {
  unsigned int y = *((int *)&x);
  y = y << 16;
  float *res = reinterpret_cast<float *>(&y);
  return *res;
}

void matrix_multiply_ref(int M, int N, int K) {
  for (int m = 0; m < M; m++)
    for (int n = 0; n < N; n++) {
      for (int k = 0; k < K; k++) {
        D[m][n] += make_fp32(A[m][k]) * make_fp32(B[k][n]);
      }
    }
}

int main() {
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_K; j++) {
      A[i][j] = bfloat16(1.0f * (i + j));
    }
  }
  for (int i = 0; i < MATRIX_K; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      B[i][j] = bfloat16(2.0f * i + 3.0f * j);
    }
  }
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      C[i][j] = 1.0;
      D[i][j] = 1.0;
    }
  }

  big_matrix<float, MATRIX_M, MATRIX_N> MC((float *)&C);
  big_matrix<float, MATRIX_M, MATRIX_N> MD((float *)&D);
  big_matrix<bfloat16, MATRIX_M, MATRIX_K> MA((bfloat16 *)&A);
  big_matrix<bfloat16, MATRIX_K, MATRIX_N> MB((bfloat16 *)&B);
  matrix_multiply(MC, MA, MB);
  matrix_multiply_ref(MATRIX_M, MATRIX_N, MATRIX_K);

  bool res = true;
  for (int i = 0; i < MATRIX_M; i++) {
    for (int j = 0; j < MATRIX_N; j++) {
      // Compare the magnitude of the difference (not the difference of
      // magnitudes) against the bfloat16 epsilon.
      if (fabs(C[i][j] - D[i][j]) > BF16_EPSILON)
        res = false;
    }
  }
  std::cout << (res ? "passed" : "failed") << std::endl;
  return !res;
}
27 changes: 27 additions & 0 deletions SYCL/Matrix/Legacy/joint_matrix_int8_colmajorA_colmajorB.cpp
@@ -0,0 +1,27 @@
//==----- joint_matrix_int8_colmajorA_colmajorB.cpp - DPC++ joint_matrix---==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// REQUIRES: matrix

// RUN: %clangxx -fsycl %s -o %t.out -DSYCL_EXT_ONEAPI_MATRIX_VERSION=1
// RUN: %CPU_RUN_PLACEHOLDER %t.out
// RUN: %GPU_RUN_PLACEHOLDER %t.out

// This tests support of the col-major layout for matrix B, which requires a
// transpose followed by a VNNI transform. This is currently only available
// on AMX.

// XFAIL: gpu

#include <iostream>
#include <sycl/sycl.hpp>

using namespace sycl;
using namespace sycl::ext::oneapi::experimental::matrix;

#define SG_SZ 16

#include "joint_matrix_int8_colmajorA_colmajorB_impl.hpp"
(The remaining 11 changed files are not shown.)
