Triton Tutorial Practice: 08 Group GEMM
The goal of this tutorial is to implement GEMM in Triton and compare it against torch.matmul(a, b).
Comparison results (only four data points: 1024, 512, 256, 128)
GEMM definition
GEMM stands for General Matrix Multiply.
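In BLAS terms, GEMM is the scaled product-and-accumulate

$$
C \leftarrow \alpha\, A B + \beta\, C, \qquad
A \in \mathbb{R}^{M \times K},\ B \in \mathbb{R}^{K \times N},\ C \in \mathbb{R}^{M \times N},
$$

and the case used throughout this tutorial is simply α = 1, β = 0, i.e. C = AB. A group GEMM runs many such independent problems, each with its own (M, N, K), in a single kernel launch.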
Group GEMM
The first half of group_gemm_fn gathers, on the host, the output buffers, raw device addresses, problem sizes, and leading dimensions for every GEMM in the group:

```python
A_addrs = []
B_addrs = []
C_addrs = []
g_sizes = []
g_lds = []
group_C = []
for i in range(group_size):
    A = group_A[i]
    B = group_B[i]
    assert A.shape[1] == B.shape[0]
    M, K = A.shape
    K, N = B.shape
    C = torch.empty((M, N), device=device, dtype=A.dtype)
    group_C.append(C)
    # collect raw device addresses; the kernel indexes into these per problem
    A_addrs.append(A.data_ptr())
    B_addrs.append(B.data_ptr())
    C_addrs.append(C.data_ptr())
    # per-problem (M, N, K) triplets, flattened into one list
    g_sizes += [M, N, K]
    # per-problem leading dimensions of A, B, C
    g_lds += [A.stride(0), B.stride(0), C.stride(0)]
```
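The second half of group_gemm_fn (not quoted in this note) moves these host-side lists to the GPU and launches the grouped kernel once for the whole group. Roughly, following the upstream tutorial (grouped_matmul_kernel and NUM_SM come from the tutorial's kernel definition):

```python
# device tensors: raw addresses as int64, (M, N, K) and leading dims as int32
d_a_ptrs = torch.tensor(A_addrs, device=device)
d_b_ptrs = torch.tensor(B_addrs, device=device)
d_c_ptrs = torch.tensor(C_addrs, device=device)
d_g_sizes = torch.tensor(g_sizes, dtype=torch.int32, device=device)
d_g_lds = torch.tensor(g_lds, dtype=torch.int32, device=device)

# a single launch with a fixed (autotuned) number of programs covers
# every tile of every problem in the group
grid = lambda META: (META['NUM_SM'], )
grouped_matmul_kernel[grid](d_a_ptrs, d_b_ptrs, d_c_ptrs, d_g_sizes, d_g_lds, group_size)
return group_C
```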
Group Matmul
Implementation

group_gemm_fn(group_A, group_B)
is the implementation of the operator itself.

triton_perf_fn
exists so that the benchmark later on measures both providers against the same matrices in memory: the matrix-setup first half of group_gemm_fn is factored out, and triton_perf_fn just calls the grouped matmul kernel directly.

This part also shows that the tutorial's explanations were never fully polished.
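For completeness, the two benchmark entry points are thin wrappers. A sketch following the upstream tutorial (torch_perf_fn is the cuBLAS-backed baseline; triton_perf_fn assumes the tutorial's grouped_matmul_kernel is in scope):

```python
def torch_perf_fn(group_A, group_B):
    # baseline: one cuBLAS-backed matmul launch per matrix pair
    for a, b in zip(group_A, group_B):
        torch.matmul(a, b)


def triton_perf_fn(a_ptrs, b_ptrs, c_ptrs, sizes, lds, group_size):
    # a single kernel launch covers every GEMM in the group
    grid = lambda META: (META['NUM_SM'], )
    grouped_matmul_kernel[grid](a_ptrs, b_ptrs, c_ptrs, sizes, lds, group_size)
```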
Matrix setup from the benchmark
```python
# N is the square-matrix size; in the upstream tutorial this whole block
# runs inside benchmark(N, provider), so it is rebuilt for each N
group_size = 4
group_A = []
group_B = []
A_addrs = []
B_addrs = []
C_addrs = []
g_sizes = []
g_lds = []
group_C = []
for i in range(group_size):
    A = torch.rand((N, N), device="cuda", dtype=torch.float16)
    B = torch.rand((N, N), device="cuda", dtype=torch.float16)
    C = torch.empty((N, N), device="cuda", dtype=torch.float16)
    group_A.append(A)
    group_B.append(B)
    group_C.append(C)
    A_addrs.append(A.data_ptr())
    B_addrs.append(B.data_ptr())
    C_addrs.append(C.data_ptr())
    g_sizes += [N, N, N]
    g_lds += [N, N, N]

# note these are device tensors: addresses as int64, metadata as int32
d_a_ptrs = torch.tensor(A_addrs, device="cuda")
d_b_ptrs = torch.tensor(B_addrs, device="cuda")
d_c_ptrs = torch.tensor(C_addrs, device="cuda")
d_g_sizes = torch.tensor(g_sizes, dtype=torch.int32, device="cuda")
d_g_lds = torch.tensor(g_lds, dtype=torch.int32, device="cuda")
```
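These d_* tensors carry raw 64-bit addresses plus int32 metadata; inside the kernel, each problem's entries are loaded and the address is cast back to a typed pointer. An excerpt in the spirit of the tutorial's grouped_matmul_kernel (parameter names follow the kernel; `import triton.language as tl` is assumed; treat this as a sketch):

```python
# for problem index g inside the kernel:
gm = tl.load(group_gemm_sizes + g * 3)       # M of problem g
gn = tl.load(group_gemm_sizes + g * 3 + 1)   # N of problem g
gk = tl.load(group_gemm_sizes + g * 3 + 2)   # K of problem g
lda = tl.load(g_lds + g * 3)                 # leading dimension of A
# raw int64 address -> typed Triton pointer
a_ptr = tl.load(group_a_ptrs + g).to(tl.pointer_type(tl.float16))
```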
Benchmark test
The sizes used in the check below match what triton.testing.Benchmark is configured with (x_vals = [2**i for i in range(7, 11)], i.e. 128, 256, 512, 1024).
This verification snippet can be removed from the original Jupyter notebook without breaking anything.
```python
group_m = [1024, 512, 256, 128]
group_n = [1024, 512, 256, 128]
group_k = [1024, 512, 256, 128]
group_A = []
group_B = []
assert len(group_m) == len(group_n)
assert len(group_n) == len(group_k)
group_size = len(group_m)
for i in range(group_size):
    M = group_m[i]
    N = group_n[i]
    K = group_k[i]
    A = torch.rand((M, K), device="cuda", dtype=torch.float16)
    B = torch.rand((K, N), device="cuda", dtype=torch.float16)
    group_A.append(A)
    group_B.append(B)

tri_out = group_gemm_fn(group_A, group_B)
ref_out = [torch.matmul(a, b) for a, b in zip(group_A, group_B)]
for i in range(group_size):
    # fp16 inputs: allow a loose absolute tolerance
    assert torch.allclose(ref_out[i], tri_out[i], atol=1e-2, rtol=0)
```
```python
@triton.testing.perf_report(
    triton.testing.Benchmark(
        # argument names to use as an x-axis for the plot
        x_names=['N'],
        x_vals=[2**i for i in range(7, 11)],  # different possible values for `x_name`
        # argument name whose value corresponds to a different line in the plot
        line_arg='provider',
        # possible values for `line_arg`
        line_vals=['cublas', 'triton'],
        # label name for the lines
        line_names=["cuBLAS", "Triton"],
        # line styles
        styles=[('green', '-'), ('blue', '-')],
        ylabel="runtime(ms)",  # label name for the y-axis
        # name for the plot. Used also as a file name for saving the plot.
        plot_name="group-gemm-performance",
        args={},
    ))
def benchmark(N, provider):
    # in the original tutorial, the per-N matrix setup shown above runs here,
    # so A_addrs, g_sizes, etc. are rebuilt for each problem size
    d_a_ptrs = torch.tensor(A_addrs, device="cuda")
    d_b_ptrs = torch.tensor(B_addrs, device="cuda")
    d_c_ptrs = torch.tensor(C_addrs, device="cuda")
    d_g_sizes = torch.tensor(g_sizes, dtype=torch.int32, device="cuda")
    d_g_lds = torch.tensor(g_lds, dtype=torch.int32, device="cuda")
    quantiles = [0.5, 0.2, 0.8]
    if provider == 'cublas':
        ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch_perf_fn(group_A, group_B), quantiles=quantiles)
    if provider == 'triton':
        ms, min_ms, max_ms = triton.testing.do_bench(
            lambda: triton_perf_fn(d_a_ptrs, d_b_ptrs, d_c_ptrs, d_g_sizes, d_g_lds, group_size),
            quantiles=quantiles)
    return ms, max_ms, min_ms


benchmark.run(show_plots=True, print_data=True)
```
Follow-up questions
Why is the performance better?
Reference
https://medium.com/@champ.yen/spatial-tutorial-general-matrix-multiply-gemm-efb930cabd59