[Continuation] Merge EmbeddedLLM/vllm-rocm into vLLM main #1836

Merged: 63 commits, Dec 8, 2023
Commits (63)
43af310  port dtype_float16.cuh and cache_kernels.cu (pcmoritz, Oct 10, 2023)
cc81866  port dtype_bfloat16.cuh (pcmoritz, Oct 10, 2023)
475b5e2  port attention_utils.cuh (pcmoritz, Oct 10, 2023)
ddc496c  port more kernels (pcmoritz, Oct 10, 2023)
5eaa7a1  fix typo (pcmoritz, Oct 10, 2023)
f7273c6  add cuda_compat.h (pcmoritz, Oct 10, 2023)
99c3be7  Merge branch 'main' into port-to-rocm (pcmoritz, Oct 16, 2023)
f8093dc  sync branches (pcmoritz, Oct 16, 2023)
41df689  update (pcmoritz, Oct 16, 2023)
93be9c5  update (pcmoritz, Oct 16, 2023)
d96fa3c  fixes (pcmoritz, Oct 16, 2023)
421365b  cleanup (pcmoritz, Oct 16, 2023)
06b800e  update (pcmoritz, Oct 16, 2023)
2312beb  update (pcmoritz, Oct 16, 2023)
2958b39  update (pcmoritz, Oct 16, 2023)
3f89734  fmt (pcmoritz, Oct 16, 2023)
5397a57  cleanup (pcmoritz, Oct 16, 2023)
90e02d2  refactor (pcmoritz, Oct 16, 2023)
a420202  update (pcmoritz, Oct 16, 2023)
b072182  Merge branch 'main' into port-to-rocm (pcmoritz, Oct 17, 2023)
2d1e435  detecting rocm and adding flag for compiling (iAmir97, Oct 17, 2023)
e231b79  using asm volatile instead of hip api (iAmir97, Oct 17, 2023)
31bb335  using asm volatile for type casting of f16 (iAmir97, Oct 17, 2023)
b027d06  Hipifying csrc file to accommodate rocm builds (kliuae, Nov 27, 2023)
9a1781c  Checked CUDA ROCm Compatibility (#15) (tjtanaa, Nov 29, 2023)
0f67117  merged with latest upstream (kliuae, Nov 29, 2023)
7dbf2d4  format code (kliuae, Nov 29, 2023)
52ffcf0  downgrade torch requirement in toml to torch 2.0.1 to accommodate ROC… (kliuae, Nov 29, 2023)
27f0513  Merged changes from vllm main (kliuae, Dec 1, 2023)
5cce649  Merged with changes in vllm main (kliuae, Dec 1, 2023)
16d3ccc  Updated Dockerfile, rocm installation guide and setup.py (kliuae, Dec 1, 2023)
d764f9d  Updated amd installation guide and dockerfile (kliuae, Dec 2, 2023)
e798632  Added num_gpus for ray init in ROCm (kliuae, Dec 2, 2023)
0e8129f  Synced torch version with vllm main in pyproject.toml (kliuae, Dec 2, 2023)
2b3821b  Format code (kliuae, Dec 2, 2023)
0c8795a  Merge branch 'main' into vllm-cuda-rocm-dev (kliuae, Dec 4, 2023)
5793f30  Updated dockerfile.rocm and requirements-rocm.txt (kliuae, Dec 4, 2023)
b172cdd  Disable mistral for ROCm (kliuae, Dec 4, 2023)
9cd5b18  Format code (kliuae, Dec 4, 2023)
b86f88a  Revert to cuda kernels (kliuae, Dec 5, 2023)
9727ab4  Merge remote-tracking branch 'pcmoritz/port-to-rocm' (kliuae, Dec 5, 2023)
c4aa2af  Port latest kernels to ROCm (kliuae, Dec 5, 2023)
f8c304e  Update readme (kliuae, Dec 5, 2023)
e608c30  Cleaned up kernel code (kliuae, Dec 5, 2023)
951e225  Added wrapper for setting devFuncAttributeMaxDynamicSharedMemorySize (kliuae, Dec 6, 2023)
25f9a97  Added wrapper for setting devFuncAttributeMaxDynamicSharedMemorySize (kliuae, Dec 6, 2023)
e984ada  Updated ROCm warp size (kliuae, Dec 6, 2023)
cc1195f  Format code (kliuae, Dec 6, 2023)
f92980e  Check hip from wrapper (kliuae, Dec 6, 2023)
66b4aa1  Format code (kliuae, Dec 6, 2023)
4a0ecb8  Enable support for mistral models (kliuae, Dec 6, 2023)
acf51a8  Fixed hip device attribute (kliuae, Dec 6, 2023)
4a52977  Format code (kliuae, Dec 6, 2023)
23a987a  Restored awq file (kliuae, Dec 7, 2023)
8787a4e  Format code (kliuae, Dec 7, 2023)
5911131  Merge latest vllm main (kliuae, Dec 7, 2023)
9fa8075  Updated rocm dockerfile (kliuae, Dec 7, 2023)
81e052d  Update amd installation guide (kliuae, Dec 7, 2023)
fb8ac26  Update vLLM Documentations (#18) (tjtanaa, Dec 7, 2023)
98f5487  Updated setup.py, vllm/utils.py and amd-installation doc (kliuae, Dec 8, 2023)
d90187a  Updated setup.py (kliuae, Dec 8, 2023)
c840531  Format code (kliuae, Dec 8, 2023)
9dba1d8  Merge branch 'main' into vllm-cuda-rocm-mod (kliuae, Dec 8, 2023)
Changes from 1 commit: 951e225e5d206594e61fd43f2f021187b4dc07df
Added wrapper for setting devFuncAttributeMaxDynamicSharedMemorySize
kliuae committed Dec 6, 2023
csrc/attention/attention_kernels.cu: 70 changes (45 additions, 25 deletions)

```diff
@@ -542,31 +542,52 @@ __global__ void paged_attention_v2_reduce_kernel(
 
 } // namespace vllm
 
-#ifndef USE_ROCM
-#define LAUNCH_PAGED_ATTENTION_V1(HEAD_SIZE) \
-  cudaFuncSetAttribute( \
-    (void*)vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS>, \
-    cudaFuncAttributeMaxDynamicSharedMemorySize, shared_mem_size); \
-  vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS> \
-  <<<grid, block, shared_mem_size, stream>>>( \
-    out_ptr, \
-    query_ptr, \
-    key_cache_ptr, \
-    value_cache_ptr, \
-    head_mapping_ptr, \
-    scale, \
-    block_tables_ptr, \
-    context_lens_ptr, \
-    max_num_blocks_per_seq, \
-    alibi_slopes_ptr, \
-    q_stride, \
-    kv_block_stride, \
-    kv_head_stride);
-#else
+// #ifndef USE_ROCM
+// #define LAUNCH_PAGED_ATTENTION_V1(HEAD_SIZE) \
+//   cudaFuncSetAttribute( \
+//     (void*)vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS>, \
+//     cudaFuncAttributeMaxDynamicSharedMemorySize, shared_mem_size); \
+//   vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS> \
+//   <<<grid, block, shared_mem_size, stream>>>( \
+//     out_ptr, \
+//     query_ptr, \
+//     key_cache_ptr, \
+//     value_cache_ptr, \
+//     head_mapping_ptr, \
+//     scale, \
+//     block_tables_ptr, \
+//     context_lens_ptr, \
+//     max_num_blocks_per_seq, \
+//     alibi_slopes_ptr, \
+//     q_stride, \
+//     kv_block_stride, \
+//     kv_head_stride);
+// #else
+// #define LAUNCH_PAGED_ATTENTION_V1(HEAD_SIZE) \
+//   hipFuncSetAttribute( \
+//     (void*)vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS>, \
+//     hipFuncAttributeMaxDynamicSharedMemorySize, shared_mem_size); \
+//   vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS> \
+//   <<<grid, block, shared_mem_size, stream>>>( \
+//     out_ptr, \
+//     query_ptr, \
+//     key_cache_ptr, \
+//     value_cache_ptr, \
+//     head_mapping_ptr, \
+//     scale, \
+//     block_tables_ptr, \
+//     context_lens_ptr, \
+//     max_num_blocks_per_seq, \
+//     alibi_slopes_ptr, \
+//     q_stride, \
+//     kv_block_stride, \
+//     kv_head_stride);
+// #endif
+
 #define LAUNCH_PAGED_ATTENTION_V1(HEAD_SIZE) \
-  hipFuncSetAttribute( \
-    (void*)vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS>, \
-    hipFuncAttributeMaxDynamicSharedMemorySize, shared_mem_size); \
+  VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize( \
+    ((void*)vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS>), \
+    shared_mem_size); \
   vllm::paged_attention_v1_kernel<T, HEAD_SIZE, BLOCK_SIZE, NUM_THREADS> \
   <<<grid, block, shared_mem_size, stream>>>( \
     out_ptr, \
@@ -582,7 +603,6 @@ __global__ void paged_attention_v2_reduce_kernel(
     q_stride, \
     kv_block_stride, \
     kv_head_stride);
-#endif
 
 // TODO(woosuk): Tune NUM_THREADS.
 template<
```
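The net effect of this hunk is that `LAUNCH_PAGED_ATTENTION_V1` no longer needs separate CUDA and ROCm definitions: both back ends go through the `VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize` wrapper added to `csrc/cuda_compat.h` (shown in the next file). A minimal sketch of the resulting call-site pattern, with a hypothetical `toy_kernel` and `launch_toy_kernel` standing in for the real paged-attention kernel and launch macro:

```cpp
// demo_compat_launch.cu -- illustrative only, not code from this PR.
// Raise the kernel's dynamic shared memory cap via the compat wrapper,
// then launch as usual; no #ifndef USE_ROCM branch at the call site.
#include "cuda_compat.h"

__global__ void toy_kernel(float* out) {
  extern __shared__ float smem[];  // dynamic shared memory
  smem[threadIdx.x] = static_cast<float>(threadIdx.x);
  __syncthreads();
  out[threadIdx.x] = smem[threadIdx.x];
}

void launch_toy_kernel(float* out, int shared_mem_size) {
  // Expands to cudaFuncSetAttribute(..., cudaFuncAttributeMaxDynamicSharedMemorySize, ...)
  // under CUDA and to hipFuncSetAttribute(...) under ROCm.
  VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(
      (void*)toy_kernel, shared_mem_size);
  toy_kernel<<<1, 256, shared_mem_size>>>(out);
}
```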
csrc/cuda_compat.h: 12 changes (10 additions, 2 deletions)

```diff
@@ -13,7 +13,15 @@
 #endif
 
 #ifndef USE_ROCM
-  #define VLLM_SHFL_SYNC(var, src_lane) __shfl_sync(uint32_t(-1), var, src_lane);
+  #define VLLM_SHFL_SYNC(var, src_lane) __shfl_sync(uint32_t(-1), var, src_lane)
 #else
   #define VLLM_SHFL_SYNC(var, src_lane) __shfl(var, src_lane)
 #endif
+
+#ifndef USE_ROCM
+  #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
+    cudaFuncSetAttribute(FUNC, cudaFuncAttributeMaxDynamicSharedMemorySize, VAL)
+#else
+  #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
+    hipFuncSetAttribute(FUNC, hipFuncAttributeMaxDynamicSharedMemorySize, VAL)
+#endif
```
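Two things change here: the trailing semicolon is dropped from `VLLM_SHFL_SYNC`, presumably so the macro can be used mid-expression (with the semicolon baked in, an expansion such as `fmaxf(__shfl(v, 0);, 0.0f)` would not compile), and the new `VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize` wrapper centralizes the `cudaFuncSetAttribute`/`hipFuncSetAttribute` split. A usage sketch under those assumptions (`broadcast_from_lane0` is a toy kernel, not from the PR):

```cpp
// Illustrative only: a warp/wavefront broadcast built on the compat macro.
// On CUDA, VLLM_SHFL_SYNC expands to __shfl_sync(uint32_t(-1), ...); on
// ROCm it expands to __shfl(...), which spans the 64-lane wavefront.
#include "cuda_compat.h"

__global__ void broadcast_from_lane0(const float* in, float* out) {
  float v = in[threadIdx.x];
  // Because the macro no longer ends in ';', it can sit inside a larger
  // expression such as this function argument:
  out[threadIdx.x] = fmaxf(VLLM_SHFL_SYNC(v, 0), 0.0f);
}
```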