This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

here goes ... nightly's almost there
andy-neuma committed May 17, 2024
1 parent 0aa702d commit cc9f05c
Showing 4 changed files with 46 additions and 9 deletions.
17 changes: 14 additions & 3 deletions .github/actions/nm-set-env/action.yml
@@ -1,6 +1,9 @@
name: set neuralmagic env
description: 'sets environment variables for neuralmagic'
inputs:
wf_category:
description: "categories: REMOTE, NIGHTLY, RELEASE"
required: true
hf_token:
description: 'Hugging Face home'
required: true
@@ -14,19 +17,27 @@ runs:
using: composite
steps:
- run: |
# setup.py defaults to making 'nightly' package with 'nightly' version
if [[ "${{inputs.wf_category}}" == "RELEASE" ]]; then
echo "NM_RELEASE_TYPE=${{inputs.wf_category}}" >> $GITHUB_ENV
fi
# CUDA
echo "TORCH_CUDA_ARCH_LIST=7.0 7.5 8.0 8.6 8.9 9.0+PTX" >> $GITHUB_ENV
echo "PATH=/usr/local/apps/pyenv/plugins/pyenv-virtualenv/shims:/usr/local/apps/pyenv/shims:/usr/local/apps/pyenv/bin:/usr/local/apps/nvm/versions/node/v19.9.0/bin:/usr/local/apps/nvm/versions/node/v16.20.2/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/home/${WHOAMI}/.local/bin:" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/local/cuda-12.1/lib64::/usr/local/cuda-12.1/lib64:" >> $GITHUB_ENV
# HF Cache
echo "HF_TOKEN=${HF_TOKEN_SECRET}" >> $GITHUB_ENV
echo "HF_HOME=/EFS/hf_home" >> $GITHUB_ENV
# build
NUM_THREADS=$(./.github/scripts/determine-threading -G ${{ inputs.Gi_per_thread }})
echo "MAX_JOBS=${NUM_THREADS}" >> $GITHUB_ENV
echo "NVCC_THREADS=${{ inputs.nvcc_threads }}" >> $GITHUB_ENV
echo "VLLM_INSTALL_PUNICA_KERNELS=1" >> $GITHUB_ENV
echo "NCCL_IGNORE_DISABLED_P2P=1" >> $GITHUB_ENV
# pyenv
echo "PYENV_ROOT=/usr/local/apps/pyenv" >> $GITHUB_ENV
# testmo
echo "XDG_CONFIG_HOME=/usr/local/apps" >> $GITHUB_ENV
WHOAMI=$(whoami)
echo "PATH=/usr/local/apps/pyenv/plugins/pyenv-virtualenv/shims:/usr/local/apps/pyenv/shims:/usr/local/apps/pyenv/bin:/usr/local/apps/nvm/versions/node/v19.9.0/bin:/usr/local/apps/nvm/versions/node/v16.20.2/bin:/usr/local/cuda-12.1/bin:/usr/local/cuda-12.1/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/home/${WHOAMI}/.local/bin:" >> $GITHUB_ENV
echo "LD_LIBRARY_PATH=/usr/local/cuda-12.1/lib64::/usr/local/cuda-12.1/lib64:" >> $GITHUB_ENV
echo "PROJECT_ID=12" >> $GITHUB_ENV
env:
HF_TOKEN_SECRET: ${{ inputs.hf_token }}
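The key addition in this action is the wf_category gate: NM_RELEASE_TYPE is written to $GITHUB_ENV only for RELEASE runs, so REMOTE and NIGHTLY runs leave it unset and setup.py falls back to its nightly defaults. A minimal Python sketch of that gating (illustrative only, not part of the action):

import os

def export_release_type(wf_category: str) -> None:
    # Mirrors the shell step above: only RELEASE exports
    # NM_RELEASE_TYPE; any other category leaves it unset so
    # setup.py builds the nightly package. GITHUB_ENV is the
    # env file GitHub Actions provides to each step.
    if wf_category == "RELEASE":
        with open(os.environ["GITHUB_ENV"], "a") as env_file:
            env_file.write(f"NM_RELEASE_TYPE={wf_category}\n")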
1 change: 1 addition & 0 deletions .github/workflows/build-test.yml
@@ -116,6 +116,7 @@ jobs:
BUILD:
uses: ./.github/workflows/build.yml
with:
wf_category: ${{ inputs.wf_category }}
build_label: ${{ inputs.build_label }}
timeout: ${{ inputs.build_timeout }}
gitref: ${{ github.ref }}
12 changes: 7 additions & 5 deletions .github/workflows/build.yml
@@ -91,6 +91,7 @@ jobs:
id: setenv
uses: ./.github/actions/nm-set-env/
with:
wf_category: ${{ inputs.wf_category }}
hf_token: ${{ secrets.NM_HF_TOKEN }}
Gi_per_thread: ${{ inputs.Gi_per_thread }}
nvcc_threads: ${{ inputs.nvcc_threads }}
@@ -111,11 +112,12 @@
testmo_token: ${{ secrets.TESTMO_TEST_TOKEN }}
source: 'build-test'

- name: rename for nightly
uses: ./.github/actions/nm-rename-for-nightly-whl/
if: contains(fromJSON('["NIGHTLY"]'), inputs.wf_category)
with:
package_name: "nm-vllm"
# skip this ... should now be handled by setup.py
# - name: rename for nightly
# uses: ./.github/actions/nm-rename-for-nightly-whl/
# if: contains(fromJSON('["NIGHTLY"]'), inputs.wf_category)
# with:
# package_name: "nm-vllm"

- name: build
id: build
25 changes: 24 additions & 1 deletion setup.py
@@ -2,6 +2,7 @@
# UPSTREAM SYNC: noqa is required for passing ruff.
# This file has been modified by Neural Magic

import datetime
import importlib.util
import io
import logging
@@ -306,9 +307,28 @@ def find_version(filepath: str) -> str:
raise RuntimeError("Unable to find version string.")


# Neural Magic packaging ENVs
NM_RELEASE_TYPE = 'NM_RELEASE_TYPE'


def get_nm_vllm_package_name() -> str:
nm_release_type = os.getenv(NM_RELEASE_TYPE)
package_name = None
if nm_release_type == 'RELEASE':
package_name = 'nm-vllm'
else:
package_name = 'nm-vllm-nightly'
return package_name


def get_vllm_version() -> str:
version = find_version(get_path("vllm", "__init__.py"))

nm_release_type = os.getenv(NM_RELEASE_TYPE)
if nm_release_type != 'RELEASE':
date = datetime.date.today().strftime("%Y%m%d")
version += f'.{date}'

if _is_cuda():
cuda_version = str(get_nvcc_cuda_version())
if cuda_version != MAIN_CUDA_VERSION:
@@ -393,6 +413,9 @@ def _read_requirements(filename: str) -> List[str]:

# UPSTREAM SYNC: needed for sparsity
_sparsity_deps = ["nm-magic-wand-nightly"]
nm_release_type = os.getenv(NM_RELEASE_TYPE)
if nm_release_type == 'RELEASE':
_sparsity_deps = ["nm-magic-wand"]

package_data = {
"vllm": ["py.typed", "model_executor/layers/fused_moe/configs/*.json"]
@@ -402,7 +425,7 @@ def _read_requirements(filename: str) -> List[str]:
package_data["vllm"].append("*.so")

setup(
name="nm-vllm",
name=get_nm_vllm_package_name(),
version=get_vllm_version(),
author="vLLM Team, Neural Magic",
author_email="support@neuralmagic.com",
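Taken together (an illustrative sketch, not part of the diff): with a base version of 0.3.0 read from vllm/__init__.py, the new helpers pick the package name and version as below; the same NM_RELEASE_TYPE check also flips the sparsity dependency between nm-magic-wand-nightly and nm-magic-wand.

import datetime

def nm_package(release_type, base_version="0.3.0"):
    # Condensed sketch of get_nm_vllm_package_name() and
    # get_vllm_version() above, ignoring the CUDA-suffix branch.
    name = "nm-vllm" if release_type == "RELEASE" else "nm-vllm-nightly"
    version = base_version
    if release_type != "RELEASE":
        version += "." + datetime.date.today().strftime("%Y%m%d")
    return name, version

print(nm_package("RELEASE"))  # ('nm-vllm', '0.3.0')
print(nm_package(None))       # e.g. ('nm-vllm-nightly', '0.3.0.20240517')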

1 comment on commit cc9f05c

@github-actions


bigger_is_better

Benchmark suite — Current: cc9f05c vs Previous: 59cf939

request_throughput — VLLM Engine throughput (synthetic); model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096; benchmark_throughput args: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000; GPU: NVIDIA A10G x 1; vllm 0.3.0; Python 3.10.12; torch 2.3.0+cu121
  Current: 3.836775297340475 prompts/s | Previous: 3.8356107992916173 prompts/s | Ratio: 1.00

token_throughput — same configuration as above
  Current: 1473.3217141787425 tokens/s | Previous: 1472.874546927981 tokens/s | Ratio: 1.00

This comment was automatically generated by a workflow using github-action-benchmark.
