[Rename] Fix all other references to torchchat

orionr committed Apr 16, 2024
1 parent 4985c08 commit d0277ff
Showing 9 changed files with 87 additions and 87 deletions.
4 changes: 2 additions & 2 deletions CONTRIBUTING.md
@@ -1,4 +1,4 @@
-# Contributing to torchat
+# Contributing to torchchat
We want to make contributing to this project as easy and transparent as
possible.

@@ -28,5 +28,5 @@ disclosure of security bugs. In those cases, please go through the process
outlined on that page and do not file a public issue.

## License
-By contributing to `torchat`, you agree that your contributions will be licensed
+By contributing to `torchchat`, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree.
96 changes: 48 additions & 48 deletions README.md

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions cli.py
@@ -63,27 +63,27 @@ def cli_args():
parser.add_argument(
"--export",
action="store_true",
-help="Use torchat to export a model.",
+help="Use torchchat to export a model.",
)
parser.add_argument(
"--eval",
action="store_true",
-help="Use torchat to eval a model.",
+help="Use torchchat to eval a model.",
)
parser.add_argument(
"--generate",
action="store_true",
-help="Use torchat to generate a sequence using a model.",
+help="Use torchchat to generate a sequence using a model.",
)
parser.add_argument(
"--chat",
action="store_true",
-help="Use torchat to for an interactive chat session.",
+help="Use torchchat to for an interactive chat session.",
)
parser.add_argument(
"--gui",
action="store_true",
-help="Use torchat to for an interactive gui-chat session.",
+help="Use torchchat to for an interactive gui-chat session.",
)
parser.add_argument("--num-samples", type=int, default=1, help="Number of samples.")
parser.add_argument(
2 changes: 1 addition & 1 deletion generate.py
@@ -29,7 +29,7 @@

@dataclass
class GeneratorArgs:
-prompt: str = "torchat is pronounced torch-chat and is so cool because"
+prompt: str = "torchchat is pronounced torch-chat and is so cool because"
chat: bool = (False,)
gui: bool = (False,)
num_samples: int = (1,)
2 changes: 1 addition & 1 deletion parking_lot/runner_et.yml
@@ -36,7 +36,7 @@ jobs:
pip install zstd
pip install -r requirements.txt
-export TORCHAT_ROOT=${PWD}
+export TORCHCHAT_ROOT=${PWD}
export ENABLE_ET_PYBIND=false
./scripts/install_et.sh $ENABLE_ET_PYBIND
cmake -S ./runner-et -B build/cmake-out -G Ninja
18 changes: 9 additions & 9 deletions quantized_ops.py
@@ -10,14 +10,14 @@
import torch.nn.functional as F
from torch.library import impl, impl_abstract

-torchat_lib = torch.library.Library("torchat", "DEF")
+torchchat_lib = torch.library.Library("torchchat", "DEF")

-torchat_lib.define(
+torchchat_lib.define(
"embedding_int8(Tensor input, Tensor weight, " "Tensor scales) -> Tensor",
)


-@impl(torchat_lib, "embedding_int8", "CompositeExplicitAutograd")
+@impl(torchchat_lib, "embedding_int8", "CompositeExplicitAutograd")
def embedding_int8(
input: torch.Tensor,
weight: torch.Tensor,
@@ -66,13 +66,13 @@ def embedding_int8(
return r.view(indices.size() + (-1,))


-torchat_lib.define(
+torchchat_lib.define(
"linear_int8(Tensor input, Tensor weight, Tensor scales, "
"Tensor bias = None) -> Tensor",
)


-@impl(torchat_lib, "linear_int8", "CompositeExplicitAutograd")
+@impl(torchchat_lib, "linear_int8", "CompositeExplicitAutograd")
def linear_int8(
input: torch.Tensor,
weight: torch.Tensor,
@@ -98,14 +98,14 @@ def linear_int8(
)


-torchat_lib.define(
+torchchat_lib.define(
"linear_int4(Tensor input, Tensor weight, Tensor scales_and_zeros, "
"Tensor bias=None, *, int groupsize, int origin_in_features, "
"int int_features, int out_features, bool padding = True) -> Tensor",
)


-@impl(torchat_lib, "linear_int4", "CompositeExplicitAutograd")
+@impl(torchchat_lib, "linear_int4", "CompositeExplicitAutograd")
def linear_int4(
input: torch.Tensor,
weight: torch.Tensor,
@@ -140,14 +140,14 @@
return c


-torchat_lib.define(
+torchchat_lib.define(
"linear_a8w4dq(Tensor input, Tensor weight, Tensor scales, "
"Tensor zeros, int out_features, int groupsize, "
"dtype precision) -> Tensor",
)


-@impl(torchat_lib, "linear_a8w4dq", "CompositeExplicitAutograd")
+@impl(torchchat_lib, "linear_a8w4dq", "CompositeExplicitAutograd")
def linear_a8w4dq(input, weight, scales, zeros, out_features, groupsize, precision):
x = per_token_dynamic_quant(input)
weight_int8 = weight
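Impact of the quantized_ops.py change: a torch.library.Library registered under the name "torchchat" exposes its operators through the torch.ops.torchchat namespace, so any call site still written against torch.ops.torchat stops resolving after this commit. Below is a minimal sketch, not taken from the commit, assuming quantized_ops.py has been imported from the repository root so the ops are registered; the tensor shapes and values are illustrative only.

import torch

import quantized_ops  # noqa: F401 -- importing the module registers the "torchchat" ops

# Illustrative int8 embedding table, per-row scales, and lookup indices (shapes are assumptions).
weight = torch.randint(-128, 127, (16, 8), dtype=torch.int8)
scales = torch.rand(16, 1)
indices = torch.tensor([0, 3, 7])

# Old call site (pre-rename), which no longer resolves:
#   out = torch.ops.torchat.embedding_int8(indices, weight, scales)
# Updated call site matching the renamed library:
out = torch.ops.torchchat.embedding_int8(indices, weight, scales)
print(out.shape)  # one row of dequantized embeddings per index, e.g. (3, 8) under these assumptions
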
12 changes: 6 additions & 6 deletions runner-et/CMakeLists.txt
@@ -1,14 +1,14 @@
cmake_minimum_required(VERSION 3.24)
set(CMAKE_CXX_STANDARD 17)

-project(Torchat)
+project(Torchchat)

include(CMakePrintHelpers)
-set(TORCHAT_ROOT $ENV{TORCHAT_ROOT})
-cmake_print_variables(TORCHAT_ROOT)
+set(TORCHCHAT_ROOT $ENV{TORCHCHAT_ROOT})
+cmake_print_variables(TORCHCHAT_ROOT)

-find_package(executorch CONFIG REQUIRED PATHS ${TORCHAT_ROOT}/build/install/lib/cmake/ExecuTorch)
-set(_common_include_directories ${TORCHAT_ROOT}/build/src)
+find_package(executorch CONFIG REQUIRED PATHS ${TORCHCHAT_ROOT}/build/install/lib/cmake/ExecuTorch)
+set(_common_include_directories ${TORCHCHAT_ROOT}/build/src)
cmake_print_variables(_common_include_directories)

target_include_directories(executorch INTERFACE ${_common_include_directories}) # Ideally ExecuTorch installation process would do this
@@ -19,7 +19,7 @@ target_link_libraries(
runner_et PRIVATE
executorch
extension_module
-${TORCHAT_ROOT}/build/src/executorch/cmake-out/extension/data_loader/libextension_data_loader.a # This one does not get installed by ExecuTorch
+${TORCHCHAT_ROOT}/build/src/executorch/cmake-out/extension/data_loader/libextension_data_loader.a # This one does not get installed by ExecuTorch
optimized_kernels
portable_kernels
cpublas
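Impact of the CMakeLists.txt change: the runner build now reads TORCHCHAT_ROOT from the environment instead of TORCHAT_ROOT, so existing shells and CI configurations must export the new variable name. A minimal local sketch under that assumption, mirroring the runner_et.yml steps above; the final cmake --build invocation is a standard step assumed here and is not shown in the excerpt.

# From the repository root: export the renamed variable, then build the ExecuTorch runner.
export TORCHCHAT_ROOT=${PWD}
export ENABLE_ET_PYBIND=false
./scripts/install_et.sh $ENABLE_ET_PYBIND   # clones and installs ExecuTorch under ${TORCHCHAT_ROOT}/build
cmake -S ./runner-et -B build/cmake-out -G Ninja
cmake --build build/cmake-out
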
12 changes: 6 additions & 6 deletions scripts/android_example.sh
@@ -7,8 +7,8 @@

set -eu

-cd ${TORCHAT_ROOT}
-echo "Inside: $TORCHAT_ROOT"
+cd ${TORCHCHAT_ROOT}
+echo "Inside: $TORCHCHAT_ROOT"

which curl

@@ -25,8 +25,8 @@ else
exit -1
fi

-mkdir -p ${TORCHAT_ROOT}/build/android
-pushd ${TORCHAT_ROOT}/build/android
+mkdir -p ${TORCHCHAT_ROOT}/build/android
+pushd ${TORCHCHAT_ROOT}/build/android

echo "Download Java 17"
curl "${JAVA_URL}" -o jdk-17.0.10.tar.gz
@@ -69,8 +69,8 @@ pushd build/src/executorch/examples/demo-apps/android/LlamaDemo
./gradlew :app:build
popd

-avdmanager create avd --name "torchat" --package "system-images;android-34;google_apis;${ANDROID_ABI}"
-sdk/emulator/emulator @torchat &
+avdmanager create avd --name "torchchat" --package "system-images;android-34;google_apis;${ANDROID_ABI}"
+sdk/emulator/emulator @torchchat &

adb wait-for-device
adb shell mkdir /data/local/tmp/llama
18 changes: 9 additions & 9 deletions scripts/install_et.sh
@@ -14,25 +14,25 @@ install_pip_dependencies() {
pip install cmake
pip install ninja
pip install zstd
-pushd ${TORCHAT_ROOT}
+pushd ${TORCHCHAT_ROOT}
pip install -r ./requirements.txt
popd
}

install_executorch() {
-echo "Cloning executorch to ${TORCHAT_ROOT}/build/src"
-rm -rf ${TORCHAT_ROOT}/build
-mkdir -p ${TORCHAT_ROOT}/build/src
-pushd ${TORCHAT_ROOT}/build/src
+echo "Cloning executorch to ${TORCHCHAT_ROOT}/build/src"
+rm -rf ${TORCHCHAT_ROOT}/build
+mkdir -p ${TORCHCHAT_ROOT}/build/src
+pushd ${TORCHCHAT_ROOT}/build/src
git clone https://github.com/pytorch/executorch.git
cd executorch
echo "Install executorch: submodule update"
git submodule sync
git submodule update --init

echo "Applying fixes"
-cp ${TORCHAT_ROOT}/scripts/fixes_et/module.cpp ${TORCHAT_ROOT}/build/src/executorch/extension/module/module.cpp # ET uses non-standard C++ that does not compile in GCC
-cp ${TORCHAT_ROOT}/scripts/fixes_et/managed_tensor.h ${TORCHAT_ROOT}/build/src/executorch/extension/runner_util/managed_tensor.h # ET is missing headers for vector/memory. This causes downstream issues when building runner-et.
+cp ${TORCHCHAT_ROOT}/scripts/fixes_et/module.cpp ${TORCHCHAT_ROOT}/build/src/executorch/extension/module/module.cpp # ET uses non-standard C++ that does not compile in GCC
+cp ${TORCHCHAT_ROOT}/scripts/fixes_et/managed_tensor.h ${TORCHCHAT_ROOT}/build/src/executorch/extension/runner_util/managed_tensor.h # ET is missing headers for vector/memory. This causes downstream issues when building runner-et.

echo "Building and installing python libraries"
echo "Building and installing python libraries"
@@ -50,14 +50,14 @@ install_executorch() {
mkdir cmake-out
cmake -DCMAKE_BUILD_TYPE=Release -DEXECUTORCH_BUILD_OPTIMIZED=ON -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON -DEXECUTORCH_BUILD_XNNPACK=ON -S . -B cmake-out -G Ninja
cmake --build cmake-out
-cmake --install cmake-out --prefix ${TORCHAT_ROOT}/build/install
+cmake --install cmake-out --prefix ${TORCHCHAT_ROOT}/build/install
popd
}


ENABLE_ET_PYBIND="${1:-true}"

-pushd ${TORCHAT_ROOT}
+pushd ${TORCHCHAT_ROOT}
install_pip_dependencies
install_executorch $ENABLE_ET_PYBIND
popd
