Skip to content

Commit fc1b714

Browse files
larryliu0820 and facebook-github-bot
authored and committed
Remove old tokenizer/ directory in ExecuTorch
Summary: See what happens Differential Revision: D72007597
1 parent 5531a0e commit fc1b714

36 files changed

+16
-130937
lines changed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -751,7 +751,7 @@ if(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR)
751751
endif()
752752

753753
if(EXECUTORCH_BUILD_EXTENSION_LLM)
754-
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizer)
754+
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/tokenizers)
755755
endif()
756756

757757
if(EXECUTORCH_BUILD_EXTENSION_MODULE)

examples/models/llama/TARGETS

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -202,7 +202,7 @@ runtime.python_library(
202202
":export_library",
203203
"//executorch/examples/models/llama/tokenizer:tiktoken_py",
204204
"//executorch/extension/llm/export:export_lib",
205-
"//executorch/extension/llm/tokenizer:tokenizer_py_lib",
205+
"//pytorch/tokenizers/pytorch_tokenizers:tokenizers",
206206
"//executorch/extension/pybindings:portable_lib",
207207
],
208208
)

examples/models/llama/eval_llama_lib.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,13 @@
1515
from executorch.examples.models.llama.export_llama_lib import (
1616
get_quantizer_and_quant_params,
1717
)
18-
from executorch.examples.models.llama.tokenizer.tiktoken import Tokenizer as Tiktoken
18+
from pytorch_tokenizers.tiktoken import TiktokenTokenizer as Tiktoken
1919

2020
from executorch.extension.llm.export.builder import LLMEdgeManager
21-
from executorch.extension.llm.tokenizer.tokenizer import (
22-
Tokenizer as SentencePieceTokenizer,
21+
from pytorch_tokenizers.llama2c import (
22+
Llama2cTokenizer as SentencePieceTokenizer,
2323
)
24-
from executorch.extension.llm.tokenizer.utils import get_tokenizer
24+
from pytorch_tokenizers import get_tokenizer
2525
from lm_eval.evaluator import simple_evaluate
2626
from torch.nn import CrossEntropyLoss
2727
from tqdm import tqdm

examples/models/llama/runner/generation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
import torch
1212

13-
from executorch.extension.llm.tokenizer.utils import get_tokenizer
13+
from pytorch_tokenizers import get_tokenizer
1414

1515

1616
def sample_top_p(probs, p):

examples/models/llama/tokenizer/targets.bzl

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ def define_common_targets():
1616
],
1717
exported_deps = [
1818
"//pytorch/tokenizers:tiktoken",
19-
"//executorch/extension/llm/tokenizer:tiktoken", # TODO: remove
2019
],
2120
visibility = [
2221
"@EXECUTORCH_CLIENTS",

examples/models/llama/tokenizer/test/test_tiktoken.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
#include <vector>
1212

13-
#include <executorch/extension/llm/tokenizer/tiktoken.h>
13+
#include <pytorch/tokenizers/tiktoken.h>
1414

1515
#include <gtest/gtest.h>
1616

@@ -21,9 +21,9 @@
2121
using namespace ::testing;
2222

2323
using ::example::Version;
24-
using ::executorch::extension::llm::Tokenizer;
25-
using ::executorch::runtime::Error;
26-
using ::executorch::runtime::Result;
24+
using ::tokenizers::Tokenizer;
25+
using ::tokenizers::Error;
26+
using ::tokenizers::Result;
2727

2828
static std::string get_resource_path(const std::string& name) {
2929
#ifdef EXECUTORCH_FB_BUCK
@@ -36,7 +36,7 @@ static std::string get_resource_path(const std::string& name) {
3636
class MultimodalTiktokenV5ExtensionTest : public Test {
3737
public:
3838
void SetUp() override {
39-
tokenizer_ = std::make_unique<executorch::extension::llm::Tiktoken>(
39+
tokenizer_ = std::make_unique<tokenizers::Tiktoken>(
4040
example::get_multimodal_special_tokens(), 0, 1);
4141
modelPath_ = get_resource_path("test_tiktoken_tokenizer.model");
4242
}

examples/qualcomm/oss_scripts/llama/CMakeLists.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
# LICENSE file in the root directory of this source tree.
66

77
# model sharding with custom op
8-
set(CUSTOM_OP_SRCS_FILE
8+
set(CUSTOM_OP_SRCS_FILE
99
"${EXECUTORCH_SOURCE_DIR}/extension/llm/custom_ops/op_fallback.cpp"
1010
)
1111
add_library(custom_ops ${CUSTOM_OP_SRCS_FILE})
@@ -35,7 +35,7 @@ list(
3535
list(
3636
APPEND
3737
_llama_runner__srcs
38-
${CMAKE_CURRENT_SOURCE_DIR}/../../../../extension/llm/tokenizer/tiktoken.cpp
38+
${CMAKE_CURRENT_SOURCE_DIR}/../../../../extension/llm/tokenizers/src/tiktoken.cpp
3939
${CMAKE_CURRENT_SOURCE_DIR}/../../../models/llama/tokenizer/llama_tiktoken.cpp
4040
)
4141

extension/llm/export/TARGETS

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,6 @@ runtime.python_library(
4040
"//executorch/exir:lib",
4141
"//executorch/exir/backend:backend_details",
4242
"//executorch/extension/export_util:export_util",
43-
"//executorch/extension/llm/tokenizer:tokenizer_py_lib",
43+
"//pytorch/tokenizers/pytorch_tokenizers:tokenizers",
4444
],
4545
)

extension/llm/export/builder.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@
3535
from executorch.extension.export_util.utils import export_to_edge, save_pte_program
3636

3737
from executorch.extension.llm.export.export_passes import RemoveRedundantTransposes
38-
from executorch.extension.llm.tokenizer.utils import get_tokenizer
38+
from pytorch_tokenizers import get_tokenizer
3939
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
4040
from torch.ao.quantization.quantizer import Quantizer
4141
from torch.ao.quantization.quantizer.composable_quantizer import ComposableQuantizer

extension/llm/tokenizer/CMakeLists.txt

Lines changed: 0 additions & 62 deletions
This file was deleted.

0 commit comments

Comments (0)