From 0b5b4754eb68cc19ba9919bfd9b1a57b8849c499 Mon Sep 17 00:00:00 2001 From: ezerhouni <61225408+ezerhouni@users.noreply.github.com> Date: Fri, 22 Jul 2022 12:03:14 +0200 Subject: [PATCH] Rename models to follow icefall convention (#71) * Rename to follow icefall convention * Fix github CI and docs * Modify name in docs and README * Rename wenet ci --- .../run-streaming-conformer-test.yaml | 4 ++-- .../run-streaming-conv-emformer-test.yaml | 4 ++-- .github/workflows/run-test-aishell.yaml | 4 ++-- .github/workflows/run-test-windows-cpu.yaml | 4 ++-- .github/workflows/run-test.yaml | 4 ++-- ...tspeech-streaming-conformer-rnnt-test.yaml | 4 ++-- README.md | 20 +++++++++---------- docs/source/offline_asr/conformer/aishell.rst | 12 +++++------ .../code/start-the-client-librispeech.sh | 2 +- .../conformer/code/start-the-client.sh | 2 +- .../code/start-the-server-librispeech.sh | 2 +- .../conformer/code/start-the-server.sh | 2 +- .../offline_asr/conformer/librispeech.rst | 12 +++++------ .../streaming_asr/conv_emformer/server.rst | 4 ++-- sherpa/bin/README.md | 12 +++++------ .../decode.py | 0 .../streaming_client.py | 0 .../streaming_server.py | 0 .../__init__.py | 0 .../decode.py | 0 .../decode_manifest.py | 0 .../offline_asr.py | 4 ++-- .../offline_client.py | 0 .../offline_server.py | 0 .../decode.py | 0 .../streaming_client.py | 0 .../streaming_server.py | 0 27 files changed, 48 insertions(+), 48 deletions(-) rename sherpa/bin/{conv_emformer_transducer_stateless => conv_emformer_transducer_stateless2}/decode.py (100%) rename sherpa/bin/{conv_emformer_transducer_stateless => conv_emformer_transducer_stateless2}/streaming_client.py (100%) rename sherpa/bin/{conv_emformer_transducer_stateless => conv_emformer_transducer_stateless2}/streaming_server.py (100%) rename sherpa/bin/{conformer_rnnt => pruned_transducer_statelessX}/__init__.py (100%) rename sherpa/bin/{conformer_rnnt => pruned_transducer_statelessX}/decode.py (100%) rename sherpa/bin/{conformer_rnnt => pruned_transducer_statelessX}/decode_manifest.py (100%) rename sherpa/bin/{conformer_rnnt => pruned_transducer_statelessX}/offline_asr.py (99%) rename sherpa/bin/{conformer_rnnt => pruned_transducer_statelessX}/offline_client.py (100%) rename sherpa/bin/{conformer_rnnt => pruned_transducer_statelessX}/offline_server.py (100%) rename sherpa/bin/{streaming_conformer_rnnt => streaming_pruned_transducer_statelessX}/decode.py (100%) rename sherpa/bin/{streaming_conformer_rnnt => streaming_pruned_transducer_statelessX}/streaming_client.py (100%) rename sherpa/bin/{streaming_conformer_rnnt => streaming_pruned_transducer_statelessX}/streaming_server.py (100%) diff --git a/.github/workflows/run-streaming-conformer-test.yaml b/.github/workflows/run-streaming-conformer-test.yaml index 0315752a..4af33c01 100644 --- a/.github/workflows/run-streaming-conformer-test.yaml +++ b/.github/workflows/run-streaming-conformer-test.yaml @@ -118,7 +118,7 @@ jobs: export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH - ./sherpa/bin/streaming_conformer_rnnt/streaming_server.py \ + ./sherpa/bin/streaming_pruned_transducer_statelessX/streaming_server.py \ --port 6006 \ --max-batch-size 50 \ --max-wait-ms 5 \ @@ -133,7 +133,7 @@ jobs: - name: Start client shell: bash run: | - ./sherpa/bin/streaming_conformer_rnnt/streaming_client.py \ + ./sherpa/bin/streaming_pruned_transducer_statelessX/streaming_client.py \ --server-addr localhost \ --server-port 6006 \ 
./icefall-asr-librispeech-pruned-stateless-streaming-conformer-rnnt4-2022-06-10/test_wavs/1221-135766-0002.wav diff --git a/.github/workflows/run-streaming-conv-emformer-test.yaml b/.github/workflows/run-streaming-conv-emformer-test.yaml index 3a2aba5e..70bbe7b9 100644 --- a/.github/workflows/run-streaming-conv-emformer-test.yaml +++ b/.github/workflows/run-streaming-conv-emformer-test.yaml @@ -118,7 +118,7 @@ jobs: export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH - ./sherpa/bin/conv_emformer_transducer_stateless/streaming_server.py \ + ./sherpa/bin/conv_emformer_transducer_stateless2/streaming_server.py \ --port 6006 \ --max-batch-size 50 \ --max-wait-ms 5 \ @@ -133,7 +133,7 @@ jobs: - name: Start client shell: bash run: | - ./sherpa/bin/conv_emformer_transducer_stateless/streaming_client.py \ + ./sherpa/bin/conv_emformer_transducer_stateless2/streaming_client.py \ --server-addr localhost \ --server-port 6006 \ ./icefall-asr-librispeech-conv-emformer-transducer-stateless2-2022-07-05/test_wavs/1221-135766-0002.wav diff --git a/.github/workflows/run-test-aishell.yaml b/.github/workflows/run-test-aishell.yaml index 25836ce4..32d62995 100644 --- a/.github/workflows/run-test-aishell.yaml +++ b/.github/workflows/run-test-aishell.yaml @@ -119,7 +119,7 @@ jobs: export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH - sherpa/bin/conformer_rnnt/offline_server.py \ + sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6006 \ --num-device 0 \ --max-batch-size 10 \ @@ -135,7 +135,7 @@ jobs: - name: Start client shell: bash run: | - sherpa/bin/conformer_rnnt/offline_client.py \ + sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6006 \ ./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0121.wav \ diff --git a/.github/workflows/run-test-windows-cpu.yaml b/.github/workflows/run-test-windows-cpu.yaml index 96dee117..d471eab9 100644 --- a/.github/workflows/run-test-windows-cpu.yaml +++ b/.github/workflows/run-test-windows-cpu.yaml @@ -84,7 +84,7 @@ jobs: export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH - sherpa/bin/conformer_rnnt/offline_server.py \ + sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6006 \ --num-device 0 \ --max-batch-size 10 \ @@ -99,7 +99,7 @@ jobs: - name: Start client shell: bash run: | - sherpa/bin/conformer_rnnt/offline_client.py \ + sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6006 \ icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1089-134686-0001.wav \ diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 4558c5ed..7648acd8 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -119,7 +119,7 @@ jobs: export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH - sherpa/bin/conformer_rnnt/offline_server.py \ + sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6006 \ --num-device 0 \ --max-batch-size 10 \ @@ -134,7 +134,7 @@ jobs: - name: Start client shell: bash run: | - sherpa/bin/conformer_rnnt/offline_client.py \ + sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6006 \ 
icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1089-134686-0001.wav \ diff --git a/.github/workflows/run-wenetspeech-streaming-conformer-rnnt-test.yaml b/.github/workflows/run-wenetspeech-streaming-conformer-rnnt-test.yaml index fe41d75d..a6c189a4 100644 --- a/.github/workflows/run-wenetspeech-streaming-conformer-rnnt-test.yaml +++ b/.github/workflows/run-wenetspeech-streaming-conformer-rnnt-test.yaml @@ -107,7 +107,7 @@ jobs: run: | export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH - ./sherpa/bin/streaming_conformer_rnnt/streaming_server.py \ + ./sherpa/bin/streaming_pruned_transducer_statelessX/streaming_server.py \ --port 6006 \ --max-batch-size 50 \ --max-wait-ms 5 \ @@ -120,7 +120,7 @@ jobs: - name: Start client shell: bash run: | - ./sherpa/bin/streaming_conformer_rnnt/streaming_client.py \ + ./sherpa/bin/streaming_pruned_transducer_statelessX/streaming_client.py \ --server-addr localhost \ --server-port 6006 \ ./icefall_asr_wenetspeech_pruned_transducer_stateless5_streaming/test_wavs/DEV_T0000000001.wav diff --git a/README.md b/README.md index d8642800..a33a030f 100644 --- a/README.md +++ b/README.md @@ -184,7 +184,7 @@ following command: # If you provide a bpe.model, e.g., for LibriSpeech, # you can use the following command: # -sherpa/bin/conformer_rnnt/offline_server.py \ +sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6006 \ --num-device 1 \ --max-batch-size 10 \ @@ -200,7 +200,7 @@ sherpa/bin/conformer_rnnt/offline_server.py \ # If you provide a tokens.txt, e.g., for aishell, # you can use the following command: # -sherpa/bin/conformer_rnnt/offline_server.py \ +sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6006 \ --num-device 1 \ --max-batch-size 10 \ @@ -212,7 +212,7 @@ sherpa/bin/conformer_rnnt/offline_server.py \ --token-filename ./path/to/data/lang_char/tokens.txt ``` -You can use `./sherpa/bin/conformer_rnnt/offline_server.py --help` to view the help message. +You can use `./sherpa/bin/pruned_transducer_statelessX/offline_server.py --help` to view the help message. **HINT**: If you don't have GPU, please set `--num-device` to `0`. @@ -235,7 +235,7 @@ The following shows how to use the above pretrained models to start the server. git lfs install git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13 -sherpa/bin/conformer_rnnt/offline_server.py \ +sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6006 \ --num-device 1 \ --max-batch-size 10 \ @@ -253,7 +253,7 @@ sherpa/bin/conformer_rnnt/offline_server.py \ git lfs install git clone https://huggingface.co/csukuangfj/icefall-aishell-pruned-transducer-stateless3-2022-06-20 -sherpa/bin/conformer_rnnt/offline_server.py \ +sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6006 \ --num-device 1 \ --max-batch-size 10 \ @@ -269,21 +269,21 @@ sherpa/bin/conformer_rnnt/offline_server.py \ After starting the server, you can use the following command to start the client: ```bash -./sherpa/bin/conformer_rnnt/offline_client.py \ +./sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6006 \ /path/to/foo.wav \ /path/to/bar.wav ``` -You can use `./sherpa/bin/conformer_rnnt/offline_client.py --help` to view the usage message. +You can use `./sherpa/bin/pruned_transducer_statelessX/offline_client.py --help` to view the usage message. 
The following shows how to use the client to send some test waves to the server for recognition. ```bash # If you use the pretrained model from the LibriSpeech dataset -sherpa/bin/conformer_rnnt/offline_client.py \ +sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6006 \ icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13//test_wavs/1089-134686-0001.wav \ @@ -293,7 +293,7 @@ sherpa/bin/conformer_rnnt/offline_client.py \ ```bash # If you use the pretrained model from the aishell dataset -sherpa/bin/conformer_rnnt/offline_client.py \ +sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6006 \ ./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0121.wav \ @@ -303,7 +303,7 @@ sherpa/bin/conformer_rnnt/offline_client.py \ #### RTF test -We provide a demo [./sherpa/bin/conformer_rnnt/decode_manifest.py](./sherpa/bin/conformer_rnnt/decode_manifest.py) +We provide a demo [./sherpa/bin/pruned_transducer_statelessX/decode_manifest.py](./sherpa/bin/pruned_transducer_statelessX/decode_manifest.py) to decode the `test-clean` dataset from the LibriSpeech corpus. It creates 50 connections to the server using websockets and sends audio files diff --git a/docs/source/offline_asr/conformer/aishell.rst b/docs/source/offline_asr/conformer/aishell.rst index 3ead2753..58cc32d3 100644 --- a/docs/source/offline_asr/conformer/aishell.rst +++ b/docs/source/offline_asr/conformer/aishell.rst @@ -80,7 +80,7 @@ Start the server ---------------- The entry point of the server is -`sherpa/bin/conformer_rnnt/offline_server.py `_. +`sherpa/bin/pruned_transducer_statelessX/offline_server.py `_. One thing worth mentioning is that the entry point is a Python script. In `sherpa`_, the server is implemented using `asyncio`_, where **IO-bound** @@ -100,12 +100,12 @@ To view the usage information of the server, you can use: .. code-block:: bash - $ ./sherpa/bin/conformer_rnnt/offline_server.py --help + $ ./sherpa/bin/pruned_transducer_statelessX/offline_server.py --help which gives the following output: .. literalinclude:: ./code/offline-server-help.txt - :caption: Output of ``./sherpa/bin/conformer_rnnt/offline_server.py --help`` + :caption: Output of ``./sherpa/bin/pruned_transducer_statelessX/offline_server.py --help`` The following shows an example about how to use the above pre-trained model to start the server: @@ -128,16 +128,16 @@ Start the client ---------------- We also provide a Python script -`sherpa/bin/conformer_rnnt/offline_client.py `_ for the client. +`sherpa/bin/pruned_transducer_statelessX/offline_client.py `_ for the client. .. code-block:: bash - ./sherpa/bin/conformer_rnnt/offline_client.py --help + ./sherpa/bin/pruned_transducer_statelessX/offline_client.py --help shows the following help information: .. literalinclude:: ./code/offline-client-help.txt - :caption: Output of ``./sherpa/bin/conformer_rnnt/offline_client.py --help`` + :caption: Output of ``./sherpa/bin/pruned_transducer_statelessX/offline_client.py --help`` We provide some test waves in the git repo you just cloned. 
The following command shows you how to start the client: diff --git a/docs/source/offline_asr/conformer/code/start-the-client-librispeech.sh b/docs/source/offline_asr/conformer/code/start-the-client-librispeech.sh index 6bcb6095..5e9fc5f4 100644 --- a/docs/source/offline_asr/conformer/code/start-the-client-librispeech.sh +++ b/docs/source/offline_asr/conformer/code/start-the-client-librispeech.sh @@ -2,7 +2,7 @@ export CUDA_VISIBLE_DEVICES=0 -sherpa/bin/conformer_rnnt/offline_client.py \ +sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6010 \ ./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1089-134686-0001.wav \ diff --git a/docs/source/offline_asr/conformer/code/start-the-client.sh b/docs/source/offline_asr/conformer/code/start-the-client.sh index 59093216..540785c6 100644 --- a/docs/source/offline_asr/conformer/code/start-the-client.sh +++ b/docs/source/offline_asr/conformer/code/start-the-client.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -sherpa/bin/conformer_rnnt/offline_client.py \ +sherpa/bin/pruned_transducer_statelessX/offline_client.py \ --server-addr localhost \ --server-port 6010 \ ./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0121.wav \ diff --git a/docs/source/offline_asr/conformer/code/start-the-server-librispeech.sh b/docs/source/offline_asr/conformer/code/start-the-server-librispeech.sh index 7e84ea5c..06af17cc 100644 --- a/docs/source/offline_asr/conformer/code/start-the-server-librispeech.sh +++ b/docs/source/offline_asr/conformer/code/start-the-server-librispeech.sh @@ -5,7 +5,7 @@ export CUDA_VISIBLE_DEVICES=0 nn_model_filename=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/exp/cpu_jit-torch-1.6.0.pt bpe_model=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/data/lang_bpe_500/bpe.model -sherpa/bin/conformer_rnnt/offline_server.py \ +sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6010 \ --num-device 1 \ --max-batch-size 10 \ diff --git a/docs/source/offline_asr/conformer/code/start-the-server.sh b/docs/source/offline_asr/conformer/code/start-the-server.sh index 868426f4..aeccdca9 100644 --- a/docs/source/offline_asr/conformer/code/start-the-server.sh +++ b/docs/source/offline_asr/conformer/code/start-the-server.sh @@ -5,7 +5,7 @@ export CUDA_VISIBLE_DEVICES=1 nn_model_filename=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/exp/cpu_jit-epoch-29-avg-5-torch-1.6.0.pt token_filename=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/data/lang_char/tokens.txt -sherpa/bin/conformer_rnnt/offline_server.py \ +sherpa/bin/pruned_transducer_statelessX/offline_server.py \ --port 6010 \ --num-device 1 \ --max-batch-size 10 \ diff --git a/docs/source/offline_asr/conformer/librispeech.rst b/docs/source/offline_asr/conformer/librispeech.rst index 836a90e0..3fb714ed 100644 --- a/docs/source/offline_asr/conformer/librispeech.rst +++ b/docs/source/offline_asr/conformer/librispeech.rst @@ -81,7 +81,7 @@ Start the server ---------------- The entry point of the server is -`sherpa/bin/conformer_rnnt/offline_server.py `_. +`sherpa/bin/pruned_transducer_statelessX/offline_server.py `_. One thing worth mentioning is that the entry point is a Python script. In `sherpa`_, the server is implemented using `asyncio`_, where **IO-bound** @@ -101,12 +101,12 @@ To view the usage information of the server, you can use: .. 
code-block:: bash - $ ./sherpa/bin/conformer_rnnt/offline_server.py --help + $ ./sherpa/bin/pruned_transducer_statelessX/offline_server.py --help which gives the following output: .. literalinclude:: ./code/offline-server-help.txt - :caption: Output of ``./sherpa/bin/conformer_rnnt/offline_server.py --help`` + :caption: Output of ``./sherpa/bin/pruned_transducer_statelessX/offline_server.py --help`` The following shows an example about how to use the above pre-trained model to start the server: @@ -129,16 +129,16 @@ Start the client ---------------- We also provide a Python script -`sherpa/bin/conformer_rnnt/offline_client.py `_ for the client. +`sherpa/bin/pruned_transducer_statelessX/offline_client.py `_ for the client. .. code-block:: bash - ./sherpa/bin/conformer_rnnt/offline_client.py --help + ./sherpa/bin/pruned_transducer_statelessX/offline_client.py --help shows the following help information: .. literalinclude:: ./code/offline-client-help.txt - :caption: Output of ``./sherpa/bin/conformer_rnnt/offline_client.py --help`` + :caption: Output of ``./sherpa/bin/pruned_transducer_statelessX/offline_client.py --help`` We provide some test waves in the git repo you just cloned. The following command shows you how to start the client: diff --git a/docs/source/streaming_asr/conv_emformer/server.rst b/docs/source/streaming_asr/conv_emformer/server.rst index 228a3557..a509fbf0 100644 --- a/docs/source/streaming_asr/conv_emformer/server.rst +++ b/docs/source/streaming_asr/conv_emformer/server.rst @@ -19,7 +19,7 @@ Usage .. code-block:: cd /path/to/sherpa - ./sherpa/bin/conv_emformer_transducer_stateless/streaming_server.py --help + ./sherpa/bin/conv_emformer_transducer_stateless2/streaming_server.py --help shows the usage message. @@ -51,7 +51,7 @@ The following shows you how to start the server with the above pretrained model. git lfs install git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-conv-emformer-transducer-stateless2-2022-07-05 - ./sherpa/bin/conv_emformer_transducer_stateless/streaming_server.py \ + ./sherpa/bin/conv_emformer_transducer_stateless2/streaming_server.py \ --port 6007 \ --max-batch-size 50 \ --max-wait-ms 5 \ diff --git a/sherpa/bin/README.md b/sherpa/bin/README.md index 033033e0..29dd68c0 100644 --- a/sherpa/bin/README.md +++ b/sherpa/bin/README.md @@ -8,9 +8,9 @@ where `X>=2`. | Filename | Description | |----------|-------------| -| [conformer_rnnt/offline_server.py](./conformer_rnnt/offline_server.py) | The server for offline ASR | -| [conformer_rnnt/offline_client.py](./conformer/offline_client.py) | The client for offline ASR | -| [conformer_rnnt/decode_manifest.py](./conformer_rnnt/decode_manifest.py) | Demo for computing RTF and WER| +| [pruned_transducer_statelessX/offline_server.py](./pruned_transducer_statelessX/offline_server.py) | The server for offline ASR | +| [pruned_transducer_statelessX/offline_client.py](./pruned_transducer_statelessX/offline_client.py) | The client for offline ASR | +| [pruned_transducer_statelessX/decode_manifest.py](./pruned_transducer_statelessX/decode_manifest.py) | Demo for computing RTF and WER| If you want to test the offline server without training your own model, you can download pretrained models on the LibriSpeech corpus by visiting @@ -42,9 +42,9 @@ where `X>=2`. And the model is trained for streaming recognition. 
| Filename | Description | |----------|-------------| -| [streaming_conformer_rnnt/streaming_conformer_rnnt/streaming_server.py](./streaming_conformer_rnnt/streaming_server.py) | The server for streaming ASR | -| [streaming_conformer_rnnt/streaming_client.py](./streaming_conformer_rnnt/streaming_client.py) | The client for streaming ASR | -| [streaming_conformer_rnnt/decode.py](./streaming_conformer_rnnt/decode.py) | Utilities for streaming ASR| +| [streaming_pruned_transducer_statelessX/streaming_server.py](./streaming_pruned_transducer_statelessX/streaming_server.py) | The server for streaming ASR | +| [streaming_pruned_transducer_statelessX/streaming_client.py](./streaming_pruned_transducer_statelessX/streaming_client.py) | The client for streaming ASR | +| [streaming_pruned_transducer_statelessX/decode.py](./streaming_pruned_transducer_statelessX/decode.py) | Utilities for streaming ASR| You can use the pretrained model from diff --git a/sherpa/bin/conv_emformer_transducer_stateless/decode.py b/sherpa/bin/conv_emformer_transducer_stateless2/decode.py similarity index 100% rename from sherpa/bin/conv_emformer_transducer_stateless/decode.py rename to sherpa/bin/conv_emformer_transducer_stateless2/decode.py diff --git a/sherpa/bin/conv_emformer_transducer_stateless/streaming_client.py b/sherpa/bin/conv_emformer_transducer_stateless2/streaming_client.py similarity index 100% rename from sherpa/bin/conv_emformer_transducer_stateless/streaming_client.py rename to sherpa/bin/conv_emformer_transducer_stateless2/streaming_client.py diff --git a/sherpa/bin/conv_emformer_transducer_stateless/streaming_server.py b/sherpa/bin/conv_emformer_transducer_stateless2/streaming_server.py similarity index 100% rename from sherpa/bin/conv_emformer_transducer_stateless/streaming_server.py rename to sherpa/bin/conv_emformer_transducer_stateless2/streaming_server.py diff --git a/sherpa/bin/conformer_rnnt/__init__.py b/sherpa/bin/pruned_transducer_statelessX/__init__.py similarity index 100% rename from sherpa/bin/conformer_rnnt/__init__.py rename to sherpa/bin/pruned_transducer_statelessX/__init__.py diff --git a/sherpa/bin/conformer_rnnt/decode.py b/sherpa/bin/pruned_transducer_statelessX/decode.py similarity index 100% rename from sherpa/bin/conformer_rnnt/decode.py rename to sherpa/bin/pruned_transducer_statelessX/decode.py diff --git a/sherpa/bin/conformer_rnnt/decode_manifest.py b/sherpa/bin/pruned_transducer_statelessX/decode_manifest.py similarity index 100% rename from sherpa/bin/conformer_rnnt/decode_manifest.py rename to sherpa/bin/pruned_transducer_statelessX/decode_manifest.py diff --git a/sherpa/bin/conformer_rnnt/offline_asr.py b/sherpa/bin/pruned_transducer_statelessX/offline_asr.py similarity index 99% rename from sherpa/bin/conformer_rnnt/offline_asr.py rename to sherpa/bin/pruned_transducer_statelessX/offline_asr.py index eb3ed944..96ff0c3f 100755 --- a/sherpa/bin/conformer_rnnt/offline_asr.py +++ b/sherpa/bin/pruned_transducer_statelessX/offline_asr.py @@ -57,7 +57,7 @@ wav2=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1221-135766-0001.wav wav3=./icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/test_wavs/1221-135766-0002.wav - sherpa/bin/conformer_rnnt/offline_asr.py \ + sherpa/bin/pruned_transducer_statelessX/offline_asr.py \ --nn-model-filename $nn_model_filename \ --bpe-model $bpe_model \ $wav1 \ @@ -77,7 +77,7 @@ wav2=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0122.wav 
wav3=./icefall-aishell-pruned-transducer-stateless3-2022-06-20/test_wavs/BAC009S0764W0123.wav - sherpa/bin/conformer_rnnt/offline_asr.py \ + sherpa/bin/pruned_transducer_statelessX/offline_asr.py \ --nn-model-filename $nn_model_filename \ --token-filename $token_filename \ $wav1 \ diff --git a/sherpa/bin/conformer_rnnt/offline_client.py b/sherpa/bin/pruned_transducer_statelessX/offline_client.py similarity index 100% rename from sherpa/bin/conformer_rnnt/offline_client.py rename to sherpa/bin/pruned_transducer_statelessX/offline_client.py diff --git a/sherpa/bin/conformer_rnnt/offline_server.py b/sherpa/bin/pruned_transducer_statelessX/offline_server.py similarity index 100% rename from sherpa/bin/conformer_rnnt/offline_server.py rename to sherpa/bin/pruned_transducer_statelessX/offline_server.py diff --git a/sherpa/bin/streaming_conformer_rnnt/decode.py b/sherpa/bin/streaming_pruned_transducer_statelessX/decode.py similarity index 100% rename from sherpa/bin/streaming_conformer_rnnt/decode.py rename to sherpa/bin/streaming_pruned_transducer_statelessX/decode.py diff --git a/sherpa/bin/streaming_conformer_rnnt/streaming_client.py b/sherpa/bin/streaming_pruned_transducer_statelessX/streaming_client.py similarity index 100% rename from sherpa/bin/streaming_conformer_rnnt/streaming_client.py rename to sherpa/bin/streaming_pruned_transducer_statelessX/streaming_client.py diff --git a/sherpa/bin/streaming_conformer_rnnt/streaming_server.py b/sherpa/bin/streaming_pruned_transducer_statelessX/streaming_server.py similarity index 100% rename from sherpa/bin/streaming_conformer_rnnt/streaming_server.py rename to sherpa/bin/streaming_pruned_transducer_statelessX/streaming_server.py
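
For anyone replaying this change on a local checkout instead of cherry-picking the
commit, the renames can be reproduced with `git mv` (so history follows the files)
plus a search-and-replace over the references. This is a hedged sketch, not the
commands actually used in the PR: it assumes a checkout at the pre-patch commit and
GNU sed (`-i` and `\b` behave differently on BSD sed). `X` stays a literal
placeholder, exactly as in the new directory names.

    cd /path/to/sherpa

    # Move the script directories; git mv keeps history attached to the files.
    git mv sherpa/bin/conformer_rnnt sherpa/bin/pruned_transducer_statelessX
    git mv sherpa/bin/streaming_conformer_rnnt sherpa/bin/streaming_pruned_transducer_statelessX
    git mv sherpa/bin/conv_emformer_transducer_stateless sherpa/bin/conv_emformer_transducer_stateless2

    # Update every reference in CI workflows, docs, and READMEs. Replacing
    # conformer_rnnt also rewrites streaming_conformer_rnnt correctly, since the
    # streaming name only adds a prefix. \b keeps the conv-emformer rule from
    # touching names that already end in 2.
    grep -rlE 'conformer_rnnt|conv_emformer_transducer_stateless' \
        .github docs sherpa README.md \
      | xargs sed -i \
          -e 's/conformer_rnnt/pruned_transducer_statelessX/g' \
          -e 's/conv_emformer_transducer_stateless\b/conv_emformer_transducer_stateless2/g'

Note that the mechanical replace does not cover the pre-existing broken link
`./conformer/offline_client.py` in sherpa/bin/README.md, which the patch above
fixes by hand.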