```bash
# Set path to CUDA, NCCL
CUDAROOT=/usr/local/cuda
NCCL_ROOT=/usr/local/nccl
export CPATH=$NCCL_ROOT/include:$CPATH
export LD_LIBRARY_PATH=$NCCL_ROOT/lib/:$CUDAROOT/lib64:$LD_LIBRARY_PATH
export LIBRARY_PATH=$NCCL_ROOT/lib/:$LIBRARY_PATH
export CUDA_HOME=$CUDAROOT
export CUDA_PATH=$CUDAROOT
export CPATH=$CUDA_PATH/include:$CPATH  # for warp-rnnt

# Install miniconda, python libraries, and other tools
cd tools
make KALDI=/path/to/kaldi
```
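Once `make` finishes, a quick interactive check can confirm that the environment built under `tools/` actually sees the GPU. This is an optional sanity check and assumes PyTorch is among the Python libraries installed above; note that a pip/conda PyTorch bundles its own NCCL, so `$NCCL_ROOT` mainly matters for extensions such as warp-rnnt that are compiled during `make`.

```python
# Optional post-install sanity check (assumption: PyTorch was installed by the tools Makefile).
import torch

print(torch.__version__)
print(torch.cuda.is_available())          # True if the CUDA install under $CUDAROOT is usable
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # first visible GPU
    print(torch.cuda.nccl.version())      # NCCL version PyTorch was built against
```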
- ASR
  - AISHELL-1
  - CSJ
  - Librispeech
  - Switchboard (+ Fisher)
  - TEDLIUM2/TEDLIUM3
  - TIMIT
  - WSJ
- LM
  - Penn Tree Bank
  - WikiText2
- RNN encoder
  - (CNN-)BLSTM, (CNN-)LSTM, (CNN-)BLGRU, (CNN-)LGRU (a generic CNN-BLSTM sketch follows this list)
  - Latency-controlled BLSTM [link]
- Transformer encoder [link]
- Time-Depth Separable (TDS) convolutional encoder [link]
- Gated CNN encoder (GLU) [link]
- Conformer encoder [link]
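The (CNN-)BLSTM encoders listed above pair a small strided-convolution front-end (for time subsampling) with stacked bidirectional LSTM layers. The sketch below illustrates that generic pattern only; it is not neural_sp's encoder implementation, and the class name, layer sizes, and 4x subsampling factor are illustrative assumptions.

```python
# Generic CNN-BLSTM encoder sketch (illustrative; not neural_sp's implementation).
import torch
import torch.nn as nn

class CNNBLSTMEncoder(nn.Module):
    def __init__(self, n_mels=80, conv_channels=32, lstm_units=256, lstm_layers=4):
        super().__init__()
        # Two strided 2-D convolutions give 4x subsampling along the time axis.
        self.conv = nn.Sequential(
            nn.Conv2d(1, conv_channels, kernel_size=3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(conv_channels, conv_channels, kernel_size=3, stride=2, padding=1), nn.ReLU(),
        )
        freq_dim = (n_mels + 3) // 4              # frequency bins left after 4x reduction
        self.blstm = nn.LSTM(conv_channels * freq_dim, lstm_units, num_layers=lstm_layers,
                             batch_first=True, bidirectional=True)

    def forward(self, feats):                     # feats: (batch, time, n_mels) log-mel features
        x = self.conv(feats.unsqueeze(1))         # (batch, channels, time/4, n_mels/4)
        b, c, t, f = x.size()
        x = x.transpose(1, 2).reshape(b, t, c * f)
        out, _ = self.blstm(x)                    # (batch, time/4, 2 * lstm_units)
        return out

encoder = CNNBLSTMEncoder()
enc_out = encoder(torch.randn(4, 300, 80))        # 4 utterances, 300 frames -> (4, 75, 512)
```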
- Forced alignment
- Beam search
- Shallow fusion with an external LM (see the sketch below)
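In shallow fusion, the external LM's log-probability is added to the ASR model's score at every beam expansion step with a fixed interpolation weight. The snippet below is a greatly simplified illustration of that scoring rule; `lm_weight` and the dummy tensors are assumptions, not neural_sp's decoding code.

```python
# Generic shallow-fusion scoring for one beam-search expansion step (illustrative only).
import torch

def fused_scores(asr_log_probs: torch.Tensor,
                 lm_log_probs: torch.Tensor,
                 lm_weight: float = 0.3) -> torch.Tensor:
    """score(y) = log p_asr(y | x, prefix) + lm_weight * log p_lm(y | prefix)"""
    return asr_log_probs + lm_weight * lm_log_probs

# Dummy per-token distributions over a 1000-word vocabulary for one hypothesis prefix.
asr_log_probs = torch.log_softmax(torch.randn(1, 1000), dim=-1)
lm_log_probs = torch.log_softmax(torch.randn(1, 1000), dim=-1)
topk_scores, topk_tokens = fused_scores(asr_log_probs, lm_log_probs).topk(5, dim=-1)
```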
- RNN decoder
- Streaming RNN decoder
- RNN transducer [link] (a generic loss call is sketched after this list)
- Transformer decoder [link]
- Streaming Transformer decoder
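RNN transducer training minimizes the transducer loss over the joiner's (time x label) output grid. neural_sp builds warp-rnnt for this (see the CUDA paths in the installation snippet above); the call below instead uses torchaudio's generic `rnnt_loss` purely to illustrate the tensor shapes involved, assuming torchaudio >= 0.10 is available.

```python
# Illustrative RNN-T loss call via torchaudio (assumption: torchaudio >= 0.10 is installed).
import torch
import torchaudio

B, T, U, vocab = 2, 50, 10, 100                               # batch, frames, target length, vocab size
logits = torch.randn(B, T, U + 1, vocab)                      # joiner outputs over the (time, label) grid
targets = torch.randint(1, vocab, (B, U), dtype=torch.int32)  # label sequences, excluding blank (index 0)
logit_lengths = torch.full((B,), T, dtype=torch.int32)
target_lengths = torch.full((B,), U, dtype=torch.int32)

loss = torchaudio.functional.rnnt_loss(logits, targets, logit_lengths, target_lengths, blank=0)
```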
- RNNLM (recurrent neural network language model)
- Gated convolutional LM [link]
- Transformer LM
- Transformer-XL LM [link]
- Adaptive softmax [link] (see the sketch below)
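For large vocabularies, the adaptive softmax replaces the full output projection with frequency-dependent cluster projections. The sketch below uses PyTorch's built-in `nn.AdaptiveLogSoftmaxWithLoss` as a generic example; the vocabulary size and cutoffs are illustrative, and this is not neural_sp's LM code.

```python
# Generic adaptive-softmax output layer for a large-vocabulary LM (illustrative values).
import torch
import torch.nn as nn

vocab_size, hidden_size = 100_000, 512
adaptive_softmax = nn.AdaptiveLogSoftmaxWithLoss(
    in_features=hidden_size,
    n_classes=vocab_size,
    cutoffs=[2_000, 10_000, 50_000],   # frequent words get full-size projections, rare words smaller ones
    div_value=4.0,
)

hidden = torch.randn(32, hidden_size)            # LM hidden states for 32 token positions
targets = torch.randint(0, vocab_size, (32,))    # next-word targets
out = adaptive_softmax(hidden, targets)
loss = out.loss                                  # mean negative log-likelihood
```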
- Phoneme
- Grapheme
- Wordpiece (BPE, sentencepiece)
- Word
- Word-char mix
Multi-task learning (MTL) with different units is supported to alleviate data sparseness.
- Hybrid CTC/attention [link] (a loss-interpolation sketch follows this list)
- Hierarchical Attention (e.g., word attention + character attention) [link]
- Hierarchical CTC (e.g., word CTC + character CTC) [link]
- Hierarchical CTC+Attention (e.g., word attention + character CTC) [link]
- Forward-backward attention [link]
- LM objective
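Hybrid CTC/attention training combines the CTC loss on the encoder with the attention decoder's cross-entropy through a fixed interpolation weight. The function below is a generic sketch of that weighted sum with an illustrative `ctc_weight`; it is not neural_sp's training loop.

```python
# Generic hybrid CTC/attention objective (illustrative; hyperparameters are assumptions).
import torch
import torch.nn.functional as F

def hybrid_ctc_attention_loss(ctc_log_probs,      # (T, batch, vocab) log-softmax of the CTC head
                              ctc_targets,        # (batch, max_len) label ids, excluding blank (index 0)
                              input_lengths,      # (batch,) encoder output lengths
                              target_lengths,     # (batch,) reference lengths
                              att_logits,         # (batch, max_len, vocab) attention-decoder logits
                              att_targets,        # (batch, max_len) targets, padded with -1
                              ctc_weight: float = 0.3):
    """loss = ctc_weight * L_ctc + (1 - ctc_weight) * L_attention"""
    ctc_loss = F.ctc_loss(ctc_log_probs, ctc_targets, input_lengths, target_lengths,
                          blank=0, zero_infinity=True)
    att_loss = F.cross_entropy(att_logits.transpose(1, 2), att_targets, ignore_index=-1)
    return ctc_weight * ctc_loss + (1 - ctc_weight) * att_loss

# Dummy example: 100 encoder frames, batch of 2, vocabulary of 50, 12-token references.
ctc_lp = torch.randn(100, 2, 50).log_softmax(-1)
loss = hybrid_ctc_attention_loss(ctc_lp, torch.randint(1, 50, (2, 12)),
                                 torch.full((2,), 100), torch.full((2,), 12),
                                 torch.randn(2, 12, 50), torch.randint(0, 50, (2, 12)))
```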
AISHELL-1 (CER)

model | dev | test |
---|---|---|
Transformer | 5.0 | 5.4 |
Streaming MMA | 6.1 | 6.6 |

CSJ

model | eval1 | eval2 | eval3 |
---|---|---|---|
LAS | 6.5 | 5.1 | 5.6 |

Switchboard (WER)

model | SWB | CH |
---|---|---|
LAS | 9.1 | 18.8 |

Switchboard + Fisher (WER)

model | SWB | CH |
---|---|---|
LAS | 7.8 | 13.8 |

Librispeech (WER)

model | dev-clean | dev-other | test-clean | test-other |
---|---|---|---|---|
Transformer | 2.3 | 5.8 | 2.5 | 6.1 |
Streaming MMA | 2.5 | 6.9 | 2.7 | 7.1 |

TEDLIUM2 (WER)

model | dev | test |
---|---|---|
LAS | 10.9 | 11.2 |

WSJ (WER)

model | test_dev93 | test_eval92 |
---|---|---|
LAS | 8.8 | 6.2 |

Penn Tree Bank (perplexity)

model | valid | test |
---|---|---|
RNNLM | 87.99 | 86.06 |
+ cache=100 | 79.58 | 79.12 |
+ cache=500 | 77.36 | 76.94 |

WikiText2 (perplexity)

model | valid | test |
---|---|---|
RNNLM | 104.53 | 98.73 |
+ cache=100 | 90.86 | 85.87 |
+ cache=2000 | 76.10 | 72.77 |