SYMBOL INDEX (80 symbols across 19 files)
FILE: debug/debug_dataset.py function check_for_invalid_values (line 13) | def check_for_invalid_values(inp, labels): function check_empty (line 21) | def check_empty(inp, labels): function get_dataset (line 46) | def get_dataset(data_dir, function main (line 68) | def main(args): function parse_args (line 83) | def parse_args():
FILE: debug/get_common_voice_stats.py function main (line 7) | def main(args): function parse_args (line 43) | def parse_args():
FILE: model.py class TimeReduction (line 8) | class TimeReduction(tf.keras.layers.Layer): method __init__ (line 10) | def __init__(self, method call (line 20) | def call(self, inputs): function encoder (line 39) | def encoder(specs_shape, function prediction_network (line 84) | def prediction_network(vocab_size, function build_keras_model (line 119) | def build_keras_model(hparams,
FILE: preprocess_common_voice.py function write_dataset (line 24) | def write_dataset(dataset, name): function main (line 36) | def main(_):
FILE: preprocess_librispeech.py function write_dataset (line 24) | def write_dataset(dataset, name): function main (line 36) | def main(_):
FILE: quantize_model.py function main (line 8) | def main(args): function parse_args (line 31) | def parse_args():
FILE: run_rnnt.py function get_dataset (line 66) | def get_dataset(data_dir, function configure_environment (line 93) | def configure_environment(gpu_names, function setup_hparams (line 134) | def setup_hparams(log_dir, function run_metrics (line 223) | def run_metrics(inputs, function run_training (line 233) | def run_training(model, function run_evaluate (line 380) | def run_evaluate(model, function main (line 444) | def main(_):
FILE: scripts/remove_missing_samples.py function remove_missing (line 5) | def remove_missing(data_dir, fname, replace_old=True): function main (line 25) | def main(args): function parse_args (line 37) | def parse_args():
FILE: streaming_transcribe.py function main (line 23) | def main(args): function parse_args (line 84) | def parse_args():
FILE: transcribe_file.py function main (line 14) | def main(args): function parse_args (line 44) | def parse_args():
FILE: utils/data/common_voice.py function tf_parse_line (line 7) | def tf_parse_line(line, data_dir): function load_dataset (line 22) | def load_dataset(base_path, name): function texts_generator (line 35) | def texts_generator(base_path):
FILE: utils/data/librispeech.py function load_audio (line 6) | def load_audio(filepath): function tf_load_audio (line 11) | def tf_load_audio(filepath): function tf_file_exists (line 19) | def tf_file_exists(filepath): function tf_parse_line (line 27) | def tf_parse_line(line, data_dir, split_names): function get_transcript_files (line 52) | def get_transcript_files(base_path, split_names): function load_dataset (line 66) | def load_dataset(base_path, split_names): function texts_generator (line 77) | def texts_generator(base_path, split_names):
FILE: utils/decoding.py function joint (line 6) | def joint(model, f, g): function greedy_decode_fn (line 21) | def greedy_decode_fn(model, hparams):
FILE: utils/encoding.py function build_lookup_table (line 9) | def build_lookup_table(keys, values=None, default_value=-1): function wordpiece_encode (line 21) | def wordpiece_encode(text, encoder): function tf_wordpiece_encode (line 27) | def tf_wordpiece_encode(text, encoder): function wordpiece_decode (line 33) | def wordpiece_decode(ids, encoder): function tf_wordpiece_decode (line 38) | def tf_wordpiece_decode(ids, encoder): function tf_vocab_encode (line 44) | def tf_vocab_encode(text, vocab_table): function get_encoder (line 51) | def get_encoder(encoder_dir,
FILE: utils/loss.py function get_loss_fn (line 12) | def get_loss_fn(reduction_factor):
FILE: utils/metrics.py function error_rate (line 6) | def error_rate(y_true, decoded): function string_to_sparse (line 29) | def string_to_sparse(str_tensor): function token_error_rate (line 42) | def token_error_rate(y_true, decoded, tok_fn, idx_to_text): function build_accuracy_fn (line 59) | def build_accuracy_fn(decode_fn): function build_wer_fn (line 76) | def build_wer_fn(decode_fn, idx_to_text):
FILE: utils/model.py function load_hparams (line 9) | def load_hparams(model_dir): function save_hparams (line 15) | def save_hparams(hparams, model_dir):
FILE: utils/preprocessing.py function tf_load_audio (line 12) | def tf_load_audio(path, pre_emphasis=0.97): function normalize_text (line 24) | def normalize_text(text): function tf_normalize_text (line 32) | def tf_normalize_text(text): function print_tensor (line 40) | def print_tensor(t, template='{}'): function compute_mel_spectrograms (line 48) | def compute_mel_spectrograms(audio_arr, function downsample_spec (line 84) | def downsample_spec(mel_spec, n=3): function load_dataset (line 97) | def load_dataset(data_dir, name): function parse_example (line 110) | def parse_example(serialized_example): function serialize_example (line 132) | def serialize_example(mel_specs, function tf_serialize_example (line 163) | def tf_serialize_example(mel_specs, function preprocess_text (line 177) | def preprocess_text(text, encoder_fn, vocab_size): function plot_spec (line 186) | def plot_spec(spec, sr, transcription, name): function tf_plot_spec (line 199) | def tf_plot_spec(spec, sr, transcription, name): function plot_audio (line 213) | def plot_audio(audio_arr, sr, trans, name): function tf_plot_audio (line 227) | def tf_plot_audio(audio_arr, sr, trans, name): function preprocess_audio (line 238) | def preprocess_audio(audio, function preprocess_dataset (line 256) | def preprocess_dataset(dataset,
FILE: utils/vocabulary.py function init_vocab (line 1) | def init_vocab(): function load_vocab (line 9) | def load_vocab(filepath): function save_vocab (line 25) | def save_vocab(vocab, filepath):