Repository: oleg-yaroshevskiy/quest_qa_labeling
Branch: master
Commit: 730a9632314e
Files: 498
Total size: 15.8 MB
Directory structure:
gitextract_liiwo2a9/
├── .gitignore
├── README.md
├── bash/
│ ├── blending_n_postprocessing.sh
│ ├── download_all_model_ckpts_for_inference.sh
│ ├── download_comp_data.sh
│ ├── inference/
│ │ ├── model1_inference.sh
│ │ ├── model2_inference.sh
│ │ ├── model3_inference.sh
│ │ ├── model4_inference.sh
│ │ └── run_inference.sh
│ ├── pseudo/
│ │ ├── create_all_pseudo_labels.sh
│ │ ├── create_all_pseudo_labels_toy.sh
│ │ ├── create_pseudo_base.sh
│ │ ├── create_pseudo_base_pretrained.sh
│ │ ├── create_pseudo_large.sh
│ │ ├── train_base.sh
│ │ ├── train_base_pretrained.sh
│ │ └── train_large.sh
│ ├── setup.sh
│ └── training/
│ ├── load_roberta_weights.sh
│ ├── train1a_prepare_stackx_data.sh
│ ├── train1b_train_bert_stackx_lang_model.sh
│ ├── train2_pseudo_labels.sh
│ ├── train3_bert_base_cased_stackx_pretrained.sh
│ ├── train4_bert_base_cased_stackx_with_pseudo_labels.sh
│ ├── train5_roberta_with_pseudo_labels.sh
│ └── train6_bart_with_pseudo_labels.sh
├── experiments/
│ ├── 1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200/
│ │ ├── command
│ │ ├── commit_hash
│ │ └── config.json
│ ├── 2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200/
│ │ ├── command
│ │ ├── commit_hash
│ │ └── config.json
│ ├── 2-4-roberta-base-saved-5-head_tail-roberta-stackx-base-v2-pl1kksample20k-1e-05-210-260-500-26-roberta-200/
│ │ ├── config.json
│ │ └── config_train.json
│ └── 4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250/
│ ├── command
│ ├── commit_hash
│ └── config.json
├── input/
│ ├── google-quest-challenge/
│ │ ├── sample_submission_toy.csv
│ │ ├── test_toy.csv
│ │ └── train_toy.csv
│ ├── qa_stackexchange_cleaned.csv
│ ├── qa_stackexchange_cleaned_toy.csv
│ └── stackx-base-cased/
│ ├── config.json
│ ├── stackx-base-cased-config.json
│ ├── stackx-base-cased-vocab.txt
│ ├── training_log.csv
│ └── vocab.txt
├── packages/
│ ├── fairseq-hacked/
│ │ ├── .gitignore
│ │ ├── CODE_OF_CONDUCT.md
│ │ ├── CONTRIBUTING.md
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── docs/
│ │ │ ├── Makefile
│ │ │ ├── _static/
│ │ │ │ └── theme_overrides.css
│ │ │ ├── command_line_tools.rst
│ │ │ ├── conf.py
│ │ │ ├── criterions.rst
│ │ │ ├── data.rst
│ │ │ ├── docutils.conf
│ │ │ ├── getting_started.rst
│ │ │ ├── index.rst
│ │ │ ├── lr_scheduler.rst
│ │ │ ├── make.bat
│ │ │ ├── models.rst
│ │ │ ├── modules.rst
│ │ │ ├── optim.rst
│ │ │ ├── overview.rst
│ │ │ ├── requirements.txt
│ │ │ ├── tasks.rst
│ │ │ ├── tutorial_classifying_names.rst
│ │ │ └── tutorial_simple_lstm.rst
│ │ ├── eval_lm.py
│ │ ├── examples/
│ │ │ ├── .gitignore
│ │ │ ├── __init__.py
│ │ │ ├── backtranslation/
│ │ │ │ └── README.md
│ │ │ ├── bart/
│ │ │ │ ├── README.cnn.md
│ │ │ │ ├── README.glue.md
│ │ │ │ └── README.md
│ │ │ ├── camembert/
│ │ │ │ └── README.md
│ │ │ ├── conv_seq2seq/
│ │ │ │ └── README.md
│ │ │ ├── cross_lingual_language_model/
│ │ │ │ └── README.md
│ │ │ ├── joint_alignment_translation/
│ │ │ │ ├── README.md
│ │ │ │ └── prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh
│ │ │ ├── language_model/
│ │ │ │ ├── README.md
│ │ │ │ ├── conv_lm/
│ │ │ │ │ └── README.md
│ │ │ │ ├── prepare-wikitext-103.sh
│ │ │ │ └── transformer_lm/
│ │ │ │ └── README.md
│ │ │ ├── layerdrop/
│ │ │ │ └── README.md
│ │ │ ├── noisychannel/
│ │ │ │ ├── README.md
│ │ │ │ ├── __init__.py
│ │ │ │ ├── rerank.py
│ │ │ │ ├── rerank_generate.py
│ │ │ │ ├── rerank_options.py
│ │ │ │ ├── rerank_score_bw.py
│ │ │ │ ├── rerank_score_lm.py
│ │ │ │ ├── rerank_tune.py
│ │ │ │ └── rerank_utils.py
│ │ │ ├── nonautoregressive_translation/
│ │ │ │ ├── README.md
│ │ │ │ └── scripts.md
│ │ │ ├── pay_less_attention_paper/
│ │ │ │ └── README.md
│ │ │ ├── roberta/
│ │ │ │ ├── README.custom_classification.md
│ │ │ │ ├── README.glue.md
│ │ │ │ ├── README.md
│ │ │ │ ├── README.pretraining.md
│ │ │ │ ├── README.race.md
│ │ │ │ ├── commonsense_qa/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── commonsense_qa_task.py
│ │ │ │ │ └── download_cqa_data.sh
│ │ │ │ ├── multiprocessing_bpe_encoder.py
│ │ │ │ ├── preprocess_GLUE_tasks.sh
│ │ │ │ ├── preprocess_RACE.py
│ │ │ │ ├── preprocess_RACE.sh
│ │ │ │ └── wsc/
│ │ │ │ ├── README.md
│ │ │ │ ├── __init__.py
│ │ │ │ ├── wsc_criterion.py
│ │ │ │ ├── wsc_task.py
│ │ │ │ └── wsc_utils.py
│ │ │ ├── scaling_nmt/
│ │ │ │ └── README.md
│ │ │ ├── speech_recognition/
│ │ │ │ ├── README.md
│ │ │ │ ├── __init__.py
│ │ │ │ ├── criterions/
│ │ │ │ │ ├── ASG_loss.py
│ │ │ │ │ ├── CTC_loss.py
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── cross_entropy_acc.py
│ │ │ │ ├── datasets/
│ │ │ │ │ ├── asr_prep_json.py
│ │ │ │ │ └── prepare-librispeech.sh
│ │ │ │ ├── infer.py
│ │ │ │ ├── models/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── vggtransformer.py
│ │ │ │ │ └── w2l_conv_glu_enc.py
│ │ │ │ ├── tasks/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── speech_recognition.py
│ │ │ │ ├── utils/
│ │ │ │ │ └── wer_utils.py
│ │ │ │ └── w2l_decoder.py
│ │ │ ├── stories/
│ │ │ │ └── README.md
│ │ │ ├── translation/
│ │ │ │ ├── README.md
│ │ │ │ ├── prepare-iwslt14.sh
│ │ │ │ ├── prepare-iwslt17-multilingual.sh
│ │ │ │ ├── prepare-wmt14en2de.sh
│ │ │ │ └── prepare-wmt14en2fr.sh
│ │ │ ├── translation_moe/
│ │ │ │ ├── README.md
│ │ │ │ └── score.py
│ │ │ ├── wav2vec/
│ │ │ │ └── README.md
│ │ │ ├── wmt19/
│ │ │ │ └── README.md
│ │ │ └── xlmr/
│ │ │ └── README.md
│ │ ├── fairseq/
│ │ │ ├── __init__.py
│ │ │ ├── binarizer.py
│ │ │ ├── bleu.py
│ │ │ ├── checkpoint_utils.py
│ │ │ ├── clib/
│ │ │ │ ├── libbleu/
│ │ │ │ │ ├── libbleu.cpp
│ │ │ │ │ └── module.cpp
│ │ │ │ └── libnat/
│ │ │ │ └── edit_dist.cpp
│ │ │ ├── criterions/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adaptive_loss.py
│ │ │ │ ├── binary_cross_entropy.py
│ │ │ │ ├── composite_loss.py
│ │ │ │ ├── cross_entropy.py
│ │ │ │ ├── fairseq_criterion.py
│ │ │ │ ├── label_smoothed_cross_entropy.py
│ │ │ │ ├── label_smoothed_cross_entropy_with_alignment.py
│ │ │ │ ├── legacy_masked_lm.py
│ │ │ │ ├── masked_lm.py
│ │ │ │ ├── nat_loss.py
│ │ │ │ ├── sentence_prediction.py
│ │ │ │ └── sentence_ranking.py
│ │ │ ├── distributed_utils.py
│ │ │ ├── file_utils.py
│ │ │ ├── hub_utils.py
│ │ │ ├── iterative_refinement_generator.py
│ │ │ ├── legacy_distributed_data_parallel.py
│ │ │ ├── meters.py
│ │ │ ├── models/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── bart/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── hub_interface.py
│ │ │ │ │ └── model.py
│ │ │ │ ├── cmlm_transformer.py
│ │ │ │ ├── composite_encoder.py
│ │ │ │ ├── distributed_fairseq_model.py
│ │ │ │ ├── fairseq_decoder.py
│ │ │ │ ├── fairseq_encoder.py
│ │ │ │ ├── fairseq_incremental_decoder.py
│ │ │ │ ├── fairseq_model.py
│ │ │ │ ├── fconv.py
│ │ │ │ ├── fconv_lm.py
│ │ │ │ ├── fconv_self_att.py
│ │ │ │ ├── insertion_transformer.py
│ │ │ │ ├── iterative_nonautoregressive_transformer.py
│ │ │ │ ├── levenshtein_transformer.py
│ │ │ │ ├── lightconv.py
│ │ │ │ ├── lightconv_lm.py
│ │ │ │ ├── lstm.py
│ │ │ │ ├── masked_lm.py
│ │ │ │ ├── model_utils.py
│ │ │ │ ├── multilingual_transformer.py
│ │ │ │ ├── nonautoregressive_ensembles.py
│ │ │ │ ├── nonautoregressive_transformer.py
│ │ │ │ ├── roberta/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── alignment_utils.py
│ │ │ │ │ ├── hub_interface.py
│ │ │ │ │ └── model.py
│ │ │ │ ├── transformer.py
│ │ │ │ ├── transformer_from_pretrained_xlm.py
│ │ │ │ ├── transformer_lm.py
│ │ │ │ └── wav2vec.py
│ │ │ ├── modules/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adaptive_input.py
│ │ │ │ ├── adaptive_softmax.py
│ │ │ │ ├── beamable_mm.py
│ │ │ │ ├── character_token_embedder.py
│ │ │ │ ├── conv_tbc.py
│ │ │ │ ├── cuda_utils.cu
│ │ │ │ ├── downsampled_multihead_attention.py
│ │ │ │ ├── dynamic_convolution.py
│ │ │ │ ├── dynamicconv_layer/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── cuda_function_gen.py
│ │ │ │ │ ├── dynamicconv_cuda.cpp
│ │ │ │ │ ├── dynamicconv_cuda.cuh
│ │ │ │ │ ├── dynamicconv_cuda_kernel.cu
│ │ │ │ │ ├── dynamicconv_layer.py
│ │ │ │ │ ├── dynamiconv_cpu.cpp
│ │ │ │ │ └── setup.py
│ │ │ │ ├── gelu.py
│ │ │ │ ├── grad_multiply.py
│ │ │ │ ├── highway.py
│ │ │ │ ├── layer_norm.py
│ │ │ │ ├── learned_positional_embedding.py
│ │ │ │ ├── lightconv_layer/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── cuda_function_gen.py
│ │ │ │ │ ├── lightconv_cuda.cpp
│ │ │ │ │ ├── lightconv_cuda.cuh
│ │ │ │ │ ├── lightconv_cuda_kernel.cu
│ │ │ │ │ ├── lightconv_layer.py
│ │ │ │ │ └── setup.py
│ │ │ │ ├── lightweight_convolution.py
│ │ │ │ ├── linearized_convolution.py
│ │ │ │ ├── logsumexp_moe.py
│ │ │ │ ├── mean_pool_gating_network.py
│ │ │ │ ├── multihead_attention.py
│ │ │ │ ├── positional_embedding.py
│ │ │ │ ├── scalar_bias.py
│ │ │ │ ├── sinusoidal_positional_embedding.py
│ │ │ │ ├── sparse_multihead_attention.py
│ │ │ │ ├── sparse_transformer_sentence_encoder.py
│ │ │ │ ├── sparse_transformer_sentence_encoder_layer.py
│ │ │ │ ├── transformer_layer.py
│ │ │ │ ├── transformer_sentence_encoder.py
│ │ │ │ ├── transformer_sentence_encoder_layer.py
│ │ │ │ ├── unfold.py
│ │ │ │ └── vggblock.py
│ │ │ ├── optim/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adadelta.py
│ │ │ │ ├── adafactor.py
│ │ │ │ ├── adagrad.py
│ │ │ │ ├── adam.py
│ │ │ │ ├── adamax.py
│ │ │ │ ├── bmuf.py
│ │ │ │ ├── fairseq_optimizer.py
│ │ │ │ ├── fp16_optimizer.py
│ │ │ │ ├── lr_scheduler/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── cosine_lr_scheduler.py
│ │ │ │ │ ├── fairseq_lr_scheduler.py
│ │ │ │ │ ├── fixed_schedule.py
│ │ │ │ │ ├── inverse_square_root_schedule.py
│ │ │ │ │ ├── polynomial_decay_schedule.py
│ │ │ │ │ ├── reduce_lr_on_plateau.py
│ │ │ │ │ ├── tri_stage_lr_scheduler.py
│ │ │ │ │ └── triangular_lr_scheduler.py
│ │ │ │ ├── nag.py
│ │ │ │ └── sgd.py
│ │ │ ├── options.py
│ │ │ ├── pdb.py
│ │ │ ├── progress_bar.py
│ │ │ ├── registry.py
│ │ │ ├── search.py
│ │ │ ├── sequence_generator.py
│ │ │ ├── sequence_scorer.py
│ │ │ ├── tasks/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── audio_pretraining.py
│ │ │ │ ├── cross_lingual_lm.py
│ │ │ │ ├── denoising.py
│ │ │ │ ├── fairseq_task.py
│ │ │ │ ├── language_modeling.py
│ │ │ │ ├── legacy_masked_lm.py
│ │ │ │ ├── masked_lm.py
│ │ │ │ ├── multilingual_masked_lm.py
│ │ │ │ ├── multilingual_translation.py
│ │ │ │ ├── semisupervised_translation.py
│ │ │ │ ├── sentence_prediction.py
│ │ │ │ ├── sentence_ranking.py
│ │ │ │ ├── translation.py
│ │ │ │ ├── translation_from_pretrained_xlm.py
│ │ │ │ ├── translation_lev.py
│ │ │ │ └── translation_moe.py
│ │ │ ├── tokenizer.py
│ │ │ ├── trainer.py
│ │ │ └── utils.py
│ │ ├── fairseq_cli/
│ │ │ ├── __init__.py
│ │ │ ├── eval_lm.py
│ │ │ ├── generate.py
│ │ │ ├── interactive.py
│ │ │ ├── preprocess.py
│ │ │ ├── score.py
│ │ │ ├── setup.py
│ │ │ └── train.py
│ │ ├── generate.py
│ │ ├── hubconf.py
│ │ ├── interactive.py
│ │ ├── preprocess.py
│ │ ├── score.py
│ │ ├── scripts/
│ │ │ ├── __init__.py
│ │ │ ├── average_checkpoints.py
│ │ │ ├── build_sym_alignment.py
│ │ │ ├── compare_namespaces.py
│ │ │ ├── compound_split_bleu.sh
│ │ │ ├── convert_dictionary.lua
│ │ │ ├── convert_model.lua
│ │ │ ├── count_docs.py
│ │ │ ├── read_binarized.py
│ │ │ ├── rm_pt.py
│ │ │ ├── sacrebleu_pregen.sh
│ │ │ ├── shard_docs.py
│ │ │ ├── split_train_valid_docs.py
│ │ │ ├── spm_decode.py
│ │ │ ├── spm_encode.py
│ │ │ ├── spm_train.py
│ │ │ ├── wav2vec_featurize.py
│ │ │ └── wav2vec_manifest.py
│ │ ├── setup.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── speech_recognition/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── asr_test_base.py
│ │ │ │ ├── test_collaters.py
│ │ │ │ ├── test_cross_entropy.py
│ │ │ │ └── test_vggtransformer.py
│ │ │ ├── test_average_checkpoints.py
│ │ │ ├── test_backtranslation_dataset.py
│ │ │ ├── test_binaries.py
│ │ │ ├── test_bmuf.py
│ │ │ ├── test_character_token_embedder.py
│ │ │ ├── test_concat_dataset.py
│ │ │ ├── test_convtbc.py
│ │ │ ├── test_dictionary.py
│ │ │ ├── test_iterators.py
│ │ │ ├── test_label_smoothing.py
│ │ │ ├── test_memory_efficient_fp16.py
│ │ │ ├── test_multi_corpus_sampled_dataset.py
│ │ │ ├── test_multihead_attention.py
│ │ │ ├── test_noising.py
│ │ │ ├── test_reproducibility.py
│ │ │ ├── test_resampling_dataset.py
│ │ │ ├── test_sequence_generator.py
│ │ │ ├── test_sequence_scorer.py
│ │ │ ├── test_sparse_multihead_attention.py
│ │ │ ├── test_token_block_dataset.py
│ │ │ ├── test_train.py
│ │ │ ├── test_utils.py
│ │ │ └── utils.py
│ │ ├── train.py
│ │ └── validate.py
│ └── gpt2bpe/
│ ├── encoder.json
│ └── vocab.bpe
├── requirements_full.txt
├── requirements_minimal.txt
├── step11_final/
│ └── blending_n_postprocessing.py
├── step1_lm_finetuning/
│ ├── callbacks.py
│ ├── data/
│ │ ├── __init__.py
│ │ ├── augmentation/
│ │ │ ├── __init__.py
│ │ │ └── tokenization.py
│ │ ├── config.json
│ │ ├── dataset.py
│ │ ├── folds.csv
│ │ ├── group_kf_folds.csv
│ │ ├── make_folds.py
│ │ ├── sampler.py
│ │ └── vocab.txt
│ ├── data_preparation/
│ │ ├── clean_stack_exchange_qa.py
│ │ ├── clean_stackexchange_QA_demonstration.ipynb
│ │ ├── download_and_process_stackexchange_dump_demonstration.ipynb
│ │ └── scrape_stack_exchange.py
│ ├── train_stackx_lm.py
│ └── utils.py
├── step2_pseudo_labeling/
│ ├── bert-base/
│ │ ├── apply_swa.py
│ │ ├── args.py
│ │ ├── dataset.py
│ │ ├── evaluation.py
│ │ ├── infer.py
│ │ ├── infer_pseudo.py
│ │ ├── loops.py
│ │ ├── misc.py
│ │ ├── model.py
│ │ └── run.py
│ ├── bert-base-pretrained/
│ │ ├── apply_swa.py
│ │ ├── args.py
│ │ ├── dataset.py
│ │ ├── evaluation.py
│ │ ├── infer.py
│ │ ├── infer_pseudo.py
│ │ ├── loops.py
│ │ ├── misc.py
│ │ ├── model.py
│ │ └── run.py
│ ├── bert-large/
│ │ ├── apply_swa.py
│ │ ├── args.py
│ │ ├── dataset.py
│ │ ├── evaluation.py
│ │ ├── infer.py
│ │ ├── infer_pseudo.py
│ │ ├── loops.py
│ │ ├── misc.py
│ │ ├── model.py
│ │ └── run.py
│ └── blend_pseudo.py
├── step3_model1_bert_code/
│ ├── bert.py
│ ├── callbacks.py
│ ├── data/
│ │ ├── __init__.py
│ │ ├── augmentation/
│ │ │ ├── __init__.py
│ │ │ └── tokenization.py
│ │ ├── dataset.py
│ │ ├── folds.csv
│ │ ├── group_kf_folds.csv
│ │ ├── make_folds.py
│ │ └── sampler.py
│ ├── metrics.py
│ ├── models.py
│ ├── schedule.py
│ ├── train.py
│ └── utils.py
├── step4_model2_bert_code/
│ ├── apply_swa.py
│ ├── args.py
│ ├── dataset.py
│ ├── evaluation.py
│ ├── infer.py
│ ├── loops.py
│ ├── misc.py
│ ├── model.py
│ └── run.py
├── step5_model3_roberta_code/
│ ├── args.py
│ ├── augmentation.py
│ ├── dataset.py
│ ├── evaluation.py
│ ├── infer.py
│ ├── loops.py
│ ├── misc.py
│ ├── model.py
│ └── run.py
├── step6_model4_bart_code/
│ ├── apply_swa.py
│ ├── args.py
│ ├── dataset.py
│ ├── evaluation.py
│ ├── infer.py
│ ├── loops.py
│ ├── misc.py
│ ├── model.py
│ └── run.py
├── steps7_10_inference/
│ ├── model1_bert_code/
│ │ ├── callbacks.py
│ │ ├── data/
│ │ │ ├── __init__.py
│ │ │ ├── augmentation/
│ │ │ │ ├── __init__.py
│ │ │ │ └── tokenization.py
│ │ │ ├── dataset.py
│ │ │ ├── folds.csv
│ │ │ ├── group_kf_folds.csv
│ │ │ ├── make_folds.py
│ │ │ └── sampler.py
│ │ ├── metrics.py
│ │ ├── models.py
│ │ ├── predict_test.py
│ │ ├── schedule.py
│ │ └── utils.py
│ ├── model2_bert_code/
│ │ ├── args.py
│ │ ├── dataset.py
│ │ ├── loops.py
│ │ ├── model.py
│ │ └── run.py
│ ├── model3_roberta_code/
│ │ ├── args.py
│ │ ├── augmentation.py
│ │ ├── dataset.py
│ │ ├── evaluation.py
│ │ ├── infer.py
│ │ ├── loops.py
│ │ ├── misc.py
│ │ ├── model.py
│ │ └── run.py
│ └── model4_bart_code/
│ ├── args.py
│ ├── dataset.py
│ ├── loops.py
│ ├── model.py
│ └── run.py
└── submissions/
├── model1_submission.csv
├── model2_bert_base_cased_pred.csv
├── model3_roberta-base-output/
│ ├── fold-0.csv
│ ├── fold-1.csv
│ ├── fold-2.csv
│ ├── fold-3.csv
│ └── fold-4.csv
├── model4_bart_large_pred.csv
└── submission.csv
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Custom
*.pth
*.bin
input/model*
input/google-quest-challenge/*.csv
!input/google-quest-challenge/*toy.csv
input/lid.176.bin
input/leak-free-pseudo-labels-100k-stackx/
input/sx_dump/
input/stackx-large-cased/
packages/fairseq-hacked/examples/speech_recognition/data/
packages/fairseq-hacked/fairseq/data/
experiments/*/checkpoints
experiments/1-8-5-head_tail-pretrained-1e-05-210-260-500-26-100/
experiments/1-8-5-head_tail-qa-1e-05-210-260-500-26-300/
experiments/4-2-5-head_tail-large-1e-05-210-260-500-26-300/
pseudo-predictions/
.DS_Store
.idea/
mag/
================================================
FILE: README.md
================================================
# Google QUEST Q&A Labeling 1st place solution
Below you can find an outline of how to reproduce our solution for the Google QUEST Q&A Labeling competition. If you run into any trouble with the setup/code or have any questions please contact me at [yury.kashnitsky@gmail.com](mailto:yury.kashnitsky@gmail.com).
The solution is also described in [this post](https://www.kaggle.com/c/google-quest-challenge/discussion/129840) on Kaggle, the inference part is fully reproduced in [this Kaggle Notebook](https://www.kaggle.com/ddanevskyi/1st-place-solution).
## Archive contents
[The archive](https://tinyurl.com/t9bjvlm) contains the following files:
- `stackx-base-cased.tar.gz` – pretrained BERT language model, output of step 1 in the ["Model training" section](#model-training)
- `sampled_sx_so.csv.gz` – chunk of StackExchange dump used to generate pseudo-labels
- `pseudo-100k-3x-blend-no-leak.tar.gz` – pseudo-labels, output of step 2 in the ["Model training" section](#model-training)
## Hardware
- 1 x NVIDIA Quadro P6000
- 2 x NVIDIA 1080 Ti
- 5 x NVIDIA 1080 Ti (only for language model training)
## Software and environments
- Conda 4.7.10 with Python 3.6.6
- CUDA 10.0.130
- cuDNN 7.5.0
- NVIDIA drivers v. 418.67
We run all experiments in a Conda environment and provide a full list of required packages in `requirements_full.txt`. To do the same, run:
- `conda create -n qa_quest_env python=3.6.6`
- `conda activate qa_quest_env`
- `pip install -r requirements_full.txt`
However, that full list contains many packages that are not relevant to this project. A minimal list of requirements is specified in `requirements_minimal.txt`; it was generated with the `pipreqs` utility, which crawls project source code and lists the packages it actually imports. So you can run `pip install -r requirements_minimal.txt` and then install missing packages on the go (if any).
Apart from pip-installable packages, we use a custom lightweight library called [mag](https://github.com/ex4sperans/mag) to keep track of experiments. Also, our installation of the `fairseq` library is a bit different (actually, it's hacked for our needs). These two can be installed by running `sh bash/setup.sh`.
## Model training
For some of our models, we perform language model finetuning with StackExchange data. Then we run 5-fold cross-validation for 4 models (2 [BERT](https://arxiv.org/abs/1810.04805) ones, one [RoBERTa](https://arxiv.org/abs/1907.11692), and one [BART](https://arxiv.org/abs/1910.13461)), averaging the predictions of all 5 fold checkpoints for each model type. Finally, we blend the 4 resulting predictions (a schematic sketch of this averaging and blending follows the list below). In this section, we cover everything related to model training:
1. Language model finetuning with StackExchange data
1. Generating pseudo-labels
1. Training BERT-base-cased pretrained with StackExchange
1. Training BERT-base-cased pretrained with StackExchange + pseudo-labels
1. Training RoBERTa-base with pseudo-labels
1. Training BART-large with pseudo-labels
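Schematically, the per-model fold averaging and the final 4-model blend look like this. This is a minimal sketch assuming plain mean averaging over the target columns; the actual blending recipe lives in `step11_final/blending_n_postprocessing.py`.

```python
# Sketch only: average the 5 fold checkpoints of each model, then blend
# the 4 per-model predictions. Plain means are an assumption here; see
# step11_final/blending_n_postprocessing.py for the real recipe.
import glob
import pandas as pd

def mean_of(frames, label_cols):
    out = frames[0].copy()
    out[label_cols] = sum(f[label_cols].values for f in frames) / len(frames)
    return out

def average_folds(pattern, label_cols):
    # e.g. pattern = "submissions/model3_roberta-base-output/fold-*.csv"
    return mean_of([pd.read_csv(p) for p in sorted(glob.glob(pattern))],
                   label_cols)

# label_cols: the 30 target columns from sample_submission.csv
# final = mean_of([model1_pred, model2_pred, model3_pred, model4_pred],
#                 label_cols)
```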
### 1. Language model finetuning with StackExchange data
#### 1a. Scraping and processing StackExchange questions and answers
For this purpose, we download and process StackExchange dumps. To reproduce these steps from scratch on a small subsample, run `sh bash/training/train1a_prepare_stackx_data.sh`. It is the same as running the following two commands:
- `python step1_lm_finetuning/data_preparation/scrape_stack_exchange.py`
- `python step1_lm_finetuning/data_preparation/clean_stack_exchange_qa.py`
See the comments in these files. You can also find 2 corresponding Jupyter notebooks in the same folder, demonstrating the scraping/cleaning process.
Output is written to the `input/qa_stackexchange_cleaned.csv` file. This file is needed for the next step.
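For illustration, the cleaning boils down to stripping HTML from raw post bodies and normalizing whitespace. This is a hypothetical sketch of that idea, not the exact rules of `clean_stack_exchange_qa.py`:

```python
# Hypothetical sketch of HTML cleaning for StackExchange posts; the real
# rules are in step1_lm_finetuning/data_preparation/clean_stack_exchange_qa.py.
import re
from bs4 import BeautifulSoup

def clean_post(html: str) -> str:
    soup = BeautifulSoup(html, "html.parser")
    # Dropping code blocks is an assumption: they are mostly noise for a
    # language model of Q&A prose.
    for tag in soup.find_all(["code", "pre"]):
        tag.decompose()
    text = soup.get_text(separator=" ")
    return re.sub(r"\s+", " ", text).strip()
```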
#### 1b. Fine-tuning BERT language model with StackExchange data:
`sh bash/training/train1b_train_bert_stackx_lang_model.sh` – this runs BERT language model fine-tuning with StackExchange data from the previous step (`input/qa_stackexchange_cleaned.csv`).
This script writes model checkpoints and training logs to `input/stackx-base-cased`; the following two BERT models use this checkpoint. The checkpoint is also shared as `stackx-base-cased.tar.gz` in the [archive](https://tinyurl.com/t9bjvlm). To go on reproducing results, it's easiest to unzip the contents of `stackx-base-cased.tar.gz` into `input/stackx-base-cased`.
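The fine-tuning itself is standard masked-LM training on the cleaned Q&A text. Below is a schematic of one training step, assuming a recent `transformers` version (older versions return tuples instead of output objects); the real loop is in `step1_lm_finetuning/train_stackx_lm.py`.

```python
# Schematic masked-LM step; simplified (the standard 80/10/10
# mask/random/keep split is omitted for brevity). See
# step1_lm_finetuning/train_stackx_lm.py for the real training loop.
import torch
from transformers import BertForMaskedLM, BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
model = BertForMaskedLM.from_pretrained("bert-base-cased")
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)

def mlm_step(texts):
    batch = tokenizer(texts, padding=True, truncation=True,
                      max_length=512, return_tensors="pt")
    input_ids = batch["input_ids"]
    labels = input_ids.clone()
    special = ((input_ids == tokenizer.cls_token_id)
               | (input_ids == tokenizer.sep_token_id)
               | (input_ids == tokenizer.pad_token_id))
    # Mask 15% of non-special tokens; the loss is computed on those only.
    mask = (torch.rand(input_ids.shape) < 0.15) & ~special
    labels[~mask] = -100  # ignored by the cross-entropy loss
    input_ids = input_ids.masked_fill(mask, tokenizer.mask_token_id)
    out = model(input_ids=input_ids,
                attention_mask=batch["attention_mask"],
                labels=labels)
    out.loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return out.loss.item()
```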
### 2. Generating pseudo-labels
`sh bash/pseudo/create_all_pseudo_labels_toy.sh` – this trains 3 basic models (bert-base, bert-large, and bert-base-pretrained) on the competition data (a toy example, the first 50 rows), then creates pseudo-labels using these 3 models; the result is stored in the `pseudo-predictions/pseudo-100k-3x-blend-no-leak/` folder. Run `sh bash/pseudo/create_all_pseudo_labels.sh` (without `_toy`) for the actual generation of pseudo-labels. This requires `sampled_sx_so.csv.gz` from [the shared archive](https://tinyurl.com/t9bjvlm) and results in `pseudo-100k-3x-blend-no-leak.tar.gz` shared in the same archive.
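Blending the three models' predictions into pseudo-labels is, schematically, a per-row average over the label columns. A minimal sketch follows; the directory names match the `--output_dir` arguments in `bash/pseudo/create_pseudo_*.sh`, but the per-fold file naming and the plain mean are assumptions, and the exact recipe is in `step2_pseudo_labeling/blend_pseudo.py`.

```python
# Minimal sketch of blending pseudo-labels from the three base models.
# File naming and the plain mean are assumptions; see
# step2_pseudo_labeling/blend_pseudo.py for the real recipe.
import pandas as pd

def blend_pseudo_fold(fold: int):
    parts = [
        pd.read_csv(f"pseudo-predictions/{name}/fold-{fold}.csv")
        for name in ("base", "base-pretrained", "large")
    ]
    label_cols = [c for c in parts[0].columns if c != "qa_id"]
    blend = parts[0].copy()
    blend[label_cols] = sum(p[label_cols].values for p in parts) / len(parts)
    blend.to_csv(
        f"pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{fold}.csv.gz",
        index=False, compression="gzip",
    )
```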
### 3. BERT-base-cased pretrained with StackExchange
Training 5 BERT-base models (cross-validation): `sh bash/training/train3_bert_base_cased_stackx_pretrained.sh toy`. The result (one model checkpoint for each fold) is written to `input/model1_ckpt`. Full training is done without the `toy` argument; the result is found in [this Kaggle Dataset](https://www.kaggle.com/kashnitsky/google-qa-quest-labeling-bibimorph-model-1-5-folds).
### 4. BERT-base-cased pretrained with StackExchange + pseudo-labels
Training 5 BERT-base models (cross-validation): `sh bash/training/train4_bert_base_cased_stackx_with_pseudo_labels.sh toy`. The result is written to `experiments/1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200`. Full training is done without the `toy` argument; the result is found in [this Kaggle Dataset](https://www.kaggle.com/yaroshevskiy/bert-base-pretrained).
### 5. RoBERTa-base with pseudo-labels
Download the RoBERTa-base checkpoint by running `sh bash/training/load_roberta_weights.sh`.
Training 5 RoBERTa-base models (cross-validation): `sh bash/training/train5_roberta_with_pseudo_labels.sh`. The result is written to `experiments/2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200` and can also be found in [this Kaggle Dataset](https://www.kaggle.com/ddanevskyi/roberta-base-model). Also, 5 model checkpoints (one per fold) are stored [here](https://www.kaggle.com/dmitriyab/roberta-stackx-base-pl20k).
### 6. BART-large with pseudo-labels
Training 5 BART-large models (cross-validation): `sh bash/training/train6_bart_with_pseudo_labels.sh toy`. The result is written to `experiments/4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250`. Full training is done without the `toy` argument; the result is found in [this Kaggle Dataset](https://www.kaggle.com/yaroshevskiy/bart-large), and 5 model checkpoints (one per fold) are stored [here](https://www.kaggle.com/yaroshevskiy/quest-bart).
## Inference
These are the steps to reproduce our final solution (the same as our Kaggle Notebook [1st place solution](https://www.kaggle.com/ddanevskyi/1st-place-solution)). You can just run `sh bash/inference/run_inference.sh` to run the whole pipeline, from data loading to forming a submission file (the execution log is written to `logs/all_inference.log`; the run takes ~10 minutes). Here is a breakdown of all steps:
1. Make sure you've got a fresh [Kaggle API token](https://www.kaggle.com/docs/api) and download competition data by running `sh bash/download_comp_data.sh`. This will populate `input/google-quest-challenge` with three more CSV files: `train.csv`, `test.csv`, and `sample_submission.csv`.
2. Run `sh bash/download_all_model_ckpts_for_inference.sh`. This will download all models needed for inference (about 18 GB; this might take from several minutes to more than an hour depending on Internet speed):
- BERT checkpoints from [this Dataset](https://www.kaggle.com/kashnitsky/google-qa-quest-labeling-bibimorph-model-1-5-folds) (the result of running steps 1, 3 above)
- BERT checkpoints from [this Dataset](https://www.kaggle.com/yaroshevskiy/bert-base-pretrained) (the result of running steps 2, 4 above)
- RoBERTa checkpoints from [this Dataset](https://www.kaggle.com/kashnitsky/google-qa-quest-labeling-bibimorph-model-3-roberta) (the result of running steps 1, 2, 5 above)
- BART checkpoints from [this Dataset](https://www.kaggle.com/yaroshevskiy/quest-bart) (the result of running steps 2, 6 above)
3. Inference with 5 checkpoints of BERT-base-cased finetuned with StackExchange data: `sh bash/inference/model1_inference.sh`
4. Same for the BERT model with pseudo-labels: `sh bash/inference/model2_inference.sh`
5. Inference with 5 checkpoints of RoBERTa finetuned with StackExchange data, with pseudo-labels: `sh bash/inference/model3_inference.sh`
6. Inference with 5 checkpoints of BART with pseudo-labels: `sh bash/inference/model4_inference.sh`
7. Once inference is done, the final step blends and postprocesses model predictions: `sh bash/blending_n_postprocessing.sh`
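A note on why postprocessing helps: the competition metric is a column-wise Spearman rank correlation, and the targets take a small set of discrete values (aggregated rater votes), so snapping continuous predictions onto a discrete grid creates ties where the targets also tie, which can raise the score. Here is a sketch of one such discretization, assuming a nearest-train-value binning; the actual thresholds live in `step11_final/blending_n_postprocessing.py`.

```python
# Sketch of discretizing blended predictions for a rank metric.
# Nearest-train-value snapping is an assumption; the real recipe is in
# step11_final/blending_n_postprocessing.py.
import numpy as np

def discretize(preds: np.ndarray, train_values: np.ndarray) -> np.ndarray:
    """Map each prediction of one target column to the nearest value
    that this column takes in train."""
    grid = np.unique(train_values)
    idx = np.abs(preds[:, None] - grid[None, :]).argmin(axis=1)
    return grid[idx]
```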
Final submission `submissions/submission.csv` scores 0.46893 on the public competition [leaderboard](https://www.kaggle.com/c/google-quest-challenge/leaderboard). To actually submit a CSV file, you can check [this Kaggle Notebook](https://www.kaggle.com/kashnitsky/google-quest-q-a-submit-from-a-csv-file).
================================================
FILE: bash/blending_n_postprocessing.sh
================================================
#!/bin/bash
python step11_final/blending_n_postprocessing.py
================================================
FILE: bash/download_all_model_ckpts_for_inference.sh
================================================
#!/bin/bash
# model 1
(kaggle datasets download -d kashnitsky/google-qa-quest-labeling-bibimorph-model-1-5-folds > /dev/null 2>&1 && \
unzip google-qa-quest-labeling-bibimorph-model-1-5-folds.zip -d input/model1_ckpt \
&& rm google-qa-quest-labeling-bibimorph-model-1-5-folds.zip &)
# model 2
(kaggle datasets download -d yaroshevskiy/bert-base-pretrained > /dev/null 2>&1 && \
unzip bert-base-pretrained.zip -d input/ && rm bert-base-pretrained.zip && \
mv input/stackx-base-cased input/model2_ckpt &)
(kaggle datasets download -d ddanevskyi/bert-base-pseudo-noleak-random > /dev/null 2>&1 && \
unzip bert-base-pseudo-noleak-random.zip -d input/model2_ckpt/bert-base-pseudo-noleak-random && \
rm bert-base-pseudo-noleak-random.zip)
# model 3
(kaggle datasets download -d kashnitsky/google-qa-quest-labeling-bibimorph-model-3-roberta > /dev/null 2>&1 && \
mkdir input/model3_ckpt/ && \
unzip google-qa-quest-labeling-bibimorph-model-3-roberta.zip -d input/model3_ckpt/folds && \
rm google-qa-quest-labeling-bibimorph-model-3-roberta.zip &)
(kaggle datasets download -d ddanevskyi/roberta-base-model > /dev/null 2>&1 && \
unzip roberta-base-model.zip -d input/model3_ckpt/roberta-base-model && \
rm roberta-base-model.zip &)
# model 4
(kaggle datasets download -d yaroshevskiy/quest-bart > /dev/null 2>&1 && \
unzip quest-bart.zip -d input/model4_ckpt && rm quest-bart.zip && \
kaggle datasets download -d yaroshevskiy/bart-large > /dev/null 2>&1 && \
unzip bart-large.zip -d input/model4_ckpt && rm bart-large.zip &)
================================================
FILE: bash/download_comp_data.sh
================================================
#!/bin/bash
# competition data
(kaggle competitions download -c google-quest-challenge && \
unzip google-quest-challenge.zip; rm google-quest-challenge.zip; mv *.csv input/google-quest-challenge &)
================================================
FILE: bash/inference/model1_inference.sh
================================================
#!/bin/bash
python steps7_10_inference/model1_bert_code/predict_test.py \
--model_dir input/model1_ckpt/ \
--data_path input/google-quest-challenge/ \
--sub_file submissions/model1_submission.csv \
> logs/model1_inference.log 2>&1
================================================
FILE: bash/inference/model2_inference.sh
================================================
#!/bin/bash
python steps7_10_inference/model2_bert_code/run.py \
--sub_file=submissions/model2_bert_base_cased_pred.csv \
--data_path=input/google-quest-challenge/ \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--batch_size=8 \
--bert_model=input/model2_ckpt/ \
--checkpoints=input/model2_ckpt/bert-base-pseudo-noleak-random \
> logs/model2_inference.log 2>&1
================================================
FILE: bash/inference/model3_inference.sh
================================================
#!/bin/bash
# some mag setup to go on with the Experiment
ROBERTA_EXPERIMENT_DIR=2-4-roberta-base-saved-5-head_tail-roberta-stackx-base-v2-pl1kksample20k-1e-05-210-260-500-26-roberta-200
OUTPUT_DIR=submissions/model3_roberta-base-output
cp -r input/model3_ckpt/folds/* experiments/$ROBERTA_EXPERIMENT_DIR
python steps7_10_inference/model3_roberta_code/infer.py \
--experiment=$ROBERTA_EXPERIMENT_DIR \
--checkpoint=best_model.pth \
--bert_model=input/model3_ckpt/roberta-base-model/ \
--dataframe=input/google-quest-challenge/test.csv \
--output_dir=$OUTPUT_DIR \
> logs/model3_inference.log 2>&1
================================================
FILE: bash/inference/model4_inference.sh
================================================
#!/bin/bash
python steps7_10_inference/model4_bart_code/run.py \
--sub_file=submissions/model4_bart_large_pred.csv \
--data_path=input/google-quest-challenge/ \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--batch_size=4 \
--bert_model=input/model4_ckpt/bart.large/ \
> logs/model4_inference.log 2>&1
================================================
FILE: bash/inference/run_inference.sh
================================================
#!/bin/bash
#echo "Downloading all model checkpoints" && \
#sh bash/download_all_model_ckpts_for_inference.sh && \
echo "Inference with the 1st model (BERT-base-cased)" && \
sh bash/inference/model1_inference.sh && \
echo "Inference with the 2nd model (BERT-base-cased with pseudo-labels)" && \
sh bash/inference/model2_inference.sh && \
echo "Inference with the 3rd model (RoBERTa with pseudo-labels)" && \
sh bash/inference/model3_inference.sh && \
echo "Inference with the 4th model (BART with pseudo-labels)" && \
sh bash/inference/model4_inference.sh && \
echo "Blending and postprocessing" && \
sh bash/blending_n_postprocessing.sh
================================================
FILE: bash/pseudo/create_all_pseudo_labels.sh
================================================
#!/bin/bash
# train three models
sh bash/pseudo/train_base.sh && \
sh bash/pseudo/train_base_pretrained.sh && \
sh bash/pseudo/train_large.sh
# create pseudo-labels with all models
sh bash/pseudo/create_pseudo_base.sh && \
sh bash/pseudo/create_pseudo_base_pretrained.sh && \
sh bash/pseudo/create_pseudo_large.sh
# blend pseudo-labels
python step2_pseudo_labeling/blend_pseudo.py
================================================
FILE: bash/pseudo/create_all_pseudo_labels_toy.sh
================================================
#!/bin/bash
# train three models
sh bash/pseudo/train_base.sh toy && \
sh bash/pseudo/train_base_pretrained.sh toy && \
sh bash/pseudo/train_large.sh toy
# create pseudo-labels with all models
sh bash/pseudo/create_pseudo_base.sh toy && \
sh bash/pseudo/create_pseudo_base_pretrained.sh toy && \
sh bash/pseudo/create_pseudo_large.sh toy
# blend pseudo-labels
python step2_pseudo_labeling/blend_pseudo.py toy
================================================
FILE: bash/pseudo/create_pseudo_base.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
dataframe=input/qa_stackexchange_cleaned_toy.csv
else
dataframe=input/qa_stackexchange_cleaned.csv
fi
python step2_pseudo_labeling/bert-base/infer_pseudo.py \
--experiment=experiments/1-8-5-head_tail-qa-1e-05-210-260-500-26-300 \
--checkpoint=best_model.pth \
--dataframe=$dataframe \
--output_dir=pseudo-predictions/base/
================================================
FILE: bash/pseudo/create_pseudo_base_pretrained.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
dataframe=input/qa_stackexchange_cleaned_toy.csv
else
dataframe=input/qa_stackexchange_cleaned.csv
fi
python step2_pseudo_labeling/bert-base-pretrained/infer_pseudo.py \
--experiment=experiments/1-8-5-head_tail-pretrained-1e-05-210-260-500-26-100 \
--checkpoint=best_model.pth \
--dataframe=$dataframe \
--output_dir=pseudo-predictions/base-pretrained/
================================================
FILE: bash/pseudo/create_pseudo_large.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
dataframe=input/qa_stackexchange_cleaned_toy.csv
else
dataframe=input/qa_stackexchange_cleaned.csv
fi
python step2_pseudo_labeling/bert-large/infer_pseudo.py \
--experiment=experiments/4-2-5-head_tail-large-1e-05-210-260-500-26-300 \
--checkpoint=best_model.pth \
--dataframe=$dataframe \
--output_dir=pseudo-predictions/large/
================================================
FILE: bash/pseudo/train_base.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
epochs=1
else
epochs=5
fi
python step2_pseudo_labeling/bert-base/run.py \
--epochs=$epochs \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--data_path=input/google-quest-challenge/ \
--batch_accumulation=1 \
--batch_size=8 \
--warmup=300 \
--lr=1e-5 \
--bert_model=bert-base-uncased \
--toy=$toy
================================================
FILE: bash/pseudo/train_base_pretrained.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
epochs=1
else
epochs=3
fi
python step2_pseudo_labeling/bert-base-pretrained/run.py \
--epochs=$epochs \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--data_path=input/google-quest-challenge/ \
--batch_accumulation=1 \
--batch_size=8 \
--warmup=100 \
--lr=1e-5 \
--bert_model=input/stackx-base-cased \
--label=pretrained \
--toy=$toy
================================================
FILE: bash/pseudo/train_large.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
epochs=1
else
epochs=5
fi
python step2_pseudo_labeling/bert-large/run.py \
--epochs=$epochs \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--data_path=input/google-quest-challenge/ \
--batch_accumulation=4 \
--batch_size=2 \
--warmup=300 \
--lr=1e-5 \
--bert_model=bert-large-uncased \
--label=large \
--toy=$toy
================================================
FILE: bash/setup.sh
================================================
#!/bin/bash
# install mag,
# a custom lightweight library to keep track of experiments
git clone https://github.com/ex4sperans/mag.git
cd mag; python setup.py install; cd ../;
# install hacked version of fairseq
# Sorry, this is very-very hacky and ugly but works
export PATH_TO_GPT2BPE=packages/gpt2bpe; \
export PATH_TO_BART_MODEL=input/model4_ckpt/bart.large/; \
cd packages/fairseq-hacked/; python setup.py develop; cd ../..
================================================
FILE: bash/training/load_roberta_weights.sh
================================================
#!/bin/bash
mkdir input/roberta-base/; \
wget https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.json -O input/roberta-base/config.json;\
wget https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json -O input/roberta-base/vocab.json;\
wget https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt -O input/roberta-base/merges.json;\
wget https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin -O input/roberta-base/pytorch_model.bin
================================================
FILE: bash/training/train1a_prepare_stackx_data.sh
================================================
#!/bin/bash
# scraping StackExchange
python step1_lm_finetuning/data_preparation/scrape_stack_exchange.py
# processing results
python step1_lm_finetuning/data_preparation/clean_stack_exchange_qa.py
================================================
FILE: bash/training/train1b_train_bert_stackx_lang_model.sh
================================================
#!/bin/bash
python step1_lm_finetuning/train_stackx_lm.py
================================================
FILE: bash/training/train2_pseudo_labels.sh
================================================
#!/bin/bash
# actually not needed here, just for consistency
sh bash/pseudo/create_all_pseudo_labels.sh
================================================
FILE: bash/training/train3_bert_base_cased_stackx_pretrained.sh
================================================
#!/bin/bash
toy=${1:-False}
python step3_model1_bert_code/train.py $toy
================================================
FILE: bash/training/train4_bert_base_cased_stackx_with_pseudo_labels.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
epochs=1
n_pseudo=20
else
epochs=5
n_pseudo=20000
fi
python step4_model2_bert_code/run.py \
--epochs=$epochs \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--data_path=input/google-quest-challenge \
--batch_accumulation=1 \
--batch_size=8 \
--warmup=200 \
--lr=1e-5 \
--bert_model=input/stackx-base-cased \
--label=pseudonoleakrandom100k \
--pseudo_file pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz \
--n_pseudo=$n_pseudo \
--toy=$toy
================================================
FILE: bash/training/train5_roberta_with_pseudo_labels.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
epochs=1
else
epochs=5
fi
python step5_model3_roberta_code/run.py \
--epochs=$epochs \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--data_path=input/google-quest-challenge \
--batch_accumulation=2 \
--batch_size=4 \
--warmup=200 \
--lr=1e-5 \
--bert_model=input/roberta-base \
--label=pseudonoleakrandom100k \
--pseudo_file pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz \
--n_pseudo=20000 \
--model_type=roberta \
--toy=$toy
================================================
FILE: bash/training/train6_bart_with_pseudo_labels.sh
================================================
#!/bin/bash
toy=${1:-False}
if [ $toy = 'toy' ]; then
epochs=1
else
epochs=4
fi
python step6_model4_bart_code/run.py \
--data_path=input/google-quest-challenge \
--epochs=$epochs \
--max_sequence_length=500 \
--max_title_length=26 \
--max_question_length=260 \
--max_answer_length=210 \
--batch_accumulation=4 \
--batch_size=2 \
--warmup=250 \
--lr=2e-5 \
--bert_model=input/model4_ckpt/bart.large/ \
--pseudo_file=pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz \
--split_pseudo \
--leak_free_pseudo \
--label=bart \
--toy=$toy
================================================
FILE: experiments/1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200/command
================================================
step4_model2_bert_code/run.py --epochs=5 --max_sequence_length=500 --max_title_length=26 --max_question_length=260 --max_answer_length=210 --data_path=input/google-quest-challenge --batch_accumulation=1 --batch_size=8 --warmup=200 --lr=1e-5 --bert_model=input/stackx-base-cased --label=pseudonoleakrandom100k --pseudo_file pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz --n_pseudo=20000 --toy=False
================================================
FILE: experiments/1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200/commit_hash
================================================
5386dc5f3ba53b28bbe7628630cb365ccac00122
================================================
FILE: experiments/1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200/config.json
================================================
{
"_bert_model": "input/stackx-base-cased",
"_pseudo_file": "pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz",
"_seed": 42,
"batch_accumulation": 1,
"batch_size": 8,
"folds": 5,
"head_tail": true,
"label": "pseudonoleakrandom100k",
"lr": 1e-05,
"max_answer_length": 210,
"max_question_length": 260,
"max_sequence_length": 500,
"max_title_length": 26,
"warmup": 200
}
================================================
FILE: experiments/2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200/command
================================================
step5_model3_roberta_code/run.py --epochs=5 --max_sequence_length=500 --max_title_length=26 --max_question_length=260 --max_answer_length=210 --data_path=input/google-quest-challenge --batch_accumulation=2 --batch_size=4 --warmup=200 --lr=1e-5 --bert_model=input/roberta-base --label=pseudonoleakrandom100k --pseudo_file pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz --n_pseudo=20000 --model_type=roberta --toy=False
================================================
FILE: experiments/2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200/commit_hash
================================================
1c4ae95b166fc5aea653bef3b4ecda64fbdbe9cf
================================================
FILE: experiments/2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200/config.json
================================================
{
"_bert_model": "input/roberta-base",
"_pseudo_file": "pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz",
"_seed": 42,
"batch_accumulation": 2,
"batch_size": 4,
"folds": 5,
"head_tail": true,
"label": "pseudonoleakrandom100k",
"lr": 1e-05,
"max_answer_length": 210,
"max_question_length": 260,
"max_sequence_length": 500,
"max_title_length": 26,
"model_type": "roberta",
"warmup": 200
}
================================================
FILE: experiments/2-4-roberta-base-saved-5-head_tail-roberta-stackx-base-v2-pl1kksample20k-1e-05-210-260-500-26-roberta-200/config.json
================================================
{"_seed": 42, "batch_accumulation": 2, "batch_size": 4, "bert_model": "roberta-base-saved", "folds": 5, "head_tail": true, "label": "roberta-stackx-base-v2-pl1kksample20k", "lr": 1e-05, "max_answer_length": 210, "max_question_length": 260, "max_sequence_length": 500, "max_title_length": 26, "model_type": "roberta", "warmup": 200}
================================================
FILE: experiments/2-4-roberta-base-saved-5-head_tail-roberta-stackx-base-v2-pl1kksample20k-1e-05-210-260-500-26-roberta-200/config_train.json
================================================
{
"_pseudo_file": "/data/dis/monty/common_crawl/1kk/pseudo-1kk-blend-fold-{}.csv.gz",
"_seed": 42,
"batch_accumulation": 2,
"batch_size": 4,
"bert_model": "/data/dis/monty/pretrained_transformers/roberta_stackx_base_2",
"folds": 5,
"head_tail": true,
"label": "roberta-stackx-base-v2-pl1kksample20k",
"lr": 1e-05,
"max_answer_length": 210,
"max_question_length": 260,
"max_sequence_length": 500,
"max_title_length": 26,
"model_type": "roberta",
"warmup": 200
}
================================================
FILE: experiments/4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250/command
================================================
step6_model4_bart_code/run.py --data_path=input/google-quest-challenge --epochs=1 --max_sequence_length=500 --max_title_length=26 --max_question_length=260 --max_answer_length=210 --batch_accumulation=4 --batch_size=2 --warmup=250 --lr=2e-5 --bert_model=input/model4_ckpt/bart.large/ --pseudo_file=pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz --split_pseudo --leak_free_pseudo --label=bart --toy=toy
================================================
FILE: experiments/4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250/commit_hash
================================================
5386dc5f3ba53b28bbe7628630cb365ccac00122
================================================
FILE: experiments/4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250/config.json
================================================
{
"_bert_model": "input/model4_ckpt/bart.large/",
"_pseudo_file": "pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}.csv.gz",
"_seed": 42,
"batch_accumulation": 4,
"batch_size": 2,
"folds": 5,
"head_tail": true,
"label": "bart",
"lr": 2e-05,
"max_answer_length": 210,
"max_question_length": 260,
"max_sequence_length": 500,
"max_title_length": 26,
"split_pseudo": true,
"warmup": 250
}
================================================
FILE: input/google-quest-challenge/sample_submission_toy.csv
================================================
qa_id,question_asker_intent_understanding,question_body_critical,question_conversational,question_expect_short_answer,question_fact_seeking,question_has_commonly_accepted_answer,question_interestingness_others,question_interestingness_self,question_multi_intent,question_not_really_a_question,question_opinion_seeking,question_type_choice,question_type_compare,question_type_consequence,question_type_definition,question_type_entity,question_type_instructions,question_type_procedure,question_type_reason_explanation,question_type_spelling,question_well_written,answer_helpful,answer_level_of_information,answer_plausible,answer_relevance,answer_satisfaction,answer_type_instructions,answer_type_procedure,answer_type_reason_explanation,answer_well_written
39,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003,0.0030800000000000003
46,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448,0.00448
70,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673,0.00673
132,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401,0.01401
200,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074,0.02074
245,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494,0.02494
257,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635,0.02635
267,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719,0.02719
284,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997,0.028589999999999997
292,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004,0.029710000000000004
================================================
FILE: input/google-quest-challenge/test_toy.csv
================================================
qa_id,question_title,question_body,question_user_name,question_user_page,answer,answer_user_name,answer_user_page,url,category,host
39,Will leaving corpses lying around upset my prisoners?,"I see questions/information online about how to get rid of corpses but I'm not sure what the consequences to having them lying around are.
I know that I can use a morgue to store them and that they will eventually be taken away in a hearse. However, I've always just left them lying around. This doesn't seem to have any obvious consequences.
Do they upset prisoners or staff (e.g. increasing the danger level beyond what death normally does)? Make them sick? Slow down pathing? Is this something that's not implemented yet or is leaving dead bodies all over the place meant to be completely fine?
The wiki's pages for Death and Morgue don't shed any light on this. All I'm finding is the how of getting rid of corpses/glitches related to it but not why it matters in the first place. All I've seen that's related is that apparently prisoners can loot guard corpses for keys, but there's no obvious consequences for prisoner corpses which are what I tend to end up with.
So why not just leave them in the corridors?
",Dylan,https://gaming.stackexchange.com/users/64471,"There is no consequence for leaving corpses anywhere. All it does if leave a mark of failure in your path of making a fully functional prison.
",Nelson868,https://gaming.stackexchange.com/users/97324,http://gaming.stackexchange.com/questions/197934/will-leaving-corpses-lying-around-upset-my-prisoners,CULTURE,gaming.stackexchange.com
46,Url link to feature image in the portfolio,"I am new to Wordpress. i have issue with Feature image. just i need to add URL to feature image(when we click on that feature image , it should redirect to that particular URL).
also is it possible to give URL to Title of the Portfolio categories page which i used in normal page.
This is Portfolio , i have used in the ""mypage"" . so in that"" mypage"" when we click on that image and title it should be redirect to the link (should able to give individual link)
Any help would be appreciated. Thanks.
",Anu,https://wordpress.stackexchange.com/users/72927,"I think it is possible with custom fields.
Add a custom fields, for example, named link. Put the link into the value field. Then you will be able to access it something like that:
<a href=""<?php echo get_post_meta( get_the_ID(), 'link', true ); ?>"">
<?php the_post_thumbnail(); ?>
</a>
",Irina,https://wordpress.stackexchange.com/users/27233,http://wordpress.stackexchange.com/questions/187970/url-link-to-feature-image-in-the-portfolio,TECHNOLOGY,wordpress.stackexchange.com
70,"Is accuracy, recoil or bullet spread affected by enemy hits?","To experiment I started a bot game, toggled invincibility and let the bots attack me. There were two clear hit effects: a major one that tilts view up a lot and sideways and a minor one that only tilts it up.
So I tried releasing single shots exactly at the tilts: during a major one a single bullet ended up here (slightly covered by text). Actually the crosshair never moves that high so there tends to be a good amount of additional upward deviation of single shots fired.
But when single-shooting during the minor tilts the marks seemed to end up exactly at the location of the crosshair.
Now, when switching to spraying while being shot this pattern emerged. This was mostly caused by the minor tilts and, I guess, ""standard recoil"". Occasionally a major tilt caused a single bullet to lie severly outside of the depicted cone.
",Konsta,https://gaming.stackexchange.com/users/37545,"You do not have armour in the screenshots. This suggests you are being affected by a large amount of aim punch.
",Damon Smithies,https://gaming.stackexchange.com/users/70641,http://gaming.stackexchange.com/questions/215490/is-accuracy-recoil-or-bullet-spread-affected-by-enemy-hits,CULTURE,gaming.stackexchange.com
132,Suddenly got an I/O error from my external HDD,"I have used my Raspberry Pi as a torrent-server for quite a while now. My Western Digital ""My Passport-Ultra"" external HDD and Transmission running on RPi has worked flawlessly for more than a year. Suddenly last week my setup started producing I/O errors and now when I SSH into my RPi I cant find the HDD folder.
I have not changed anything with my RPi or the torrent-server setup since I set it up.
Can someone help me find a solution to my problem?
EDIT:
The external HHD works fine the first 20 minutes or so, and then Transmisson gives me ""Error: Input/output error"".
lsusb -t output:
/: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=dwc_otg/1p, 480M
|__ Port 1: Dev 2, If 0, Class=hub, Driver=hub/3p, 480M
|__ Port 1: Dev 3, If 0, Class=vend., Driver=smsc95xx, 480M
|__ Port 3: Dev 4, If 0, Class=stor., Driver=usb-storage, 480M
lsusb -t output after a while:
/: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=dwc_otg/1p, 480M
|__ Port 1: Dev 2, If 0, Class=hub, Driver=hub/3p, 480M
|__ Port 1: Dev 3, If 0, Class=vend., Driver=smsc95xx, 480M
mount output:
/dev/root on / type ext4 (rw,noatime,data=ordered)
devtmpfs on /dev type devtmpfs (rw,relatime,size=216132k,nr_inodes=54033,mode=755)
tmpfs on /run type tmpfs (rw,nosuid,noexec,relatime,size=44880k,mode=755)
tmpfs on /run/lock type tmpfs (rw,nosuid,nodev,noexec,relatime,size=5120k)
proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
tmpfs on /run/shm type tmpfs (rw,nosuid,nodev,noexec,relatime,size=89740k)
devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620)
/dev/mmcblk0p5 on /boot type vfat (rw,relatime,fmask=0022,dmask=0022,codepage=cp437,iocharset=ascii,shortname=mixed,errors=remount-ro)
fusectl on /sys/fs/fuse/connections type fusectl (rw,relatime)
/dev/sda1 on /NAS/USBHDD type fuseblk (rw,nosuid,nodev,noatime,user_id=0,group_id=0,allow_other,blksize=4096)
lsusb -v output:
Bus 001 Device 002: ID 0424:9512 Standard Microsystems Corp.
Couldn't open device, some information will be missing
Device Descriptor:
bLength 18
bDescriptorType 1
bcdUSB 2.00
bDeviceClass 9 Hub
bDeviceSubClass 0 Unused
bDeviceProtocol 2 TT per port
bMaxPacketSize0 64
idVendor 0x0424 Standard Microsystems Corp.
idProduct 0x9512
bcdDevice 2.00
iManufacturer 0
iProduct 0
iSerial 0
bNumConfigurations 1
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 41
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 0
bmAttributes 0xe0
Self Powered
Remote Wakeup
MaxPower 2mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 1
bInterfaceClass 9 Hub
bInterfaceSubClass 0 Unused
bInterfaceProtocol 1 Single TT
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0001 1x 1 bytes
bInterval 12
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 1
bNumEndpoints 1
bInterfaceClass 9 Hub
bInterfaceSubClass 0 Unused
bInterfaceProtocol 2 TT per port
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0001 1x 1 bytes
bInterval 12
Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
Couldn't open device, some information will be missing
Device Descriptor:
bLength 18
bDescriptorType 1
bcdUSB 2.00
bDeviceClass 9 Hub
bDeviceSubClass 0 Unused
bDeviceProtocol 1 Single TT
bMaxPacketSize0 64
idVendor 0x1d6b Linux Foundation
idProduct 0x0002 2.0 root hub
bcdDevice 3.06
iManufacturer 3
iProduct 2
iSerial 1
bNumConfigurations 1
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 25
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 0
bmAttributes 0xe0
Self Powered
Remote Wakeup
MaxPower 0mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 1
bInterfaceClass 9 Hub
bInterfaceSubClass 0 Unused
bInterfaceProtocol 0 Full speed (or root) hub
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0004 1x 4 bytes
bInterval 12
Bus 001 Device 003: ID 0424:ec00 Standard Microsystems Corp.
Couldn't open device, some information will be missing
Device Descriptor:
bLength 18
bDescriptorType 1
bcdUSB 2.00
bDeviceClass 255 Vendor Specific Class
bDeviceSubClass 0
bDeviceProtocol 1
bMaxPacketSize0 64
idVendor 0x0424 Standard Microsystems Corp.
idProduct 0xec00
bcdDevice 2.00
iManufacturer 0
iProduct 0
iSerial 0
bNumConfigurations 1
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 39
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 0
bmAttributes 0xe0
Self Powered
Remote Wakeup
MaxPower 2mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 3
bInterfaceClass 255 Vendor Specific Class
bInterfaceSubClass 0
bInterfaceProtocol 255
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 2
Transfer Type Bulk
Synch Type None
Usage Type Data
wMaxPacketSize 0x0200 1x 512 bytes
bInterval 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x02 EP 2 OUT
bmAttributes 2
Transfer Type Bulk
Synch Type None
Usage Type Data
wMaxPacketSize 0x0200 1x 512 bytes
bInterval 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x83 EP 3 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0010 1x 16 bytes
bInterval 4
Bus 001 Device 004: ID 1058:0810 Western Digital Technologies, Inc.
Couldn't open device, some information will be missing
Device Descriptor:
bLength 18
bDescriptorType 1
bcdUSB 2.10
bDeviceClass 0 (Defined at Interface level)
bDeviceSubClass 0
bDeviceProtocol 0
bMaxPacketSize0 64
idVendor 0x1058 Western Digital Technologies, Inc.
idProduct 0x0810
bcdDevice 10.42
iManufacturer 1
iProduct 2
iSerial 3
bNumConfigurations 1
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 32
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 0
bmAttributes 0x80
(Bus Powered)
MaxPower 100mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 2
bInterfaceClass 8 Mass Storage
bInterfaceSubClass 6 SCSI
bInterfaceProtocol 80 Bulk-Only
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x8b EP 11 IN
bmAttributes 2
Transfer Type Bulk
Synch Type None
Usage Type Data
wMaxPacketSize 0x0200 1x 512 bytes
bInterval 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x0a EP 10 OUT
bmAttributes 2
Transfer Type Bulk
Synch Type None
Usage Type Data
wMaxPacketSize 0x0200 1x 512 bytes
bInterval 0
lsusb -v output after a while:
Bus 001 Device 002: ID 0424:9512 Standard Microsystems Corp.
Couldn't open device, some information will be missing
Device Descriptor:
bLength 18
bDescriptorType 1
bcdUSB 2.00
bDeviceClass 9 Hub
bDeviceSubClass 0 Unused
bDeviceProtocol 2 TT per port
bMaxPacketSize0 64
idVendor 0x0424 Standard Microsystems Corp.
idProduct 0x9512
bcdDevice 2.00
iManufacturer 0
iProduct 0
iSerial 0
bNumConfigurations 1
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 41
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 0
bmAttributes 0xe0
Self Powered
Remote Wakeup
MaxPower 2mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 1
bInterfaceClass 9 Hub
bInterfaceSubClass 0 Unused
bInterfaceProtocol 1 Single TT
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0001 1x 1 bytes
bInterval 12
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 1
bNumEndpoints 1
bInterfaceClass 9 Hub
bInterfaceSubClass 0 Unused
bInterfaceProtocol 2 TT per port
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0001 1x 1 bytes
bInterval 12
Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
Couldn't open device, some information will be missing
Device Descriptor:
bLength 18
bDescriptorType 1
bcdUSB 2.00
bDeviceClass 9 Hub
bDeviceSubClass 0 Unused
bDeviceProtocol 1 Single TT
bMaxPacketSize0 64
idVendor 0x1d6b Linux Foundation
idProduct 0x0002 2.0 root hub
bcdDevice 3.06
iManufacturer 3
iProduct 2
iSerial 1
bNumConfigurations 1
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 25
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 0
bmAttributes 0xe0
Self Powered
Remote Wakeup
MaxPower 0mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 1
bInterfaceClass 9 Hub
bInterfaceSubClass 0 Unused
bInterfaceProtocol 0 Full speed (or root) hub
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0004 1x 4 bytes
bInterval 12
Bus 001 Device 003: ID 0424:ec00 Standard Microsystems Corp.
Couldn't open device, some information will be missing
Device Descriptor:
bLength 18
bDescriptorType 1
bcdUSB 2.00
bDeviceClass 255 Vendor Specific Class
bDeviceSubClass 0
bDeviceProtocol 1
bMaxPacketSize0 64
idVendor 0x0424 Standard Microsystems Corp.
idProduct 0xec00
bcdDevice 2.00
iManufacturer 0
iProduct 0
iSerial 0
bNumConfigurations 1
Configuration Descriptor:
bLength 9
bDescriptorType 2
wTotalLength 39
bNumInterfaces 1
bConfigurationValue 1
iConfiguration 0
bmAttributes 0xe0
Self Powered
Remote Wakeup
MaxPower 2mA
Interface Descriptor:
bLength 9
bDescriptorType 4
bInterfaceNumber 0
bAlternateSetting 0
bNumEndpoints 3
bInterfaceClass 255 Vendor Specific Class
bInterfaceSubClass 0
bInterfaceProtocol 255
iInterface 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x81 EP 1 IN
bmAttributes 2
Transfer Type Bulk
Synch Type None
Usage Type Data
wMaxPacketSize 0x0200 1x 512 bytes
bInterval 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x02 EP 2 OUT
bmAttributes 2
Transfer Type Bulk
Synch Type None
Usage Type Data
wMaxPacketSize 0x0200 1x 512 bytes
bInterval 0
Endpoint Descriptor:
bLength 7
bDescriptorType 5
bEndpointAddress 0x83 EP 3 IN
bmAttributes 3
Transfer Type Interrupt
Synch Type None
Usage Type Data
wMaxPacketSize 0x0010 1x 16 bytes
bInterval 4
",robbannn,https://raspberrypi.stackexchange.com/users/17341,"Your Western Digital hard drive is disappearing as shown in your lsusb output (Port 3):
lsusb -t output:
/: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=dwc_otg/1p, 480M
|__ Port 1: Dev 2, If 0, Class=hub, Driver=hub/3p, 480M
|__ Port 1: Dev 3, If 0, Class=vend., Driver=smsc95xx, 480M
|__ Port 3: Dev 4, If 0, Class=stor., Driver=usb-storage, 480M
lsusb -t output after a while:
/: Bus 01.Port 1: Dev 1, Class=root_hub, Driver=dwc_otg/1p, 480M
|__ Port 1: Dev 2, If 0, Class=hub, Driver=hub/3p, 480M
|__ Port 1: Dev 3, If 0, Class=vend., Driver=smsc95xx, 480M
I recommend that you copy all of your data off of this drive to another hard drive before it fails completely.
It is possible that it could be the USB connection on the Pi instead of the drive. Does the drive behave like this on a different computer system?
Again, I strongly recommend that you back up any irreplaceable data on this drive before it fails completely. If you have another USB drive, I recommend using rsync. I use the following command to back up one external hard drive to a second drive on my Pi, where /media/USBHDD1 is the mount point of the 1st drive and /media/USBHDD2 is the mount point of the 2nd drive. rsync can recover from an interrupted copy in case your drive disappears while performing the copy: you should be able to just power cycle the failed drive and run the copy command again, and rsync should pick up where it left off.
rsync -av --delete /media/USBHDD1/* /media/USBHDD2/
",HeatfanJohn,https://raspberrypi.stackexchange.com/users/1311,http://raspberrypi.stackexchange.com/questions/19357/suddenly-got-an-i-o-error-from-my-external-hdd,TECHNOLOGY,raspberrypi.stackexchange.com
200,Passenger Name - Flight Booking Passenger only have First Name in Passport,"I have bought Delhi-London return flights for 4 on Saudi Airlines.
2 of the passengers only have a ‘First name’ in their passports, hence I have used the same in ‘First Name’ and ‘Last Name’ while booking the tickets, as that is generally a common practice suggested by many airlines.
Saudi Airlines do not mention anything about this situation on their website, and when I called their UK helpline, they were not very sure.
Do you see travelers encountering any problem at airport because of this?
",Amit,https://travel.stackexchange.com/users/29089,"I called two persons who work for Saudia (ticketing agents), they both said adding the first name in both fields should work, one of them said he would add ""MR"" as the first name and then add the only name as the last name, either way it should work.
They also mentioned there is no clear policy regarding this and it might be confusing for people, but once the passport is found with one name people usually accept any of the solutions above. So no worries, you will be fine.
",Nean Der Thal,https://travel.stackexchange.com/users/10051,http://travel.stackexchange.com/questions/47043/passenger-name-flight-booking-passenger-only-have-first-name-in-passport,CULTURE,travel.stackexchange.com
245,Exactly how are Avenger elites buffed when one of them dies?,"I understand that elites with the Avenger property get stronger as the others in the pack with that property die, but how exactly do they get stronger?
I'm looking for hard numbers.
",Sterno,https://gaming.stackexchange.com/users/3062,"
Only Champion monster types will spawn with the Avenger trait. If a
Champion touting this ability appears, it can imbue all other
Champions in the area with the same type of powers it spawned with,
increasing movement and attack speed and damage inflicted until the
foe is felled. This effect may be stacked up to three times.
Source
I highlighted the important bits.
",ayckoster,https://gaming.stackexchange.com/users/10933,http://gaming.stackexchange.com/questions/70417/exactly-how-are-avenger-elites-buffed-when-one-of-them-dies,CULTURE,gaming.stackexchange.com
257,Configure which plugin custom post types get registered,"I am writing a plugin that creates custom post types, but I want users to be able to turn them on and off via a filter in the functions.php file.
The issue I'm having is that a custom post type's register_post_type has to be called during init, and functions.php is read far after this.
How can I allow users to provide an array, using a filter or action, that will control which post types register_post_type gets called for?
// currently the custom post type constructor has this
if(!post_type_exists($this->name)) {
add_action(""init"", array($this, ""register_post_type""));
add_action(""init"", array($this, ""configure_meta_boxes""));
add_action(""save_post"", array($this, ""save""));
add_filter('myplugin_get_shortcodes', array($this, ""configure_shortcodes""));
}
The filter works just fine so I know it's being called, but for whatever reason the actions aren't.
",Rabbott,https://wordpress.stackexchange.com/users/35459,"
The issue I'm having is that a custom post type's register_post_type has to be called during init, and functions.php is read far after this.
That's not true. functions.php is read before init because you can declare your functions in there and then hook them to init.
Since you want to avoid a settings page and you want your users to modify the code (at their own risk), you could use some flags:
add_action('init', 'wpse_109719_post_types');
function wpse_109719_post_types(){
$post_type1 = true;
$post_type2 = true;
$post_type3 = false; // your users don't want this one
if( $post_type1 ){
register_post_type( 'your_post_type_1', $args );
}
if( $post_type2 ){
register_post_type( 'your_post_type_2', $args );
}
if( $post_type3 ){
// won't be registered since $post_type3 was declared as false
register_post_type( 'your_post_type_3', $args );
}
}
Not tested, but you can use something like this to collect the post types:
$val = array ();
$options_array = apply_filters( 'my_unique_filter', $val );
print_r( $options_array );
// make a loop and register_post_type here. don't forget to hook at init
In your plugins:
function filter1( $val ){
$val[] = 'post_type1';
return $val;
}
function filter2( $val ){
$val[] = 'post_type2';
return $val;
}
function filter3( $val ){
$val[] = 'post_type3';
return $val;
}
add_filter('my_unique_filter', 'filter1');
add_filter('my_unique_filter', 'filter2');
add_filter('my_unique_filter', 'filter3');
",RRikesh,https://wordpress.stackexchange.com/users/17305,http://wordpress.stackexchange.com/questions/109719/configure-which-plugin-custom-post-types-get-registered,TECHNOLOGY,wordpress.stackexchange.com
267,"Why say ""it rhymes with Grape""?","There is a scene in 21 Jumpstreet where Dave Franco' character Eric says ""do you know what they'll do to me in prison it rhymes with grape"" and it made me curious as to why it was phrased that way.
In Knocked Up a similar joke was made ""it rhymes with smabortion"" which I believe I read was done due to censors. However the repeating setup for the joke has me curious if this time it was due to censors or if it was just considered a funny setup that bared repeating. So was it censoring or continuing a joke?
",Kevin Howell,https://movies.stackexchange.com/users/645,"Opinion: It was a funny(?) joke.
Although a similar setup was used in Knocked Up, the truth is that similar setups have been used in plenty of comedy movies.
Whilst I understand your query regarding the censors, it seems difficult to imagine why censorship would be an issue, for one main reason:
There is a colossal amount of crass language, violence, partial nudity and drug taking in the film. Why would all this be considered acceptable but a line about rape wouldn't be?
It seems more likely to me that it was a funny line that works in the context of the scene as it seems so abhorrent that he can't bring himself to utter the actual word, instead resorting to childish rhyming (suggesting the mere thought of it makes him regress to his inner child in fear).
Whether it was a homage to his (extremely popular) line from Knocked Up is something I can find no reference to online, but solely in my own opinion, I can't see how this was a censorship issue.
",Andrew Martin,https://movies.stackexchange.com/users/8219,http://movies.stackexchange.com/questions/21090/why-say-it-rhymes-with-grape,LIFE_ARTS,movies.stackexchange.com
284,c++ getline and stringstream,"I'm trying to read in a file, which has 5 lines, and every line is 3-4 string long.
Here's my input file:
10:30 Hurley 1234567A 10:15
10:45 Hurley 1234567A 11:30
08:35 Jacob 1x1x1x1x1x
08:35 Jacob 1x1x1x1x1x 08:10
08:05 Jacob 1x1x1x1x1x
08:45 Sayid 33332222 09:15
And this is what I get:
10:30 Hurley 1234567A 10:15
10:45 Hurley 1234567A 11:30
08:35 Jacob 1x1x1x1x1x 11:30
08:35 Jacob 1x1x1x1x1x 08:10
08:05 Jacob 1x1x1x1x1x 08:10
08:45 Sayid 33332222 09:15
This is my code:
void enor::Read(status &sx,isle &dx,ifstream &x){
string str;
getline(x, str, '\n');
stringstream ss;
ss << str;
ss >> dx.in >> dx.name >> dx.id >> dx.out;
/*getline(x, str, '\n');
x>>dx.in>>dx.name>>dx.id>>dx.out;*/
if(x.fail())
sx=abnorm;
else
sx=norm;
}
How can I read in the file without having the 3rd and 5th line filled with the 2nd and 4th line's time? I want the dx.out to be empty. Should I use another method, or is it possible to be done with stringstream?
",wfmn17,https://stackoverflow.com/users/1430743,"If >> sees that there is nothing left in the stringstream, it will leave the variable untouched - so dx.out keeps its value from the last line. However, you can do
ss >> dx.in >> dx.name >> dx.id;
if (!(ss >> dx.out))
dx.out = """";
because ss >> dx.out returns ss, and when a stream is converted to a bool (such as when it is used in an if condition), the result is false if the last read attempt failed.
",Aasmund Eldhuset,https://stackoverflow.com/users/626853,http://stackoverflow.com/questions/16374187/c-getline-and-stringstream,STACKOVERFLOW,stackoverflow.com
292,"Outlook doesn't download all emails, whereas iphone does","When I check my emails on my iPhone, I notice I am seeing emails that are not appearing in Outlook on my computer. This has only happened since I downloaded an Outlook upgrade. I'm afraid of missing important emails. Where are they going?
",Samantha,https://superuser.com/users/148771,"It could be that your iPhone is deleting the messages off the server before your Outlook client downloads them. Check for a setting related to this. It might be called something like ""Leave a copy on the server"" or ""Delete messages from server after downloading.""
",Tanner Faulkner,https://superuser.com/users/146694,http://superuser.com/questions/454556,TECHNOLOGY,superuser.com
================================================
FILE: input/google-quest-challenge/train_toy.csv
================================================
qa_id,question_title,question_body,question_user_name,question_user_page,answer,answer_user_name,answer_user_page,url,category,host,question_asker_intent_understanding,question_body_critical,question_conversational,question_expect_short_answer,question_fact_seeking,question_has_commonly_accepted_answer,question_interestingness_others,question_interestingness_self,question_multi_intent,question_not_really_a_question,question_opinion_seeking,question_type_choice,question_type_compare,question_type_consequence,question_type_definition,question_type_entity,question_type_instructions,question_type_procedure,question_type_reason_explanation,question_type_spelling,question_well_written,answer_helpful,answer_level_of_information,answer_plausible,answer_relevance,answer_satisfaction,answer_type_instructions,answer_type_procedure,answer_type_reason_explanation,answer_well_written
0,What am I losing when using extension tubes instead of a macro lens?,"After playing around with macro photography on the cheap (read: reversed lens, rev. lens mounted on a straight lens, passive extension tubes), I would like to get further with this. The problem with the techniques I used is that focus is manual and aperture control is problematic at best. This limited my setup to still subjects (read: dead insects). Now, as spring is approaching, I want to be able to shoot live insects. I believe that for this, autofocus and a settable aperture will be of great help.
So, one obvious but expensive option is a macro lens (say, EF 100mm Macro). However, I am not really interested in yet another prime lens. An alternative is electrical extension tubes.
Except for maximum focusing distance, what am I losing when using tubes (coupled with a fine lens, say EF70-200/2.8) instead of a macro lens?
",ysap,https://photo.stackexchange.com/users/1024,"I just got extension tubes, so here's the skinny.
...what am I losing when using tubes...?
A very considerable amount of light! Increasing that distance from the end of the lens to the sensor can cut your light several stops. Combined with the fact that you'll usually shoot stopped down - expect to need to increase your ISO considerably.
The fact that macros are usually considered very, very sharp, although I believe that 70-200mm 2.8 is supposed to be quite sharp.
The ultra low distortion typical of many macros.
I wouldn't worry too much about the bokeh since the DOF will still be quite limited.
Coupled to my 50mm, a full 60mm-ish extension tube results in a DOF of about a couple of inches in front of the lens. On my 70-300, it's probably around 2-3 feet in front of the lens to about a foot in front of the lens.
",rfusca,https://photo.stackexchange.com/users/1917,http://photo.stackexchange.com/questions/9169/what-am-i-losing-when-using-extension-tubes-instead-of-a-macro-lens,LIFE_ARTS,photo.stackexchange.com,1.0,0.3333333333333333,0.0,0.0,0.0,0.0,1.0,1.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,1.0,0.6666666666666666,1.0,1.0,0.8,1.0,0.0,0.0,1.0
1,What is the distinction between a city and a sprawl/metroplex... between downtown and a commercial district?,"I am trying to understand what kinds of places the spam values on p 231 refer to in the 5th Edition main book for Shadowrun.
Per p 15, a sprawl is a plex, and a plex is a ""metropolitan complex, short for metroplex"". Per Google, a metroplex is ""a very large metropolitan area, especially one that is an aggregation of two or more cities"". A city downtown and a sprawl downtown would tend to have similar densities, but for some reason the sprawl (which includes suburbs?) has a higher spam zone noise rating (p 231). Similarly, I'd think of a downtown as being more dense and noisy (e.g. office buildings and street vendors) than a commercial district, e.g. an outdoor mall. The noise ratings make me think that I am thinking about this incorrectly. What is a better way of thinking of them?
",russellpierce,https://rpg.stackexchange.com/users/8774,"It might be helpful to look into the definition of spam zone:
(p.216) spam zone: An area flooded with invasive and/or viral AR advertising, causing noise.
Because a metroplex has so many marketing targets, it seems a safe assumption that marketers would drown the plex with spam. Spam from the less dense areas would bleed into the urban cores. A smaller city with less urban/suburban territory surrounding it ostensibly wouldn't have as much spam.
",Erik Schmidt,https://rpg.stackexchange.com/users/1871,http://rpg.stackexchange.com/questions/47820/what-is-the-distinction-between-a-city-and-a-sprawl-metroplex-between-downtow,CULTURE,rpg.stackexchange.com,1.0,1.0,0.0,0.5,1.0,1.0,0.4444444444444444,0.4444444444444444,0.6666666666666666,0.0,0.0,0.6666666666666666,0.6666666666666666,0.0,0.3333333333333333,0.0,0.0,0.0,0.3333333333333333,0.0,0.8888888888888888,0.8888888888888888,0.5555555555555556,0.8888888888888888,0.8888888888888888,0.6666666666666667,0.0,0.0,0.6666666666666666,0.8888888888888888
2,Maximum protrusion length for through-hole component pins,"I'm working on a PCB that has through-hole components on both sides of the board. The ""top"" side of the board is mounted flush to a Delrin plastic block (the only top-side component is a gas sensor that is fed air samples through hose fittings in the plastic block).
The flush mounting means that I have to add grooves to the plastic block to accommodate the soldered pins of the bottom-side components. Assuming a standard 0.062"" thickness FR4 board, how deep do I need to make the grooves in the plastic block? The only thing I could find is this NASA workmanship standard that states 0.5mm to 2.29mm, but I'm not sure if that will always hold true.
",Joe Baker,https://electronics.stackexchange.com/users/10157,"Do you even need grooves? We make several products using through-hole components that are intended to mount using VHB double-sided foam tape. The boards are 0.062"" thick double-sided with PTH and we use a table-top vertical belt sander to bring the component leads almost flush with the solder mask. In other words, the solder mask isn't touched by the sand paper but the leads are all sanded flat and sitting just proud of the solder mask.
This works well for small boards.
For what it's worth, there are commercial machines available that use a rotary saw blade to do the same thing. The board is held horizontal in a mounting / clamping system on the base and the saw motor is vertical on a sliding X-Y mechanism. The saw blade simply cuts all of the leads almost flush with the board surface.
This system is suited for boards of all sizes but especially for those boards larger than can be handled easily to be sanded with the belt sander.
Also note that these techniques are suitable only for PC boards with plated-through holes.
",Dwayne Reid,https://electronics.stackexchange.com/users/64754,http://electronics.stackexchange.com/questions/154225/maximum-protusion-length-for-through-hole-component-pins,SCIENCE,electronics.stackexchange.com,0.8888888888888888,0.6666666666666666,0.0,1.0,1.0,1.0,0.6666666666666666,0.4444444444444444,0.3333333333333333,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,1.0,0.3333333333333333,0.3333333333333333,0.0,0.7777777777777778,0.7777777777777778,0.5555555555555556,1.0,1.0,0.6666666666666667,0.0,0.3333333333333333,1.0,0.8888888888888888
3,Can an affidavit be used in Beit Din?,"An affidavit, from what I understand, is basically a signed document given by a witness to be used as evidence in a trial, without the witness themselves needing to take the stand.
Can an affidavit be used in Beit Din? Or must witnesses take the stand in person for their testimony to count?
(In case I'm misunderstanding what exactly an affidavit is, simply treat it as a signed document by a witness with their testimony.)
",Scimonster,https://judaism.stackexchange.com/users/5151,"Sending an ""affidavit"" it is a dispute between Rashi and Rabbeinu Tam.
Devarim 19:15:
לא יקום עד אחד באיש לכל עון ולכל חטאת בכל חטא אשר יחטא על פי שני עדים או על פי שלשה עדים יקום דבר
Rashi:
ולא שיכתבו עדותם באגרת וישלחו לבית דין
And not that they write their testimony in a letter and send it to Beis Din
Tosefos Bava Basra 40a (continued from 39b):
ועוד אומר ר""י ששמע מן ר""ת שנוהגים לשלח העדים עדותם באיגרת לב""ד וחשיב עדות והא דדרשינן בספרי. מפיהם ולא מפי כתבם לא אתא אלא למעוטי דוקא אלם שאינו בר הגדה אבל ראוי להגדה אין הגדה מעכבת בו
R""i said that he heard from Rabbeinu Tam that the custom is to send testimony by a letter and it is considered [valid] testimony. And that which it expounds in the Sifre ""From their mouths and not from their writing"" is only coming to exclude a mute who is not able to speak, but someone who is able to speak does not need to speak.
Rambam concludes it is not allowed, but in monetary law the Chachomim enacted that it would be accepted in order to not prohibit the ability of people to secure loans (Hilchos Edus 3:4)
דין תורה שאין מקבלין עדות, לא בדיני ממונות ולא בדיני נפשות, אלא מפי העדים: שנאמר ""על פי שניים עדים"" (דברים יז,ו)--מפיהם, ולא מכתב ידן. אבל מדברי סופרים שחותכין דיני ממונות בעדות שבשטר, אף על פי שאין העדים קיימין, כדי שלא תנעול דלת בפני לווין.
",Y e z,https://judaism.stackexchange.com/users/4794,http://judaism.stackexchange.com/questions/55182/can-an-affidavit-be-used-in-beit-din,CULTURE,judaism.stackexchange.com,0.8888888888888888,0.6666666666666666,0.6666666666666666,1.0,1.0,1.0,0.4444444444444444,0.4444444444444444,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.8888888888888888,0.8333333333333334,0.3333333333333333,0.8333333333333334,1.0,0.8,0.0,0.0,1.0,1.0
5,How do you make a binary image in Photoshop?,"I am trying to make a binary image. I want more than just the look of the image to be black/white; I want the actual file to be a binary file. Every pixel should be either black or white.
I don't just want a monochrome image. I can't have varying shades of gray; every pixel needs to be black or white.
Is this possible? I looked under Image > Mode but nothing there seems to indicate a binary-style image.
",leigero,https://graphicdesign.stackexchange.com/users/21313,"Check out Image Trace in Adobe Illustrator.
I like using Python and PIL, however.
from PIL import Image
image_file = Image.open(""myimage.bmp"")
image_file = image_file.convert('1') # convert to 1-bit black-and-white
image_file.save('result.bmp')
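Note that convert('1') applies Floyd-Steinberg dithering by default, so it simulates grays with scattered black and white dots. If you want a hard threshold with no dithering instead, a point mapping works; a rough sketch, where the cutoff of 128 is an arbitrary choice:
from PIL import Image
# reduce to 8-bit grayscale first
gray = Image.open('myimage.bmp').convert('L')
# map every pixel strictly to black or white, then store as 1-bit
binary = gray.point(lambda p: 255 if p > 128 else 0).convert('1')
binary.save('result.bmp')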
",q2ra,https://graphicdesign.stackexchange.com/users/27160,http://graphicdesign.stackexchange.com/questions/35791/how-do-you-make-a-binary-image-in-photoshop,LIFE_ARTS,graphicdesign.stackexchange.com,1.0,0.6666666666666666,0.0,1.0,1.0,1.0,0.6666666666666666,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,1.0,1.0,0.6666666666666666,1.0,1.0,0.8,1.0,0.0,1.0,1.0
6,Column Grouping with Title in DataTables,"I am creating an HTML table with jQuery's DataTables plug-in. I would like to know if there is a way to group a number of columns together with a Title which describes what the grouping represents.
In my specific case, 3 of my columns will display an address (Street, City, State). I'd like them to have an additional border around just those columns with a title that indicates to the end-user that those 3 columns represent the address.
I'm considering just changing the background color of those 3 columns, which would do the job, but I want the Address title to be displayed. I've seen that rows can be grouped together and sections divided by separators; however, I haven't yet come across anything that makes it possible to group columns together.
",Joe,https://stackoverflow.com/users/2888211,"To show group headers see this example (build the <thead> element using colspan and rowspan).
To show borders around your group, build CSS classes with border-left and border-right and use the columns.className option for the first and last column in your group.
",Raidri,https://stackoverflow.com/users/2610249,http://stackoverflow.com/questions/23815544/column-grouping-with-title-in-datatables,STACKOVERFLOW,stackoverflow.com,1.0,0.6666666666666666,0.0,0.5,1.0,1.0,0.4444444444444444,0.3333333333333333,0.0,0.0,0.5,0.0,0.0,0.0,0.0,0.0,1.0,0.5,0.0,0.0,0.8333333333333334,0.8888888888888888,0.6666666666666666,0.8888888888888888,1.0,0.7333333333333333,0.6666666666666666,0.6666666666666666,0.0,0.7777777777777778
7,core file size with ulimit,"My question is probably not related to Ubuntu in particular, but since my desktop is running this OS, I came to this forum.
I am trying to change the core file size using the ulimit -c command as follows:
$ ulimit -a
core file size (blocks, -c) 0
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 7959
max locked memory (kbytes, -l) 64
max memory size (kbytes, -m) unlimited
open files (-n) 1024
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) 8192
cpu time (seconds, -t) unlimited
max user processes (-u) 1024
virtual memory (kbytes, -v) unlimited
file locks (-x) unlimited
Changing the limitation:
$ ulimit -c unlimited
Observing the result:
$ ulimit -a
core file size (blocks, -c) unlimited
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 7959
max locked memory (kbytes, -l) 64
max memory size (kbytes, -m) unlimited
open files (-n) 1024
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) 8192
cpu time (seconds, -t) unlimited
max user processes (-u) 1024
virtual memory (kbytes, -v) unlimited
file locks (-x) unlimited
Indeed the limit is changed.
However, when I open another terminal and check the value, I still see a zero value for the core file size.
Questions:
Do changes made using the ulimit command affect only the current process, i.e. in this case the bash session?
If I launch a program from the shell as a fore- or background process, does the ulimit change apply to the new process?
How can I make this configuration affect all user processes?
",maximb,https://askubuntu.com/users/425067,"ulimit is a shell builtin, and thus only affects the current shell, and processes started by that shell:
$ type ulimit
ulimit is a shell builtin
From man ulimit:
The ulimit utility shall set or report the file-size writing limit
imposed on files written by the shell and its child processes (files of
any size may be read). Only a process with appropriate privileges can
increase the limit.
So, yes, child processes are affected.
To set limits permanently or for all processes, edit /etc/security/limits.conf and reboot. The examples in the manpage are fairly good. You just need to add something like:
username - core unlimited
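As a side note, a process can also inspect or raise its own limits programmatically, which makes the per-process behaviour easy to see. A small Python illustration (not Ubuntu-specific):
import resource

# read this process's current soft/hard core-file-size limits
soft, hard = resource.getrlimit(resource.RLIMIT_CORE)
print('core file size (soft, hard):', soft, hard)

# raise the soft limit to the hard limit; child processes started from
# here inherit it, but already-running processes are unaffected
resource.setrlimit(resource.RLIMIT_CORE, (hard, hard))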
",muru,https://askubuntu.com/users/158442,http://askubuntu.com/questions/642656/core-file-size-with-ulimit,TECHNOLOGY,askubuntu.com,0.8888888888888888,0.3333333333333333,0.0,0.0,1.0,1.0,0.4444444444444444,0.3333333333333333,1.0,0.0,0.6666666666666666,0.6666666666666666,0.0,0.0,0.0,0.0,1.0,0.0,0.3333333333333333,0.0,0.6666666666666666,1.0,0.7777777777777778,1.0,0.8888888888888888,0.8666666666666666,1.0,0.0,1.0,0.8888888888888888
9,How do you get your Steam games to run on Ubuntu through Wine or something similar?,"Ok, I was kind of surprised that this hadn't been asked here before, but maybe it's too technical for this site. You guys decide.
I've heard lots of different stories about setting up Wine on Ubuntu, WineTricks, PlayOnLinux etc., but never a 'This is the best way to do it for Steam and Steam games' thread.
So has anyone had any real success getting their Steam games to run on Ubuntu through Wine or something similar? If so, could we get some specific steps?
",LoveMeSomeCode,https://gaming.stackexchange.com/users/7157,"You could try http://transgaming.com/ (Cedega). I did this in the past and it worked fine, but you have to pay for it - :\
",VoltaicShock,https://gaming.stackexchange.com/users/8419,http://gaming.stackexchange.com/questions/16751/how-do-you-get-your-steam-games-to-run-on-ubuntu-through-wine-or-something-simil,CULTURE,gaming.stackexchange.com,1.0,1.0,0.0,0.0,1.0,1.0,0.8888888888888888,0.7777777777777778,0.3333333333333333,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.6666666666666666,0.6666666666666666,0.8888888888888888,1.0,0.8666666666666666,0.6666666666666666,0.0,0.0,0.8888888888888888
11,High memory usage Windows Server 2008r2 on VMware,"We are running Windows Server 2008r2 on VMware and are experiencing extremely high memory use when nothing is running. The server memory usage slowly creeps up to 98-99%. The server is configured to use 8GB of memory. Is there some setting we should be using so the server can better manage its memory usage? It's behaving as if there is a memory leak.
",Tim Britton,https://serverfault.com/users/72141,"Well to better assist with your question:
-Which VMware Product & Version
-What is the Windows 2008 R2 VM running?
-Where are you seeing the high memory usage? Windows or your VMware product?
",colealtdelete,https://serverfault.com/users/47027,http://serverfault.com/questions/240076,TECHNOLOGY,serverfault.com,1.0,0.3333333333333333,0.0,1.0,1.0,1.0,0.6666666666666666,0.5555555555555556,0.0,0.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.3333333333333333,0.0,0.8888888888888888,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.6666666666666666,0.26666666666666666,0.0,0.0,0.0,0.8888888888888888
12,How do you grapple in Dead Rising 3?,"I just started playing Dead Rising 3 on the Xbox One. I got to the first grapple, and I can't figure out what I'm supposed to do. The top of the screen says ""Perform a gesture to escape a grapple"" - what does ""gesture"" mean in this context? Is it something to do with Kinect?
",Nick Heiner,https://gaming.stackexchange.com/users/31532,"You can also switch gesture based grapple escapes off in the Kinect settings of Dead Rising 3 (along with all the rest of the Kinect features) so that you have to do QTE type button presses instead.
",Dewi Rees,https://gaming.stackexchange.com/users/43859,http://gaming.stackexchange.com/questions/147970/how-do-you-grapple-in-dead-rising-3,CULTURE,gaming.stackexchange.com,1.0,0.7777777777777778,0.0,0.3333333333333333,1.0,1.0,0.7777777777777778,0.8888888888888888,0.6666666666666666,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,1.0,0.0,0.6666666666666666,0.0,1.0,0.7777777777777778,0.3333333333333333,0.7777777777777778,0.7777777777777778,0.5333333333333333,0.3333333333333333,0.0,0.6666666666666666,0.7777777777777778
14,How to compile and install programs from source,"This is an issue that really limits my enjoyment of Linux. If the application isn't in a repository or doesn't have an installer script, then I really struggle with where and how to install an application from source.
On Windows, by comparison, it's easy: you're (pretty much) required to use an installer application that does all of the work in a wizard. With Linux... not so much.
So, do you have any tips or instructions on this or are there any websites that explicitly explain how, why and where to install Linux programs from source?
",Nitrodist,https://unix.stackexchange.com/users/193,"I think it's just best to read the documentation coming with that specific program or application that you're wanting to install.
Usually there are readmes/READMEs inside the tarballs (the application source archive which you can usually download) or maybe even INSTALL files to read and learn about what is the preferred way of installing said application.
In short: RTFM ;)
",reiche,https://unix.stackexchange.com/users/280,http://unix.stackexchange.com/questions/173/how-to-compile-and-install-programs-from-source,TECHNOLOGY,unix.stackexchange.com,0.8333333333333334,0.6666666666666666,0.0,1.0,1.0,1.0,0.6666666666666666,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.6666666666666666,0.6666666666666666,0.5,0.6666666666666666,0.8333333333333334,0.6,1.0,0.0,0.0,0.8333333333333334
15,sleeping SPID blocking other transactions,"I'm really having trouble tracking down some blocking we are experiencing.
The root blocking SPID's status is 'sleeping', the cmd is 'AWAITING COMMAND', and the sqltext is 'SET TRANSACTION ISOLATION LEVEL READ COMMITTED'.
When I view the Top Transactions by Blocked Transactions Count report, the Blocking SQL Statement is '--'.
I've performed a trace on the SQL when the blocking happens, tracing the root blocking SPID, but it hasn't really led me anywhere. The last trace statement is the same as the sqltext above: 'SET TRANSACTION ISOLATION LEVEL READ COMMITTED'.
I've checked all the related SPROCs I can find to make sure they have TRY/CATCH BEGIN TRAN/COMMIT TRAN/ROLLBACK TRAN statements (we use SPROCs for everything, so there are no standalone statements being run). This issue just started happening over the last 24 hours and no one is claiming to have made any changes to the system.
Solution: one of our seldom-used SPROCs had an error with an insert (the number of columns didn't match), but we are still confused about what exactly was happening.
When looking at all the trace information, the EXEC statement for this SPROC was listed at times, but NEVER just before the block happened on the blocking SPID. It seemed that when it started blocking, the trace didn't record its execution (or any of the statements within it either). However, there were other times where the trace did record its execution and no blocking occurred.
The SPROC error report came from a user, and I was able to find multiple EXEC statements in traces and run them in SSMS. At no time when I ran them did any blocking occur, nor did they hang. They ran as expected (the catch block fired and rolled back the transaction after the error). After fixing the SPROC, we have not seen the issue again.
",Brad,https://dba.stackexchange.com/users/10561,"Have you tried using Adam Machanic's sp_whoisactive? There's an option to get the outer command to see if it really is within a proc. It could be the application is holding open a transaction instead of committing it. Try looking at DBCC OPENTRAN as well.
",Eric Humphrey - lotsahelp,https://dba.stackexchange.com/users/247,http://dba.stackexchange.com/questions/41709/sleeping-spid-blocking-other-transactions,TECHNOLOGY,dba.stackexchange.com,0.8888888888888888,0.4444444444444444,0.0,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.5555555555555556,0.4444444444444444,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,1.0,0.3333333333333333,0.6666666666666666,0.0,0.6666666666666666,1.0,0.6666666666666666,1.0,1.0,0.8,1.0,0.3333333333333333,0.6666666666666666,1.0
16,Verify $_POST script will work correctly,"Can someone read through this script really quickly and verify that I didn't miss anything? I'm not getting any errors in my IDE, so I just have to make sure the structure is correct.
<?php
require_once '/usr/local/cpanel/3rdparty/lib/php/Mail.php';
$db_server = 'localhost';
$db_user = '-----';
$db_pass = '-----';
$dbc = mysql_connect ($db_server, $db_user, $db_pass);
if (!$dbc) {
die(mysql_error());
header ('Location: /contact');
exit;
}
if ($_POST['contactsent'] != 'yes') {
header ('Location: /contact');
exit;
} else {
if (is_array($_POST)) {
foreach ($_POST as $key => $value) {
$_POST[$key] = mysql_real_escape_string(stripslashes($value));
}
}
$RequestType = $_POST[""RequestType""];
$ConsumerBusiness = $_POST[""ConsumerBusiness""];
$GlobalLocation = $_POST[""GlobalLocation""];
$FirstName = strtolower(str_replace(""'"",""''"",$_POST[""FirstName""]));
$FirstName = strtoupper(substr($FirstName,0,1)).substr($FirstName,1);
$LastName = strtolower(str_replace(""'"",""''"",$_POST[""LastName""]));
$LastName = strtoupper(substr($LastName,0,1)).substr($LastName,1);
$Email = strtolower(str_replace(""'"",""''"",$_POST[""Email""]));
$Title = strtolower(str_replace(""'"",""''"",$_POST[""Title""]));
$Title = strtoupper(substr($Title,0,1)).substr($Title,1);
$Company = strtolower(str_replace(""'"",""''"",$_POST[""Company""]));
$Company = strtoupper(substr($Company,0,1)).substr($Company,1);
$Address = strtolower(str_replace(""'"",""''"",$_POST[""Address""]));
$Address = strtoupper(substr($Address,0,1)).substr($Address,1);
$City = strtolower(str_replace(""'"",""''"",$_POST[""City""]));
$City = strtoupper(substr($City,0,1)).substr($City,1);
$State = $_POST[""State""];
$Zip = $_POST[""Zip""];
$Phone = $_POST[""Phone""];
$F = $_POST[""F""];
$ProductDesc = $_POST[""ProductDesc""];
$Comment = $_POST[""Comment""];
if ($GlobalLocation == ""Canada""):
$SendTo=""canadainfo@------.com"";
elseif ($GlobalLocation == ""Central America""):
$SendTo=""customer.service@------.com.pa"";
elseif ($GlobalLocation == ""Europe""):
$SendTo=""marketing@-----.uk"";
elseif ($GlobalLocation == ""Mexico""):
$SendTo=""ventas@------.com.mx"";
else:
$SendTo=""info@------.com"";
endif;
function dbSet($fields, $source = array()) {
$set='';
if (!source) $source = &$_POST;
foreach ($fields as $field) {
if (isset($source[$field])) {
$set.=""`$field`='"".mysql_real_escape_string($source[$field]).""', "";
}
}
return substr($set, 0, -2);
}
// INSERT INTO DATABASE
mysql_select_db(""new_contact"",$dbc) or die(""Could not select new_contact"");
$fields = explode("" "", ""RequestType ConsumerBusiness GlobalLocation FirstName LastName Email Title Company Address City State Zip Phone F ProductDesc Comment"");
$query = ""INSERT INTO new_contact SET "".dbSet($fields, $_POST);
mysql_query($query);
// SETUP EMAIL
$Bodycopy = ""This information was submitted via the ------.com website and sent to you because of the location
identified by the user. <br>If this has reached you in error, please forward this email to info@------.com"";
$Bodycopy. ""<br>----------------------------------------------------------------------------------------------<br><br>"";
if ($RequestType != """") $Bodycopy. ""What kind of information do you need? : "" .$RequestType. ""<br>"";
if ($ConsumerBusiness != """") $Bodycopy. ""What type of customer or vendor are you? : "" .$ConsumerBusiness. ""<br>"";
if ($GlobalLocation != """") $Bodycopy. ""Global Location : "" .$GlobalLocation. ""<br>"";
if ($Company != """") $Bodycopy. ""Company : "" .$Company. ""<br>"";
if ($FirstName != """") $Bodycopy. ""First Name : "" .$FirstName. ""<br>"";
if ($LastName != """") $Bodycopy. ""Last Name : "" .$LastName. ""<br>"";
if ($Title != """") $Bodycopy. ""Title : "" .$Title. ""<br>"";
if ($Email != """") $Bodycopy. ""Email : "" .$Email. ""<br>"";
if ($Address != """") $Bodycopy. ""Address : "" .$Address. ""<br>"";
if ($City != """") $Bodycopy. ""City : "" .$City. ""<br>"";
if ($State != """") $Bodycopy. ""State : "" .$State. ""<br>"";
if ($Zip != """") $Bodycopy. ""Zip/Postal Code : "" .$Zip. ""<br>"";
if ($Phone != """") $Bodycopy. ""Phone : "" .$Phone. ""<br>"";
if ($F != """") $Bodycopy. ""F : "" .$F. ""<br>"";
if ($ProductDesc != """") $Bodycopy. ""UPC or product description : "" .$ProductDesc. ""<br>"";
$Bodycopy. ""<br>----------------------------------------------------------------------------------------------<br><br>"";
if ($Comment != """") $Bodycopy. ""Comments : <br>"" .$Comment. ""<br>"";
$Bodycopy. ""<br><br>"";
$Bodycopy. $IP = $_SERVER[""remote_addr""];
// PROCESS EMAIL
// mail server info...
$from = $SendTo;
$to = ""Do Not Reply <donotreply@------>"";
$subject = ""Website Contact : "" . $GlobalLocation;
$body = $Bodycopy;
$host = ""mail.------"";
$port = ""25"";
$username = ""donotreply@-------"";
$password = ""-------"";
$headers = array ('From' => $from,
'To' => $to,
'Subject' => $subject);
$smtp = Mail::factory('smtp',
array ('host' => $host,
'auth' => true,
'port' => $port,
'username' => $username,
'password' => $password));
$mail = $smtp->send($to, $headers, $body);
if (PEAR::isError($mail)) {
echo(""<p>"" . $mail->getMessage() . ""</p>"");
} else {
echo(""<p>Message successfully sent!</p>"");
}
// MAKE SURE DB CONN IS CLOSED
mysql_close($dbc);
// REDIRECT TO THANK YOU PAGE
header ('Location: /index.php?option');
exit();
}
?>
",acctman,https://codereview.stackexchange.com/users/13137,"Although it'll lenghten your code quite a bit, in my opinion it's worth checking if the $_POST variables are set (using isset), to avoid any exceptions.
",mikeythemissile,https://codereview.stackexchange.com/users/13145,http://codereview.stackexchange.com/questions/11466/verify-post-script-will-work-correctly,TECHNOLOGY,codereview.stackexchange.com,1.0,0.3333333333333333,0.0,1.0,0.3333333333333333,1.0,0.4444444444444444,0.4444444444444444,0.0,0.0,1.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.3333333333333333,0.0,0.3333333333333333,0.0,0.6666666666666666,0.8888888888888888,0.5555555555555556,1.0,1.0,0.6,1.0,0.0,0.3333333333333333,1.0
17,"what's the relation between sensor size and image quality (noise, dynamic range)?","I'm reading this description on sensor size:
Digital compact cameras have substantially smaller sensors offering a
similar number of pixels. As a consequence, the pixels are much
smaller, which is a key reason for the image quality difference,
especially in terms of noise and dynamic range.
Could you please elaborate on the last sentence: what's the relation between sensor size and image quality? In particular, what are the advantages and disadvantages of a small sensor (of a compact camera, in contrast to a large sensor of a DSLR camera)?
",cody,https://photo.stackexchange.com/users/11005,"A digital image sensor is ultimately a device that uses the photovoltaic or photoconductive properties of photodiodes to convert photons into electrons (charge), which can later be read out as a saturation value and converted to a digital pixel. This is an analog-to-analog then analog-to-digital conversion process.
The key behavior of a photodiode relevant to imaging, converting photons to electrons, improves with total surface area. The more surface area, the greater the area to detect photon strikes per photodiode, and the greater the physical material area within which electron charge (signal) can be collected. In other words, larger physical pixel area equates to higher signal ratio. The ""depth"" of a well is ultimately immaterial to modern Bayer-type CFA sensors, as deeper penetration only really has a filtration effect...the deeper a photon penetrates a photodiode, the more blueshifted light will be filtered out in favor of redshifted light. This is due to the response curve of the type of silicon used in photodiodes...which are more sensitive to infrared wavelengths than visible light wavelengths, and more sensitive to visible light wavelengths than ultraviolet and x-ray wavelengths.
Finally, being electronic devices, image sensors produce a variety of forms of electronic noise. In particular, they are susceptible to a low number of electrons in any given photodiode being generated from the low level of dark current that is always running through the sensor. Being devices sensitive to electromagnetic frequencies, the intrinsic field of the sensor itself can be affected by strong, nearby devices that emit electromagnetic frequencies of their own (if it's not shielded properly), which can produce banding. Slight differences in the exact electrical response of each pixel can produce slight variations in how the charge accumulated in a photodiode is read out, and there can be thermally induced forms of noise. These forms of noise create a signal floor wherein it becomes difficult or impossible to determine whether a digital pixel level is the product of an actual photon capture or due to electronic and thermal noise. So long as the image signal is larger than the noise floor, or in other words the signal-to-noise ratio (SNR) is high, a useful image can be produced.
All things being equal...and by that, I mean the same number of pixels, the same ultimate sensor design characteristics, the same amount of dark current, the same amount of read noise...a smaller sensor will be noisier than a larger sensor because the larger sensor, with the exact same number of pixels, can have larger surface area for each of those pixels, allowing more electrons to be captured for any given photon strike. A larger pixel has a higher maximum saturation point, which allows more total electron charge before the pixel is ""full"" or totally white. That intrinsically increases SNR, reducing the impact that electronic noise has on the final image signal, producing less noisy images at exactly the same settings as a smaller sensor.
Dynamic range is the total usable tonal range available from a sensor. It is ultimately affected by the amount of electronic noise present in a sensor and the maximum saturation point of the pixels, or the ratio between the mean of electronic noise and the maximum saturation point of each pixel in the sensor. Again, all things being equal, dynamic range will be better on a larger sensor, as the SNR, even at low signal levels, is going to be slightly better than on a smaller sensor, and at higher signal levels it can be significantly better. As is often the case with image sensors these days, increasing pixel size, or for that matter increasing a pixel's maximum sensitivity (ISO), has the effect of also increasing the maximum amount of read noise at low ISO. This is ultimately due to poor control over electronic noise to start with, resulting in higher read noise at minimum ISO for larger sensors than for smaller sensors. While the increase in read noise is often still minor compared to the increase in maximum saturation point, and therefore does not affect maximum SNR much, it can mitigate or eliminate any gains at the sensor's minimal SNR level, reducing or eliminating any improvement in dynamic range as well.
",jrista,https://photo.stackexchange.com/users/124,http://photo.stackexchange.com/questions/26001/whats-the-relation-between-sensor-size-and-image-quality-noise-dynamic-range,LIFE_ARTS,photo.stackexchange.com,0.8888888888888888,1.0,0.0,0.0,1.0,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,1.0,1.0,0.6666666666666666,1.0,1.0,1.0,0.0,0.0,1.0,1.0
18,"multiple keys via HKDF - whats better, one or two applications of HKDF-extract","Assume for the sake of the question that I have two variable-length bit strings, each with 128 bit cryptographic randomness, and I want to extract two 128 bit keys via HKDF-SHA256.
Which alternative is better (if any), and why?
Use a single HKDF-extract on the concatenation of the two strings, and two HKDF-expands with different info strings to get two 128 bit keys.
Use two HKDF-extract operations, one for each bit string, and use a single HKDF-expand on each to get two 128 bit keys.
Or in other words, is it better to HKDF-extract on a longer IKM string and use multiple HKDF-expands, or is it better to use HKDF on independent but shorter IKMs.
My intuition tells me that, if my randomness strings are really as good as I claim, then two independent HKDF extracts are better, but using a single one on the concatenation is, in practice, just as safe, and safer if my input randomness is not actually as good as assumed, so two HKDFs might be more robust in practice.
",Marc Lehmann,https://crypto.stackexchange.com/users/7672,"Realistically, it probably doesn't matter, if all of your premises are accurate.
If it were me, I'd probably concatenate the inputs, then apply a HKDF to the concatenation to derive two keys -- but honestly, it's unlikely to matter. This is very unlikely to be the weakest link in your system. Pick something that's easy to implement and easy to understand, and move on: focus your energy on some other aspect of your system.
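To make the first option concrete, here is a rough Python sketch using the cryptography package; the variable names and info strings are illustrative. Each HKDF call below re-runs the same deterministic extract over the concatenated input before expanding with its own info string, which is equivalent to one extract followed by two expands:
import os
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

ikm_a = os.urandom(16)  # stand-ins for the question's two random bit strings
ikm_b = os.urandom(16)
ikm = ikm_a + ikm_b  # concatenate, as in the first alternative

def derive(info):
    # salt=None means an all-zero salt; length=16 yields a 128-bit key
    return HKDF(algorithm=hashes.SHA256(), length=16, salt=None, info=info).derive(ikm)

key1 = derive(b'key-1')
key2 = derive(b'key-2')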
",D.W.,https://crypto.stackexchange.com/users/351,http://crypto.stackexchange.com/questions/9318/multiple-keys-via-hkdf-whats-better-one-or-two-applications-of-hkdf-extract,TECHNOLOGY,crypto.stackexchange.com,1.0,1.0,0.0,1.0,1.0,0.0,0.6666666666666666,1.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.3333333333333333,1.0,1.0,0.9,0.0,0.0,0.0,1.0
19,What filter should I use to avoid overly-whitened daylight images?,"What filter should I use to avoid whitened images (blown highlights) caused by intense daylight? At the moment I am only using a UV filter, but it doesn't solve the whole problem.
I am a newbie in photography, and I've recently bought a Nikon DSLR.
",Daniel,https://photo.stackexchange.com/users/6471,"If you are indeed referring to your images having too much contrast to allow for both a properly exposed subject and sky, then there are a few options.
As Zak stated, you could use exposure compensation to darken the entire image (this might make the non-sky portion of the image too dark though)
Use a graduated neutral density filter. This will selectively darken one part of an image while leaving the other part alone. This is the old-school method. While it typically limits the transition between the filtered and unfiltered parts to a straight line, it does allow you to fix things at the time of the shot without having to spend time tweaking it on the computer afterwards.
HDR (High Dynamic Range) photography. This is the hi-tech way to do things. This requires that you take multiple shots of the same scene at different exposures and use software to combine them into one image where both the highlights and shadows are well-exposed.
",AndrewStevens,https://photo.stackexchange.com/users/6186,http://photo.stackexchange.com/questions/15334/what-filter-should-i-use-to-avoid-overly-whitened-daylight-images,LIFE_ARTS,photo.stackexchange.com,1.0,1.0,0.3333333333333333,1.0,0.6666666666666666,0.0,1.0,0.5555555555555556,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,1.0,1.0,0.6666666666666666,1.0,1.0,1.0,0.3333333333333333,0.0,0.0,1.0
22,Create heatmap with PSTricks or TikZ,"I have a huge amount of 2D-coordinates, associated with a value, e.g.:
x | y | value
27.50 52.15 12.51
61.83 13.32 57.56
36.23 21.83 41.73
40.46 85.67 25.20
...
The data is not tabular and I want the points between two data points to be interpolated in some way (which way is not really clear yet).
I want to present the data as a heatmap like this:
Is there any ready-to-use package for PSTricks or TikZ to do it?
",user2033412,https://tex.stackexchange.com/users/35228,"There is tikzDevice for R which will generate TikZ code for a plot created in R. So, if you use R to create your heat map (say, using ggplot2's geom_density2d()), you also get the TikZ code with little effort. There is a learning curve, though.
However, this kind of image should be included as a (perhaps high-resolution) raster image in your document, as the vector version might take a long time to render. So you can create a TikZ version of the plot, compile it to PDF and then convert to PNG at the required resolution/pixel density.
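As an aside, if the toolchain is flexible, the same interpolate-then-rasterize step can be sketched in Python with scipy and matplotlib (the 200x200 grid and the linear interpolation are arbitrary choices, and points outside the convex hull of the data come out as NaN):
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
pts = np.array([[27.50, 52.15], [61.83, 13.32], [36.23, 21.83], [40.46, 85.67]])
vals = np.array([12.51, 57.56, 41.73, 25.20])
xi = np.linspace(pts[:, 0].min(), pts[:, 0].max(), 200)
yi = np.linspace(pts[:, 1].min(), pts[:, 1].max(), 200)
grid = griddata(pts, vals, (xi[None, :], yi[:, None]), method='linear')
plt.imshow(grid, origin='lower', extent=(xi[0], xi[-1], yi[0], yi[-1]))
plt.colorbar(label='value')
plt.savefig('heatmap.png', dpi=300)  # include this PNG from the LaTeX document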
",krlmlr,https://tex.stackexchange.com/users/8057,http://tex.stackexchange.com/questions/133882/create-heatmap-with-pstricks-or-tikz,TECHNOLOGY,tex.stackexchange.com,1.0,0.7777777777777778,0.0,1.0,1.0,1.0,0.5555555555555556,0.3333333333333333,0.0,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.3333333333333333,0.6666666666666666,0.0,0.0,0.0,0.7777777777777778,0.5555555555555556,0.7777777777777778,0.7777777777777778,1.0,0.9333333333333332,1.0,0.0,0.0,1.0
23,Can you book hotels on a prepaid credit card worldwide?,"I tend to stay at smaller boutique hotels or local apartments when I visit a city, but recently, due to some credit card issues, I will need to depend on a prepaid Visa. But I noticed when I try to shop online that a lot of retailers don't accept prepaid credit cards, so I'm thinking hotels would be even stricter. Is there a list somewhere of countries or particular hotel chains that ban prepaid cards? If so, how does one book online or reserve a room without a card? Do all hotels worldwide accept cash?
",verve,https://travel.stackexchange.com/users/2283,"TL;DR - Depends, on your pre-paid card, the hotel, and how you book the hotel.
There are a few different charges to consider here:
Pre-payment of the room at/shortly after booking
Holding the room on a flexible booking
Deposit at checkin
Room charge, meals, drinks, extras etc at checkout
With many OTAs and hotel websites, if you make a non flexible booking, or some kinds of flexible bookings, they will charge your card for the room rate during the booking process. With some others, they'll send your card details through to the hotel, who'll put it through their tills later. (Maybe that day, maybe during a weekly sweep). In order for this pre-payment to go through, your card will need to support offline / cardholder-not-present transactions. As long as your card advertises itself as ""suitable for online shopping"" or similar, and as long as the card issuer doesn't block travel booking, you should be fine. Speak to your card issuer to be sure.
Alternately, when reserving the room, you might opt for a flexible rate where you pay at checkout, with no pre-payment. This is typically offered on the hotel's own site, and some OTAs. They will normally ask for a credit card to ""hold"" the reservation, which would be charged in the event of a no-show, but as long as you turn up as planned the card won't be charged. These rates are normally a bit more expensive than non-flexible pre-paid ones, but this style of booking should be fine on any card.
At checkin, most hotels (but not all) will want to take some sort of deposit. Typically this covers any unpaid parts of the room rate, along with expected spend in the restaurant / bar / etc. The details taken would also be used in the event of damage. If you've pre-paid the room and don't plan to run up extras in the hotel, many hotels would let you not leave a deposit, but not all. Very small hotels may take a look at you, decide you look fine, and tell you to pay everything at checkout. Some hotels may want to take something no matter what.
For the deposit, this is normally done as a pre-authorisation. Not all pre-paid cards support this, which would be a problem. Some pre-paid cards do support it, but badly, and might take a week or so to release the reserved funds, which could be a problem. Check with your card issuer about this. Alternately, many hotels which require a deposit will be willing to take a cash deposit, but they may well want a larger cash deposit than they'd normally authorise on a credit card. (They can't charge extras to the card later, for example.) Expect to leave a hefty chunk of cash as a deposit, expect checkin to take a while, expect several people to count the money, bills may be checked for counterfeits, and expect a receipt at the end. Speak with the hotel first to see if they need a deposit, and if they take cash ones. As a datapoint, I once got stuck in a checkin queue behind someone doing a cash deposit; he was asked to leave something like 4 nights' room rate as a deposit on a 2 night stay.
Finally, we have settling up the bill at the end. If you pre-paid, and didn't eat there, you could well find there's nothing to pay, or just a few dollars / pounds / euros in city taxes. Alternately, if you didn't pre pay (or they didn't take it), ate in the restaurant lots etc, you could have a decent amount to pay here. You'll be present, so any card which supports in-person transactions will be fine for this. Well, as long as you have the money on it for the bill that is! If you did a cash deposit, you should get the choice between taking the bill off that and getting the remainder, or getting the whole deposit back in cash and paying on a card.
Oh, one more possibility - charges not noticed at checkout. Maybe you forgot to tell them about the minibar charges, maybe that morning's breakfast didn't go through, something like that. If you used a credit card for the deposit, expect them to charge it to that later when they notice. If you paid cash, expect to get a bill in the mail some time later.
Can it be done - sure! Can it be done at one specific hotel with one particular kind of pre-paid card on one specific rate booked one specific way? Depends...
",Gagravarr,https://travel.stackexchange.com/users/26,http://travel.stackexchange.com/questions/35395/can-you-book-hotels-on-a-prepaid-credit-card-worldwide,CULTURE,travel.stackexchange.com,0.7777777777777778,0.7777777777777778,0.0,1.0,0.6666666666666666,0.5,0.6666666666666666,0.6666666666666666,0.6666666666666666,0.0,0.6666666666666666,1.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.3333333333333333,0.0,0.8888888888888888,1.0,0.7777777777777778,1.0,0.8888888888888888,0.9333333333333332,0.0,0.0,1.0,0.8888888888888888
24,Fiber Channel SAN hooked up to 2 ESXi Hosts,"I have a PowerVault with 4 FC modules at 8gb/s. I don't want to jack the SAN into our 10/100/1000 switch because of the max 1gb/s speed.
My question is: can I jack both ESXi hosts' FC into the SAN's FCs and have HV and vMotion?
",Frank,https://serverfault.com/users/95480,"The SAN itself doesn't need an Ethernet connection in this case, all it needs is FC to the ESXi hosts. The ESXi hosts will need GigE Ethernet. During VMotion, the source ESXi starts sending machine-state to the target ESXi host over the VMKernel connection in the virtual-switch. That's actually a connection between the two ESXi hosts over your physical Ethernet switch. Meanwhile, when state is fully transferred control of the VMDK files is passed over to the target server it it goes live. So, vMotion requires both FC and Ethernet.
HA (what I presume you meant by HV) requires vMotion to work, so should be available if vMotion is working.
Unless you meant Hardware Virtualization, or direct LUN presentation. That can also work, but is trickier. The same volume needs to be presented to both ESXi hosts using exactly the same LUN number; if it doesn't have the same LUN number, that volume won't be visible when the VM is on one or the other ESXi host.
",sysadmin1138,https://serverfault.com/users/3038,http://serverfault.com/questions/314105,TECHNOLOGY,serverfault.com,0.7777777777777778,0.5555555555555556,0.0,1.0,0.6666666666666666,1.0,0.5555555555555556,0.3333333333333333,0.0,0.0,0.3333333333333333,1.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.6666666666666666,0.0,0.8888888888888888,0.6666666666666666,0.6666666666666666,0.6666666666666666,0.8888888888888888,0.9,0.3333333333333333,0.3333333333333333,0.6666666666666666,1.0
25,How do I remove a Facebook app request?,"Unfortunately, because of some fat fingering when browsing the Klout website from my iPad, I accidentally sent app requests to 50 friends. I am really annoyed by these sorts of requests and certainly did not intend to send them to my friends.
Can I view the app requests I've sent on Facebook?
Can I undo the requests that I've sent?
Can I add a privacy setting to prevent any Facebook app (Klout or otherwise) from ever sending these requests in the future?
",ahsteele,https://webapps.stackexchange.com/users/186,"There isn't a native way with Klout's UI that I am aware of. Your best bet is finding a Chrome Extension that handles Facebook JS SDK API calls, then you can do something like this
FB.api('/me/apprequests', function(response) {
  var ids = [];
  for (var i = 0, l = response.data.length; i < l; i++) {
    FB.api('/' + response.data[i].id, 'DELETE',
      function(response) { console.log('cleared request: ' + response); }
    );
    console.log('For: ' + response.data[i].to.name);
  }
});
Quick and dirty would be to just paste this into your Developer console on Klout's page.
",phwd,https://webapps.stackexchange.com/users/40,http://webapps.stackexchange.com/questions/37568/how-do-i-remove-a-facebook-app-request,TECHNOLOGY,webapps.stackexchange.com,1.0,0.5555555555555556,0.0,0.3333333333333333,1.0,1.0,0.8888888888888888,0.8888888888888888,1.0,0.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.0,0.0,1.0,0.6666666666666666,0.0,0.0,1.0,0.8888888888888888,0.5555555555555556,1.0,1.0,0.7333333333333333,1.0,0.3333333333333333,0.0,0.8888888888888888
27,How often do I need to change my oil in a VW TDI?,"The manual for my 2003 VW TDI says to change the oil every 12 months or 10,000 miles. My dealer says every 6 months or 5,000 miles.
Are they just trying to make twice as much money off my oil changes, or is there a legitimate reason to change the oil twice as often as VW originally printed in the book?
",Ken,https://mechanics.stackexchange.com/users/3547,"2013 Passat TDI. Have an oil analysis done and quit speculating. I change at 8.5 to 9k as that is when the additive package was low on my last analysis.
",pete,https://mechanics.stackexchange.com/users/7604,http://mechanics.stackexchange.com/questions/6856/how-often-do-i-need-to-change-my-oil-in-a-vw-tdi,CULTURE,mechanics.stackexchange.com,1.0,1.0,0.0,0.5,1.0,1.0,0.6666666666666666,0.5555555555555556,0.0,0.0,0.3333333333333333,0.6666666666666666,0.0,0.0,0.0,0.0,0.3333333333333333,0.0,0.6666666666666666,0.0,1.0,0.6666666666666666,0.3333333333333333,0.7777777777777778,0.7777777777777778,0.5,0.6666666666666666,0.3333333333333333,0.0,0.6666666666666666
29,Does this statement make any sense?,"I am asking this question completely out of curiosity. The other day, my roommate, by mistake, used 'Light year' as a unit of time instead of distance. When I corrected him (pedantic, much), he said the following:
""Units are relative. And according to Fourier Transforms, units can be changed so Light year is a unit of time.""
That got me thinking, and I read up on Fourier transforms on Wikipedia but couldn't find anything about using a unit in one domain as a unit for another measurement. I do agree that units (particularly base units) are relative (e.g. the meter), but does his statement make any sense?
EDIT
Thank you everyone for all the answers. It isn't so much to rub it in or prove a point as it is to understand the concept better. Anyway, this is his response after I showed him this thread. Any comments would be appreciated.
His response:
Nevermind, for the first time I accept I was wrong. BUT using lightyears to measure time is possible. My example didn't make sense because I was wrong when I mentioned that I'm still measuring distance. If you have a signal in the time domain and ...take the FT, I get a signal which DOES NOT HAVE to be in the frequency domain. Clarify this to the guy who posted last. Now the new signal is in a domain defined by me and so are its units. This signal, although not equal to the original signal, still represents it if ya take an inverse FT. So, the idea of time will still be there. Now coming back to our case: lightyears here is not the lightyears you are used to reading when dealing with distance. It represents time.
",xbonez,https://physics.stackexchange.com/users/436,"This doesn't make much sense: light year is in any case a unit of distance.
What is common is to use ""reduced units"", for example units where $c=1$ (speed of light) or $h=2\pi$. But in these cases the opposite would happen: you would say ""year"" to mean a distance. Or, for example, you say ""has a mass of xyz MeV"" instead of ""xyz $MeV/c^2$"".
About the Fourier transform: it allows one to go from the so-called ""time domain"" (even if ""time"" is not always the usual time) to the ""frequency domain"", involving ... frequencies.
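(Concretely: if $f(t)$ lives on a time axis measured in seconds, its transform $\hat{f}(\omega) = \int f(t)\,e^{-i\omega t}\,dt$ lives on a variable with inverse-time units, i.e. a frequency in hertz. The transform inverts the unit of the original domain; it does not let you relabel a distance unit as a time unit.)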
But as you can see this cannot change the definition of light-year.
",Cedric H.,https://physics.stackexchange.com/users/82,http://physics.stackexchange.com/questions/862/does-this-statement-make-any-sense,SCIENCE,physics.stackexchange.com,0.8333333333333334,0.6666666666666666,0.5,0.5,0.5,0.5,0.6666666666666666,0.6666666666666666,0.0,0.0,1.0,0.5,0.0,0.0,0.5,0.0,0.0,0.0,0.5,0.0,0.6666666666666666,0.8333333333333334,0.6666666666666666,1.0,1.0,0.8,0.0,0.0,1.0,0.8333333333333334
30,"2 servers, high availability and faster response","I recently bought a second webserver because I worry about hardware failure of my old server. Now that I have that second server I wish to do a little more then just have one server standby and replicate all day. As long as it's there I might as well get some advantage our of it !
I have a website powered by ubuntu 12.04, nginx, php-fpm, apc, mysql (5.5) and couchdb.
I'm currently testing configurations where I can achieve failover AND make good use of the extra hardware for faster responses / distributed load.
The setup I am testing now involves heartbeat for IP failover and two identical servers. Of the two servers only one has a public IP address. If one server crashes, the other server takes over the public IP address. On an incoming request, nginx forwards the request to php-fpm on either server A or server B (50/50 if both servers are alive). Once the request has been sent to php-fpm, both servers look at localhost for the MySQL server. I use master-master MySQL replication for this. The file system is synced with lsyncd.
This works pretty well, but I'm reading that it's discouraged by the (MySQL) community.
Another option I could think of is to use one server as a MySQL master and one server as a web/php server. The servers would still sync their filesystem and would still run the same duplicate software (nginx, MySQL), but master-slave MySQL replication could be used. As long as both servers are alive I could just prefer nginx to listen on IP A and MySQL on IP B. If one server is down, the other server could take over the tasks of the other, simply by IP switching.
But I'm completely new at this, so I would greatly value your expert advice. Is either of the two setups any good? If you have any thoughts on this please let me know!
PS: virtualisation, hosting in different locations or active/passive setups are not solutions I'm looking for. I find virtual servers either too slow or too expensive. I already have a passive failover at another location. But in case of a crash I found the site was still unreachable for too long due to DNS caching.
",user17886,https://serverfault.com/users/133018,"We do it on all our customer HA boxes and our Magento MicroCloudTM/ configurations, so that the fail over system(s) isn't idle. Look into DRBD, Linux VServer and heartbeat.
Read the following for an insight into it.
http://linux-vserver.org/Getting_high_with_lenny
Caveat
But as always, we advise not trying to do this unless you have substantial experience, as you are more likely to end up with a configuration that is less reliable and harder to debug, and, if you end up in a split-brain situation, more likely to suffer data loss ... than with a plain old single server.
",Ben Lessani - Sonassi,https://serverfault.com/users/113375,http://serverfault.com/questions/430759,TECHNOLOGY,serverfault.com,0.8888888888888888,0.3333333333333333,0.0,0.6666666666666666,0.0,0.3333333333333333,0.4444444444444444,0.3333333333333333,0.3333333333333333,0.0,1.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.0,0.3333333333333333,0.0,0.6666666666666666,0.0,0.6666666666666666,1.0,0.6666666666666666,1.0,1.0,0.9333333333333332,0.6666666666666666,0.0,0.6666666666666666,0.8888888888888888
31,Why does string theory have such a huge landscape?,"I was browsing through Foundations of Space and Time, a compilation of essays on various theories of quantum gravity. The following passage in the introduction intrigued me:
Each compactification leads to a different vacuum state.... at least one state should describe our Universe in its entirety.... the enormous number (~10^500 at last count) of solutions, with no perturbative mechanism to select among them, leads some critics to question the predictive power of the theory... Even more worrying is that, while the theory is perturbatively finite order by order, the perturbation series does not seem to converge.
I don't know anything about string theory and so I could not make heads or tails of this. All I know is that ~$10^{500}$ is a very large number.
What exactly is a 'solution' in string theory? Is it a spacetime metric of some sort or the terms of a S-matrix of some sort?
Why are there so many 'solutions'?
I thought string theory was supposed to be finite, why do perturbative series still diverge?
Is there any experimental technique to limit the number of 'solutions'?
Will experimental techniques be able to pinpoint a solution within present day string theorists' lifetimes too? If not, how long will it take before we can experimentally probe these things?
Are string theorists completely relaxed about these issues? Or are they in anguish?
",dj_mummy,https://physics.stackexchange.com/users/28244,"
""Each compactification leads to a different vacuum state.... at least
one state should describe our Universe in its entirety.... the
enormous number (~10^500 at last count) of solutions, with no
perturbative mechanism to select mechanism to select among them, leads
some critics to question the predicitive power of the theory..Even
more worrying is that, while the theory is perturubatively finite
order by order, the pertrubation series does not seem to converge.""
Ok.
I don't know anything about string theory and so I could not make heads or tails of this. All I know is that ~10^500 is a very large number.
That, it is.
What exactly is a 'solution' in string theory? Is it a spacetime metric of some sort or the terms of an S-matrix of some sort?
So, we have a non-perturbative definition of M-theory and string theories on AdS space through the AdS/CFT correspondence. Now, these are 10- or 11-dimensional. To get rid of the extra 6 or 7 dimensions, you need to compactify on a 6-dimensional or 7-dimensional manifold.
A particularly convenient compactification of 11-dimensional M-theory is on $G_2$-holonomy manifolds. Particularly convenient compactifications of 10-dimensional string theories, such as Type HE, are on $SU(3)$-holonomy Calabi-Yau manifolds. Of course, it's not necessary; e.g. if the world happens to be something with $\mathcal N=2$ supersymmetry, as opposed to $\mathcal N=1$.
Why are there so many 'solutions'?
Because there are lots of these manifolds!
I thought string theory was supposed to be finite, why do perturbative series still diverge?
Uh... Yes. But it's renormalizable. And there are non-perturbative definitions in AdS spacetime.
Is there any experimental technique to limit the number of 'solutions'? Will experimental techniques be able to pinpoint a solution within present day string theorists' lifetimes too? If not, how long will it take before we can experimentally probe these things?
In principle, it's possible. But in anyone's lifetime... Do you know how big $10^{500}$ is? See this.
Are string theorists completely relaxed about these issues? Or are they in anguish?
There's the branch of String Phenomenology that attempts to find the correct vacua...
",centralcharge,https://physics.stackexchange.com/users/23119,http://physics.stackexchange.com/questions/75441/why-does-string-theory-have-such-a-huge-landscape,SCIENCE,physics.stackexchange.com,1.0,0.5555555555555556,0.0,0.6666666666666666,0.6666666666666666,1.0,0.6666666666666666,0.5555555555555556,0.6666666666666666,0.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,1.0,0.6666666666666666,1.0,1.0,1.0,0.0,0.0,1.0,1.0
32,What are the benefits of owning a physical book?,"I have seen this question about updates of the D&D 4th Edition books, and it got me thinking.
Since I got my Kindle I have not read a single paper novel; paper novels have fewer drawbacks relative to their digital copies than RPG rulebooks do.
Dead-tree types have some benefits like looking good on a bookshelf, but any ebook reader weighs less with 100 novels than the usual hard-cover book.
If you want to look for the damage of Ares Alpha, even with a half-decent tablet it takes less than 2 seconds.
Digital copies do not get worn, they never get unwanted earmarks, but you can bookmark them.
Rulebooks do get updates, and unless you are willing to take a pen to your book, your hard copies will never contain them. The pdfs can be edited and resent to the buyers.
Even better is the WotC approach with the DDI, you can look up any monster or item or (almost any) rule, in the most recent form, for 3 years at the cost of seven books.
I think this is the way to go, even considering the horribly slow character builder. Although I must admit good illustrations can help build the atmosphere.
So what am I missing? Why are people buying RPG rulebooks in paper format? Why are books even published? I do not need to know if feats are supposed to be on the right page and skills on the left; I just want a list of them, filterable any way I want.
Is this just a necessary part of earning money? I understand that pdfs are copied illegally, but the Compendium is not.
",András,https://rpg.stackexchange.com/users/9552,"As others have said - there's definitely something to be said about tactile navigation.
While digital formats (assuming they're text-parseable) can be searched, if you don't know the specific spelling or the specific term, they can be difficult to parse by hand. Quadruply so if the publisher did not provide bookmarks to the different chapters. (Which is really annoying, imo.)
By contrast, with a physical copy, you can pick up a general sense of where the desired content is physically located fairly quickly. For example: Combat Rules are towards the middle. Spell lists are towards the end. Character classes are near the beginning. The more familiar one is with the book, the quicker this is to process (and more accurate one tends to get).
Additionally - I stare at a digital monitor all day at work; then for most of the evening. So if I actually have to read something, I'll opt for ink on paper, just to save my eyes that itty little bit. :)
(Kindles are awesome, but they can be slow; plus, some books have art work and/or tables that I am unsure would translate over very well.)
",phil,https://rpg.stackexchange.com/users/5603,http://rpg.stackexchange.com/questions/34615/what-are-the-benefits-of-owning-a-physical-book,CULTURE,rpg.stackexchange.com,1.0,0.3333333333333333,0.3333333333333333,0.0,0.6666666666666666,0.0,0.8888888888888888,0.8888888888888888,1.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.3333333333333333,0.0,1.0,0.8888888888888888,0.6666666666666666,1.0,1.0,0.9333333333333332,0.0,0.0,1.0,1.0
33,I teleported too high on my Minecraft server,"My friends own a Minecraft server and I was playing around with commands when I teleported to y 100000000000000000000000000000000 just because I wanted to see how high maps could go. That was a big mistake. Now every time I go on the server I crash.
I don't have access to the server files, I don't have the owner's email or phone number or anything, and I'm the only one who plays on the server. I know that one solution is to make a new Minecraft character, but that would require me to pay.
Is there some way I can delete my player information or change the location without access to the server files? I don't want to leave the server because I've been building a castle for 3 months now and I don't want to give up that easy.
",josiah,https://gaming.stackexchange.com/users/49831,"Yer, you're basically screwed :I. Even though you don't have contact to the owner maybe try and get a friend to go online and get them to teleport you to the ground or get them to contact the owner, as only he will be able to fix this.
",ReallyGoodPie,https://gaming.stackexchange.com/users/47440,http://gaming.stackexchange.com/questions/119737/i-teleported-too-high-on-my-minecraft-server,CULTURE,gaming.stackexchange.com,0.8888888888888888,0.3333333333333333,0.0,1.0,0.6666666666666666,1.0,0.5555555555555556,0.5555555555555556,0.0,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,0.0,1.0,0.0,0.3333333333333333,0.0,0.6666666666666666,1.0,0.6666666666666666,1.0,1.0,0.9333333333333332,1.0,0.0,0.6666666666666666,0.8888888888888888
36,How can I write HTML and send as an email?,"I want to send HTML email from Outlook or Yahoo, Gmail, Hotmail. I wasn't able to find any option where I could write HTML which will then be rendered at the receiver's end. How can I do that?
",Umair Jabbar,https://webapps.stackexchange.com/users/2676,"HTML in email clients is a very very thorny problem because most of them do not just embed a browser, but implement random subsets of the HTML and CSS. And there is a LOT of email clients.
If you really do need to do this, you may look at professional solutions from Mailing List Marketing companies. It is a known problem in that space and there are services, tutorials (example) and checklists for that. Here is a service from MailChimp. Most of the providers offer something similar.
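That said, if you only need a one-off message rather than bulk mail, you can send hand-written HTML through any SMTP account you control. A minimal Python sketch; the host, port, addresses and credentials are placeholders:
import smtplib
from email.mime.text import MIMEText
html = '<h1>Hello</h1><p>This is <b>HTML</b> mail.</p>'
msg = MIMEText(html, 'html')  # 'html' sets Content-Type: text/html
msg['Subject'] = 'HTML test'
msg['From'] = 'me@example.com'
msg['To'] = 'you@example.com'
with smtplib.SMTP_SSL('smtp.example.com', 465) as server:  # placeholder host/port
    server.login('me@example.com', 'app-password')         # placeholder credentials
    server.send_message(msg)
How the receiving client renders that HTML is exactly the thorny part described above.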
",Alexandre Rafalovitch,https://webapps.stackexchange.com/users/2820,http://webapps.stackexchange.com/questions/7932/how-can-i-write-html-and-send-as-an-email,TECHNOLOGY,webapps.stackexchange.com,1.0,0.8888888888888888,0.0,0.6666666666666666,1.0,0.6666666666666666,0.7777777777777778,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.8888888888888888,0.5555555555555556,1.0,1.0,0.7333333333333333,0.0,0.0,1.0,1.0
38,Why is the potential function defined differently in physics and calculus?,"I am very familiar with the concept of a potential function, and potential energy, from calculus-based physics.
For instance, if we have the familiar force field $\mathbf{F} = -mg \,\mathbf{j}$, then a potential function is given by $U = mgy + C$. (Since potential energy is relative, we have an infinite number of potential functions.)
Notice that the gradient of the potential function is the negative of the force field: $$\nabla U = \nabla(mgy + C) = mg \,\mathbf{j} = -\mathbf{F}.$$
That was perfectly fine with me. But now in vector calculus, I am reading that the potential function $f$ of a vector function $\mathbf{F}$ is such that $\nabla f = \mathbf{F}$. A negative sign appears to have been lost when migrating from physics to calculus.
It seems confusing to call $f$ a ""potential function"", since it cannot be interpreted as potential energy in the real world. So why is the calculus nomenclature as it is (i.e., why not call this something else and then say the potential function is the negative of it)?
",Radon Rosborough,https://math.stackexchange.com/users/160658,"Recall where the negative sign comes from in physics -- it is simply due to your coordinate system and point of view. The difference is analogous to the difference between work done by gravity and work done on gravity.
",Nitin,https://math.stackexchange.com/users/217285,http://math.stackexchange.com/questions/1335576/why-is-the-potential-function-defined-differently-in-physics-and-calculus,SCIENCE,math.stackexchange.com,1.0,0.7777777777777778,0.0,1.0,1.0,1.0,0.6666666666666666,0.4444444444444444,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.8888888888888888,1.0,0.6666666666666666,1.0,1.0,0.9333333333333332,0.0,0.0,1.0,0.8888888888888888
40,What is Cold Iron actually?,"It came up in Dresden Files, but it is not limited to that game; you can find the term in D&D as well. I would like to know what it means.
If you look for Cold Iron on Wikipedia, you only get iron:
""Cold iron is a poetic and archaic term for iron.""
This would imply everything made mostly of Fe is cold iron. Clearly, this is not the case; in every game Cold Iron is something special, and the everyday sword is not made out of it.
The Dresden Files rulebook is not very specific about it:
something that anyone could reasonably get access to, but usually doesn’t carry on them (like cold iron) (page 185).
What is cold iron?
How do I create cold iron?
How do I get cold iron?
To make the question easier to understand, compare Cold Iron to Holy Water. You know how it is different from regular water, and you know how you get or create it.
",András,https://rpg.stackexchange.com/users/9552,"Based on the events of Summer Knight cold iron is, in fact, just iron. Dresden
kills Aurora with hundreds of pixies wielding common hobby knives with plastic casings. The book specifically mentions how the cold iron of the knife blades makes the relatively minor cuts deadly to the Summer Lady.
Referencing your quote, how much steel do you have on you right now? Sure, some people still carry a Leatherman or a pocket knife, but most don't in the US these days. However, how easy is it to get one? In Summer Knight Dresden specifically mentions needing to stop at Walmart prior to the climactic battle, but not what he needs, which is saved for the big reveal at the Stone Table.
",HerbN,https://rpg.stackexchange.com/users/14,http://rpg.stackexchange.com/questions/40826/what-is-cold-iron-actually,CULTURE,rpg.stackexchange.com,1.0,0.5555555555555556,0.0,0.6666666666666666,1.0,0.6666666666666666,0.6666666666666666,0.5555555555555556,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.0,0.8888888888888888,1.0,0.6666666666666666,1.0,1.0,0.9333333333333332,0.3333333333333333,0.0,0.6666666666666666,1.0
41,"Relationship between user story, feature, and epic?","As someone whose still new to agile, I'm not sure I completely understand the relationship or difference between a user story, feature, and epic.
According to this question, a feature is a collection of stories. One of the answers suggest that a feature is actually an epic.
So are features and epics considered the same thing, which is basically a collection of related user stories?
Our project manager insists that there's a hierarchical structure:
Epic -> Features -> User stories
... basically all user stories must fall within this structure. Therefore all user stories must fall under an umbrella feature and all features must fall under an epic.
To me, that sounds awkward. Can someone please clarify how user stories, features, and epics are related? Or is there an article that clearly outlines the differences?
",nivlam,https://programmers.stackexchange.com/users/45,"I caution you against applying too rigid a hierarchy to these terms. We tried to do that in my previous job. Twice. Both attempts were different and both times we found we had unnecessarily limited ourselves. The only constant was the definition of a User Story. From a planning perspective, a story is the basic building block of a project. The larger terms (epic, feature, etc.) are effectively just tags. Tags are an easy way to allow a story to exist as part of multiple Epics and multiple Features at the same time. It's not worth the mental effort to be more strict than that.
Tags work for Stack Exchange and they can work for you too.
",Kristo,https://programmers.stackexchange.com/users/1111,http://programmers.stackexchange.com/questions/182158/relationship-between-user-story-feature-and-epic,TECHNOLOGY,programmers.stackexchange.com,0.8888888888888888,0.6666666666666666,0.0,0.3333333333333333,1.0,0.6666666666666666,0.5555555555555556,0.4444444444444444,1.0,0.0,0.0,0.3333333333333333,0.0,0.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.6666666666666666,0.0,1.0,0.8888888888888888,0.5555555555555556,1.0,1.0,0.8,0.0,0.0,0.3333333333333333,1.0
47,How do different tissue culture matrices affect background in fluorescent microscopy?,"In response to my previous question, I've been reading up a little bit on poly-D-lysine, Collagen I, Collagen IV, laminin, and other tissue culture coatings that promote cell adhesion. I've always assumed that anything other than standard TC-treated plastic or glass would significantly increase background, but perhaps my views on background fluorescence are a little outdated. Does anybody have experience with these in a fluorescent microscopy/high-throughput screening environment?
Specifically in my case, I'm looking at endocytosis and trafficking of a labeled protein into the lysosome. I'm labeling the protein with the pH-dependent dye pHrodo™ from Molecular Probes, which supposedly has very little fluorescence at neutral pH, but becomes very bright as the pH drops when endocytic vesicles become lysosomes. This theoretically means that a final wash step is not needed, but with a matrix coating on the plates I'm worried about background.
So, what is the current thinking as far as background fluorescence of the various TC matrices is concerned? Does the background come from the matrix itself, or by the fluorescent dye becoming adsorbed to it? Is it wavelength-dependent? Fortunately I may not be stuck with my poorly-adhering cells, and I may not need supplemental matrix at all in the end, but I still want a better understanding of how it works.
",MattDMo,https://biology.stackexchange.com/users/948,"Extracellular matrix (ECM) fluoresces, especially Collagen and Laminin. The maximum is in the DAPI and FITC channels and the fluorescence becomes weaker towards longer wavelengths. However, since the coat on the TC flasks is very thin, I would not expect this to be a problem. The best thing is just to try it. There is also a quite famous document available which might be of help:
Autofluorescence: Causes and Cures
",Eekhoorn,https://biology.stackexchange.com/users/852,http://biology.stackexchange.com/questions/8656/how-do-different-tissue-culture-matrices-affect-background-in-fluorescent-micros,SCIENCE,biology.stackexchange.com,1.0,0.6666666666666666,0.0,1.0,1.0,1.0,0.6666666666666666,0.6666666666666666,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,0.6666666666666666,1.0,1.0,0.8,0.0,0.0,1.0,0.6666666666666666
48,Tips for finding SPAM links injected into the_content,"I'm working on a client's site and I noticed that posts have a hidden <div> filled with SPAM links to dick pills, etc. Hoping to get lucky, I searched for some of the keywords in the database tables, but found no matches. I also searched the code in all the files, and also found no matches.
I know that WordPress hacks can be very tricky to remove, and attackers go to great lengths to make them hard to find. But perhaps there are some ""usual suspects"" that I could check, or maybe some tell-tale signs I could look for.
I'm not asking for anyone to solve this specific hack. I'm just looking for advice on where (in general) to look.
In case it's useful, here's the unauthorized <div>, which is injected right before the close of the first </p>:
<div id='hideMeya'> At that requires looking for how you http://www.cialis.com <a href=""http://wwxcashadvancecom.com/"" title=""want $745? visit our site."">want $745? visit our site.</a> sign any of money. Visit our secure bad creditors that cialis levitra sales viagra <a href=""http://www10525.c3viagra10.com/"" title=""viagra australia online"">viagra australia online</a> payday lenders know otherwise. But the black you stay on discount price levitra <a href=""http://www10385.x1cialis10.com/"" title=""what is impotence in men"">what is impotence in men</a> duty to their lives. Citizen at one online or after receiving their research viagra online <a href=""http://www10675.80viagra10.com/"" title=""www.viagra.com"">www.viagra.com</a> to just take just wait until monday. This specifically relates to shop every pay stubs get viagra without prescription <a href=""http://www10154.40cialis10.com/"" title=""cialis overnight delivery"">cialis overnight delivery</a> and only used or faxing required. We will turn double checked by some small business loans viagra for woman <a href=""http://www10077.x1cialis10.com/"" title=""cialis india"">cialis india</a> sites that works the business before approval. Living paycheck went out cash there would generate levitra <a href=""http://www10450.a1viagra10.com/"" title=""viagra cialis levitra"">viagra cialis levitra</a> the scheduled maturity day method. Own a short application on when money also buy cialis online <a href=""http://buy4kamagra.com/"" title=""kamagra"">kamagra</a> plenty of personal initial limits. Even those loans quick because lenders realize http://cialis-ca-online.com <a href=""http://levitra4au.com/"" title=""levitrafroaustraila"">levitrafroaustraila</a> you notice a payday advance. A loan applications are more common thanks http://www.cialis2au.com/ <a href=""http://buy-7cialis.com/"" title=""cialis"">cialis</a> to only apply online website. Third borrowers will use your paycheck to levitra online pharmacy <a href=""http://www10675.30viagra10.com/"" title=""viagra online purchase"">viagra online purchase</a> utilize these individuals can cover. Often there must also referred to ensure online pharmacy viagra usa <a href=""http://www10600.90viagra10.com/"" title=""viagra effectiveness"">viagra effectiveness</a> you with financial expenses. Thanks to checking account also merchant cash loan wwwwviagracom.com <a href=""http://www10075.90viagra10.com/"" title=""levitra viagra cialis"">levitra viagra cialis</a> comparison to state or from there. At that they pay them in mere viagra <a href=""http://www10225.30viagra10.com/"" title=""cheapest generic viagra"">cheapest generic viagra</a> seconds and to comprehend. If a repossession or limited to see if approved www.cashadvances.com | Apply for a cash advance online! <a href=""http://www10385.70cialis10.com/"" title=""cialis dosage"">cialis dosage</a> the risks associated at your current address. Second borrowers should not start and struggle http://www.cashadvance.com <a href=""http://levitra-online-ca.com/"" title=""levitra for sale"">levitra for sale</a> at least a button. Thanks to send the benefits of everyday living cheapest viagra order online <a href=""http://www10462.70cialis10.com/"" title=""tadalafil"">tadalafil</a> from being foreclosed on its benefits. Finally you get help rebuild the original loan buy cialis viagra <a href=""http://viagra5online.com/"" title=""viagra without prescription"">viagra without prescription</a> can really only to surprises. 
Bank loans out you will take http://wviagracom.com/ <a href=""http://www10539.40cialis10.com/"" title=""erectile dysfunction supplements"">erectile dysfunction supplements</a> the conditions are a. Bills might provide an unexpected car cialis uk suppliers <a href=""http://kamagra-ca-online.com/"" title=""kamagra"">kamagra</a> broke a repayment length. Third borrowers repay because payday industry has the results http://www.buy9levitra.com/ <a href=""http://www10075.20viagra10.com/"" title=""viagra recreational use"">viagra recreational use</a> by the middle man and check process. After verifying your question with dignity and credit cards www.levitra.com <a href=""http://www10300.b2viagra10.com/"" title=""overnight viagra delivery"">overnight viagra delivery</a> or drive to secure loan online. Most people for dollars you between bad and free cialis <a href=""http://viagra7au.com/"" title=""http://viagra7au.com/"">http://viagra7au.com/</a> instead these applicants is available. Social security against your payday the larger sums buying viagra online <a href=""http://payday7online.com/"" title=""direct lenders installment loans no credit check"">direct lenders installment loans no credit check</a> of gossip when working telephone calls. Face it provides hour payday industry levitra online <a href=""http://www10150.30viagra10.com/"" title=""buy viagra now"">buy viagra now</a> has high credit score? Within minutes during your best score range from http://cashadvance8online.com <a href=""http://www10450.60viagra10.com/"" title=""viagra dosage instructions"">viagra dosage instructions</a> fees if there for them most. To avoid paperwork you in crisis arise from wwwpaydayloancom.com | Online Payday Loans application form! <a href=""http://www10225.80viagra10.com/"" title=""super active viagra"">super active viagra</a> online from paying the bank? Funds will know to throwing your cash advance no credit check <a href=""http://orderviagrauaonline.com/"" title=""viagara online"">viagara online</a> finances there that purse. Companies realize that asks for which can become cialis online <a href=""http://www10375.60viagra10.com/"" title=""sublingual viagra"">sublingual viagra</a> eligible to paycheck some lenders. Medical bills that be much easier than actually need only online cash advance <a href=""http://cashadvance8online.com"" title=""online cash advance"">online cash advance</a> your funds via the freedom you out. </div><script type='text/javascript'>if(document.getElementById('hideMeya') != null){document.getElementById('hideMeya').style.visibility = 'hidden';document.getElementById('hideMeya').style.display = 'none';}</script> </p>
",emersonthis,https://wordpress.stackexchange.com/users/11363,"In general, the best place to look is in your theme folders, specifically the main theme and in the index.php file. Then the footer and header files.
Also, check your modified dates and start with the most recently modified. Especially if there are several that were all modified around the same date/time.
I've seen, and had to fix, this problem several times on people's servers. The false data is normally loaded via a script inserted into the php files of the template.
First, you should definitely read over the WordPress FAQ for dealing with a hacked site.
Common entry points for gaining the ""access"" required to pull the injection off are outdated themes and/or plugins. It's best to run production servers with only the one active theme on the server, as well as removing unused plugins and replacing outdated active plugins.
There are a few scripts out there that you can upload to your server to help you find infected files so you can replace, clean or delete them. (Links listed below)
Again, try looking at file modified dates and check out the ones you haven't modified/installed recently yet which have a recent date on them.
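As a rough sketch of that triage (assuming you can run Python from the WordPress root; the two-week window is arbitrary, and the marker is just this question's injected div id):
import os, time
ROOT = '.'            # assumed WordPress root
MARKER = b'hideMeya'  # id of the injected div from the question
cutoff = time.time() - 14 * 86400  # "recently modified" = last two weeks
for dirpath, _, files in os.walk(ROOT):
    for name in files:
        if not name.endswith('.php'):
            continue
        path = os.path.join(dirpath, name)
        recent = os.path.getmtime(path) > cutoff
        with open(path, 'rb') as f:
            hit = MARKER in f.read()
        if recent or hit:
            print(path, 'recently modified' if recent else '', 'contains marker' if hit else '')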
Look for Bad Guys: I've used this one. It takes a bunch of configuration to root out all the false positives ... but it can at least help you get a general idea of which files to check out.
Wordfence offers the ability to scan your files with their free plugin; I can't personally endorse it though, since I haven't used it myself.
https://www.wordfence.com/docs/how-to-clean-a-hacked-wordpress-site-using-wordfence/
Sucuri is linked on the WordPress forums frequently and they have a plugin for scanning as well, but once again... I haven't personally used it. But I'm a hands-on kind of person.
",Squish,https://wordpress.stackexchange.com/users/28821,http://wordpress.stackexchange.com/questions/134825/tips-for-finding-spam-links-injected-into-the-content,TECHNOLOGY,wordpress.stackexchange.com,0.8333333333333334,0.5,0.0,1.0,0.5,1.0,0.5,0.3333333333333333,0.0,0.0,0.5,0.0,0.0,0.0,0.0,0.5,1.0,1.0,0.5,0.0,0.8333333333333334,1.0,0.6666666666666666,1.0,0.8333333333333334,0.9,0.0,1.0,0.5,0.8333333333333334
50,Photo icons have generic picture only,"When going to a JPG photo folder on the OS C drive and opening it to view photos in icon mode, the icons only show a generic picture. If I click on the icon then the actual photo comes up. It seems like this problem just started happening. In the past I would open a folder, view the icons, and the photos were all there to view at once. I don't believe I made any intentional changes. Thanks for the help.
",Bob,https://superuser.com/users/323210,"I had this problem with my new installation of Windows 8.1.
After I had installed most of the regular Windows applications I was using in Windows 7, I started on a getting-acquainted tour of the new OS and its ""ModernUI"" apps.
The generic file type icons displayed in the Photos app really puzzled me, but a lot of Internet searching finally led to the solution: any installation of a recent version of the open source office suite ""LibreOffice"" changes the Windows registry entries for common graphics file types in an undesirable way, so that they are treated as documents rather than pictures.
There is a very long thread about the problems and solutions at http://answers.microsoft.com/en-us/windows/forum/windows8_1-pictures/windows-81-photos-app-does-not-show-any-photos/5b1740bc-d87d-4d07-afe5-c1a60cdecd55?page=6&msgId=9cdf0542-aab0-4a0b-85aa-e9f6d40b64fc
The simplest solution presented was a registry file which has to be reapplied every time LibreOffice is upgraded. The file must contain the following text and be named with a .reg suffix; then double-click on it to load it into the registry. You may have to log out and back in, or even restart the computer--I can't remember which I did to get it to work.
Windows Registry Editor Version 5.00
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\KindMap]
"".wpg""=""picture""
"".dxf""=""picture""
"".emf""=""picture""
"".eps""=""picture""
"".met""=""picture""
"".pct""=""picture""
"".wmf""=""picture""
"".bmp""=""picture""
"".cdr""=""picture""
"".cmx""=""picture""
"".gif""=""picture""
"".jpg""=""picture""
"".jpeg""=""picture""
"".jpe""=""picture""
"".pbm""=""picture""
"".pcx""=""picture""
"".pgm""=""picture""
"".png""=""picture""
"".ppm""=""picture""
"".psd""=""picture""
"".ras""=""picture""
"".svg""=""picture""
"".tga""=""picture""
"".tif""=""picture""
"".tiff""=""picture""
"".xbm""=""picture""
"".xpm""=""picture""
"".pcd""=""picture""
I hope this helps, but I am not (yet) sufficiently familiar with Windows internals to explain the problem and the solution presented here.
",Insert Real Name,https://superuser.com/users/12204,http://superuser.com/questions/754061,TECHNOLOGY,superuser.com,1.0,0.5555555555555556,0.0,0.6666666666666666,1.0,0.3333333333333333,0.7777777777777778,0.5555555555555556,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.8888888888888888,0.7777777777777778,0.5555555555555556,0.8888888888888888,0.8888888888888888,0.8,0.6666666666666666,0.0,0.3333333333333333,0.7777777777777778
51,More than 2 resistors in parallel,"Most people know the formula for the total resistance of parallel resistors:
\$ \dfrac{1}{R_t} = \dfrac{1}{R_1} + \dfrac{1}{R_2} + {}...{} + \dfrac{1}{R_n} \$
If there are only 2 resistors, that can be easily rearranged to solve for Rt:
\$ {R_t} = \dfrac{(R_1 \cdot R_2)}{(R_1 + R_2)} \$
Is there a safe way to do that for n resistors?
",Bluefire,https://electronics.stackexchange.com/users/15851,"This is not an answer to your question but rather, additional information that may (or may not) be helpful in thinking about this kind of problem.
When I teach introductory circuit classes, I always emphasize the notion of duality which, when mastered, can give you deep insight into many fundamental ""rules"" of circuit analysis.
The idea is that if you know the answer for, say, a series circuit, you can take the dual of the result and get a correct answer for a seemingly very different problem.
So, here is short list of circuit duals:
Voltage - Current
Resistance - Conductance
Inductance - Capacitance
Impedance - Admittance
Series - Parallel
Thevenin - Norton
There are others but these will do most of the time.
Ohm's law is usually written as:
\$V = I R \$
To take the dual, replace all the variables in the above equation with their duals:
The dual of Ohm's Law:
\$I = VG \$
where \$G = \dfrac{1}{R} \$
Recall that for resistors in series, resistances add, so that the equivalent resistance is just the sum.
Consider the dual of this, conductances in parallel.
From the principle of duality, parallel conductances add just as series resistances do. So, if you have 3 conductances in parallel, the equivalent conductance is:
\$G_{eq} = G_1 + G_2 + G_3 \$
Now, convert back to resistance:
\$R_{eq} = \dfrac{1}{G_{eq}} = \dfrac{1}{G_1 + G_2 + G_3} = \dfrac{1}{\frac{1}{R_1}+\frac{1}{R_2} +\frac{1}{R_3}}\$
In other words, the equivalent resistance of \$n\$ parallel resistors is the reciprocal of the sum of the reciprocals.
This is the origin of your first formula.
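For completeness, the n-resistor rule is a one-liner in code; a tiny Python sketch, purely for illustration:
def parallel(*resistances):
    # reciprocal of the sum of reciprocals, for any number of resistors
    return 1.0 / sum(1.0 / r for r in resistances)
print(parallel(100, 100))    # 50.0
print(parallel(10, 20, 30))  # 5.4545... (= 60/11)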
",Alfred Centauri,https://electronics.stackexchange.com/users/10475,http://electronics.stackexchange.com/questions/47185/more-than-2-resistors-in-parallel,TECHNOLOGY,electronics.stackexchange.com,0.6666666666666666,0.3333333333333333,0.0,1.0,1.0,1.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,0.6666666666666666,0.0,0.0,0.0,0.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.0,0.8888888888888888,1.0,0.8333333333333334,1.0,0.8333333333333334,0.7,0.3333333333333333,0.3333333333333333,1.0,1.0
52,C# Test Assembly,"I am new to C# and am having a particularly difficult time figuring out how test assemblies work. This is my Solution directory (with a single project MyProject inside of it):
MySolution/MySolutionFile.sln -- My Solution File
MySolution/packages -- My Packages Directory
MySolution/MyProject/Properties -- My Project Properties
MySolution/MyProject/src -- My Source Code
/src/MyClass1.cs
/src/MyClass2.cs
MySolution/MyProject/test -- My NUnit Tests
/test/MyClass1Test.cs
/test/MyClass2Test.cs
MySolution/MyProject/TheMainClass.cs
Now I can run all the tests in the test folder from Visual Studio. However, I want to run the tests from a CI system like TeamCity, in which case I need an assembly path. How do I compile just the test folder into an assembly to be run?
Do I need to add to the main class a method that runs each test in the MySolution/test folder?
",Anil Vaitla,https://stackoverflow.com/users/815977,"I think you need to read about projects and solutions: http://msdn.microsoft.com/en-us/library/ee817674.aspx
And after that it all becomes clearer: have one solution; inside of that solution, create a project for your application and a project for your unit tests. In the test project, add a reference to the testing framework of your choice and a reference to your application project.
This way your application does not know about your tests and is compiled into one assembly. At the same time your tests depend on your application, but are compiled into another assembly, which can be used by your test runner GUI/CI or whatever else you use.
And to answer your next question: for the test project you need to choose the ""Library"" project type (a console application will work as well if you like).
",trailmax,https://stackoverflow.com/users/809357,http://stackoverflow.com/questions/12379599/c-sharp-test-assembly,STACKOVERFLOW,stackoverflow.com,1.0,0.3333333333333333,0.0,1.0,1.0,1.0,0.6666666666666666,0.3333333333333333,0.0,0.0,1.0,1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.3333333333333333,0.6666666666666666,0.6666666666666666,0.6666666666666666,1.0,0.8,1.0,0.0,1.0,1.0
54,Is there a general term for a single note or a chord?,"Take a passage like this:
Fill in the blank: Each of these boxes denotes a ______
Is there a single general term for these that's better than note or chord? Or maybe there's a term for ""anything that has a duration"" that also encompasses rests? It seems like enough of a fundamental concept that it should have a name.
",Trillian,https://music.stackexchange.com/users/16,"For formal, technical purposes (e.g. when discussing musical audiation and other aspects of musical cognition) the terms ""acoustic event"" or ""notated event"" or ""vertical event"" is pretty much standard terminology within psychology of music for referring broadly to any individual single tone or simultaneosly experienced combination of tones (i.e. an individual chord) within a passage or composition. An acoustic event can also include a silence. Really useful all-inclusive terms, (though not yet in musicians' common parlance)!
So, I'd use the term ""notated event"" (or the more generalized ""vertical event"") to fill in the blank in the OP's question.
",RichT,https://music.stackexchange.com/users/13338,http://music.stackexchange.com/questions/17282/is-there-a-general-term-for-a-single-note-or-a-chord,LIFE_ARTS,music.stackexchange.com,0.8888888888888888,0.8888888888888888,0.0,1.0,0.6666666666666666,0.3333333333333333,0.7777777777777778,0.7777777777777778,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.3333333333333333,0.0,1.0,1.0,0.7777777777777778,0.8888888888888888,1.0,0.9333333333333332,0.0,0.0,0.6666666666666666,1.0
55,How can I login to a website from terminal?,"As a student of computer science, I am now learning to work with the command line.
I need to use brute force to log in to a website, knowing the username and that the password is an integer in a certain range. This is what I have so far, but it doesn't seem to work.
for i in {2000..3000}; do curl http://admin:$i@mywebsite.com/link; done
How can I proceed?
",Nat,https://unix.stackexchange.com/users/103602,"What your routine is lacking is some way to exit once the correct i is found.
For that you can look at the exit code given back from curl:
for i in {2000..3000}
do
    # -f (--fail) makes curl exit non-zero on HTTP errors such as the 401
    # returned for a wrong password; -s -o /dev/null hides the output.
    curl -f -s -o /dev/null http://admin:$i@mywebsite.com/link
    if [ ""$?"" -eq 0 ]; then
        echo Found ""$i""
        break
    fi
done
The exit value of curl is 0 when everything is correct; with -f (--fail) it is also non-zero when the server answers with an HTTP error such as a 401 for a wrong password. It can be checked directly after the program stops by inspecting the special variable $?.
Use man curl and search for EXIT CODE to see all the different things curl can tell you with its exit code.
",Anthon,https://unix.stackexchange.com/users/33055,http://unix.stackexchange.com/questions/203827/how-can-i-login-to-a-website-from-terminal,TECHNOLOGY,unix.stackexchange.com,1.0,0.5555555555555556,0.0,0.5,1.0,1.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,1.0,0.6666666666666666,0.0,0.0,0.8888888888888888,1.0,0.6666666666666666,1.0,1.0,1.0,1.0,0.6666666666666666,0.6666666666666666,0.8888888888888888
56,Export error with addon,"Im trying to export a model for the Euro Truck Simulator 2 game with the Blender2SCS addon and I get the following error, any help?
Traceback (most recent call last):
  File ""C:\Program Files\Blender Foundation\Blender\2.72\scripts\addons\io_scene_scs\__init__.py"", line 1951, in execute
    error = export_scs.save(filepath, origin_path, root_object, self.copy_textures, int(self.pmg_version))
  File ""C:\Program Files\Blender Foundation\Blender\2.72\scripts\addons\io_scene_scs\export_scs.py"", line 27, in save
    status, ob = export_pmd.save(exportpath, originpath, root_ob, copy_textures)
  File ""C:\Program Files\Blender Foundation\Blender\2.72\scripts\addons\io_scene_scs\export_pmd.py"", line 305, in save
    pmd.write(f, exportpath, originpath, copy_tex)
  File ""C:\Program Files\Blender Foundation\Blender\2.72\scripts\addons\io_scene_scs\export_pmd.py"", line 267, in write
    ret_ob = exp_mat.write(copy_tex)
  File ""C:\Program Files\Blender Foundation\Blender\2.72\scripts\addons\io_scene_scs\export_mat.py"", line 91, in write
    for option in self.__options.keys():
AttributeError: 'str' object has no attribute 'keys'
location: <unknown location>:-1
location: <unknown location>:-1
",Pumizo,https://blender.stackexchange.com/users/6311,"The addon seems to be broken due to api changes in Blender, either use the addons bug tracker or try an older version of Blender that is compatible. According to the video tutorials version 2.66 was used.
",stacker,https://blender.stackexchange.com/users/29,http://blender.stackexchange.com/questions/23314/export-error-with-addon,TECHNOLOGY,blender.stackexchange.com,0.8888888888888888,0.3333333333333333,0.0,0.6666666666666666,1.0,1.0,0.5555555555555556,0.5555555555555556,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.3333333333333333,0.3333333333333333,0.0,0.5555555555555556,1.0,0.6666666666666666,1.0,1.0,0.8666666666666666,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.8888888888888888
58,Java final variables changes at the execution time,"I don't know the reason for that. Maybe you could help me.
So, the code here creates a frame with 8 sliders.
public class MyFrame extends JFrame {
    ImagePanel imagePanel;
    final int Minimum = 0;
    final int Maximum = 10;
    final int NumberOfSpheres = 8;
    final int NumberOfScales = 10;
    MyRandomAccessFile file;
    // Slider labels (Russian): Friends and Social Circle, Relationships,
    // Career and Business, Finances, Spirituality and Creativity,
    // Personal Growth, Brightness of Life, Health and Sports.
    final String[] s = {""Друзья и Окружение"",
            ""Отношения"", ""Карьера и Бизнес"",
            ""Финансы"", ""Духовность и Творчество"",
            ""Личностный Рост"", ""Яркость Жизни"", ""Здоровье и Спорт""};
    private final Color[] colors = {Color.RED, Color.ORANGE, Color.YELLOW, Color.GREEN, Color.BLUE, Color.PINK, Color.MAGENTA, Color.DARK_GRAY};
    private final int[] array = {1, 0, 0, 0, 0, 0, 0, 0};

    public MyFrame() {
        setTitle(""Wheel Of Life"");
        MySlider[] sliders = new MySlider[NumberOfSpheres];
        JButton saveButton = new JButton(""Save"");
        MyActionListener listener1 = new MyActionListener();
        saveButton.addActionListener(listener1);
        file = new MyRandomAccessFile();
        //String s3 = ""0 0 0 0 0 0 0 0"";
        //array = stringToIntArray(s3);
        array[1] = 4;
        JLabel[] labels = new JLabel[NumberOfSpheres];
        imagePanel = new ImagePanel(colors, array);
        System.out.println(array[1]);
        JPanel mainPanel = new JPanel();
        JPanel[] sliderPanels = new JPanel[NumberOfSpheres];
        JPanel mainSliderPanel = new JPanel(new GridLayout(4, 2, 20, 20));
        MyChangeListener listener = new MyChangeListener();
        for (int i = 0; i < NumberOfSpheres; i++) {
            sliders[i] = new MySlider(s[i]);
            sliders[i].addChangeListener(listener);
            labels[i] = new JLabel(s[i]);
            labels[i].setForeground(colors[i]);
            labels[i].setFont(new Font(""Droid Sans"", Font.BOLD, 20));
            sliderPanels[i] = new JPanel();
            sliders[i].setMinimum(Minimum);
            sliders[i].setMaximum(Maximum);
            System.out.print(array[i]);
            sliders[i].setValue(4);
            sliders[i].setMajorTickSpacing(1);
            sliders[i].setMinorTickSpacing((int) 0.1); // note: (int) 0.1 truncates to 0
            sliders[i].setPaintLabels(true);
            sliders[i].setPaintTicks(true);
            sliderPanels[i].setLayout(new GridLayout(2, 1, 5, 5));
            sliderPanels[i].add(sliders[i]);
            sliderPanels[i].add(labels[i]);
            mainSliderPanel.add(sliderPanels[i]);
        }
        mainPanel.setLayout(new BorderLayout());
        mainPanel.add(imagePanel, BorderLayout.CENTER);
        mainPanel.add(mainSliderPanel, BorderLayout.EAST);
        mainPanel.add(saveButton, BorderLayout.SOUTH);
        add(mainPanel);
    }
And the output is this
4
99999999
How can a final variable change its value at execution time, what the hell???
The actual value of the variable depends on the value I write in sliders[i].setValue(4); but I don't know how exactly...
And I tried to set a watchpoint for this variable... It doesn't work. One time it's zero, and the next time the program stops in this for loop the value is 9, 4 or whatever...
",user1685095,https://stackoverflow.com/users/1685095,"Setting final to array does not mean that u cant add or change element value, but it means now you can assign another array reference to this variable. But you can add , update or remove the elements in that array
",zaffargachal,https://stackoverflow.com/users/1227732,http://stackoverflow.com/questions/12806590/java-final-variables-changes-at-the-execution-time,STACKOVERFLOW,stackoverflow.com,0.6666666666666666,0.3333333333333333,0.0,0.0,1.0,0.0,0.5,0.3333333333333333,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.0,0.6666666666666666,0.8333333333333334,0.6666666666666666,0.8333333333333334,1.0,0.8,0.6666666666666666,0.0,1.0,0.8333333333333334
59,Difference between busses,"I think I am confusing the difference between some of the busses, such as IDE, SATA, USB, and PCI. What is the relationship between all four, and how are they connected to each other? From what I read it seems like PCI connects them together as well as to the CPU, but it's not clear. Any help would be greatly appreciated. I am cross-referencing this post with another I made about the Linux commands to browse them. http://unix.stackexchange.com/questions/27414/ide-and-pci-bus-commands
",rubixibuc,https://electronics.stackexchange.com/users/7163,"The interrelationship of the different busses is roughly as follows:
                                              / SATA
CPU => Northbridge => PCI Bus => Southbridge => IDE
                                              \ USB
Where the Northbridge and Southbridge are names given to the two main controller chips inside a PC.
IDE and SATA both perform the same job but through different physical media - they are for attaching hard drives etc.
IDE is ""Integrated Device Electronics"" - also known as ""ATA"" or ""ATAPI"" (ATA Peripheral Interface).
SATA is ""Serial ATA"" - the same ATA protocol but serial instead of parallel.
USB is a serial communications bus which can communicate with any number of devices, not just hard drives and other storage devices. It speaks a completely different protocol to the ATA family.
PCI (and derivatives such as PCIe) is much closer to the CPU and generally provides much more direct access to it.
Edit:
You can see how everything is connected together in Windows through the Device Manager set to View Devices by Connection:
",Majenko,https://electronics.stackexchange.com/users/4245,http://electronics.stackexchange.com/questions/24077/difference-between-busses,SCIENCE,electronics.stackexchange.com,1.0,0.5555555555555556,0.0,0.6666666666666666,1.0,0.6666666666666666,0.4444444444444444,0.4444444444444444,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.3333333333333333,0.6666666666666666,0.0,0.8888888888888888,1.0,0.6666666666666666,1.0,1.0,1.0,0.0,0.0,0.6666666666666666,0.8888888888888888
60,Flushing coolant after driving only with water in,"If I drive my truck for 30 min with just water in my system, would it be equivalent to just flushing it stationary (the proper way to flush it)? IOW, if I just drive around with a clean water refill in the coolant system instead of keeping the truck in place and then draining, would it be an okay way to flush?
",amphibient,https://mechanics.stackexchange.com/users/2618,"It would make no difference if you are standing still or driving the vehicle. You want to ensure you have the heater wide open when you do it to ensure you are getting the old fluid from the heater core as well as the engine. Driving the vehicle around will probably allow the process to happen a little faster, as you engine will get up to operating temperature faster. The real thing you are trying to accomplish is to get the thermostat open to allow everything to circulate.
",Pᴀᴜʟsᴛᴇʀ2,https://mechanics.stackexchange.com/users/4152,http://mechanics.stackexchange.com/questions/17345/flushing-coolant-after-driving-only-with-water-in,CULTURE,mechanics.stackexchange.com,0.8888888888888888,0.5555555555555556,0.0,1.0,0.3333333333333333,1.0,0.6666666666666666,0.7777777777777778,0.0,0.0,1.0,0.6666666666666666,0.0,0.0,0.0,0.0,0.3333333333333333,0.0,0.3333333333333333,0.0,1.0,1.0,0.6666666666666666,1.0,1.0,0.8666666666666666,0.6666666666666666,0.0,0.6666666666666666,0.8888888888888888
61,How does critical strike chance stack?,"I was wondering how critical strike chance stacks? Say I buy a Phantom Dancer with its 30% crit chance. Now if I buy another Phantom Dancer, what is my critical chance?
",BubBidderskins,https://gaming.stackexchange.com/users/22482,"they stack additively, so just add up the crit chance. 1 PD will increase your total crit chance by +30%. The 2nd PD will increase your total crit chance by +30%, so +60% crit chance total from 2 PD's.
",Masck,https://gaming.stackexchange.com/users/23074,http://gaming.stackexchange.com/questions/60921/how-does-critical-strike-chance-stack,CULTURE,gaming.stackexchange.com,0.7777777777777778,0.5555555555555556,0.0,1.0,1.0,1.0,0.7777777777777778,0.3333333333333333,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.3333333333333333,0.6666666666666666,0.6666666666666666,0.0,0.7777777777777778,1.0,0.6666666666666666,1.0,1.0,1.0,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.8888888888888888
63,Directory.GetFiles string gives an error,"I am creating a website using ASP.NET and C#.
I need to access all files one by one in a directory. So I wrote the following code.
string[] ssImgs = Directory.GetFiles(""images/movies"");
Label1.Text = ssImgs[0];
Label2.Text = ssImgs[1];
But it gives an error. What is the correct way to do this? Thanks in advance.
",LIH,https://stackoverflow.com/users/2436770,"There are two potential issues here.
If you don't have using System.IO; in your file, the Directory class will not be found by the compiler.
The way you're specifying your path (""images/movies"") makes it relative to the current working directory, which is likely not the directory you expect. Use Server.MapPath (or the Path class) to build a full, absolute path to the proper folder.
",Reed Copsey,https://stackoverflow.com/users/65358,http://stackoverflow.com/questions/18471710/directory-getfiles-string-gives-an-error,STACKOVERFLOW,stackoverflow.com,0.8888888888888888,0.3333333333333333,0.0,1.0,0.6666666666666666,1.0,0.4444444444444444,0.3333333333333333,0.0,0.0,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.6666666666666666,0.0,0.7777777777777778,1.0,0.6666666666666666,1.0,1.0,0.9,0.6666666666666666,0.0,0.6666666666666666,1.0
64,Discontinuity at the edge of Chebychev window,"I am using the Chebyshev window for its narrow main lobe. The problem with the Chebyshev window is that it has discontinuities at the edges, and it seems that the Taylor window solves this issue.
More detail:
http://de.mathworks.com/help/signal/ref/taylorwin.html
http://en.wikipedia.org/wiki/Window_function#Dolph.E2.80.93Chebyshev_window
I've searched around but I can't find any information on how to implement a Taylor window. Any information on the Taylor window or suggestions on fixing this issue of edge discontinuities would be very appreciated.
",Thinium,https://dsp.stackexchange.com/users/16093,"A little Googling came up with this reference, which indicates that the impulse response for a Taylor window is:
$$
h[n] = 1 + 2 \sum_{m=1}^{\tilde{n}-1} F_m \cos\left(\frac{2\pi m}{N} \left(n-\frac{N}{2}+\frac{1}{2}\right)\right)
$$
$\tilde{n}$ is a parameter for controlling how many equal-height sidelobes there are in the window. $F_m$ is a parameter that is related to the maximum sidelobe height; the references given in Matt L's answer give more detail on how it is calculated.
",Jason R,https://dsp.stackexchange.com/users/90,http://dsp.stackexchange.com/questions/23890/discontinuity-at-the-edge-of-chebychev-window,TECHNOLOGY,dsp.stackexchange.com,0.6666666666666666,0.3333333333333333,0.0,0.5,1.0,1.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.6666666666666666,0.0,0.7777777777777778,1.0,0.5555555555555556,1.0,0.8888888888888888,0.7333333333333333,0.3333333333333333,0.0,0.6666666666666666,1.0
65,Silverlight: Returning value from synchronous method using Rx,"I am writing a simple Silverlight application and WCF Service.
I want to create a synchronous method that returns a value.
The method itself calls an asynchronous method from the WCF service. After I call the asynchronous method, I want to get its value and return it to the caller.
I hear that Rx can solve this kind of problem.
This is my code :
private void btnCreate_Click(object sender, RoutedEventArgs e)
{
    string myResult = getMyBook(txtBookName.Text);
    MessageBox.Show(""Result\n"" + myResult);
    // myResult will be used for another purpose here..
}

// I want this method to be callable from anywhere, as long as the caller is in the same namespace.
public string getMyBook(string bookName)
{
    Servo.ServoClient svc = new ServoClient();
    string returnValue = """";
    var o = Observable.FromEventPattern<GetBookCompletedEventArgs>(svc, ""GetBookCompleted"");
    o.Subscribe(
        b => returnValue = b.EventArgs.Result
    );
    svc.GetBookAsync(bookName);
    return returnValue;
}
When I click btnCreate, the myResult variable is still empty. Is there something wrong with my code? Or maybe I just don't understand the Rx concept? I am new to Rx.
My goal is: I need to get the result (the myResult variable) from the asynchronous method, and then use it in later code.
",Trom,https://stackoverflow.com/users/1219992,"Remember that GetBookAsync returns immediately, and will return the value stored in returnvalue. When the data arrives returnvalue will be out of scope, and by then btnCreate will have finished.
U could use await on the GetBookAsync, so that it will wait for the data to arrive before continuing. Don't forget that would mean u also need the async on the method.
Not a great example or use of either RX or await, but trying is how we learn!
",codeputer,https://stackoverflow.com/users/176338,http://stackoverflow.com/questions/15583781/silverlight-returning-value-from-synchronous-method-using-rx,STACKOVERFLOW,stackoverflow.com,0.8888888888888888,0.3333333333333333,0.0,1.0,0.3333333333333333,1.0,0.5555555555555556,0.3333333333333333,0.3333333333333333,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.0,0.0,0.4444444444444444,1.0,0.6666666666666666,1.0,1.0,0.8666666666666666,0.6666666666666666,0.0,0.3333333333333333,1.0
68,How to grep words in a file?,"
Possible Duplicate:
Regex for “or” in grep
How can I grep for lines with either 'disable' or 'enable' in my file?
I tried
$ grep ""disable|enable"" fail.log
but that shows up nothing.
",michael,https://superuser.com/users/31500,"Try ""grep ""disable\|enable"" fail.log
The backslash turns the | into the ""or"" metacharacter, otherwise you are grepping for the literal 'disable|enable""
",bdk,https://superuser.com/users/9262,http://superuser.com/questions/436426,TECHNOLOGY,superuser.com,0.8888888888888888,0.6666666666666666,0.0,0.3333333333333333,1.0,1.0,0.6666666666666666,0.4444444444444444,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,1.0,0.6666666666666666,0.3333333333333333,0.0,0.7777777777777778,0.8888888888888888,0.6666666666666666,1.0,1.0,0.8666666666666666,1.0,0.3333333333333333,1.0,0.8888888888888888
71,Get term weight in node object for ordering with EntityFieldQuery,"Is it possible to expose the taxonomy term weight in the node object?
I need to sort nodes using EntityFieldQuery by a term reference field, but the weight is not exposed to the node object, so that I could say, for example,
->fieldOrderBy('field_custom_terms', 'weight', 'ASC');
and sort nodes by weight of terms.
",NenadP,https://drupal.stackexchange.com/users/5970,"No, this is not possible. You can only order on actual field values, the term weight would require a JOIN to a non-field table and EFQ is not capable of doing that.
You will have to create a manual SQL query to do this. Note that Drupal by default maintains the node <-> term associations in the taxonomy_index table, that one is easier and more reliable to query than a field data table.
",Berdir,https://drupal.stackexchange.com/users/31,http://drupal.stackexchange.com/questions/40815/get-term-weight-in-node-object-for-ordering-with-entityfieldquery,TECHNOLOGY,drupal.stackexchange.com,1.0,0.6666666666666666,0.0,1.0,1.0,1.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.6666666666666666,1.0,0.3333333333333333,1.0,1.0,0.4,1.0,0.0,1.0,1.0
72,Why is my CV 145% complete?,"On Careers, when filling out my CV, it is listed as being 145% complete. Could someone tell me why this is?
",Matthew Jones,https://meta.stackexchange.com/users/133611,"145% means that you're 45% better than the average programmer. Congratulations.
",Lance Roberts,https://meta.stackexchange.com/users/13295,http://meta.stackexchange.com/questions/24964/why-is-my-cv-145-complete,TECHNOLOGY,meta.stackexchange.com,1.0,0.5555555555555556,0.0,1.0,1.0,1.0,0.6666666666666666,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,1.0,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.4444444444444444,0.2,0.0,0.0,1.0,0.7777777777777778
74,Symfony 2 and Twig custom error page,"I give up.
The problem is with the overridden Twig 404 template. It is overridden in the default way, by creating an error404.html.twig under the /app/Resources/TwigBundle/views directory.
The template itself does not contain any irregular or complex logic: just a layout with some translated text (|trans) and a menu with several links.
The problem is that I can't get the app.user object (returned as NULL) or the current app.request.locale (always returned as the default locale) inside this template.
I have even tried to override the Twig exception controller and dump the current locale (Request::getLocale()) or get the user; the results are the same: default locale and NULL for the user.
Then I decided to dig deeper and found dozens of listeners (locale listeners, exception listeners, ...) and tried to debug/fix/test there, but I didn't get any further.
By the way, I have overridden the 500 error page too, and everything is fine there. I guess that when a 500 error (exception) is thrown, Symfony has already set up the user/locale/etc., because it has already reached the target (controller/action) and the other listeners have already executed. But a 404 error (NotFoundHttpException) is thrown BEFORE Symfony reaches the action...
Some words about the project: Symfony 2.4.8, Doctrine, Gedmo extensions/stof bundle, JMS i18n routing bundle.
Symfony version: v2.4.8
JMS I18n Routing bundle: 1.1.1
Appreciate your help.
",Dmitry,https://stackoverflow.com/users/629899,"That's the way I do it (at least for 404 templates):
In app/config/routing.yml append:
#always in last position
#------------>
nonexistent_route:
    path: /{url}
    defaults: { _controller: ACMEDemoBundle:Default:wrongRoute }
    requirements:
        url: "".+""
#<-----------
The Controller:
namespace ACME\DemoBundle\Controller;

use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;

class DefaultController extends Controller
{
    ...

    public function wrongRouteAction($url)
    {
        $user = $this->get('security.context')->getToken()->getUser();

        return $this->render('TwigBundle:Exception:error404.html.twig', array(""user"" => $user, ""url"" => $url));
    }
}
And in your twig template app/Resources/TwigBundle/views/Exception/error404.html.twig you can access {{ user }}
",devilcius,https://stackoverflow.com/users/779388,http://stackoverflow.com/questions/27276778/symfony-2-and-twig-custom-error-page,STACKOVERFLOW,stackoverflow.com,0.8888888888888888,0.3333333333333333,0.0,0.5,1.0,1.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,1.0,0.6666666666666666,0.6666666666666666,0.0,0.8888888888888888,0.8888888888888888,0.6666666666666666,1.0,1.0,0.9333333333333332,1.0,0.3333333333333333,0.0,0.8888888888888888
75,Thunderbird 2.0: Inbox size is 4GB on disk: how do I reduce it?,"Mozilla Thunderbird 2.0: I have set Thunderbird never to delete a message that is on disk...Thus, after four short years, I have a 4GB Inbox file. Thunderbird needs about 10 minutes to read it, and even then I can't compact it. Anyone have some suggestions?
",Eric,https://superuser.com/users/15673,"There is definitely a 4GB limit on Windows due to Windows limitations which means you will have problem with individual Thunderbird mail folders that are larger than 4GB.
And I thought the 4GB limit existed on Mac and Linux as well (so I am curious as to how emgee can have a Thunderbird folder that's 7GB! emgee: perhaps you are referring to a Unified Folder being 7GB but your individual folders are < 4GB?)
Nick's suggestion (i.e. move to new, multiple Thunderbird folders each of which is <4GB. please clarify emgee) should work. More info with a complete procedure (change ""Sent"" to ""Inbox""):
http://getsatisfaction.com/mozilla_messaging/topics/version_3_1_2_still_has_the_missing_sent_message_bug#reply_3235232
",Roland Tanglao,https://superuser.com/users/9764,http://superuser.com/questions/198503,TECHNOLOGY,superuser.com,0.7777777777777778,0.7777777777777778,0.3333333333333333,0.6666666666666666,1.0,0.3333333333333333,0.6666666666666666,0.4444444444444444,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,1.0,1.0,0.7777777777777778,1.0,1.0,0.8666666666666666,0.6666666666666666,0.3333333333333333,0.6666666666666666,1.0
================================================
FILE: input/qa_stackexchange_cleaned.csv
================================================
id,host,question_username,question_score,question_views,question_favs,answers_count,answers_max_score,answers_mean_score,question_title,question_body,answer_username,answer,answer_score,is_answer_accepted
1,3dprinting.stackexchange.com,Adam Davis,3.767809650974757,3.912607320579891,0.0,3.1880595895805834,3.749318730431847,3.4319460411998994,How to obtain high resolution prints in a shorter period of time?,"When I've printed an object I've had to choose between high resolution and quick prints. What techniques or technologies can I use or deploy to speed up my high resolution prints?
",hroncok,"You could experiment with slicing. For example, you might not need high resolution all over the object, but you can speed up some straight parts by using greater layer high there. See a part of Slic3r manual about such thing.
It is also possible to print thicker infill every Nth layer, see Infill optimization in Slic3r.
Other slicers might have those features as well.
",3.2133663407215876,2.0769199823829045
1,3dprinting.stackexchange.com,Adam Davis,3.767809650974757,3.912607320579891,0.0,3.1880595895805834,3.749318730431847,3.4319460411998994,How to obtain high resolution prints in a shorter period of time?,"When I've printed an object I've had to choose between high resolution and quick prints. What techniques or technologies can I use or deploy to speed up my high resolution prints?
",plaintoothpaste,"For FDM technologies in general with a single extruder, slicing modifications is your only options. However there will be a trade off between quality and speed.
For ABS, changing to a machine with a enclosed build (such as a zortrax) chamber may help and a heated build chamber (Stratasys machine) will help the quality and reliability but not the print speed directly. As ABS has a tendency to warp vase mode is not the best idea either.
If you only need high resolution and not strength then reducing the infill percentage or even using vase mode will speed up the print. Also changing to a material that you can print at higher speeds like PLA will magnify any of the previous settings.
If you have two extruders then changing to a wider nozzle and using that for infill may speed up the print, heating and cooling time during extruder changeover may actually make it slower.
For other technologies there are lots of options digital light projection (DLP) and stereolithography (SLA) both provide significantly higher resolution then FDM, with DLP being the faster of the two, comparison. Take the review with a grain of salt though as the technologies are far from standardised, for example out DLP at work offers far higher resolution and speed then our SLA which is quite old.
Sintering or melting technologies can scan the outline every layer then perform a infill of multiple layers at once to speed up the process.
",1.6066831703607938,0.0
4,3dprinting.stackexchange.com,Adam Davis,4.541637296610063,3.9215535641013384,2.7799718631987322,4.670422881206417,3.320478682445624,3.4319460411998994,Are there any metals that exhibit a large glass state?,"Plastic is used in 3D FDM/FFF printing partly because it has a wide temperature range for its glass state - where it can be flowed with some force, but won't flow due only to gravity.
Most metals have a very narrow, or non-existent, glass state. They transition from solid to liquid with almost no flowable-but-not-liquid state.
Are there any metals or alloys that display a glass transition state?
",TextGeek,"I""m no expert on this, but the article at https://en.wikipedia.org/wiki/Amorphous_metal may be relevant for you.
There are some special alloys, such as gold/silicon and various titanium-based ones, that become ""bulk metal glasses"" if cooled extremely quickly (for example, by sputtering onto a spinning cold surface). The speed of cooling prevents crystal formation. Early BMGs were quite strong but brittle; improvements have reduced brittleness and required cooling speed.
",2.620387387103937,0.0
4,3dprinting.stackexchange.com,Adam Davis,4.541637296610063,3.9215535641013384,2.7799718631987322,4.670422881206417,3.320478682445624,3.4319460411998994,Are there any metals that exhibit a large glass state?,"Plastic is used in 3D FDM/FFF printing partly because it has a wide temperature range for its glass state - where it can be flowed with some force, but won't flow due only to gravity.
Most metals have a very narrow, or non-existent, glass state. They transition from solid to liquid with almost no flowable-but-not-liquid state.
Are there any metals or alloys that display a glass transition state?
",Ryan Carlyle,"A few things are required for effective extrusion-style 3d printing materials:
It must stay where placed by the nozzle long enough to harden (or, alternately for pastes and such, have a shear-thinning or thixotropic viscous profile so it will not flow under its own weight).
If using a filament extruder, it must have a wide range of viscosity that varies gradually over a considerable temperature range. This is necessary to develop the proper ""cap zone"" semi-melt shearing behavior that allows the incoming filament to act like a piston and generate pressure upstream of the nozzle. Pellet extruders have a similar requirement but related to screw/wall shearing rather than filament/wall shearing. If using neither filament nor pellets, such as clay printers, the material must be pumpable by a positive-displacement pump. (It is possible to pump molten metal, but the cost is quite high.)
It must form some kind of bond with previously-deposited solid material, without needing to be in a state that will rapidly flow and lose shape.
It must have some combination of low shrinkage, the ability to creep at the printer's ambient temp, and/or low stiffness that allows consecutive layers to be stacked without an unacceptable amount of warping.
Liquid metals tend to have a conflict between ""Staying where you put it"" and ""bonding with the previous layer."" In order for deposited metal to fully bond, the interface material needs to reach the melting point so a true fusion weld occurs. And in order to supply enough heat to remelt the interface without an additional heat source like an arc, the deposited molten metal needs to be very hot. So it will tend to run while it cools. High density and high heat capacity make it run fast and cool slowly.
Pretty much every DIY metal 3d print (such as made by wire-feed MIG welders) ends up looking something like this:
https://3dprint.com/29944/diy-metal-printing-garage/
In comparison, polymers have long molecular chains that allow them to ""diffusion weld"" and adhere WITHOUT fully remelting the interface. Molten liquid plastic will stick to solid plastic quite effectively. The interface only needs to get hot enough for appreciable diffusion to intertwine the molecular chains. This will occur between the glass point and melting point, without true fusion occurring. So you can print molten plastic at a temperature where it will stay in place long enough to harden, and still get good bonding.
Metals also tend to be very stiff, which encourages warping. It is difficult to build a heated environment of sufficient temperature to properly stress-relieve the thermal contraction stress as the print progresses, whereas with plastic a heated build plate and warm enclosure can permit warping stresses to start relaxing as the print progresses.
It is possible to ""FDM-style"" 3d print filament/wire made of metal alloys that have a wide range between solidus and liquidus. It has been done using solder and similar alloys. However, between the warping stresses, poor layer bonding from inadequate interface re-melting, and use of soft low-melting alloys, the resulting printed parts will usually end up being weaker than if they had simply been printed in a strong plastic. For example, PEEK is nearly as strong as aluminum, and carbon fiber or fiberglass composite plastics can exceed metals on various performance metrics. So what's the point of printing with weak, brittle metal alloys?
Over the years, lots of people have tried FDM-style metal printing, but no one has found it worthwhile to pursue in the long run. More typical DIY metal printing approaches, like 3D MIG welding followed by cleanup machining, will produce better results.
",2.845827522384412,2.0769199823829045
2,3dprinting.stackexchange.com,kenorb,5.291027456384213,5.52632428328061,3.507933925167123,5.199501240805782,5.195138047661548,4.521992869097919,Is 3D printing safe for your health?,"I would like to buy a 3D printer, but I'm concerned about the health risks that are associated with its operation. Some groups of scientists say it can be harmful to humans.
What do I need to consider before buying a 3D printer if I care about my health? Are there any safe printers?
",Tom van der Zanden,"There is very little information about safety available, as home 3D printers are relatively new. However, plastics such as ABS have a long history in making plastic products, and a study found that at traditional manufacturing methods (such as injection molding and hot wire cutting) do not release dangerous levels of carcinogens and/or respirator
SYMBOL INDEX (2653 symbols across 280 files)
FILE: packages/fairseq-hacked/eval_lm.py
class WordStat (line 20) | class WordStat(object):
method __init__ (line 21) | def __init__(self, word, is_bpe):
method add (line 29) | def add(self, log_prob, next_word_prob):
method __str__ (line 41) | def __str__(self):
function main (line 52) | def main(parsed_args):
function cli_main (line 247) | def cli_main():
FILE: packages/fairseq-hacked/examples/noisychannel/rerank.py
function score_target_hypo (line 14) | def score_target_hypo(
function match_target_hypo (line 153) | def match_target_hypo(args, target_outfile, hypo_outfile):
function load_score_files (line 215) | def load_score_files(args):
function rerank (line 347) | def rerank(args):
function cli_main (line 406) | def cli_main():
FILE: packages/fairseq-hacked/examples/noisychannel/rerank_generate.py
function gen_and_reprocess_nbest (line 22) | def gen_and_reprocess_nbest(args):
function cli_main (line 391) | def cli_main():
FILE: packages/fairseq-hacked/examples/noisychannel/rerank_options.py
function get_reranking_parser (line 9) | def get_reranking_parser(default_task='translation'):
function get_tuning_parser (line 15) | def get_tuning_parser(default_task='translation'):
function add_reranking_args (line 22) | def add_reranking_args(parser):
function add_tuning_args (line 110) | def add_tuning_args(parser):
FILE: packages/fairseq-hacked/examples/noisychannel/rerank_score_bw.py
function score_bw (line 9) | def score_bw(args):
function cli_main (line 130) | def cli_main():
FILE: packages/fairseq-hacked/examples/noisychannel/rerank_score_lm.py
function score_lm (line 7) | def score_lm(args):
function cli_main (line 68) | def cli_main():
FILE: packages/fairseq-hacked/examples/noisychannel/rerank_tune.py
function random_search (line 9) | def random_search(args):
function cli_main (line 88) | def cli_main():
FILE: packages/fairseq-hacked/examples/noisychannel/rerank_utils.py
function reprocess (line 11) | def reprocess(fle):
function reprocess_nbest (line 70) | def reprocess_nbest(fle):
function write_reprocessed (line 119) | def write_reprocessed(
function calc_length_from_frac (line 191) | def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol):
function get_prefix (line 202) | def get_prefix(sentence, prefix_len):
function get_prefix_no_bpe (line 211) | def get_prefix_no_bpe(sentence, bpe_symbol, prefix_len):
function get_prefix_from_len (line 218) | def get_prefix_from_len(sentence, bpe_symbol, prefix_len):
function get_num_bpe_tokens_from_len (line 229) | def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len):
function make_right_to_left (line 236) | def make_right_to_left(line):
function remove_bpe (line 243) | def remove_bpe(line, bpe_symbol):
function remove_bpe_dict (line 249) | def remove_bpe_dict(pred_dict, bpe_symbol):
function parse_bleu_scoring (line 260) | def parse_bleu_scoring(line):
function get_full_from_prefix (line 267) | def get_full_from_prefix(hypo_prefix, hypos):
function get_score (line 278) | def get_score(
class BitextOutput (line 320) | class BitextOutput(object):
method __init__ (line 321) | def __init__(
class BitextOutputFromGen (line 406) | class BitextOutputFromGen(object):
method __init__ (line 407) | def __init__(
function get_score_from_pos (line 477) | def get_score_from_pos(
class LMOutput (line 505) | class LMOutput(object):
method __init__ (line 506) | def __init__(
function parse_lm (line 535) | def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix...
function get_directories (line 580) | def get_directories(
function lm_scoring (line 647) | def lm_scoring(
function rescore_file_name (line 824) | def rescore_file_name(
FILE: packages/fairseq-hacked/examples/roberta/commonsense_qa/commonsense_qa_task.py
class CommonsenseQATask (line 29) | class CommonsenseQATask(FairseqTask):
method add_args (line 33) | def add_args(parser):
method __init__ (line 46) | def __init__(self, args, vocab):
method load_dictionary (line 54) | def load_dictionary(cls, filename):
method setup_task (line 65) | def setup_task(cls, args, **kwargs):
method load_dataset (line 76) | def load_dataset(
method build_model (line 170) | def build_model(self, args):
method source_dictionary (line 182) | def source_dictionary(self):
method target_dictionary (line 186) | def target_dictionary(self):
FILE: packages/fairseq-hacked/examples/roberta/multiprocessing_bpe_encoder.py
function main (line 18) | def main():
class MultiprocessingEncoder (line 81) | class MultiprocessingEncoder(object):
method __init__ (line 82) | def __init__(self, args):
method initializer (line 85) | def initializer(self):
method encode (line 89) | def encode(self, line):
method decode (line 94) | def decode(self, tokens):
method encode_lines (line 98) | def encode_lines(self, lines):
method decode_lines (line 111) | def decode_lines(self, lines):
FILE: packages/fairseq-hacked/examples/roberta/preprocess_RACE.py
class InputExample (line 14) | class InputExample:
method __init__ (line 15) | def __init__(self, paragraph, qa_list, label):
function get_examples (line 21) | def get_examples(data_dir, set_type):
function main (line 60) | def main():
FILE: packages/fairseq-hacked/examples/roberta/wsc/wsc_criterion.py
class WSCCriterion (line 17) | class WSCCriterion(FairseqCriterion):
method __init__ (line 18) | def __init__(self, args, task):
method __del__ (line 27) | def __del__(self):
method add_args (line 32) | def add_args(parser):
method get_masked_input (line 45) | def get_masked_input(self, tokens, mask):
method get_lprobs (line 50) | def get_lprobs(self, model, tokens, mask):
method get_loss (line 58) | def get_loss(self, query_lprobs, cand_lprobs):
method forward (line 71) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 117) | def aggregate_logging_outputs(logging_outputs):
class WinograndeCriterion (line 140) | class WinograndeCriterion(WSCCriterion):
method forward (line 141) | def forward(self, model, sample, reduce=True):
FILE: packages/fairseq-hacked/examples/roberta/wsc/wsc_task.py
class WSCTask (line 33) | class WSCTask(FairseqTask):
method add_args (line 37) | def add_args(parser):
method __init__ (line 49) | def __init__(self, args, vocab):
method load_dictionary (line 66) | def load_dictionary(cls, filename):
method setup_task (line 77) | def setup_task(cls, args, **kwargs):
method binarize (line 86) | def binarize(self, s: str, append_eos: bool = False):
method binarize_with_mask (line 98) | def binarize_with_mask(self, txt, prefix, suffix, leading_space, trail...
method load_dataset (line 108) | def load_dataset(
method build_dataset_for_inference (line 216) | def build_dataset_for_inference(self, sample_json):
method disambiguate_pronoun (line 224) | def disambiguate_pronoun(self, model, sentence, use_cuda=False):
method source_dictionary (line 261) | def source_dictionary(self):
method target_dictionary (line 265) | def target_dictionary(self):
class WinograndeTask (line 270) | class WinograndeTask(WSCTask):
method setup_task (line 277) | def setup_task(cls, args, **kwargs):
method load_dataset (line 286) | def load_dataset(
FILE: packages/fairseq-hacked/examples/roberta/wsc/wsc_utils.py
function convert_sentence_to_json (line 10) | def convert_sentence_to_json(sentence):
function extended_noun_chunks (line 36) | def extended_noun_chunks(sentence):
function find_token (line 52) | def find_token(sentence, start_pos):
function find_span (line 61) | def find_span(sentence, search_text, start=0):
function get_detokenizer (line 77) | def get_detokenizer():
function get_spacy_nlp (line 85) | def get_spacy_nlp():
function jsonl_iterator (line 92) | def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval...
function winogrande_jsonl_iterator (line 195) | def winogrande_jsonl_iterator(input_fname, eval=False):
function filter_noun_chunks (line 215) | def filter_noun_chunks(
FILE: packages/fairseq-hacked/examples/speech_recognition/criterions/ASG_loss.py
class ASGCriterion (line 20) | class ASGCriterion(FairseqCriterion):
method add_args (line 22) | def add_args(parser):
method __init__ (line 45) | def __init__(self, args, task):
method linseg_step (line 68) | def linseg_step(self):
method replace_eos_with_silence (line 82) | def replace_eos_with_silence(self, tgt):
method forward (line 90) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 142) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/examples/speech_recognition/criterions/CTC_loss.py
function arr_to_toks (line 24) | def arr_to_toks(arr):
function compute_ctc_uer (line 31) | def compute_ctc_uer(logprobs, targets, input_lengths, target_lengths, bl...
class CTCCriterion (line 78) | class CTCCriterion(FairseqCriterion):
method __init__ (line 79) | def __init__(self, args, task):
method add_args (line 86) | def add_args(parser):
method forward (line 98) | def forward(self, model, sample, reduce=True, log_probs=True):
method aggregate_logging_outputs (line 175) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/examples/speech_recognition/criterions/cross_entropy_acc.py
class CrossEntropyWithAccCriterion (line 18) | class CrossEntropyWithAccCriterion(FairseqCriterion):
method __init__ (line 19) | def __init__(self, args, task):
method compute_loss (line 22) | def compute_loss(self, model, net_output, target, reduction, log_probs):
method get_logging_output (line 45) | def get_logging_output(self, sample, target, lprobs, loss):
method forward (line 68) | def forward(self, model, sample, reduction="sum", log_probs=True):
method aggregate_logging_outputs (line 102) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/examples/speech_recognition/datasets/asr_prep_json.py
function process_sample (line 24) | def process_sample(aud_path, lable, utt_id, sp, tgt_dict):
function main (line 41) | def main():
FILE: packages/fairseq-hacked/examples/speech_recognition/infer.py
function add_asr_eval_argument (line 26) | def add_asr_eval_argument(parser):
function check_args (line 59) | def check_args(args):
function get_dataset_itr (line 70) | def get_dataset_itr(args, task):
function process_predictions (line 84) | def process_predictions(
function prepare_result_files (line 106) | def prepare_result_files(args):
function load_models_and_criterions (line 124) | def load_models_and_criterions(filenames, arg_overrides=None, task=None):
function optimize_models (line 148) | def optimize_models(args, use_cuda, models):
function main (line 162) | def main(args):
function cli_main (line 260) | def cli_main():
FILE: packages/fairseq-hacked/examples/speech_recognition/models/vggtransformer.py
class VGGTransformerModel (line 27) | class VGGTransformerModel(FairseqEncoderDecoderModel):
method __init__ (line 33) | def __init__(self, encoder, decoder):
method add_args (line 37) | def add_args(parser):
method build_encoder (line 120) | def build_encoder(cls, args, task):
method build_decoder (line 130) | def build_decoder(cls, args, task):
method build_model (line 140) | def build_model(cls, args, task):
method get_normalized_probs (line 150) | def get_normalized_probs(self, net_output, log_probs, sample=None):
function prepare_transformer_encoder_params (line 172) | def prepare_transformer_encoder_params(
function prepare_transformer_decoder_params (line 192) | def prepare_transformer_decoder_params(
class VGGTransformerEncoder (line 212) | class VGGTransformerEncoder(FairseqEncoder):
method __init__ (line 215) | def __init__(
method forward (line 321) | def forward(self, src_tokens, src_lengths, **kwargs):
method infer_conv_output_dim (line 383) | def infer_conv_output_dim(self, in_channels, input_dim):
method validate_transformer_config (line 393) | def validate_transformer_config(self, transformer_config):
method parse_transformer_context (line 404) | def parse_transformer_context(self, transformer_context):
method parse_transformer_sampling (line 438) | def parse_transformer_sampling(self, transformer_sampling, num_layers):
method slice (line 476) | def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
method lengths_to_attn_mask (line 490) | def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
method reorder_encoder_out (line 542) | def reorder_encoder_out(self, encoder_out, new_order):
class TransformerDecoder (line 553) | class TransformerDecoder(FairseqIncrementalDecoder):
method __init__ (line 567) | def __init__(
method forward (line 621) | def forward(self, prev_output_tokens, encoder_out=None, incremental_st...
method buffered_future_mask (line 692) | def buffered_future_mask(self, tensor):
method _transpose_if_training (line 708) | def _transpose_if_training(self, x, incremental_state):
method _transpose_if_inference (line 713) | def _transpose_if_inference(self, x, incremental_state):
class VGGTransformerEncoderModel (line 720) | class VGGTransformerEncoderModel(FairseqEncoderModel):
method __init__ (line 721) | def __init__(self, encoder):
method add_args (line 725) | def add_args(parser):
method build_model (line 786) | def build_model(cls, args, task):
method get_normalized_probs (line 801) | def get_normalized_probs(self, net_output, log_probs, sample=None):
class VGGTransformerEncoderOnly (line 811) | class VGGTransformerEncoderOnly(VGGTransformerEncoder):
method __init__ (line 812) | def __init__(
method forward (line 834) | def forward(self, src_tokens, src_lengths, **kwargs):
method max_positions (line 850) | def max_positions(self):
function Embedding (line 855) | def Embedding(num_embeddings, embedding_dim, padding_idx):
function Linear (line 862) | def Linear(in_features, out_features, bias=True, dropout=0):
function LinearizedConv1d (line 871) | def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, ...
function LayerNorm (line 880) | def LayerNorm(embedding_dim):
function base_architecture (line 886) | def base_architecture(args):
function vggtransformer_1 (line 905) | def vggtransformer_1(args):
function vggtransformer_2 (line 926) | def vggtransformer_2(args):
function vggtransformer_base (line 947) | def vggtransformer_base(args):
function base_architecture_enconly (line 984) | def base_architecture_enconly(args):
function vggtransformer_enc_1 (line 999) | def vggtransformer_enc_1(args):
FILE: packages/fairseq-hacked/examples/speech_recognition/models/w2l_conv_glu_enc.py
class W2lConvGluEncoderModel (line 43) | class W2lConvGluEncoderModel(FairseqEncoderModel):
method __init__ (line 44) | def __init__(self, encoder):
method add_args (line 48) | def add_args(parser):
method build_model (line 73) | def build_model(cls, args, task):
method get_normalized_probs (line 84) | def get_normalized_probs(self, net_output, log_probs, sample=None):
class W2lConvGluEncoder (line 90) | class W2lConvGluEncoder(FairseqEncoder):
method __init__ (line 91) | def __init__(
method forward (line 120) | def forward(self, src_tokens, src_lengths, **kwargs):
method reorder_encoder_out (line 156) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 165) | def max_positions(self):
function w2l_conv_glu_enc (line 171) | def w2l_conv_glu_enc(args):
FILE: packages/fairseq-hacked/examples/speech_recognition/tasks/speech_recognition.py
function get_asr_dataset_from_json (line 17) | def get_asr_dataset_from_json(data_json_path, tgt_dict):
class SpeechRecognitionTask (line 68) | class SpeechRecognitionTask(FairseqTask):
method add_args (line 74) | def add_args(parser):
method __init__ (line 81) | def __init__(self, args, tgt_dict):
method setup_task (line 86) | def setup_task(cls, args, **kwargs):
method load_dataset (line 102) | def load_dataset(self, split, combine=False, **kwargs):
method build_generator (line 111) | def build_generator(self, args):
method target_dictionary (line 125) | def target_dictionary(self):
method source_dictionary (line 131) | def source_dictionary(self):
FILE: packages/fairseq-hacked/examples/speech_recognition/utils/wer_utils.py
class Code (line 24) | class Code(Enum):
class Token (line 31) | class Token(object):
method __init__ (line 32) | def __init__(self, lbl="", st=np.nan, en=np.nan):
class AlignmentResult (line 39) | class AlignmentResult(object):
method __init__ (line 40) | def __init__(self, refs, hyps, codes, score):
function coordinate_to_offset (line 47) | def coordinate_to_offset(row, col, ncols):
function offset_to_row (line 51) | def offset_to_row(offset, ncols):
function offset_to_col (line 55) | def offset_to_col(offset, ncols):
function trimWhitespace (line 59) | def trimWhitespace(str):
function str2toks (line 63) | def str2toks(str):
class EditDistance (line 71) | class EditDistance(object):
method __init__ (line 72) | def __init__(self, time_mediated):
method cost (line 80) | def cost(self, ref, hyp, code):
method get_result (line 98) | def get_result(self, refs, hyps):
method align (line 141) | def align(self, refs, hyps):
class WERTransformer (line 205) | class WERTransformer(object):
method __init__ (line 206) | def __init__(self, hyp_str, ref_str, verbose=True):
method process (line 221) | def process(self, input): # std::vector<std::string>&& input
method report_result (line 294) | def report_result(self):
method wer (line 320) | def wer(self):
method stats (line 331) | def stats(self):
function calc_wer (line 354) | def calc_wer(hyp_str, ref_str):
function calc_wer_stats (line 359) | def calc_wer_stats(hyp_str, ref_str):
function get_wer_alignment_codes (line 364) | def get_wer_alignment_codes(hyp_str, ref_str):
function merge_counts (line 373) | def merge_counts(x, y):
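
wer_utils.py ports a C++-style WER aligner (note the `std::vector` comment preserved on WERTransformer.process). A compact sketch of the quantity it reports, word error rate as edit distance normalised by reference length; the fairseq version additionally keeps the full alignment and per-code counts:

```python
def word_error_rate(hyp, ref):
    """WER = word-level edit distance / reference length."""
    hyp, ref = hyp.split(), ref.split()
    # dp[i][j] = edits turning ref[:i] into hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # i deletions
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            dp[i][j] = min(sub, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
    return dp[-1][-1] / max(len(ref), 1)

print(word_error_rate("the cat sat", "the cat sat on the mat"))  # 0.5
```
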
FILE: packages/fairseq-hacked/examples/speech_recognition/w2l_decoder.py
class W2lDecoder (line 28) | class W2lDecoder(object):
method __init__ (line 29) | def __init__(self, args, tgt_dict):
method generate (line 48) | def generate(self, models, sample, prefix_tokens=None):
method get_emissions (line 58) | def get_emissions(self, models, encoder_input):
method get_tokens (line 67) | def get_tokens(self, idxs):
class W2lViterbiDecoder (line 78) | class W2lViterbiDecoder(W2lDecoder):
method __init__ (line 79) | def __init__(self, args, tgt_dict):
method decode (line 82) | def decode(self, emissions):
class W2lKenLMDecoder (line 106) | class W2lKenLMDecoder(W2lDecoder):
method __init__ (line 107) | def __init__(self, args, tgt_dict):
method decode (line 149) | def decode(self, emissions):
FILE: packages/fairseq-hacked/examples/translation_moe/score.py
function main (line 23) | def main():
function dictolist (line 51) | def dictolist(d):
function load_sys (line 56) | def load_sys(paths):
function load_ref (line 77) | def load_ref(path):
function merge (line 98) | def merge(src, tgt, hypos, log_probs, path):
function corpus_bleu (line 109) | def corpus_bleu(sys_stream, ref_streams):
function sentence_bleu (line 114) | def sentence_bleu(hypothesis, reference):
function pairwise (line 130) | def pairwise(sents):
function multi_ref (line 141) | def multi_ref(refs, hypos):
function intra_ref (line 178) | def intra_ref(refs):
FILE: packages/fairseq-hacked/fairseq/binarizer.py
function safe_readline (line 12) | def safe_readline(f):
class Binarizer (line 22) | class Binarizer:
method binarize (line 24) | def binarize(
method binarize_alignments (line 68) | def binarize_alignments(filename, alignment_parser, consumer, offset=0...
method find_offsets (line 84) | def find_offsets(filename, num_chunks):
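
Binarizer.find_offsets splits a corpus into byte ranges so several workers can binarize chunks in parallel, and safe_readline exists so a chunk never starts mid-line. A sketch of that offset computation, assuming boundaries only need to land on line starts:

```python
import os

def find_offsets(filename, num_chunks):
    """Split a text file into ~equal byte ranges aligned to line starts."""
    size = os.path.getsize(filename)
    offsets = [0] * (num_chunks + 1)
    with open(filename, "rb") as f:  # bytes mode sidesteps mid-character seeks
        for i in range(1, num_chunks):
            f.seek(size * i // num_chunks)
            f.readline()  # discard the partial line at the seek point
            offsets[i] = f.tell()
    offsets[num_chunks] = size
    return offsets
```
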
FILE: packages/fairseq-hacked/fairseq/bleu.py
class BleuStat (line 22) | class BleuStat(ctypes.Structure):
class SacrebleuScorer (line 37) | class SacrebleuScorer(object):
method __init__ (line 38) | def __init__(self):
method reset (line 44) | def reset(self, one_init=False):
method add_string (line 50) | def add_string(self, ref, pred):
method score (line 54) | def score(self, order=4):
method result_string (line 57) | def result_string(self, order=4):
class Scorer (line 63) | class Scorer(object):
method __init__ (line 64) | def __init__(self, pad, eos, unk):
method reset (line 71) | def reset(self, one_init=False):
method add (line 77) | def add(self, ref, pred):
method score (line 101) | def score(self, order=4):
method precision (line 107) | def precision(self):
method brevity (line 118) | def brevity(self):
method result_string (line 122) | def result_string(self, order=4):
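
bleu.py exposes both sacrebleu and a C implementation (libbleu) behind Scorer.add/score/result_string. A pure-Python sketch of the statistic being accumulated, corpus BLEU as clipped n-gram precisions under a brevity penalty; the fairseq Scorer gathers the same counts in C over padded int tensors:

```python
import math
from collections import Counter

def _ngrams(tokens, n):
    return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

def corpus_bleu(hyps, refs, max_order=4):
    """Geometric mean of clipped n-gram precisions times a brevity penalty."""
    match, total = [0] * max_order, [0] * max_order
    hyp_len = ref_len = 0
    for hyp, ref in zip(hyps, refs):
        hyp, ref = hyp.split(), ref.split()
        hyp_len, ref_len = hyp_len + len(hyp), ref_len + len(ref)
        for n in range(1, max_order + 1):
            counts, ref_counts = _ngrams(hyp, n), _ngrams(ref, n)
            match[n - 1] += sum(min(c, ref_counts[g]) for g, c in counts.items())
            total[n - 1] += max(len(hyp) - n + 1, 0)
    if min(match) == 0:
        return 0.0  # BLEU is zero when any order has no matches
    log_prec = sum(math.log(m / t) for m, t in zip(match, total)) / max_order
    bp = min(1.0, math.exp(1 - ref_len / max(hyp_len, 1)))  # brevity penalty
    return 100.0 * bp * math.exp(log_prec)

print(corpus_bleu(["the cat sat on the mat"], ["the cat sat on the mat"]))  # 100.0
```
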
FILE: packages/fairseq-hacked/fairseq/checkpoint_utils.py
function save_checkpoint (line 20) | def save_checkpoint(args, trainer, epoch_itr, val_loss):
function load_checkpoint (line 99) | def load_checkpoint(args, trainer, **passthrough_args):
function load_checkpoint_to_cpu (line 148) | def load_checkpoint_to_cpu(path, arg_overrides=None):
function load_model_ensemble (line 170) | def load_model_ensemble(filenames, arg_overrides=None, task=None):
function load_model_ensemble_and_task (line 183) | def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None):
function checkpoint_paths (line 203) | def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
function torch_persistent_save (line 222) | def torch_persistent_save(*args, **kwargs):
function convert_state_dict_type (line 231) | def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
function save_state (line 245) | def save_state(
function _upgrade_state_dict (line 293) | def _upgrade_state_dict(state):
function prune_state_dict (line 360) | def prune_state_dict(state_dict, args):
function load_pretrained_component_from_model (line 444) | def load_pretrained_component_from_model(
function verify_checkpoint_directory (line 475) | def verify_checkpoint_directory(save_dir: str) -> None:
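
checkpoint_paths is what lets save_checkpoint rotate old files: it lists checkpoints matching a pattern and orders them by the number captured from the filename, so checkpoint10.pt correctly sorts after checkpoint9.pt. A sketch, assuming an integer capture group as in the default pattern:

```python
import os
import re

def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
    """Return checkpoint files under `path` matching `pattern`, newest first."""
    matcher = re.compile(pattern)
    found = []
    for fname in os.listdir(path):
        m = matcher.fullmatch(fname)
        if m:
            found.append((int(m.group(1)), fname))  # numeric sort key
    return [os.path.join(path, f) for _, f in sorted(found, reverse=True)]
```
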
FILE: packages/fairseq-hacked/fairseq/clib/libbleu/libbleu.cpp
function bleu_ltrim (line 29) | void bleu_ltrim(size_t* len, int** sent, int pad) {
function bleu_rtrim (line 40) | void bleu_rtrim(size_t* len, int** sent, int pad, int eos) {
function bleu_trim (line 50) | void bleu_trim(size_t* len, int** sent, int pad, int eos) {
function bleu_hash (line 55) | size_t bleu_hash(int len, int* data) {
function bleu_addngram (line 69) | void bleu_addngram(
function bleu_zero_init (line 101) | void bleu_zero_init(bleu_stat* stat) {
function bleu_one_init (line 105) | void bleu_one_init(bleu_stat* stat) {
function bleu_add (line 117) | void bleu_add(
FILE: packages/fairseq-hacked/fairseq/clib/libbleu/module.cpp
type PyModuleDef (line 16) | struct PyModuleDef
FILE: packages/fairseq-hacked/fairseq/clib/libnat/edit_dist.cpp
function edit_distance2_with_dp (line 14) | vector<vector<uint32_t>> edit_distance2_with_dp(
function edit_distance2_backtracking (line 36) | vector<vector<uint32_t>> edit_distance2_backtracking(
function edit_distance2_backtracking_with_delete (line 107) | vector<vector<uint32_t>> edit_distance2_backtracking_with_delete(
function compute_ed2 (line 177) | vector<uint32_t> compute_ed2(
function suggested_ed2_path (line 188) | vector<vector<vector<uint32_t>>> suggested_ed2_path(
function suggested_ed2_path_with_delete (line 201) | vector<vector<vector<uint32_t>>> suggested_ed2_path_with_delete(
function PYBIND11_MODULE (line 215) | PYBIND11_MODULE(libnat, m) {
FILE: packages/fairseq-hacked/fairseq/criterions/adaptive_loss.py
class AdaptiveLoss (line 15) | class AdaptiveLoss(FairseqCriterion):
method __init__ (line 20) | def __init__(self, args, task):
method forward (line 30) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 80) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/binary_cross_entropy.py
class BinaryCrossEntropyCriterion (line 16) | class BinaryCrossEntropyCriterion(FairseqCriterion):
method __init__ (line 17) | def __init__(self, args, task):
method forward (line 20) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 56) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/composite_loss.py
class CompositeLoss (line 13) | class CompositeLoss(FairseqCriterion):
method add_args (line 18) | def add_args(parser):
method build_underlying_criterion (line 26) | def build_underlying_criterion(args, task):
method build_criterion (line 35) | def build_criterion(cls, args, task):
FILE: packages/fairseq-hacked/fairseq/criterions/cross_entropy.py
class CrossEntropyCriterion (line 15) | class CrossEntropyCriterion(FairseqCriterion):
method __init__ (line 16) | def __init__(self, args, task):
method forward (line 19) | def forward(self, model, sample, reduce=True):
method compute_loss (line 41) | def compute_loss(self, model, net_output, sample, reduce=True):
method aggregate_logging_outputs (line 54) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/fairseq_criterion.py
class FairseqCriterion (line 9) | class FairseqCriterion(_Loss):
method __init__ (line 10) | def __init__(self, args, task):
method add_args (line 19) | def add_args(parser):
method build_criterion (line 24) | def build_criterion(cls, args, task):
method forward (line 27) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 38) | def aggregate_logging_outputs(logging_outputs):
method grad_denom (line 43) | def grad_denom(sample_sizes):
FILE: packages/fairseq-hacked/fairseq/criterions/label_smoothed_cross_entropy.py
function label_smoothed_nll_loss (line 13) | def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, ...
class LabelSmoothedCrossEntropyCriterion (line 34) | class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
method __init__ (line 35) | def __init__(self, args, task):
method add_args (line 40) | def add_args(parser):
method forward (line 47) | def forward(self, model, sample, reduce=True):
method compute_loss (line 69) | def compute_loss(self, model, net_output, sample, reduce=True):
method aggregate_logging_outputs (line 79) | def aggregate_logging_outputs(logging_outputs):
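
label_smoothed_nll_loss is the core of this criterion: instead of putting all probability mass on the gold token, epsilon is spread uniformly over the vocabulary. A sketch of that computation, omitting the ignore_index/padding handling the fairseq version also performs:

```python
import torch
import torch.nn.functional as F

def label_smoothed_nll_loss(lprobs, target, epsilon):
    """Mix gold-token NLL with the mean NLL over the whole vocabulary."""
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)      # total NLL spread over all tokens
    eps_i = epsilon / lprobs.size(-1)      # per-token share of epsilon
    return ((1.0 - epsilon) * nll_loss + eps_i * smooth_loss).sum()

logits = torch.randn(8, 1000)              # (batch, vocab)
target = torch.randint(0, 1000, (8,))
loss = label_smoothed_nll_loss(F.log_softmax(logits, dim=-1), target, epsilon=0.1)
```
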
FILE: packages/fairseq-hacked/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py
class LabelSmoothedCrossEntropyCriterionWithAlignment (line 15) | class LabelSmoothedCrossEntropyCriterionWithAlignment(
method __init__ (line 18) | def __init__(self, args, task):
method add_args (line 23) | def add_args(parser):
method forward (line 37) | def forward(self, model, sample, reduce=True):
method compute_alignment_loss (line 70) | def compute_alignment_loss(self, sample, net_output):
method aggregate_logging_outputs (line 91) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/legacy_masked_lm.py
function compute_cross_entropy_loss (line 15) | def compute_cross_entropy_loss(logits, targets, ignore_index=-100):
class LegacyMaskedLmLoss (line 35) | class LegacyMaskedLmLoss(FairseqCriterion):
method __init__ (line 52) | def __init__(self, args, task):
method add_args (line 56) | def add_args(parser):
method forward (line 72) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 135) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/masked_lm.py
class MaskedLmLoss (line 17) | class MaskedLmLoss(FairseqCriterion):
method __init__ (line 22) | def __init__(self, args, task):
method forward (line 25) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 65) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/nat_loss.py
class LabelSmoothedDualImitationCriterion (line 17) | class LabelSmoothedDualImitationCriterion(FairseqCriterion):
method add_args (line 19) | def add_args(parser):
method _compute_loss (line 30) | def _compute_loss(
method _custom_loss (line 75) | def _custom_loss(self, loss, name="loss", factor=1.0):
method forward (line 78) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 143) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/sentence_prediction.py
class SentencePredictionCriterion (line 17) | class SentencePredictionCriterion(FairseqCriterion):
method add_args (line 19) | def add_args(parser):
method forward (line 25) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 70) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/criterions/sentence_ranking.py
class SentenceRankingCriterion (line 17) | class SentenceRankingCriterion(FairseqCriterion):
method __init__ (line 18) | def __init__(self, args, task):
method __del__ (line 25) | def __del__(self):
method add_args (line 30) | def add_args(parser):
method forward (line 36) | def forward(self, model, sample, reduce=True):
method aggregate_logging_outputs (line 88) | def aggregate_logging_outputs(logging_outputs):
FILE: packages/fairseq-hacked/fairseq/distributed_utils.py
function is_master (line 18) | def is_master(args):
function infer_init_method (line 22) | def infer_init_method(args):
function distributed_init (line 74) | def distributed_init(args):
function suppress_output (line 112) | def suppress_output(is_master):
function get_rank (line 126) | def get_rank():
function get_world_size (line 130) | def get_world_size():
function get_default_group (line 134) | def get_default_group():
function all_reduce (line 138) | def all_reduce(tensor, group=None):
function all_gather_list (line 144) | def all_gather_list(data, group=None, max_size=16384):
FILE: packages/fairseq-hacked/fairseq/file_utils.py
function load_archive_file (line 54) | def load_archive_file(archive_file):
function url_to_filename (line 95) | def url_to_filename(url, etag=None):
function filename_to_url (line 113) | def filename_to_url(filename, cache_dir=None):
function cached_path (line 139) | def cached_path(url_or_filename, cache_dir=None):
function split_s3_path (line 171) | def split_s3_path(url):
function s3_request (line 184) | def s3_request(func):
function s3_etag (line 206) | def s3_etag(url):
function s3_get (line 217) | def s3_get(url, temp_file):
function http_get (line 226) | def http_get(url, temp_file):
function get_from_cache (line 241) | def get_from_cache(url, cache_dir=None):
function read_set_from_file (line 315) | def read_set_from_file(filename):
function get_file_extension (line 327) | def get_file_extension(path, dot=True, lower=True):
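
file_utils.py implements a download cache keyed by URL and ETag, so a changed remote resource gets a fresh cache entry while an unchanged one is reused. A sketch of the url_to_filename mapping, assuming plain SHA-256 hex digests are an acceptable key:

```python
import hashlib

def url_to_filename(url, etag=None):
    """Map a resource URL (plus optional ETag) to a stable cache filename."""
    filename = hashlib.sha256(url.encode("utf-8")).hexdigest()
    if etag is not None:
        # a changed ETag yields a different suffix, invalidating stale copies
        filename += "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    return filename

print(url_to_filename("https://example.com/model.pt", etag='"abc123"'))
```
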
FILE: packages/fairseq-hacked/fairseq/hub_utils.py
function from_pretrained (line 18) | def from_pretrained(
class GeneratorHubInterface (line 77) | class GeneratorHubInterface(nn.Module):
method __init__ (line 83) | def __init__(self, args, task, models):
method device (line 113) | def device(self):
method translate (line 116) | def translate(
method sample (line 121) | def sample(
method generate (line 128) | def generate(
method encode (line 179) | def encode(self, sentence: str) -> torch.LongTensor:
method decode (line 184) | def decode(self, tokens: torch.LongTensor) -> str:
method tokenize (line 189) | def tokenize(self, sentence: str) -> str:
method detokenize (line 194) | def detokenize(self, sentence: str) -> str:
method apply_bpe (line 199) | def apply_bpe(self, sentence: str) -> str:
method remove_bpe (line 204) | def remove_bpe(self, sentence: str) -> str:
method binarize (line 209) | def binarize(self, sentence: str) -> torch.LongTensor:
method string (line 212) | def string(self, tokens: torch.LongTensor) -> str:
method _build_sample (line 215) | def _build_sample(self, src_tokens: torch.LongTensor):
class BPEHubInterface (line 225) | class BPEHubInterface(object):
method __init__ (line 228) | def __init__(self, bpe, **kwargs):
method encode (line 234) | def encode(self, sentence: str) -> str:
method decode (line 237) | def decode(self, sentence: str) -> str:
class TokenizerHubInterface (line 241) | class TokenizerHubInterface(object):
method __init__ (line 244) | def __init__(self, tokenizer, **kwargs):
method encode (line 250) | def encode(self, sentence: str) -> str:
method decode (line 253) | def decode(self, sentence: str) -> str:
FILE: packages/fairseq-hacked/fairseq/iterative_refinement_generator.py
class IterativeRefinementGenerator (line 19) | class IterativeRefinementGenerator(object):
method __init__ (line 20) | def __init__(
method generate_batched_itr (line 58) | def generate_batched_itr(
method generate (line 98) | def generate(self, models, sample, prefix_tokens=None):
FILE: packages/fairseq-hacked/fairseq/legacy_distributed_data_parallel.py
class LegacyDistributedDataParallel (line 27) | class LegacyDistributedDataParallel(nn.Module):
method __init__ (line 44) | def __init__(self, module, world_size, process_group=None, buffer_size...
method __getstate__ (line 69) | def __getstate__(self):
method __setstate__ (line 73) | def __setstate__(self, state):
method no_sync (line 78) | def no_sync(self):
method forward (line 85) | def forward(self, *inputs, **kwargs):
method _register_grad_hook (line 88) | def _register_grad_hook(self):
FILE: packages/fairseq-hacked/fairseq/meters.py
class AverageMeter (line 9) | class AverageMeter(object):
method __init__ (line 12) | def __init__(self):
method reset (line 15) | def reset(self):
method update (line 21) | def update(self, val, n=1):
class TimeMeter (line 28) | class TimeMeter(object):
method __init__ (line 31) | def __init__(self, init=0):
method reset (line 34) | def reset(self, init=0):
method update (line 39) | def update(self, val=1):
method avg (line 43) | def avg(self):
method elapsed_time (line 47) | def elapsed_time(self):
class StopwatchMeter (line 51) | class StopwatchMeter(object):
method __init__ (line 54) | def __init__(self):
method start (line 57) | def start(self):
method stop (line 60) | def stop(self, n=1):
method reset (line 67) | def reset(self):
method avg (line 73) | def avg(self):
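
meters.py holds the small counters the trainer logs with. A sketch of the simplest one, AverageMeter, which keeps a running weighted mean of a scalar such as loss:

```python
class AverageMeter:
    """Running weighted average of a scalar (loss, accuracy, ...)."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val            # most recent value
        self.sum += val * n       # value observed n times
        self.count += n
        self.avg = self.sum / self.count

meter = AverageMeter()
for loss in (0.9, 0.7, 0.5):
    meter.update(loss)
print(round(meter.avg, 2))  # 0.7
```
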
FILE: packages/fairseq-hacked/fairseq/models/__init__.py
function build_model (line 47) | def build_model(args, task):
function register_model (line 51) | def register_model(name):
function register_model_architecture (line 84) | def register_model_architecture(model_name, arch_name):
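
models/__init__.py is a registry: @register_model binds a model class to a command-line name, and @register_model_architecture binds a function that fills in hyperparameter defaults for a named preset. A minimal sketch of that decorator pattern; the ToyModel/toy_base names are hypothetical illustrations:

```python
MODEL_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}

def register_model(name):
    """Class decorator recording a model class under a string name."""
    def wrapper(cls):
        if name in MODEL_REGISTRY:
            raise ValueError(f"model {name!r} already registered")
        MODEL_REGISTRY[name] = cls
        return cls
    return wrapper

def register_model_architecture(model_name, arch_name):
    """Function decorator recording a hyperparameter preset for a model."""
    def wrapper(fn):
        if model_name not in MODEL_REGISTRY:
            raise ValueError(f"unknown model {model_name!r}")
        ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn
    return wrapper

@register_model("toy")
class ToyModel:
    pass

@register_model_architecture("toy", "toy_base")
def toy_base(args):
    # fairseq-style defaults: only set what the user did not override
    args.hidden = getattr(args, "hidden", 256)
```
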
FILE: packages/fairseq-hacked/fairseq/models/bart/hub_interface.py
class BARTHubInterface (line 22) | class BARTHubInterface(nn.Module):
method __init__ (line 28) | def __init__(self, args, task, model):
method device (line 43) | def device(self):
method encode (line 46) | def encode(
method decode (line 78) | def decode(self, tokens: torch.LongTensor):
method _build_sample (line 93) | def _build_sample(self, src_tokens: List[torch.LongTensor]):
method sample (line 102) | def sample(
method generate (line 109) | def generate(
method extract_features (line 145) | def extract_features(
method register_classification_head (line 179) | def register_classification_head(
method predict (line 186) | def predict(self, head: str, tokens: torch.LongTensor, return_logits: ...
FILE: packages/fairseq-hacked/fairseq/models/bart/model.py
class BARTModel (line 29) | class BARTModel(TransformerModel):
method hub_models (line 31) | def hub_models(cls):
method __init__ (line 36) | def __init__(self, args, encoder, decoder):
method add_args (line 45) | def add_args(parser):
method supported_targets (line 60) | def supported_targets(self):
method forward (line 63) | def forward(
method from_pretrained (line 93) | def from_pretrained(
method register_classification_head (line 119) | def register_classification_head(
method upgrade_state_dict_named (line 142) | def upgrade_state_dict_named(self, state_dict, name):
class BARTClassificationHead (line 216) | class BARTClassificationHead(nn.Module):
method __init__ (line 219) | def __init__(
method forward (line 228) | def forward(self, features, **kwargs):
function bart_large_architecture (line 239) | def bart_large_architecture(args):
FILE: packages/fairseq-hacked/fairseq/models/cmlm_transformer.py
function _skeptical_unmasking (line 18) | def _skeptical_unmasking(output_scores, output_masks, p):
class CMLMNATransformerModel (line 28) | class CMLMNATransformerModel(NATransformerModel):
method add_args (line 30) | def add_args(parser):
method forward (line 33) | def forward(
method forward_decoder (line 63) | def forward_decoder(self, decoder_out, encoder_out, decoding_format=No...
function base_architecture (line 104) | def base_architecture(args):
function iter_nat_wmt_en_de (line 151) | def iter_nat_wmt_en_de(args):
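
_skeptical_unmasking implements the mask-predict refinement step of the CMLM: after each decoding pass, the least confident fraction p of predictions is re-masked and predicted again. A sketch of that selection, assuming (batch, len) tensors of token scores and re-masking eligibility; the fairseq version operates on its decoder_out structure:

```python
import torch

def skeptical_unmasking(scores, candidate_mask, p):
    """Return a mask over the fraction `p` of lowest-scoring eligible positions."""
    # number of tokens to re-mask per sentence
    num_remask = (candidate_mask.sum(dim=1, keepdim=True).float() * p).long()
    # rank eligible positions by confidence; ineligible ones sort last
    masked_scores = scores.masked_fill(~candidate_mask, float("inf"))
    sorted_idx = masked_scores.sort(dim=1).indices
    ranks = torch.arange(scores.size(1)).expand_as(scores)
    remask_by_rank = ranks < num_remask
    # scatter rank-based decisions back to token positions
    return torch.zeros_like(candidate_mask).scatter(1, sorted_idx, remask_by_rank)

scores = torch.tensor([[0.9, 0.1, 0.5, 0.3]]).log()
mask = torch.ones(1, 4, dtype=torch.bool)
print(skeptical_unmasking(scores, mask, 0.5))  # re-masks the two least confident
```
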
FILE: packages/fairseq-hacked/fairseq/models/composite_encoder.py
class CompositeEncoder (line 9) | class CompositeEncoder(FairseqEncoder):
method __init__ (line 20) | def __init__(self, encoders):
method forward (line 26) | def forward(self, src_tokens, src_lengths):
method reorder_encoder_out (line 43) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 51) | def max_positions(self):
method upgrade_state_dict (line 54) | def upgrade_state_dict(self, state_dict):
FILE: packages/fairseq-hacked/fairseq/models/distributed_fairseq_model.py
function DistributedFairseqModel (line 14) | def DistributedFairseqModel(args, model):
FILE: packages/fairseq-hacked/fairseq/models/fairseq_decoder.py
class FairseqDecoder (line 11) | class FairseqDecoder(nn.Module):
method __init__ (line 14) | def __init__(self, dictionary):
method forward (line 19) | def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
method extract_features (line 38) | def extract_features(self, prev_output_tokens, encoder_out=None, **kwa...
method output_layer (line 47) | def output_layer(self, features, **kwargs):
method get_normalized_probs (line 56) | def get_normalized_probs(self, net_output, log_probs, sample):
method max_positions (line 74) | def max_positions(self):
method upgrade_state_dict (line 78) | def upgrade_state_dict(self, state_dict):
method prepare_for_onnx_export_ (line 82) | def prepare_for_onnx_export_(self):
FILE: packages/fairseq-hacked/fairseq/models/fairseq_encoder.py
class FairseqEncoder (line 9) | class FairseqEncoder(nn.Module):
method __init__ (line 12) | def __init__(self, dictionary):
method forward (line 16) | def forward(self, src_tokens, src_lengths=None, **kwargs):
method reorder_encoder_out (line 26) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 39) | def max_positions(self):
method upgrade_state_dict (line 43) | def upgrade_state_dict(self, state_dict):
FILE: packages/fairseq-hacked/fairseq/models/fairseq_incremental_decoder.py
class FairseqIncrementalDecoder (line 9) | class FairseqIncrementalDecoder(FairseqDecoder):
method __init__ (line 31) | def __init__(self, dictionary):
method forward (line 34) | def forward(
method extract_features (line 53) | def extract_features(
method reorder_incremental_state (line 64) | def reorder_incremental_state(self, incremental_state, new_order):
method set_beam_size (line 84) | def set_beam_size(self, beam_size):
FILE: packages/fairseq-hacked/fairseq/models/fairseq_model.py
class BaseFairseqModel (line 21) | class BaseFairseqModel(nn.Module):
method __init__ (line 24) | def __init__(self):
method add_args (line 29) | def add_args(parser):
method build_model (line 34) | def build_model(cls, args, task):
method get_targets (line 38) | def get_targets(self, sample, net_output):
method get_normalized_probs (line 42) | def get_normalized_probs(self, net_output, log_probs, sample=None):
method extract_features (line 54) | def extract_features(self, *args, **kwargs):
method max_positions (line 58) | def max_positions(self):
method load_state_dict (line 62) | def load_state_dict(self, state_dict, strict=True, args=None):
method upgrade_state_dict (line 73) | def upgrade_state_dict(self, state_dict):
method upgrade_state_dict_named (line 77) | def upgrade_state_dict_named(self, state_dict, name):
method make_generation_fast_ (line 100) | def make_generation_fast_(self, **kwargs):
method prepare_for_onnx_export_ (line 136) | def prepare_for_onnx_export_(self, **kwargs):
method from_pretrained (line 152) | def from_pretrained(
method hub_models (line 193) | def hub_models(cls):
class FairseqEncoderDecoderModel (line 197) | class FairseqEncoderDecoderModel(BaseFairseqModel):
method __init__ (line 205) | def __init__(self, encoder, decoder):
method forward (line 213) | def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
method forward_decoder (line 242) | def forward_decoder(self, prev_output_tokens, **kwargs):
method extract_features (line 245) | def extract_features(self, src_tokens, src_lengths, prev_output_tokens...
method output_layer (line 260) | def output_layer(self, features, **kwargs):
method max_positions (line 264) | def max_positions(self):
method max_decoder_positions (line 268) | def max_decoder_positions(self):
class FairseqModel (line 273) | class FairseqModel(FairseqEncoderDecoderModel):
method __init__ (line 274) | def __init__(self, *args, **kwargs):
class FairseqMultiModel (line 283) | class FairseqMultiModel(BaseFairseqModel):
method __init__ (line 286) | def __init__(self, encoders, decoders):
method build_shared_embeddings (line 299) | def build_shared_embeddings(
method forward (line 328) | def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
method max_positions (line 337) | def max_positions(self):
method max_decoder_positions (line 347) | def max_decoder_positions(self):
method encoder (line 352) | def encoder(self):
method decoder (line 356) | def decoder(self):
class FairseqLanguageModel (line 360) | class FairseqLanguageModel(BaseFairseqModel):
method __init__ (line 367) | def __init__(self, decoder):
method forward (line 372) | def forward(self, src_tokens, **kwargs):
method forward_decoder (line 390) | def forward_decoder(self, prev_output_tokens, **kwargs):
method extract_features (line 393) | def extract_features(self, src_tokens, **kwargs):
method output_layer (line 404) | def output_layer(self, features, **kwargs):
method max_positions (line 408) | def max_positions(self):
method max_decoder_positions (line 412) | def max_decoder_positions(self):
method supported_targets (line 417) | def supported_targets(self):
class FairseqEncoderModel (line 421) | class FairseqEncoderModel(BaseFairseqModel):
method __init__ (line 428) | def __init__(self, encoder):
method forward (line 433) | def forward(self, src_tokens, src_lengths, **kwargs):
method get_normalized_probs (line 448) | def get_normalized_probs(self, net_output, log_probs, sample=None):
method max_positions (line 459) | def max_positions(self):
FILE: packages/fairseq-hacked/fairseq/models/fconv.py
class FConvModel (line 29) | class FConvModel(FairseqEncoderDecoderModel):
method hub_models (line 48) | def hub_models(cls):
method __init__ (line 68) | def __init__(self, encoder, decoder):
method add_args (line 75) | def add_args(parser):
method build_model (line 103) | def build_model(cls, args, task):
class FConvEncoder (line 140) | class FConvEncoder(FairseqEncoder):
method __init__ (line 158) | def __init__(
method forward (line 219) | def forward(self, src_tokens, src_lengths):
method reorder_encoder_out (line 303) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 315) | def max_positions(self):
class AttentionLayer (line 320) | class AttentionLayer(nn.Module):
method __init__ (line 321) | def __init__(self, conv_channels, embed_dim, bmm=None):
method forward (line 330) | def forward(self, x, target_embedding, encoder_out, encoder_padding_ma...
method make_generation_fast_ (line 368) | def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):
class FConvDecoder (line 375) | class FConvDecoder(FairseqIncrementalDecoder):
method __init__ (line 378) | def __init__(
method forward (line 479) | def forward(
method reorder_incremental_state (line 561) | def reorder_incremental_state(self, incremental_state, new_order):
method max_positions (line 572) | def max_positions(self):
method upgrade_state_dict (line 580) | def upgrade_state_dict(self, state_dict):
method make_generation_fast_ (line 590) | def make_generation_fast_(self, need_attn=False, **kwargs):
method _embed_tokens (line 593) | def _embed_tokens(self, tokens, incremental_state):
method _split_encoder_out (line 599) | def _split_encoder_out(self, encoder_out, incremental_state):
method _transpose_if_training (line 619) | def _transpose_if_training(self, x, incremental_state):
function extend_conv_spec (line 625) | def extend_conv_spec(convolutions):
function Embedding (line 646) | def Embedding(num_embeddings, embedding_dim, padding_idx):
function PositionalEmbedding (line 653) | def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
function Linear (line 660) | def Linear(in_features, out_features, dropout=0):
function LinearizedConv1d (line 668) | def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, ...
function ConvTBC (line 677) | def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
function base_architecture (line 689) | def base_architecture(args):
function fconv_iwslt_de_en (line 703) | def fconv_iwslt_de_en(args):
function fconv_wmt_en_ro (line 713) | def fconv_wmt_en_ro(args):
function fconv_wmt_en_de (line 719) | def fconv_wmt_en_de(args):
function fconv_wmt_en_fr (line 733) | def fconv_wmt_en_fr(args):
FILE: packages/fairseq-hacked/fairseq/models/fconv_lm.py
class FConvLanguageModel (line 16) | class FConvLanguageModel(FairseqLanguageModel):
method __init__ (line 17) | def __init__(self, decoder):
method add_args (line 21) | def add_args(parser):
method build_model (line 64) | def build_model(cls, args, task):
function base_lm_architecture (line 95) | def base_lm_architecture(args):
function fconv_lm_dauphin_wikitext103 (line 105) | def fconv_lm_dauphin_wikitext103(args):
function fconv_lm_dauphin_gbw (line 123) | def fconv_lm_dauphin_gbw(args):
FILE: packages/fairseq-hacked/fairseq/models/fconv_self_att.py
class FConvModelSelfAtt (line 32) | class FConvModelSelfAtt(FairseqEncoderDecoderModel):
method hub_models (line 34) | def hub_models(cls):
method __init__ (line 52) | def __init__(self, encoder, decoder, pretrained_encoder=None):
method add_args (line 67) | def add_args(parser):
method build_model (line 107) | def build_model(cls, args, task):
method pretrained (line 163) | def pretrained(self):
class FConvEncoder (line 167) | class FConvEncoder(FairseqEncoder):
method __init__ (line 170) | def __init__(
method forward (line 224) | def forward(self, src_tokens, src_lengths):
method reorder_encoder_out (line 280) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 298) | def max_positions(self):
class FConvDecoder (line 303) | class FConvDecoder(FairseqDecoder):
method __init__ (line 306) | def __init__(
method forward (line 445) | def forward(self, prev_output_tokens, encoder_out):
method max_positions (line 524) | def max_positions(self):
method make_generation_fast_ (line 528) | def make_generation_fast_(self, need_attn=False, **kwargs):
method _split_encoder_out (line 531) | def _split_encoder_out(self, encoder_out):
class SelfAttention (line 541) | class SelfAttention(nn.Module):
method __init__ (line 542) | def __init__(
method forward (line 567) | def forward(self, x):
function Embedding (line 578) | def Embedding(num_embeddings, embedding_dim, padding_idx):
function PositionalEmbedding (line 584) | def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
function Linear (line 590) | def Linear(in_features, out_features, dropout=0.0):
function LinearizedConv1d (line 598) | def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0...
function ConvTBC (line 607) | def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
function base_architecture (line 619) | def base_architecture(args):
function fconv_self_att_wp (line 642) | def fconv_self_att_wp(args):
FILE: packages/fairseq-hacked/fairseq/models/insertion_transformer.py
class NegativeDistanceScore (line 20) | class NegativeDistanceScore(object):
method __init__ (line 21) | def __init__(self):
method __call__ (line 30) | def __call__(self, i, L, tau):
method compute_score (line 39) | def compute_score(self, L, tau):
method compute_score_full (line 44) | def compute_score_full(self, L, tau):
function _get_ins_targets (line 54) | def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_...
function _apply_ins_words (line 101) | def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_score...
class InsertionTransformerModel (line 120) | class InsertionTransformerModel(LevenshteinTransformerModel):
method __init__ (line 121) | def __init__(self, args, encoder, decoder):
method add_args (line 125) | def add_args(parser):
method build_decoder (line 135) | def build_decoder(cls, args, tgt_dict, embed_tokens):
method forward (line 141) | def forward(
method forward_decoder (line 174) | def forward_decoder(
class InsertionTransformerDecoder (line 210) | class InsertionTransformerDecoder(LevenshteinTransformerDecoder):
method __init__ (line 211) | def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=Fal...
method forward_word_ins (line 225) | def forward_word_ins(self, prev_output_tokens, encoder_out=None):
method forward_mask_ins (line 232) | def forward_mask_ins(self, *args, **kwargs):
method forward_word_del (line 235) | def forward_word_del(self, *args, **kwargs):
method forward_word_del_mask_ins (line 238) | def forward_word_del_mask_ins(self, *args, **kwargs):
function base_architecture (line 243) | def base_architecture(args):
FILE: packages/fairseq-hacked/fairseq/models/iterative_nonautoregressive_transformer.py
function _sequential_poisoning (line 12) | def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
function gumbel_noise (line 46) | def gumbel_noise(input, TINY=1e-8):
class IterNATransformerModel (line 60) | class IterNATransformerModel(NATransformerModel):
method add_args (line 62) | def add_args(parser):
method build_model (line 81) | def build_model(cls, args, task):
method forward (line 88) | def forward(
function base_architecture (line 165) | def base_architecture(args):
function iter_nat_wmt_en_de (line 219) | def iter_nat_wmt_en_de(args):
FILE: packages/fairseq-hacked/fairseq/models/levenshtein_transformer.py
function _skip (line 23) | def _skip(x, mask):
function _skip_encoder_out (line 48) | def _skip_encoder_out(encoder, encoder_out, mask):
function _fill (line 55) | def _fill(x, mask, y, padding_idx):
function load_libnat (line 86) | def load_libnat():
function _get_ins_targets (line 97) | def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
function _get_del_targets (line 137) | def _get_del_targets(in_tokens, out_tokens, padding_idx):
function _get_del_ins_targets (line 165) | def _get_del_ins_targets(in_tokens, out_tokens, padding_idx):
function _apply_ins_masks (line 203) | def _apply_ins_masks(
function _apply_ins_words (line 237) | def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_score...
function _apply_del_words (line 251) | def _apply_del_words(
class LevenshteinTransformerModel (line 283) | class LevenshteinTransformerModel(TransformerModel):
method __init__ (line 284) | def __init__(self, args, encoder, decoder):
method add_args (line 293) | def add_args(parser):
method build_decoder (line 328) | def build_decoder(cls, args, tgt_dict, embed_tokens):
method build_encoder (line 335) | def build_encoder(cls, args, src_dict, embed_tokens):
method forward (line 341) | def forward(
method forward_encoder (line 402) | def forward_encoder(self, encoder_inputs):
method forward_decoder (line 405) | def forward_decoder(
method initialize_output_tokens (line 517) | def initialize_output_tokens(self, encoder_out, src_tokens):
class LevenshteinTransformerDecoder (line 535) | class LevenshteinTransformerDecoder(TransformerDecoder):
method __init__ (line 536) | def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=Fal...
method extract_features (line 576) | def extract_features(
method forward_mask_ins (line 642) | def forward_mask_ins(self, prev_output_tokens, encoder_out=None, **unu...
method forward_word_ins (line 653) | def forward_word_ins(self, prev_output_tokens, encoder_out=None, **unu...
method forward_word_del (line 663) | def forward_word_del(self, prev_output_tokens, encoder_out=None, **unu...
function base_architecture (line 675) | def base_architecture(args):
function levenshtein_transformer_wmt_en_de (line 725) | def levenshtein_transformer_wmt_en_de(args):
function levenshtein_transformer_vaswani_wmt_en_de_big (line 733) | def levenshtein_transformer_vaswani_wmt_en_de_big(args):
function levenshtein_transformer_wmt_en_de_big_t2t (line 749) | def levenshtein_transformer_wmt_en_de_big_t2t(args):
FILE: packages/fairseq-hacked/fairseq/models/lightconv.py
class LightConvModel (line 31) | class LightConvModel(FairseqEncoderDecoderModel):
method __init__ (line 50) | def __init__(self, encoder, decoder):
method add_args (line 54) | def add_args(parser):
method build_model (line 227) | def build_model(cls, args, task):
class LightConvEncoder (line 283) | class LightConvEncoder(FairseqEncoder):
method __init__ (line 294) | def __init__(self, args, dictionary, embed_tokens):
method forward (line 329) | def forward(self, src_tokens, **unused):
method reorder_encoder_out (line 368) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 389) | def max_positions(self):
class LightConvDecoder (line 396) | class LightConvDecoder(FairseqIncrementalDecoder):
method __init__ (line 409) | def __init__(
method forward (line 481) | def forward(
method max_positions (line 560) | def max_positions(self):
method buffered_future_mask (line 566) | def buffered_future_mask(self, tensor):
class LightConvEncoderLayer (line 583) | class LightConvEncoderLayer(nn.Module):
method __init__ (line 591) | def __init__(self, args, kernel_size=0):
method forward (line 637) | def forward(self, x, encoder_padding_mask):
method maybe_layer_norm (line 671) | def maybe_layer_norm(self, i, x, before=False, after=False):
method extra_repr (line 678) | def extra_repr(self):
class LightConvDecoderLayer (line 684) | class LightConvDecoderLayer(nn.Module):
method __init__ (line 694) | def __init__(self, args, no_encoder_attn=False, kernel_size=0):
method forward (line 751) | def forward(
method maybe_layer_norm (line 820) | def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
method make_generation_fast_ (line 827) | def make_generation_fast_(self, need_attn=False, **kwargs):
method extra_repr (line 830) | def extra_repr(self):
function Embedding (line 836) | def Embedding(num_embeddings, embedding_dim, padding_idx):
function Linear (line 843) | def Linear(in_features, out_features, bias=True):
function base_architecture (line 852) | def base_architecture(args):
function lightconv_iwslt_de_en (line 917) | def lightconv_iwslt_de_en(args):
function lightconv_wmt_en_de (line 935) | def lightconv_wmt_en_de(args):
function lightconv_wmt_en_de_big (line 940) | def lightconv_wmt_en_de_big(args):
function lightconv_wmt_en_fr_big (line 954) | def lightconv_wmt_en_fr_big(args):
function lightconv_wmt_zh_en_big (line 960) | def lightconv_wmt_zh_en_big(args):
FILE: packages/fairseq-hacked/fairseq/models/lightconv_lm.py
class LightConvLanguageModel (line 23) | class LightConvLanguageModel(FairseqLanguageModel):
method __init__ (line 24) | def __init__(self, decoder):
method add_args (line 28) | def add_args(parser):
method build_model (line 208) | def build_model(cls, args, task):
function base_lm_architecture (line 262) | def base_lm_architecture(args):
function lightconv_lm_gbw (line 306) | def lightconv_lm_gbw(args):
FILE: packages/fairseq-hacked/fairseq/models/lstm.py
class LSTMModel (line 22) | class LSTMModel(FairseqEncoderDecoderModel):
method __init__ (line 23) | def __init__(self, encoder, decoder):
method add_args (line 27) | def add_args(parser):
method build_model (line 80) | def build_model(cls, args, task):
class LSTMEncoder (line 177) | class LSTMEncoder(FairseqEncoder):
method __init__ (line 180) | def __init__(
method forward (line 221) | def forward(self, src_tokens, src_lengths):
method reorder_encoder_out (line 277) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 287) | def max_positions(self):
class AttentionLayer (line 292) | class AttentionLayer(nn.Module):
method __init__ (line 293) | def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim...
method forward (line 301) | def forward(self, input, source_hids, encoder_padding_mask):
class LSTMDecoder (line 328) | class LSTMDecoder(FairseqIncrementalDecoder):
method __init__ (line 331) | def __init__(
method forward (line 396) | def forward(self, prev_output_tokens, encoder_out, incremental_state=N...
method extract_features (line 402) | def extract_features(self, prev_output_tokens, encoder_out, incrementa...
method output_layer (line 496) | def output_layer(self, x):
method reorder_incremental_state (line 505) | def reorder_incremental_state(self, incremental_state, new_order):
method max_positions (line 521) | def max_positions(self):
method make_generation_fast_ (line 525) | def make_generation_fast_(self, need_attn=False, **kwargs):
function Embedding (line 529) | def Embedding(num_embeddings, embedding_dim, padding_idx):
function LSTM (line 536) | def LSTM(input_size, hidden_size, **kwargs):
function LSTMCell (line 544) | def LSTMCell(input_size, hidden_size, **kwargs):
function Linear (line 552) | def Linear(in_features, out_features, bias=True, dropout=0):
function base_architecture (line 562) | def base_architecture(args):
function lstm_wiseman_iwslt_de_en (line 595) | def lstm_wiseman_iwslt_de_en(args):
function lstm_luong_wmt_en_de (line 608) | def lstm_luong_wmt_en_de(args):
FILE: packages/fairseq-hacked/fairseq/models/masked_lm.py
class MaskedLMModel (line 26) | class MaskedLMModel(BaseFairseqModel):
method __init__ (line 32) | def __init__(self, args, encoder):
method add_args (line 44) | def add_args(parser):
method forward (line 149) | def forward(self, src_tokens, segment_labels=None, **kwargs):
method max_positions (line 152) | def max_positions(self):
method build_model (line 156) | def build_model(cls, args, task):
class MaskedLMEncoder (line 171) | class MaskedLMEncoder(FairseqEncoder):
method __init__ (line 176) | def __init__(self, args, dictionary):
method forward (line 238) | def forward(self, src_tokens, segment_labels=None, **unused):
method max_positions (line 294) | def max_positions(self):
method upgrade_state_dict_named (line 298) | def upgrade_state_dict_named(self, state_dict, name):
function base_architecture (line 317) | def base_architecture(args):
function bert_base_architecture (line 349) | def bert_base_architecture(args):
function bert_large_architecture (line 379) | def bert_large_architecture(args):
function xlm_architecture (line 388) | def xlm_architecture(args):
FILE: packages/fairseq-hacked/fairseq/models/model_utils.py
function script_skip_tensor_list (line 13) | def script_skip_tensor_list(x: List[Tensor], mask):
function script_skip_tensor (line 25) | def script_skip_tensor(x: Tensor, mask):
function expand_2d_or_3d_tensor (line 37) | def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int):
function coalesce (line 58) | def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor:
function fill_tensors (line 63) | def fill_tensors(
FILE: packages/fairseq-hacked/fairseq/models/multilingual_transformer.py
class MultilingualTransformerModel (line 24) | class MultilingualTransformerModel(FairseqMultiModel):
method __init__ (line 40) | def __init__(self, encoders, decoders):
method add_args (line 44) | def add_args(parser):
method build_model (line 69) | def build_model(cls, args, task):
method load_state_dict (line 192) | def load_state_dict(self, state_dict, strict=True):
function base_multilingual_architecture (line 203) | def base_multilingual_architecture(args):
function multilingual_transformer_iwslt_de_en (line 214) | def multilingual_transformer_iwslt_de_en(args):
FILE: packages/fairseq-hacked/fairseq/models/nonautoregressive_ensembles.py
class _EnsembleModelEncoder (line 21) | class _EnsembleModelEncoder(object):
method __init__ (line 22) | def __init__(self, models):
method reorder_encoder_out (line 25) | def reorder_encoder_out(self, encoder_outs, new_order):
class BasicEnsembleModel (line 33) | class BasicEnsembleModel(torch.nn.Module):
method __init__ (line 36) | def __init__(self, models):
method has_encoder (line 45) | def has_encoder(self):
method max_decoder_positions (line 48) | def max_decoder_positions(self):
method forward_encoder (line 52) | def forward_encoder(self, encoder_input):
method forward_decoder (line 58) | def forward_decoder(self, *inputs):
method initialize_output_tokens (line 61) | def initialize_output_tokens(self, *inputs):
class EnsembleLevT (line 65) | class EnsembleLevT(BasicEnsembleModel):
method __init__ (line 68) | def __init__(self, models):
method forward_decoder (line 72) | def forward_decoder(
method forward_word_del (line 136) | def forward_word_del(
method forward_mask_ins (line 172) | def forward_mask_ins(
method forward_word_ins (line 210) | def forward_word_ins(
method initialize_output_tokens (line 245) | def initialize_output_tokens(self, encoder_outs, src_tokens):
FILE: packages/fairseq-hacked/fairseq/models/nonautoregressive_transformer.py
function _mean_pooling (line 21) | def _mean_pooling(enc_feats, src_masks):
function _argmax (line 34) | def _argmax(x, dim):
function _uniform_assignment (line 38) | def _uniform_assignment(src_lens, trg_lens):
class NATransformerModel (line 49) | class NATransformerModel(TransformerModel):
method __init__ (line 50) | def __init__(self, args, encoder, decoder):
method add_args (line 59) | def add_args(parser):
method build_decoder (line 90) | def build_decoder(cls, args, tgt_dict, embed_tokens):
method build_encoder (line 97) | def build_encoder(cls, args, src_dict, embed_tokens):
method forward (line 103) | def forward(
method forward_encoder (line 131) | def forward_encoder(self, encoder_inputs):
method forward_decoder (line 134) | def forward_decoder(self, decoder_out, encoder_out, decoding_format=No...
method initialize_output_tokens (line 160) | def initialize_output_tokens(self, encoder_out, src_tokens):
class NATransformerDecoder (line 189) | class NATransformerDecoder(TransformerDecoder):
method __init__ (line 190) | def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=Fal...
method forward (line 207) | def forward(
method extract_features (line 231) | def extract_features(
method forward_embedding (line 305) | def forward_embedding(self, prev_output_tokens, states=None):
method forward_copying_source (line 327) | def forward_copying_source(self, src_embeds, src_masks, tgt_masks):
method forward_length_prediction (line 342) | def forward_length_prediction(self, encoder_out, tgt_tokens=None):
function base_architecture (line 385) | def base_architecture(args):
function nonautoregressive_transformer_wmt_en_de (line 433) | def nonautoregressive_transformer_wmt_en_de(args):
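
_uniform_assignment seeds the non-autoregressive decoder's inputs by copying source embeddings: target position j reads from source position round(j * (src_len - 1) / (trg_len - 1)). A sketch of that index computation for a single sentence pair; the fairseq version is batched over length tensors:

```python
import torch

def uniform_assignment(src_len, trg_len):
    """Map each target position to a uniformly spaced source position."""
    steps = (src_len - 1) / max(trg_len - 1, 1)
    return torch.round(torch.arange(trg_len) * steps).long()

print(uniform_assignment(src_len=4, trg_len=7).tolist())
# [0, 0, 1, 2, 2, 2, 3] -- each source token is read roughly trg/src times
# (torch.round sends exact halves to the nearest even integer)
```
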
FILE: packages/fairseq-hacked/fairseq/models/roberta/alignment_utils.py
function align_bpe_to_words (line 12) | def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_toke...
function align_features_to_words (line 71) | def align_features_to_words(roberta, features, alignment):
function spacy_nlp (line 100) | def spacy_nlp():
function spacy_tokenizer (line 111) | def spacy_tokenizer():
FILE: packages/fairseq-hacked/fairseq/models/roberta/hub_interface.py
class RobertaHubInterface (line 15) | class RobertaHubInterface(nn.Module):
method __init__ (line 21) | def __init__(self, args, task, model):
method device (line 33) | def device(self):
method encode (line 36) | def encode(
method decode (line 68) | def decode(self, tokens: torch.LongTensor):
method extract_features (line 83) | def extract_features(
method register_classification_head (line 106) | def register_classification_head(
method predict (line 113) | def predict(self, head: str, tokens: torch.LongTensor, return_logits: ...
method extract_features_aligned_to_words (line 120) | def extract_features_aligned_to_words(
method fill_mask (line 157) | def fill_mask(self, masked_input: str, topk: int = 5):
method disambiguate_pronoun (line 218) | def disambiguate_pronoun(self, sentence: str) -> bool:
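
RobertaHubInterface packs tokenisation, feature extraction, and the task heads behind a few convenience calls. A usage sketch following fairseq's documented torch.hub workflow; it needs network access to fetch pretrained weights on first run:

```python
import torch

# download a pretrained RoBERTa through torch.hub on first use
roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
roberta.eval()  # disable dropout

tokens = roberta.encode("Hello world!")  # BPE + binarize to a LongTensor
print(roberta.decode(tokens))            # round-trips to the input string

# top-k single-token completions for the <mask> slot
print(roberta.fill_mask("The first Star wars movie came out in <mask>", topk=3))
```
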
FILE: packages/fairseq-hacked/fairseq/models/roberta/model.py
class RobertaModel (line 30) | class RobertaModel(FairseqLanguageModel):
method hub_models (line 32) | def hub_models(cls):
method __init__ (line 40) | def __init__(self, args, encoder):
method add_args (line 50) | def add_args(parser):
method build_model (line 132) | def build_model(cls, args, task):
method forward (line 144) | def forward(
method register_classification_head (line 161) | def register_classification_head(
method supported_targets (line 184) | def supported_targets(self):
method from_pretrained (line 188) | def from_pretrained(
method upgrade_state_dict_named (line 209) | def upgrade_state_dict_named(self, state_dict, name):
class XLMRModel (line 270) | class XLMRModel(RobertaModel):
method hub_models (line 272) | def hub_models(cls):
method from_pretrained (line 279) | def from_pretrained(
class CamembertModel (line 302) | class CamembertModel(RobertaModel):
method hub_models (line 304) | def hub_models(cls):
method from_pretrained (line 310) | def from_pretrained(
class RobertaLMHead (line 332) | class RobertaLMHead(nn.Module):
method __init__ (line 335) | def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
method forward (line 346) | def forward(self, features, masked_tokens=None, **kwargs):
class RobertaClassificationHead (line 360) | class RobertaClassificationHead(nn.Module):
method __init__ (line 363) | def __init__(
method forward (line 372) | def forward(self, features, **kwargs):
class RobertaEncoder (line 382) | class RobertaEncoder(FairseqDecoder):
method __init__ (line 389) | def __init__(self, args, dictionary):
method forward (line 425) | def forward(
method extract_features (line 455) | def extract_features(self, src_tokens, return_all_hiddens=False, **unu...
method output_layer (line 462) | def output_layer(self, features, masked_tokens=None, **unused):
method max_positions (line 465) | def max_positions(self):
function base_architecture (line 471) | def base_architecture(args):
function roberta_base_architecture (line 489) | def roberta_base_architecture(args):
function roberta_large_architecture (line 494) | def roberta_large_architecture(args):
function xlm_architecture (line 503) | def xlm_architecture(args):
FILE: packages/fairseq-hacked/fairseq/models/transformer.py
class TransformerModel (line 36) | class TransformerModel(FairseqEncoderDecoderModel):
method hub_models (line 54) | def hub_models(cls):
method __init__ (line 86) | def __init__(self, args, encoder, decoder):
method add_args (line 92) | def add_args(parser):
method build_model (line 167) | def build_model(cls, args, task):
method build_encoder (line 226) | def build_encoder(cls, args, src_dict, embed_tokens):
method build_decoder (line 230) | def build_decoder(cls, args, tgt_dict, embed_tokens):
class TransformerAlignModel (line 240) | class TransformerAlignModel(TransformerModel):
method __init__ (line 246) | def __init__(self, encoder, decoder, args):
method add_args (line 253) | def add_args(parser):
method build_model (line 265) | def build_model(cls, args, task):
method forward (line 274) | def forward(self, src_tokens, src_lengths, prev_output_tokens):
method forward_decoder (line 278) | def forward_decoder(
class TransformerEncoder (line 319) | class TransformerEncoder(FairseqEncoder):
method __init__ (line 330) | def __init__(self, args, dictionary, embed_tokens):
method forward_embedding (line 372) | def forward_embedding(self, src_tokens):
method forward (line 382) | def forward(
method reorder_encoder_out (line 447) | def reorder_encoder_out(self, encoder_out, new_order):
method max_positions (line 479) | def max_positions(self):
method buffered_future_mask (line 485) | def buffered_future_mask(self, tensor):
method upgrade_state_dict_named (line 501) | def upgrade_state_dict_named(self, state_dict, name):
class TransformerDecoder (line 526) | class TransformerDecoder(FairseqIncrementalDecoder):
method __init__ (line 539) | def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=Fal...
method forward (line 621) | def forward(
method extract_features (line 655) | def extract_features(
method output_layer (line 775) | def output_layer(self, features, **kwargs):
method max_positions (line 786) | def max_positions(self):
method buffered_future_mask (line 792) | def buffered_future_mask(self, tensor):
method upgrade_state_dict_named (line 805) | def upgrade_state_dict_named(self, state_dict, name):
function Embedding (line 841) | def Embedding(num_embeddings, embedding_dim, padding_idx):
function Linear (line 848) | def Linear(in_features, out_features, bias=True):
function base_architecture (line 857) | def base_architecture(args):
function transformer_iwslt_de_en (line 902) | def transformer_iwslt_de_en(args):
function transformer_wmt_en_de (line 915) | def transformer_wmt_en_de(args):
function transformer_vaswani_wmt_en_de_big (line 921) | def transformer_vaswani_wmt_en_de_big(args):
function transformer_vaswani_wmt_en_fr_big (line 934) | def transformer_vaswani_wmt_en_fr_big(args):
function transformer_wmt_en_de_big (line 940) | def transformer_wmt_en_de_big(args):
function transformer_wmt_en_de_big_t2t (line 947) | def transformer_wmt_en_de_big_t2t(args):
function transformer_align (line 956) | def transformer_align(args):
function transformer_wmt_en_de_big_align (line 964) | def transformer_wmt_en_de_big_align(args):
FILE: packages/fairseq-hacked/fairseq/models/transformer_from_pretrained_xlm.py
class TransformerFromPretrainedXLMModel (line 21) | class TransformerFromPretrainedXLMModel(TransformerModel):
method add_args (line 23) | def add_args(parser):
method build_model (line 44) | def build_model(self, args, task, cls_dictionary=MaskedLMDictionary):
method build_encoder (line 65) | def build_encoder(cls, args, src_dict, embed_tokens):
method build_decoder (line 69) | def build_decoder(cls, args, tgt_dict, embed_tokens):
function upgrade_state_dict_with_xlm_weights (line 73) | def upgrade_state_dict_with_xlm_weights(
class TransformerEncoderFromPretrainedXLM (line 112) | class TransformerEncoderFromPretrainedXLM(TransformerEncoder):
method __init__ (line 113) | def __init__(self, args, dictionary, embed_tokens):
class TransformerDecoderFromPretrainedXLM (line 130) | class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
method __init__ (line 131) | def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=Fal...
function base_architecture (line 151) | def base_architecture(args):
FILE: packages/fairseq-hacked/fairseq/models/transformer_lm.py
class TransformerLanguageModel (line 25) | class TransformerLanguageModel(FairseqLanguageModel):
method hub_models (line 27) | def hub_models(cls):
method __init__ (line 49) | def __init__(self, decoder):
method add_args (line 53) | def add_args(parser):
method build_model (line 125) | def build_model(cls, args, task):
function base_lm_architecture (line 180) | def base_lm_architecture(args):
function transformer_lm_big (line 234) | def transformer_lm_big(args):
function transformer_lm_baevski_wiki103 (line 244) | def transformer_lm_baevski_wiki103(args):
function transformer_lm_baevski_gbw (line 264) | def transformer_lm_baevski_gbw(args):
function transformer_lm_gpt (line 273) | def transformer_lm_gpt(args):
function transformer_lm_gpt2_small (line 285) | def transformer_lm_gpt2_small(args):
function transformer_lm_gpt2_medium (line 297) | def transformer_lm_gpt2_medium(args):
function transformer_lm_gpt2_big (line 309) | def transformer_lm_gpt2_big(args):
FILE: packages/fairseq-hacked/fairseq/models/wav2vec.py
class Wav2VecModel (line 18) | class Wav2VecModel(BaseFairseqModel):
method add_args (line 20) | def add_args(parser):
method build_model (line 143) | def build_model(cls, args, task):
method __init__ (line 153) | def __init__(self, args):
method forward (line 245) | def forward(self, source):
method upgrade_state_dict_named (line 262) | def upgrade_state_dict_named(self, state_dict, name):
method max_positions (line 265) | def max_positions(self):
method get_logits (line 269) | def get_logits(self, net_output):
method get_targets (line 273) | def get_targets(self, sample, net_output, expand_steps=True):
method get_target_weights (line 277) | def get_target_weights(self, targets, net_output):
class TransposeLast (line 284) | class TransposeLast(nn.Module):
method __init__ (line 285) | def __init__(self, deconstruct_idx=None):
method forward (line 289) | def forward(self, x):
class Fp32GroupNorm (line 295) | class Fp32GroupNorm(nn.GroupNorm):
method __init__ (line 296) | def __init__(self, *args, **kwargs):
method forward (line 299) | def forward(self, input):
class Fp32LayerNorm (line 310) | class Fp32LayerNorm(nn.LayerNorm):
method __init__ (line 311) | def __init__(self, *args, **kwargs):
method forward (line 314) | def forward(self, input):
function norm_block (line 325) | def norm_block(is_layer_norm, dim, affine=True):
class ConvFeatureExtractionModel (line 338) | class ConvFeatureExtractionModel(nn.Module):
method __init__ (line 339) | def __init__(
method forward (line 370) | def forward(self, x):
class ZeroPad1d (line 391) | class ZeroPad1d(nn.Module):
method __init__ (line 392) | def __init__(self, pad_left, pad_right):
method forward (line 397) | def forward(self, x):
class ConvAggegator (line 401) | class ConvAggegator(nn.Module):
method __init__ (line 402) | def __init__(
method forward (line 447) | def forward(self, x):
class Wav2VecPredictionsModel (line 458) | class Wav2VecPredictionsModel(nn.Module):
method __init__ (line 459) | def __init__(
method sample_negatives (line 484) | def sample_negatives(self, y):
method forward (line 532) | def forward(self, x, y):
function base_wav2vec_architecture (line 576) | def base_wav2vec_architecture(args):
FILE: packages/fairseq-hacked/fairseq/modules/adaptive_input.py
class AdaptiveInput (line 13) | class AdaptiveInput(nn.Module):
method __init__ (line 14) | def __init__(
method weights_for_band (line 58) | def weights_for_band(self, band: int):
method forward (line 61) | def forward(self, input: torch.Tensor):
FILE: packages/fairseq-hacked/fairseq/modules/adaptive_softmax.py
class TiedLinear (line 14) | class TiedLinear(nn.Module):
method __init__ (line 15) | def __init__(self, weight, transpose):
method forward (line 20) | def forward(self, input):
class TiedHeadModule (line 24) | class TiedHeadModule(nn.Module):
method __init__ (line 25) | def __init__(self, weights, input_dim, num_classes):
method forward (line 41) | def forward(self, input):
class AdaptiveSoftmax (line 49) | class AdaptiveSoftmax(nn.Module):
method __init__ (line 56) | def __init__(
method _make_tail (line 106) | def _make_tail(self, adaptive_inputs=None, tie_proj=False):
method upgrade_state_dict_named (line 135) | def upgrade_state_dict_named(self, state_dict, name):
method adapt_target (line 140) | def adapt_target(self, target):
method forward (line 165) | def forward(self, input, target):
method get_log_prob (line 188) | def get_log_prob(self, input, target):
FILE: packages/fairseq-hacked/fairseq/modules/beamable_mm.py
class BeamableMM (line 10) | class BeamableMM(nn.Module):
method __init__ (line 19) | def __init__(self, beam_size=None):
method forward (line 23) | def forward(self, input1, input2):
method set_beam_size (line 48) | def set_beam_size(self, beam_size):
FILE: packages/fairseq-hacked/fairseq/modules/character_token_embedder.py
class CharacterTokenEmbedder (line 20) | class CharacterTokenEmbedder(torch.nn.Module):
method __init__ (line 21) | def __init__(
method prepare_for_onnx_export_ (line 62) | def prepare_for_onnx_export_(self):
method set_vocab (line 65) | def set_vocab(self, vocab, max_char_len):
method padding_idx (line 92) | def padding_idx(self):
method reset_parameters (line 95) | def reset_parameters(self):
method forward (line 105) | def forward(
method _convolve (line 152) | def _convolve(
FILE: packages/fairseq-hacked/fairseq/modules/conv_tbc.py
class ConvTBC (line 10) | class ConvTBC(torch.nn.Module):
method __init__ (line 17) | def __init__(self, in_channels, out_channels, kernel_size, padding=0):
method forward (line 29) | def forward(self, input):
method __repr__ (line 34) | def __repr__(self):
FILE: packages/fairseq-hacked/fairseq/modules/downsampled_multihead_attention.py
class SingleHeadAttention (line 15) | class SingleHeadAttention(nn.Module):
method __init__ (line 20) | def __init__(
method forward (line 71) | def forward(
class DownsampledMultiHeadAttention (line 166) | class DownsampledMultiHeadAttention(nn.ModuleList):
method __init__ (line 171) | def __init__(
method forward (line 227) | def forward(
class Downsample (line 284) | class Downsample(nn.Module):
method __init__ (line 289) | def __init__(self, index):
method forward (line 293) | def forward(self, x):
function Linear (line 297) | def Linear(in_features, out_features, dropout=0.0, bias=True):
function GatedLinear (line 305) | def GatedLinear(in_features, out_features, dropout=0.0, bias=True):
FILE: packages/fairseq-hacked/fairseq/modules/dynamic_convolution.py
function DynamicConv (line 14) | def DynamicConv(
function Linear (line 53) | def Linear(in_features, out_features, bias=True):
class DynamicConv1dTBC (line 61) | class DynamicConv1dTBC(nn.Module):
method __init__ (line 86) | def __init__(
method in_proj (line 125) | def in_proj(self):
method reset_parameters (line 131) | def reset_parameters(self):
method forward (line 136) | def forward(self, x, incremental_state=None, query=None, unfold=None):
method _forward_unfolded (line 161) | def _forward_unfolded(self, x, incremental_state, query):
method _forward_expanded (line 219) | def _forward_expanded(self, x, incremental_stat, query):
method reorder_incremental_state (line 278) | def reorder_incremental_state(self, incremental_state, new_order):
method _get_input_buffer (line 284) | def _get_input_buffer(self, incremental_state):
method _set_input_buffer (line 287) | def _set_input_buffer(self, incremental_state, new_buffer):
method extra_repr (line 292) | def extra_repr(self):
FILE: packages/fairseq-hacked/fairseq/modules/dynamicconv_layer/cuda_function_gen.py
function gen_forward (line 7) | def gen_forward():
function gen_backward (line 96) | def gen_backward():
FILE: packages/fairseq-hacked/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp
function dynamicconv_forward (line 27) | std::vector<at::Tensor> dynamicconv_forward(
function dynamicconv_backward (line 39) | std::vector<at::Tensor> dynamicconv_backward(
function PYBIND11_MODULE (line 53) | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
FILE: packages/fairseq-hacked/fairseq/modules/dynamicconv_layer/dynamicconv_layer.py
class dynamicconvFunction (line 16) | class dynamicconvFunction(Function):
method forward (line 18) | def forward(ctx, x, weights, padding_l):
method backward (line 26) | def backward(ctx, grad_output):
class DynamicconvLayer (line 34) | class DynamicconvLayer(nn.Module):
method __init__ (line 35) | def __init__(
method reset_parameters (line 67) | def reset_parameters(self):
method forward (line 73) | def forward(self, x, incremental_state=None, query=None, unfold=None):
method reorder_incremental_state (line 117) | def reorder_incremental_state(self, incremental_state, new_order):
method _get_input_buffer (line 123) | def _get_input_buffer(self, incremental_state):
method _set_input_buffer (line 126) | def _set_input_buffer(self, incremental_state, new_buffer):
method _forward_unfolded (line 131) | def _forward_unfolded(self, x, incremental_state, query):
method _forward_expanded (line 182) | def _forward_expanded(self, x, incremental_stat, query):
FILE: packages/fairseq-hacked/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp
function dynamicconv_forward (line 15) | std::vector<float*> dynamicconv_forward(
function dynamicconv_backward (line 23) | std::vector<float*> dynamicconv_backward(
function PYBIND11_MODULE (line 32) | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
FILE: packages/fairseq-hacked/fairseq/modules/gelu.py
function gelu_accurate (line 15) | def gelu_accurate(x):
function gelu (line 23) | def gelu(x: torch.Tensor) -> torch.Tensor:
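For reference, the two activations listed above in their standard forms: gelu is the exact Gaussian-CDF version via erf, and gelu_accurate is the common tanh approximation. The names match the file; the bodies below are the textbook formulas, not a copy of the file:

    import math
    import torch

    def gelu(x: torch.Tensor) -> torch.Tensor:
        # exact GELU: x * Phi(x), with the Gaussian CDF written via erf
        return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

    def gelu_accurate(x: torch.Tensor) -> torch.Tensor:
        # tanh approximation from Hendrycks & Gimpel (2016)
        c = math.sqrt(2.0 / math.pi)
        return 0.5 * x * (1.0 + torch.tanh(c * (x + 0.044715 * x.pow(3))))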
FILE: packages/fairseq-hacked/fairseq/modules/grad_multiply.py
class GradMultiply (line 9) | class GradMultiply(torch.autograd.Function):
method forward (line 11) | def forward(ctx, x, scale):
method backward (line 17) | def backward(ctx, grad):
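GradMultiply is a standard gradient-rescaling trick: identity on the forward pass, gradient multiplied by a constant on the backward pass, used to damp a submodule's gradients without touching its activations. A minimal version consistent with the listed forward(ctx, x, scale):

    import torch

    class GradMultiply(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, scale):
            ctx.scale = scale
            return x.clone()                   # activations pass through unchanged

        @staticmethod
        def backward(ctx, grad):
            return grad * ctx.scale, None      # scale grads; `scale` gets no grad

    x = torch.randn(3, requires_grad=True)
    GradMultiply.apply(x, 0.1).sum().backward()
    print(x.grad)                              # every entry is scaled by 0.1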
FILE: packages/fairseq-hacked/fairseq/modules/highway.py
class Highway (line 11) | class Highway(torch.nn.Module):
method __init__ (line 17) | def __init__(self, input_dim: int, num_layers: int = 1):
method reset_parameters (line 27) | def reset_parameters(self):
method forward (line 39) | def forward(self, x: torch.Tensor):
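The Highway module above follows Srivastava et al. (2015): a learned sigmoid gate mixes a nonlinear transform of the input with the input itself. A one-layer-at-a-time sketch consistent with the listed __init__(input_dim, num_layers); which half of the projection acts as the gate is a guess, not read from the file:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Highway(nn.Module):
        def __init__(self, input_dim: int, num_layers: int = 1):
            super().__init__()
            # each layer projects to 2*input_dim: one half transform, one half gate
            self.layers = nn.ModuleList(
                nn.Linear(input_dim, input_dim * 2) for _ in range(num_layers)
            )

        def forward(self, x: torch.Tensor):
            for layer in self.layers:
                proj, gate = layer(x).chunk(2, dim=-1)
                g = torch.sigmoid(gate)
                x = g * F.relu(proj) + (1.0 - g) * x
            return x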
FILE: packages/fairseq-hacked/fairseq/modules/layer_norm.py
function LayerNorm (line 9) | def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, expor...
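LayerNorm here is a factory function, not a new module. The usual fairseq pattern, sketched below, prefers apex's fused CUDA kernel when it is installed and falls back to torch.nn.LayerNorm; the export flag forces the portable implementation (e.g. for ONNX export):

    import torch

    def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
        if not export and torch.cuda.is_available():
            try:
                from apex.normalization import FusedLayerNorm
                return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
            except ImportError:
                pass  # apex not installed; use the stock implementation
        return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)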
FILE: packages/fairseq-hacked/fairseq/modules/learned_positional_embedding.py
class LearnedPositionalEmbedding (line 11) | class LearnedPositionalEmbedding(nn.Embedding):
method __init__ (line 19) | def __init__(
method forward (line 25) | def forward(self, input, incremental_state=None, positions=None):
method max_positions (line 44) | def max_positions(self):
FILE: packages/fairseq-hacked/fairseq/modules/lightconv_layer/cuda_function_gen.py
function gen_forward (line 7) | def gen_forward():
function gen_backward (line 116) | def gen_backward():
FILE: packages/fairseq-hacked/fairseq/modules/lightconv_layer/lightconv_cuda.cpp
function lightconv_forward (line 27) | std::vector<at::Tensor> lightconv_forward(
function lightconv_backward (line 38) | std::vector<at::Tensor> lightconv_backward(
function PYBIND11_MODULE (line 51) | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
FILE: packages/fairseq-hacked/fairseq/modules/lightconv_layer/lightconv_layer.py
class lightconvFunction (line 15) | class lightconvFunction(Function):
method forward (line 17) | def forward(ctx, x, weights, padding_l):
method backward (line 25) | def backward(ctx, grad_output):
class LightconvLayer (line 33) | class LightconvLayer(nn.Module):
method __init__ (line 34) | def __init__(
method reset_parameters (line 59) | def reset_parameters(self):
method forward (line 64) | def forward(self, x, incremental_state=None):
method reorder_incremental_state (line 111) | def reorder_incremental_state(self, incremental_state, new_order):
method _get_input_buffer (line 117) | def _get_input_buffer(self, incremental_state):
method _set_input_buffer (line 120) | def _set_input_buffer(self, incremental_state, new_buffer):
method half (line 125) | def half(self):
FILE: packages/fairseq-hacked/fairseq/modules/lightweight_convolution.py
function LightweightConv (line 14) | def LightweightConv(
class LightweightConv1d (line 49) | class LightweightConv1d(nn.Module):
method __init__ (line 72) | def __init__(
method reset_parameters (line 97) | def reset_parameters(self):
method forward (line 102) | def forward(self, input):
class LightweightConv1dTBC (line 128) | class LightweightConv1dTBC(nn.Module):
method __init__ (line 149) | def __init__(
method reset_parameters (line 177) | def reset_parameters(self):
method forward (line 182) | def forward(self, x, incremental_state=None, unfold=False):
method prepare_for_onnx_export_ (line 200) | def prepare_for_onnx_export_(self):
method _forward_unfolded (line 203) | def _forward_unfolded(self, x, incremental_state):
method _forward_expanded (line 245) | def _forward_expanded(self, x, incremental_state):
method reorder_incremental_state (line 282) | def reorder_incremental_state(self, incremental_state, new_order):
method _get_input_buffer (line 288) | def _get_input_buffer(self, incremental_state):
method _set_input_buffer (line 291) | def _set_input_buffer(self, incremental_state, new_buffer):
method extra_repr (line 296) | def extra_repr(self):
FILE: packages/fairseq-hacked/fairseq/modules/linearized_convolution.py
class LinearizedConvolution (line 14) | class LinearizedConvolution(ConvTBC):
method __init__ (line 23) | def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
method forward (line 28) | def forward(self, input, incremental_state=None):
method reorder_incremental_state (line 66) | def reorder_incremental_state(self, incremental_state, new_order):
method _get_input_buffer (line 72) | def _get_input_buffer(self, incremental_state):
method _set_input_buffer (line 75) | def _set_input_buffer(self, incremental_state, new_buffer):
method _get_linearized_weight (line 80) | def _get_linearized_weight(self):
method _clear_linearized_weight (line 90) | def _clear_linearized_weight(self, *args):
FILE: packages/fairseq-hacked/fairseq/modules/logsumexp_moe.py
class LogSumExpMoE (line 9) | class LogSumExpMoE(torch.autograd.Function):
method forward (line 17) | def forward(ctx, logp, posterior, dim=-1):
method backward (line 23) | def backward(ctx, grad_output):
FILE: packages/fairseq-hacked/fairseq/modules/mean_pool_gating_network.py
class MeanPoolGatingNetwork (line 10) | class MeanPoolGatingNetwork(torch.nn.Module):
method __init__ (line 18) | def __init__(self, embed_dim, num_experts, dropout=None):
method forward (line 27) | def forward(self, encoder_out):
FILE: packages/fairseq-hacked/fairseq/modules/multihead_attention.py
class MultiheadAttention (line 15) | class MultiheadAttention(nn.Module):
method __init__ (line 21) | def __init__(
method prepare_for_onnx_export_ (line 79) | def prepare_for_onnx_export_(self):
method reset_parameters (line 82) | def reset_parameters(self):
method forward (line 101) | def forward(
method _append_prev_key_padding_mask (line 351) | def _append_prev_key_padding_mask(
method reorder_incremental_state (line 378) | def reorder_incremental_state(self, incremental_state, new_order):
method _get_input_buffer (line 387) | def _get_input_buffer(self, incremental_state):
method _set_input_buffer (line 390) | def _set_input_buffer(self, incremental_state, buffer):
method apply_sparse_mask (line 395) | def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
method upgrade_state_dict_named (line 398) | def upgrade_state_dict_named(self, state_dict, name):
FILE: packages/fairseq-hacked/fairseq/modules/positional_embedding.py
function PositionalEmbedding (line 12) | def PositionalEmbedding(
FILE: packages/fairseq-hacked/fairseq/modules/scalar_bias.py
class ScalarBias (line 10) | class ScalarBias(torch.autograd.Function):
method forward (line 17) | def forward(ctx, input, dim, bias_init):
method backward (line 26) | def backward(ctx, grad):
function scalar_bias (line 30) | def scalar_bias(input, dim, bias_init=0):
FILE: packages/fairseq-hacked/fairseq/modules/sinusoidal_positional_embedding.py
class SinusoidalPositionalEmbedding (line 15) | class SinusoidalPositionalEmbedding(nn.Module):
method __init__ (line 21) | def __init__(self, embedding_dim, padding_idx, init_size=1024):
method prepare_for_onnx_export_ (line 31) | def prepare_for_onnx_export_(self):
method get_embedding (line 35) | def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
method forward (line 57) | def forward(self, input, incremental_state=None, timestep=None, **kwar...
method max_positions (line 97) | def max_positions(self):
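get_embedding builds the classic sin/cos table from "Attention Is All You Need", with the padding position zeroed out. A compact sketch of that computation:

    import math
    import torch

    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        half_dim = embedding_dim // 2
        freq = math.log(10000) / (half_dim - 1)
        freq = torch.exp(torch.arange(half_dim, dtype=torch.float) * -freq)
        pos = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1)
        emb = torch.cat([torch.sin(pos * freq), torch.cos(pos * freq)], dim=1)
        if embedding_dim % 2 == 1:                 # zero-pad odd dimensions
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0                # padding gets the zero vector
        return emb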
FILE: packages/fairseq-hacked/fairseq/modules/sparse_multihead_attention.py
class SparseMultiheadAttention (line 11) | class SparseMultiheadAttention(MultiheadAttention):
method __init__ (line 22) | def __init__(
method compute_checkpoint (line 58) | def compute_checkpoint(self, word_index):
method compute_subset_summaries (line 70) | def compute_subset_summaries(self, absolute_max):
method compute_fixed_attention_subset (line 85) | def compute_fixed_attention_subset(self, word_index, tgt_len):
method buffered_sparse_mask (line 117) | def buffered_sparse_mask(self, tensor, tgt_len, src_len):
method apply_sparse_mask (line 133) | def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
FILE: packages/fairseq-hacked/fairseq/modules/sparse_transformer_sentence_encoder.py
class SparseTransformerSentenceEncoder (line 13) | class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
method __init__ (line 19) | def __init__(
FILE: packages/fairseq-hacked/fairseq/modules/sparse_transformer_sentence_encoder_layer.py
class SparseTransformerSentenceEncoderLayer (line 10) | class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLa...
method __init__ (line 15) | def __init__(
FILE: packages/fairseq-hacked/fairseq/modules/transformer_layer.py
class TransformerEncoderLayer (line 13) | class TransformerEncoderLayer(nn.Module):
method __init__ (line 28) | def __init__(self, args):
method upgrade_state_dict_named (line 51) | def upgrade_state_dict_named(self, state_dict, name):
method forward (line 65) | def forward(self, x, encoder_padding_mask, attn_mask=None):
method maybe_layer_norm (line 109) | def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
class TransformerDecoderLayer (line 117) | class TransformerDecoderLayer(nn.Module):
method __init__ (line 134) | def __init__(
method prepare_for_onnx_export_ (line 186) | def prepare_for_onnx_export_(self):
method forward (line 189) | def forward(
method maybe_layer_norm (line 310) | def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
method make_generation_fast_ (line 317) | def make_generation_fast_(self, need_attn=False, **kwargs):
function Linear (line 321) | def Linear(in_features, out_features, bias=True):
FILE: packages/fairseq-hacked/fairseq/modules/transformer_sentence_encoder.py
function init_bert_params (line 20) | def init_bert_params(module):
class TransformerSentenceEncoder (line 48) | class TransformerSentenceEncoder(nn.Module):
method __init__ (line 72) | def __init__(
method forward (line 177) | def forward(
FILE: packages/fairseq-hacked/fairseq/modules/transformer_sentence_encoder_layer.py
class TransformerSentenceEncoderLayer (line 17) | class TransformerSentenceEncoderLayer(nn.Module):
method __init__ (line 23) | def __init__(
method forward (line 62) | def forward(
FILE: packages/fairseq-hacked/fairseq/modules/unfold.py
function unfold1d (line 9) | def unfold1d(x, kernel_size, padding_l, pad_value=0):
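unfold1d converts a T x B x C sequence into T x B x C x K sliding windows, padding the time axis so every timestep sees kernel_size positions; the convolution modules above consume this layout. A sketch of the standard as_strided implementation matching the listed signature:

    import torch
    import torch.nn.functional as F

    def unfold1d(x, kernel_size, padding_l, pad_value=0):
        """Unfold T x B x C to T x B x C x kernel_size."""
        if kernel_size > 1:
            T, B, C = x.size()
            x = F.pad(
                x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l),
                value=pad_value,
            )
            # stride over the padded time axis to expose the window dimension
            x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C))
        else:
            x = x.unsqueeze(3)
        return x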
FILE: packages/fairseq-hacked/fairseq/modules/vggblock.py
function _pair (line 15) | def _pair(v):
function infer_conv_output_dim (line 22) | def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
class VGGBlock (line 38) | class VGGBlock(torch.nn.Module):
method __init__ (line 60) | def __init__(
method forward (line 113) | def forward(self, x):
FILE: packages/fairseq-hacked/fairseq/optim/adadelta.py
class Adadelta (line 12) | class Adadelta(FairseqOptimizer):
method __init__ (line 13) | def __init__(self, args, params):
method add_args (line 18) | def add_args(parser):
method optimizer_config (line 31) | def optimizer_config(self):
FILE: packages/fairseq-hacked/fairseq/optim/adafactor.py
class FairseqAdafactor (line 14) | class FairseqAdafactor(FairseqOptimizer):
method __init__ (line 15) | def __init__(self, args, params):
method add_args (line 20) | def add_args(parser):
method optimizer_config (line 43) | def optimizer_config(self):
class Adafactor (line 65) | class Adafactor(torch.optim.Optimizer):
method __init__ (line 93) | def __init__(
method supports_memory_efficient_fp16 (line 120) | def supports_memory_efficient_fp16(self):
method _get_lr (line 123) | def _get_lr(self, param_group, param_state):
method _get_options (line 135) | def _get_options(self, param_group, param_shape):
method _rms (line 140) | def _rms(self, tensor):
method _approx_sq_grad (line 143) | def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, output):
method step (line 152) | def step(self, closure=None):
FILE: packages/fairseq-hacked/fairseq/optim/adagrad.py
class Adagrad (line 12) | class Adagrad(FairseqOptimizer):
method __init__ (line 13) | def __init__(self, args, params):
method add_args (line 18) | def add_args(parser):
method optimizer_config (line 26) | def optimizer_config(self):
FILE: packages/fairseq-hacked/fairseq/optim/adam.py
class FairseqAdam (line 17) | class FairseqAdam(FairseqOptimizer):
method __init__ (line 25) | def __init__(self, args, params):
method add_args (line 38) | def add_args(parser):
method optimizer_config (line 50) | def optimizer_config(self):
method average_params (line 64) | def average_params(self):
class Adam (line 76) | class Adam(torch.optim.Optimizer):
method __init__ (line 103) | def __init__(
method supports_memory_efficient_fp16 (line 118) | def supports_memory_efficient_fp16(self):
method step (line 121) | def step(self, closure=None):
class FusedAdam (line 197) | class FusedAdam(torch.optim.Optimizer):
method __init__ (line 229) | def __init__(
method supports_memory_efficient_fp16 (line 260) | def supports_memory_efficient_fp16(self):
method step (line 263) | def step(self, closure=None, grads=None, scale=1.0, grad_norms=None):
FILE: packages/fairseq-hacked/fairseq/optim/adamax.py
class FairseqAdamax (line 13) | class FairseqAdamax(FairseqOptimizer):
method __init__ (line 14) | def __init__(self, args, params):
method add_args (line 19) | def add_args(parser):
method optimizer_config (line 33) | def optimizer_config(self):
class Adamax (line 49) | class Adamax(torch.optim.Optimizer):
method __init__ (line 70) | def __init__(
method supports_memory_efficient_fp16 (line 100) | def supports_memory_efficient_fp16(self):
method step (line 103) | def step(self, closure=None):
FILE: packages/fairseq-hacked/fairseq/optim/bmuf.py
class FairseqBMUF (line 12) | class FairseqBMUF(FairseqOptimizer):
method __init__ (line 22) | def __init__(self, args, optimizer):
method add_args (line 37) | def add_args(parser):
method optimizer (line 74) | def optimizer(self):
method optimizer_config (line 78) | def optimizer_config(self):
method get_lr (line 81) | def get_lr(self):
method set_lr (line 84) | def set_lr(self, lr):
method state_dict (line 87) | def state_dict(self):
method load_state_dict (line 90) | def load_state_dict(self, state_dict, optimizer_overrides=None):
method multiply_grads (line 93) | def multiply_grads(self, c):
method clip_grad_norm (line 97) | def clip_grad_norm(self, max_norm):
method average_params (line 101) | def average_params(self):
method _block_sync (line 104) | def _block_sync(self):
method _is_warmup_end (line 122) | def _is_warmup_end(self):
method _is_bmuf_iter (line 128) | def _is_bmuf_iter(self):
method _warmup_sync (line 136) | def _warmup_sync(self, root_rank=0):
method step (line 149) | def step(self, closure=None):
method zero_grad (line 158) | def zero_grad(self):
method get_num_updates (line 162) | def get_num_updates(self):
method set_num_updates (line 166) | def set_num_updates(self, num_updates):
method _reset_local_data (line 171) | def _reset_local_data(self):
method _calc_grad (line 182) | def _calc_grad(self):
method _avg_grad_from_all_gpus (line 192) | def _avg_grad_from_all_gpus(self):
method _update_global_model (line 199) | def _update_global_model(self):
FILE: packages/fairseq-hacked/fairseq/optim/fairseq_optimizer.py
class FairseqOptimizer (line 11) | class FairseqOptimizer(object):
method __init__ (line 12) | def __init__(self, args):
method add_args (line 17) | def add_args(parser):
method optimizer (line 22) | def optimizer(self):
method optimizer_config (line 31) | def optimizer_config(self):
method params (line 41) | def params(self):
method __getstate__ (line 47) | def __getstate__(self):
method get_lr (line 50) | def get_lr(self):
method set_lr (line 54) | def set_lr(self, lr):
method state_dict (line 59) | def state_dict(self):
method load_state_dict (line 63) | def load_state_dict(self, state_dict, optimizer_overrides=None):
method backward (line 78) | def backward(self, loss):
method multiply_grads (line 82) | def multiply_grads(self, c):
method clip_grad_norm (line 88) | def clip_grad_norm(self, max_norm):
method step (line 97) | def step(self, closure=None):
method zero_grad (line 101) | def zero_grad(self):
method supports_memory_efficient_fp16 (line 108) | def supports_memory_efficient_fp16(self):
method average_params (line 113) | def average_params(self):
FILE: packages/fairseq-hacked/fairseq/optim/fp16_optimizer.py
class DynamicLossScaler (line 13) | class DynamicLossScaler(object):
method __init__ (line 14) | def __init__(
method update_scale (line 32) | def update_scale(self, overflow):
method _decrease_loss_scale (line 47) | def _decrease_loss_scale(self):
method has_overflow (line 53) | def has_overflow(grad_norm):
class _FP16OptimizerMixin (line 60) | class _FP16OptimizerMixin(object):
method __init__ (line 61) | def __init__(self, *args, **kwargs):
method build_fp32_params (line 66) | def build_fp32_params(cls, params):
method state_dict (line 79) | def state_dict(self):
method load_state_dict (line 85) | def load_state_dict(self, state_dict, optimizer_overrides=None):
method backward (line 97) | def backward(self, loss):
method _sync_fp16_grads_to_fp32 (line 108) | def _sync_fp16_grads_to_fp32(self, multiply_grads=1.0):
method multiply_grads (line 131) | def multiply_grads(self, c):
method clip_grad_norm (line 138) | def clip_grad_norm(self, max_norm):
method step (line 160) | def step(self, closure=None):
method zero_grad (line 174) | def zero_grad(self):
class FP16Optimizer (line 181) | class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
method __init__ (line 186) | def __init__(self, args, params, fp32_optimizer, fp32_params):
method build_optimizer (line 211) | def build_optimizer(cls, args, params):
method optimizer (line 222) | def optimizer(self):
method optimizer_config (line 226) | def optimizer_config(self):
method get_lr (line 229) | def get_lr(self):
method set_lr (line 232) | def set_lr(self, lr):
class _MemoryEfficientFP16OptimizerMixin (line 236) | class _MemoryEfficientFP16OptimizerMixin(object):
method __init__ (line 237) | def __init__(self, *args, **kwargs):
method state_dict (line 241) | def state_dict(self):
method load_state_dict (line 247) | def load_state_dict(self, state_dict, optimizer_overrides=None):
method backward (line 279) | def backward(self, loss):
method _unscale_grads (line 290) | def _unscale_grads(self, multiply_grads=1.0):
method multiply_grads (line 301) | def multiply_grads(self, c):
method clip_grad_norm (line 308) | def clip_grad_norm(self, max_norm):
method step (line 331) | def step(self, closure=None):
method zero_grad (line 336) | def zero_grad(self):
class MemoryEfficientFP16Optimizer (line 342) | class MemoryEfficientFP16Optimizer(
method __init__ (line 360) | def __init__(self, args, params, optimizer):
method build_optimizer (line 388) | def build_optimizer(cls, args, params):
method optimizer (line 398) | def optimizer(self):
method optimizer_config (line 402) | def optimizer_config(self):
method get_lr (line 405) | def get_lr(self):
method set_lr (line 408) | def set_lr(self, lr):
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
class CosineSchedule (line 12) | class CosineSchedule(FairseqLRScheduler):
method __init__ (line 35) | def __init__(self, args, optimizer):
method add_args (line 75) | def add_args(parser):
method step (line 92) | def step(self, epoch, val_loss=None):
method step_update (line 98) | def step_update(self, num_updates):
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py
class FairseqLRScheduler (line 9) | class FairseqLRScheduler(object):
method __init__ (line 10) | def __init__(self, args, optimizer):
method add_args (line 19) | def add_args(parser):
method state_dict (line 23) | def state_dict(self):
method load_state_dict (line 27) | def load_state_dict(self, state_dict):
method step (line 31) | def step(self, epoch, val_loss=None):
method step_update (line 39) | def step_update(self, num_updates):
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/fixed_schedule.py
class FixedSchedule (line 10) | class FixedSchedule(FairseqLRScheduler):
method __init__ (line 13) | def __init__(self, args, optimizer):
method add_args (line 26) | def add_args(parser):
method get_next_lr (line 37) | def get_next_lr(self, epoch):
method step (line 49) | def step(self, epoch, val_loss=None):
method step_update (line 56) | def step_update(self, num_updates):
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
class InverseSquareRootSchedule (line 10) | class InverseSquareRootSchedule(FairseqLRScheduler):
method __init__ (line 29) | def __init__(self, args, optimizer):
method add_args (line 51) | def add_args(parser):
method step (line 60) | def step(self, epoch, val_loss=None):
method step_update (line 66) | def step_update(self, num_updates):
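The inverse-square-root schedule above is worth stating as a formula: linear warmup from warmup_init_lr to lr over warmup_updates steps, then lr * sqrt(warmup_updates / num_updates). A standalone sketch; the argument defaults are illustrative, not read from this file:

    def inverse_sqrt_lr(num_updates, warmup_updates=4000,
                        warmup_init_lr=1e-7, lr=5e-4):
        if num_updates < warmup_updates:
            # linear warmup toward the peak learning rate
            step = (lr - warmup_init_lr) / warmup_updates
            return warmup_init_lr + num_updates * step
        # decay factor chosen so both pieces agree at warmup_updates
        return lr * (warmup_updates ** 0.5) * (num_updates ** -0.5)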
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py
class PolynomialDecaySchedule (line 10) | class PolynomialDecaySchedule(FairseqLRScheduler):
method __init__ (line 13) | def __init__(self, args, optimizer):
method add_args (line 30) | def add_args(parser):
method get_next_lr (line 50) | def get_next_lr(self, epoch):
method step (line 60) | def step(self, epoch, val_loss=None):
method step_update (line 67) | def step_update(self, num_updates):
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
class ReduceLROnPlateau (line 12) | class ReduceLROnPlateau(FairseqLRScheduler):
method __init__ (line 25) | def __init__(self, args, optimizer):
method add_args (line 54) | def add_args(parser):
method state_dict (line 68) | def state_dict(self):
method load_state_dict (line 75) | def load_state_dict(self, state_dict):
method step (line 81) | def step(self, epoch, val_loss=None):
method step_update (line 90) | def step_update(self, num_updates):
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
class TriStageLRSchedule (line 11) | class TriStageLRSchedule(FairseqLRScheduler):
method __init__ (line 49) | def __init__(self, args, optimizer):
method add_args (line 75) | def add_args(parser):
method _decide_stage (line 113) | def _decide_stage(self, update_step):
method step (line 138) | def step(self, epoch, val_loss=None):
method step_update (line 144) | def step_update(self, num_updates):
FILE: packages/fairseq-hacked/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py
class TriangularSchedule (line 12) | class TriangularSchedule(FairseqLRScheduler):
method __init__ (line 18) | def __init__(self, args, optimizer):
method add_args (line 40) | def add_args(parser):
method step (line 53) | def step(self, epoch, val_loss=None):
method step_update (line 59) | def step_update(self, num_updates):
FILE: packages/fairseq-hacked/fairseq/optim/nag.py
class FairseqNAG (line 13) | class FairseqNAG(FairseqOptimizer):
method __init__ (line 14) | def __init__(self, args, params):
method add_args (line 19) | def add_args(parser):
method optimizer_config (line 29) | def optimizer_config(self):
class NAG (line 43) | class NAG(Optimizer):
method __init__ (line 44) | def __init__(self, params, lr=required, momentum=0, weight_decay=0):
method supports_memory_efficient_fp16 (line 49) | def supports_memory_efficient_fp16(self):
method step (line 52) | def step(self, closure=None):
FILE: packages/fairseq-hacked/fairseq/optim/sgd.py
class SGD (line 12) | class SGD(FairseqOptimizer):
method __init__ (line 13) | def __init__(self, args, params):
method add_args (line 18) | def add_args(parser):
method optimizer_config (line 28) | def optimizer_config(self):
FILE: packages/fairseq-hacked/fairseq/options.py
function get_preprocessing_parser (line 15) | def get_preprocessing_parser(default_task="translation"):
function get_training_parser (line 21) | def get_training_parser(default_task="translation"):
function get_generation_parser (line 31) | def get_generation_parser(interactive=False, default_task="translation"):
function get_interactive_generation_parser (line 40) | def get_interactive_generation_parser(default_task="translation"):
function get_eval_lm_parser (line 44) | def get_eval_lm_parser(default_task="language_modeling"):
function get_validation_parser (line 51) | def get_validation_parser(default_task=None):
function eval_str_list (line 59) | def eval_str_list(x, type=float):
function eval_bool (line 70) | def eval_bool(x, default=False):
function parse_args_and_arch (line 79) | def parse_args_and_arch(
function get_parser (line 161) | def get_parser(desc, default_task="translation"):
function add_preprocess_args (line 217) | def add_preprocess_args(parser):
function add_dataset_args (line 263) | def add_dataset_args(parser, train=False, gen=False):
function add_distributed_training_args (line 311) | def add_distributed_training_args(parser):
function add_optimization_args (line 349) | def add_optimization_args(parser):
function add_checkpoint_args (line 376) | def add_checkpoint_args(parser):
function add_common_eval_args (line 418) | def add_common_eval_args(group):
function add_eval_lm_args (line 434) | def add_eval_lm_args(parser):
function add_generation_args (line 451) | def add_generation_args(parser):
function add_interactive_args (line 521) | def add_interactive_args(parser):
function add_model_args (line 531) | def add_model_args(parser):
FILE: packages/fairseq-hacked/fairseq/pdb.py
class MultiprocessingPdb (line 23) | class MultiprocessingPdb(pdb.Pdb):
method __init__ (line 29) | def __init__(self):
method _cmdloop (line 32) | def _cmdloop(self):
function set_trace (line 45) | def set_trace():
FILE: packages/fairseq-hacked/fairseq/progress_bar.py
function build_progress_bar (line 20) | def build_progress_bar(
function format_stat (line 53) | def format_stat(stat):
class progress_bar (line 65) | class progress_bar(object):
method __init__ (line 68) | def __init__(self, iterable, epoch=None, prefix=None):
method __len__ (line 78) | def __len__(self):
method __enter__ (line 81) | def __enter__(self):
method __exit__ (line 84) | def __exit__(self, *exc):
method __iter__ (line 87) | def __iter__(self):
method log (line 90) | def log(self, stats, tag="", step=None):
method print (line 94) | def print(self, stats, tag="", step=None):
method _str_commas (line 98) | def _str_commas(self, stats):
method _str_pipes (line 101) | def _str_pipes(self, stats):
method _format_stats (line 104) | def _format_stats(self, stats):
class json_progress_bar (line 112) | class json_progress_bar(progress_bar):
method __init__ (line 115) | def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
method __iter__ (line 120) | def __iter__(self):
method log (line 136) | def log(self, stats, tag="", step=None):
method print (line 140) | def print(self, stats, tag="", step=None):
method _format_stats (line 150) | def _format_stats(self, stats, epoch=None, update=None):
class noop_progress_bar (line 162) | class noop_progress_bar(progress_bar):
method __init__ (line 165) | def __init__(self, iterable, epoch=None, prefix=None):
method __iter__ (line 168) | def __iter__(self):
method log (line 172) | def log(self, stats, tag="", step=None):
method print (line 176) | def print(self, stats, tag="", step=None):
class simple_progress_bar (line 181) | class simple_progress_bar(progress_bar):
method __init__ (line 184) | def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
method __iter__ (line 189) | def __iter__(self):
method log (line 205) | def log(self, stats, tag="", step=None):
method print (line 209) | def print(self, stats, tag="", step=None):
class tqdm_progress_bar (line 215) | class tqdm_progress_bar(progress_bar):
method __init__ (line 218) | def __init__(self, iterable, epoch=None, prefix=None):
method __iter__ (line 224) | def __iter__(self):
method log (line 227) | def log(self, stats, tag="", step=None):
method print (line 231) | def print(self, stats, tag="", step=None):
class tensorboard_log_wrapper (line 237) | class tensorboard_log_wrapper(progress_bar):
method __init__ (line 240) | def __init__(self, wrapped_bar, tensorboard_logdir, args):
method _writer (line 257) | def _writer(self, key):
method __iter__ (line 268) | def __iter__(self):
method log (line 271) | def log(self, stats, tag="", step=None):
method print (line 276) | def print(self, stats, tag="", step=None):
method __exit__ (line 281) | def __exit__(self, *exc):
method _log_to_tensorboard (line 286) | def _log_to_tensorboard(self, stats, tag="", step=None):
FILE: packages/fairseq-hacked/fairseq/registry.py
function setup_registry (line 12) | def setup_registry(
function set_defaults (line 66) | def set_defaults(args, cls):
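setup_registry implements the register-by-decorator pattern used throughout fairseq (tasks, optimizers, and LR schedulers are all registered this way). Its essence, with hypothetical names and without the argparse wiring the real function also performs:

    REGISTRY = {}

    def register(name):
        def wrapper(cls):
            if name in REGISTRY:
                raise ValueError(f"duplicate registration: {name}")
            REGISTRY[name] = cls
            return cls
        return wrapper

    @register("my_component")
    class MyComponent:
        pass

    assert REGISTRY["my_component"] is MyComponent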
FILE: packages/fairseq-hacked/fairseq/search.py
class Search (line 11) | class Search(object):
method __init__ (line 12) | def __init__(self, tgt_dict):
method _init_buffers (line 21) | def _init_buffers(self, t):
method step (line 27) | def step(self, step, lprobs, scores):
method set_src_lengths (line 49) | def set_src_lengths(self, src_lengths):
class BeamSearch (line 53) | class BeamSearch(Search):
method __init__ (line 54) | def __init__(self, tgt_dict):
method step (line 57) | def step(self, step, lprobs, scores):
class LengthConstrainedBeamSearch (line 84) | class LengthConstrainedBeamSearch(Search):
method __init__ (line 85) | def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
method step (line 93) | def step(self, step, lprobs, scores):
class DiverseBeamSearch (line 102) | class DiverseBeamSearch(Search):
method __init__ (line 112) | def __init__(self, tgt_dict, num_groups, diversity_strength):
method step (line 119) | def step(self, step, lprobs, scores):
class Sampling (line 170) | class Sampling(Search):
method __init__ (line 171) | def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
method _sample_topp (line 176) | def _sample_topp(self, lprobs):
method step (line 221) | def step(self, step, lprobs, scores):
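The heart of BeamSearch.step, reconstructed as a sketch (tensor shapes follow the usual fairseq convention; details such as buffer reuse are omitted): add each beam's running score to the new log-probabilities, flatten beam and vocabulary into one axis, then take the top 2*beam_size candidates so hypotheses ending in EOS can be set aside without starving the beam:

    import torch

    def beam_step(step, lprobs, scores, beam_size):
        bsz, _, vocab = lprobs.size()          # lprobs: (bsz, beam, vocab)
        if step == 0:
            # all beams are identical at step 0; expand only the first
            lprobs = lprobs[:, :1, :].contiguous()
        else:
            # cumulative score of each beam so far, broadcast over the vocab
            lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
        flat = lprobs.view(bsz, -1)
        scores_buf, indices_buf = torch.topk(
            flat, k=min(2 * beam_size, flat.size(1))
        )
        beams_buf = indices_buf // vocab       # which beam each candidate extends
        tokens_buf = indices_buf % vocab       # which token it appends
        return scores_buf, tokens_buf, beams_buf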
FILE: packages/fairseq-hacked/fairseq/sequence_generator.py
class SequenceGenerator (line 15) | class SequenceGenerator(object):
method __init__ (line 16) | def __init__(
method generate (line 103) | def generate(self, models, sample, **kwargs):
method _generate (line 118) | def _generate(self, model, sample, prefix_tokens=None, bos_token=None,...
class EnsembleModel (line 574) | class EnsembleModel(torch.nn.Module):
method __init__ (line 577) | def __init__(self, models):
method has_encoder (line 584) | def has_encoder(self):
method max_decoder_positions (line 587) | def max_decoder_positions(self):
method forward_encoder (line 591) | def forward_encoder(self, encoder_input):
method forward_decoder (line 597) | def forward_decoder(self, tokens, encoder_outs, temperature=1.0):
method _decode_one (line 632) | def _decode_one(
method reorder_encoder_out (line 663) | def reorder_encoder_out(self, encoder_outs, new_order):
method reorder_incremental_state (line 671) | def reorder_incremental_state(self, new_order):
class SequenceGeneratorWithAlignment (line 680) | class SequenceGeneratorWithAlignment(SequenceGenerator):
method __init__ (line 681) | def __init__(self, tgt_dict, left_pad_target=False, **kwargs):
method generate (line 696) | def generate(self, models, sample, **kwargs):
method _prepare_batch_for_alignment (line 725) | def _prepare_batch_for_alignment(self, sample, hypothesis):
class EnsembleModelWithAlignment (line 758) | class EnsembleModelWithAlignment(EnsembleModel):
method __init__ (line 761) | def __init__(self, models):
method forward_align (line 764) | def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
method _decode_one (line 777) | def _decode_one(
FILE: packages/fairseq-hacked/fairseq/sequence_scorer.py
class SequenceScorer (line 12) | class SequenceScorer(object):
method __init__ (line 15) | def __init__(self, tgt_dict, softmax_batch=None):
method generate (line 22) | def generate(self, models, sample, **kwargs):
FILE: packages/fairseq-hacked/fairseq/tasks/__init__.py
function setup_task (line 16) | def setup_task(args, **kwargs):
function register_task (line 20) | def register_task(name):
function get_task (line 81) | def get_task(name):
FILE: packages/fairseq-hacked/fairseq/tasks/audio_pretraining.py
class AudioPretrainingTask (line 13) | class AudioPretrainingTask(FairseqTask):
method add_args (line 19) | def add_args(parser):
method __init__ (line 41) | def __init__(self, args):
method setup_task (line 45) | def setup_task(cls, args, **kwargs):
method load_dataset (line 53) | def load_dataset(self, split, **kwargs):
method target_dictionary (line 69) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/cross_lingual_lm.py
class CrossLingualLMTask (line 30) | class CrossLingualLMTask(FairseqTask):
method add_args (line 39) | def add_args(parser):
method __init__ (line 74) | def __init__(self, args, dictionary):
method _lang_to_id (line 81) | def _lang_to_id(self, languages: str):
method load_dictionary (line 93) | def load_dictionary(cls, filename):
method build_dictionary (line 97) | def build_dictionary(
method target_dictionary (line 109) | def target_dictionary(self):
method setup_task (line 113) | def setup_task(cls, args, **kwargs):
method _load_single_lang_dataset (line 122) | def _load_single_lang_dataset(self, split, epoch):
method load_dataset (line 172) | def load_dataset(self, split, epoch=0, combine=False, **kwargs):
FILE: packages/fairseq-hacked/fairseq/tasks/denoising.py
class DenoisingTask (line 23) | class DenoisingTask(FairseqTask):
method add_args (line 29) | def add_args(parser):
method __init__ (line 121) | def __init__(self, args, dictionary):
method setup_task (line 130) | def setup_task(cls, args, **kwargs):
method load_dataset (line 139) | def load_dataset(self, split, epoch=0, combine=False, **kwargs):
method max_positions (line 198) | def max_positions(self):
method source_dictionary (line 203) | def source_dictionary(self):
method target_dictionary (line 208) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/fairseq_task.py
class FairseqTask (line 18) | class FairseqTask(object):
method add_args (line 25) | def add_args(parser):
method __init__ (line 29) | def __init__(self, args):
method load_dictionary (line 35) | def load_dictionary(cls, filename):
method build_dictionary (line 44) | def build_dictionary(
method setup_task (line 68) | def setup_task(cls, args, **kwargs):
method load_dataset (line 76) | def load_dataset(self, split, combine=False, **kwargs):
method dataset (line 84) | def dataset(self, split):
method get_batch_iterator (line 102) | def get_batch_iterator(
method build_model (line 193) | def build_model(self, args):
method build_criterion (line 208) | def build_criterion(self, args):
method build_generator (line 223) | def build_generator(self, args):
method train_step (line 257) | def train_step(self, sample, model, criterion, optimizer, ignore_grad=...
method valid_step (line 284) | def valid_step(self, sample, model, criterion):
method inference_step (line 290) | def inference_step(self, generator, models, sample, prefix_tokens=None):
method update_step (line 294) | def update_step(self, num_updates):
method grad_denom (line 299) | def grad_denom(self, sample_sizes, criterion):
method aggregate_logging_outputs (line 302) | def aggregate_logging_outputs(self, logging_outputs, criterion):
method max_positions (line 305) | def max_positions(self):
method source_dictionary (line 310) | def source_dictionary(self):
method target_dictionary (line 316) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/language_modeling.py
class LanguageModelingTask (line 23) | class LanguageModelingTask(FairseqTask):
method add_args (line 53) | def add_args(parser):
method __init__ (line 84) | def __init__(self, args, dictionary, output_dictionary=None, targets=N...
method setup_task (line 94) | def setup_task(cls, args, **kwargs):
method build_model (line 141) | def build_model(self, args):
method load_dataset (line 152) | def load_dataset(self, split, epoch=0, combine=False, **kwargs):
method build_dataset_for_inference (line 198) | def build_dataset_for_inference(self, src_tokens, src_lengths):
method inference_step (line 223) | def inference_step(self, generator, models, sample, prefix_tokens=None):
method source_dictionary (line 232) | def source_dictionary(self):
method target_dictionary (line 238) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/legacy_masked_lm.py
class LegacyMaskedLMTask (line 26) | class LegacyMaskedLMTask(FairseqTask):
method add_args (line 34) | def add_args(parser):
method __init__ (line 53) | def __init__(self, args, dictionary):
method load_dictionary (line 59) | def load_dictionary(cls, filename):
method build_dictionary (line 63) | def build_dictionary(
method target_dictionary (line 75) | def target_dictionary(self):
method setup_task (line 79) | def setup_task(cls, args, **kwargs):
method load_dataset (line 89) | def load_dataset(self, split, epoch=0, combine=False):
FILE: packages/fairseq-hacked/fairseq/tasks/masked_lm.py
class MaskedLMTask (line 28) | class MaskedLMTask(FairseqTask):
method add_args (line 32) | def add_args(parser):
method __init__ (line 86) | def __init__(self, args, dictionary):
method setup_task (line 95) | def setup_task(cls, args, **kwargs):
method load_dataset (line 102) | def load_dataset(self, split, epoch=0, combine=False, **kwargs):
method build_dataset_for_inference (line 183) | def build_dataset_for_inference(self, src_tokens, src_lengths, sort=Tr...
method source_dictionary (line 212) | def source_dictionary(self):
method target_dictionary (line 216) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/multilingual_masked_lm.py
class MultiLingualMaskedLMTask (line 32) | class MultiLingualMaskedLMTask(FairseqTask):
method add_args (line 36) | def add_args(parser):
method __init__ (line 96) | def __init__(self, args, dictionary):
method setup_task (line 105) | def setup_task(cls, args, **kwargs):
method _get_whole_word_mask (line 112) | def _get_whole_word_mask(self):
method _get_sample_prob (line 137) | def _get_sample_prob(self, dataset_lens):
method load_dataset (line 147) | def load_dataset(self, split, epoch=0, combine=False, **kwargs):
method build_dataset_for_inference (line 288) | def build_dataset_for_inference(self, src_tokens, src_lengths, sort=Tr...
method get_batch_iterator (line 316) | def get_batch_iterator(
method source_dictionary (line 348) | def source_dictionary(self):
method target_dictionary (line 352) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/multilingual_translation.py
function _lang_token (line 25) | def _lang_token(lang: str):
function _lang_token_index (line 29) | def _lang_token_index(dic: Dictionary, lang: str):
class MultilingualTranslationTask (line 37) | class MultilingualTranslationTask(FairseqTask):
method add_args (line 63) | def add_args(parser):
method __init__ (line 95) | def __init__(self, args, dicts, training):
method setup_task (line 117) | def setup_task(cls, args, **kwargs):
method prepare (line 122) | def prepare(cls, args, **kargs):
method get_encoder_langtok (line 167) | def get_encoder_langtok(self, src_lang, tgt_lang):
method get_decoder_langtok (line 175) | def get_decoder_langtok(self, tgt_lang):
method alter_dataset_langtok (line 180) | def alter_dataset_langtok(
method load_dataset (line 216) | def load_dataset(self, split, epoch=0, **kwargs):
method build_dataset_for_inference (line 260) | def build_dataset_for_inference(self, src_tokens, src_lengths):
method build_model (line 282) | def build_model(self, args):
method train_step (line 320) | def train_step(self, sample, model, criterion, optimizer, ignore_grad=...
method valid_step (line 338) | def valid_step(self, sample, model, criterion):
method inference_step (line 358) | def inference_step(self, generator, models, sample, prefix_tokens=None):
method init_logging_output (line 371) | def init_logging_output(self, sample):
method grad_denom (line 386) | def grad_denom(self, sample_sizes, criterion):
method aggregate_logging_outputs (line 389) | def aggregate_logging_outputs(
method source_dictionary (line 424) | def source_dictionary(self):
method target_dictionary (line 428) | def target_dictionary(self):
method max_positions (line 431) | def max_positions(self):
FILE: packages/fairseq-hacked/fairseq/tasks/semisupervised_translation.py
function _get_bt_dataset_key (line 26) | def _get_bt_dataset_key(lang_pair):
function _get_denoising_dataset_key (line 30) | def _get_denoising_dataset_key(lang_pair):
function parse_lambda_config (line 35) | def parse_lambda_config(x):
class SemisupervisedTranslationTask (line 58) | class SemisupervisedTranslationTask(MultilingualTranslationTask):
method add_args (line 82) | def add_args(parser):
method __init__ (line 117) | def __init__(self, args, dicts, training):
method setup_task (line 138) | def setup_task(cls, args, **kwargs):
method load_dataset (line 142) | def load_dataset(self, split, epoch=0, **kwargs):
method build_model (line 352) | def build_model(self, args):
method train_step (line 391) | def train_step(self, sample, model, criterion, optimizer, ignore_grad=...
method update_step (line 442) | def update_step(self, num_updates):
method aggregate_logging_outputs (line 472) | def aggregate_logging_outputs(self, logging_outputs, criterion):
FILE: packages/fairseq-hacked/fairseq/tasks/sentence_prediction.py
class SentencePredictionTask (line 32) | class SentencePredictionTask(FairseqTask):
method add_args (line 41) | def add_args(parser):
method __init__ (line 74) | def __init__(self, args, data_dictionary, label_dictionary):
method load_dictionary (line 88) | def load_dictionary(cls, args, filename, source=True):
method setup_task (line 99) | def setup_task(cls, args, **kwargs):
method load_dataset (line 119) | def load_dataset(self, split, combine=False, **kwargs):
method build_model (line 213) | def build_model(self, args):
method max_positions (line 224) | def max_positions(self):
method source_dictionary (line 228) | def source_dictionary(self):
method target_dictionary (line 232) | def target_dictionary(self):
method label_dictionary (line 236) | def label_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/sentence_ranking.py
class SentenceRankingTask (line 29) | class SentenceRankingTask(FairseqTask):
method add_args (line 38) | def add_args(parser):
method __init__ (line 62) | def __init__(self, args, dictionary):
method load_dictionary (line 67) | def load_dictionary(cls, args, filename, source=True):
method setup_task (line 78) | def setup_task(cls, args, **kwargs):
method load_dataset (line 90) | def load_dataset(self, split, combine=False, **kwargs):
method build_model (line 179) | def build_model(self, args):
method max_positions (line 190) | def max_positions(self):
method source_dictionary (line 194) | def source_dictionary(self):
method target_dictionary (line 198) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/translation.py
function load_langpair_dataset (line 24) | def load_langpair_dataset(
class TranslationTask (line 129) | class TranslationTask(FairseqTask):
method add_args (line 151) | def add_args(parser):
method __init__ (line 180) | def __init__(self, args, src_dict, tgt_dict):
method setup_task (line 186) | def setup_task(cls, args, **kwargs):
method load_dataset (line 232) | def load_dataset(self, split, epoch=0, combine=False, **kwargs):
method build_dataset_for_inference (line 263) | def build_dataset_for_inference(self, src_tokens, src_lengths):
method max_positions (line 266) | def max_positions(self):
method source_dictionary (line 271) | def source_dictionary(self):
method target_dictionary (line 276) | def target_dictionary(self):
FILE: packages/fairseq-hacked/fairseq/tasks/translation_from_pretrained_xlm.py
class TranslationFromPretrainedXLMTask (line 13) | class TranslationFromPretrainedXLMTask(TranslationTask):
method load_dictionary (line 25) | def load_dictionary(cls, filename):
FILE: packages/fairseq-hacked/fairseq/tasks/translation_lev.py
class TranslationLevenshteinTask (line 14) | class TranslationLevenshteinTask(TranslationTask):
method add_args (line 21) | def add_args(parser):
method load_dataset (line 30) | def load_dataset(self, split, epoch=0, combine=False, **kwargs):
method inject_noise (line 54) | def inject_noise(self, target_tokens):
method build_generator (line 127) | def build_generator(self, args):
method train_step (line 137) | def train_step(self,
method valid_step (line 151) | def valid_step(self, sample, model, criterion):
FILE: packages/fairseq-hacked/fairseq/tasks/translation_moe.py
class TranslationMoETask (line 14) | class TranslationMoETask(TranslationTask):
method add_args (line 39) | def add_args(parser):
method __init__ (line 57) | def __init__(self, args, src_dict, tgt_dict):
method build_model (line 83) | def build_model(self, args):
method expert_index (line 116) | def expert_index(self, i):
method _get_loss (line 119) | def _get_loss(self, sample, model, criterion):
method train_step (line 190) | def train_step(self, sample, model, criterion, optimizer, ignore_grad=...
method valid_step (line 198) | def valid_step(self, sample, model, criterion):
method inference_step (line 204) | def inference_step(
method aggregate_logging_outputs (line 216) | def aggregate_logging_outputs(self, logging_outputs, criterion):
FILE: packages/fairseq-hacked/fairseq/tokenizer.py
function tokenize_line (line 11) | def tokenize_line(line):
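
Note: `tokenize_line` is fairseq's whitespace line tokenizer. A minimal sketch of the behavior this entry implies (assumed from fairseq's conventions, not copied from the vendored source):

```python
import re

_SPACE = re.compile(r"\s+")

def tokenize_line(line):
    # Collapse runs of whitespace, trim the ends, and split on spaces.
    return _SPACE.sub(" ", line).strip().split()
```
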
FILE: packages/fairseq-hacked/fairseq/trainer.py
class Trainer (line 23) | class Trainer(object):
method __init__ (line 33) | def __init__(self, args, task, model, criterion, dummy_batch=None, oom...
method init_meters (line 66) | def init_meters(self, args):
method criterion (line 85) | def criterion(self):
method model (line 100) | def model(self):
method optimizer (line 111) | def optimizer(self):
method lr_scheduler (line 117) | def lr_scheduler(self):
method _build_optimizer (line 122) | def _build_optimizer(self):
method save_checkpoint (line 155) | def save_checkpoint(self, filename, extra_state):
method load_checkpoint (line 171) | def load_checkpoint(
method get_train_iterator (line 253) | def get_train_iterator(
method train_step (line 286) | def train_step(self, samples, dummy_batch=False, raise_oom=False):
method valid_step (line 498) | def valid_step(self, sample, raise_oom=False):
method dummy_train_step (line 563) | def dummy_train_step(self, dummy_batch):
method handle_ooms (line 568) | def handle_ooms(self, number_of_ooms):
method zero_grad (line 577) | def zero_grad(self):
method clear_buffered_stats (line 580) | def clear_buffered_stats(self):
method lr_step (line 583) | def lr_step(self, epoch, val_loss=None):
method lr_step_update (line 589) | def lr_step_update(self):
method get_lr (line 593) | def get_lr(self):
method get_model (line 597) | def get_model(self):
method get_criterion (line 601) | def get_criterion(self):
method get_meter (line 605) | def get_meter(self, name):
method get_num_updates (line 611) | def get_num_updates(self):
method set_num_updates (line 615) | def set_num_updates(self, num_updates):
method _prepare_sample (line 620) | def _prepare_sample(self, sample):
method _set_seed (line 637) | def _set_seed(self):
method _sync_stats (line 645) | def _sync_stats(self):
method _log_oom (line 654) | def _log_oom(self, exc):
FILE: packages/fairseq-hacked/fairseq/utils.py
function load_ensemble_for_inference (line 23) | def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
function apply_to_sample (line 35) | def apply_to_sample(f, sample):
function move_to_cuda (line 52) | def move_to_cuda(sample):
function _get_full_incremental_state_key (line 62) | def _get_full_incremental_state_key(module_instance, key):
function get_incremental_state (line 76) | def get_incremental_state(module, incremental_state, key):
function set_incremental_state (line 84) | def set_incremental_state(module, incremental_state, key, value):
function load_align_dict (line 91) | def load_align_dict(replace_unk):
function print_embed_overlap (line 108) | def print_embed_overlap(embed_dict, vocab_dict):
function parse_embedding (line 115) | def parse_embedding(embed_path):
function load_embedding (line 137) | def load_embedding(embed_dict, vocab, embedding):
function replace_unk (line 145) | def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
function post_process_prediction (line 160) | def post_process_prediction(
function make_positions (line 175) | def make_positions(tensor, padding_idx, onnx_trace=False):
function strip_pad (line 188) | def strip_pad(tensor, pad):
function buffered_arange (line 192) | def buffered_arange(max):
function convert_padding_direction (line 200) | def convert_padding_direction(
function item (line 224) | def item(tensor):
function clip_grad_norm_ (line 232) | def clip_grad_norm_(tensor, max_norm):
function fill_with_neg_inf (line 240) | def fill_with_neg_inf(t):
function resolve_max_positions (line 245) | def resolve_max_positions(*args):
function import_user_module (line 281) | def import_user_module(args):
function softmax (line 299) | def softmax(x, dim, onnx_trace=False):
function log_softmax (line 306) | def log_softmax(x, dim, onnx_trace=False):
function get_perplexity (line 313) | def get_perplexity(loss):
function deprecation_warning (line 320) | def deprecation_warning(message, stacklevel=3):
function get_activation_fn (line 325) | def get_activation_fn(activation: str) -> Callable:
function get_available_activation_fns (line 346) | def get_available_activation_fns() -> List:
function eval (line 358) | def eval(model):
function has_parameters (line 365) | def has_parameters(module):
function set_torch_seed (line 373) | def set_torch_seed(seed):
function parse_alignment (line 381) | def parse_alignment(line):
function get_token_to_word_mapping (line 402) | def get_token_to_word_mapping(tokens, exclude_list):
function extract_hard_alignment (line 410) | def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
function new_arange (line 430) | def new_arange(x, *size):
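
Note: among these utilities, `make_positions` is the one most callers depend on: it assigns position ids that skip padding. A sketch consistent with fairseq's approach (padded slots keep `padding_idx`, real tokens count from `padding_idx + 1`); treat it as illustrative rather than the vendored code:

```python
import torch

def make_positions(tensor, padding_idx):
    # Positions start at padding_idx + 1; padded slots stay at padding_idx,
    # so positional embeddings effectively ignore them.
    mask = tensor.ne(padding_idx).int()
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
```
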
FILE: packages/fairseq-hacked/generate.py
function main (line 16) | def main(args):
function cli_main (line 243) | def cli_main():
FILE: packages/fairseq-hacked/interactive.py
function buffered_read (line 23) | def buffered_read(input, buffer_size):
function make_batches (line 36) | def make_batches(lines, args, task, max_positions, encode_fn):
function main (line 58) | def main(args):
function cli_main (line 190) | def cli_main():
FILE: packages/fairseq-hacked/preprocess.py
function main (line 22) | def main(args):
function binarize (line 320) | def binarize(args, filename, vocab, output_prefix, lang, offset, end, ap...
function binarize_alignments (line 337) | def binarize_alignments(args, filename, parse_alignment, output_prefix, ...
function dataset_dest_prefix (line 354) | def dataset_dest_prefix(args, output_prefix, lang):
function dataset_dest_file (line 366) | def dataset_dest_file(args, output_prefix, lang, extension):
function get_offsets (line 371) | def get_offsets(input_file, num_workers):
function cli_main (line 375) | def cli_main():
FILE: packages/fairseq-hacked/score.py
function get_parser (line 18) | def get_parser():
function main (line 37) | def main():
FILE: packages/fairseq-hacked/scripts/average_checkpoints.py
function average_checkpoints (line 14) | def average_checkpoints(inputs):
function last_n_checkpoints (line 70) | def last_n_checkpoints(paths, n, update_based, upper_bound=None):
function main (line 93) | def main():
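
Note: checkpoint averaging is a standard fairseq trick for squeezing out a little extra accuracy. A minimal sketch of `average_checkpoints`, assuming (as fairseq checkpoints do) that parameters live under a `"model"` key:

```python
import collections
import torch

def average_checkpoints(paths):
    # Element-wise mean of model parameters across several checkpoints.
    avg, n = None, len(paths)
    for p in paths:
        state = torch.load(p, map_location="cpu")["model"]
        if avg is None:
            avg = collections.OrderedDict(
                (k, v.clone().float()) for k, v in state.items()
            )
        else:
            for k in avg:
                avg[k] += state[k].float()
    return collections.OrderedDict((k, v / n) for k, v in avg.items())
```
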
FILE: packages/fairseq-hacked/scripts/build_sym_alignment.py
function main (line 29) | def main():
FILE: packages/fairseq-hacked/scripts/compare_namespaces.py
function main (line 7) | def main():
FILE: packages/fairseq-hacked/scripts/count_docs.py
function main (line 18) | def main():
FILE: packages/fairseq-hacked/scripts/read_binarized.py
function get_parser (line 12) | def get_parser():
function main (line 26) | def main():
FILE: packages/fairseq-hacked/scripts/rm_pt.py
function parse_checkpoints (line 19) | def parse_checkpoints(files):
function last_n_checkpoints (line 32) | def last_n_checkpoints(files, n):
function every_n_checkpoints (line 37) | def every_n_checkpoints(files, n):
function main (line 42) | def main():
FILE: packages/fairseq-hacked/scripts/shard_docs.py
function main (line 15) | def main():
FILE: packages/fairseq-hacked/scripts/split_train_valid_docs.py
function main (line 16) | def main():
FILE: packages/fairseq-hacked/scripts/spm_decode.py
function main (line 15) | def main():
FILE: packages/fairseq-hacked/scripts/spm_encode.py
function main (line 17) | def main():
FILE: packages/fairseq-hacked/scripts/wav2vec_featurize.py
function read_audio (line 26) | def read_audio(fname):
class PretrainedWav2VecModel (line 35) | class PretrainedWav2VecModel(nn.Module):
method __init__ (line 36) | def __init__(self, fname):
method forward (line 47) | def forward(self, x):
class EmbeddingWriterConfig (line 56) | class EmbeddingWriterConfig(argparse.ArgumentParser):
method __init__ (line 57) | def __init__(self):
class Prediction (line 83) | class Prediction:
method __init__ (line 86) | def __init__(self, fname, gpu=0):
method __call__ (line 90) | def __call__(self, x):
class H5Writer (line 98) | class H5Writer:
method __init__ (line 101) | def __init__(self, fname):
method write (line 105) | def write(self, data):
class EmbeddingDatasetWriter (line 114) | class EmbeddingDatasetWriter(object):
method __init__ (line 126) | def __init__(
method _progress (line 154) | def _progress(self, iterable, **kwargs):
method require_output_path (line 159) | def require_output_path(self, fname=None):
method input_path (line 164) | def input_path(self):
method output_path (line 168) | def output_path(self):
method get_input_path (line 171) | def get_input_path(self, fname=None):
method get_output_path (line 176) | def get_output_path(self, fname=None):
method copy_labels (line 181) | def copy_labels(self):
method input_fnames (line 193) | def input_fnames(self):
method __len__ (line 196) | def __len__(self):
method write_features (line 199) | def write_features(self):
method __repr__ (line 219) | def __repr__(self):
FILE: packages/fairseq-hacked/scripts/wav2vec_manifest.py
function get_parser (line 17) | def get_parser():
function main (line 46) | def main(args):
FILE: packages/fairseq-hacked/setup.py
class NumpyExtension (line 26) | class NumpyExtension(Extension):
method __init__ (line 29) | def __init__(self, *args, **kwargs):
method include_dirs (line 34) | def include_dirs(self):
method include_dirs (line 40) | def include_dirs(self, dirs):
FILE: packages/fairseq-hacked/tests/speech_recognition/asr_test_base.py
function get_dummy_dictionary (line 32) | def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
class DummyTask (line 40) | class DummyTask(FairseqTask):
method __init__ (line 41) | def __init__(self, args):
method target_dictionary (line 49) | def target_dictionary(self):
function get_dummy_task_and_parser (line 53) | def get_dummy_task_and_parser():
function get_dummy_input (line 70) | def get_dummy_input(T=100, D=80, B=5, K=100):
function get_dummy_encoder_output (line 104) | def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
function _current_postion_info (line 126) | def _current_postion_info():
function check_encoder_output (line 134) | def check_encoder_output(encoder_output, batch_size=None):
function check_decoder_output (line 201) | def check_decoder_output(decoder_output):
class TestBaseFairseqModelBase (line 228) | class TestBaseFairseqModelBase(unittest.TestCase):
method setUpClass (line 235) | def setUpClass(cls):
method setUpModel (line 240) | def setUpModel(self, model):
method setupInput (line 244) | def setupInput(self):
method setUp (line 247) | def setUp(self):
class TestFairseqEncoderDecoderModelBase (line 253) | class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
method setUpClass (line 260) | def setUpClass(cls):
method setUpModel (line 265) | def setUpModel(self, model_cls, extra_args_setters=None):
method setUpInput (line 281) | def setUpInput(self, input=None):
method setUp (line 284) | def setUp(self):
method test_forward (line 287) | def test_forward(self):
method test_get_normalized_probs (line 297) | def test_get_normalized_probs(self):
class TestFairseqEncoderModelBase (line 316) | class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
method setUpClass (line 322) | def setUpClass(cls):
method setUpModel (line 327) | def setUpModel(self, model_cls, extra_args_setters=None):
method setUpInput (line 342) | def setUpInput(self, input=None):
method setUp (line 348) | def setUp(self):
method test_forward (line 351) | def test_forward(self):
method test_get_normalized_probs (line 365) | def test_get_normalized_probs(self):
class TestFairseqEncoderBase (line 384) | class TestFairseqEncoderBase(unittest.TestCase):
method setUpClass (line 390) | def setUpClass(cls):
method setUpEncoder (line 395) | def setUpEncoder(self, encoder):
method setUpInput (line 402) | def setUpInput(self, input=None):
method setUp (line 408) | def setUp(self):
method test_forward (line 412) | def test_forward(self):
class TestFairseqDecoderBase (line 423) | class TestFairseqDecoderBase(unittest.TestCase):
method setUpClass (line 429) | def setUpClass(cls):
method setUpDecoder (line 434) | def setUpDecoder(self, decoder):
method setUpInput (line 441) | def setUpInput(self, input=None):
method setUpPrevOutputTokens (line 444) | def setUpPrevOutputTokens(self, tokens=None):
method setUp (line 451) | def setUp(self):
method test_forward (line 456) | def test_forward(self):
class DummyEncoderModel (line 472) | class DummyEncoderModel(FairseqEncoderModel):
method __init__ (line 473) | def __init__(self, encoder):
method build_model (line 477) | def build_model(cls, args, task):
method get_logits (line 480) | def get_logits(self, net_output):
class DummyEncoder (line 488) | class DummyEncoder(FairseqEncoder):
method __init__ (line 489) | def __init__(self):
method forward (line 492) | def forward(self, src_tokens, src_lengths):
class CrossEntropyCriterionTestBase (line 497) | class CrossEntropyCriterionTestBase(unittest.TestCase):
method setUpClass (line 499) | def setUpClass(cls):
method setUpArgs (line 504) | def setUpArgs(self):
method setUp (line 510) | def setUp(self):
method get_src_tokens (line 515) | def get_src_tokens(self, correct_prediction, aggregate):
method get_target (line 534) | def get_target(self, soft_target):
method get_test_sample (line 543) | def get_test_sample(self, correct, soft_target, aggregate):
FILE: packages/fairseq-hacked/tests/speech_recognition/test_collaters.py
class TestSeq2SeqCollator (line 14) | class TestSeq2SeqCollator(unittest.TestCase):
method test_collate (line 15) | def test_collate(self):
method assertTensorEqual (line 52) | def assertTensorEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/speech_recognition/test_cross_entropy.py
class CrossEntropyWithAccCriterionTest (line 13) | class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase):
method setUp (line 14) | def setUp(self):
method test_cross_entropy_all_correct (line 18) | def test_cross_entropy_all_correct(self):
method test_cross_entropy_all_wrong (line 28) | def test_cross_entropy_all_wrong(self):
FILE: packages/fairseq-hacked/tests/speech_recognition/test_vggtransformer.py
class VGGTransformerModelTest_mid (line 25) | class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase):
method setUp (line 26) | def setUp(self):
class VGGTransformerModelTest_big (line 44) | class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
method setUp (line 45) | def setUp(self):
class VGGTransformerModelTest_base (line 63) | class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase):
method setUp (line 64) | def setUp(self):
class VGGTransformerEncoderTest (line 82) | class VGGTransformerEncoderTest(TestFairseqEncoderBase):
method setUp (line 83) | def setUp(self):
method test_forward (line 88) | def test_forward(self):
class TransformerDecoderTest (line 125) | class TransformerDecoderTest(TestFairseqDecoderBase):
method setUp (line 126) | def setUp(self):
FILE: packages/fairseq-hacked/tests/test_average_checkpoints.py
class ModelWithSharedParameter (line 20) | class ModelWithSharedParameter(nn.Module):
method __init__ (line 21) | def __init__(self):
method forward (line 32) | def forward(self, input):
class TestAverageCheckpoints (line 36) | class TestAverageCheckpoints(unittest.TestCase):
method test_average_checkpoints (line 37) | def test_average_checkpoints(self):
method test_average_checkpoints_with_shared_parameters (line 90) | def test_average_checkpoints_with_shared_parameters(self):
FILE: packages/fairseq-hacked/tests/test_backtranslation_dataset.py
class TestBacktranslationDataset (line 20) | class TestBacktranslationDataset(unittest.TestCase):
method setUp (line 21) | def setUp(self):
method _backtranslation_dataset_helper (line 36) | def _backtranslation_dataset_helper(
method test_backtranslation_dataset_no_eos_in_output_src (line 99) | def test_backtranslation_dataset_no_eos_in_output_src(self):
method test_backtranslation_dataset_with_eos_in_output_src (line 104) | def test_backtranslation_dataset_with_eos_in_output_src(self):
method test_backtranslation_dataset_no_eos_in_input_src (line 109) | def test_backtranslation_dataset_no_eos_in_input_src(self):
method assertTensorEqual (line 114) | def assertTensorEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_binaries.py
class TestTranslation (line 26) | class TestTranslation(unittest.TestCase):
method test_fconv (line 27) | def test_fconv(self):
method test_raw (line 35) | def test_raw(self):
method test_fp16 (line 45) | def test_fp16(self):
method test_memory_efficient_fp16 (line 53) | def test_memory_efficient_fp16(self):
method test_update_freq (line 63) | def test_update_freq(self):
method test_max_positions (line 73) | def test_max_positions(self):
method test_generation (line 99) | def test_generation(self):
method test_lstm (line 143) | def test_lstm(self):
method test_lstm_bidirectional (line 166) | def test_lstm_bidirectional(self):
method test_transformer (line 192) | def test_transformer(self):
method test_transformer_cross_self_attention (line 214) | def test_transformer_cross_self_attention(self):
method test_lightconv (line 243) | def test_lightconv(self):
method test_dynamicconv (line 264) | def test_dynamicconv(self):
method test_cmlm_transformer (line 285) | def test_cmlm_transformer(self):
method test_levenshtein_transformer (line 318) | def test_levenshtein_transformer(self):
method test_nonautoregressive_transformer (line 350) | def test_nonautoregressive_transformer(self):
method test_iterative_nonautoregressive_transformer (line 386) | def test_iterative_nonautoregressive_transformer(self):
method test_insertion_transformer (line 424) | def test_insertion_transformer(self):
method test_mixture_of_experts (line 454) | def test_mixture_of_experts(self):
method test_alignment (line 495) | def test_alignment(self):
class TestStories (line 523) | class TestStories(unittest.TestCase):
method test_fconv_self_att_wp (line 524) | def test_fconv_self_att_wp(self):
class TestLanguageModeling (line 574) | class TestLanguageModeling(unittest.TestCase):
method test_fconv_lm (line 575) | def test_fconv_lm(self):
method test_transformer_lm (line 596) | def test_transformer_lm(self):
method test_lightconv_lm (line 620) | def test_lightconv_lm(self):
class TestMaskedLanguageModel (line 642) | class TestMaskedLanguageModel(unittest.TestCase):
method test_legacy_masked_lm (line 643) | def test_legacy_masked_lm(self):
method _test_pretrained_masked_lm_for_translation (line 650) | def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, ...
method test_pretrained_masked_lm_for_translation_learned_pos_emb (line 706) | def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
method test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb (line 709) | def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
method test_pretrained_masked_lm_for_translation_encoder_only (line 712) | def test_pretrained_masked_lm_for_translation_encoder_only(self):
function train_legacy_masked_language_model (line 716) | def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
class TestCommonOptions (line 780) | class TestCommonOptions(unittest.TestCase):
method test_optimizers (line 781) | def test_optimizers(self):
function create_dummy_data (line 811) | def create_dummy_data(data_dir, num_examples=1000, maxlen=20, alignment=...
function preprocess_translation_data (line 855) | def preprocess_translation_data(data_dir, extra_flags=None):
function train_translation_model (line 881) | def train_translation_model(
function generate_main (line 934) | def generate_main(data_dir, extra_flags=None):
function preprocess_lm_data (line 972) | def preprocess_lm_data(data_dir):
function train_language_model (line 990) | def train_language_model(data_dir, arch, extra_flags=None, run_validatio...
function eval_lm_main (line 1047) | def eval_lm_main(data_dir):
FILE: packages/fairseq-hacked/tests/test_bmuf.py
class Model (line 17) | class Model(nn.Module):
method __init__ (line 18) | def __init__(self, input_size, output_size):
method forward (line 22) | def forward(self, input):
function setup_model_loss_criterion (line 27) | def setup_model_loss_criterion(args, rank, is_cuda):
function train_step (line 46) | def train_step(input, target, model, loss_fn, optimizer):
function single_gpu_training (line 55) | def single_gpu_training(args, rank, iterations, shared_results):
function setup_args (line 82) | def setup_args():
class TestBMUF (line 108) | class TestBMUF(unittest.TestCase):
method bmuf_process (line 109) | def bmuf_process(self, args, iterations):
method test_bmuf_sync (line 127) | def test_bmuf_sync(self):
method test_warmup_sync (line 133) | def test_warmup_sync(self):
method test_warmup_sync_bmuf_sync (line 140) | def test_warmup_sync_bmuf_sync(self):
method assertAlmostEqual (line 149) | def assertAlmostEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_character_token_embedder.py
class TestCharacterTokenEmbedder (line 13) | class TestCharacterTokenEmbedder(unittest.TestCase):
method test_character_token_embedder (line 14) | def test_character_token_embedder(self):
method assertAlmostEqual (line 42) | def assertAlmostEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_concat_dataset.py
class TestConcatDataset (line 14) | class TestConcatDataset(unittest.TestCase):
method setUp (line 15) | def setUp(self):
method test_concat_dataset_basics (line 42) | def test_concat_dataset_basics(self):
FILE: packages/fairseq-hacked/tests/test_convtbc.py
class TestConvTBC (line 12) | class TestConvTBC(unittest.TestCase):
method test_convtbc (line 13) | def test_convtbc(self):
method assertAlmostEqual (line 47) | def assertAlmostEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_dictionary.py
class TestDictionary (line 14) | class TestDictionary(unittest.TestCase):
method test_finalize (line 15) | def test_finalize(self):
FILE: packages/fairseq-hacked/tests/test_iterators.py
class TestIterators (line 11) | class TestIterators(unittest.TestCase):
method test_counting_iterator (line 12) | def test_counting_iterator(self):
FILE: packages/fairseq-hacked/tests/test_label_smoothing.py
class TestLabelSmoothing (line 20) | class TestLabelSmoothing(unittest.TestCase):
method setUp (line 21) | def setUp(self):
method test_nll_loss (line 63) | def test_nll_loss(self):
method test_padding (line 76) | def test_padding(self):
method test_reduction (line 95) | def test_reduction(self):
method test_zero_eps (line 102) | def test_zero_eps(self):
method assertAlmostEqual (line 114) | def assertAlmostEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_memory_efficient_fp16.py
class TestMemoryEfficientFP16 (line 15) | class TestMemoryEfficientFP16(unittest.TestCase):
method test_load_state_dict (line 16) | def test_load_state_dict(self):
FILE: packages/fairseq-hacked/tests/test_multi_corpus_sampled_dataset.py
class TestMultiCorpusSampledDataset (line 16) | class TestMultiCorpusSampledDataset(unittest.TestCase):
method setUp (line 17) | def setUp(self):
method _test_sample_helper (line 44) | def _test_sample_helper(
method test_multi_corpus_sampled_dataset_uniform_sample (line 77) | def test_multi_corpus_sampled_dataset_uniform_sample(self):
method test_multi_corpus_sampled_dataset_weighted_sample (line 80) | def test_multi_corpus_sampled_dataset_weighted_sample(self):
FILE: packages/fairseq-hacked/tests/test_multihead_attention.py
class TestMultiheadAttention (line 11) | class TestMultiheadAttention(unittest.TestCase):
method test_append_prev_key_padding_mask (line 12) | def test_append_prev_key_padding_mask(self):
FILE: packages/fairseq-hacked/tests/test_noising.py
class TestDataNoising (line 21) | class TestDataNoising(unittest.TestCase):
method _get_test_data_with_bpe_cont_marker (line 22) | def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
method _get_test_data_with_bpe_end_marker (line 57) | def _get_test_data_with_bpe_end_marker(self, append_eos=True):
method _get_test_data_with_word_vocab (line 93) | def _get_test_data_with_word_vocab(self, append_eos=True):
method _convert_src_tokens_to_tensor (line 122) | def _convert_src_tokens_to_tensor(
method assert_eos_at_end (line 140) | def assert_eos_at_end(self, x, x_len, eos):
method assert_word_dropout_correct (line 152) | def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
method test_word_dropout_with_eos (line 159) | def test_word_dropout_with_eos(self):
method assert_word_blanking_correct (line 170) | def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, u...
method test_word_blank_with_eos (line 180) | def test_word_blank_with_eos(self):
method generate_unchanged_shuffle_map (line 191) | def generate_unchanged_shuffle_map(self, length):
method assert_word_shuffle_matches_expected (line 194) | def assert_word_shuffle_matches_expected(
method test_word_shuffle_with_eos (line 246) | def test_word_shuffle_with_eos(self):
method test_word_shuffle_with_eos_nonbpe (line 277) | def test_word_shuffle_with_eos_nonbpe(self):
method test_word_shuffle_without_eos (line 309) | def test_word_shuffle_without_eos(self):
method test_word_shuffle_without_eos_with_bpe_end_marker (line 341) | def test_word_shuffle_without_eos_with_bpe_end_marker(self):
method assert_no_eos_at_end (line 375) | def assert_no_eos_at_end(self, x, x_len, eos):
method test_word_dropout_without_eos (line 386) | def test_word_dropout_without_eos(self):
method test_word_blank_without_eos (line 398) | def test_word_blank_without_eos(self):
method _get_noising_dataset_batch (line 410) | def _get_noising_dataset_batch(
method test_noising_dataset_with_eos (line 447) | def test_noising_dataset_with_eos(self):
method test_noising_dataset_without_eos (line 479) | def test_noising_dataset_without_eos(self):
method assertTensorEqual (line 519) | def assertTensorEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_reproducibility.py
class TestReproducibility (line 16) | class TestReproducibility(unittest.TestCase):
method _test_reproducibility (line 17) | def _test_reproducibility(self, name, extra_flags=None):
method test_reproducibility (line 85) | def test_reproducibility(self):
method test_reproducibility_fp16 (line 88) | def test_reproducibility_fp16(self):
method test_reproducibility_memory_efficient_fp16 (line 93) | def test_reproducibility_memory_efficient_fp16(self):
FILE: packages/fairseq-hacked/tests/test_resampling_dataset.py
class TestResamplingDataset (line 14) | class TestResamplingDataset(unittest.TestCase):
method setUp (line 15) | def setUp(self):
method _test_common (line 23) | def _test_common(self, resampling_dataset, iters):
method test_resampling_dataset_batch_by_size_false (line 66) | def test_resampling_dataset_batch_by_size_false(self):
method test_resampling_dataset_batch_by_size_true (line 84) | def test_resampling_dataset_batch_by_size_true(self):
FILE: packages/fairseq-hacked/tests/test_sequence_generator.py
class TestSequenceGeneratorBase (line 16) | class TestSequenceGeneratorBase(unittest.TestCase):
method assertHypoTokens (line 17) | def assertHypoTokens(self, hypo, tokens):
method assertHypoScore (line 20) | def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
method assertAlmostEqual (line 29) | def assertAlmostEqual(self, t1, t2):
method assertTensorEqual (line 33) | def assertTensorEqual(self, t1, t2):
class TestSequenceGenerator (line 38) | class TestSequenceGenerator(TestSequenceGeneratorBase):
method setUp (line 39) | def setUp(self):
method test_with_normalization (line 52) | def test_with_normalization(self):
method test_without_normalization (line 69) | def test_without_normalization(self):
method test_with_lenpen_favoring_short_hypos (line 90) | def test_with_lenpen_favoring_short_hypos(self):
method test_with_lenpen_favoring_long_hypos (line 108) | def test_with_lenpen_favoring_long_hypos(self):
method test_maxlen (line 126) | def test_maxlen(self):
class TestDiverseBeamSearch (line 144) | class TestDiverseBeamSearch(TestSequenceGeneratorBase):
method setUp (line 145) | def setUp(self):
method test_diverse_beam_search (line 206) | def test_diverse_beam_search(self):
class TestTopPSamplingSearch (line 235) | class TestTopPSamplingSearch(TestSequenceGeneratorBase):
method setUp (line 236) | def setUp(self):
method test_topp_sampling_search_low_prob (line 300) | def test_topp_sampling_search_low_prob(self):
method test_topp_sampling_search_high_prob (line 328) | def test_topp_sampling_search_high_prob(self):
method hypoTokens (line 383) | def hypoTokens(self, hypo, tokens):
method hypoScore (line 386) | def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
method almostEqual (line 397) | def almostEqual(self, t1, t2):
method tensorEqual (line 400) | def tensorEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_sequence_scorer.py
class TestSequenceScorer (line 16) | class TestSequenceScorer(unittest.TestCase):
method test_sequence_scorer (line 17) | def test_sequence_scorer(self):
method assertHypoTokens (line 100) | def assertHypoTokens(self, hypo, tokens):
method assertHypoScore (line 103) | def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
method assertAlmostEqual (line 112) | def assertAlmostEqual(self, t1, t2):
method assertTensorEqual (line 116) | def assertTensorEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/test_sparse_multihead_attention.py
class TestSparseMultiheadAttention (line 11) | class TestSparseMultiheadAttention(unittest.TestCase):
method test_sparse_multihead_attention (line 12) | def test_sparse_multihead_attention(self):
FILE: packages/fairseq-hacked/tests/test_token_block_dataset.py
class TestTokenBlockDataset (line 15) | class TestTokenBlockDataset(unittest.TestCase):
method _build_dataset (line 16) | def _build_dataset(self, data, **kwargs):
method test_eos_break_mode (line 21) | def test_eos_break_mode(self):
method test_block_break_mode (line 42) | def test_block_break_mode(self):
method test_complete_break_mode (line 54) | def test_complete_break_mode(self):
FILE: packages/fairseq-hacked/tests/test_train.py
function mock_trainer (line 16) | def mock_trainer(epoch, num_updates, iterations_in_epoch):
function mock_dict (line 29) | def mock_dict():
function get_trainer_and_epoch_itr (line 37) | def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations...
class TestLoadCheckpoint (line 59) | class TestLoadCheckpoint(unittest.TestCase):
method setUp (line 60) | def setUp(self):
method test_load_partial_checkpoint (line 75) | def test_load_partial_checkpoint(self):
method test_load_full_checkpoint (line 105) | def test_load_full_checkpoint(self):
method test_load_no_checkpoint (line 117) | def test_load_no_checkpoint(self):
method tearDown (line 130) | def tearDown(self):
FILE: packages/fairseq-hacked/tests/test_utils.py
class TestUtils (line 13) | class TestUtils(unittest.TestCase):
method test_convert_padding_direction (line 14) | def test_convert_padding_direction(self):
method test_make_positions (line 32) | def test_make_positions(self):
method assertAlmostEqual (line 54) | def assertAlmostEqual(self, t1, t2):
FILE: packages/fairseq-hacked/tests/utils.py
function dummy_dictionary (line 20) | def dummy_dictionary(vocab_size, prefix="token_"):
function dummy_dataloader (line 29) | def dummy_dataloader(
function sequence_generator_setup (line 50) | def sequence_generator_setup():
class TestDataset (line 152) | class TestDataset(torch.utils.data.Dataset):
method __init__ (line 153) | def __init__(self, data):
method __getitem__ (line 158) | def __getitem__(self, index):
method __len__ (line 161) | def __len__(self):
class TestTranslationTask (line 165) | class TestTranslationTask(FairseqTask):
method __init__ (line 166) | def __init__(self, args, src_dict, tgt_dict, model):
method setup_task (line 173) | def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
method build_model (line 176) | def build_model(self, args):
method source_dictionary (line 180) | def source_dictionary(self):
method target_dictionary (line 184) | def target_dictionary(self):
class TestModel (line 188) | class TestModel(FairseqEncoderDecoderModel):
method __init__ (line 189) | def __init__(self, encoder, decoder):
method build_model (line 193) | def build_model(cls, args, task):
class TestEncoder (line 199) | class TestEncoder(FairseqEncoder):
method __init__ (line 200) | def __init__(self, args, dictionary):
method forward (line 204) | def forward(self, src_tokens, src_lengths=None, **kwargs):
method reorder_encoder_out (line 207) | def reorder_encoder_out(self, encoder_out, new_order):
class TestIncrementalDecoder (line 211) | class TestIncrementalDecoder(FairseqIncrementalDecoder):
method __init__ (line 212) | def __init__(self, args, dictionary):
method forward (line 218) | def forward(self, prev_output_tokens, encoder_out=None, incremental_st...
method get_normalized_probs (line 259) | def get_normalized_probs(self, net_output, log_probs, _):
method max_positions (line 267) | def max_positions(self):
FILE: packages/fairseq-hacked/train.py
function main (line 30) | def main(args, init_distributed=False):
function train (line 125) | def train(args, trainer, task, epoch_itr):
function get_training_stats (line 204) | def get_training_stats(trainer):
function validate (line 229) | def validate(args, trainer, task, epoch_itr, subsets):
function get_valid_stats (line 290) | def get_valid_stats(trainer, args, extra_meters=None):
function distributed_main (line 320) | def distributed_main(i, args, start_rank=0):
function cli_main (line 327) | def cli_main():
FILE: packages/fairseq-hacked/validate.py
function main (line 13) | def main(args, override_args=None):
function cli_main (line 86) | def cli_main():
FILE: step11_final/blending_n_postprocessing.py
function postprocess_single (line 52) | def postprocess_single(target, ref):
function postprocess_prediction (line 79) | def postprocess_prediction(prediction, actual):
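
Note: since the competition metric is rank-based (Spearman), the final postprocessing step typically snaps continuous predictions onto the discrete value grid observed in the training targets, so the tie structure matches. The actual `postprocess_single(target, ref)` may differ in detail; this is a hedged guess at the general technique, assuming `target` holds one column of predictions and `ref` the matching training column:

```python
import numpy as np

def postprocess_single(target, ref):
    # Monotone map from predictions onto the discrete values seen in `ref`:
    # rank order is preserved, so Spearman can only benefit from the
    # matching tie structure (this mapping is an assumption, not the
    # repository's exact rule).
    grid = np.unique(ref)
    ranks = np.searchsorted(np.sort(target), target, side="left") / len(target)
    idx = np.clip((ranks * len(grid)).astype(int), 0, len(grid) - 1)
    return grid[idx]
```
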
FILE: step1_lm_finetuning/callbacks.py
class LosswiseSessionHandler (line 4) | class LosswiseSessionHandler:
method __init__ (line 5) | def __init__(self, api_key, tag="", params=None):
method create_graph (line 14) | def create_graph(
method __getitem__ (line 28) | def __getitem__(self, graph_name):
method done (line 33) | def done(self):
class LosswiseCallback (line 37) | class LosswiseCallback(Callback):
method __init__ (line 38) | def __init__(
method on_train_begin (line 64) | def on_train_begin(self, logs):
method on_train_end (line 77) | def on_train_end(self, logs):
method on_epoch_end (line 81) | def on_epoch_end(self, epoch, logs):
method on_batch_end (line 99) | def on_batch_end(self, batch, logs):
class CSVParamLogger (line 121) | class CSVParamLogger(CSVLogger):
method __init__ (line 122) | def __init__(
method on_train_begin (line 139) | def on_train_begin(self, logs):
FILE: step1_lm_finetuning/data/augmentation/tokenization.py
class BertRandomTokenizer (line 6) | class BertRandomTokenizer(BertTokenizer):
method __init__ (line 7) | def __init__(
method _split_word_piece (line 27) | def _split_word_piece(self, token: str) -> List[str]:
method _tokenize (line 40) | def _tokenize(self, text):
FILE: step1_lm_finetuning/data/dataset.py
class QuestDataset (line 82) | class QuestDataset(Dataset):
method __init__ (line 83) | def __init__(
method _encode_segments (line 124) | def _encode_segments(self, *text_segments: List[Text]) -> List[List[in...
method _process (line 136) | def _process(self, title=None, body=None, answer=None):
method _pad_and_truncate (line 150) | def _pad_and_truncate(self, features, pad_value=0):
method _balance_segments (line 157) | def _balance_segments(
method _prepare_features (line 174) | def _prepare_features(self, title, body, answer):
method _get_text (line 214) | def _get_text(self, index):
method __getitem__ (line 235) | def __getitem__(self, index):
method __len__ (line 248) | def __len__(self):
class TestQuestDataset (line 257) | class TestQuestDataset(QuestDataset):
method __init__ (line 258) | def __init__(
method __getitem__ (line 287) | def __getitem__(self, index):
class QuestSiameseDataset (line 297) | class QuestSiameseDataset(QuestDataset):
method __getitem__ (line 298) | def __getitem__(self, index):
FILE: step1_lm_finetuning/data/make_folds.py
function rareness_split (line 12) | def rareness_split(train_df, least_representative_cols=("question_type_s...
function aggregate_ordinals (line 27) | def aggregate_ordinals(group, agg_func=pd.Series.mode):
function stratified_fold_split_for_common (line 36) | def stratified_fold_split_for_common(
function stratified_fold_split_for_rare (line 82) | def stratified_fold_split_for_rare(
FILE: step1_lm_finetuning/data/sampler.py
class UniformRandomSampler (line 5) | class UniformRandomSampler(Sampler):
method __init__ (line 6) | def __init__(self, data_source, num_samples: int = None):
method num_samples (line 12) | def num_samples(self):
method __iter__ (line 17) | def __iter__(self):
method __len__ (line 27) | def __len__(self):
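
Note: the signature of `UniformRandomSampler` suggests an epoch of fixed length drawn uniformly from the dataset. A self-contained sketch matching the listed methods (sampling *with replacement* is an assumption):

```python
import torch
from torch.utils.data.sampler import Sampler

class UniformRandomSampler(Sampler):
    """Draw `num_samples` indices uniformly (with replacement) per epoch."""

    def __init__(self, data_source, num_samples: int = None):
        self.data_source = data_source
        self._num_samples = num_samples

    @property
    def num_samples(self):
        # Fall back to one pass over the dataset when no size is given.
        return self._num_samples or len(self.data_source)

    def __iter__(self):
        n = len(self.data_source)
        idx = torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64)
        return iter(idx.tolist())

    def __len__(self):
        return self.num_samples
```
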
FILE: step1_lm_finetuning/data_preparation/clean_stack_exchange_qa.py
function merge_all_questions_and_answers (line 13) | def merge_all_questions_and_answers(path_to_parsed_dumps):
function process (line 44) | def process(
function select_answers (line 181) | def select_answers(all_answers, max_answers_per_question=2):
FILE: step1_lm_finetuning/data_preparation/scrape_stack_exchange.py
function get_urls (line 12) | def get_urls(main_url, path_to_dump):
function download_and_unzip_data (line 27) | def download_and_unzip_data(main_url, links, path_to_dump):
function xml_to_pandas (line 39) | def xml_to_pandas(root, columns, row_name="row"):
function parse_xml_dump (line 54) | def parse_xml_dump(pathes):
function parse_dumps (line 131) | def parse_dumps(path_to_dump, out_dir):
function main (line 144) | def main():
FILE: step1_lm_finetuning/train_stackx_lm.py
class QuestMLMDataset (line 58) | class QuestMLMDataset(QuestDataset):
method __init__ (line 59) | def __init__(
method _mask_tokens (line 98) | def _mask_tokens(self, inputs, masked_random_replace_prob=0.2):
method __getitem__ (line 132) | def __getitem__(self, index):
class BertPretrain (line 179) | class BertPretrain(BertForPreTraining):
method __init__ (line 180) | def __init__(self, config, num_labels):
method forward (line 186) | def forward(
function spearman_metric (line 268) | def spearman_metric(y_true, y_pred, return_scores=False, colnames=None):
class Spearman (line 281) | class Spearman(EpochMetric):
class SpearmanCallback (line 282) | class SpearmanCallback(Callback):
method __init__ (line 283) | def __init__(self):
method on_epoch_end (line 286) | def on_epoch_end(self, epoch, logs):
method __init__ (line 289) | def __init__(self, colnames=None):
method forward (line 297) | def forward(self, logits, targets):
method get_metric (line 302) | def get_metric(self):
class MaskLMCrossEntropyLoss (line 314) | class MaskLMCrossEntropyLoss(torch.nn.CrossEntropyLoss):
method forward (line 315) | def forward(self, logits, targets):
class SOPCrossEntropyLoss (line 323) | class SOPCrossEntropyLoss(torch.nn.CrossEntropyLoss):
method forward (line 324) | def forward(self, logits, targets):
class PretrainingLoss (line 331) | class PretrainingLoss(torch.nn.Module):
method __init__ (line 332) | def __init__(self, targets_alpha=1.0):
method forward (line 339) | def forward(self, logits, targets):
class MaskLMPerplexity (line 347) | class MaskLMPerplexity(MaskLMCrossEntropyLoss):
method forward (line 350) | def forward(self, logits, targets):
function sop_accuracy (line 357) | def sop_accuracy(logits, targets):
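
Note: the core of this LM-finetuning file is `_mask_tokens`, BERT-style masked-LM corruption. A sketch of the standard recipe; the `masked_random_replace_prob=0.2` default suggests an 80% `[MASK]` / 20% random-token split among selected positions, but that reading, and every helper name below, is an assumption:

```python
import torch

def mask_tokens(inputs, mask_token_id, vocab_size, special_mask,
                mlm_prob=0.15, masked_random_replace_prob=0.2):
    # inputs: LongTensor of token ids. special_mask: BoolTensor, True at
    # [CLS]/[SEP]/pad positions that must never be masked (hypothetical arg).
    labels = inputs.clone()
    prob = torch.full(labels.shape, mlm_prob)
    prob.masked_fill_(special_mask, 0.0)
    masked = torch.bernoulli(prob).bool()
    labels[~masked] = -100  # ignored by the LM cross-entropy loss

    # Most selected positions become [MASK]; the remainder get random tokens.
    use_mask = torch.bernoulli(
        torch.full(labels.shape, 1 - masked_random_replace_prob)
    ).bool() & masked
    inputs[use_mask] = mask_token_id
    random_ids = torch.randint(vocab_size, labels.shape, dtype=torch.long)
    rand_pos = masked & ~use_mask
    inputs[rand_pos] = random_ids[rand_pos]
    return inputs, labels
```
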
FILE: step1_lm_finetuning/utils.py
function encode_labels (line 11) | def encode_labels(df, target_columns=ALL_TARGETS, method="average"):
function transform_target_columns_to_ordinals (line 22) | def transform_target_columns_to_ordinals(
function torch_to_numpy (line 54) | def torch_to_numpy(obj, copy=False):
function torch_to (line 92) | def torch_to(obj, *args, **kargs):
function torch_apply (line 96) | def torch_apply(obj, func):
function _apply (line 114) | def _apply(obj, func):
function _concat (line 122) | def _concat(obj):
function numpy_to_torch (line 133) | def numpy_to_torch(obj):
FILE: step2_pseudo_labeling/bert-base-pretrained/dataset.py
function _get_masks (line 10) | def _get_masks(tokens, max_seq_length):
function _get_segments (line 17) | def _get_segments(tokens, max_seq_length):
function _get_ids (line 37) | def _get_ids(tokens, tokenizer, max_seq_length):
function _trim_input (line 45) | def _trim_input(
function _convert_to_bert_inputs (line 105) | def _convert_to_bert_inputs(title, question, answer, tokenizer, max_sequ...
function _get_stoken_output (line 117) | def _get_stoken_output(title, question, answer, tokenizer, max_sequence_...
function compute_input_arays (line 124) | def compute_input_arays(
function compute_output_arrays (line 165) | def compute_output_arrays(df, columns):
class BucketingSampler (line 169) | class BucketingSampler:
method __init__ (line 170) | def __init__(self, lengths, batch_size, maxlen=500):
method _make_batches (line 178) | def _make_batches(self, lengths, batch_size, maxlen):
method __len__ (line 206) | def __len__(self):
method __iter__ (line 209) | def __iter__(self):
function make_collate_fn (line 213) | def make_collate_fn(
class QuestDataset (line 242) | class QuestDataset(torch.utils.data.Dataset):
method __init__ (line 243) | def __init__(self, inputs, lengths, labels=None):
method from_frame (line 249) | def from_frame(cls, args, df, tokenizer, test=False):
method __len__ (line 274) | def __len__(self):
method __getitem__ (line 277) | def __getitem__(self, idx):
function cross_validation_split (line 298) | def cross_validation_split(args, train_df, tokenizer, ignore_train=False):
function get_pseudo_set (line 326) | def get_pseudo_set(args, pseudo_df, tokenizer):
function get_test_set (line 330) | def get_test_set(args, test_df, tokenizer):
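
Note: `BucketingSampler` is the interesting piece of this dataset module: batching sequences of similar length keeps per-batch padding (and hence wasted compute) small. A minimal sketch matching the listed interface; the exact bucketing rule inside `_make_batches` is an assumption:

```python
import numpy as np

class BucketingSampler:
    """Yield batches of indices grouped by similar sequence length."""

    def __init__(self, lengths, batch_size, maxlen=500):
        self.batches = self._make_batches(np.asarray(lengths), batch_size, maxlen)

    def _make_batches(self, lengths, batch_size, maxlen):
        # Sort by (capped) length, chunk into batches, then shuffle the
        # batch order so training still sees a randomized stream.
        order = np.argsort(np.clip(lengths, None, maxlen))
        batches = [order[i:i + batch_size]
                   for i in range(0, len(order), batch_size)]
        np.random.shuffle(batches)
        return batches

    def __len__(self):
        return len(self.batches)

    def __iter__(self):
        return iter(self.batches)
```
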
FILE: step2_pseudo_labeling/bert-base-pretrained/evaluation.py
function target_metric (line 8) | def target_metric(prediction, actual, columns=target_columns):
FILE: step2_pseudo_labeling/bert-base-pretrained/loops.py
function train_loop (line 8) | def train_loop(model, train_loader, optimizer, criterion, scheduler, arg...
function evaluate (line 49) | def evaluate(args, model, val_loader, criterion, val_shape):
function infer (line 96) | def infer(args, model, test_loader, test_shape):
FILE: step2_pseudo_labeling/bert-base-pretrained/model.py
class Squeeze (line 19) | class Squeeze(nn.Module):
method __init__ (line 20) | def __init__(self, dim):
method forward (line 24) | def forward(self, x):
class CustomBert (line 28) | class CustomBert(BertPreTrainedModel):
method __init__ (line 29) | def __init__(self, config):
method forward (line 46) | def forward(
function get_model_optimizer (line 88) | def get_model_optimizer(args):
FILE: step2_pseudo_labeling/bert-base-pretrained/run.py
function seed_everything (line 55) | def seed_everything(seed: int):
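
Note: `seed_everything(seed)` is the usual reproducibility helper seen across Kaggle training scripts. A sketch of what such a function conventionally does (the repository's version may set a slightly different subset of flags):

```python
import os
import random

import numpy as np
import torch

def seed_everything(seed: int):
    # Seed every RNG the training loop touches.
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
```
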
FILE: step2_pseudo_labeling/bert-base/dataset.py
function _get_masks (line 10) | def _get_masks(tokens, max_seq_length):
function _get_segments (line 17) | def _get_segments(tokens, max_seq_length):
function _get_ids (line 37) | def _get_ids(tokens, tokenizer, max_seq_length):
function _trim_input (line 45) | def _trim_input(
function _convert_to_bert_inputs (line 105) | def _convert_to_bert_inputs(title, question, answer, tokenizer, max_sequ...
function _get_stoken_output (line 117) | def _get_stoken_output(title, question, answer, tokenizer, max_sequence_...
function compute_input_arays (line 124) | def compute_input_arays(
function compute_output_arrays (line 165) | def compute_output_arrays(df, columns):
class BucketingSampler (line 169) | class BucketingSampler:
method __init__ (line 170) | def __init__(self, lengths, batch_size, maxlen=500):
method _make_batches (line 178) | def _make_batches(self, lengths, batch_size, maxlen):
method __len__ (line 206) | def __len__(self):
method __iter__ (line 209) | def __iter__(self):
function make_collate_fn (line 213) | def make_collate_fn(
class QuestDataset (line 242) | class QuestDataset(torch.utils.data.Dataset):
method __init__ (line 243) | def __init__(self, inputs, lengths, labels=None):
method from_frame (line 249) | def from_frame(cls, args, df, tokenizer, test=False):
method __len__ (line 274) | def __len__(self):
method __getitem__ (line 277) | def __getitem__(self, idx):
function cross_validation_split (line 298) | def cross_validation_split(args, train_df, tokenizer, ignore_train=False):
function get_pseudo_set (line 326) | def get_pseudo_set(args, pseudo_df, tokenizer):
function get_test_set (line 330) | def get_test_set(args, test_df, tokenizer):
FILE: step2_pseudo_labeling/bert-base/evaluation.py
function target_metric (line 8) | def target_metric(prediction, actual, columns=target_columns):
FILE: step2_pseudo_labeling/bert-base/loops.py
function train_loop (line 8) | def train_loop(model, train_loader, optimizer, criterion, scheduler, arg...
function evaluate (line 49) | def evaluate(args, model, val_loader, criterion, val_shape):
function infer (line 96) | def infer(args, model, test_loader, test_shape):
FILE: step2_pseudo_labeling/bert-base/model.py
class Squeeze (line 19) | class Squeeze(nn.Module):
method __init__ (line 20) | def __init__(self, dim):
method forward (line 24) | def forward(self, x):
class CustomBert (line 28) | class CustomBert(BertPreTrainedModel):
method __init__ (line 29) | def __init__(self, config):
method forward (line 46) | def forward(
function get_model_optimizer (line 88) | def get_model_optimizer(args):
FILE: step2_pseudo_labeling/bert-base/run.py
function seed_everything (line 55) | def seed_everything(seed: int):
FILE: step2_pseudo_labeling/bert-large/dataset.py
function _get_masks (line 10) | def _get_masks(tokens, max_seq_length):
function _get_segments (line 17) | def _get_segments(tokens, max_seq_length):
function _get_ids (line 37) | def _get_ids(tokens, tokenizer, max_seq_length):
function _trim_input (line 45) | def _trim_input(
function _convert_to_bert_inputs (line 105) | def _convert_to_bert_inputs(title, question, answer, tokenizer, max_sequ...
function _get_stoken_output (line 117) | def _get_stoken_output(title, question, answer, tokenizer, max_sequence_...
function compute_input_arays (line 124) | def compute_input_arays(
function compute_output_arrays (line 165) | def compute_output_arrays(df, columns):
class BucketingSampler (line 169) | class BucketingSampler:
method __init__ (line 170) | def __init__(self, lengths, batch_size, maxlen=500):
method _make_batches (line 178) | def _make_batches(self, lengths, batch_size, maxlen):
method __len__ (line 206) | def __len__(self):
method __iter__ (line 209) | def __iter__(self):
function make_collate_fn (line 213) | def make_collate_fn(
class QuestDataset (line 242) | class QuestDataset(torch.utils.data.Dataset):
method __init__ (line 243) | def __init__(self, inputs, lengths, labels=None):
method from_frame (line 249) | def from_frame(cls, args, df, tokenizer, test=False):
method __len__ (line 274) | def __len__(self):
method __getitem__ (line 277) | def __getitem__(self, idx):
function cross_validation_split (line 298) | def cross_validation_split(args, train_df, tokenizer, ignore_train=False):
function get_pseudo_set (line 326) | def get_pseudo_set(args, pseudo_df, tokenizer):
function get_test_set (line 330) | def get_test_set(args, test_df, tokenizer):
FILE: step2_pseudo_labeling/bert-large/evaluation.py
function target_metric (line 8) | def target_metric(prediction, actual, columns=target_columns):
FILE: step2_pseudo_labeling/bert-large/loops.py
function train_loop (line 8) | def train_loop(model, train_loader, optimizer, criterion, scheduler, arg...
function evaluate (line 49) | def evaluate(args, model, val_loader, criterion, val_shape):
function infer (line 96) | def infer(args, model, test_loader, test_shape):
FILE: step2_pseudo_labeling/bert-large/model.py
class Squeeze (line 19) | class Squeeze(nn.Module):
method __init__ (line 20) | def __init__(self, dim):
method forward (line 24) | def forward(self, x):
class CustomBert (line 28) | class CustomBert(BertPreTrainedModel):
method __init__ (line 29) | def __init__(self, config):
method forward (line 46) | def forward(
function get_model_optimizer (line 88) | def get_model_optimizer(args):
FILE: step2_pseudo_labeling/bert-large/run.py
function seed_everything (line 55) | def seed_everything(seed: int):
FILE: step3_model1_bert_code/bert.py
function gelu (line 71) | def gelu(x):
function swish (line 80) | def swish(x):
class BertConfig (line 87) | class BertConfig(PretrainedConfig):
method __init__ (line 118) | def __init__(
class BertEmbeddings (line 172) | class BertEmbeddings(nn.Module):
method __init__ (line 176) | def __init__(self, config):
method forward (line 193) | def forward(self, input_ids, token_type_ids=None, position_ids=None):
class BertSelfAttention (line 213) | class BertSelfAttention(nn.Module):
method __init__ (line 214) | def __init__(self, config):
method transpose_for_scores (line 233) | def transpose_for_scores(self, x):
method forward (line 241) | def forward(self, hidden_states, attention_mask, head_mask=None):
class BertSelfOutput (line 281) | class BertSelfOutput(nn.Module):
method __init__ (line 282) | def __init__(self, config):
method forward (line 288) | def forward(self, hidden_states, input_tensor):
class BertAttention (line 295) | class BertAttention(nn.Module):
method __init__ (line 296) | def __init__(self, config):
method prune_heads (line 302) | def prune_heads(self, heads):
method forward (line 329) | def forward(self, input_tensor, attention_mask, head_mask=None):
class BertIntermediate (line 338) | class BertIntermediate(nn.Module):
method __init__ (line 339) | def __init__(self, config):
method forward (line 349) | def forward(self, hidden_states):
class BertOutput (line 355) | class BertOutput(nn.Module):
method __init__ (line 356) | def __init__(self, config):
method forward (line 362) | def forward(self, hidden_states, input_tensor):
class BertLayer (line 369) | class BertLayer(nn.Module):
method __init__ (line 370) | def __init__(self, config):
method forward (line 376) | def forward(self, hidden_states, attention_mask, head_mask=None):
class BertEncoder (line 387) | class BertEncoder(nn.Module):
method __init__ (line 388) | def __init__(self, config):
method forward (line 396) | def forward(self, hidden_states, attention_mask, head_mask=None):
class BertPooler (line 421) | class BertPooler(nn.Module):
method __init__ (line 422) | def __init__(self, config):
method forward (line 427) | def forward(self, hidden_states):
class BertPredictionHeadTransform (line 436) | class BertPredictionHeadTransform(nn.Module):
method __init__ (line 437) | def __init__(self, config):
method forward (line 448) | def forward(self, hidden_states):
class BertLMPredictionHead (line 455) | class BertLMPredictionHead(nn.Module):
method __init__ (line 456) | def __init__(self, config):
method forward (line 466) | def forward(self, hidden_states):
class BertOnlyMLMHead (line 472) | class BertOnlyMLMHead(nn.Module):
method __init__ (line 473) | def __init__(self, config):
method forward (line 477) | def forward(self, sequence_output):
class BertOnlyNSPHead (line 482) | class BertOnlyNSPHead(nn.Module):
method __init__ (line 483) | def __init__(self, config):
method forward (line 487) | def forward(self, pooled_output):
class BertPreTrainingHeads (line 492) | class BertPreTrainingHeads(nn.Module):
method __init__ (line 493) | def __init__(self, config):
method forward (line 498) | def forward(self, sequence_output, pooled_output):
class BertPreTrainedModel (line 504) | class BertPreTrainedModel(PreTrainedModel):
method _init_weights (line 514) | def _init_weights(self, module):
class BertModel (line 596) | class BertModel(BertPreTrainedModel):
method __init__ (line 626) | def __init__(self, config):
method _resize_token_embeddings (line 635) | def _resize_token_embeddings(self, new_num_tokens):
method _prune_heads (line 641) | def _prune_heads(self, heads_to_prune):
method forward (line 649) | def forward(
class BertForPreTraining (line 723) | class BertForPreTraining(BertPreTrainedModel):
method __init__ (line 761) | def __init__(self, config):
method tie_weights (line 770) | def tie_weights(self):
method forward (line 778) | def forward(
class BertForMaskedLM (line 825) | class BertForMaskedLM(BertPreTrainedModel):
method __init__ (line 856) | def __init__(self, config):
method tie_weights (line 865) | def tie_weights(self):
method forward (line 873) | def forward(
class BertForNextSentencePrediction (line 912) | class BertForNextSentencePrediction(BertPreTrainedModel):
method __init__ (line 943) | def __init__(self, config):
method forward (line 951) | def forward(
class BertForSequenceClassification (line 990) | class BertForSequenceClassification(BertPreTrainedModel):
method __init__ (line 1022) | def __init__(self, config):
method forward (line 1032) | def forward(
class BertForMultipleChoice (line 1075) | class BertForMultipleChoice(BertPreTrainedModel):
method __init__ (line 1144) | def __init__(self, config):
method forward (line 1153) | def forward(
class BertForTokenClassification (line 1211) | class BertForTokenClassification(BertPreTrainedModel):
method __init__ (line 1241) | def __init__(self, config):
method forward (line 1251) | def forward(
class BertForQuestionAnswering (line 1296) | class BertForQuestionAnswering(BertPreTrainedModel):
method __init__ (line 1334) | def __init__(self, config):
method forward (line 1343) | def forward(
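
Note: the `gelu` and `swish` entries at the top of this file are the canonical activation definitions from the pytorch-transformers lineage this BERT implementation descends from:

```python
import math

import torch

def gelu(x):
    # Exact GELU via the Gaussian error function.
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def swish(x):
    return x * torch.sigmoid(x)
```
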
FILE: step3_model1_bert_code/callbacks.py
class LosswiseSessionHandler (line 4) | class LosswiseSessionHandler:
method __init__ (line 5) | def __init__(self, api_key, tag="", params=None):
method create_graph (line 14) | def create_graph(
method __getitem__ (line 28) | def __getitem__(self, graph_name):
method done (line 33) | def done(self):
class LosswiseCallback (line 37) | class LosswiseCallback(Callback):
method __init__ (line 38) | def __init__(
method on_train_begin (line 64) | def on_train_begin(self, logs):
method on_train_end (line 77) | def on_train_end(self, logs):
method on_epoch_end (line 81) | def on_epoch_end(self, epoch, logs):
method on_batch_end (line 99) | def on_batch_end(self, batch, logs):
class CSVParamLogger (line 121) | class CSVParamLogger(CSVLogger):
method __init__ (line 122) | def __init__(
method on_train_begin (line 139) | def on_train_begin(self, logs):
FILE: step3_model1_bert_code/data/augmentation/tokenization.py
class BertRandomTokenizer (line 6) | class BertRandomTokenizer(BertTokenizer):
method __init__ (line 7) | def __init__(
method _split_word_piece (line 27) | def _split_word_piece(self, token: str) -> List[str]:
method _tokenize (line 40) | def _tokenize(self, text):
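
BertRandomTokenizer overrides _tokenize and adds _split_word_piece, which suggests a token-level augmentation: occasionally forcing a word to be segmented into smaller pieces so the model sees varied WordPiece splits. A hypothetical sketch of that idea (the probability and the cut rule are assumptions):

```python
import random
from typing import Callable, List

def random_wordpiece_split(token: str,
                           wordpiece_fn: Callable[[str], List[str]],
                           p: float = 0.1) -> List[str]:
    # With probability p, cut the token at a random position and tokenize the
    # halves separately; continuation pieces get BERT's '##' prefix.
    if len(token) > 3 and random.random() < p:
        cut = random.randint(1, len(token) - 1)
        left = wordpiece_fn(token[:cut])
        right = [w if w.startswith("##") else "##" + w
                 for w in wordpiece_fn(token[cut:])]
        return left + right
    return wordpiece_fn(token)
```
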
FILE: step3_model1_bert_code/data/dataset.py
class QuestDataset (line 82) | class QuestDataset(Dataset):
method __init__ (line 83) | def __init__(
method _encode_segments (line 124) | def _encode_segments(self, *text_segments: List[Text]) -> List[List[in...
method _process (line 136) | def _process(self, title=None, body=None, answer=None):
method _pad_and_truncate (line 150) | def _pad_and_truncate(self, features, pad_value=0):
method _balance_segments (line 157) | def _balance_segments(
method _prepare_features (line 174) | def _prepare_features(self, title, body, answer):
method _get_text (line 214) | def _get_text(self, index):
method __getitem__ (line 235) | def __getitem__(self, index):
method __len__ (line 248) | def __len__(self):
class TestQuestDataset (line 257) | class TestQuestDataset(QuestDataset):
method __init__ (line 258) | def __init__(
method __getitem__ (line 287) | def __getitem__(self, index):
class QuestSiameseDataset (line 297) | class QuestSiameseDataset(QuestDataset):
method __getitem__ (line 298) | def __getitem__(self, index):
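
QuestDataset prepares (title, body, answer) triples, pads/truncates them, and balances the per-segment budgets. The experiment configs in this repo carry a head_tail flag, so the truncation is presumably the familiar head-plus-tail trick: keep the start and the end of an over-long sequence and drop the middle. A minimal sketch, with the 50/50 split being an assumption:

```python
from typing import List

def head_tail_truncate(tokens: List[int], max_len: int,
                       head_frac: float = 0.5) -> List[int]:
    """Keep head_frac of the budget from the front, the rest from the back."""
    if len(tokens) <= max_len:
        return tokens
    n_head = int(max_len * head_frac)
    n_tail = max_len - n_head
    return tokens[:n_head] + tokens[-n_tail:]
```
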
FILE: step3_model1_bert_code/data/make_folds.py
function rareness_split (line 12) | def rareness_split(train_df, least_representative_cols=("question_type_s...
function aggregate_ordinals (line 27) | def aggregate_ordinals(group, agg_func=pd.Series.mode):
function stratified_fold_split_for_common (line 36) | def stratified_fold_split_for_common(
function stratified_fold_split_for_rare (line 82) | def stratified_fold_split_for_rare(
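
make_folds.py splits rows into rare and common label patterns and stratifies each separately. A simplified, hedged version of the common case (assumes scikit-learn and a single precomputed stratification column; the repo's rare/common handling is more involved):

```python
import pandas as pd
from sklearn.model_selection import StratifiedKFold

def assign_folds(df: pd.DataFrame, strat_col: str,
                 n_folds: int = 5, seed: int = 42) -> pd.DataFrame:
    df = df.copy()
    df["fold"] = -1
    skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed)
    for fold, (_, val_idx) in enumerate(skf.split(df, df[strat_col])):
        df.loc[df.index[val_idx], "fold"] = fold  # val_idx is positional
    return df
```
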
FILE: step3_model1_bert_code/data/sampler.py
class UniformRandomSampler (line 5) | class UniformRandomSampler(Sampler):
method __init__ (line 6) | def __init__(self, data_source, num_samples: int = None):
method num_samples (line 12) | def num_samples(self):
method __iter__ (line 17) | def __iter__(self):
method __len__ (line 27) | def __len__(self):
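
UniformRandomSampler exposes num_samples and an __iter__, i.e. it draws a fixed number of indices per epoch regardless of dataset size. A sketch of that contract (sampling with replacement is an assumption):

```python
import torch
from torch.utils.data import Sampler

class FixedSizeRandomSampler(Sampler):
    def __init__(self, data_source, num_samples: int = None):
        self.data_source = data_source
        self._num_samples = num_samples or len(data_source)

    def __iter__(self):
        # With replacement, so num_samples may exceed len(data_source).
        n = len(self.data_source)
        return iter(torch.randint(high=n, size=(self._num_samples,)).tolist())

    def __len__(self):
        return self._num_samples
```
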
FILE: step3_model1_bert_code/metrics.py
function spearman_metric (line 9) | def spearman_metric(y_true, y_pred, return_scores=False, colnames=None):
class Spearman (line 22) | class Spearman(EpochMetric):
class SpearmanCallback (line 23) | class SpearmanCallback(Callback):
method __init__ (line 24) | def __init__(self):
method on_epoch_end (line 27) | def on_epoch_end(self, epoch, logs):
method __init__ (line 30) | def __init__(self, colnames=None):
method forward (line 38) | def forward(self, y_pred, y_true):
method get_metric (line 42) | def get_metric(self):
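
The competition scores submissions by the mean column-wise Spearman rank correlation, which is what spearman_metric evaluates. A minimal equivalent (assumes scipy and numpy; NaNs from constant columns are skipped):

```python
import numpy as np
from scipy.stats import spearmanr

def mean_spearman(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    scores = [spearmanr(y_true[:, i], y_pred[:, i]).correlation
              for i in range(y_true.shape[1])]
    return float(np.nanmean(scores))
```
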
FILE: step3_model1_bert_code/models.py
class BertForQuestRegression (line 8) | class BertForQuestRegression(BertPreTrainedModel):
method __init__ (line 9) | def __init__(self, config, head_dropout=None):
method forward (line 22) | def forward(
method load (line 45) | def load(self, checkpoint, strict=True, **cfg_args):
class RobertaForQuestRegression (line 53) | class RobertaForQuestRegression(BertPreTrainedModel):
method __init__ (line 54) | def __init__(self, config):
method forward (line 63) | def forward(
method load (line 86) | def load(self, checkpoint, strict=True, **cfg_args):
class CustomBert (line 94) | class CustomBert(transformers.BertPreTrainedModel):
method __init__ (line 95) | def __init__(self, config):
method forward (line 112) | def forward(
function get_optimizer (line 147) | def get_optimizer(model, learning_rate, backbone_prefix="bert"):
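
get_optimizer(model, learning_rate, backbone_prefix="bert") points at the standard BERT fine-tuning recipe: AdamW with weight decay disabled for biases and LayerNorm weights. A hedged sketch of that grouping (the exact groups, and whether the head gets its own learning rate, are assumptions):

```python
from torch.optim import AdamW  # the repo more likely uses transformers' AdamW

def build_optimizer(model, lr: float, weight_decay: float = 0.01):
    no_decay = ("bias", "LayerNorm.weight")
    grouped = [
        {"params": [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         "weight_decay": weight_decay},
        {"params": [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]
    return AdamW(grouped, lr=lr)
```
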
FILE: step3_model1_bert_code/schedule.py
class _PyTorchLRSchedulerWrapper (line 6) | class _PyTorchLRSchedulerWrapper(Callback):
method __init__ (line 7) | def __init__(self, torch_lr_scheduler, *args, **kwargs):
method on_train_begin (line 15) | def on_train_begin(self, logs):
method on_batch_end (line 25) | def on_batch_end(self, batch, logs):
method load_state (line 28) | def load_state(self, f):
method save_state (line 34) | def save_state(self, f):
class _TotalStepWrapper (line 38) | class _TotalStepWrapper(_PyTorchLRSchedulerWrapper):
method on_train_begin (line 39) | def on_train_begin(self, logs):
class ConstantLRSchedule (line 60) | class ConstantLRSchedule(_PyTorchLRSchedulerWrapper):
method __init__ (line 64) | def __init__(self, last_epoch=-1):
class WarmupConstantSchedule (line 68) | class WarmupConstantSchedule(_PyTorchLRSchedulerWrapper):
method __init__ (line 74) | def __init__(self, warmup_steps, last_epoch=-1):
class WarmupLinearSchedule (line 80) | class WarmupLinearSchedule(_TotalStepWrapper):
method __init__ (line 86) | def __init__(self, warmup_steps, t_total=None, last_epoch=-1):
class WarmupCosineSchedule (line 95) | class WarmupCosineSchedule(_TotalStepWrapper):
method __init__ (line 102) | def __init__(self, warmup_steps, t_total=None, cycles=0.5, last_epoch=...
class WarmupCosineWithHardRestartsSchedule (line 112) | class WarmupCosineWithHardRestartsSchedule(_TotalStepWrapper):
method __init__ (line 119) | def __init__(self, warmup_steps, t_total=None, cycles=1.0, last_epoch=...
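
These schedule classes wrap torch LR schedulers as callbacks; WarmupLinearSchedule, for instance, is the usual linear-warmup-then-linear-decay ramp. The multiplier can be expressed with LambdaLR:

```python
from torch.optim.lr_scheduler import LambdaLR

def warmup_linear(optimizer, warmup_steps: int, t_total: int):
    def lr_lambda(step: int) -> float:
        if step < warmup_steps:
            return step / max(1, warmup_steps)  # linear warmup
        return max(0.0, (t_total - step) / max(1, t_total - warmup_steps))
    return LambdaLR(optimizer, lr_lambda)
```
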
FILE: step3_model1_bert_code/train.py
function get_model (line 33) | def get_model():
FILE: step3_model1_bert_code/utils.py
function encode_labels (line 10) | def encode_labels(df, target_columns=ALL_TARGETS, method="average"):
function transform_target_columns_to_ordinals (line 21) | def transform_target_columns_to_ordinals(
function torch_to_numpy (line 53) | def torch_to_numpy(obj, copy=False):
function torch_to (line 91) | def torch_to(obj, *args, **kargs):
function torch_apply (line 95) | def torch_apply(obj, func):
function _apply (line 113) | def _apply(obj, func):
function _concat (line 121) | def _concat(obj):
function numpy_to_torch (line 132) | def numpy_to_torch(obj):
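
torch_apply/_apply and the torch_to_numpy/numpy_to_torch pair suggest a recursive map over nested containers of tensors. A minimal version of the pattern:

```python
import torch

def torch_apply(obj, func):
    """Apply func to every tensor inside nested dicts/lists/tuples."""
    if torch.is_tensor(obj):
        return func(obj)
    if isinstance(obj, dict):
        return {k: torch_apply(v, func) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(torch_apply(v, func) for v in obj)
    return obj

# torch_to_numpy(batch) is then plausibly
# torch_apply(batch, lambda t: t.detach().cpu().numpy())
```
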
FILE: step4_model2_bert_code/dataset.py
function _get_masks (line 12) | def _get_masks(tokens, max_seq_length):
function _get_segments (line 19) | def _get_segments(tokens, max_seq_length):
function _get_ids (line 39) | def _get_ids(tokens, tokenizer, max_seq_length):
function _trim_input (line 47) | def _trim_input(
function _convert_to_bert_inputs (line 107) | def _convert_to_bert_inputs(title, question, answer, tokenizer, max_sequ...
function _get_stoken_output (line 119) | def _get_stoken_output(title, question, answer, tokenizer, max_sequence_...
function compute_input_arays (line 126) | def compute_input_arays(
function compute_output_arrays (line 167) | def compute_output_arrays(df, columns):
class BucketingSampler (line 171) | class BucketingSampler:
method __init__ (line 172) | def __init__(self, lengths, batch_size, maxlen=500):
method _make_batches (line 180) | def _make_batches(self, lengths, batch_size, maxlen):
method __len__ (line 208) | def __len__(self):
method __iter__ (line 211) | def __iter__(self):
function make_collate_fn (line 215) | def make_collate_fn(
class QuestDataset (line 244) | class QuestDataset(torch.utils.data.Dataset):
method __init__ (line 245) | def __init__(self, inputs, lengths, labels=None):
method from_frame (line 251) | def from_frame(cls, args, df, tokenizer, test=False):
method __len__ (line 276) | def __len__(self):
method __getitem__ (line 279) | def __getitem__(self, idx):
function cross_validation_split (line 300) | def cross_validation_split(args, train_df, tokenizer, ignore_train=False):
function get_pseudo_set (line 328) | def get_pseudo_set(args, pseudo_df, tokenizer):
function get_test_set (line 332) | def get_test_set(args, test_df, tokenizer):
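
BucketingSampler(lengths, batch_size, maxlen) batches examples of similar token length together, which cuts padding waste in variable-length batches. A simplified sketch (the real _make_batches also honors maxlen; the shuffling details are assumptions):

```python
import random
from typing import List

def make_length_buckets(lengths: List[int], batch_size: int) -> List[List[int]]:
    order = sorted(range(len(lengths)), key=lengths.__getitem__)
    batches = [order[i:i + batch_size]
               for i in range(0, len(order), batch_size)]
    random.shuffle(batches)  # random batch order, length-homogeneous batches
    return batches
```
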
FILE: step4_model2_bert_code/evaluation.py
function target_metric (line 8) | def target_metric(prediction, actual, columns=target_columns):
FILE: step4_model2_bert_code/loops.py
function train_loop (line 8) | def train_loop(model, train_loader, optimizer, criterion, scheduler, arg...
function evaluate (line 49) | def evaluate(args, model, val_loader, criterion, val_shape):
function infer (line 96) | def infer(args, model, test_loader, test_shape):
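
train_loop takes an optimizer, criterion, and scheduler, and the experiment configs include a batch_accumulation field, so gradient accumulation is presumably in play. A hedged skeleton of such a loop (argument handling and device moves are simplified):

```python
def train_loop(model, loader, optimizer, criterion, scheduler,
               accum_steps: int = 2, device: str = "cuda"):
    model.train()
    optimizer.zero_grad()
    for step, (inputs, targets) in enumerate(loader):
        preds = model(*[x.to(device) for x in inputs])
        loss = criterion(preds, targets.to(device)) / accum_steps  # scale for accumulation
        loss.backward()
        if (step + 1) % accum_steps == 0:
            optimizer.step()
            scheduler.step()  # per-optimizer-step LR schedule
            optimizer.zero_grad()
```
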
FILE: step4_model2_bert_code/model.py
class Squeeze (line 19) | class Squeeze(nn.Module):
method __init__ (line 20) | def __init__(self, dim):
method forward (line 24) | def forward(self, x):
class CustomBert (line 28) | class CustomBert(BertPreTrainedModel):
method __init__ (line 29) | def __init__(self, config):
method forward (line 46) | def forward(
function get_model_optimizer (line 88) | def get_model_optimizer(args):
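
Squeeze is presumably a thin nn.Module wrapper around Tensor.squeeze so the op can sit inside an nn.Sequential head:

```python
import torch.nn as nn

class Squeeze(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        return x.squeeze(self.dim)
```
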
FILE: step4_model2_bert_code/run.py
function seed_everything (line 55) | def seed_everything(seed: int):
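
seed_everything(seed: int) is, in all likelihood, the standard Kaggle reproducibility snippet; a typical version looks like this:

```python
import os
import random

import numpy as np
import torch

def seed_everything(seed: int):
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
```
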
FILE: step5_model3_roberta_code/augmentation.py
class BertRandomTokenizer (line 6) | class BertRandomTokenizer(BertTokenizer):
method __init__ (line 7) | def __init__(
method _split_word_piece (line 27) | def _split_word_piece(self, token: str) -> List[str]:
method _tokenize (line 40) | def _tokenize(self, text):
FILE: step5_model3_roberta_code/dataset.py
function _get_masks (line 9) | def _get_masks(tokens, tokenizer, max_seq_length):
function _get_segments (line 17) | def _get_segments(tokens, tokenizer, max_seq_length):
function _get_ids (line 38) | def _get_ids(tokens, tokenizer, max_seq_length):
function _trim_input (line 47) | def _trim_input(
function _convert_to_bert_inputs (line 107) | def _convert_to_bert_inputs(
function compute_input_arrays (line 140) | def compute_input_arrays(
function compute_output_arrays (line 189) | def compute_output_arrays(df, columns):
class QuestDataset (line 193) | class QuestDataset(torch.utils.data.Dataset):
method __init__ (line 194) | def __init__(
method from_frame (line 214) | def from_frame(
method __getitem__ (line 234) | def __getitem__(self, idx):
method __len__ (line 270) | def __len__(self):
function cross_validation_split (line 274) | def cross_validation_split(args, train_df, tokenizer, ignore_train=False):
function get_pseudo_set (line 302) | def get_pseudo_set(args, pseudo_df, tokenizer):
function get_test_set (line 306) | def get_test_set(args, test_df, tokenizer):
FILE: step5_model3_roberta_code/evaluation.py
function target_metric (line 6) | def target_metric(prediction, actual):
FILE: step5_model3_roberta_code/loops.py
function train_loop (line 8) | def train_loop(model, train_loader, optimizer, criterion, scheduler, arg...
function evaluate (line 44) | def evaluate(args, model, val_loader, criterion, val_shape):
function infer (line 84) | def infer(args, model, test_loader, test_shape):
FILE: step5_model3_roberta_code/model.py
class Squeeze (line 10) | class Squeeze(nn.Module):
method __init__ (line 11) | def __init__(self, dim):
method forward (line 15) | def forward(self, x):
class CustomBert (line 19) | class CustomBert(BertPreTrainedModel):
method __init__ (line 20) | def __init__(self, config):
method forward (line 37) | def forward(
class CustomRoberta (line 75) | class CustomRoberta(BertPreTrainedModel):
method __init__ (line 76) | def __init__(self, config):
method forward (line 93) | def forward(
function get_model_optimizer (line 132) | def get_model_optimizer(args):
FILE: step5_model3_roberta_code/run.
Condensed preview — 498 files, each showing path, character count, and a content snippet.
[
{
"path": ".gitignore",
"chars": 2562,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
},
{
"path": "README.md",
"chars": 9394,
"preview": "# Google QUEST Q&A Labeling 1st place solution\n\nBelow you can find an outline of how to reproduce our solution for the G"
},
{
"path": "bash/blending_n_postprocessing.sh",
"chars": 62,
"preview": "#!/bin/bash\n\npython step11_final/blending_n_postprocessing.py\n"
},
{
"path": "bash/download_all_model_ckpts_for_inference.sh",
"chars": 1808,
"preview": "#!/bin/bash\n\n# model 1\n(kaggle datasets download -d kashnitsky/google-qa-quest-labeling-bibimorph-model-1-5-folds > /dev"
},
{
"path": "bash/download_comp_data.sh",
"chars": 199,
"preview": "#!/bin/bash\n\n# competition data\n(kaggle competitions download -c google-quest-challenge && \\\nunzip google-quest-challeng"
},
{
"path": "bash/inference/model1_inference.sh",
"chars": 322,
"preview": "#!/bin/bash\n\npython steps7_10_inference/model1_bert_code/predict_test.py \\\n --model_dir input/model1_ckpt/ "
},
{
"path": "bash/inference/model2_inference.sh",
"chars": 683,
"preview": "#!/bin/bash\n\npython steps7_10_inference/model2_bert_code/run.py \\\n --sub_file=submissions/model2_bert_base_cased_"
},
{
"path": "bash/inference/model3_inference.sh",
"chars": 725,
"preview": "#!/bin/bash\n\n# some mag setup to go on with the Experiment\nROBERTA_EXPERIMENT_DIR=2-4-roberta-base-saved-5-head_tail-rob"
},
{
"path": "bash/inference/model4_inference.sh",
"chars": 570,
"preview": "#!/bin/bash\n\npython steps7_10_inference/model4_bart_code/run.py \\\n --sub_file=submissions/model4_bart_large_pred.csv "
},
{
"path": "bash/inference/run_inference.sh",
"chars": 1015,
"preview": "#!/bin/bash\n\n#echo \"Downloading all model checkpoints\" && \\\n#sh bash/download_all_mo"
},
{
"path": "bash/pseudo/create_all_pseudo_labels.sh",
"chars": 442,
"preview": "#!/bin/bash\n\n# train three models\nsh bash/pseudo/train_base.sh && \\\nsh bash/pseudo/train_base_pr"
},
{
"path": "bash/pseudo/create_all_pseudo_labels_toy.sh",
"chars": 470,
"preview": "#!/bin/bash\n\n# train three models\nsh bash/pseudo/train_base.sh toy && \\\nsh bash/pseudo/train_bas"
},
{
"path": "bash/pseudo/create_pseudo_base.sh",
"chars": 500,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n dataframe=input/qa_stackexchange_cleaned_toy.csv\nelse\n da"
},
{
"path": "bash/pseudo/create_pseudo_base_pretrained.sh",
"chars": 543,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n dataframe=input/qa_stackexchange_cleaned_toy.csv\nelse\n da"
},
{
"path": "bash/pseudo/create_pseudo_large.sh",
"chars": 513,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n dataframe=input/qa_stackexchange_cleaned_toy.csv\nelse\n da"
},
{
"path": "bash/pseudo/train_base.sh",
"chars": 747,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n epochs=1\nelse \n epochs=5\nfi\n\npython step2_pseudo_labeling"
},
{
"path": "bash/pseudo/train_base_pretrained.sh",
"chars": 949,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n epochs=1\nelse\n epochs=3\nfi\n\npython step2_pseudo_labeling/"
},
{
"path": "bash/pseudo/train_large.sh",
"chars": 819,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n epochs=1\nelse\n epochs=5\nfi\n\npython step2_pseudo_labeling/"
},
{
"path": "bash/setup.sh",
"chars": 431,
"preview": "#!/bin/bash\n\n# install mag,\n# a custom lightweight library to keep track of experiments\ngit clone https://github.com/ex4"
},
{
"path": "bash/training/load_roberta_weights.sh",
"chars": 524,
"preview": "#!/bin/bash\n\nmkdir input/roberta-base/; \\\nwget https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-config.j"
},
{
"path": "bash/training/train1a_prepare_stackx_data.sh",
"chars": 199,
"preview": "#!/bin/bash\n\n# scraping StackExchange\npython step1_lm_finetuning/data_preparation/scrape_stack_exchange.py\n\n# processing"
},
{
"path": "bash/training/train1b_train_bert_stackx_lang_model.sh",
"chars": 58,
"preview": "#!/bin/bash\n\npython step1_lm_finetuning/train_stackx_lm.py"
},
{
"path": "bash/training/train2_pseudo_labels.sh",
"chars": 104,
"preview": "#!/bin/bash\n\n# actually not needed here, just for consistency\nsh bash/pseudo/create_all_pseudo_labels.sh"
},
{
"path": "bash/training/train3_bert_base_cased_stackx_pretrained.sh",
"chars": 74,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\npython step3_model1_bert_code/train.py $toy\n"
},
{
"path": "bash/training/train4_bert_base_cased_stackx_with_pseudo_labels.sh",
"chars": 648,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n epochs=1\n n_pseudo=20\nelse\n epochs=5\n n_pseudo=2000"
},
{
"path": "bash/training/train5_roberta_with_pseudo_labels.sh",
"chars": 628,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n epochs=1\nelse\n epochs=5\nfi\n\npython step5_model3_roberta_c"
},
{
"path": "bash/training/train6_bart_with_pseudo_labels.sh",
"chars": 619,
"preview": "#!/bin/bash\n\ntoy=${1:-False}\n\nif [ $toy = 'toy' ]; then\n epochs=1\nelse\n epochs=4\nfi\n\npython step6_model4_bart_code"
},
{
"path": "experiments/1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200/command",
"chars": 415,
"preview": "step4_model2_bert_code/run.py --epochs=5 --max_sequence_length=500 --max_title_length=26 --max_question_length=260 --max"
},
{
"path": "experiments/1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200/commit_hash",
"chars": 40,
"preview": "5386dc5f3ba53b28bbe7628630cb365ccac00122"
},
{
"path": "experiments/1-8-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-200/config.json",
"chars": 437,
"preview": "{\n \"_bert_model\": \"input/stackx-base-cased\",\n \"_pseudo_file\": \"pseudo-predictions/pseudo-100k-3x-blend-no-leak/fol"
},
{
"path": "experiments/2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200/command",
"chars": 434,
"preview": "step5_model3_roberta_code/run.py --epochs=5 --max_sequence_length=500 --max_title_length=26 --max_question_length=260 --"
},
{
"path": "experiments/2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200/commit_hash",
"chars": 40,
"preview": "1c4ae95b166fc5aea653bef3b4ecda64fbdbe9cf"
},
{
"path": "experiments/2-4-5-head_tail-pseudonoleakrandom100k-1e-05-210-260-500-26-roberta-200/config.json",
"chars": 461,
"preview": "{\n \"_bert_model\": \"input/roberta-base\",\n \"_pseudo_file\": \"pseudo-predictions/pseudo-100k-3x-blend-no-leak/fold-{}."
},
{
"path": "experiments/2-4-roberta-base-saved-5-head_tail-roberta-stackx-base-v2-pl1kksample20k-1e-05-210-260-500-26-roberta-200/config.json",
"chars": 331,
"preview": "{\"_seed\": 42, \"batch_accumulation\": 2, \"batch_size\": 4, \"bert_model\": \"roberta-base-saved\", \"folds\": 5, \"head_tail\": tru"
},
{
"path": "experiments/2-4-roberta-base-saved-5-head_tail-roberta-stackx-base-v2-pl1kksample20k-1e-05-210-260-500-26-roberta-200/config_train.json",
"chars": 520,
"preview": "{\n \"_pseudo_file\": \"/data/dis/monty/common_crawl/1kk/pseudo-1kk-blend-fold-{}.csv.gz\",\n \"_seed\": 42,\n \"batch_ac"
},
{
"path": "experiments/4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250/command",
"chars": 418,
"preview": "step6_model4_bart_code/run.py --data_path=input/google-quest-challenge --epochs=1 --max_sequence_length=500 --max_title_"
},
{
"path": "experiments/4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250/commit_hash",
"chars": 40,
"preview": "5386dc5f3ba53b28bbe7628630cb365ccac00122"
},
{
"path": "experiments/4-2-5-head_tail-bart-2e-05-210-260-500-26-split_pseudo-250/config.json",
"chars": 451,
"preview": "{\n \"_bert_model\": \"input/model4_ckpt/bart.large/\",\n \"_pseudo_file\": \"pseudo-predictions/pseudo-100k-3x-blend-no-le"
},
{
"path": "input/google-quest-challenge/sample_submission_toy.csv",
"chars": 4394,
"preview": "qa_id,question_asker_intent_understanding,question_body_critical,question_conversational,question_expect_short_answer,qu"
},
{
"path": "input/google-quest-challenge/test_toy.csv",
"chars": 31593,
"preview": "qa_id,question_title,question_body,question_user_name,question_user_page,answer,answer_user_name,answer_user_page,url,ca"
},
{
"path": "input/google-quest-challenge/train_toy.csv",
"chars": 118912,
"preview": "qa_id,question_title,question_body,question_user_name,question_user_page,answer,answer_user_name,answer_user_page,url,ca"
},
{
"path": "input/qa_stackexchange_cleaned.csv",
"chars": 7853187,
"preview": "id,host,question_username,question_score,question_views,question_favs,answers_count,answers_max_score,answers_mean_score"
},
{
"path": "input/qa_stackexchange_cleaned_toy.csv",
"chars": 79559,
"preview": "id,host,question_username,question_score,question_views,question_favs,answers_count,answers_max_score,answers_mean_score"
},
{
"path": "input/stackx-base-cased/config.json",
"chars": 522,
"preview": "{\"output_attentions\": false, \"output_hidden_states\": false, \"output_past\": true, \"torchscript\": false, \"use_bfloat16\": f"
},
{
"path": "input/stackx-base-cased/stackx-base-cased-config.json",
"chars": 522,
"preview": "{\"output_attentions\": false, \"output_hidden_states\": false, \"output_past\": true, \"torchscript\": false, \"use_bfloat16\": f"
},
{
"path": "input/stackx-base-cased/stackx-base-cased-vocab.txt",
"chars": 842035,
"preview": "[PAD]\n[unused1]\n[unused2]\n[unused3]\n[unused4]\n[unused5]\n[unused6]\n[unused7]\n[unused8]\n[unused9]\n[unused10]\n[unused11]\n[u"
},
{
"path": "input/stackx-base-cased/training_log.csv",
"chars": 802,
"preview": "epoch,time,lr,loss,spearman,val_loss,val_spearman,question_asker_intent_understanding,question_body_critical,question_co"
},
{
"path": "input/stackx-base-cased/vocab.txt",
"chars": 842035,
"preview": "[PAD]\n[unused1]\n[unused2]\n[unused3]\n[unused4]\n[unused5]\n[unused6]\n[unused7]\n[unused8]\n[unused9]\n[unused10]\n[unused11]\n[u"
},
{
"path": "packages/fairseq-hacked/.gitignore",
"chars": 1604,
"preview": "# JetBrains PyCharm IDE\n.idea/\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extension"
},
{
"path": "packages/fairseq-hacked/CODE_OF_CONDUCT.md",
"chars": 3356,
"preview": "# Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as\ncontributors and"
},
{
"path": "packages/fairseq-hacked/CONTRIBUTING.md",
"chars": 1144,
"preview": "# Contributing to Facebook AI Research Sequence-to-Sequence Toolkit (fairseq)\nWe want to make contributing to this proje"
},
{
"path": "packages/fairseq-hacked/LICENSE",
"chars": 1086,
"preview": "MIT License\n\nCopyright (c) Facebook, Inc. and its affiliates.\n\nPermission is hereby granted, free of charge, to any pers"
},
{
"path": "packages/fairseq-hacked/README.md",
"chars": 8135,
"preview": "# <img src=\"fairseq_logo.png\" width=\"30\"> Introduction\n\nFairseq(-py) is a sequence modeling toolkit that allows research"
},
{
"path": "packages/fairseq-hacked/docs/Makefile",
"chars": 607,
"preview": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS =\nSPHI"
},
{
"path": "packages/fairseq-hacked/docs/_static/theme_overrides.css",
"chars": 192,
"preview": ".wy-table-responsive table td kbd {\n white-space: nowrap;\n}\n.wy-table-responsive table td {\n white-space: normal !"
},
{
"path": "packages/fairseq-hacked/docs/command_line_tools.rst",
"chars": 1821,
"preview": ".. _Command-line Tools:\n\nCommand-line Tools\n==================\n\nFairseq provides several command-line tools for training"
},
{
"path": "packages/fairseq-hacked/docs/conf.py",
"chars": 4223,
"preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# fairseq documentation build configuration file, created by\n# sphinx-q"
},
{
"path": "packages/fairseq-hacked/docs/criterions.rst",
"chars": 758,
"preview": ".. role:: hidden\n :class: hidden-section\n\n.. _Criterions:\n\nCriterions\n==========\n\nCriterions compute the loss functio"
},
{
"path": "packages/fairseq-hacked/docs/data.rst",
"chars": 1202,
"preview": ".. role:: hidden\n :class: hidden-section\n\n.. module:: fairseq.data\n\nData Loading and Utilities\n======================"
},
{
"path": "packages/fairseq-hacked/docs/docutils.conf",
"chars": 25,
"preview": "[writers]\noption-limit=0\n"
},
{
"path": "packages/fairseq-hacked/docs/getting_started.rst",
"chars": 7735,
"preview": "Evaluating Pre-trained Models\n=============================\n\nFirst, download a pre-trained model along with its vocabula"
},
{
"path": "packages/fairseq-hacked/docs/index.rst",
"chars": 1002,
"preview": ".. fairseq documentation master file, created by\n sphinx-quickstart on Fri Aug 17 21:45:30 2018.\n You can adapt this"
},
{
"path": "packages/fairseq-hacked/docs/lr_scheduler.rst",
"chars": 1055,
"preview": ".. role:: hidden\n :class: hidden-section\n\n.. _Learning Rate Schedulers:\n\nLearning Rate Schedulers\n==================="
},
{
"path": "packages/fairseq-hacked/docs/make.bat",
"chars": 805,
"preview": "@ECHO OFF\r\n\r\npushd %~dp0\r\n\r\nREM Command file for Sphinx documentation\r\n\r\nif \"%SPHINXBUILD%\" == \"\" (\r\n\tset SPHINXBUILD=py"
},
{
"path": "packages/fairseq-hacked/docs/models.rst",
"chars": 2830,
"preview": ".. role:: hidden\n :class: hidden-section\n\n.. module:: fairseq.models\n\n.. _Models:\n\nModels\n======\n\nA Model defines the"
},
{
"path": "packages/fairseq-hacked/docs/modules.rst",
"chars": 241,
"preview": "Modules\n=======\n\nFairseq provides several stand-alone :class:`torch.nn.Module` classes that may\nbe helpful when implemen"
},
{
"path": "packages/fairseq-hacked/docs/optim.rst",
"chars": 846,
"preview": ".. role:: hidden\n :class: hidden-section\n\n.. _optimizers:\n\nOptimizers\n==========\n\nOptimizers update the Model paramet"
},
{
"path": "packages/fairseq-hacked/docs/overview.rst",
"chars": 2692,
"preview": "Overview\n========\n\nFairseq can be extended through user-supplied `plug-ins\n<https://en.wikipedia.org/wiki/Plug-in_(compu"
},
{
"path": "packages/fairseq-hacked/docs/requirements.txt",
"chars": 27,
"preview": "sphinx<2.0\nsphinx-argparse\n"
},
{
"path": "packages/fairseq-hacked/docs/tasks.rst",
"chars": 1391,
"preview": ".. role:: hidden\n :class: hidden-section\n\n.. module:: fairseq.tasks\n\n.. _Tasks:\n\nTasks\n=====\n\nTasks store dictionarie"
},
{
"path": "packages/fairseq-hacked/docs/tutorial_classifying_names.rst",
"chars": 16996,
"preview": "Tutorial: Classifying Names with a Character-Level RNN\n======================================================\n\nIn this t"
},
{
"path": "packages/fairseq-hacked/docs/tutorial_simple_lstm.rst",
"chars": 21188,
"preview": "Tutorial: Simple LSTM\n=====================\n\nIn this tutorial we will extend fairseq by adding a new\n:class:`~fairseq.mo"
},
{
"path": "packages/fairseq-hacked/eval_lm.py",
"chars": 8549,
"preview": "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the "
},
{
"path": "packages/fairseq-hacked/examples/.gitignore",
"chars": 16,
"preview": "!*/*.sh\n!*/*.md\n"
},
{
"path": "packages/fairseq-hacked/examples/__init__.py",
"chars": 238,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/backtranslation/README.md",
"chars": 1658,
"preview": "# Understanding Back-Translation at Scale (Edunov et al., 2018)\n\nThis page includes pre-trained models from the paper [U"
},
{
"path": "packages/fairseq-hacked/examples/bart/README.cnn.md",
"chars": 3625,
"preview": "# Fine-tuning BART on CNN-Dailymail summarization task\n\n### 1) Follow instructions [here](https://github.com/abisee/cnn-"
},
{
"path": "packages/fairseq-hacked/examples/bart/README.glue.md",
"chars": 4071,
"preview": "# Fine-tuning BART on GLUE tasks\n\n### 1) Download the data from GLUE website (https://gluebenchmark.com/tasks) using fol"
},
{
"path": "packages/fairseq-hacked/examples/bart/README.md",
"chars": 7655,
"preview": "# BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension\n\n[ht"
},
{
"path": "packages/fairseq-hacked/examples/camembert/README.md",
"chars": 1925,
"preview": "# CamemBERT: a French BERT\n\n## Introduction\n\nCamemBERT is a pretrained language model trained on 138GB of French text ba"
},
{
"path": "packages/fairseq-hacked/examples/conv_seq2seq/README.md",
"chars": 1926,
"preview": "# Convolutional Sequence to Sequence Learning (Gehring et al., 2017)\n\n## Pre-trained models\n\nDescription | Dataset | Mod"
},
{
"path": "packages/fairseq-hacked/examples/cross_lingual_language_model/README.md",
"chars": 3043,
"preview": "# Cross-Lingual Language Model Pre-training\n\nBelow are some details for training Cross-Lingual Language Models (XLM) - s"
},
{
"path": "packages/fairseq-hacked/examples/joint_alignment_translation/README.md",
"chars": 3130,
"preview": "# Jointly Learning to Align and Translate with Transformer Models (Garg et al., 2019)\n\nThis page includes instructions f"
},
{
"path": "packages/fairseq-hacked/examples/joint_alignment_translation/prepare-wmt18en2de_no_norm_no_escape_no_agressive.sh",
"chars": 3333,
"preview": "#!/bin/bash\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license f"
},
{
"path": "packages/fairseq-hacked/examples/language_model/README.md",
"chars": 4480,
"preview": "# Neural Language Modeling\n\n## Pre-trained models\n\nModel | Description | Dataset | Download\n---|---|---|---\n`transformer"
},
{
"path": "packages/fairseq-hacked/examples/language_model/conv_lm/README.md",
"chars": 1249,
"preview": "# Language Modeling with Gated Convolutional Networks (Dauphin et al., 2017)\n\n## Example usage\n\nFirst download and prepr"
},
{
"path": "packages/fairseq-hacked/examples/language_model/prepare-wikitext-103.sh",
"chars": 827,
"preview": "#!/bin/bash\n# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh\n\nURLS=(\n \"https://s3."
},
{
"path": "packages/fairseq-hacked/examples/language_model/transformer_lm/README.md",
"chars": 1284,
"preview": "# Adaptive Input Representations for Neural Language Modeling (Baevski and Auli, 2018)\n\n## Pre-trained models\n\nDescripti"
},
{
"path": "packages/fairseq-hacked/examples/layerdrop/README.md",
"chars": 3656,
"preview": "# Reducing Transformer Depth on Demand with Structured Dropout (Fan et al., 2019)\nThis page contains information for how"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/README.md",
"chars": 3666,
"preview": "# Simple and Effective Noisy Channel Modeling for Neural Machine Translation (Yee et al., 2019)\nThis page contains point"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/__init__.py",
"chars": 216,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/rerank.py",
"chars": 13783,
"preview": "import rerank_utils\nimport rerank_generate\nimport rerank_score_bw\nimport rerank_score_lm\nfrom fairseq import bleu, optio"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/rerank_generate.py",
"chars": 14142,
"preview": "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the "
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/rerank_options.py",
"chars": 7434,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/rerank_score_bw.py",
"chars": 4021,
"preview": "import rerank_utils\nimport os\nfrom fairseq import options\nfrom examples.noisychannel import rerank_options\nfrom contextl"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/rerank_score_lm.py",
"chars": 2077,
"preview": "import rerank_utils\nimport os\nfrom fairseq import options\nfrom examples.noisychannel import rerank_options\n\n\ndef score_l"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/rerank_tune.py",
"chars": 2992,
"preview": "import rerank\nimport argparse\nimport numpy as np\nimport random\nfrom examples.noisychannel import rerank_options\nfrom fai"
},
{
"path": "packages/fairseq-hacked/examples/noisychannel/rerank_utils.py",
"chars": 28488,
"preview": "import subprocess\nimport os\nimport re\nfrom fairseq import options\nimport eval_lm\nimport preprocess\nfrom contextlib impor"
},
{
"path": "packages/fairseq-hacked/examples/nonautoregressive_translation/README.md",
"chars": 4393,
"preview": "# Non-autoregressive Neural Machine Translation (NAT)\n\nThis page mainly includes instructions for reproducing results fr"
},
{
"path": "packages/fairseq-hacked/examples/nonautoregressive_translation/scripts.md",
"chars": 4810,
"preview": "# Examples of Training scripts for Non-autoregressive Machine Translation models\n\n### Non-autoregressive Transformer (NA"
},
{
"path": "packages/fairseq-hacked/examples/pay_less_attention_paper/README.md",
"chars": 10343,
"preview": "# Pay Less Attention with Lightweight and Dynamic Convolutions (Wu et al., 2019)\nThis page contains pointers to pre-trai"
},
{
"path": "packages/fairseq-hacked/examples/roberta/README.custom_classification.md",
"chars": 4085,
"preview": "# Finetuning RoBERTa on a custom classification task\n\nThis example shows how to finetune RoBERTa on the IMDB dataset, bu"
},
{
"path": "packages/fairseq-hacked/examples/roberta/README.glue.md",
"chars": 4258,
"preview": "# Finetuning RoBERTa on GLUE tasks\n\n### 1) Download the data from GLUE website (https://gluebenchmark.com/tasks) using f"
},
{
"path": "packages/fairseq-hacked/examples/roberta/README.md",
"chars": 12872,
"preview": "# RoBERTa: A Robustly Optimized BERT Pretraining Approach\n\nhttps://arxiv.org/abs/1907.11692\n\n## Introduction\n\nRoBERTa it"
},
{
"path": "packages/fairseq-hacked/examples/roberta/README.pretraining.md",
"chars": 4290,
"preview": "# Pretraining RoBERTa using your own data\n\nThis tutorial will walk you through pretraining RoBERTa over your own data.\n\n"
},
{
"path": "packages/fairseq-hacked/examples/roberta/README.race.md",
"chars": 2195,
"preview": "# Finetuning RoBERTa on RACE tasks\n\n### 1) Download the data from RACE website (http://www.cs.cmu.edu/~glai1/data/race/)"
},
{
"path": "packages/fairseq-hacked/examples/roberta/commonsense_qa/README.md",
"chars": 4055,
"preview": "# Finetuning RoBERTa on Commonsense QA\n\nWe follow a similar approach to [finetuning RACE](../README.race.md). Specifical"
},
{
"path": "packages/fairseq-hacked/examples/roberta/commonsense_qa/__init__.py",
"chars": 220,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/roberta/commonsense_qa/commonsense_qa_task.py",
"chars": 6041,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/roberta/commonsense_qa/download_cqa_data.sh",
"chars": 594,
"preview": "#!/bin/bash\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license fo"
},
{
"path": "packages/fairseq-hacked/examples/roberta/multiprocessing_bpe_encoder.py",
"chars": 3695,
"preview": "#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is "
},
{
"path": "packages/fairseq-hacked/examples/roberta/preprocess_GLUE_tasks.sh",
"chars": 5738,
"preview": "#!/bin/bash\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license fo"
},
{
"path": "packages/fairseq-hacked/examples/roberta/preprocess_RACE.py",
"chars": 3413,
"preview": "#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is "
},
{
"path": "packages/fairseq-hacked/examples/roberta/preprocess_RACE.sh",
"chars": 2070,
"preview": "#!/bin/bash\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license fo"
},
{
"path": "packages/fairseq-hacked/examples/roberta/wsc/README.md",
"chars": 5649,
"preview": "# Finetuning RoBERTa on Winograd Schema Challenge (WSC) data\n\nThe following instructions can be used to finetune RoBERTa"
},
{
"path": "packages/fairseq-hacked/examples/roberta/wsc/__init__.py",
"chars": 245,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/roberta/wsc/wsc_criterion.py",
"chars": 5932,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/roberta/wsc/wsc_task.py",
"chars": 13110,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/roberta/wsc/wsc_utils.py",
"chars": 8352,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/scaling_nmt/README.md",
"chars": 4034,
"preview": "# Scaling Neural Machine Translation (Ott et al., 2018)\n\nThis page includes instructions for reproducing results from th"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/README.md",
"chars": 6330,
"preview": "# Speech Recognition\n`examples/speech_recognition` is implementing ASR task in Fairseq, along with needed features, data"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/__init__.py",
"chars": 48,
"preview": "from . import tasks, criterions, models # noqa\n"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/criterions/ASG_loss.py",
"chars": 5489,
"preview": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MI"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/criterions/CTC_loss.py",
"chars": 6996,
"preview": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MI"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/criterions/__init__.py",
"chars": 458,
"preview": "import importlib\nimport os\n\n\n# ASG loss requires wav2letter\nblacklist = set()\ntry:\n import wav2letter\nexcept ImportEr"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/criterions/cross_entropy_acc.py",
"chars": 5334,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/datasets/asr_prep_json.py",
"chars": 3769,
"preview": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/datasets/prepare-librispeech.sh",
"chars": 3822,
"preview": "#!/usr/bin/env bash\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT li"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/infer.py",
"chars": 8916,
"preview": "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the "
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/models/__init__.py",
"chars": 267,
"preview": "import importlib\nimport os\n\nfor file in os.listdir(os.path.dirname(__file__)):\n if file.endswith(\".py\") and not file."
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/models/vggtransformer.py",
"chars": 37112,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/models/w2l_conv_glu_enc.py",
"chars": 5987,
"preview": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MI"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/tasks/__init__.py",
"chars": 264,
"preview": "import importlib\nimport os\n\nfor file in os.listdir(os.path.dirname(__file__)):\n if file.endswith(\".py\") and not file."
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/tasks/speech_recognition.py",
"chars": 4579,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/utils/wer_utils.py",
"chars": 11842,
"preview": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MI"
},
{
"path": "packages/fairseq-hacked/examples/speech_recognition/w2l_decoder.py",
"chars": 5711,
"preview": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MI"
},
{
"path": "packages/fairseq-hacked/examples/stories/README.md",
"chars": 4129,
"preview": "# Hierarchical Neural Story Generation (Fan et al., 2018)\n\nThe following commands provide an example of pre-processing d"
},
{
"path": "packages/fairseq-hacked/examples/translation/README.md",
"chars": 13163,
"preview": "# Neural Machine Translation\n\nThis README contains instructions for [using pretrained translation models](#example-usage"
},
{
"path": "packages/fairseq-hacked/examples/translation/prepare-iwslt14.sh",
"chars": 2976,
"preview": "#!/usr/bin/env bash\n#\n# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh\n\necho 'Cloning"
},
{
"path": "packages/fairseq-hacked/examples/translation/prepare-iwslt17-multilingual.sh",
"chars": 4218,
"preview": "#!/bin/bash\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed u"
},
{
"path": "packages/fairseq-hacked/examples/translation/prepare-wmt14en2de.sh",
"chars": 3960,
"preview": "#!/bin/bash\n# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh\n\necho 'Cloning Moses git"
},
{
"path": "packages/fairseq-hacked/examples/translation/prepare-wmt14en2fr.sh",
"chars": 3722,
"preview": "#!/bin/bash\n# Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh\n\necho 'Cloning Moses git"
},
{
"path": "packages/fairseq-hacked/examples/translation_moe/README.md",
"chars": 3380,
"preview": "# Mixture Models for Diverse Machine Translation: Tricks of the Trade (Shen et al., 2019)\n\nThis page includes instructio"
},
{
"path": "packages/fairseq-hacked/examples/translation_moe/score.py",
"chars": 6026,
"preview": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT"
},
{
"path": "packages/fairseq-hacked/examples/wav2vec/README.md",
"chars": 2254,
"preview": "# wav2vec\n\nExample to train a wav2vec model as described in [wav2vec: Unsupervised Pre-training for Speech Recognition ("
},
{
"path": "packages/fairseq-hacked/examples/wmt19/README.md",
"chars": 3803,
"preview": "# WMT 19\n\nThis page provides pointers to the models of Facebook-FAIR's WMT'19 news translation task submission [(Ng et a"
},
{
"path": "packages/fairseq-hacked/examples/xlmr/README.md",
"chars": 4137,
"preview": "# Unsupervised Cross-lingual Representation Learning at Scale (XLM-RoBERTa)\n\n## Introduction\n\nXLM-R (XLM-RoBERTa) is sca"
},
{
"path": "packages/fairseq-hacked/fairseq/__init__.py",
"chars": 441,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/binarizer.py",
"chars": 2745,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/bleu.py",
"chars": 3956,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/checkpoint_utils.py",
"chars": 17821,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/clib/libbleu/libbleu.cpp",
"chars": 2791,
"preview": "/**\n * Copyright 2017-present, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under the licen"
},
{
"path": "packages/fairseq-hacked/fairseq/clib/libbleu/module.cpp",
"chars": 791,
"preview": "/**\n * Copyright 2017-present, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under the licen"
},
{
"path": "packages/fairseq-hacked/fairseq/clib/libnat/edit_dist.cpp",
"chars": 5763,
"preview": "/**\n * Copyright 2017-present, Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under the licen"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/__init__.py",
"chars": 747,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/adaptive_loss.py",
"chars": 3693,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/binary_cross_entropy.py",
"chars": 2468,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/composite_loss.py",
"chars": 3460,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/cross_entropy.py",
"chars": 2631,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/fairseq_criterion.py",
"chars": 1382,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/label_smoothed_cross_entropy.py",
"chars": 3793,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py",
"chars": 4311,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/legacy_masked_lm.py",
"chars": 6386,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/masked_lm.py",
"chars": 2854,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/nat_loss.py",
"chars": 6043,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/sentence_prediction.py",
"chars": 3258,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/criterions/sentence_ranking.py",
"chars": 3877,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/distributed_utils.py",
"chars": 7115,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/file_utils.py",
"chars": 10523,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/hub_utils.py",
"chars": 8615,
"preview": "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the "
},
{
"path": "packages/fairseq-hacked/fairseq/iterative_refinement_generator.py",
"chars": 9347,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/legacy_distributed_data_parallel.py",
"chars": 6759,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/meters.py",
"chars": 1635,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/__init__.py",
"chars": 4987,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/bart/__init__.py",
"chars": 244,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/bart/hub_interface.py",
"chars": 7113,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/bart/model.py",
"chars": 10812,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/cmlm_transformer.py",
"chars": 6240,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/composite_encoder.py",
"chars": 1928,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/distributed_fairseq_model.py",
"chars": 2404,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/fairseq_decoder.py",
"chars": 2851,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/fairseq_encoder.py",
"chars": 1396,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/fairseq_incremental_decoder.py",
"chars": 3863,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/fairseq_model.py",
"chars": 16240,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/fconv.py",
"chars": 28537,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/fconv_lm.py",
"chars": 4970,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/fconv_self_att.py",
"chars": 25100,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/insertion_transformer.py",
"chars": 10551,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/iterative_nonautoregressive_transformer.py",
"chars": 8426,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/levenshtein_transformer.py",
"chars": 27868,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/lightconv.py",
"chars": 35839,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/lightconv_lm.py",
"chars": 11190,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/lstm.py",
"chars": 25222,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/masked_lm.py",
"chars": 15605,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/model_utils.py",
"chars": 2343,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/multilingual_transformer.py",
"chars": 9204,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/nonautoregressive_ensembles.py",
"chars": 9165,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/nonautoregressive_transformer.py",
"chars": 15651,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/roberta/__init__.py",
"chars": 244,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
},
{
"path": "packages/fairseq-hacked/fairseq/models/roberta/alignment_utils.py",
"chars": 4091,
"preview": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n#"
}
]
// ... and 298 more files (preview truncated)