Repository: zhpmatrix/BERTem Branch: master Commit: 5151c4c304d1 Files: 65 Total size: 1.4 MB Directory structure: gitextract_mjpetdbh/ ├── LICENSE ├── MANIFEST.in ├── README.md ├── docker/ │ └── Dockerfile ├── examples/ │ ├── bertology.py │ ├── extract_features.py │ ├── lm_finetuning/ │ │ ├── README.md │ │ ├── finetune_on_pregenerated.py │ │ ├── pregenerate_training_data.py │ │ └── simple_lm_finetuning.py │ ├── run_classifier.py │ ├── run_classifier_dataset_utils.py │ ├── run_gpt2.py │ ├── run_openai_gpt.py │ ├── run_squad.py │ ├── run_squad_dataset_utils.py │ ├── run_swag.py │ ├── run_transfo_xl.py │ ├── sem_run_classifier.py │ ├── tacred_run_classifier.py │ ├── tacred_run_infer.py │ ├── test.sh │ └── train.sh ├── hubconf.py ├── hubconfs/ │ ├── bert_hubconf.py │ ├── gpt2_hubconf.py │ ├── gpt_hubconf.py │ └── transformer_xl_hubconf.py ├── notebooks/ │ ├── Comparing-PT-and-TF-models.ipynb │ ├── Comparing-TF-and-PT-models-MLM-NSP.ipynb │ ├── Comparing-TF-and-PT-models-SQuAD.ipynb │ └── Comparing-TF-and-PT-models.ipynb ├── pytorch_pretrained_bert/ │ ├── __init__.py │ ├── __main__.py │ ├── convert_gpt2_checkpoint_to_pytorch.py │ ├── convert_openai_checkpoint_to_pytorch.py │ ├── convert_pytorch_checkpoint_to_tf.py │ ├── convert_tf_checkpoint_to_pytorch.py │ ├── convert_transfo_xl_checkpoint_to_pytorch.py │ ├── file_utils.py │ ├── modeling.py │ ├── modeling_gpt2.py │ ├── modeling_openai.py │ ├── modeling_transfo_xl.py │ ├── modeling_transfo_xl_utilities.py │ ├── optimization.py │ ├── optimization_openai.py │ ├── tokenization.py │ ├── tokenization_gpt2.py │ ├── tokenization_openai.py │ └── tokenization_transfo_xl.py ├── requirements.txt ├── samples/ │ ├── input.txt │ └── sample_text.txt ├── setup.py └── tests/ ├── conftest.py ├── modeling_gpt2_test.py ├── modeling_openai_test.py ├── modeling_test.py ├── modeling_transfo_xl_test.py ├── optimization_test.py ├── tokenization_gpt2_test.py ├── tokenization_openai_test.py ├── tokenization_test.py └── tokenization_transfo_xl_test.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================
FILE: MANIFEST.in
================================================
include LICENSE

================================================
FILE: README.md
================================================
### Implementation Notes

This repo mainly implements the work from the first half of the paper, in PyTorch, building on [huggingface](https://github.com/huggingface/pytorch-pretrained-BERT)'s codebase. PyTorch really is the most badass framework in the world (runs away).

### Reference

![img1](http://wx2.sinaimg.cn/mw690/aba7d18bgy1g47p0g5ln3j210n0drtas.jpg)

### Code Notes

(1) Main changes: [modeling.py](https://github.com/zhpmatrix/BERTem/blob/master/pytorch_pretrained_bert/modeling.py)

output representation: **BertForSequenceClassification**

input representation: **BertEmbeddings**

Multiple strategies are implemented on both the input and the output side, so you can search for the combination that works best for your specific task (an illustrative sketch of the "sum(entity start)" output strategy appears after examples/bertology.py below).

(2) Secondary changes: the classification-related files under examples/

(3) Serving: Flask-based; you can start a local service. The implementation is in [tacred\_run\_infer.py](https://github.com/zhpmatrix/BERTem/blob/master/examples/tacred_run_infer.py).

(4) The code is for reference only: no dataset, no pre-trained model, and no fine-tuned model are provided (hope you understand).

(5) For related work, see [my blog post on neural relation extraction](https://zhpmatrix.github.io/2019/06/30/neural-relation-extraction/), which is probably more valuable than this code.

### Results

Results on the TACRED dataset:

|Model #|Input type|Output type|Metric|P|R|F1|Notes|
|------|------|------|------|------|------|------|------|
|0|entity marker|sum(entity start)|micro|**0.68**|**0.63**|**0.65**|**base-model**, lr=3e-5, epoch=3|
||||macro|**0.60**|**0.54**|**0.55**||
|1|entity marker|sum(entity start)|micro|**0.70**|**0.62**|**0.65**|**large-model**, lr=3e-5, epoch=1|
||||macro|**0.63**|**0.52**|**0.55**||
|-1|None|None|micro|**0.69**|**0.66**|**0.67**|lost after a slip of the hand, never reproduced it — awkward|
||||macro|**0.58**|**0.50**|**0.53**||

Results on the SemEval2010 Task 8 dataset:

|Model #|Input type|Output type|Metric|P|R|F1|Notes|
|------|------|------|------|------|------|------|------|
|0|entity marker|maxpool(entity emb)+relu|micro|**0.86**|**0.86**|**0.86**|bert-large|
||||macro|**0.82**|**0.83**|**0.82**||

### Mixed-Precision Speedup Results

On this task, keeping the earlier settings, the train and dev sets are merged into a new train set while the test set stays unchanged. Under the fp32 and fp16 settings, with the same batch\_size, we compare the time per epoch (or per iteration).

|Aspect|fp32|fp16|Notes|
|------|------|------|------|
|Training|1.04 it/s|4.41 it/s|12.76 it/s with exclusive use of the GPU|
|Inference|4.14 it/s|8.63 it/s||
|Test-set metrics|0.65/0.55|0.64/0.53|format: micro/macro|
|Model size|421M|212M||

================================================
FILE: docker/Dockerfile
================================================
FROM pytorch/pytorch:latest
RUN git clone https://github.com/NVIDIA/apex.git && cd apex && python setup.py install --cuda_ext --cpp_ext
RUN pip install pytorch-pretrained-bert
WORKDIR /workspace

================================================
FILE: examples/bertology.py
================================================
#!/usr/bin/env python3 import os import argparse import logging from datetime import timedelta, datetime from tqdm import tqdm import numpy as np import torch from torch.utils.data import DataLoader, SequentialSampler, TensorDataset, Subset from torch.utils.data.distributed import DistributedSampler from torch.nn import CrossEntropyLoss, MSELoss from pytorch_pretrained_bert import BertForSequenceClassification, BertTokenizer from run_classifier_dataset_utils import processors, output_modes, convert_examples_to_features, compute_metrics logger = logging.getLogger(__name__) def entropy(p): plogp = p * torch.log(p) plogp[p == 0] = 0 return -plogp.sum(dim=-1) def print_1d_tensor(tensor, prefix=""): if tensor.dtype != torch.long: logger.info(prefix + "\t".join(f"{x:.5f}" for x in tensor.cpu().data)) else: logger.info(prefix + "\t".join(f"{x:d}" for x in tensor.cpu().data)) def print_2d_tensor(tensor): logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor)))) for row in range(len(tensor)):
print_1d_tensor(tensor[row], prefix=f"layer {row + 1}:\t") def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None): """ Example on how to use model outputs to compute: - head attention entropy (activated by setting output_attentions=True when we created the model - head importance scores according to http://arxiv.org/abs/1905.10650 (activated by setting keep_multihead_output=True when we created the model) """ # Prepare our tensors n_layers, n_heads = model.bert.config.num_hidden_layers, model.bert.config.num_attention_heads head_importance = torch.zeros(n_layers, n_heads).to(args.device) attn_entropy = torch.zeros(n_layers, n_heads).to(args.device) preds = None labels = None tot_tokens = 0.0 for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): batch = tuple(t.to(args.device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) all_attentions, logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, head_mask=head_mask) if compute_entropy: # Update head attention entropy for layer, attn in enumerate(all_attentions): masked_entropy = entropy(attn.detach()) * input_mask.float().unsqueeze(1) attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach() if compute_importance: # Update head importance scores with regards to our loss # First, backpropagate to populate the gradients if args.output_mode == "classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, args.num_labels), label_ids.view(-1)) elif args.output_mode == "regression": loss_fct = MSELoss() loss = loss_fct(logits.view(-1), label_ids.view(-1)) loss.backward() # Second, compute importance scores according to http://arxiv.org/abs/1905.10650 multihead_outputs = model.bert.get_multihead_outputs() for layer, mh_layer_output in enumerate(multihead_outputs): dot = torch.einsum("bhli,bhli->bhl", [mh_layer_output.grad, mh_layer_output]) head_importance[layer] += dot.abs().sum(-1).sum(0).detach() # Also store our logits/labels if we want to compute metrics afterwards if preds is None: preds = logits.detach().cpu().numpy() labels = label_ids.detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) labels = np.append(labels, label_ids.detach().cpu().numpy(), axis=0) tot_tokens += input_mask.float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: exponent = 2 norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1/exponent) head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20 if not args.dont_normalize_global_importance: head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) return attn_entropy, head_importance, preds, labels def run_model(): parser = argparse.ArgumentParser() parser.add_argument('--model_name_or_path', type=str, default='bert-base-cased-finetuned-mrpc', help='pretrained model name or path to local checkpoint') parser.add_argument("--task_name", type=str, default='mrpc', help="The name of the task to train.") parser.add_argument("--data_dir", type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--output_dir", type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances.") parser.add_argument("--overwrite_output_dir", action='store_true', help="Whether to overwrite data in output directory") parser.add_argument("--dont_normalize_importance_by_layer", action='store_true', help="Don't normalize importance score by layers") parser.add_argument("--dont_normalize_global_importance", action='store_true', help="Don't normalize all importance scores between 0 and 1") parser.add_argument("--try_masking", action='store_true', help="Whether to try to mask head until a threshold of accuracy.") parser.add_argument("--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics" "(stop masking when metric < threshold * original metric value).") parser.add_argument("--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step.") parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--batch_size", default=1, type=int, help="Batch size.") parser.add_argument("--seed", type=int, default=42) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) args.device = torch.device("cuda", args.local_rank) n_gpu = 1 torch.distributed.init_process_group(backend='nccl') # Initializes the distributed backend # Setup logging logging.basicConfig(level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, n_gpu, bool(args.local_rank != -1))) # Set seeds np.random.seed(args.seed) torch.random.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed(args.seed) # Prepare GLUE task task_name = args.task_name.lower() processor = processors[task_name]() label_list = processor.get_labels() args.output_mode = output_modes[task_name] args.num_labels = len(label_list) # Prepare output directory if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and not args.overwrite_output_dir: raise ValueError("Output directory ({}) already exists and is not 
empty.".format(args.output_dir)) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) # Load model & tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only one distributed process download model & vocab tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path) # Load a model with all BERTology options on: # output_attentions => will output attention weights # keep_multihead_output => will store gradient of attention head outputs for head importance computation # see: http://arxiv.org/abs/1905.10650 model = BertForSequenceClassification.from_pretrained(args.model_name_or_path, num_labels=args.num_labels, output_attentions=True, keep_multihead_output=True) if args.local_rank == 0: torch.distributed.barrier() # Make sure only one distributed process download model & vocab model.to(args.device) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) model.eval() # Prepare dataset for the GLUE task eval_examples = processor.get_dev_examples(args.data_dir) cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format( list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(task_name))) try: eval_features = torch.load(cached_eval_features_file) except: eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, args.output_mode) if args.local_rank in [-1, 0]: logger.info("Saving eval features to cache file %s", cached_eval_features_file) torch.save(eval_features, cached_eval_features_file) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long if args.output_mode == "classification" else torch.float) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) if args.data_subset > 0: eval_data = Subset(eval_data, list(range(min(args.data_subset, len(eval_data))))) eval_sampler = SequentialSampler(eval_data) if args.local_rank == -1 else DistributedSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) # Print/save training arguments print(args) torch.save(args, os.path.join(args.output_dir, 'run_args.bin')) # Compute head entropy and importance score attn_entropy, head_importance, _, _ = compute_heads_importance(args, model, eval_dataloader) # Print/save matrices np.save(os.path.join(args.output_dir, 'attn_entropy.npy'), attn_entropy.detach().cpu().numpy()) np.save(os.path.join(args.output_dir, 'head_importance.npy'), head_importance.detach().cpu().numpy()) logger.info("Attention entropies") print_2d_tensor(attn_entropy) logger.info("Head importance scores") print_2d_tensor(head_importance) logger.info("Head ranked by importance scores") head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device) head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(head_importance.numel(), device=args.device) head_ranks = head_ranks.view_as(head_importance) print_2d_tensor(head_ranks) # Do masking if we want to if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: 
_, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) original_score = compute_metrics(task_name, preds, labels)[args.metric_name] logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold) new_head_mask = torch.ones_like(head_importance) num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount)) current_score = original_score while current_score >= original_score * args.masking_threshold: head_mask = new_head_mask.clone() # save current head mask # heads from least important to most - keep only not-masked heads head_importance[head_mask == 0.0] = float('Inf') current_heads_to_mask = head_importance.view(-1).sort()[1] if len(current_heads_to_mask) <= num_to_mask: break # mask heads current_heads_to_mask = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist())) new_head_mask = new_head_mask.view(-1) new_head_mask[current_heads_to_mask] = 0.0 new_head_mask = new_head_mask.view_as(head_mask) print_2d_tensor(new_head_mask) # Compute metric and head importance again _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) current_score = compute_metrics(task_name, preds, labels)[args.metric_name] logger.info("Masking: current score: %f, remaining heads %d (%.1f percent)", current_score, new_head_mask.sum(), new_head_mask.sum()/new_head_mask.numel() * 100) logger.info("Final head mask") print_2d_tensor(head_mask) np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy()) # Try pruning and test time speedup # Pruning is like masking but we actually remove the masked weights before_time = datetime.now() _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_masking = compute_metrics(task_name, preds, labels)[args.metric_name] original_time = datetime.now() - before_time original_num_params = sum(p.numel() for p in model.parameters()) heads_to_prune = dict((layer, (1 - head_mask[layer].long()).nonzero().tolist()) for layer in range(len(head_mask))) assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item() model.bert.prune_heads(heads_to_prune) pruned_num_params = sum(p.numel() for p in model.parameters()) before_time = datetime.now() _, _, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None) preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds) score_pruning = compute_metrics(task_name, preds, labels)[args.metric_name] new_time = datetime.now() - before_time logger.info("Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)", original_num_params, pruned_num_params, pruned_num_params/original_num_params * 100) logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning) logger.info("Pruning: speed ratio (new timing / original timing): %f percent", original_time/new_time * 100) if __name__ == '__main__': run_model()
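To make the README's "entity marker + sum(entity start)" output strategy concrete, here is a minimal sketch of that pooling step. This is an illustration, not the repository's exact code: `e1_pos`/`e2_pos`, the marker scheme, and the label count are assumptions.

```python
import torch
import torch.nn as nn

# Sketch: pool the encoder states at the two entity-start markers and classify.
# Assumes tokenization like: [CLS] ... [E1] subj [/E1] ... [E2] obj [/E2] ... [SEP]
hidden_size, num_labels = 768, 42  # e.g. 42 relation classes for TACRED
classifier = nn.Linear(hidden_size, num_labels)

def relation_logits(sequence_output, e1_pos, e2_pos):
    """sequence_output: (batch, seq_len, hidden) from BERT's last layer;
    e1_pos / e2_pos: (batch,) positions of the [E1] and [E2] marker tokens."""
    batch_idx = torch.arange(sequence_output.size(0))
    h_e1 = sequence_output[batch_idx, e1_pos]  # hidden state at [E1]
    h_e2 = sequence_output[batch_idx, e2_pos]  # hidden state at [E2]
    # "sum(entity start)"; concatenating the two states is a common alternative
    return classifier(h_e1 + h_e2)
```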
================================================ FILE: examples/extract_features.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Extract pre-computed feature vectors from a PyTorch BERT model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import collections import logging import json import re import torch from torch.utils.data import TensorDataset, DataLoader, SequentialSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.modeling import BertModel logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) class InputExample(object): def __init__(self, unique_id, text_a, text_b): self.unique_id = unique_id self.text_a = text_a self.text_b = text_b class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): self.unique_id = unique_id self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.input_type_ids = input_type_ids def convert_examples_to_features(examples, seq_length, tokenizer): """Loads a data file into a list of `InputFeature`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > seq_length - 2: tokens_a = tokens_a[0:(seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambigiously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) input_type_ids.append(1) tokens.append("[SEP]") input_type_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == seq_length assert len(input_mask) == seq_length assert len(input_type_ids) == seq_length if ex_index < 5: logger.info("*** Example ***") logger.info("unique_id: %s" % (example.unique_id)) logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids])) features.append( InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def read_examples(input_file): """Read a list of `InputExample`s from an input file.""" examples = [] unique_id = 0 with open(input_file, "r", encoding='utf-8') as reader: while True: line = reader.readline() if not line: break line = line.strip() text_a = None text_b = None m = re.match(r"^(.*) \|\|\| (.*)$", line) if m is None: text_a = line else: text_a = m.group(1) text_b = m.group(2) examples.append( InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)) unique_id += 1 return examples def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--input_file", default=None, type=str, required=True) parser.add_argument("--output_file", default=None, type=str, required=True) parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") ## Other parameters parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--layers", default="-1,-2,-3,-4", type=str) parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
Sequences longer " "than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--batch_size", default=32, type=int, help="Batch size for predictions.") parser.add_argument("--local_rank", type=int, default=-1, help = "local_rank for distributed training on gpus") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1))) layer_indexes = [int(x) for x in args.layers.split(",")] tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) examples = read_examples(args.input_file) features = convert_examples_to_features( examples=examples, seq_length=args.max_seq_length, tokenizer=tokenizer) unique_id_to_feature = {} for feature in features: unique_id_to_feature[feature.unique_id] = feature model = BertModel.from_pretrained(args.bert_model) model.to(device) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank) elif n_gpu > 1: model = torch.nn.DataParallel(model) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index) if args.local_rank == -1: eval_sampler = SequentialSampler(eval_data) else: eval_sampler = DistributedSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) model.eval() with open(args.output_file, "w", encoding='utf-8') as writer: for input_ids, input_mask, example_indices in eval_dataloader: input_ids = input_ids.to(device) input_mask = input_mask.to(device) all_encoder_layers, _ = model(input_ids, token_type_ids=None, attention_mask=input_mask) all_encoder_layers = all_encoder_layers for b, example_index in enumerate(example_indices): feature = features[example_index.item()] unique_id = int(feature.unique_id) # feature = unique_id_to_feature[unique_id] output_json = collections.OrderedDict() output_json["linex_index"] = unique_id all_out_features = [] for (i, token) in enumerate(feature.tokens): all_layers = [] for (j, layer_index) in enumerate(layer_indexes): layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy() layer_output = layer_output[b] layers = collections.OrderedDict() layers["index"] = layer_index layers["values"] = [ round(x.item(), 6) for x in layer_output[i] ] all_layers.append(layers) out_features = collections.OrderedDict() out_features["token"] = token out_features["layers"] = all_layers all_out_features.append(out_features) output_json["features"] = all_out_features writer.write(json.dumps(output_json) + "\n") if __name__ == "__main__": main() ================================================ FILE: examples/lm_finetuning/README.md ================================================ # BERT Model Finetuning using Masked Language Modeling 
objective

## Introduction

The three example scripts in this folder can be used to **fine-tune** a pre-trained BERT model using the pretraining objective (a combination of masked language modeling and next sentence prediction loss). In general, pretrained models like BERT are first trained with a pretraining objective (masked language modeling and next sentence prediction for BERT) on a large and general natural language corpus. A classifier head is then added on top of the pre-trained architecture and the model is quickly fine-tuned on a target task, while still (hopefully) retaining its general language understanding. This greatly reduces overfitting and yields state-of-the-art results, especially when training data for the target task are limited.

The [ULMFiT paper](https://arxiv.org/abs/1801.06146) took a slightly different approach, however, and added an intermediate step in which the model is fine-tuned on text **from the same domain as the target task and using the pretraining objective** before the final stage in which the classifier head is added and the model is trained on the target task itself. That paper reported significantly improved results from this step, and found that high-quality classifications were possible even with only tiny numbers (<1000) of labelled training examples, as long as there was a lot of unlabelled data from the target domain. Although this wasn't covered in the original BERT paper, domain-specific fine-tuning of Transformer models has [recently been reported by other authors](https://arxiv.org/pdf/1905.05583.pdf), who report performance improvements as well.

## Input format

The scripts in this folder expect a single file as input, consisting of untokenized text, with one **sentence** per line and one blank line between documents (see the toy example below). The reason for the sentence splitting is that part of BERT's training involves a _next sentence_ objective in which the model must predict whether two sequences of text are contiguous text from the same document or not; to avoid making the task _too easy_, the split point between the sequences is always at the end of a sentence. The linebreaks in the file are therefore necessary to mark the points where the text can be split.

## Usage

There are two ways to fine-tune a language model using these scripts. The first _quick_ approach is to use [`simple_lm_finetuning.py`](./simple_lm_finetuning.py). This script does everything in a single step, but generates training instances that consist of just two sentences. This is quite different from the BERT paper, where (confusingly) the NextSentence task concatenated sentences from each document into two long multi-sentence sequences, which the paper just referred to as _sentences_. The difference between this simple approach and the original paper's approach can have a significant effect for long sequences, since two sentences will be much shorter than the max sequence length. In this case, most of each training example will just consist of blank padding characters, which wastes a lot of computation and results in a model that isn't really training on long sequences.

As such, the preferred approach (assuming you have documents containing multiple contiguous sentences from your target domain) is to use [`pregenerate_training_data.py`](./pregenerate_training_data.py) to pre-process your data into training examples following the methodology used for LM training in the original BERT paper and repository.
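For concreteness, a toy corpus in the input format described above (one sentence per line, a blank line between documents; the text itself is invented for illustration) would look like:

```
The dog chased the ball across the yard.
It lost interest after a few minutes.

TACRED is a relation extraction dataset.
Each example is a sentence annotated with a subject span and an object span.
```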
Since there is a significant random component to training data generation for BERT, this script includes an option to generate multiple _epochs_ of pre-processed data, to avoid training on the same random splits each epoch. Generating an epoch of data for each training epoch should result in a better final model, and so we recommend doing so.

You can then train on the pregenerated data using [`finetune_on_pregenerated.py`](./finetune_on_pregenerated.py), pointing it to the folder created by [`pregenerate_training_data.py`](./pregenerate_training_data.py). Note that you should use the same `bert_model` and case options for both! Also note that `max_seq_len` does not need to be specified for the [`finetune_on_pregenerated.py`](./finetune_on_pregenerated.py) script, as it is inferred from the training examples.

There are various options that can be tweaked, but they are mostly set to the values from the BERT paper/repository, and the defaults should make sense. The most relevant ones are:

- `--max_seq_len`: Controls the length of training examples (in wordpiece tokens) seen by the model. Defaults to 128 but can be set as high as 512. Higher values may yield stronger language models at the cost of slower and more memory-intensive training.
- `--fp16`: Enables fast half-precision training on recent GPUs.

In addition, if memory usage is an issue, especially when training on a single GPU, reducing `--train_batch_size` from the default 32 to a lower number (4-16) can be helpful, or leaving `--train_batch_size` at the default and increasing `--gradient_accumulation_steps` to 2-8. Changing `--gradient_accumulation_steps` may be preferable, as altering the batch size may require corresponding changes in the learning rate to compensate (see the sketch below).

There is also a `--reduce_memory` option for both the `pregenerate_training_data.py` and `finetune_on_pregenerated.py` scripts that spills data to disk in shelf objects or numpy memmaps rather than retaining it in memory, which significantly reduces memory usage with little performance impact.
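Since the interaction between `--train_batch_size` and `--gradient_accumulation_steps` is easy to get wrong, here is the accumulation pattern these scripts boil down to, as a sketch with hypothetical `model`, `optimizer`, and `dataloader` objects (not code from this repository):

```python
accumulation_steps = 4  # effective batch size = train_batch_size * accumulation_steps

for step, batch in enumerate(dataloader):
    loss = model(*batch)
    # Scale the loss so the accumulated gradients match one large-batch update.
    (loss / accumulation_steps).backward()
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()       # one optimizer update per accumulation window
        optimizer.zero_grad()
```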
## Examples

### Simple fine-tuning

```
python3 simple_lm_finetuning.py --train_corpus my_corpus.txt --bert_model bert-base-uncased --do_lower_case --output_dir finetuned_lm/ --do_train
```

### Pregenerating training data

```
python3 pregenerate_training_data.py --train_corpus my_corpus.txt --bert_model bert-base-uncased --do_lower_case --output_dir training/ --epochs_to_generate 3 --max_seq_len 256
```

### Training on pregenerated data

```
python3 finetune_on_pregenerated.py --pregenerated_data training/ --bert_model bert-base-uncased --do_lower_case --output_dir finetuned_lm/ --epochs 3
```

================================================
FILE: examples/lm_finetuning/finetune_on_pregenerated.py
================================================
from argparse import ArgumentParser from pathlib import Path import os import torch import logging import json import random import numpy as np from collections import namedtuple from tempfile import TemporaryDirectory from torch.utils.data import DataLoader, Dataset, RandomSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.modeling import BertForPreTraining from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next") log_format = '%(asctime)-10s: %(message)s' logging.basicConfig(level=logging.INFO, format=log_format) def convert_example_to_features(example, tokenizer, max_seq_length): tokens = example["tokens"] segment_ids = example["segment_ids"] is_random_next = example["is_random_next"] masked_lm_positions = example["masked_lm_positions"] masked_lm_labels = example["masked_lm_labels"] assert len(tokens) == len(segment_ids) <= max_seq_length # The preprocessed data should be already truncated input_ids = tokenizer.convert_tokens_to_ids(tokens) masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels) input_array = np.zeros(max_seq_length, dtype=np.int) input_array[:len(input_ids)] = input_ids mask_array = np.zeros(max_seq_length, dtype=np.bool) mask_array[:len(input_ids)] = 1 segment_array = np.zeros(max_seq_length, dtype=np.bool) segment_array[:len(segment_ids)] = segment_ids lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1) lm_label_array[masked_lm_positions] = masked_label_ids features = InputFeatures(input_ids=input_array, input_mask=mask_array, segment_ids=segment_array, lm_label_ids=lm_label_array, is_next=is_random_next) return features class PregeneratedDataset(Dataset): def __init__(self, training_path, epoch, tokenizer, num_data_epochs, reduce_memory=False): self.vocab = tokenizer.vocab self.tokenizer = tokenizer self.epoch = epoch self.data_epoch = epoch % num_data_epochs data_file = training_path / f"epoch_{self.data_epoch}.json" metrics_file = training_path / f"epoch_{self.data_epoch}_metrics.json" assert data_file.is_file() and metrics_file.is_file() metrics = json.loads(metrics_file.read_text()) num_samples = metrics['num_training_examples'] seq_len = metrics['max_seq_len'] self.temp_dir = None self.working_dir = None if reduce_memory: self.temp_dir = TemporaryDirectory() self.working_dir = Path(self.temp_dir.name) input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap', mode='w+', dtype=np.int32, shape=(num_samples, seq_len)) input_masks =
np.memmap(filename=self.working_dir/'input_masks.memmap', shape=(num_samples, seq_len), mode='w+', dtype=np.bool) segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap', shape=(num_samples, seq_len), mode='w+', dtype=np.bool) lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap', shape=(num_samples, seq_len), mode='w+', dtype=np.int32) lm_label_ids[:] = -1 is_nexts = np.memmap(filename=self.working_dir/'is_nexts.memmap', shape=(num_samples,), mode='w+', dtype=np.bool) else: input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32) input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool) segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool) lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1) is_nexts = np.zeros(shape=(num_samples,), dtype=np.bool) logging.info(f"Loading training examples for epoch {epoch}") with data_file.open() as f: for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")): line = line.strip() example = json.loads(line) features = convert_example_to_features(example, tokenizer, seq_len) input_ids[i] = features.input_ids segment_ids[i] = features.segment_ids input_masks[i] = features.input_mask lm_label_ids[i] = features.lm_label_ids is_nexts[i] = features.is_next assert i == num_samples - 1 # Assert that the sample count metric was true logging.info("Loading complete!") self.num_samples = num_samples self.seq_len = seq_len self.input_ids = input_ids self.input_masks = input_masks self.segment_ids = segment_ids self.lm_label_ids = lm_label_ids self.is_nexts = is_nexts def __len__(self): return self.num_samples def __getitem__(self, item): return (torch.tensor(self.input_ids[item].astype(np.int64)), torch.tensor(self.input_masks[item].astype(np.int64)), torch.tensor(self.segment_ids[item].astype(np.int64)), torch.tensor(self.lm_label_ids[item].astype(np.int64)), torch.tensor(self.is_nexts[item].astype(np.int64))) def main(): parser = ArgumentParser() parser.add_argument('--pregenerated_data', type=Path, required=True) parser.add_argument('--output_dir', type=Path, required=True) parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--do_lower_case", action="store_true") parser.add_argument("--reduce_memory", action="store_true", help="Store training data as on-disc memmaps to massively reduce memory usage") parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--learning_rate", default=3e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") args = parser.parse_args() assert args.pregenerated_data.is_dir(), \ "--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!" samples_per_epoch = [] for i in range(args.epochs): epoch_file = args.pregenerated_data / f"epoch_{i}.json" metrics_file = args.pregenerated_data / f"epoch_{i}_metrics.json" if epoch_file.is_file() and metrics_file.is_file(): metrics = json.loads(metrics_file.read_text()) samples_per_epoch.append(metrics['num_training_examples']) else: if i == 0: exit("No training data was found!") print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).") print("This script will loop over the available data, but training diversity may be negatively impacted.") num_data_epochs = i break else: num_data_epochs = args.epochs if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.output_dir.is_dir() and list(args.output_dir.iterdir()): logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!") args.output_dir.mkdir(parents=True, exist_ok=True) tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) total_train_examples = 0 for i in range(args.epochs): # The modulo takes into account the fact that we may loop over limited epochs of data total_train_examples += samples_per_epoch[i % len(samples_per_epoch)] num_train_optimization_steps = int( total_train_examples / args.train_batch_size / args.gradient_accumulation_steps) if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare model model = BertForPreTraining.from_pretrained(args.bert_model) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 
'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 logging.info("***** Running training *****") logging.info(f" Num examples = {total_train_examples}") logging.info(" Batch size = %d", args.train_batch_size) logging.info(" Num steps = %d", num_train_optimization_steps) model.train() for epoch in range(args.epochs): epoch_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data, tokenizer=tokenizer, num_data_epochs=num_data_epochs, reduce_memory=args.reduce_memory) if args.local_rank == -1: train_sampler = RandomSampler(epoch_dataset) else: train_sampler = DistributedSampler(epoch_dataset) train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size) tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 with tqdm(total=len(train_dataloader), desc=f"Epoch {epoch}") as pbar: for step, batch in enumerate(train_dataloader): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. 
if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 pbar.update(1) mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps pbar.set_postfix_str(f"Loss: {mean_loss:.5f}") if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: # modify learning rate with special warm up BERT uses # if args.fp16 is False, BertAdam is used that handles this automatically lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 # Save a trained model logging.info("** ** * Saving fine-tuned model ** ** * ") model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) if __name__ == '__main__': main() ================================================ FILE: examples/lm_finetuning/pregenerate_training_data.py ================================================ from argparse import ArgumentParser from pathlib import Path from tqdm import tqdm, trange from tempfile import TemporaryDirectory import shelve from multiprocessing import Pool from random import random, randrange, randint, shuffle, choice from pytorch_pretrained_bert.tokenization import BertTokenizer import numpy as np import json import collections class DocumentDatabase: def __init__(self, reduce_memory=False): if reduce_memory: self.temp_dir = TemporaryDirectory() self.working_dir = Path(self.temp_dir.name) self.document_shelf_filepath = self.working_dir / 'shelf.db' self.document_shelf = shelve.open(str(self.document_shelf_filepath), flag='n', protocol=-1) self.documents = None else: self.documents = [] self.document_shelf = None self.document_shelf_filepath = None self.temp_dir = None self.doc_lengths = [] self.doc_cumsum = None self.cumsum_max = None self.reduce_memory = reduce_memory def add_document(self, document): if not document: return if self.reduce_memory: current_idx = len(self.doc_lengths) self.document_shelf[str(current_idx)] = document else: self.documents.append(document) self.doc_lengths.append(len(document)) def _precalculate_doc_weights(self): self.doc_cumsum = np.cumsum(self.doc_lengths) self.cumsum_max = self.doc_cumsum[-1] def sample_doc(self, current_idx, sentence_weighted=True): # Uses the current iteration counter to ensure we don't sample the same doc twice if sentence_weighted: # With sentence weighting, we sample docs proportionally to their sentence length if self.doc_cumsum is None or len(self.doc_cumsum) != len(self.doc_lengths): self._precalculate_doc_weights() rand_start = self.doc_cumsum[current_idx] rand_end = rand_start + self.cumsum_max - self.doc_lengths[current_idx] sentence_index = randrange(rand_start, rand_end) % self.cumsum_max sampled_doc_index = np.searchsorted(self.doc_cumsum, sentence_index, side='right') else: # If we don't use sentence weighting, then every doc has an equal chance to be chosen sampled_doc_index = (current_idx + randrange(1, len(self.doc_lengths))) % len(self.doc_lengths) assert sampled_doc_index != current_idx if
self.reduce_memory: return self.document_shelf[str(sampled_doc_index)] else: return self.documents[sampled_doc_index] def __len__(self): return len(self.doc_lengths) def __getitem__(self, item): if self.reduce_memory: return self.document_shelf[str(item)] else: return self.documents[item] def __enter__(self): return self def __exit__(self, exc_type, exc_val, traceback): if self.document_shelf is not None: self.document_shelf.close() if self.temp_dir is not None: self.temp_dir.cleanup() def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens): """Truncates a pair of sequences to a maximum sequence length. Lifted from Google's BERT repo.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_num_tokens: break trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b assert len(trunc_tokens) >= 1 # We want to sometimes truncate from the front and sometimes from the # back to add more randomness and avoid biases. if random() < 0.5: del trunc_tokens[0] else: trunc_tokens.pop() MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"]) def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, whole_word_mask, vocab_list): """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but with several refactors to clean it up and remove a lot of unnecessary variables.""" cand_indices = [] for (i, token) in enumerate(tokens): if token == "[CLS]" or token == "[SEP]": continue # Whole Word Masking means that we mask all of the wordpieces # corresponding to an original word. When a word has been split into # WordPieces, the first token does not have any marker and any subsequent # tokens are prefixed with ##. So whenever we see the ## token, we # append it to the previous set of word indexes. # # Note that Whole Word Masking does *not* change the training code # at all -- we still predict each WordPiece independently, softmaxed # over the entire vocabulary. if (whole_word_mask and len(cand_indices) >= 1 and token.startswith("##")): cand_indices[-1].append(i) else: cand_indices.append([i]) num_to_mask = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob)))) shuffle(cand_indices) masked_lms = [] covered_indexes = set() for index_set in cand_indices: if len(masked_lms) >= num_to_mask: break # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate.
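# Worked example (comments only, illustrative tokens): with whole_word_mask=True,
# tokens ["[CLS]", "jack", "##son", "##ville", "?", "[SEP]"] produce
# cand_indices == [[1, 2, 3], [4]], so "jack ##son ##ville" is masked or skipped
# as one unit; with whole_word_mask=False every WordPiece is its own candidate:
# [[1], [2], [3], [4]]. The check below enforces that a multi-piece unit is never
# partially masked.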
if len(masked_lms) + len(index_set) > num_to_mask: continue is_any_index_covered = False for index in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_token = None # 80% of the time, replace with [MASK] if random() < 0.8: masked_token = "[MASK]" else: # 10% of the time, keep original if random() < 0.5: masked_token = tokens[index] # 10% of the time, replace with random word else: masked_token = choice(vocab_list) masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) tokens[index] = masked_token assert len(masked_lms) <= num_to_mask masked_lms = sorted(masked_lms, key=lambda x: x.index) mask_indices = [p.index for p in masked_lms] masked_token_labels = [p.label for p in masked_lms] return tokens, mask_indices, masked_token_labels def create_instances_from_document( doc_database, doc_idx, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, whole_word_mask, vocab_list): """This code is mostly a duplicate of the equivalent function from Google BERT's repo. However, we make some changes and improvements. Sampling is improved and no longer requires a loop in this function. Also, documents are sampled proportionally to the number of sentences they contain, which means each sentence (rather than each document) has an equal chance of being sampled as a false example for the NextSentence task.""" document = doc_database[doc_idx] # Account for [CLS], [SEP], [SEP] max_num_tokens = max_seq_length - 3 # We *usually* want to fill up the entire sequence since we are padding # to `max_seq_length` anyways, so short sequences are generally wasted # computation. However, we *sometimes* # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter # sequences to minimize the mismatch between pre-training and fine-tuning. # The `target_seq_length` is just a rough target however, whereas # `max_seq_length` is a hard limit. target_seq_length = max_num_tokens if random() < short_seq_prob: target_seq_length = randint(2, max_num_tokens) # We DON'T just concatenate all of the tokens from a document into a long # sequence and choose an arbitrary split point because this would make the # next sentence prediction task too easy. Instead, we split the input into # segments "A" and "B" based on the actual "sentences" provided by the user # input. instances = [] current_chunk = [] current_length = 0 i = 0 while i < len(document): segment = document[i] current_chunk.append(segment) current_length += len(segment) if i == len(document) - 1 or current_length >= target_seq_length: if current_chunk: # `a_end` is how many segments from `current_chunk` go into the `A` # (first) sentence. a_end = 1 if len(current_chunk) >= 2: a_end = randrange(1, len(current_chunk)) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] # Random next if len(current_chunk) == 1 or random() < 0.5: is_random_next = True target_b_length = target_seq_length - len(tokens_a) # Sample a random document, with longer docs being sampled more frequently random_document = doc_database.sample_doc(current_idx=doc_idx, sentence_weighted=True) random_start = randrange(0, len(random_document)) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if len(tokens_b) >= target_b_length: break # We didn't actually use these segments so we "put them back" so # they don't go to waste. 
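# Worked example (illustrative numbers): if current_chunk held 5 segments and
# a_end == 2, the 3 unused segments are returned by rewinding i by 3, so they
# seed the next chunk instead of being discarded.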
num_unused_segments = len(current_chunk) - a_end i -= num_unused_segments # Actual next else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens) assert len(tokens_a) >= 1 assert len(tokens_b) >= 1 tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"] # The segment IDs are 0 for the [CLS] token, the A tokens and the first [SEP] # They are 1 for the B tokens and the final [SEP] segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)] tokens, masked_lm_positions, masked_lm_labels = create_masked_lm_predictions( tokens, masked_lm_prob, max_predictions_per_seq, whole_word_mask, vocab_list) instance = { "tokens": tokens, "segment_ids": segment_ids, "is_random_next": is_random_next, "masked_lm_positions": masked_lm_positions, "masked_lm_labels": masked_lm_labels} instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances def create_training_file(docs, vocab_list, args, epoch_num): epoch_filename = args.output_dir / "epoch_{}.json".format(epoch_num) num_instances = 0 with epoch_filename.open('w') as epoch_file: for doc_idx in trange(len(docs), desc="Document"): doc_instances = create_instances_from_document( docs, doc_idx, max_seq_length=args.max_seq_len, short_seq_prob=args.short_seq_prob, masked_lm_prob=args.masked_lm_prob, max_predictions_per_seq=args.max_predictions_per_seq, whole_word_mask=args.do_whole_word_mask, vocab_list=vocab_list) doc_instances = [json.dumps(instance) for instance in doc_instances] for instance in doc_instances: epoch_file.write(instance + '\n') num_instances += 1 metrics_file = args.output_dir / "epoch_{}_metrics.json".format(epoch_num) with metrics_file.open('w') as metrics_file: metrics = { "num_training_examples": num_instances, "max_seq_len": args.max_seq_len } metrics_file.write(json.dumps(metrics)) def main(): parser = ArgumentParser() parser.add_argument('--train_corpus', type=Path, required=True) parser.add_argument("--output_dir", type=Path, required=True) parser.add_argument("--bert_model", type=str, required=True, choices=["bert-base-uncased", "bert-large-uncased", "bert-base-cased", "bert-base-multilingual-uncased", "bert-base-chinese", "bert-base-multilingual-cased"]) parser.add_argument("--do_lower_case", action="store_true") parser.add_argument("--do_whole_word_mask", action="store_true", help="Whether to use whole word masking rather than per-WordPiece masking.") parser.add_argument("--reduce_memory", action="store_true", help="Reduce memory usage for large datasets by keeping data on disc rather than in memory") parser.add_argument("--num_workers", type=int, default=1, help="The number of workers to use to write the files") parser.add_argument("--epochs_to_generate", type=int, default=3, help="Number of epochs of data to pregenerate") parser.add_argument("--max_seq_len", type=int, default=128) parser.add_argument("--short_seq_prob", type=float, default=0.1, help="Probability of making a short sentence as a training example") parser.add_argument("--masked_lm_prob", type=float, default=0.15, help="Probability of masking each token for the LM task") parser.add_argument("--max_predictions_per_seq", type=int, default=20, help="Maximum number of tokens to mask in each sequence") args = parser.parse_args() if args.num_workers > 1 and args.reduce_memory: raise ValueError("Cannot use multiple workers while reducing memory") tokenizer = BertTokenizer.from_pretrained(args.bert_model, 
do_lower_case=args.do_lower_case) vocab_list = list(tokenizer.vocab.keys()) with DocumentDatabase(reduce_memory=args.reduce_memory) as docs: with args.train_corpus.open() as f: doc = [] for line in tqdm(f, desc="Loading Dataset", unit=" lines"): line = line.strip() if line == "": docs.add_document(doc) doc = [] else: tokens = tokenizer.tokenize(line) doc.append(tokens) if doc: docs.add_document(doc) # If the last doc didn't end on a newline, make sure it still gets added if len(docs) <= 1: exit("ERROR: No document breaks were found in the input file! These are necessary to allow the script to " "ensure that random NextSentences are not sampled from the same document. Please add blank lines to " "indicate breaks between documents in your input file. If your dataset does not contain multiple " "documents, blank lines can be inserted at any natural boundary, such as the ends of chapters, " "sections or paragraphs.") args.output_dir.mkdir(exist_ok=True) if args.num_workers > 1: writer_workers = Pool(min(args.num_workers, args.epochs_to_generate)) arguments = [(docs, vocab_list, args, idx) for idx in range(args.epochs_to_generate)] writer_workers.starmap(create_training_file, arguments) else: for epoch in trange(args.epochs_to_generate, desc="Epoch"): create_training_file(docs, vocab_list, args, epoch) if __name__ == '__main__': main() ================================================ FILE: examples/lm_finetuning/simple_lm_finetuning.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
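# Usage sketch for the two-step pipeline defined by the scripts above (paths and
# values below are illustrative only, not taken from this repo's own train.sh):
#
#   python pregenerate_training_data.py --train_corpus my_corpus.txt \
#       --bert_model bert-base-uncased --do_lower_case \
#       --output_dir training_data/ --epochs_to_generate 3 --max_seq_len 128
#   python finetune_on_pregenerated.py --pregenerated_data training_data/ \
#       --bert_model bert-base-uncased --do_lower_case \
#       --output_dir finetuned_lm/ --epochs 3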
"""BERT finetuning runner.""" from __future__ import absolute_import, division, print_function, unicode_literals import argparse import logging import os import random from io import open import numpy as np import torch from torch.utils.data import DataLoader, Dataset, RandomSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from pytorch_pretrained_bert import WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.modeling import BertForPreTraining from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) class BERTDataset(Dataset): def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True): self.vocab = tokenizer.vocab self.tokenizer = tokenizer self.seq_len = seq_len self.on_memory = on_memory self.corpus_lines = corpus_lines # number of non-empty lines in input corpus self.corpus_path = corpus_path self.encoding = encoding self.current_doc = 0 # to avoid random sentence from same doc # for loading samples directly from file self.sample_counter = 0 # used to keep track of full epochs on file self.line_buffer = None # keep second sentence of a pair in memory and use as first sentence in next pair # for loading samples in memory self.current_random_doc = 0 self.num_docs = 0 self.sample_to_doc = [] # map sample index to doc and line # load samples into memory if on_memory: self.all_docs = [] doc = [] self.corpus_lines = 0 with open(corpus_path, "r", encoding=encoding) as f: for line in tqdm(f, desc="Loading Dataset", total=corpus_lines): line = line.strip() if line == "": self.all_docs.append(doc) doc = [] #remove last added sample because there won't be a subsequent line anymore in the doc self.sample_to_doc.pop() else: #store as one sample sample = {"doc_id": len(self.all_docs), "line": len(doc)} self.sample_to_doc.append(sample) doc.append(line) self.corpus_lines = self.corpus_lines + 1 # if last row in file is not empty if self.all_docs[-1] != doc: self.all_docs.append(doc) self.sample_to_doc.pop() self.num_docs = len(self.all_docs) # load samples later lazily from disk else: if self.corpus_lines is None: with open(corpus_path, "r", encoding=encoding) as f: self.corpus_lines = 0 for line in tqdm(f, desc="Loading Dataset", total=corpus_lines): if line.strip() == "": self.num_docs += 1 else: self.corpus_lines += 1 # if doc does not end with empty line if line.strip() != "": self.num_docs += 1 self.file = open(corpus_path, "r", encoding=encoding) self.random_file = open(corpus_path, "r", encoding=encoding) def __len__(self): # last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0. 
return self.corpus_lines - self.num_docs - 1 def __getitem__(self, item): cur_id = self.sample_counter self.sample_counter += 1 if not self.on_memory: # after one epoch we start again from beginning of file if cur_id != 0 and (cur_id % len(self) == 0): self.file.close() self.file = open(self.corpus_path, "r", encoding=self.encoding) t1, t2, is_next_label = self.random_sent(item) # tokenize tokens_a = self.tokenizer.tokenize(t1) tokens_b = self.tokenizer.tokenize(t2) # combine to one sample cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=tokens_b, is_next=is_next_label) # transform sample to features cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer) cur_tensors = (torch.tensor(cur_features.input_ids), torch.tensor(cur_features.input_mask), torch.tensor(cur_features.segment_ids), torch.tensor(cur_features.lm_label_ids), torch.tensor(cur_features.is_next)) return cur_tensors def random_sent(self, index): """ Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences from one doc. With 50% the second sentence will be a random one from another doc. :param index: int, index of sample. :return: (str, str, int), sentence 1, sentence 2, isNextSentence Label """ t1, t2 = self.get_corpus_line(index) if random.random() > 0.5: label = 0 else: t2 = self.get_random_line() label = 1 assert len(t1) > 0 assert len(t2) > 0 return t1, t2, label def get_corpus_line(self, item): """ Get one sample from corpus consisting of a pair of two subsequent lines from the same doc. :param item: int, index of sample. :return: (str, str), two subsequent sentences from corpus """ t1 = "" t2 = "" assert item < self.corpus_lines if self.on_memory: sample = self.sample_to_doc[item] t1 = self.all_docs[sample["doc_id"]][sample["line"]] t2 = self.all_docs[sample["doc_id"]][sample["line"]+1] # used later to avoid random nextSentence from same doc self.current_doc = sample["doc_id"] return t1, t2 else: if self.line_buffer is None: # read first non-empty line of file while t1 == "" : t1 = next(self.file).strip() t2 = next(self.file).strip() else: # use t2 from previous iteration as new t1 t1 = self.line_buffer t2 = next(self.file).strip() # skip empty rows that are used for separating documents and keep track of current doc id while t2 == "" or t1 == "": t1 = next(self.file).strip() t2 = next(self.file).strip() self.current_doc = self.current_doc+1 self.line_buffer = t2 assert t1 != "" assert t2 != "" return t1, t2 def get_random_line(self): """ Get random line from another document for nextSentence task. :return: str, content of one line """ # Similar to original tf repo: This outer loop should rarely go for more than one iteration for large # corpora. However, just to be careful, we try to make sure that # the random document is not the same as the document we're processing. 
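# Note: the loop below retries at most 10 times; collisions with current_doc are
# rare for corpora with many documents, so one iteration is the common case. In
# the on_memory branch, though, the break condition compares current_random_doc,
# which is only advanced by the disk reader (get_next_line), so the same-document
# check is effectively a no-op when samples are held in memory.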
for _ in range(10): if self.on_memory: rand_doc_idx = random.randint(0, len(self.all_docs)-1) rand_doc = self.all_docs[rand_doc_idx] line = rand_doc[random.randrange(len(rand_doc))] else: rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000) #pick random line for _ in range(rand_index): line = self.get_next_line() #check if our picked random line is really from another doc like we want it to be if self.current_random_doc != self.current_doc: break return line def get_next_line(self): """ Gets next line of random_file and starts over when reaching end of file""" try: line = next(self.random_file).strip() #keep track of which document we are currently looking at to later avoid having the same doc as t1 if line == "": self.current_random_doc = self.current_random_doc + 1 line = next(self.random_file).strip() except StopIteration: self.random_file.close() self.random_file = open(self.corpus_path, "r", encoding=self.encoding) line = next(self.random_file).strip() return line class InputExample(object): """A single training/test example for the language model.""" def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None): """Constructs an InputExample. Args: guid: Unique id for the example. tokens_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. tokens_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.tokens_a = tokens_a self.tokens_b = tokens_b self.is_next = is_next # nextSentence self.lm_labels = lm_labels # masked words for language model class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.is_next = is_next self.lm_label_ids = lm_label_ids def random_word(tokens, tokenizer): """ Masking some random tokens for Language Model task with probabilities as in the original BERT paper. :param tokens: list of str, tokenized sentence. :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here) :return: (list of str, list of int), masked tokens and related labels for LM prediction """ output_label = [] for i, token in enumerate(tokens): prob = random.random() # mask token with 15% probability if prob < 0.15: prob /= 0.15 # 80% randomly change token to mask token if prob < 0.8: tokens[i] = "[MASK]" # 10% randomly change token to random token elif prob < 0.9: tokens[i] = random.choice(list(tokenizer.vocab.items()))[0] # -> rest 10% randomly keep current token # append current token to output (we will predict these later) try: output_label.append(tokenizer.vocab[token]) except KeyError: # For unknown words (should not occur with BPE vocab) output_label.append(tokenizer.vocab["[UNK]"]) logger.warning("Cannot find token '{}' in vocab. Using [UNK] instead".format(token)) else: # no masking token (will be ignored by loss function later) output_label.append(-1) return tokens, output_label def convert_example_to_features(example, max_seq_length, tokenizer): """ Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with IDs, LM labels, input_mask, CLS and SEP tokens etc.
:param example: InputExample, containing sentence input as strings and is_next label :param max_seq_length: int, maximum length of sequence. :param tokenizer: Tokenizer :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training) """ tokens_a = example.tokens_a tokens_b = example.tokens_b # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) tokens_a, t1_label = random_word(tokens_a, tokenizer) tokens_b, t2_label = random_word(tokens_b, tokenizer) # concatenate lm labels and account for CLS, SEP, SEP lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1]) # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) assert len(tokens_b) > 0 for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length.
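# Worked example (comments only, max_seq_length=10, pair "[CLS] the dog [SEP] it
# barks [SEP]"): before the loop below, input_ids holds 7 real ids, input_mask is
# seven 1s, segment_ids is 0 0 0 0 1 1 1, and lm_label_ids holds -1 except at
# masked positions, where it holds the vocab id of the original token; the loop
# then pads all four lists to length 10 (with -1 for lm_label_ids).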
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) lm_label_ids.append(-1) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(lm_label_ids) == max_seq_length if example.guid < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("LM label: %s " % (lm_label_ids)) logger.info("Is next sentence label: %s " % (example.is_next)) features = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, lm_label_ids=lm_label_ids, is_next=example.is_next) return features def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--train_corpus", default=None, type=str, required=True, help="The input train corpus.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints will be written.") ## Other parameters parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=3e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--on_memory", action='store_true', help="Whether to load train samples into memory or use disk") parser.add_argument("--do_lower_case", action='store_true', help="Whether to lower case the input text. True for uncased models, False for cased models.") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of update steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability.
Only used when fp16 is set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train: raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir): raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) #train_examples = None num_train_optimization_steps = None if args.do_train: print("Loading Train Dataset", args.train_corpus) train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length, corpus_lines=None, on_memory=args.on_memory) num_train_optimization_steps = int( len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare model model = BertForPreTraining.from_pretrained(args.bert_model) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer if args.do_train: param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps) else: optimizer =
BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 if args.do_train: logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) if args.local_rank == -1: train_sampler = RandomSampler(train_dataset) else: #TODO: check if this works with current data generator from disk that relies on next(file) # (it doesn't return item back by index) train_sampler = DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: # modify learning rate with special warm up BERT uses # if args.fp16 is False, BertAdam is used that handles this automatically lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 # Save a trained model logger.info("** ** * Saving fine-tuned model ** ** * ") model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) if args.do_train: torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) if __name__ == "__main__": main() ================================================ FILE: examples/run_classifier.py ================================================ #coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import, division, print_function import argparse import csv import logging import os import random import sys sys.path.append('..') import copy import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.nn import CrossEntropyLoss, MSELoss from scipy.stats import pearsonr, spearmanr from sklearn.metrics import matthews_corrcoef, f1_score, classification_report from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule logger = logging.getLogger(__name__) class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None, entity_pos=None): """Constructs an InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. entity_pos: (Optional) list. Word-level [start, end) spans of the two entity mentions in text_a, used for relation classification.
""" self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label self.entity_pos = entity_pos class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, entity_mask=None, entity_seg_pos=None, entity_span1_pos=None, entity_span2_pos=None): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.entity_mask = entity_mask self.entity_seg_pos = entity_seg_pos self.entity_span1_pos = entity_span1_pos self.entity_span2_pos = entity_span2_pos class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SemProcessor(DataProcessor): """Processor for the SemEval 2010 Task 8 dataset.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.jsonl")), "dev") def get_labels(self): """See base class.""" return ['Message-Topic(e2,e1)', 'Instrument-Agency(e2,e1)', 'Entity-Origin(e2,e1)', 'Member-Collection(e1,e2)', 'Member-Collection(e2,e1)', 'Other', 'Component-Whole(e1,e2)', 'Product-Producer(e2,e1)', 'Component-Whole(e2,e1)', 'Entity-Destination(e2,e1)', 'Content-Container(e2,e1)', 'Entity-Destination(e1,e2)', 'Instrument-Agency(e1,e2)', 'Cause-Effect(e2,e1)', 'Entity-Origin(e1,e2)', 'Product-Producer(e1,e2)', 'Cause-Effect(e1,e2)', 'Message-Topic(e1,e2)', 'Content-Container(e1,e2)'] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" import json examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) line = json.loads(line[0]) text_a = ' '.join(line['tokens']) label = 
line['label'] entity_pos = line['entities'] examples.append( InputExample(guid=guid, text_a=text_a, label=label, entity_pos = entity_pos)) return examples class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[8] text_b = line[9] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliMismatchedProcessor(MnliProcessor): """Processor for the MultiNLI Mismatched data set (GLUE version).""" def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched") class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line[3] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class Sst2Processor(DataProcessor): """Processor for the SST-2 data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class StsbProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return [None] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[7] text_b = line[8] label = 
line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QqpProcessor(DataProcessor): """Processor for the QQP data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) try: text_a = line[3] text_b = line[4] label = line[5] except IndexError: continue examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): """Processor for the QNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RteProcessor(DataProcessor): """Processor for the RTE data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WnliProcessor(DataProcessor): """Processor for the WNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file 
into a list of `InputBatch`s.""" label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) old_entity_pos = copy.deepcopy(example.entity_pos) tokens_a, new_entity_pos = tokenizer.tokenize(example.text_a,example.entity_pos) old_entity0 = ''.join(example.text_a.split()[old_entity_pos[0][0]:old_entity_pos[0][1]]) old_entity1 = ''.join(example.text_a.split()[old_entity_pos[1][0]:old_entity_pos[1][1]]) new_entity0 = ''.join(tokens_a[new_entity_pos[0][0]:new_entity_pos[0][1]]) new_entity1 = ''.join(tokens_a[new_entity_pos[1][0]:new_entity_pos[1][1]]) old_entity0 = old_entity0.lower() old_entity1 = old_entity1.lower() if '##' in new_entity0 or '##' in new_entity1: new_entity0 = new_entity0.replace('#','') new_entity1 = new_entity1.replace('#','') try: assert(old_entity0 == new_entity0) assert(old_entity1 == new_entity1) except: import pdb;pdb.set_trace() # Entity marker tokens_a_ = copy.deepcopy(tokens_a) new_entity_pos_ = copy.deepcopy(new_entity_pos) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] tokens_a.insert(entity1_start, '') new_entity_pos[0][0] = entity1_start tokens_a.insert(entity1_end+1, '') new_entity_pos[0][1] = entity1_end+1+1 tokens_a.insert(entity2_start+2, '') new_entity_pos[1][0] = entity2_start+2 tokens_a.insert(entity2_end+3,'') new_entity_pos[1][1] = entity2_end+3+1 if new_entity_pos[1][1] > max_seq_length - 2 - 1: import pdb;pdb.set_trace() tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length.
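# Recap of the entity-marker bookkeeping earlier in this function (worked
# example with illustrative spans): for tokens_a spans [2, 4) and [7, 8), a
# marker token is inserted before and after each entity, so the stored
# positions become [2, 6) and [9, 12), each span now including its two
# surrounding markers; the later +1 offsets then account for the leading
# [CLS] token.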
padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding # Used for mention pooling entity_mask_tag = 1 entity_mask = [0] * len(input_ids) for entity in new_entity_pos: start, end = entity[0],entity[1] for i in range(start, end): # [CLS], need to +1 offset entity_mask[i+1] = entity_mask_tag """ Different position embedding """ # Strategy 1 entity1_pos_tag = 1 entity2_pos_tag = 2 entity_seg_pos = [0] * len(input_ids) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] for i in range(entity1_start, entity1_end): entity_seg_pos[i+1] = entity1_pos_tag entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] for i in range(entity2_start, entity2_end): entity_seg_pos[i+1] = entity2_pos_tag # Strategy 2 entity_start_pos_tag = 1 entity_seg_pos_ = [0] * len(input_ids) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] entity_seg_pos_[entity1_start+1] = entity_start_pos_tag entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] entity_seg_pos_[entity2_start+1] = entity_start_pos_tag # Strategy 3 entity_span1_pos = [0] * len(input_ids) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] for i in range(len(entity_span1_pos)): if i < entity1_start: #entity_span1_pos[i] = np.abs(i - entity1_start) entity_span1_pos[i] = i - entity1_start elif entity1_start <= i and i < entity1_end: entity_span1_pos[i] = 0 elif i >= entity1_end: entity_span1_pos[i] = i - entity1_end + 1 entity_span2_pos = [0] * len(input_ids) entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] for i in range(len(entity_span2_pos)): if i < entity2_start: #entity_span2_pos[i] = np.abs(i - entity2_start) entity_span2_pos[i] = i - entity2_start elif entity2_start <= i and i < entity2_end: entity_span2_pos[i] = 0 elif i >= entity2_end: entity_span2_pos[i] = i - entity2_end + 1 # Shift to avoid negative positions, which nn.Embedding cannot index (disabled) #entity_span1_pos = [pos+max_seq_length-1 for pos in entity_span1_pos] #entity_span2_pos = [pos+max_seq_length-1 for pos in entity_span2_pos] assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(entity_mask) == max_seq_length assert len(entity_seg_pos) == max_seq_length assert len(entity_seg_pos_) == max_seq_length assert len(entity_span1_pos) == max_seq_length assert len(entity_span2_pos) == max_seq_length if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info("entity_mask: %s" % " ".join([str(x) for x in entity_mask])) logger.info("entity_seg_pos: %s" % " ".join([str(x) for x in entity_seg_pos])) logger.info("entity_seg_pos_: %s" % " ".join([str(x) for x in entity_seg_pos_])) logger.info("entity_span1_pos: %s" % " ".join([str(x) for x in entity_span1_pos])) logger.info("entity_span2_pos: %s" % " ".join([str(x) for x in entity_span2_pos])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: %s (id = %d)" % (example.label, label_id)) #if example.guid == 'train-3': # import pdb;pdb.set_trace() features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, entity_mask=entity_mask, entity_seg_pos=entity_seg_pos_, entity_span1_pos=entity_span1_pos, entity_span2_pos=entity_span2_pos)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def simple_accuracy(preds, labels): return (preds == labels).mean() def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds,average='micro') report = classification_report(labels, preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, "report": report } def pearson_and_spearman(preds, labels): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def compute_metrics(task_name, preds, labels): assert len(preds) == len(labels) if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sem": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, bert-base-chinese.") parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mnli-mm": MnliMismatchedProcessor, "mrpc": MrpcProcessor, "sem": SemProcessor, "sst-2": Sst2Processor, "sts-b": StsbProcessor, "qqp": QqpProcessor, "qnli": QnliProcessor, "rte": RteProcessor, "wnli": WnliProcessor, } output_modes = { "cola": "classification", "mnli": "classification", "mrpc": "classification", "sem": "classification", "sst-2": "classification", "sts-b": "regression", "qqp": "classification", "qnli": "classification", "rte": "classification", "wnli": "classification", } if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.info("device: {} n_gpu: {}, 
distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) task_name = args.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() output_mode = output_modes[task_name] label_list = processor.get_labels() num_labels = len(label_list) tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) train_examples = None num_train_optimization_steps = None if args.do_train: train_examples = processor.get_train_examples(args.data_dir) num_train_optimization_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare model cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)) model = BertForSequenceClassification.from_pretrained(args.bert_model, cache_dir=cache_dir, num_labels=num_labels) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer if args.do_train: param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 nb_tr_steps = 0 tr_loss = 0 if args.do_train: train_features = convert_examples_to_features( train_examples, label_list, args.max_seq_length, 
            tokenizer, output_mode)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        # FloatTensor: the forward pass consumes the entity tensors as floats
        all_entity_mask = torch.tensor([f.entity_mask for f in train_features], dtype=torch.float)
        all_entity_seg_pos = torch.tensor([f.entity_seg_pos for f in train_features], dtype=torch.long)
        all_entity_span1_pos = torch.tensor([f.entity_span1_pos for f in train_features], dtype=torch.float)
        all_entity_span2_pos = torch.tensor([f.entity_span2_pos for f in train_features], dtype=torch.float)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)

        train_data = TensorDataset(all_input_ids, all_input_mask, all_entity_mask, all_entity_seg_pos,
                                   all_entity_span1_pos, all_entity_span2_pos, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, segment_ids, label_ids = batch

                # define a new function to compute loss values for both output_modes
                logits = model(input_ids, segment_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None)

                if output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
                elif output_mode == "regression":
                    loss_fct = MSELoss()
                    loss = loss_fct(logits.view(-1), label_ids.view(-1))

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
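                # Worked note (added comment, not part of the original script):
                # with --train_batch_size 32 and --gradient_accumulation_steps 4,
                # train_batch_size was divided down to 8 examples per forward
                # pass earlier in main(), and the division below scales the loss
                # so that gradients summed over four micro-batches match a
                # single 32-example update.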
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
    else:
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)

    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_entity_mask = torch.tensor([f.entity_mask for f in eval_features], dtype=torch.float)
        all_entity_seg_pos = torch.tensor([f.entity_seg_pos for f in eval_features], dtype=torch.long)
        all_entity_span1_pos = torch.tensor([f.entity_span1_pos for f in eval_features], dtype=torch.float)
        all_entity_span2_pos = torch.tensor([f.entity_span2_pos for f in eval_features], dtype=torch.float)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_entity_mask, all_entity_seg_pos,
                                  all_entity_span1_pos, all_entity_span2_pos, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss = 0
        nb_eval_steps = 0
        preds = []

        for input_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            entity_mask =
entity_mask.to(device)
            entity_seg_pos = entity_seg_pos.to(device)
            entity_span1_pos = entity_span1_pos.to(device)
            entity_span2_pos = entity_span2_pos.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                logits = model(input_ids, segment_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None)
                #logits = model(input_ids, segment_ids, input_mask, labels=None)

            # create eval loss and other metric required by the task
            if output_mode == "classification":
                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
            elif output_mode == "regression":
                loss_fct = MSELoss()
                tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))

            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
            else:
                preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        preds = preds[0]
        if output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(task_name, preds, all_label_ids.numpy())
        loss = tr_loss/global_step if args.do_train else None

        result['eval_loss'] = eval_loss
        result['global_step'] = global_step
        result['loss'] = loss

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        # hack for MNLI-MM
        if task_name == "mnli":
            task_name = "mnli-mm"
            processor = processors[task_name]()

            if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train:
                raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir + '-MM'))
            if not os.path.exists(args.output_dir + '-MM'):
                os.makedirs(args.output_dir + '-MM')

            eval_examples = processor.get_dev_examples(args.data_dir)
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            logger.info("***** Running evaluation *****")
            logger.info("  Num examples = %d", len(eval_examples))
            logger.info("  Batch size = %d", args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)

            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
            # Run prediction for full data
            eval_sampler = SequentialSampler(eval_data)
            eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

            model.eval()
            eval_loss = 0
            nb_eval_steps = 0
            preds = []

            for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
                input_ids = input_ids.to(device)
                input_mask = input_mask.to(device)
                segment_ids = segment_ids.to(device)
                label_ids = label_ids.to(device)

                with torch.no_grad():
                    logits = model(input_ids, segment_ids, input_mask, labels=None)

                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))

                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if len(preds) == 0:
                    preds.append(logits.detach().cpu().numpy())
                else:
                    preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)

            eval_loss = eval_loss / nb_eval_steps
            preds = preds[0]
            preds = np.argmax(preds, axis=1)
            result = compute_metrics(task_name, preds, all_label_ids.numpy())
            loss = tr_loss/global_step if args.do_train else None

            result['eval_loss'] = eval_loss
            result['global_step'] = global_step
            result['loss'] = loss

            output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt")
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()


================================================
FILE: examples/run_classifier_dataset_utils.py
================================================
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """

from __future__ import absolute_import, division, print_function

import csv
import logging
import os
import sys

from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score

logger = logging.getLogger(__name__)


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
            Must only be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
""" self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[8] text_b = line[9] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliMismatchedProcessor(MnliProcessor): """Processor for the MultiNLI Mismatched data set (GLUE version).""" def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched") class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), 
"dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line[3] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class Sst2Processor(DataProcessor): """Processor for the SST-2 data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class StsbProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return [None] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[7] text_b = line[8] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QqpProcessor(DataProcessor): """Processor for the QQP data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) try: text_a = line[3] text_b = line[4] label = line[5] except IndexError: continue examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): """Processor for the QNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] 
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class WnliProcessor(DataProcessor):
    """Processor for the WNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, output_mode):
    """Loads a data file into a list of `InputBatch`s."""

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))

        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
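        # Worked example (added comment): with max_seq_length = 8 and the single
        # sentence "the dog", tokens below becomes ["[CLS]", "the", "dog", "[SEP]"],
        # so input_ids holds 4 real ids followed by 4 zero pads, input_mask is
        # [1, 1, 1, 1, 0, 0, 0, 0], and segment_ids stays all zeros.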
tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: %s (id = %d)" % (example.label, label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. 
while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def simple_accuracy(preds, labels): return (preds == labels).mean() def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, } def pearson_and_spearman(preds, labels): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def compute_metrics(task_name, preds, labels): assert len(preds) == len(labels) if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mnli-mm": MnliMismatchedProcessor, "mrpc": MrpcProcessor, "sst-2": Sst2Processor, "sts-b": StsbProcessor, "qqp": QqpProcessor, "qnli": QnliProcessor, "rte": RteProcessor, "wnli": WnliProcessor, } output_modes = { "cola": "classification", "mnli": "classification", "mrpc": "classification", "sst-2": "classification", "sts-b": "regression", "qqp": "classification", "qnli": "classification", "rte": "classification", "wnli": "classification", } ================================================ FILE: examples/run_gpt2.py ================================================ #!/usr/bin/env python3 import argparse import logging from tqdm import trange import torch import torch.nn.functional as F import numpy as np from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def top_k_logits(logits, k): """ Masks everything but the k top entries as -infinity (1e10). Used to mask logits such that e^-infinity -> 0 won't contribute to the sum of the denominator. """ if k == 0: return logits else: values = torch.topk(logits, k)[0] batch_mins = values[:, -1].view(-1, 1).expand_as(logits) return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits) def sample_sequence(model, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, device='cuda', sample=True): if start_token is None: assert context is not None, 'Specify exactly one of start_token and context!' context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1) else: assert context is None, 'Specify exactly one of start_token and context!' 
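        # Added note: for unconditional generation every row of the batch is
        # seeded with the single start token; torch.full below builds that
        # (batch_size, 1) column, which the sampling loop then extends one
        # token at a time.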
context = torch.full((batch_size, 1), start_token, device=device, dtype=torch.long) prev = context output = context past = None with torch.no_grad(): for i in trange(length): logits, past = model(prev, past=past) logits = logits[:, -1, :] / temperature logits = top_k_logits(logits, k=top_k) log_probs = F.softmax(logits, dim=-1) if sample: prev = torch.multinomial(log_probs, num_samples=1) else: _, prev = torch.topk(log_probs, k=1, dim=-1) output = torch.cat((output, prev), dim=1) return output def run_model(): parser = argparse.ArgumentParser() parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint') parser.add_argument("--seed", type=int, default=0) parser.add_argument("--nsamples", type=int, default=1) parser.add_argument("--batch_size", type=int, default=-1) parser.add_argument("--length", type=int, default=-1) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--top_k", type=int, default=0) parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.') args = parser.parse_args() print(args) if args.batch_size == -1: args.batch_size = 1 assert args.nsamples % args.batch_size == 0 np.random.seed(args.seed) torch.random.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path) model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path) model.to(device) model.eval() if args.length == -1: args.length = model.config.n_ctx // 2 elif args.length > model.config.n_ctx: raise ValueError("Can't get samples longer than window size: %s" % model.config.n_ctx) while True: context_tokens = [] if not args.unconditional: raw_text = input("Model prompt >>> ") while not raw_text: print('Prompt should not be empty!') raw_text = input("Model prompt >>> ") context_tokens = enc.encode(raw_text) generated = 0 for _ in range(args.nsamples // args.batch_size): out = sample_sequence( model=model, length=args.length, context=context_tokens, start_token=None, batch_size=args.batch_size, temperature=args.temperature, top_k=args.top_k, device=device ) out = out[:, len(context_tokens):].tolist() for i in range(args.batch_size): generated += 1 text = enc.decode(out[i]) print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40) print(text) print("=" * 80) else: generated = 0 for _ in range(args.nsamples // args.batch_size): out = sample_sequence( model=model, length=args.length, context=None, start_token=enc.encoder['<|endoftext|>'], batch_size=args.batch_size, temperature=args.temperature, top_k=args.top_k, device=device ) out = out[:,1:].tolist() for i in range(args.batch_size): generated += 1 text = enc.decode(out[i]) print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40) print(text) print("=" * 80) if __name__ == '__main__': run_model() ================================================ FILE: examples/run_openai_gpt.py ================================================ # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
    Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
    which is itself adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py

    This script with default values fine-tunes and evaluates a pretrained OpenAI GPT on the RocStories dataset:
        python run_openai_gpt.py \
          --model_name openai-gpt \
          --do_train \
          --do_eval \
          --train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \
          --eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \
          --output_dir ../log \
          --train_batch_size 16 \
"""
import argparse
import os
import csv
import random
import logging
from tqdm import tqdm, trange

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)

from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
                                     OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME)

ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

        To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
        input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label), in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, :len(with_cont1)] = with_cont1
            input_ids[i, 1, :len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, :len(with_cont1)] = with_cont1
            lm_labels[i, 1, :len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
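    # Shape note (added comment): every example carries both candidate endings,
    # so input_ids is (n_batch, 2, input_len). lm_labels is pre-filled with -1,
    # which the double-heads model's LM loss is expected to treat as an ignore
    # index, so padding positions contribute nothing to the loss.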
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt',
                        help='pretrained model name')
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_proportion', type=float, default=0.002)
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name, special_tokens=special_tokens)
    special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name, num_special_tokens=len(special_tokens))
    model.to(device)

    # Load and encode the datasets
    if not args.train_dataset and not args.eval_dataset:
        roc_stories = cached_path(ROCSTORIES_URL)

    def tokenize_and_encode(obj):
        """ Tokenize and encode a nested object """
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return list(tokenize_and_encode(o) for o in obj)

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 \
                       for dataset in encoded_datasets for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        num_train_optimization_steps = len(train_dataloader) * args.num_train_epochs
        optimizer = OpenAIAdam(optimizer_grouped_parameters,
                               lr=args.learning_rate,
                               warmup=args.warmup_proportion,
                               max_grad_norm=args.max_grad_norm,
                               weight_decay=args.weight_decay,
                               t_total=num_train_optimization_steps)

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids, lm_labels, mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
                _, mc_logits = model(input_ids, mc_token_ids)

            mc_logits =
mc_logits.detach().cpu().numpy() mc_labels = mc_labels.to('cpu').numpy() tmp_eval_accuracy = accuracy(mc_logits, mc_labels) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples train_loss = tr_loss/nb_tr_steps if args.do_train else None result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == '__main__': main() ================================================ FILE: examples/run_squad.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run BERT on SQuAD.""" from __future__ import absolute_import, division, print_function import argparse import logging import os import random import sys from io import open import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from tensorboardX import SummaryWriter from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.modeling import BertForQuestionAnswering from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_pretrained_bert.tokenization import BertTokenizer from run_squad_dataset_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions if sys.version_info[0] == 2: import cPickle as pickle else: import pickle logger = logging.getLogger(__name__) def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, bert-base-chinese.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters parser.add_argument("--train_file", default=None, type=str, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default=None, type=str, help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% " "of training.") parser.add_argument("--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json " "output file.") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--do_lower_case", action='store_true', help="Whether to lower case the input text. True for uncased models, False for cased models.") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--version_2_with_negative',
                        action='store_true',
                        help='If true, the SQuAD examples contain some that do not have an answer.')
    parser.add_argument('--null_score_diff_threshold',
                        type=float, default=0.0,
                        help="If null_score - best_non_null is greater than the threshold predict null.")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_predict:
        raise ValueError("At least one of `do_train` or `do_predict` must be True.")

    if args.do_train:
        if not args.train_file:
            raise ValueError(
                "If `do_train` is True, then `train_file` must be specified.")
    if args.do_predict:
        if not args.predict_file:
            raise ValueError(
                "If `do_predict` is True, then `predict_file` must be specified.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = BertForQuestionAnswering.from_pretrained(args.bert_model)

    if args.local_rank == 0:
        torch.distributed.barrier()

    if args.fp16:
        model.half()
    model.to(device)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    if args.do_train:
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()
        # Prepare data loader
        train_examples = read_squad_examples(input_file=args.train_file,
is_training=True, version_2_with_negative=args.version_2_with_negative) cached_train_features_file = args.train_file+'_{0}_{1}_{2}_{3}'.format( list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length)) try: with open(cached_train_features_file, "rb") as reader: train_features = pickle.load(reader) except (IOError, EOFError, pickle.UnpicklingError): train_features = convert_examples_to_features( examples=train_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=True) if args.local_rank == -1 or torch.distributed.get_rank() == 0: logger.info(" Saving train features into cached file %s", cached_train_features_file) with open(cached_train_features_file, "wb") as writer: pickle.dump(train_features, writer) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long) all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # if args.local_rank != -1: # num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare optimizer param_optimizer = list(model.named_parameters()) # hack to remove the pooler, which is not used # and would otherwise produce None grads that break apex param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 logger.info("***** Running training *****") logger.info(" Num orig examples = %d", len(train_examples)) logger.info(" Num split examples = %d", len(train_features)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) model.train() for epoch in trange(int(args.num_train_epochs), desc="Epoch"): for step,
batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])): if n_gpu == 1: batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self input_ids, input_mask, segment_ids, start_positions, end_positions = batch loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: # modify learning rate with special warm up BERT uses # if args.fp16 is False, BertAdam is used and handles this automatically lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 if args.local_rank in [-1, 0]: tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step) tb_writer.add_scalar('loss', loss.item(), global_step) if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = BertForQuestionAnswering.from_pretrained(args.output_dir) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) # Good practice: save your training arguments together with the trained model output_args_file = os.path.join(args.output_dir, 'training_args.bin') torch.save(args, output_args_file) else: # Load a trained model and vocabulary that you have fine-tuned model = BertForQuestionAnswering.from_pretrained(args.output_dir) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(device) if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples = read_squad_examples( input_file=args.predict_file, is_training=False, version_2_with_negative=args.version_2_with_negative) eval_features = convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=False) logger.info("***** Running predictions *****") logger.info(" Num orig examples = %d", len(eval_examples)) logger.info(" Num split examples = %d", len(eval_features)) logger.info(" Batch size = %d", args.predict_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) 
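# Bookkeeping for the evaluation loop below: SequentialSampler keeps the
# features in order, and `all_example_index` rides along in each batch so
# every row of logits can be traced back to its entry in `eval_features`
# and, through `unique_id`, stored as a RawResult. `write_predictions`
# then regroups these per-feature results by their original example.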
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size) model.eval() all_results = [] logger.info("Start evaluating") for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=args.local_rank not in [-1, 0]): if len(all_results) % 1000 == 0: logger.info("Processing example: %d" % (len(all_results))) input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) with torch.no_grad(): batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask) for i, example_index in enumerate(example_indices): start_logits = batch_start_logits[i].detach().cpu().tolist() end_logits = batch_end_logits[i].detach().cpu().tolist() eval_feature = eval_features[example_index.item()] unique_id = int(eval_feature.unique_id) all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) output_prediction_file = os.path.join(args.output_dir, "predictions.json") output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json") output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json") write_predictions(eval_examples, eval_features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) if __name__ == "__main__": main() ================================================ FILE: examples/run_squad_dataset_utils.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Load SQuAD dataset. """ from __future__ import absolute_import, division, print_function import json import logging import math import collections from io import open from pytorch_pretrained_bert.tokenization import BasicTokenizer, whitespace_tokenize logger = logging.getLogger(__name__) class SquadExample(object): """ A single training/test example for the Squad dataset. For examples without an answer, the start and end position are -1. 
""" def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (self.qas_id) s += ", question_text: %s" % ( self.question_text) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.end_position: s += ", end_position: %d" % (self.end_position) if self.is_impossible: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r", encoding='utf-8') as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join(doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logger.warning("Could not find answer: '%s' vs. 
'%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training): """Loads a data file into a list of `InputBatch`s.""" unique_id = 1000000000 features = [] for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
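# A worked example of the padding layout (token ids below are hypothetical,
# assuming max_seq_length = 8):
#   tokens:       [CLS]   q1   [SEP]   d1    d2   [SEP]  <pad>  <pad>
#   input_ids:     101   2054   102   2003  2056   102     0      0
#   input_mask:      1      1     1      1     1     1     0      0
#   segment_ids:     0      0     0      1     1     1     0      0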
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 if example_index < 20: logger.info("*** Example ***") logger.info("unique_id: %s" % (unique_id)) logger.info("example_index: %s" % (example_index)) logger.info("doc_span_index: %s" % (doc_span_index)) logger.info("tokens: %s" % " ".join(tokens)) logger.info("token_to_orig_map: %s" % " ".join([ "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])) logger.info("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in token_is_max_context.items() ])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info( "input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) if is_training and example.is_impossible: logger.info("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logger.info("start_position: %d" % (start_position)) logger.info("end_position: %d" % (end_position)) logger.info( "answer: %s" % (answer_text)) features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible)) unique_id += 1 return features def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electronics? # Context: The Japanese electronics industry is the largest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen.
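# As a sketch of the search below on the first example (indices are
# illustrative): with doc_tokens = ["(", "1895", "-", "1943", ")", "."],
# input_start = 0, input_end = 5, and tok_answer_text = "1895", the nested
# loops eventually test the span (1, 1), where
# " ".join(doc_tokens[1:2]) == "1895" matches, so (1, 1) is returned;
# if no sub-span matches, the original (input_start, input_end) is kept.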
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" logger.info("Writing predictions to: %s" % (output_prediction_file)) logger.info("Writing nbest to: %s" % (output_nbest_file)) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if 
version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest)==1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. 
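# A small worked example of the alignment (values illustrative): for
# orig_text = "Steve Smith's", _strip_spaces returns ns_text = "SteveSmith's"
# with ns_to_s_map = {0: 0, 1: 1, ..., 5: 6, ...}, i.e. stripped index 5
# (the "S" of "Smith") maps back to original index 6. Walking pred_text's
# span through the tokenized text's map and back through this one recovers
# "Steve Smith" without the trailing "'s".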
tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info( "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs ================================================ FILE: examples/run_swag.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""BERT finetuning runner.""" from __future__ import absolute_import import argparse import csv import logging import os import random import sys from io import open import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.modeling import BertForMultipleChoice, BertConfig from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from pytorch_pretrained_bert.tokenization import BertTokenizer logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) class SwagExample(object): """A single training/test example for the SWAG dataset.""" def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label = None): self.swag_id = swag_id self.context_sentence = context_sentence self.start_ending = start_ending self.endings = [ ending_0, ending_1, ending_2, ending_3, ] self.label = label def __str__(self): return self.__repr__() def __repr__(self): l = [ "swag_id: {}".format(self.swag_id), "context_sentence: {}".format(self.context_sentence), "start_ending: {}".format(self.start_ending), "ending_0: {}".format(self.endings[0]), "ending_1: {}".format(self.endings[1]), "ending_2: {}".format(self.endings[2]), "ending_3: {}".format(self.endings[3]), ] if self.label is not None: l.append("label: {}".format(self.label)) return ", ".join(l) class InputFeatures(object): def __init__(self, example_id, choices_features, label ): self.example_id = example_id self.choices_features = [ { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } for _, input_ids, input_mask, segment_ids in choices_features ] self.label = label def read_swag_examples(input_file, is_training): with open(input_file, 'r', encoding='utf-8') as f: reader = csv.reader(f) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) if is_training and lines[0][-1] != 'label': raise ValueError( "For training, the input file must contain a label column." ) examples = [ SwagExample( swag_id = line[2], context_sentence = line[4], start_ending = line[5], # in the swag dataset, the # common beginning of each # choice is stored in "sent2". ending_0 = line[7], ending_1 = line[8], ending_2 = line[9], ending_3 = line[10], label = int(line[11]) if is_training else None ) for line in lines[1:] # we skip the line with the column names ] return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, is_training): """Loads a data file into a list of `InputBatch`s.""" # Swag is a multiple choice task. To perform this task using Bert, # we will use the formatting proposed in "Improving Language # Understanding by Generative Pre-Training" and suggested by # @jacobdevlin-google in this issue # https://github.com/google-research/bert/issues/38. # # Each choice will correspond to a sample on which we run the # inference. 
For a given Swag example, we will create the 4 # following inputs: # - [CLS] context [SEP] choice_1 [SEP] # - [CLS] context [SEP] choice_2 [SEP] # - [CLS] context [SEP] choice_3 [SEP] # - [CLS] context [SEP] choice_4 [SEP] # The model will output a single value for each input. To get the # final decision of the model, we will run a softmax over these 4 # outputs. features = [] for example_index, example in enumerate(examples): context_tokens = tokenizer.tokenize(example.context_sentence) start_ending_tokens = tokenizer.tokenize(example.start_ending) choices_features = [] for ending_index, ending in enumerate(example.endings): # We create a copy of the context tokens in order to be # able to shrink it according to ending_tokens context_tokens_choice = context_tokens[:] ending_tokens = start_ending_tokens + tokenizer.tokenize(ending) # Modifies `context_tokens_choice` and `ending_tokens` in # place so that the total length is less than the # specified length. Account for [CLS], [SEP], [SEP] with # "- 3" _truncate_seq_pair(context_tokens_choice, ending_tokens, max_seq_length - 3) tokens = ["[CLS]"] + context_tokens_choice + ["[SEP]"] + ending_tokens + ["[SEP]"] segment_ids = [0] * (len(context_tokens_choice) + 2) + [1] * (len(ending_tokens) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length choices_features.append((tokens, input_ids, input_mask, segment_ids)) label = example.label if example_index < 5: logger.info("*** Example ***") logger.info("swag_id: {}".format(example.swag_id)) for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features): logger.info("choice: {}".format(choice_idx)) logger.info("tokens: {}".format(' '.join(tokens))) logger.info("input_ids: {}".format(' '.join(map(str, input_ids)))) logger.info("input_mask: {}".format(' '.join(map(str, input_mask)))) logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids)))) if is_training: logger.info("label: {}".format(label)) features.append( InputFeatures( example_id = example.swag_id, choices_features = choices_features, label = label ) ) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def select_field(features, field): return [ [ choice[field] for choice in feature.choices_features ] for feature in features ] def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .csv files (or other data files) for the task.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, bert-base-chinese.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints will be written.") ## Other parameters parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir): raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) # Prepare model model = BertForMultipleChoice.from_pretrained(args.bert_model, cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)), num_choices=4) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) if args.do_train: # Prepare data loader train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True) train_features = convert_examples_to_features( train_examples, tokenizer, args.max_seq_length, True) all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare optimizer param_optimizer = list(model.named_parameters()) # hack to remove pooler, which is not used # thus it produce None grad that break apex param_optimizer = [n for n in param_optimizer] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer 
if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch loss = model(input_ids, segment_ids, input_mask, label_ids) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.fp16 and args.loss_scale != 1.0: # rescale loss for fp16 training # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html loss = loss * args.loss_scale if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if args.fp16: optimizer.backward(loss) else: loss.backward() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: # modify learning rate with special warm up BERT uses # if args.fp16 is False, BertAdam is used that handles this automatically lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 if args.do_train: # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = BertForMultipleChoice.from_pretrained(args.output_dir, num_choices=4) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) else: model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4) model.to(device) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True) eval_features = convert_examples_to_features( eval_examples, tokenizer, 
args.max_seq_length, True) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids = torch.tensor(select_field(eval_features, 'input_ids'), dtype=torch.long) all_input_mask = torch.tensor(select_field(eval_features, 'input_mask'), dtype=torch.long) all_segment_ids = torch.tensor(select_field(eval_features, 'segment_ids'), dtype=torch.long) all_label = torch.tensor([f.label for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids) logits = model(input_ids, segment_ids, input_mask) logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} # tr_loss and global_step are only defined when training ran in this process, so only report them then if args.do_train: result['global_step'] = global_step result['loss'] = tr_loss / nb_tr_steps output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": main() ================================================ FILE: examples/run_transfo_xl.py ================================================ # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Transformer XL model evaluation script. Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py This script with default values evaluates a pretrained Transformer-XL on WikiText 103. """ from __future__ import absolute_import, division, print_function, unicode_literals import argparse import logging import time import math import torch from pytorch_pretrained_bert import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def main(): parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model') parser.add_argument('--model_name', type=str, default='transfo-xl-wt103', help='pretrained model name') parser.add_argument('--split', type=str, default='test', choices=['all', 'valid', 'test'], help='which split to evaluate') parser.add_argument('--batch_size', type=int, default=10, help='batch size') parser.add_argument('--tgt_len', type=int, default=128, help='number of tokens to predict') parser.add_argument('--ext_len', type=int, default=0, help='length of the extended context') parser.add_argument('--mem_len', type=int, default=1600, help='length of the retained previous heads') parser.add_argument('--clamp_len', type=int, default=1000, help='max positional embedding index') parser.add_argument('--no_cuda', action='store_true', help='Do not use CUDA even though CUDA is available') parser.add_argument('--work_dir', type=str, required=True, help='path to the work_dir') parser.add_argument('--no_log', action='store_true', help='do not log the eval result') parser.add_argument('--same_length', action='store_true', help='set same length attention with masking') parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() assert args.ext_len >= 0, 'extended context length must be non-negative' if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") logger.info("device: {}".format(device)) # Load a pre-processed dataset # You can also build the corpus yourself using TransfoXLCorpus methods # The pre-processing involves computing word frequencies to prepare the Adaptive input and SoftMax # and tokenizing the dataset # The pre-processed corpus is a conversion (using the conversion script) tokenizer = TransfoXLTokenizer.from_pretrained(args.model_name) corpus = TransfoXLCorpus.from_pretrained(args.model_name) ntokens = len(corpus.vocab) va_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len) te_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len) # Load a pre-trained model model = TransfoXLLMHeadModel.from_pretrained(args.model_name) model = model.to(device) logger.info('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format( args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len)) model.reset_length(args.tgt_len, args.ext_len, args.mem_len) if args.clamp_len > 0: model.clamp_len
= args.clamp_len if args.same_length: model.same_length = True ############################################################################### # Evaluation code ############################################################################### def evaluate(eval_iter): # Turn on evaluation mode which disables dropout. model.eval() total_len, total_loss = 0, 0. start_time = time.time() with torch.no_grad(): mems = None for idx, (data, target, seq_len) in enumerate(eval_iter): ret = model(data, target, mems) loss, mems = ret loss = loss.mean() total_loss += seq_len * loss.item() total_len += seq_len total_time = time.time() - start_time logger.info('Time : {:.2f}s, {:.2f}ms/segment'.format( total_time, 1000 * total_time / (idx+1))) return total_loss / total_len # Run on test data. if args.split == 'all': test_loss = evaluate(te_iter) valid_loss = evaluate(va_iter) elif args.split == 'valid': valid_loss = evaluate(va_iter) test_loss = None elif args.split == 'test': test_loss = evaluate(te_iter) valid_loss = None def format_log(loss, split): log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format( split, loss, math.exp(loss)) return log_str log_str = '' if valid_loss is not None: log_str += format_log(valid_loss, 'valid') if test_loss is not None: log_str += format_log(test_loss, 'test') logger.info('=' * 100) logger.info(log_str) logger.info('=' * 100) if __name__ == '__main__': main() ================================================ FILE: examples/sem_run_classifier.py ================================================ #coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import, division, print_function import argparse import csv import logging import os import random import sys sys.path.append('..') import copy import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.nn import CrossEntropyLoss, MSELoss from scipy.stats import pearsonr, spearmanr from sklearn.metrics import matthews_corrcoef, f1_score, classification_report from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule logger = logging.getLogger(__name__) class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None, entity_pos=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. 
For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Must only be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label self.entity_pos = entity_pos class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, entity_mask=None, entity_seg_pos=None, entity_span1_pos=None, entity_span2_pos=None): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.entity_mask = entity_mask self.entity_seg_pos = entity_seg_pos self.entity_span1_pos = entity_span1_pos self.entity_span2_pos = entity_span2_pos class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab-separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SemProcessor(DataProcessor): """Processor for the SemEval 2010 Task 8 dataset.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.jsonl")), "dev") def get_labels(self): """See base class.""" return ['Message-Topic(e2,e1)', 'Instrument-Agency(e2,e1)', 'Entity-Origin(e2,e1)', 'Member-Collection(e1,e2)', 'Member-Collection(e2,e1)', 'Other', 'Component-Whole(e1,e2)', 'Product-Producer(e2,e1)', 'Component-Whole(e2,e1)', 'Entity-Destination(e2,e1)', 'Content-Container(e2,e1)', 'Entity-Destination(e1,e2)', 'Instrument-Agency(e1,e2)', 'Cause-Effect(e2,e1)', 'Entity-Origin(e1,e2)', 'Product-Producer(e1,e2)',
'Cause-Effect(e1,e2)', 'Message-Topic(e1,e2)', 'Content-Container(e1,e2)'] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" import json examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) line = json.loads(line[0]) text_a = ' '.join(line['tokens']) label = line['label'] entity_pos = line['entities'] examples.append( InputExample(guid=guid, text_a=text_a, label=label, entity_pos = entity_pos)) return examples class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[8] text_b = line[9] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliMismatchedProcessor(MnliProcessor): """Processor for the MultiNLI Mismatched data set (GLUE version).""" def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched") class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line[3] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class Sst2Processor(DataProcessor): """Processor for the SST-2 data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class StsbProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( 
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return [None] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[7] text_b = line[8] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QqpProcessor(DataProcessor): """Processor for the QQP data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) try: text_a = line[3] text_b = line[4] label = line[5] except IndexError: continue examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): """Processor for the QNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RteProcessor(DataProcessor): """Processor for the RTE data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WnliProcessor(DataProcessor): """Processor for the WNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] 
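# Note (illustrative, based on the GLUE distribution of WNLI): each data row
# is "index <tab> sentence1 <tab> sentence2 <tab> label", with a header as
# row 0, so the loop below skips i == 0 and reads line[1]/line[2] as the
# sentence pair and line[-1] as the binary label.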
for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) old_entity_pos = copy.deepcopy(example.entity_pos) tokens_a, new_entity_pos = tokenizer.tokenize(example.text_a,example.entity_pos) old_entity0 = ''.join(example.text_a.split()[old_entity_pos[0][0]:old_entity_pos[0][1]]) old_entity1 = ''.join(example.text_a.split()[old_entity_pos[1][0]:old_entity_pos[1][1]]) new_entity0 = ''.join(tokens_a[new_entity_pos[0][0]:new_entity_pos[0][1]]) new_entity1 = ''.join(tokens_a[new_entity_pos[1][0]:new_entity_pos[1][1]]) old_entity0 = old_entity0.lower() old_entity1 = old_entity1.lower() if '##' in new_entity0 or '##' in new_entity1: new_entity0 = new_entity0.replace('#','') new_entity1 = new_entity1.replace('#','') try: assert(old_entity0 == new_entity0) assert(old_entity1 == new_entity1) except: import pdb;pdb.set_trace() # Entity marker tokens_a_ = copy.deepcopy(tokens_a) new_entity_pos_ = copy.deepcopy(new_entity_pos) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] tokens_a.insert(entity1_start, '') new_entity_pos[0][0] = entity1_start tokens_a.insert(entity1_end+1, '') new_entity_pos[0][1] = entity1_end+1+1 tokens_a.insert(entity2_start+2, '') new_entity_pos[1][0] = entity2_start+2 tokens_a.insert(entity2_end+3,'') new_entity_pos[1][1] = entity2_end+3+1 if new_entity_pos[1][1] > max_seq_length - 2 - 1: import pdb;pdb.set_trace() tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned.
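# A tiny worked example of the entity-marker bookkeeping above (illustrative
# only): with tokens_a = ['the', 'man', 'ate', 'an', 'apple'], e1 span [1, 2)
# and e2 span [4, 5), the four inserts produce
#   ['the', M, 'man', M, 'ate', 'an', M, 'apple', M]
# (M = the marker token), and the spans become [1, 4) and [6, 9), i.e. each
# span is widened to include its own start/end markers. Prepending [CLS]
# below shifts everything right by one more slot, which is the "+1 offset"
# applied to entity_mask and the position vectors further down.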
tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding # Used for mention pooling entity_mask_tag = 1 entity_mask = [0] * len(input_ids) for entity in new_entity_pos: start, end = entity[0],entity[1] for i in range(start, end): # [CLS], need to +1 offset entity_mask[i+1] = entity_mask_tag """ Different position embedding """ # Strategy 1 entity1_pos_tag = 1 entity2_pos_tag = 2 entity_seg_pos = [0] * len(input_ids) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] for i in range(entity1_start, entity1_end): entity_seg_pos[i+1] = entity1_pos_tag entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] for i in range(entity2_start, entity2_end): entity_seg_pos[i+1] = entity2_pos_tag # Strategy 2 entity_start_pos_tag = 1 entity_seg_pos_ = [0] * len(input_ids) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] entity_seg_pos_[entity1_start+1] = entity_start_pos_tag entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] entity_seg_pos_[entity2_start+1] = entity_start_pos_tag # Strategy 3 entity_span1_pos = [0] * len(input_ids) entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] for i in range(len(entity_span1_pos)): if i < entity1_start: #entity_span1_pos[i] = np.abs(i - entity1_start) entity_span1_pos[i] = i - entity1_start elif entity1_start <= i and i < entity1_end: entity_span1_pos[i] = 0 elif i >= entity1_end: entity_span1_pos[i] = i - entity1_end + 1 entity_span2_pos = [0] * len(input_ids) entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] for i in range(len(entity_span2_pos)): if i < entity2_start: #entity_span2_pos[i] = np.abs(i - entity2_start) entity_span2_pos[i] = i - entity2_start elif entity2_start <= i and i < entity2_end: entity_span2_pos[i] = 0 elif i >= entity2_end: entity_span2_pos[i] = i - entity2_end + 1 # Avoid to get negative position to fuck the nn.Embedding #entity_span1_pos = [pos+max_seq_length-1 for pos in entity_span1_pos] #entity_span2_pos = [pos+max_seq_length-1 for pos in entity_span2_pos] assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(entity_mask) == max_seq_length assert len(entity_seg_pos) == max_seq_length assert len(entity_seg_pos_) == max_seq_length assert len(entity_span1_pos) == max_seq_length assert len(entity_span2_pos) == max_seq_length if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info("entity_mask: %s" % " ".join([str(x) for x in entity_mask])) logger.info("entity_seg_pos: %s" % " ".join([str(x) for x in entity_seg_pos])) logger.info("entity_seg_pos_: %s" % " ".join([str(x) for x in entity_seg_pos_])) 
logger.info("entity_span1_pos: %s" % " ".join([str(x) for x in entity_span1_pos])) logger.info("entity_span2_pos: %s" % " ".join([str(x) for x in entity_span2_pos])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: %s (id = %d)" % (example.label, label_id)) #if example.guid == 'train-3': # import pdb;pdb.set_trace() features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, entity_mask=entity_mask, entity_seg_pos=entity_seg_pos_, entity_span1_pos=entity_span1_pos, entity_span2_pos=entity_span2_pos)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def simple_accuracy(preds, labels): return (preds == labels).mean() def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds,average='micro') report = classification_report(labels, preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, "report": report } def pearson_and_spearman(preds, labels): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def compute_metrics(task_name, preds, labels): assert len(preds) == len(labels) if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sem": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, bert-base-chinese.") parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") ## Other parameters parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 is set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") args = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mnli-mm": MnliMismatchedProcessor, "mrpc": MrpcProcessor, "sem": SemProcessor, "sst-2": Sst2Processor, "sts-b": StsbProcessor, "qqp": QqpProcessor, "qnli": QnliProcessor, "rte": RteProcessor, "wnli": WnliProcessor, } output_modes = { "cola": "classification", "mnli": "classification", "mrpc": "classification", "sem": "classification", "sst-2": "classification", "sts-b": "regression", "qqp": "classification", "qnli": "classification", "rte": "classification", "wnli": "classification", } if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) task_name = args.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() output_mode = output_modes[task_name] label_list = processor.get_labels() num_labels = len(label_list) tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) train_examples = None num_train_optimization_steps = None if args.do_train: train_examples = processor.get_train_examples(args.data_dir) num_train_optimization_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() # Prepare model cache_dir = args.cache_dir if args.cache_dir else
os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)) model = BertForSequenceClassification.from_pretrained(args.bert_model, cache_dir=cache_dir, num_labels=num_labels) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer if args.do_train: param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps) global_step = 0 nb_tr_steps = 0 tr_loss = 0 if args.do_train: train_features = convert_examples_to_features( train_examples, label_list, args.max_seq_length, tokenizer, output_mode) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) # FloatTensor(forward) all_entity_mask = torch.tensor([f.entity_mask for f in train_features], dtype=torch.float) all_entity_seg_pos = torch.tensor([f.entity_seg_pos for f in train_features], dtype=torch.long) all_entity_span1_pos = torch.tensor([f.entity_span1_pos for f in train_features], dtype=torch.float) all_entity_span2_pos = torch.tensor([f.entity_span2_pos for f in train_features], dtype=torch.float) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) if output_mode == "classification": all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) elif output_mode == "regression": all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float) train_data = TensorDataset(all_input_ids, all_input_mask, all_entity_mask, all_entity_seg_pos, all_entity_span1_pos, all_entity_span2_pos, all_segment_ids, all_label_ids) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch 
in enumerate(tqdm(train_dataloader, desc="Iteration")): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, segment_ids, label_ids = batch # define a new function to compute loss values for both output_modes logits = model(input_ids, segment_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None) if output_mode == "classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1)) elif output_mode == "regression": loss_fct = MSELoss() loss = loss_fct(logits.view(-1), label_ids.view(-1)) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: # modify learning rate with the special warm-up BERT uses # if args.fp16 is False, BertAdam is used, which handles this automatically lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) else: model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels) model.to(device) if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0): eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, output_mode) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_entity_mask = torch.tensor([f.entity_mask for f in eval_features], dtype=torch.float) all_entity_seg_pos = torch.tensor([f.entity_seg_pos for f in eval_features], dtype=torch.long) all_entity_span1_pos = torch.tensor([f.entity_span1_pos for f in eval_features], dtype=torch.float) all_entity_span2_pos = torch.tensor([f.entity_span2_pos for f in eval_features], dtype=torch.float) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) if output_mode == "classification": all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float) eval_data = TensorDataset(all_input_ids, all_input_mask, all_entity_mask, all_entity_seg_pos, all_entity_span1_pos, all_entity_span2_pos, all_segment_ids, all_label_ids) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss = 0 nb_eval_steps = 0 preds = [] for input_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) entity_mask = entity_mask.to(device) entity_seg_pos = entity_seg_pos.to(device) entity_span1_pos = entity_span1_pos.to(device) entity_span2_pos = entity_span2_pos.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): logits = model(input_ids, segment_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None) #logits = model(input_ids, segment_ids, input_mask, labels=None) # create eval loss and other metrics required by the task if output_mode == "classification": loss_fct = CrossEntropyLoss() tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1)) elif output_mode == "regression": loss_fct = MSELoss() tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1)) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if len(preds) == 0: preds.append(logits.detach().cpu().numpy()) else: preds[0] = np.append( preds[0], logits.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = preds[0] if output_mode == "classification": preds = np.argmax(preds, axis=1) elif output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(task_name, preds, all_label_ids.numpy()) loss = tr_loss/global_step if args.do_train else None result['eval_loss'] = eval_loss result['global_step'] = global_step result['loss'] = loss output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) # hack for MNLI-MM if task_name == "mnli": task_name = "mnli-mm" processor = processors[task_name]() if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir + '-MM')) if not os.path.exists(args.output_dir + '-MM'): os.makedirs(args.output_dir + '-MM') eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, output_mode) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) # Run prediction for full data
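# SequentialSampler preserves the original example order, so the
# predictions accumulated in `preds` stay aligned with all_label_ids
# when compute_metrics is called below.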
eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss = 0 nb_eval_steps = 0 preds = [] for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): logits = model(input_ids, segment_ids, input_mask, labels=None) loss_fct = CrossEntropyLoss() tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1)) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if len(preds) == 0: preds.append(logits.detach().cpu().numpy()) else: preds[0] = np.append( preds[0], logits.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = preds[0] preds = np.argmax(preds, axis=1) result = compute_metrics(task_name, preds, all_label_ids.numpy()) loss = tr_loss/global_step if args.do_train else None result['eval_loss'] = eval_loss result['global_step'] = global_step result['loss'] = loss output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": main() ================================================ FILE: examples/tacred_run_classifier.py ================================================ #coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import, division, print_function import argparse import csv import logging import os import random import sys sys.path.append('..') import copy import json import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.nn import CrossEntropyLoss, MSELoss from scipy.stats import pearsonr, spearmanr from sklearn.metrics import matthews_corrcoef, f1_score, classification_report,precision_recall_fscore_support from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule logger = logging.getLogger(__name__) class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None, entity_pos=None): """Constructs an InputExample. Args: guid: Unique id for the example. text_a: string.
The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Must only be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label self.entity_pos = entity_pos class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, entity_mask=None, entity_seg_pos=None, entity_span1_pos=None, entity_span2_pos=None): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.entity_mask = entity_mask self.entity_seg_pos = entity_seg_pos self.entity_span1_pos = entity_span1_pos self.entity_span2_pos = entity_span2_pos class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab-separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class SemProcessor(DataProcessor): """Processor for the SemEval 2010 Task 8 dataset.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.jsonl")), "dev") def get_labels(self): """See base class.""" return ['Message-Topic(e2,e1)', 'Instrument-Agency(e2,e1)', 'Entity-Origin(e2,e1)', 'Member-Collection(e1,e2)', 'Member-Collection(e2,e1)', 'Other', 'Component-Whole(e1,e2)', 'Product-Producer(e2,e1)', 'Component-Whole(e2,e1)', 'Entity-Destination(e2,e1)', 'Content-Container(e2,e1)', 'Entity-Destination(e1,e2)', 'Instrument-Agency(e1,e2)', 'Cause-Effect(e2,e1)', 'Entity-Origin(e1,e2)',
'Product-Producer(e1,e2)', 'Cause-Effect(e1,e2)', 'Message-Topic(e1,e2)', 'Content-Container(e1,e2)'] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) line = json.loads(line[0]) text_a = ' '.join(line['tokens']) label = line['label'] entity_pos = line['entities'] examples.append( InputExample(guid=guid, text_a=text_a, label=label, entity_pos = entity_pos)) return examples class TacredProcessor(DataProcessor): """Processor for the TACRED dataset.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl"))) return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_dev.jsonl")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.jsonl")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "test.jsonl")), "test") def get_labels(self): """See base class.""" return ['per:parents', 'per:country_of_birth', 'org:political/religious_affiliation', 'org:parents', 'org:members', 'per:schools_attended', 'org:shareholders', 'per:stateorprovince_of_death', 'per:age', 'per:city_of_death', 'per:siblings', 'per:date_of_birth', 'org:founded', 'per:stateorprovince_of_birth', 'per:origin', 'per:charges', 'per:children', 'per:title', 'per:countries_of_residence', 'org:top_members/employees', 'per:religion', 'per:country_of_death', 'per:employee_of', 'no_relation', 'per:stateorprovinces_of_residence', 'org:city_of_headquarters', 'org:dissolved', 'per:date_of_death', 'per:other_family', 'per:alternate_names', 'org:number_of_employees/members', 'per:spouse', 'per:cause_of_death', 'org:alternate_names', 'org:founded_by', 'org:stateorprovince_of_headquarters', 'per:city_of_birth', 'org:subsidiaries', 'org:website', 'org:member_of', 'per:cities_of_residence', 'org:country_of_headquarters'] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) line = json.loads(line[0]) text_a = ' '.join(line['tokens']) label = line['label'] entity_pos = line['entities'] # Assume entity spans do not overlap entity_pos = sorted(entity_pos) examples.append( InputExample(guid=guid, text_a=text_a, label=label, entity_pos = entity_pos)) return examples class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[8] text_b = line[9] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliMismatchedProcessor(MnliProcessor): """Processor for the MultiNLI Mismatched data set (GLUE version).""" def get_dev_examples(self,
data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched") class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line[3] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class Sst2Processor(DataProcessor): """Processor for the SST-2 data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class StsbProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return [None] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[7] text_b = line[8] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QqpProcessor(DataProcessor): """Processor for the QQP data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) try: text_a = line[3] text_b = line[4] label = line[5] except IndexError: continue examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): """Processor for the QNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( 
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RteProcessor(DataProcessor): """Processor for the RTE data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WnliProcessor(DataProcessor): """Processor for the WNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) old_entity_pos = copy.deepcopy(example.entity_pos) tokens_a, new_entity_pos = tokenizer.tokenize(example.text_a,example.entity_pos) old_entity0 = ''.join(example.text_a.split()[old_entity_pos[0][0]:old_entity_pos[0][1]]) old_entity1 = ''.join(example.text_a.split()[old_entity_pos[1][0]:old_entity_pos[1][1]]) new_entity0 = ''.join(tokens_a[new_entity_pos[0][0]:new_entity_pos[0][1]]) new_entity1 = ''.join(tokens_a[new_entity_pos[1][0]:new_entity_pos[1][1]]) old_entity0 = old_entity0.lower() old_entity1 = old_entity1.lower() if '##' in new_entity0 or '##' in new_entity1: new_entity0 = new_entity0.replace('#','') new_entity1 = new_entity1.replace('#','') try: assert(old_entity0 == new_entity0) assert(old_entity1 == new_entity1) except: continue #import pdb;pdb.set_trace() # Entity marker tokens_a_ = copy.deepcopy(tokens_a) new_entity_pos_ = copy.deepcopy(new_entity_pos) 
entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1] entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1] tokens_a.insert(entity1_start, '') new_entity_pos[0][0] = entity1_start tokens_a.insert(entity1_end+1, '') new_entity_pos[0][1] = entity1_end+1+1 tokens_a.insert(entity2_start+2, '') new_entity_pos[1][0] = entity2_start+2 tokens_a.insert(entity2_end+3,'') new_entity_pos[1][1] = entity2_end+3+1 if new_entity_pos[1][1] > max_seq_length - 2 - 1: continue #import pdb;pdb.set_trace() tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length.
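# E.g. with max_seq_length = 8 and five real ids, three zeros are
# appended to input_ids, input_mask and segment_ids so each is exactly
# max_seq_length long; the zeros in input_mask mark padding positions
# that attention should ignore.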
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0    0    0    0       0   0   0    1  1  1  1  1    1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0    0   0   0   0   0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding

        # Used for mention pooling
        entity_mask_tag = 1
        entity_mask = [0] * len(input_ids)
        for entity in new_entity_pos:
            start, end = entity[0], entity[1]
            for i in range(start, end):
                # [CLS] sits at position 0, so shift by +1
                entity_mask[i + 1] = entity_mask_tag

        # Different position embedding strategies.
        # Strategy 1: tag every token of entity 1 with 1 and of entity 2 with 2.
        entity1_pos_tag = 1
        entity2_pos_tag = 2
        entity_seg_pos = [0] * len(input_ids)
        entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1]
        for i in range(entity1_start, entity1_end):
            entity_seg_pos[i + 1] = entity1_pos_tag
        entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1]
        for i in range(entity2_start, entity2_end):
            entity_seg_pos[i + 1] = entity2_pos_tag

        # Strategy 2: tag only the start position of each entity.
        entity_start_pos_tag = 1
        entity_seg_pos_ = [0] * len(input_ids)
        entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1]
        entity_seg_pos_[entity1_start + 1] = entity_start_pos_tag
        entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1]
        entity_seg_pos_[entity2_start + 1] = entity_start_pos_tag

        # Strategy 3: signed distance of every position to each entity span
        # (0 inside the span).
        entity_span1_pos = [0] * len(input_ids)
        entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1]
        for i in range(len(entity_span1_pos)):
            if i < entity1_start:
                #entity_span1_pos[i] = np.abs(i - entity1_start)
                entity_span1_pos[i] = i - entity1_start
            elif entity1_start <= i and i < entity1_end:
                entity_span1_pos[i] = 0
            elif i >= entity1_end:
                entity_span1_pos[i] = i - entity1_end + 1
        entity_span2_pos = [0] * len(input_ids)
        entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1]
        for i in range(len(entity_span2_pos)):
            if i < entity2_start:
                #entity_span2_pos[i] = np.abs(i - entity2_start)
                entity_span2_pos[i] = i - entity2_start
            elif entity2_start <= i and i < entity2_end:
                entity_span2_pos[i] = 0
            elif i >= entity2_end:
                entity_span2_pos[i] = i - entity2_end + 1
        # Shifting would keep nn.Embedding from seeing negative positions:
        #entity_span1_pos = [pos+max_seq_length-1 for pos in entity_span1_pos]
        #entity_span2_pos = [pos+max_seq_length-1 for pos in entity_span2_pos]
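        # Illustration (a sketch with hypothetical tokens): for
        # [CLS] mark works for acme corp [SEP] and entity spans [0, 1)
        # ("mark") and [3, 5) ("acme corp") in tokens_a coordinates:
        #   Strategy 1 -> [0, 1, 0, 0, 2, 2, 0]
        #   Strategy 2 -> [0, 1, 0, 0, 1, 0, 0]
        # Note that Strategy 3, unlike Strategies 1 and 2, indexes the padded
        # sequence directly and does not apply the +1 [CLS] offset.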
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(entity_mask) == max_seq_length
        assert len(entity_seg_pos) == max_seq_length
        assert len(entity_seg_pos_) == max_seq_length
        assert len(entity_span1_pos) == max_seq_length
        assert len(entity_span2_pos) == max_seq_length

        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info("entity_mask: %s" % " ".join([str(x) for x in entity_mask]))
            logger.info("entity_seg_pos: %s" % " ".join([str(x) for x in entity_seg_pos]))
            logger.info("entity_seg_pos_: %s" % " ".join([str(x) for x in entity_seg_pos_]))
            logger.info("entity_span1_pos: %s" % " ".join([str(x) for x in entity_span1_pos]))
            logger.info("entity_span2_pos: %s" % " ".join([str(x) for x in entity_span2_pos]))
            logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id,
                          entity_mask=entity_mask,
                          entity_seg_pos=entity_seg_pos_,
                          entity_span1_pos=entity_span1_pos,
                          entity_span2_pos=entity_span2_pos))
    return features


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    # Exclude no_relation samples from the report.
    class_num = 42
    no_relation_label = 23
    labels_ = [i for i in range(class_num)]
    labels_.remove(no_relation_label)
    report = classification_report(labels, preds, labels=labels_)
    return {
        "report": report
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def compute_metrics(task_name, preds, labels):
    assert len(preds) == len(labels)
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sem":
        return acc_and_f1(preds, labels)
    elif task_name == "tacred":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                             "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size", default=8, type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='',
                        help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='',
                        help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mnli-mm": MnliMismatchedProcessor,
        "mrpc": MrpcProcessor,
        "sem": SemProcessor,
        "tacred": TacredProcessor,
        "sst-2": Sst2Processor,
        "sts-b": StsbProcessor,
        "qqp": QqpProcessor,
        "qnli": QnliProcessor,
        "rte": RteProcessor,
        "wnli": WnliProcessor,
    }

    output_modes = {
        "cola": "classification",
        "mnli": "classification",
        "mrpc": "classification",
        "sem": "classification",
        "tacred": "classification",
        "sst-2": "classification",
        "sts-b": "regression",
        "qqp": "classification",
        "qnli": "classification",
        "rte": "classification",
        "wnli": "classification",
    }

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval and not args.do_test:
        raise ValueError("At least one of `do_train`, `do_eval` or `do_test` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    output_mode = output_modes[task_name]

    label_list = processor.get_labels()
    num_labels = len(label_list)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
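        # Worked example (sketch): 12,800 training examples with
        # train_batch_size=32 and gradient_accumulation_steps=1 give
        # 12800 / 32 / 1 = 400 optimizer steps per epoch, i.e. 1200 total
        # steps for num_train_epochs=3.0.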
    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
    model = BertForSequenceClassification.from_pretrained(args.bert_model,
                                                          cache_dir=cache_dir,
                                                          num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.do_train:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                                 t_total=num_train_optimization_steps)
        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=num_train_optimization_steps)
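        # Worked example (sketch): with warmup_proportion=0.1 and
        # t_total=1000, the warmup-linear schedule ramps the learning rate
        # from 0 up to args.learning_rate over the first 100 steps, then
        # decays it linearly back towards 0 over the remaining 900 steps.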
    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        # FloatTensor (forward)
        all_entity_mask = torch.tensor([f.entity_mask for f in train_features], dtype=torch.float)
        all_entity_seg_pos = torch.tensor([f.entity_seg_pos for f in train_features], dtype=torch.long)
        all_entity_span1_pos = torch.tensor([f.entity_span1_pos for f in train_features], dtype=torch.float)
        all_entity_span2_pos = torch.tensor([f.entity_span2_pos for f in train_features], dtype=torch.float)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)

        train_data = TensorDataset(all_input_ids, all_input_mask, all_entity_mask, all_entity_seg_pos,
                                   all_entity_span1_pos, all_entity_span2_pos, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

        model.train()
        for epoch_num in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, segment_ids, label_ids = batch

                # define a new function to compute loss values for both output_modes
                logits = model(input_ids, segment_ids, input_mask, entity_mask,
                               entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None)

                if output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
                elif output_mode == "regression":
                    loss_fct = MSELoss()
                    loss = loss_fct(logits.view(-1), label_ids.view(-1))

                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            # Save a trained model, configuration and tokenizer
            model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

            # If we save using the predefined names, we can load using `from_pretrained`
            output_dir_ = args.output_dir + str(epoch_num)
            if not os.path.exists(output_dir_):
                os.makedirs(output_dir_)
            output_model_file = os.path.join(output_dir_, WEIGHTS_NAME)
            output_config_file = os.path.join(output_dir_, CONFIG_NAME)
            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            tokenizer.save_vocabulary(output_dir_)

            # Save latest model to load
            output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
            output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            tokenizer.save_vocabulary(args.output_dir)
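            # Note (sketch): the loop above also leaves one checkpoint per
            # epoch under args.output_dir + str(epoch_num); any of them can be
            # reloaded the same way as the latest one, e.g.
            #   BertForSequenceClassification.from_pretrained(args.output_dir + '0', num_labels=num_labels)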
    if args.do_test and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Load a trained model and vocabulary that you have fine-tuned
        model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(device)

        eval_examples = processor.get_test_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_entity_mask = torch.tensor([f.entity_mask for f in eval_features], dtype=torch.float)
        all_entity_seg_pos = torch.tensor([f.entity_seg_pos for f in eval_features], dtype=torch.long)
        all_entity_span1_pos = torch.tensor([f.entity_span1_pos for f in eval_features], dtype=torch.float)
        all_entity_span2_pos = torch.tensor([f.entity_span2_pos for f in eval_features], dtype=torch.float)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)

        if output_mode == "classification":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        elif output_mode == "regression":
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_entity_mask, all_entity_seg_pos,
                                  all_entity_span1_pos, all_entity_span2_pos, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

        model.eval()
        eval_loss = 0
        nb_eval_steps = 0
        preds = []

        for input_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            entity_mask = entity_mask.to(device)
            entity_seg_pos = entity_seg_pos.to(device)
            entity_span1_pos = entity_span1_pos.to(device)
            entity_span2_pos = entity_span2_pos.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                logits = model(input_ids, segment_ids, input_mask, entity_mask,
                               entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None)

            # create eval loss and other metric required by the task
            if output_mode == "classification":
                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
            elif output_mode == "regression":
                loss_fct = MSELoss()
                tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))

            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
            else:
                preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        preds = preds[0]
        if output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(task_name, preds, all_label_ids.numpy())
        loss = tr_loss / global_step if args.do_train else None

        result['eval_loss'] = eval_loss
        result['global_step'] = global_step
        result['loss'] = loss

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()

================================================ FILE: examples/tacred_run_infer.py ================================================
from __future__ import absolute_import, division, print_function

import argparse
import csv
import logging
import os
import random
import sys
sys.path.append('..')
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import copy
import json

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score, classification_report, precision_recall_fscore_support

from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pprint import pprint
import textdistance
import neuralcoref
import en_core_web_sm
from itertools import groupby, combinations
from utils import get_candidate_input

from flask import Flask, request, jsonify
app = Flask(__name__)


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None, entity_pos=None):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
                Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
        self.entity_pos = entity_pos


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id,
                 entity_mask=None, entity_seg_pos=None,
                 entity_span1_pos=None, entity_span2_pos=None):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
        self.entity_mask = entity_mask
        self.entity_seg_pos = entity_seg_pos
        self.entity_span1_pos = entity_span1_pos
        self.entity_span2_pos = entity_span2_pos


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if sys.version_info[0] == 2:
                    line = list(unicode(cell, 'utf-8') for cell in line)
                lines.append(line)
            return lines
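
# Note (sketch): for TACRED the *.jsonl files are read through _read_tsv, so
# each physical line arrives as a single-element list and is json.loads-ed in
# TacredProcessor._create_examples below.
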
class TacredProcessor(DataProcessor):
    """Processor for the TACRED dataset."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train_dev.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.jsonl")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.jsonl")), "test")

    def get_labels(self):
        """See base class."""
        return ['per:parents', 'per:country_of_birth', 'org:political/religious_affiliation',
                'org:parents', 'org:members', 'per:schools_attended', 'org:shareholders',
                'per:stateorprovince_of_death', 'per:age', 'per:city_of_death', 'per:siblings',
                'per:date_of_birth', 'org:founded', 'per:stateorprovince_of_birth', 'per:origin',
                'per:charges', 'per:children', 'per:title', 'per:countries_of_residence',
                'org:top_members/employees', 'per:religion', 'per:country_of_death',
                'per:employee_of', 'no_relation', 'per:stateorprovinces_of_residence',
                'org:city_of_headquarters', 'org:dissolved', 'per:date_of_death',
                'per:other_family', 'per:alternate_names', 'org:number_of_employees/members',
                'per:spouse', 'per:cause_of_death', 'org:alternate_names', 'org:founded_by',
                'org:stateorprovince_of_headquarters', 'per:city_of_birth', 'org:subsidiaries',
                'org:website', 'org:member_of', 'per:cities_of_residence',
                'org:country_of_headquarters']

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            line = json.loads(line[0])
            text_a = ' '.join(line['tokens'])
            label = line['label']
            entity_pos = line['entities']
            # Assume entity spans do not overlap
            entity_pos = sorted(entity_pos)
            examples.append(
                InputExample(guid=guid, text_a=text_a, label=label, entity_pos=entity_pos))
        return examples


class _TacredProcessor(DataProcessor):
    """Processor for the TACRED dataset."""

    def get_test_examples(self, lines):
        """See base class."""
        return self._create_examples(lines, "test")

    def get_labels(self):
        """See base class."""
        return ['per:parents', 'per:country_of_birth', 'org:political/religious_affiliation',
                'org:parents', 'org:members', 'per:schools_attended', 'org:shareholders',
                'per:stateorprovince_of_death', 'per:age', 'per:city_of_death', 'per:siblings',
                'per:date_of_birth', 'org:founded', 'per:stateorprovince_of_birth', 'per:origin',
                'per:charges', 'per:children', 'per:title', 'per:countries_of_residence',
                'org:top_members/employees', 'per:religion', 'per:country_of_death',
                'per:employee_of', 'no_relation', 'per:stateorprovinces_of_residence',
                'org:city_of_headquarters', 'org:dissolved', 'per:date_of_death',
                'per:other_family', 'per:alternate_names', 'org:number_of_employees/members',
                'per:spouse', 'per:cause_of_death', 'org:alternate_names', 'org:founded_by',
                'org:stateorprovince_of_headquarters', 'per:city_of_birth', 'org:subsidiaries',
                'org:website', 'org:member_of', 'per:cities_of_residence',
                'org:country_of_headquarters']

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = ' '.join(line['tokens'])
            label = line['label']
            entity_pos = line['entities']
            # Assume entity spans do not overlap
            entity_pos = sorted(entity_pos)
            examples.append(
                InputExample(guid=guid, text_a=text_a, label=label, entity_pos=entity_pos))
        return examples
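
# Example input record (sketch with hypothetical values) as consumed by
# _create_examples above:
#   {"tokens": ["Mark", "works", "for", "Acme"],
#    "label": "per:employee_of",
#    "entities": [[0, 1], [3, 4]]}
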
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, output_mode):
    """Loads a data file into a list of `InputBatch`s."""

    label_map = {label: i for i, label in enumerate(label_list)}
    reverse_label_map = {i: label for i, label in enumerate(label_list)}

    samples = []
    features = []
    for (ex_index, example) in enumerate(examples):
        # Re-tokenize and check that the entity spans survive WordPiece
        # tokenization; skip examples whose spans cannot be aligned.
        old_entity_pos = copy.deepcopy(example.entity_pos)
        tokens_a, new_entity_pos = tokenizer.tokenize(example.text_a, example.entity_pos)
        old_entity0_ = ' '.join(example.text_a.split()[old_entity_pos[0][0]:old_entity_pos[0][1]])
        old_entity1_ = ' '.join(example.text_a.split()[old_entity_pos[1][0]:old_entity_pos[1][1]])
        old_entity0 = ''.join(example.text_a.split()[old_entity_pos[0][0]:old_entity_pos[0][1]])
        old_entity1 = ''.join(example.text_a.split()[old_entity_pos[1][0]:old_entity_pos[1][1]])
        new_entity0 = ''.join(tokens_a[new_entity_pos[0][0]:new_entity_pos[0][1]])
        new_entity1 = ''.join(tokens_a[new_entity_pos[1][0]:new_entity_pos[1][1]])
        old_entity0 = old_entity0.lower()
        old_entity1 = old_entity1.lower()
        if '##' in new_entity0 or '##' in new_entity1:
            new_entity0 = new_entity0.replace('#', '')
            new_entity1 = new_entity1.replace('#', '')
        try:
            assert old_entity0 == new_entity0
            assert old_entity1 == new_entity1
        except AssertionError:
            continue

        # Entity marker
        tokens_a_ = copy.deepcopy(tokens_a)
        new_entity_pos_ = copy.deepcopy(new_entity_pos)
        entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1]
        entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1]
        tokens_a.insert(entity1_start, '')
        new_entity_pos[0][0] = entity1_start
        tokens_a.insert(entity1_end + 1, '')
        new_entity_pos[0][1] = entity1_end + 1 + 1
        tokens_a.insert(entity2_start + 2, '')
        new_entity_pos[1][0] = entity2_start + 2
        tokens_a.insert(entity2_end + 3, '')
        new_entity_pos[1][1] = entity2_end + 3 + 1
        if new_entity_pos[1][1] > max_seq_length - 2 - 1:
            continue

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0    0    0    0       0   0   0    1  1  1  1  1    1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0    0   0   0   0   0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding

        # Used for mention pooling
        entity_mask_tag = 1
        entity_mask = [0] * len(input_ids)
        for entity in new_entity_pos:
            start, end = entity[0], entity[1]
            for i in range(start, end):
                # [CLS] sits at position 0, so shift by +1
                entity_mask[i + 1] = entity_mask_tag

        # Different position embedding strategies (the same three strategies
        # as in tacred_run_classifier.py).
        # Strategy 1
        entity1_pos_tag = 1
        entity2_pos_tag = 2
        entity_seg_pos = [0] * len(input_ids)
        entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1]
        for i in range(entity1_start, entity1_end):
            entity_seg_pos[i + 1] = entity1_pos_tag
        entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1]
        for i in range(entity2_start, entity2_end):
            entity_seg_pos[i + 1] = entity2_pos_tag

        # Strategy 2
        entity_start_pos_tag = 1
        entity_seg_pos_ = [0] * len(input_ids)
        entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1]
        entity_seg_pos_[entity1_start + 1] = entity_start_pos_tag
        entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1]
        entity_seg_pos_[entity2_start + 1] = entity_start_pos_tag

        # Strategy 3
        entity_span1_pos = [0] * len(input_ids)
        entity1_start, entity1_end = new_entity_pos[0][0], new_entity_pos[0][1]
        for i in range(len(entity_span1_pos)):
            if i < entity1_start:
                #entity_span1_pos[i] = np.abs(i - entity1_start)
                entity_span1_pos[i] = i - entity1_start
            elif entity1_start <= i and i < entity1_end:
                entity_span1_pos[i] = 0
            elif i >= entity1_end:
                entity_span1_pos[i] = i - entity1_end + 1
        entity_span2_pos = [0] * len(input_ids)
        entity2_start, entity2_end = new_entity_pos[1][0], new_entity_pos[1][1]
        for i in range(len(entity_span2_pos)):
            if i < entity2_start:
                #entity_span2_pos[i] = np.abs(i - entity2_start)
                entity_span2_pos[i] = i - entity2_start
            elif entity2_start <= i and i < entity2_end:
                entity_span2_pos[i] = 0
            elif i >= entity2_end:
                entity_span2_pos[i] = i - entity2_end + 1
        # Shifting would keep nn.Embedding from seeing negative positions:
        #entity_span1_pos = [pos+max_seq_length-1 for pos in entity_span1_pos]
        #entity_span2_pos = [pos+max_seq_length-1 for pos in entity_span2_pos]

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(entity_mask) == max_seq_length
        assert len(entity_seg_pos) == max_seq_length
        assert len(entity_seg_pos_) == max_seq_length
        assert len(entity_span1_pos) == max_seq_length
        assert len(entity_span2_pos) == max_seq_length

        if output_mode == "classification":
            label_id = label_map[example.label]
        else:
            raise KeyError(output_mode)

        print("tokens: %s" % " ".join([str(x) for x in tokens]))
        print("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        print("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        print("entity_mask: %s" % " ".join([str(x) for x in entity_mask]))
        print("entity_seg_pos: %s" % " ".join([str(x) for x in entity_seg_pos]))
        print("entity_seg_pos_: %s" % " ".join([str(x) for x in entity_seg_pos_]))
        print("entity_span1_pos: %s" % " ".join([str(x) for x in entity_span1_pos]))
        print("entity_span2_pos: %s" % " ".join([str(x) for x in entity_span2_pos]))
        print("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        print("label: %s (id = %d)" % (example.label, label_id))

        samples.append([example.text_a, (old_entity0_, old_entity1_)])
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id,
                          entity_mask=entity_mask,
                          entity_seg_pos=entity_seg_pos_,
                          entity_span1_pos=entity_span1_pos,
                          entity_span2_pos=entity_span2_pos))
    return features, samples, reverse_label_map


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()
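
# Usage sketch: with len(tokens_a) == 5, len(tokens_b) == 3 and
# max_length == 6, the loop above pops from tokens_a twice, leaving
# 3 + 3 tokens.
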
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--test_batch_size", default=1, type=int, help="Total batch size for eval.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 task_name = args.task_name.lower() processor = _TacredProcessor() output_mode = 'classification' label_list = processor.get_labels() num_labels = len(label_list) # Load a trained model and vocabulary that you have fine-tuned model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) if n_gpu > 1: model = torch.nn.DataParallel(model) model.to(device) model.eval() return model, tokenizer, label_list, args, output_mode, processor, device def get_helper_model(spacy_used=False): from simplex_sdk import SimplexClient if spacy_used: model = en_core_web_sm.load() else: model = SimplexClient('BertNerApi-tmp') return model # 载入模型 model, tokenizer, label_list, args, output_mode, processor, device = load_model() ner_model = get_helper_model(spacy_used=False) @app.route('/nre') def predict(): data = request.args if 'text' not in data.keys(): warning_str = 'pleasen input right arg!\n' return jsonify(warning_str) else: line = data['text'] relation = [] candidate_input_list, entity = get_candidate_input(line, ner_model,spacy_used=False) # 实体未找到 if not candidate_input_list: return jsonify({'relations':relation,'entities':entity}) test_examples = processor.get_test_examples(candidate_input_list) test_features,samples,reverse_label_map = convert_examples_to_features( test_examples, label_list, args.max_seq_length, tokenizer, output_mode) test_num = len(candidate_input_list) for i in range(test_num): f,sample = test_features[i],samples[i] input_ids = torch.tensor(f.input_ids,dtype=torch.long).reshape(1, args.max_seq_length).to(device) segment_ids = torch.tensor(f.segment_ids,dtype=torch.long).reshape(1, args.max_seq_length).to(device) input_mask = torch.tensor(f.input_mask,dtype=torch.long).reshape(1, args.max_seq_length).to(device) entity_mask = torch.tensor(f.entity_mask,dtype=torch.float).reshape(1, args.max_seq_length).to(device) entity_seg_pos = torch.tensor(f.entity_seg_pos,dtype=torch.long).reshape(1, args.max_seq_length).to(device) entity_span1_pos = torch.tensor(f.entity_span1_pos,dtype=torch.float).reshape(1, args.max_seq_length).to(device) entity_span2_pos = torch.tensor(f.entity_span2_pos,dtype=torch.float).reshape(1, args.max_seq_length).to(device) with torch.no_grad(): logits = model(input_ids, segment_ids, input_mask, entity_mask, entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None) """ np.argmax v.s. 
@app.route('/nre')
def predict():
    data = request.args
    if 'text' not in data.keys():
        warning_str = 'please input the right arg!\n'
        return jsonify(warning_str)
    else:
        line = data['text']
        relation = []
        candidate_input_list, entity = get_candidate_input(line, ner_model, spacy_used=False)
        # No entities found
        if not candidate_input_list:
            return jsonify({'relations': relation, 'entities': entity})
        test_examples = processor.get_test_examples(candidate_input_list)
        test_features, samples, reverse_label_map = convert_examples_to_features(
            test_examples, label_list, args.max_seq_length, tokenizer, output_mode)
        test_num = len(candidate_input_list)
        for i in range(test_num):
            f, sample = test_features[i], samples[i]
            input_ids = torch.tensor(f.input_ids, dtype=torch.long).reshape(1, args.max_seq_length).to(device)
            segment_ids = torch.tensor(f.segment_ids, dtype=torch.long).reshape(1, args.max_seq_length).to(device)
            input_mask = torch.tensor(f.input_mask, dtype=torch.long).reshape(1, args.max_seq_length).to(device)
            entity_mask = torch.tensor(f.entity_mask, dtype=torch.float).reshape(1, args.max_seq_length).to(device)
            entity_seg_pos = torch.tensor(f.entity_seg_pos, dtype=torch.long).reshape(1, args.max_seq_length).to(device)
            entity_span1_pos = torch.tensor(f.entity_span1_pos, dtype=torch.float).reshape(1, args.max_seq_length).to(device)
            entity_span2_pos = torch.tensor(f.entity_span2_pos, dtype=torch.float).reshape(1, args.max_seq_length).to(device)
            with torch.no_grad():
                logits = model(input_ids, segment_ids, input_mask, entity_mask,
                               entity_seg_pos, entity_span1_pos, entity_span2_pos, labels=None)
            # np.argmax vs. torch.argmax
            pred_id = np.argmax(logits.detach().cpu().numpy()[0].tolist())
            pred_label = reverse_label_map[pred_id]
            text, entity0, entity1 = sample[0], sample[1][0], sample[1][1]
            relation.append({'text': text, 'entity pair': [entity0, entity1], 'relation': pred_label})
        return jsonify({'relations': relation, 'entities': entity})


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5050, debug=True)

================================================ FILE: examples/test.sh ================================================
#export GLUE_DIR=/data/share/zhanghaipeng/pytorch-pretrained-BERT/examples/general_ner_test
export GLUE_DIR=/data/share/zhanghaipeng/tre/datasets/data
export TASK_NAME=tacred
EXPR=23
BS=64
CUDA=0
EPOCH=3.0
CUDA_VISIBLE_DEVICES=$CUDA python tacred_run_classifier.py \
  --task_name $TASK_NAME \
  --do_test \
  --do_lower_case \
  --data_dir $GLUE_DIR/tacred/ \
  --max_seq_length 128 \
  --eval_batch_size $BS \
  --output_dir train/$EXPR/$TASK_NAME$EPOCH \
  --bert_model model/bert-large-uncased

================================================ FILE: examples/train.sh ================================================
export GLUE_DIR=/data/share/zhanghaipeng/tre/datasets/data
export TASK_NAME=tacred
EXPR=25
BS=16
CUDA=2
LR=3e-5
EPOCH=4.0
CUDA_VISIBLE_DEVICES=$CUDA python tacred_run_classifier.py \
  --task_name $TASK_NAME \
  --do_train \
  --do_lower_case \
  --data_dir $GLUE_DIR/tacred/ \
  --max_seq_length 128 \
  --train_batch_size $BS \
  --learning_rate $LR \
  --num_train_epochs $EPOCH \
  --output_dir train/$EXPR/$TASK_NAME$EPOCH \
  --bert_model model/bert-base-uncased

================================================ FILE: hubconf.py ================================================
dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex']

from hubconfs.bert_hubconf import (
    bertTokenizer,
    bertModel,
    bertForNextSentencePrediction,
    bertForPreTraining,
    bertForMaskedLM,
    bertForSequenceClassification,
    bertForMultipleChoice,
    bertForQuestionAnswering,
    bertForTokenClassification
)
from hubconfs.gpt_hubconf import (
    openAIGPTTokenizer,
    openAIGPTModel,
    openAIGPTLMHeadModel,
    openAIGPTDoubleHeadsModel
)
from hubconfs.gpt2_hubconf import (
    gpt2Tokenizer,
    gpt2Model,
    gpt2LMHeadModel,
    gpt2DoubleHeadsModel
)
from hubconfs.transformer_xl_hubconf import (
    transformerXLTokenizer,
    transformerXLModel,
    transformerXLLMHeadModel
)
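
# Usage sketch: the entry points above are meant to be loaded through
# torch.hub, e.g.
#   import torch
#   model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertModel', 'bert-base-cased')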

================================================ FILE: hubconfs/bert_hubconf.py ================================================
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import (
    BertModel,
    BertForNextSentencePrediction,
    BertForMaskedLM,
    BertForMultipleChoice,
    BertForPreTraining,
    BertForQuestionAnswering,
    BertForSequenceClassification,
    BertForTokenClassification,
)

# A lot of models share the same param doc. Use a decorator
# to save typing
bert_docstring = """
    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load
                . `bert-base-uncased`
                . `bert-large-uncased`
                . `bert-base-cased`
                . `bert-large-cased`
                . `bert-base-multilingual-uncased`
                . `bert-base-multilingual-cased`
                . `bert-base-chinese`
                . `bert-base-german-cased`
                . `bert-large-uncased-whole-word-masking`
                . `bert-large-cased-whole-word-masking`
            - a path or url to a pretrained model archive containing:
                . `bert_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
            - a path or url to a pretrained model archive containing:
                . `bert_config.json` a configuration file for the model
                . `model.chkpt` a TensorFlow checkpoint
        from_tf: should we load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use
            instead of Google pre-trained models
        *inputs, **kwargs: additional input for the specific Bert class
            (ex: num_labels for BertForSequenceClassification)
"""


def _append_from_pretrained_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator


def bertTokenizer(*args, **kwargs):
    """
    Instantiate a BertTokenizer from a pre-trained/customized vocab file
    Args:
    pretrained_model_name_or_path: Path to pretrained model archive
                                   or one of pre-trained vocab configs below.
                                       * bert-base-uncased
                                       * bert-large-uncased
                                       * bert-base-cased
                                       * bert-large-cased
                                       * bert-base-multilingual-uncased
                                       * bert-base-multilingual-cased
                                       * bert-base-chinese
    Keyword args:
    cache_dir: an optional path to a specific directory to download and cache
               the pre-trained model weights. Default: None
    do_lower_case: Whether to lower case the input. Only has an effect when
                   do_wordpiece_only=False. Default: True
    do_basic_tokenize: Whether to do basic tokenization before wordpiece. Default: True
    max_len: An artificial maximum length to truncate tokenized sequences to; Effective
             maximum length is always the minimum of this value (if specified) and the
             underlying BERT model's sequence length. Default: None
    never_split: List of tokens which will never be split during tokenization. Only has
                 an effect when do_wordpiece_only=False.
                 Default: ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]

    Example:
        >>> import torch
        >>> sentence = 'Hello, World!'
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        >>> toks = tokenizer.tokenize(sentence)
        ['Hello', '##,', 'World', '##!']
        >>> ids = tokenizer.convert_tokens_to_ids(toks)
        [8667, 28136, 1291, 28125]
    """
    tokenizer = BertTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer


@_append_from_pretrained_docstring(bert_docstring)
def bertModel(*args, **kwargs):
    """
    BertModel is the basic BERT Transformer model with a layer of summed token,
    position and sequence embeddings followed by a series of identical
    self-attention blocks (12 for BERT-base, 24 for BERT-large).

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens])
        >>> segments_tensors = torch.tensor([segments_ids])
        # Load bertModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertModel', 'bert-base-cased')
        >>> model.eval()
        # Predict hidden states features for each layer
        >>> with torch.no_grad():
                encoded_layers, _ = model(tokens_tensor, segments_tensors)
    """
    model = BertModel.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(bert_docstring)
def bertForNextSentencePrediction(*args, **kwargs):
    """
    BERT model with next sentence prediction head.
    This module comprises the BERT model followed by the next sentence
    classification head.
    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens])
        >>> segments_tensors = torch.tensor([segments_ids])
        # Load bertForNextSentencePrediction
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForNextSentencePrediction', 'bert-base-cased')
        >>> model.eval()
        # Predict the next sentence classification logits
        >>> with torch.no_grad():
                next_sent_classif_logits = model(tokens_tensor, segments_tensors)
    """
    model = BertForNextSentencePrediction.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(bert_docstring)
def bertForPreTraining(*args, **kwargs):
    """
    BERT model with pre-training heads.
    This module comprises the BERT model followed by the two pre-training heads
        - the masked language modeling head, and
        - the next sentence classification head.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens])
        >>> segments_tensors = torch.tensor([segments_ids])
        # Load bertForPreTraining
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForPreTraining', 'bert-base-cased')
        >>> masked_lm_logits_scores, seq_relationship_logits = model(tokens_tensor, segments_tensors)
    """
    model = BertForPreTraining.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(bert_docstring)
def bertForMaskedLM(*args, **kwargs):
    """
    BertForMaskedLM includes the BertModel Transformer followed by the
    (possibly) pre-trained masked language modeling head.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> masked_index = 8
        >>> tokenized_text[masked_index] = '[MASK]'
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens])
        >>> segments_tensors = torch.tensor([segments_ids])
        # Load bertForMaskedLM
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForMaskedLM', 'bert-base-cased')
        >>> model.eval()
        # Predict all tokens
        >>> with torch.no_grad():
                predictions = model(tokens_tensor, segments_tensors)
        >>> predicted_index = torch.argmax(predictions[0, masked_index]).item()
        >>> predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
        'henson'
    """
    model = BertForMaskedLM.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(bert_docstring)
def bertForSequenceClassification(*args, **kwargs):
    """
    BertForSequenceClassification is a fine-tuning model that includes
    BertModel and a sequence-level (sequence or pair of sequences) classifier
    on top of the BertModel. Note that the classification head is only
    initialized and has to be trained.

    The sequence-level classifier is a linear layer that takes as input the
    last hidden state of the first character in the input sequence
    (see Figures 3a and 3b in the BERT paper).

    Args:
    num_labels: the number (>=2) of classes for the classifier.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens])
        >>> segments_tensors = torch.tensor([segments_ids])
        # Load bertForSequenceClassification
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForSequenceClassification', 'bert-base-cased', num_labels=2)
        >>> model.eval()
        # Predict the sequence classification logits
        >>> with torch.no_grad():
                seq_classif_logits = model(tokens_tensor, segments_tensors)
        # Or get the sequence classification loss
        >>> labels = torch.tensor([1])
        >>> seq_classif_loss = model(tokens_tensor, segments_tensors, labels=labels)  # set model.train() before if training this loss
    """
    model = BertForSequenceClassification.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(bert_docstring)
def bertForMultipleChoice(*args, **kwargs):
    """
    BertForMultipleChoice is a fine-tuning model that includes BertModel and a
    linear layer on top of the BertModel. Note that the multiple choice head
    is only initialized and has to be trained.

    Args:
    num_choices: the number (>=2) of classes for the classifier.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens, indexed_tokens]).unsqueeze(0)
        >>> segments_tensors = torch.tensor([segments_ids, segments_ids]).unsqueeze(0)
        # Load bertForMultipleChoice
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForMultipleChoice', 'bert-base-cased', num_choices=2)
        >>> model.eval()
        # Predict the multiple choice logits
        >>> with torch.no_grad():
                multiple_choice_logits = model(tokens_tensor, segments_tensors)
        # Or get the multiple choice loss
        >>> labels = torch.tensor([1])
        >>> multiple_choice_loss = model(tokens_tensor, segments_tensors, labels=labels)  # set model.train() before if training this loss
    """
    model = BertForMultipleChoice.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(bert_docstring)
def bertForQuestionAnswering(*args, **kwargs):
    """
    BertForQuestionAnswering is a fine-tuning model that includes BertModel
    with a token-level classifier on top of the full sequence of last hidden
    states. Note that the classification head is only initialized and has to
    be trained.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens])
        >>> segments_tensors = torch.tensor([segments_ids])
        # Load bertForQuestionAnswering
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForQuestionAnswering', 'bert-base-cased')
        >>> model.eval()
        # Predict the start and end positions logits
        >>> with torch.no_grad():
                start_logits, end_logits = model(tokens_tensor, segments_tensors)
        # Or get the total loss which is the sum of the CrossEntropy loss for the start and end token positions
        >>> start_positions, end_positions = torch.tensor([12]), torch.tensor([14])
        # set model.train() before if training this loss
        >>> multiple_choice_loss = model(tokens_tensor, segments_tensors, start_positions=start_positions, end_positions=end_positions)
    """
    model = BertForQuestionAnswering.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(bert_docstring)
def bertForTokenClassification(*args, **kwargs):
    """
    BertForTokenClassification is a fine-tuning model that includes BertModel
    and a token-level classifier on top of the BertModel. Note that the
    classification head is only initialized and has to be trained.

    The token-level classifier is a linear layer that takes as input the last
    hidden state of the sequence.

    Args:
    num_labels: the number (>=2) of classes for the classifier.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)
        # Prepare tokenized input
@_append_from_pretrained_docstring(bert_docstring)
def bertForTokenClassification(*args, **kwargs):
    """
    BertForTokenClassification is a fine-tuning model that includes BertModel
    and a token-level classifier on top of the BertModel. Note that the
    classification head is only initialized and has to be trained.

    The token-level classifier is a linear layer that takes as input the last
    hidden state of the sequence.

    Args:
        num_labels: the number (>=2) of classes for the classifier.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False)

        # Prepare tokenized input
        >>> text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
        >>> tokens_tensor = torch.tensor([indexed_tokens])
        >>> segments_tensors = torch.tensor([segments_ids])

        # Load bertForTokenClassification
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'bertForTokenClassification', 'bert-base-cased', num_labels=2)
        >>> model.eval()

        # Predict the token classification logits
        >>> with torch.no_grad():
                classif_logits = model(tokens_tensor, segments_tensors)

        # Or get the token classification loss
        >>> labels = torch.tensor([[0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0]])
        >>> classif_loss = model(tokens_tensor, segments_tensors, labels=labels)  # set model.train() before if training this loss
    """
    model = BertForTokenClassification.from_pretrained(*args, **kwargs)
    return model


================================================
FILE: hubconfs/gpt2_hubconf.py
================================================
from pytorch_pretrained_bert.tokenization_gpt2 import GPT2Tokenizer
from pytorch_pretrained_bert.modeling_gpt2 import (
    GPT2Model,
    GPT2LMHeadModel,
    GPT2DoubleHeadsModel
)

# A lot of models share the same param doc. Use a decorator
# to save typing
gpt2_docstring = """
    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `gpt2`, `gpt2-medium`
            - a path or url to a pretrained model archive containing:
                . `gpt2_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
            - a path or url to a pretrained model archive containing:
                . `gpt2_config.json` a configuration file for the model
                . a TensorFlow checkpoint with trained weights
        from_tf: should we load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
        *inputs, **kwargs: additional input for the specific GPT-2 class
"""


def _append_from_pretrained_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator


def gpt2Tokenizer(*args, **kwargs):
    """
    Instantiate a GPT-2 BPE tokenizer for OpenAI GPT-2 from a pre-trained/customized vocab file.
    Peculiarities:
        - Byte-level BPE

    Args:
        pretrained_model_name_or_path: Path to pretrained model archive
                                       or one of pre-trained vocab configs below.
                                           * gpt2
    Keyword args:
        special_tokens: Special tokens in vocabulary that are not pretrained ([SEP], [CLS]...)
                        Default: None
        max_len: An artificial maximum length to truncate tokenized sequences to;
                 Effective maximum length is always the minimum of this
                 value (if specified) and the underlying model's sequence length.
                 Default: None

    Example:
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'gpt2Tokenizer', 'gpt2')

        >>> text = "Who was Jim Henson ?"
        >>> indexed_tokens = tokenizer.encode(text)
    """
    tokenizer = GPT2Tokenizer.from_pretrained(*args, **kwargs)
    return tokenizer
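# Illustrative sketch only (not part of the hub API above): because the GPT-2
# tokenizer is a byte-level BPE, encode/decode round-trips arbitrary text
# without loss, so decoding the ids recovers the original string.
def _gpt2_bpe_roundtrip_sketch(tokenizer):
    text = "Who was Jim Henson ?"
    indexed_tokens = tokenizer.encode(text)
    assert tokenizer.decode(indexed_tokens) == text
    return indexed_tokens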
@_append_from_pretrained_docstring(gpt2_docstring)
def gpt2Model(*args, **kwargs):
    """
    gpt2Model is the basic OpenAI GPT-2 Transformer model, based on identical
    stacked masked self-attention blocks and pre-trained on a large-scale
    dataset using a language modeling signal.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'gpt2Tokenizer', 'gpt2')

        # Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
        >>> text_2 = "Jim Henson was a puppeteer"
        >>> indexed_tokens_1 = tokenizer.encode(text_1)
        >>> indexed_tokens_2 = tokenizer.encode(text_2)
        >>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        >>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load gpt2Model
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'gpt2Model', 'gpt2')
        >>> model.eval()

        # Predict hidden states features for each layer
        # `past` can be used to reuse precomputed hidden states in subsequent predictions
        >>> with torch.no_grad():
                hidden_states_1, past = model(tokens_tensor_1)
                hidden_states_2, past = model(tokens_tensor_2, past=past)
    """
    model = GPT2Model.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(gpt2_docstring)
def gpt2LMHeadModel(*args, **kwargs):
    """
    gpt2LMHeadModel is the OpenAI GPT-2 Transformer model with the
    tied (pre-trained) language modeling head on top.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'gpt2Tokenizer', 'gpt2')

        # Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
        >>> text_2 = "Jim Henson was a puppeteer"
        >>> indexed_tokens_1 = tokenizer.encode(text_1)
        >>> indexed_tokens_2 = tokenizer.encode(text_2)
        >>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        >>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load gpt2LMHeadModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'gpt2LMHeadModel', 'gpt2')
        >>> model.eval()

        # Predict the language modeling logits
        # `past` can be used to reuse precomputed hidden states in subsequent predictions
        >>> with torch.no_grad():
                predictions_1, past = model(tokens_tensor_1)
                predictions_2, past = model(tokens_tensor_2, past=past)

        # Get the predicted last token
        >>> predicted_index = torch.argmax(predictions_2[0, -1, :]).item()
        >>> predicted_token = tokenizer.decode([predicted_index])
        >>> assert predicted_token == ' who'
    """
    model = GPT2LMHeadModel.from_pretrained(*args, **kwargs)
    return model
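# Illustrative sketch only (not part of the hub API above): greedy decoding
# with gpt2LMHeadModel, reusing `past` so that each step only has to run the
# transformer over the single newly generated token rather than the whole
# prefix.
def _gpt2_greedy_decoding_sketch(model, tokenizer, text, steps=5):
    import torch
    tokens = torch.tensor([tokenizer.encode(text)])
    past, generated = None, []
    with torch.no_grad():
        for _ in range(steps):
            logits, past = model(tokens, past=past)
            # feed only the argmax token back in; `past` carries the context
            tokens = torch.argmax(logits[0, -1, :]).view(1, 1)
            generated.append(tokens.item())
    return tokenizer.decode(generated)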
@_append_from_pretrained_docstring(gpt2_docstring)
def gpt2DoubleHeadsModel(*args, **kwargs):
    """
    gpt2DoubleHeadsModel is the OpenAI GPT-2 Transformer model with the
    tied (pre-trained) language modeling head and a multiple choice
    classification head (only initialized, not pre-trained).

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'gpt2Tokenizer', 'gpt2')

        # Prepare tokenized input
        >>> text1 = "Who was Jim Henson ? Jim Henson was a puppeteer"
        >>> text2 = "Who was Jim Henson ? Jim Henson was a mysterious young man"
        >>> tokenized_text1 = tokenizer.tokenize(text1)
        >>> tokenized_text2 = tokenizer.tokenize(text2)
        >>> indexed_tokens1 = tokenizer.convert_tokens_to_ids(tokenized_text1)
        >>> indexed_tokens2 = tokenizer.convert_tokens_to_ids(tokenized_text2)
        # pad both choices to the same length so they can be stacked in a single tensor
        >>> max_len = max(len(indexed_tokens1), len(indexed_tokens2))
        >>> indexed_tokens1 = indexed_tokens1 + [0] * (max_len - len(indexed_tokens1))
        >>> indexed_tokens2 = indexed_tokens2 + [0] * (max_len - len(indexed_tokens2))
        >>> tokens_tensor = torch.tensor([[indexed_tokens1, indexed_tokens2]])
        >>> mc_token_ids = torch.LongTensor([[len(tokenized_text1) - 1, len(tokenized_text2) - 1]])

        # Load gpt2DoubleHeadsModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'gpt2DoubleHeadsModel', 'gpt2')
        >>> model.eval()

        # Predict the language modeling and multiple choice logits
        >>> with torch.no_grad():
                lm_logits, multiple_choice_logits, presents = model(tokens_tensor, mc_token_ids)
    """
    model = GPT2DoubleHeadsModel.from_pretrained(*args, **kwargs)
    return model


================================================
FILE: hubconfs/gpt_hubconf.py
================================================
from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer
from pytorch_pretrained_bert.modeling_openai import (
    OpenAIGPTModel,
    OpenAIGPTLMHeadModel,
    OpenAIGPTDoubleHeadsModel
)

# Dependencies that are not specified in global hubconf.py
specific_dependencies = ['spacy', 'ftfy']

# A lot of models share the same param doc. Use a decorator
# to save typing
gpt_docstring = """
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special token embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embedding matrix:

        [0,                                             ----------------------
         ...                                             -> word embeddings
         config.vocab_size - 1,                         ______________________
         config.vocab_size,
         ...                                             -> special embeddings
         config.vocab_size + config.n_special - 1]      ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `openai-gpt`
            - a path or url to a pretrained model archive containing:
                . `openai_gpt_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of an OpenAIGPTModel instance
            - a path or url to a pretrained model archive containing:
                . `openai-gpt-config.json` a configuration file for the model
                . a series of NumPy files containing OpenAI TensorFlow trained weights
        from_tf: should we load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
        *inputs, **kwargs: additional input for the specific OpenAI-GPT class
"""


def _append_from_pretrained_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator
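# Illustrative sketch only (not part of the hub API) of the embedding layout
# described in gpt_docstring above: word embeddings occupy rows
# [0, vocab_size - 1] and special token embeddings occupy rows
# [vocab_size, vocab_size + n_special - 1] of the same matrix.
def _gpt_special_embedding_indices_sketch(model):
    config = model.config
    model.set_num_special_tokens(config.n_special)  # (re)size the special embeddings
    first_special_index = config.vocab_size         # row of the first special token
    total = config.vocab_size + config.n_special    # == config.total_tokens_embeddings
    return first_special_index, total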
def openAIGPTTokenizer(*args, **kwargs):
    """
    Instantiate a BPE tokenizer for OpenAI GPT from a pre-trained/customized vocab file.
    Peculiarities:
        - lower-cases all inputs
        - uses the SpaCy tokenizer ('en' model) and ftfy for pre-BPE tokenization if they
          are installed, and falls back to BERT's BasicTokenizer if not.
        - the `special_tokens` argument and the `set_special_tokens` function can be used
          to add additional symbols (ex: "__classify__") to the vocabulary.

    Args:
        pretrained_model_name_or_path: Path to pretrained model archive
                                       or one of pre-trained vocab configs below.
                                           * openai-gpt
    Keyword args:
        special_tokens: Special tokens in vocabulary that are not pretrained ([SEP], [CLS]...)
                        Default: None
        max_len: An artificial maximum length to truncate tokenized sequences to;
                 Effective maximum length is always the minimum of this
                 value (if specified) and the underlying model's sequence length.
                 Default: None

    Example:
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTTokenizer', 'openai-gpt')

        >>> text = "Who was Jim Henson ? Jim Henson was a puppeteer"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        [763, 509, 4265, 2298, 945, 257, 4265, 2298, 945, 509, 246, 10148, 39041, 483]
    """
    tokenizer = OpenAIGPTTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer
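# Illustrative sketch only (not part of the hub API above): adding a new
# symbol such as "__classify__" to the vocabulary with set_special_tokens, as
# described in the tokenizer docstring. Ids of special tokens start after the
# end of the pre-trained BPE vocabulary.
def _gpt_add_special_token_sketch(tokenizer):
    tokenizer.set_special_tokens(['__classify__'])
    return tokenizer.special_tokens['__classify__']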
@_append_from_pretrained_docstring(gpt_docstring)
def openAIGPTModel(*args, **kwargs):
    """
    OpenAIGPTModel is the basic OpenAI GPT Transformer model, based on
    identical stacked masked self-attention blocks and pre-trained on a
    large-scale dataset using a language modeling signal.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTTokenizer', 'openai-gpt')

        # Prepare tokenized input
        >>> text = "Who was Jim Henson ? Jim Henson was a puppeteer"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> tokens_tensor = torch.tensor([indexed_tokens])

        # Load openAIGPTModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTModel', 'openai-gpt')
        >>> model.eval()

        # Predict hidden states features for each layer
        >>> with torch.no_grad():
                hidden_states = model(tokens_tensor)
    """
    model = OpenAIGPTModel.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(gpt_docstring)
def openAIGPTLMHeadModel(*args, **kwargs):
    """
    OpenAIGPTLMHeadModel is the OpenAI GPT Transformer model with the
    tied (pre-trained) language modeling head on top.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTTokenizer', 'openai-gpt')

        # Prepare tokenized input
        >>> text = "Who was Jim Henson ? Jim Henson was a puppeteer"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
        >>> tokens_tensor = torch.tensor([indexed_tokens])

        # Load openAIGPTLMHeadModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTLMHeadModel', 'openai-gpt')
        >>> model.eval()

        # Predict the language modeling logits
        >>> with torch.no_grad():
                predictions = model(tokens_tensor)

        # Get the predicted last token
        >>> predicted_index = torch.argmax(predictions[0, -1, :]).item()
        >>> predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
        '.'
    """
    model = OpenAIGPTLMHeadModel.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(gpt_docstring)
def openAIGPTDoubleHeadsModel(*args, **kwargs):
    """
    OpenAIGPTDoubleHeadsModel is the OpenAI GPT Transformer model with the
    tied (pre-trained) language modeling head and a multiple choice
    classification head (only initialized, not pre-trained).

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTTokenizer', 'openai-gpt')

        # Prepare tokenized input
        >>> text1 = "Who was Jim Henson ? Jim Henson was a puppeteer"
        >>> text2 = "Who was Jim Henson ? Jim Henson was a mysterious young man"
        >>> tokenized_text1 = tokenizer.tokenize(text1)
        >>> tokenized_text2 = tokenizer.tokenize(text2)
        >>> indexed_tokens1 = tokenizer.convert_tokens_to_ids(tokenized_text1)
        >>> indexed_tokens2 = tokenizer.convert_tokens_to_ids(tokenized_text2)
        # pad both choices to the same length so they can be stacked in a single tensor
        >>> max_len = max(len(indexed_tokens1), len(indexed_tokens2))
        >>> indexed_tokens1 = indexed_tokens1 + [0] * (max_len - len(indexed_tokens1))
        >>> indexed_tokens2 = indexed_tokens2 + [0] * (max_len - len(indexed_tokens2))
        >>> tokens_tensor = torch.tensor([[indexed_tokens1, indexed_tokens2]])
        >>> mc_token_ids = torch.LongTensor([[len(tokenized_text1) - 1, len(tokenized_text2) - 1]])

        # Load openAIGPTDoubleHeadsModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'openAIGPTDoubleHeadsModel', 'openai-gpt')
        >>> model.eval()

        # Predict the language modeling and multiple choice logits
        >>> with torch.no_grad():
                lm_logits, multiple_choice_logits = model(tokens_tensor, mc_token_ids)
    """
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(*args, **kwargs)
    return model
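# Illustrative sketch only (not part of the hub API above) of the input shapes
# the double-heads models expect: input ids are (batch, num_choices, seq_len)
# and mc_token_ids points at the last real token of each choice, which is
# where the multiple choice head reads its hidden state.
def _double_heads_inputs_sketch(indexed_tokens1, indexed_tokens2, last_token1, last_token2):
    import torch
    # both choices padded to the same length; last_token* index the final
    # non-padding token of each choice
    tokens_tensor = torch.tensor([[indexed_tokens1, indexed_tokens2]])  # (1, 2, seq_len)
    mc_token_ids = torch.LongTensor([[last_token1, last_token2]])       # (1, 2)
    return tokens_tensor, mc_token_ids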
================================================
FILE: hubconfs/transformer_xl_hubconf.py
================================================
from pytorch_pretrained_bert.tokenization_transfo_xl import TransfoXLTokenizer
from pytorch_pretrained_bert.modeling_transfo_xl import (
    TransfoXLModel,
    TransfoXLLMHeadModel
)

# A lot of models share the same param doc. Use a decorator
# to save typing
transformer_xl_docstring = """
    Transformer-XL uses relative positioning (with sinusoidal patterns) and
    adaptive softmax inputs, which means that:
        - you don't need to specify positioning embeddings indices
        - the tokens in the vocabulary have to be sorted by decreasing frequency.

    Params:
        pretrained_model_name_or_path: either:
            - a str with the name of a pre-trained model to load selected in the list of:
                . `transfo-xl-wt103`
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance
            - a path or url to a pretrained model archive containing:
                . `transfo_xl_config.json` a configuration file for the model
                . `model.chkpt` a TensorFlow checkpoint
        from_tf: should we load the weights from a locally saved TensorFlow checkpoint
        cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
        *inputs, **kwargs: additional input for the specific TransformerXL class
"""


def _append_from_pretrained_docstring(docstr):
    def docstring_decorator(fn):
        fn.__doc__ = fn.__doc__ + docstr
        return fn
    return docstring_decorator


def transformerXLTokenizer(*args, **kwargs):
    """
    Instantiate a Transformer-XL tokenizer adapted from the Vocab class in
    https://github.com/kimiyoung/transformer-xl

    Args:
        pretrained_model_name_or_path: Path to pretrained model archive
                                       or one of pre-trained vocab configs below.
                                           * transfo-xl-wt103

    Example:
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLTokenizer', 'transfo-xl-wt103')

        >>> text = "Who was Jim Henson ?"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    """
    tokenizer = TransfoXLTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer


@_append_from_pretrained_docstring(transformer_xl_docstring)
def transformerXLModel(*args, **kwargs):
    """
    transformerXLModel is the basic Transformer-XL model.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLTokenizer', 'transfo-xl-wt103')

        # Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
        >>> text_2 = "Jim Henson was a puppeteer"
        >>> tokenized_text_1 = tokenizer.tokenize(text_1)
        >>> tokenized_text_2 = tokenizer.tokenize(text_2)
        >>> indexed_tokens_1 = tokenizer.convert_tokens_to_ids(tokenized_text_1)
        >>> indexed_tokens_2 = tokenizer.convert_tokens_to_ids(tokenized_text_2)
        >>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        >>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load transformerXLModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLModel', 'transfo-xl-wt103')
        >>> model.eval()

        # Predict hidden states features for each layer
        # We can re-use the memory cells in a subsequent call to attend a longer context
        >>> with torch.no_grad():
                hidden_states_1, mems_1 = model(tokens_tensor_1)
                hidden_states_2, mems_2 = model(tokens_tensor_2, mems=mems_1)
    """
    model = TransfoXLModel.from_pretrained(*args, **kwargs)
    return model


@_append_from_pretrained_docstring(transformer_xl_docstring)
def transformerXLLMHeadModel(*args, **kwargs):
    """
    transformerXLLMHeadModel is the Transformer-XL model with the
    tied (pre-trained) language modeling head on top.

    Example:
        # Load the tokenizer
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLTokenizer', 'transfo-xl-wt103')

        # Prepare tokenized input
        >>> text_1 = "Who was Jim Henson ?"
        >>> text_2 = "Jim Henson was a puppeteer"
        >>> tokenized_text_1 = tokenizer.tokenize(text_1)
        >>> tokenized_text_2 = tokenizer.tokenize(text_2)
        >>> indexed_tokens_1 = tokenizer.convert_tokens_to_ids(tokenized_text_1)
        >>> indexed_tokens_2 = tokenizer.convert_tokens_to_ids(tokenized_text_2)
        >>> tokens_tensor_1 = torch.tensor([indexed_tokens_1])
        >>> tokens_tensor_2 = torch.tensor([indexed_tokens_2])

        # Load transformerXLLMHeadModel
        >>> model = torch.hub.load('huggingface/pytorch-pretrained-BERT', 'transformerXLLMHeadModel', 'transfo-xl-wt103')
        >>> model.eval()

        # Predict the language modeling scores
        # We can re-use the memory cells in a subsequent call to attend a longer context
        >>> with torch.no_grad():
                predictions_1, mems_1 = model(tokens_tensor_1)
                predictions_2, mems_2 = model(tokens_tensor_2, mems=mems_1)

        # Get the predicted last token
        >>> predicted_index = torch.argmax(predictions_2[0, -1, :]).item()
        >>> predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
        >>> assert predicted_token == 'who'
    """
    model = TransfoXLLMHeadModel.from_pretrained(*args, **kwargs)
    return model


================================================
FILE: notebooks/Comparing-PT-and-TF-models.ipynb
================================================
{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Pytorch to Tensorflow Conversion Test Notebook\n", "\n", "To run this notebook follow these steps, modifying the **Config** section as necessary:\n", "\n", "1. Point `pt_model_dir` to your local directory containing the pytorch Bert model to be converted.\n", "2. 
Point `tf_bert_dir` to your clone of Google's Bert implementation which can be found here: https://github.com/google-research/bert.\n", "\n", "Note: \n", "1. This feature currently only supports the base BERT models (uncased/cased).\n", "2. Tensorflow model will be dumped in `tf_model_dir`." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Config" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os\n", "import sys\n", "\n", "model_cls = 'BertModel'\n", "model_typ = 'bert-base-uncased'\n", "token_cls = 'BertTokenizer'\n", "max_seq = 12\n", "CLS = \"[CLS]\"\n", "SEP = \"[SEP]\"\n", "MASK = \"[MASK]\"\n", "CLS_IDX = 0\n", "layer_idxs = tuple(range(12))\n", "input_text = \"jim henson was a puppeteer\"\n", "\n", "pt_model_dir = \"/home/ubuntu/.pytorch-pretrained-BERT-cache/{}\".format(model_typ)\n", "tf_bert_dir = \"/home/ubuntu/bert\"\n", "\n", "pt_vocab_file = os.path.join(pt_model_dir, \"vocab.txt\")\n", "pt_init_ckpt = os.path.join(pt_model_dir, model_typ.replace(\"-\", \"_\") + \".bin\")\n", "tf_model_dir = os.path.join(pt_model_dir, 'tf')\n", "tf_vocab_file = os.path.join(tf_model_dir, \"vocab.txt\")\n", "tf_init_ckpt = os.path.join(tf_model_dir, model_typ.replace(\"-\", \"_\") + \".ckpt\")\n", "tf_config_file = os.path.join(tf_model_dir, \"bert_config.json\")\n", "\n", "if not os.path.isdir(tf_model_dir): \n", " os.makedirs(tf_model_dir, exist_ok=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Tokenization" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def tokenize(text, tokenizer):\n", " text = text.strip().lower()\n", " tok_ids = tokenizer.tokenize(text)\n", " if len(tok_ids) > max_seq - 2:\n", " tok_ids = tok_ids[:max_seq - 2]\n", " tok_ids.insert(CLS_IDX, CLS)\n", " tok_ids.append(SEP)\n", " input_ids = tokenizer.convert_tokens_to_ids(tok_ids)\n", " mask_ids = [1] * len(input_ids)\n", " seg_ids = [0] * len(input_ids)\n", " padding = [0] * (max_seq - len(input_ids))\n", " input_ids += padding\n", " mask_ids += padding\n", " seg_ids += padding\n", " return input_ids, mask_ids, seg_ids" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Pytorch execution" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 231508/231508 [00:00<00:00, 41092464.26B/s]\n", "100%|██████████| 407873900/407873900 [00:07<00:00, 58092479.52B/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Pytorch embedding shape: (1, 768)\n" ] } ], "source": [ "import numpy as np\n", "import torch\n", "from pytorch_pretrained_bert import (BertConfig,\n", " BertModel, \n", " BertTokenizer, \n", " BertForSequenceClassification)\n", "\n", "# Save Vocab\n", "pt_tokenizer = BertTokenizer.from_pretrained(\n", " pretrained_model_name_or_path=model_typ, \n", " cache_dir=pt_model_dir)\n", "pt_tokenizer.save_vocabulary(pt_model_dir)\n", "pt_tokenizer.save_vocabulary(tf_model_dir)\n", "\n", "# Save Model\n", "pt_model = BertModel.from_pretrained(\n", " pretrained_model_name_or_path=model_typ, \n", " cache_dir=pt_model_dir).to('cpu')\n", "pt_model.eval()\n", "pt_model.config.hidden_dropout_prob = 0.0\n", "pt_model.config.attention_probs_dropout_prob = 0.0\n", "pt_model.config.to_json_file(tf_config_file)\n", "torch.save(pt_model.state_dict(), pt_init_ckpt)\n", "\n", "# Inputs\n", "input_ids_pt, mask_ids_pt, seg_ids_pt = tokenize(input_text, pt_tokenizer)\n", "\n", "# PT 
Embedding\n", "tok_tensor = torch.tensor(input_ids_pt).to('cpu').unsqueeze(0)\n", "seg_tensor = torch.tensor(seg_ids_pt).to('cpu').unsqueeze(0)\n", "msk_tensor = torch.tensor(mask_ids_pt).to('cpu').unsqueeze(0)\n", "attn_blks, nsp_logits = pt_model(tok_tensor, seg_tensor, msk_tensor)\n", "pt_embedding = nsp_logits.detach().numpy() \n", "print(\"Pytorch embedding shape: {}\".format(pt_embedding.shape))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Pytorch → Tensorflow conversion" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From /home/ubuntu/anaconda3/envs/nlp/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Colocations handled automatically by placer.\n", "bert/embeddings/word_embeddings initialized\n", "bert/embeddings/position_embeddings initialized\n", "bert/embeddings/token_type_embeddings initialized\n", "bert/embeddings/LayerNorm/gamma initialized\n", "bert/embeddings/LayerNorm/beta initialized\n", "bert/encoder/layer_0/attention/self/query/kernel initialized\n", "bert/encoder/layer_0/attention/self/query/bias initialized\n", "bert/encoder/layer_0/attention/self/key/kernel initialized\n", "bert/encoder/layer_0/attention/self/key/bias initialized\n", "bert/encoder/layer_0/attention/self/value/kernel initialized\n", "bert/encoder/layer_0/attention/self/value/bias initialized\n", "bert/encoder/layer_0/attention/output/dense/kernel initialized\n", "bert/encoder/layer_0/attention/output/dense/bias initialized\n", "bert/encoder/layer_0/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_0/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_0/intermediate/dense/kernel initialized\n", "bert/encoder/layer_0/intermediate/dense/bias initialized\n", "bert/encoder/layer_0/output/dense/kernel initialized\n", "bert/encoder/layer_0/output/dense/bias initialized\n", "bert/encoder/layer_0/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_0/output/LayerNorm/beta initialized\n", "bert/encoder/layer_1/attention/self/query/kernel initialized\n", "bert/encoder/layer_1/attention/self/query/bias initialized\n", "bert/encoder/layer_1/attention/self/key/kernel initialized\n", "bert/encoder/layer_1/attention/self/key/bias initialized\n", "bert/encoder/layer_1/attention/self/value/kernel initialized\n", "bert/encoder/layer_1/attention/self/value/bias initialized\n", "bert/encoder/layer_1/attention/output/dense/kernel initialized\n", "bert/encoder/layer_1/attention/output/dense/bias initialized\n", "bert/encoder/layer_1/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_1/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_1/intermediate/dense/kernel initialized\n", "bert/encoder/layer_1/intermediate/dense/bias initialized\n", "bert/encoder/layer_1/output/dense/kernel initialized\n", "bert/encoder/layer_1/output/dense/bias initialized\n", "bert/encoder/layer_1/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_1/output/LayerNorm/beta initialized\n", "bert/encoder/layer_2/attention/self/query/kernel initialized\n", "bert/encoder/layer_2/attention/self/query/bias initialized\n", "bert/encoder/layer_2/attention/self/key/kernel initialized\n", "bert/encoder/layer_2/attention/self/key/bias initialized\n", 
"bert/encoder/layer_2/attention/self/value/kernel initialized\n", "bert/encoder/layer_2/attention/self/value/bias initialized\n", "bert/encoder/layer_2/attention/output/dense/kernel initialized\n", "bert/encoder/layer_2/attention/output/dense/bias initialized\n", "bert/encoder/layer_2/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_2/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_2/intermediate/dense/kernel initialized\n", "bert/encoder/layer_2/intermediate/dense/bias initialized\n", "bert/encoder/layer_2/output/dense/kernel initialized\n", "bert/encoder/layer_2/output/dense/bias initialized\n", "bert/encoder/layer_2/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_2/output/LayerNorm/beta initialized\n", "bert/encoder/layer_3/attention/self/query/kernel initialized\n", "bert/encoder/layer_3/attention/self/query/bias initialized\n", "bert/encoder/layer_3/attention/self/key/kernel initialized\n", "bert/encoder/layer_3/attention/self/key/bias initialized\n", "bert/encoder/layer_3/attention/self/value/kernel initialized\n", "bert/encoder/layer_3/attention/self/value/bias initialized\n", "bert/encoder/layer_3/attention/output/dense/kernel initialized\n", "bert/encoder/layer_3/attention/output/dense/bias initialized\n", "bert/encoder/layer_3/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_3/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_3/intermediate/dense/kernel initialized\n", "bert/encoder/layer_3/intermediate/dense/bias initialized\n", "bert/encoder/layer_3/output/dense/kernel initialized\n", "bert/encoder/layer_3/output/dense/bias initialized\n", "bert/encoder/layer_3/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_3/output/LayerNorm/beta initialized\n", "bert/encoder/layer_4/attention/self/query/kernel initialized\n", "bert/encoder/layer_4/attention/self/query/bias initialized\n", "bert/encoder/layer_4/attention/self/key/kernel initialized\n", "bert/encoder/layer_4/attention/self/key/bias initialized\n", "bert/encoder/layer_4/attention/self/value/kernel initialized\n", "bert/encoder/layer_4/attention/self/value/bias initialized\n", "bert/encoder/layer_4/attention/output/dense/kernel initialized\n", "bert/encoder/layer_4/attention/output/dense/bias initialized\n", "bert/encoder/layer_4/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_4/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_4/intermediate/dense/kernel initialized\n", "bert/encoder/layer_4/intermediate/dense/bias initialized\n", "bert/encoder/layer_4/output/dense/kernel initialized\n", "bert/encoder/layer_4/output/dense/bias initialized\n", "bert/encoder/layer_4/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_4/output/LayerNorm/beta initialized\n", "bert/encoder/layer_5/attention/self/query/kernel initialized\n", "bert/encoder/layer_5/attention/self/query/bias initialized\n", "bert/encoder/layer_5/attention/self/key/kernel initialized\n", "bert/encoder/layer_5/attention/self/key/bias initialized\n", "bert/encoder/layer_5/attention/self/value/kernel initialized\n", "bert/encoder/layer_5/attention/self/value/bias initialized\n", "bert/encoder/layer_5/attention/output/dense/kernel initialized\n", "bert/encoder/layer_5/attention/output/dense/bias initialized\n", "bert/encoder/layer_5/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_5/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_5/intermediate/dense/kernel initialized\n", 
"bert/encoder/layer_5/intermediate/dense/bias initialized\n", "bert/encoder/layer_5/output/dense/kernel initialized\n", "bert/encoder/layer_5/output/dense/bias initialized\n", "bert/encoder/layer_5/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_5/output/LayerNorm/beta initialized\n", "bert/encoder/layer_6/attention/self/query/kernel initialized\n", "bert/encoder/layer_6/attention/self/query/bias initialized\n", "bert/encoder/layer_6/attention/self/key/kernel initialized\n", "bert/encoder/layer_6/attention/self/key/bias initialized\n", "bert/encoder/layer_6/attention/self/value/kernel initialized\n", "bert/encoder/layer_6/attention/self/value/bias initialized\n", "bert/encoder/layer_6/attention/output/dense/kernel initialized\n", "bert/encoder/layer_6/attention/output/dense/bias initialized\n", "bert/encoder/layer_6/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_6/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_6/intermediate/dense/kernel initialized\n", "bert/encoder/layer_6/intermediate/dense/bias initialized\n", "bert/encoder/layer_6/output/dense/kernel initialized\n", "bert/encoder/layer_6/output/dense/bias initialized\n", "bert/encoder/layer_6/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_6/output/LayerNorm/beta initialized\n", "bert/encoder/layer_7/attention/self/query/kernel initialized\n", "bert/encoder/layer_7/attention/self/query/bias initialized\n", "bert/encoder/layer_7/attention/self/key/kernel initialized\n", "bert/encoder/layer_7/attention/self/key/bias initialized\n", "bert/encoder/layer_7/attention/self/value/kernel initialized\n", "bert/encoder/layer_7/attention/self/value/bias initialized\n", "bert/encoder/layer_7/attention/output/dense/kernel initialized\n", "bert/encoder/layer_7/attention/output/dense/bias initialized\n", "bert/encoder/layer_7/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_7/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_7/intermediate/dense/kernel initialized\n", "bert/encoder/layer_7/intermediate/dense/bias initialized\n", "bert/encoder/layer_7/output/dense/kernel initialized\n", "bert/encoder/layer_7/output/dense/bias initialized\n", "bert/encoder/layer_7/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_7/output/LayerNorm/beta initialized\n", "bert/encoder/layer_8/attention/self/query/kernel initialized\n", "bert/encoder/layer_8/attention/self/query/bias initialized\n", "bert/encoder/layer_8/attention/self/key/kernel initialized\n", "bert/encoder/layer_8/attention/self/key/bias initialized\n", "bert/encoder/layer_8/attention/self/value/kernel initialized\n", "bert/encoder/layer_8/attention/self/value/bias initialized\n", "bert/encoder/layer_8/attention/output/dense/kernel initialized\n", "bert/encoder/layer_8/attention/output/dense/bias initialized\n", "bert/encoder/layer_8/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_8/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_8/intermediate/dense/kernel initialized\n", "bert/encoder/layer_8/intermediate/dense/bias initialized\n", "bert/encoder/layer_8/output/dense/kernel initialized\n", "bert/encoder/layer_8/output/dense/bias initialized\n", "bert/encoder/layer_8/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_8/output/LayerNorm/beta initialized\n", "bert/encoder/layer_9/attention/self/query/kernel initialized\n", "bert/encoder/layer_9/attention/self/query/bias initialized\n", "bert/encoder/layer_9/attention/self/key/kernel 
initialized\n", "bert/encoder/layer_9/attention/self/key/bias initialized\n", "bert/encoder/layer_9/attention/self/value/kernel initialized\n", "bert/encoder/layer_9/attention/self/value/bias initialized\n", "bert/encoder/layer_9/attention/output/dense/kernel initialized\n", "bert/encoder/layer_9/attention/output/dense/bias initialized\n", "bert/encoder/layer_9/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_9/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_9/intermediate/dense/kernel initialized\n", "bert/encoder/layer_9/intermediate/dense/bias initialized\n", "bert/encoder/layer_9/output/dense/kernel initialized\n", "bert/encoder/layer_9/output/dense/bias initialized\n", "bert/encoder/layer_9/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_9/output/LayerNorm/beta initialized\n", "bert/encoder/layer_10/attention/self/query/kernel initialized\n", "bert/encoder/layer_10/attention/self/query/bias initialized\n", "bert/encoder/layer_10/attention/self/key/kernel initialized\n", "bert/encoder/layer_10/attention/self/key/bias initialized\n", "bert/encoder/layer_10/attention/self/value/kernel initialized\n", "bert/encoder/layer_10/attention/self/value/bias initialized\n", "bert/encoder/layer_10/attention/output/dense/kernel initialized\n", "bert/encoder/layer_10/attention/output/dense/bias initialized\n", "bert/encoder/layer_10/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_10/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_10/intermediate/dense/kernel initialized\n", "bert/encoder/layer_10/intermediate/dense/bias initialized\n", "bert/encoder/layer_10/output/dense/kernel initialized\n", "bert/encoder/layer_10/output/dense/bias initialized\n", "bert/encoder/layer_10/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_10/output/LayerNorm/beta initialized\n", "bert/encoder/layer_11/attention/self/query/kernel initialized\n", "bert/encoder/layer_11/attention/self/query/bias initialized\n", "bert/encoder/layer_11/attention/self/key/kernel initialized\n", "bert/encoder/layer_11/attention/self/key/bias initialized\n", "bert/encoder/layer_11/attention/self/value/kernel initialized\n", "bert/encoder/layer_11/attention/self/value/bias initialized\n", "bert/encoder/layer_11/attention/output/dense/kernel initialized\n", "bert/encoder/layer_11/attention/output/dense/bias initialized\n", "bert/encoder/layer_11/attention/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_11/attention/output/LayerNorm/beta initialized\n", "bert/encoder/layer_11/intermediate/dense/kernel initialized\n", "bert/encoder/layer_11/intermediate/dense/bias initialized\n", "bert/encoder/layer_11/output/dense/kernel initialized\n", "bert/encoder/layer_11/output/dense/bias initialized\n", "bert/encoder/layer_11/output/LayerNorm/gamma initialized\n", "bert/encoder/layer_11/output/LayerNorm/beta initialized\n", "bert/pooler/dense/kernel initialized\n", "bert/pooler/dense/bias initialized\n" ] } ], "source": [ "from pytorch_pretrained_bert.convert_pytorch_checkpoint_to_tf import main\n", "\n", "main([\n", " '--model_name', model_typ, \n", " '--pytorch_model_path', pt_init_ckpt,\n", " '--tf_cache_dir', tf_model_dir,\n", " '--cache_dir', pt_model_dir\n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Tensorflow execution" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "WARNING: The TensorFlow contrib module will not be 
included in TensorFlow 2.0.\n", "For more information, please see:\n", " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", " * https://github.com/tensorflow/addons\n", "If you depend on functionality not listed there, please file an issue.\n", "\n", "WARNING:tensorflow:From /home/ubuntu/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use keras.layers.dense instead.\n", "WARNING:tensorflow:From /home/ubuntu/anaconda3/envs/nlp/lib/python3.6/site-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use standard file APIs to check for files with this prefix.\n", "INFO:tensorflow:Restoring parameters from /home/ubuntu/.pytorch-pretrained-BERT-cache/bert-base-uncased/tf/bert_base_uncased.ckpt\n", "Tensorflow embedding shape: (1, 768)\n" ] } ], "source": [ "import tensorflow as tf\n", "sys.path.insert(0, tf_bert_dir)\n", "import modeling\n", "import tokenization\n", "\n", "tf.reset_default_graph()\n", "\n", "# Process text\n", "tf_tokenizer = tokenization.FullTokenizer(vocab_file=tf_vocab_file)\n", "\n", "# Graph inputs\n", "input_ids_tf, mask_ids_tf, seg_ids_tf = tokenize(input_text, tf_tokenizer)\n", "config = modeling.BertConfig.from_json_file(\n", " os.path.join(tf_model_dir, 'bert_config.json'))\n", "input_tensor = tf.placeholder(\n", " dtype=tf.int32,\n", " shape=[1, None],\n", " name='input_ids')\n", "mask_tensor = tf.placeholder(\n", " dtype=tf.int32,\n", " shape=[1, None],\n", " name='mask_ids')\n", "seg_tensor = tf.placeholder(\n", " dtype=tf.int32,\n", " shape=[1, None],\n", " name='seg_ids')\n", "tf_model = modeling.BertModel(\n", " config=config,\n", " is_training=False,\n", " input_ids=input_tensor,\n", " input_mask=mask_tensor,\n", " token_type_ids=seg_tensor,\n", " use_one_hot_embeddings=False)\n", "output_layer = tf_model.get_pooled_output()\n", "\n", "# Load tf model\n", "session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n", "vars_to_load = [v for v in tf.global_variables()]\n", "session.run(tf.variables_initializer(var_list=vars_to_load))\n", "saver = tf.train.Saver(vars_to_load)\n", "saver.restore(session, save_path=tf_init_ckpt)\n", "\n", "# TF Embedding\n", "fetches = output_layer\n", "feed_dict = {\n", " input_tensor: [input_ids_tf],\n", " mask_tensor: [mask_ids_tf],\n", " seg_tensor: [seg_ids_tf]\n", "}\n", "tf_embedding = session.run(fetches=fetches, feed_dict=feed_dict)\n", "print(\"Tensorflow embedding shape: {}\".format(tf_embedding.shape))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Compare Tokenization" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "TOKEN_IDS_PT: [101, 3958, 27227, 2001, 1037, 13997, 11510, 102, 0, 0, 0, 0]\n", "TOKEN_IDS_TF: [101, 3958, 27227, 2001, 1037, 13997, 11510, 102, 0, 0, 0, 0]\n", "SEG_IDS_PT: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "SEG_IDS_TF: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "MASK_IDS_PT: [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]\n", "MASK_IDS_TF: [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]\n" ] } ], "source": [ "print(\"TOKEN_IDS_PT: {}\".format(input_ids_pt))\n", "print(\"TOKEN_IDS_TF: {}\".format(input_ids_tf))\n", "print(\"SEG_IDS_PT: {}\".format(seg_ids_pt))\n", "print(\"SEG_IDS_TF: 
{}\".format(seg_ids_tf))\n", "print(\"MASK_IDS_PT: {}\".format(mask_ids_pt))\n", "print(\"MASK_IDS_TF: {}\".format(mask_ids_tf))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Compare Model Weights" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "bert/embeddings/word_embeddings\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (30522, 768) values: [-0.01018257 -0.06154883 -0.02649689 -0.0420608 0.00116716]\n", "TF: shape: (30522, 768) values: [-0.01018257 -0.06154883 -0.02649689 -0.0420608 0.00116716]\n", "\n", "bert/embeddings/token_type_embeddings\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (2, 768) values: [0.00043164 0.01098826 0.00370439 0.00150542 0.00057812]\n", "TF: shape: (2, 768) values: [0.00043164 0.01098826 0.00370439 0.00150542 0.00057812]\n", "\n", "bert/embeddings/position_embeddings\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (512, 768) values: [ 0.01750538 -0.02563101 -0.03664156 -0.02528613 0.00797095]\n", "TF: shape: (512, 768) values: [ 0.01750538 -0.02563101 -0.03664156 -0.02528613 0.00797095]\n", "\n", "bert/embeddings/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.02591471 -0.0195513 0.02423946 0.08904593 -0.06281059]\n", "TF: shape: (768,) values: [-0.02591471 -0.0195513 0.02423946 0.08904593 -0.06281059]\n", "\n", "bert/embeddings/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.9260566 0.8851115 0.85807985 0.8616906 0.8937205 ]\n", "TF: shape: (768,) values: [0.9260566 0.8851115 0.85807985 0.8616906 0.8937205 ]\n", "\n", "bert/encoder/layer_0/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.01640572 -0.03257025 0.01046295 -0.04442816 -0.02256124]\n", "TF: shape: (768, 768) values: [-0.01640572 -0.03257025 0.01046295 -0.04442816 -0.02256124]\n", "\n", "bert/encoder/layer_0/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.58488506 -0.3312432 -0.43010172 0.37446147 -0.29811692]\n", "TF: shape: (768,) values: [ 0.58488506 -0.3312432 -0.43010172 0.37446147 -0.29811692]\n", "\n", "bert/encoder/layer_0/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.00807745 0.02652155 -0.01866494 0.01797846 0.00450485]\n", "TF: shape: (768, 768) values: [ 0.00807745 0.02652155 -0.01866494 0.01797846 0.00450485]\n", "\n", "bert/encoder/layer_0/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.00104306 0.00035106 -0.0024626 -0.00010567 -0.00119283]\n", "TF: shape: (768,) values: [ 0.00104306 0.00035106 -0.0024626 -0.00010567 -0.00119283]\n", "\n", "bert/encoder/layer_0/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.01144261 -0.02663044 0.01911472 -0.02206182 -0.00287949]\n", "TF: shape: (768, 768) values: [ 0.01144261 -0.02663044 0.01911472 -0.02206182 -0.00287949]\n", "\n", "bert/encoder/layer_0/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.01184616 -0.01596605 -0.00251847 0.01736802 0.00449983]\n", "TF: shape: (768,) values: [-0.01184616 -0.01596605 -0.00251847 0.01736802 0.00449983]\n", "\n", "bert/encoder/layer_0/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.00581949 0.03170148 -0.06135742 -0.01706108 -0.00759045]\n", "TF: shape: (768, 768) values: [ 
0.00581949 0.03170148 -0.06135742 -0.01706108 -0.00759045]\n", "\n", "... (identical PT/TF weight comparisons for the remaining tensors of encoder layers 0 through 4 elided; |sum(pt_wts - tf_wts)| = 0.0 for every tensor) ...\n", "\n", "bert/encoder/layer_4/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: 
(768,) values: [ 0.03054528 0.00479777 -0.02729505 -0.0325212 -0.00525727]\n", "TF: shape: (768,) values: [ 0.03054528 0.00479777 -0.02729505 -0.0325212 -0.00525727]\n", "\n", "bert/encoder/layer_4/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.00903359 0.0052285 -0.02841488 -0.22355485 0.28281343]\n", "TF: shape: (768,) values: [ 0.00903359 0.0052285 -0.02841488 -0.22355485 0.28281343]\n", "\n", "bert/encoder/layer_4/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8849676 0.86927813 0.8114595 0.80269504 0.94864094]\n", "TF: shape: (768,) values: [0.8849676 0.86927813 0.8114595 0.80269504 0.94864094]\n", "\n", "bert/encoder/layer_4/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [-0.00639783 0.06198016 -0.03184223 0.00485356 -0.02453273]\n", "TF: shape: (768, 3072) values: [-0.00639783 0.06198016 -0.03184223 0.00485356 -0.02453273]\n", "\n", "bert/encoder/layer_4/intermediate/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.08770327 -0.11779705 -0.11764182 -0.00192611 -0.1335473 ]\n", "TF: shape: (3072,) values: [-0.08770327 -0.11779705 -0.11764182 -0.00192611 -0.1335473 ]\n", "\n", "bert/encoder/layer_4/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [-0.05421264 0.0221118 -0.02674172 0.03672203 -0.02399626]\n", "TF: shape: (3072, 768) values: [-0.05421264 0.0221118 -0.02674172 0.03672203 -0.02399626]\n", "\n", "bert/encoder/layer_4/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.05068972 0.04838871 0.01156022 0.05381602 0.08857913]\n", "TF: shape: (768,) values: [-0.05068972 0.04838871 0.01156022 0.05381602 0.08857913]\n", "\n", "bert/encoder/layer_4/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.04338909 -0.0781464 -0.01518662 0.04936362 -0.12378412]\n", "TF: shape: (768,) values: [-0.04338909 -0.0781464 -0.01518662 0.04936362 -0.12378412]\n", "\n", "bert/encoder/layer_4/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8734387 0.8576282 0.8339444 0.8450325 0.8105372]\n", "TF: shape: (768,) values: [0.8734387 0.8576282 0.8339444 0.8450325 0.8105372]\n", "\n", "bert/encoder/layer_5/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.00858843 -0.03920127 0.02552994 -0.02786552 0.02436485]\n", "TF: shape: (768, 768) values: [-0.00858843 -0.03920127 0.02552994 -0.02786552 0.02436485]\n", "\n", "bert/encoder/layer_5/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.00859117 -0.01642405 -0.04391079 0.01085692 0.02925887]\n", "TF: shape: (768,) values: [-0.00859117 -0.01642405 -0.04391079 0.01085692 0.02925887]\n", "\n", "bert/encoder/layer_5/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.00352847 0.02330176 -0.00369894 -0.03904612 0.00294574]\n", "TF: shape: (768, 768) values: [ 0.00352847 0.02330176 -0.00369894 -0.03904612 0.00294574]\n", "\n", "bert/encoder/layer_5/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.01087186 -0.01176561 0.00016575 -0.01163023 0.00946616]\n", "TF: shape: (768,) values: [-0.01087186 -0.01176561 0.00016575 -0.01163023 0.00946616]\n", "\n", "bert/encoder/layer_5/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: 
shape: (768, 768) values: [ 0.06134222 0.04238288 0.02796064 -0.01284983 0.03683741]\n", "TF: shape: (768, 768) values: [ 0.06134222 0.04238288 0.02796064 -0.01284983 0.03683741]\n", "\n", "bert/encoder/layer_5/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.05061118 -0.02954445 -0.0034053 -0.00025261 0.0437019 ]\n", "TF: shape: (768,) values: [ 0.05061118 -0.02954445 -0.0034053 -0.00025261 0.0437019 ]\n", "\n", "bert/encoder/layer_5/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.00739815 0.0533964 -0.03736389 -0.04999201 0.01693069]\n", "TF: shape: (768, 768) values: [-0.00739815 0.0533964 -0.03736389 -0.04999201 0.01693069]\n", "\n", "bert/encoder/layer_5/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.0021682 0.01711399 -0.04201518 0.01605333 0.00552063]\n", "TF: shape: (768,) values: [-0.0021682 0.01711399 -0.04201518 0.01605333 0.00552063]\n", "\n", "bert/encoder/layer_5/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.06841327 -0.0146848 0.09792476 -0.23284538 0.2785602 ]\n", "TF: shape: (768,) values: [-0.06841327 -0.0146848 0.09792476 -0.23284538 0.2785602 ]\n", "\n", "bert/encoder/layer_5/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8908311 0.87884724 0.81637293 0.8047641 0.96539867]\n", "TF: shape: (768,) values: [0.8908311 0.87884724 0.81637293 0.8047641 0.96539867]\n", "\n", "bert/encoder/layer_5/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [-0.03246041 0.07251058 -0.08201726 0.00772481 0.02532209]\n", "TF: shape: (768, 3072) values: [-0.03246041 0.07251058 -0.08201726 0.00772481 0.02532209]\n", "\n", "bert/encoder/layer_5/intermediate/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.09689714 -0.27696273 -0.13047501 -0.10892326 -0.1057625 ]\n", "TF: shape: (3072,) values: [-0.09689714 -0.27696273 -0.13047501 -0.10892326 -0.1057625 ]\n", "\n", "bert/encoder/layer_5/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [ 0.0642072 -0.01738782 -0.05095377 0.00523853 0.04425264]\n", "TF: shape: (3072, 768) values: [ 0.0642072 -0.01738782 -0.05095377 0.00523853 0.04425264]\n", "\n", "bert/encoder/layer_5/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.0007217 0.06006297 0.0016595 0.03848181 0.06703516]\n", "TF: shape: (768,) values: [-0.0007217 0.06006297 0.0016595 0.03848181 0.06703516]\n", "\n", "bert/encoder/layer_5/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.00278729 -0.05594506 -0.0631047 0.06023621 -0.18672828]\n", "TF: shape: (768,) values: [-0.00278729 -0.05594506 -0.0631047 0.06023621 -0.18672828]\n", "\n", "bert/encoder/layer_5/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8621183 0.8515807 0.82654256 0.81729776 0.7985204 ]\n", "TF: shape: (768,) values: [0.8621183 0.8515807 0.82654256 0.81729776 0.7985204 ]\n", "\n", "bert/encoder/layer_6/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.02527807 -0.01429243 0.01467054 0.08624706 -0.00188593]\n", "TF: shape: (768, 768) values: [-0.02527807 -0.01429243 0.01467054 0.08624706 -0.00188593]\n", "\n", "bert/encoder/layer_6/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 
0.0\n", "PT: shape: (768,) values: [-0.17319514 0.27564248 0.16801168 -0.10946485 0.1643271 ]\n", "TF: shape: (768,) values: [-0.17319514 0.27564248 0.16801168 -0.10946485 0.1643271 ]\n", "\n", "bert/encoder/layer_6/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.05886372 0.00706217 0.0398422 0.00882155 -0.04571463]\n", "TF: shape: (768, 768) values: [ 0.05886372 0.00706217 0.0398422 0.00882155 -0.04571463]\n", "\n", "bert/encoder/layer_6/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.00424696 -0.0001192 0.0046079 -0.00315606 0.00434314]\n", "TF: shape: (768,) values: [-0.00424696 -0.0001192 0.0046079 -0.00315606 0.00434314]\n", "\n", "bert/encoder/layer_6/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.01720381 0.01170722 0.02346902 -0.02284313 -0.03173028]\n", "TF: shape: (768, 768) values: [-0.01720381 0.01170722 0.02346902 -0.02284313 -0.03173028]\n", "\n", "bert/encoder/layer_6/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.03492057 0.01813157 -0.00182878 -0.01420629 -0.00508944]\n", "TF: shape: (768,) values: [-0.03492057 0.01813157 -0.00182878 -0.01420629 -0.00508944]\n", "\n", "bert/encoder/layer_6/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.0323688 -0.00689882 0.07379091 0.01121114 -0.02059202]\n", "TF: shape: (768, 768) values: [ 0.0323688 -0.00689882 0.07379091 0.01121114 -0.02059202]\n", "\n", "bert/encoder/layer_6/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.00648672 -0.05935453 -0.05673229 -0.01152384 -0.02766573]\n", "TF: shape: (768,) values: [-0.00648672 -0.05935453 -0.05673229 -0.01152384 -0.02766573]\n", "\n", "bert/encoder/layer_6/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.06793639 0.03157783 0.15647687 -0.15025291 0.14727171]\n", "TF: shape: (768,) values: [-0.06793639 0.03157783 0.15647687 -0.15025291 0.14727171]\n", "\n", "bert/encoder/layer_6/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8882361 0.8704905 0.80289173 0.77365315 0.92333615]\n", "TF: shape: (768,) values: [0.8882361 0.8704905 0.80289173 0.77365315 0.92333615]\n", "\n", "bert/encoder/layer_6/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [ 0.04492201 0.05160861 0.09041415 -0.00742628 0.048133 ]\n", "TF: shape: (768, 3072) values: [ 0.04492201 0.05160861 0.09041415 -0.00742628 0.048133 ]\n", "\n", "bert/encoder/layer_6/intermediate/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.09301704 -0.158612 -0.10633879 -0.09706812 -0.17319229]\n", "TF: shape: (3072,) values: [-0.09301704 -0.158612 -0.10633879 -0.09706812 -0.17319229]\n", "\n", "bert/encoder/layer_6/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [-0.00085372 -0.00974195 0.00684915 0.00038686 0.06610142]\n", "TF: shape: (3072, 768) values: [-0.00085372 -0.00974195 0.00684915 0.00038686 0.06610142]\n", "\n", "bert/encoder/layer_6/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.03254414 0.05681704 0.03720434 0.01936359 0.09134153]\n", "TF: shape: (768,) values: [-0.03254414 0.05681704 0.03720434 0.01936359 0.09134153]\n", "\n", "bert/encoder/layer_6/output/LayerNorm/beta\n", 
"|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.0117129 -0.03209404 -0.08646043 0.03760341 -0.13841423]\n", "TF: shape: (768,) values: [-0.0117129 -0.03209404 -0.08646043 0.03760341 -0.13841423]\n", "\n", "bert/encoder/layer_6/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8674175 0.8657014 0.8151861 0.82301307 0.8305737 ]\n", "TF: shape: (768,) values: [0.8674175 0.8657014 0.8151861 0.82301307 0.8305737 ]\n", "\n", "bert/encoder/layer_7/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.00075523 -0.01501983 0.04090893 0.01884826 0.04670674]\n", "TF: shape: (768, 768) values: [-0.00075523 -0.01501983 0.04090893 0.01884826 0.04670674]\n", "\n", "bert/encoder/layer_7/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.0010344 -0.00423982 0.3117479 0.04494623 -0.01260845]\n", "TF: shape: (768,) values: [ 0.0010344 -0.00423982 0.3117479 0.04494623 -0.01260845]\n", "\n", "bert/encoder/layer_7/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.02781927 -0.00906972 0.02121989 0.0298591 0.05854786]\n", "TF: shape: (768, 768) values: [ 0.02781927 -0.00906972 0.02121989 0.0298591 0.05854786]\n", "\n", "bert/encoder/layer_7/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.00074918 0.00731079 0.00089338 0.00345652 0.00043817]\n", "TF: shape: (768,) values: [-0.00074918 0.00731079 0.00089338 0.00345652 0.00043817]\n", "\n", "bert/encoder/layer_7/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.01080035 -0.03468366 0.03167168 0.01583073 0.0327719 ]\n", "TF: shape: (768, 768) values: [-0.01080035 -0.03468366 0.03167168 0.01583073 0.0327719 ]\n", "\n", "bert/encoder/layer_7/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.02824226 0.01605172 0.00067929 -0.04553111 0.0076044 ]\n", "TF: shape: (768,) values: [-0.02824226 0.01605172 0.00067929 -0.04553111 0.0076044 ]\n", "\n", "bert/encoder/layer_7/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.05496112 0.01006968 0.02206531 -0.01873116 0.02149118]\n", "TF: shape: (768, 768) values: [-0.05496112 0.01006968 0.02206531 -0.01873116 0.02149118]\n", "\n", "bert/encoder/layer_7/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.00349772 -0.05831751 -0.0594084 -0.0342187 0.02965918]\n", "TF: shape: (768,) values: [ 0.00349772 -0.05831751 -0.0594084 -0.0342187 0.02965918]\n", "\n", "bert/encoder/layer_7/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.02826844 0.04427591 0.05678326 -0.0475907 0.16136196]\n", "TF: shape: (768,) values: [-0.02826844 0.04427591 0.05678326 -0.0475907 0.16136196]\n", "\n", "bert/encoder/layer_7/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8742141 0.870608 0.79147685 0.7595279 0.9223656 ]\n", "TF: shape: (768,) values: [0.8742141 0.870608 0.79147685 0.7595279 0.9223656 ]\n", "\n", "bert/encoder/layer_7/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [ 0.03598932 -0.12225644 0.03019998 0.05691092 0.03717208]\n", "TF: shape: (768, 3072) values: [ 0.03598932 -0.12225644 0.03019998 0.05691092 0.03717208]\n", "\n", 
"bert/encoder/layer_7/intermediate/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.12465011 -0.08639494 -0.06206005 -0.08012587 -0.08773767]\n", "TF: shape: (3072,) values: [-0.12465011 -0.08639494 -0.06206005 -0.08012587 -0.08773767]\n", "\n", "bert/encoder/layer_7/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [-0.02190432 -0.02279165 0.03279508 0.01011065 -0.07793335]\n", "TF: shape: (3072, 768) values: [-0.02190432 -0.02279165 0.03279508 0.01011065 -0.07793335]\n", "\n", "bert/encoder/layer_7/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.04282642 0.03700675 0.06142357 -0.04787201 0.02958163]\n", "TF: shape: (768,) values: [-0.04282642 0.03700675 0.06142357 -0.04787201 0.02958163]\n", "\n", "bert/encoder/layer_7/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.03142036 -0.04358427 -0.05132087 -0.01788123 -0.16399944]\n", "TF: shape: (768,) values: [-0.03142036 -0.04358427 -0.05132087 -0.01788123 -0.16399944]\n", "\n", "bert/encoder/layer_7/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.83858097 0.8179645 0.80693793 0.81225365 0.7844832 ]\n", "TF: shape: (768,) values: [0.83858097 0.8179645 0.80693793 0.81225365 0.7844832 ]\n", "\n", "bert/encoder/layer_8/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [0.0448719 0.02289526 0.03083764 0.03048073 0.02436891]\n", "TF: shape: (768, 768) values: [0.0448719 0.02289526 0.03083764 0.03048073 0.02436891]\n", "\n", "bert/encoder/layer_8/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.25132924 -0.23753347 0.02581017 0.00901509 0.18424493]\n", "TF: shape: (768,) values: [-0.25132924 -0.23753347 0.02581017 0.00901509 0.18424493]\n", "\n", "bert/encoder/layer_8/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.01999719 0.00711403 0.03949134 -0.0102224 0.03152475]\n", "TF: shape: (768, 768) values: [-0.01999719 0.00711403 0.03949134 -0.0102224 0.03152475]\n", "\n", "bert/encoder/layer_8/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 5.5668897e-05 3.4638541e-03 -1.7605867e-03 -6.1321147e-03\n", " -4.4074579e-04]\n", "TF: shape: (768,) values: [ 5.5668897e-05 3.4638541e-03 -1.7605867e-03 -6.1321147e-03\n", " -4.4074579e-04]\n", "\n", "bert/encoder/layer_8/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.00736056 -0.01795213 0.00104576 -0.00034653 0.03190543]\n", "TF: shape: (768, 768) values: [-0.00736056 -0.01795213 0.00104576 -0.00034653 0.03190543]\n", "\n", "bert/encoder/layer_8/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.02892835 0.00642501 -0.03608712 0.00264269 -0.0245198 ]\n", "TF: shape: (768,) values: [ 0.02892835 0.00642501 -0.03608712 0.00264269 -0.0245198 ]\n", "\n", "bert/encoder/layer_8/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.03971623 0.05307067 -0.01298818 0.00946693 -0.00121235]\n", "TF: shape: (768, 768) values: [ 0.03971623 0.05307067 -0.01298818 0.00946693 -0.00121235]\n", "\n", "bert/encoder/layer_8/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.01468131 -0.05406622 -0.06289103 0.004484 0.0240819 ]\n", "TF: shape: (768,) values: [ 
0.01468131 -0.05406622 -0.06289103 0.004484 0.0240819 ]\n", "\n", "bert/encoder/layer_8/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.06004262 0.0457275 0.08688109 -0.14416659 -0.05500487]\n", "TF: shape: (768,) values: [-0.06004262 0.0457275 0.08688109 -0.14416659 -0.05500487]\n", "\n", "bert/encoder/layer_8/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8907534 0.89116573 0.811639 0.7810443 0.9045574 ]\n", "TF: shape: (768,) values: [0.8907534 0.89116573 0.811639 0.7810443 0.9045574 ]\n", "\n", "bert/encoder/layer_8/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [-0.01962814 -0.01482586 -0.02292624 0.03397145 0.02457482]\n", "TF: shape: (768, 3072) values: [-0.01962814 -0.01482586 -0.02292624 0.03397145 0.02457482]\n", "\n", "bert/encoder/layer_8/intermediate/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.08129632 -0.1691108 -0.10681771 -0.10392351 -0.13120006]\n", "TF: shape: (3072,) values: [-0.08129632 -0.1691108 -0.10681771 -0.10392351 -0.13120006]\n", "\n", "bert/encoder/layer_8/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [-0.04683433 -0.02690669 0.02979059 0.02223369 -0.00130287]\n", "TF: shape: (3072, 768) values: [-0.04683433 -0.02690669 0.02979059 0.02223369 -0.00130287]\n", "\n", "bert/encoder/layer_8/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.09155537 -0.04465394 0.05649116 -0.09628641 0.11875238]\n", "TF: shape: (768,) values: [-0.09155537 -0.04465394 0.05649116 -0.09628641 0.11875238]\n", "\n", "bert/encoder/layer_8/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.06043394 -0.06657387 -0.05341128 -0.00374733 -0.10855272]\n", "TF: shape: (768,) values: [-0.06043394 -0.06657387 -0.05341128 -0.00374733 -0.10855272]\n", "\n", "bert/encoder/layer_8/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.84467345 0.84421015 0.82582206 0.84553087 0.8207573 ]\n", "TF: shape: (768,) values: [0.84467345 0.84421015 0.82582206 0.84553087 0.8207573 ]\n", "\n", "bert/encoder/layer_9/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.08004542 -0.0143706 -0.04219061 -0.05175152 -0.01147588]\n", "TF: shape: (768, 768) values: [ 0.08004542 -0.0143706 -0.04219061 -0.05175152 -0.01147588]\n", "\n", "bert/encoder/layer_9/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.14508031 0.40926442 -0.3281781 -0.02869792 -0.26104516]\n", "TF: shape: (768,) values: [-0.14508031 0.40926442 -0.3281781 -0.02869792 -0.26104516]\n", "\n", "bert/encoder/layer_9/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.01337681 0.00615428 -0.0455939 0.03379053 -0.01992556]\n", "TF: shape: (768, 768) values: [-0.01337681 0.00615428 -0.0455939 0.03379053 -0.01992556]\n", "\n", "bert/encoder/layer_9/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.0051302 0.0083288 0.00377641 0.00928865 -0.00418182]\n", "TF: shape: (768,) values: [-0.0051302 0.0083288 0.00377641 0.00928865 -0.00418182]\n", "\n", "bert/encoder/layer_9/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.02485976 -0.0301923 0.00984638 -0.02495162 0.01074037]\n", "TF: shape: 
(768, 768) values: [-0.02485976 -0.0301923 0.00984638 -0.02495162 0.01074037]\n", "\n", "bert/encoder/layer_9/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.04229928 -0.02636711 0.0060447 0.00222829 0.04979481]\n", "TF: shape: (768,) values: [-0.04229928 -0.02636711 0.0060447 0.00222829 0.04979481]\n", "\n", "bert/encoder/layer_9/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.01258144 0.00871274 0.00482882 -0.00675888 -0.04390825]\n", "TF: shape: (768, 768) values: [-0.01258144 0.00871274 0.00482882 -0.00675888 -0.04390825]\n", "\n", "bert/encoder/layer_9/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.02457753 0.05051134 -0.06890804 -0.00962795 0.00864793]\n", "TF: shape: (768,) values: [ 0.02457753 0.05051134 -0.06890804 -0.00962795 0.00864793]\n", "\n", "bert/encoder/layer_9/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.08963391 -0.06362236 0.0676669 -0.09895685 0.08318913]\n", "TF: shape: (768,) values: [-0.08963391 -0.06362236 0.0676669 -0.09895685 0.08318913]\n", "\n", "bert/encoder/layer_9/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.85100883 0.82569736 0.7927931 0.7660444 0.8912934 ]\n", "TF: shape: (768,) values: [0.85100883 0.82569736 0.7927931 0.7660444 0.8912934 ]\n", "\n", "bert/encoder/layer_9/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [ 0.06290598 0.0203122 -0.05384256 0.05442941 0.00484769]\n", "TF: shape: (768, 3072) values: [ 0.06290598 0.0203122 -0.05384256 0.05442941 0.00484769]\n", "\n", "bert/encoder/layer_9/intermediate/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.10818483 -0.00169527 -0.08962701 -0.10280421 -0.14310956]\n", "TF: shape: (3072,) values: [-0.10818483 -0.00169527 -0.08962701 -0.10280421 -0.14310956]\n", "\n", "bert/encoder/layer_9/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [ 0.05487705 0.01644666 0.00436198 -0.00490768 -0.03238423]\n", "TF: shape: (3072, 768) values: [ 0.05487705 0.01644666 0.00436198 -0.00490768 -0.03238423]\n", "\n", "bert/encoder/layer_9/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.08755219 -0.01910074 -0.02988298 -0.08150438 0.09897955]\n", "TF: shape: (768,) values: [-0.08755219 -0.01910074 -0.02988298 -0.08150438 0.09897955]\n", "\n", "bert/encoder/layer_9/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.04136161 -0.02113917 -0.07581077 -0.00809791 -0.09790538]\n", "TF: shape: (768,) values: [-0.04136161 -0.02113917 -0.07581077 -0.00809791 -0.09790538]\n", "\n", "bert/encoder/layer_9/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8250572 0.83477134 0.7794141 0.81264955 0.7827918 ]\n", "TF: shape: (768,) values: [0.8250572 0.83477134 0.7794141 0.81264955 0.7827918 ]\n", "\n", "bert/encoder/layer_10/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.00071212 -0.00853064 0.01776993 0.03189976 0.02183623]\n", "TF: shape: (768, 768) values: [ 0.00071212 -0.00853064 0.01776993 0.03189976 0.02183623]\n", "\n", "bert/encoder/layer_10/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.03667567 -0.01449654 -0.03822913 0.00118343 
-0.05489838]\n", "TF: shape: (768,) values: [-0.03667567 -0.01449654 -0.03822913 0.00118343 -0.05489838]\n", "\n", "bert/encoder/layer_10/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.0494106 0.05531096 -0.02459413 -0.06019118 -0.02829785]\n", "TF: shape: (768, 768) values: [-0.0494106 0.05531096 -0.02459413 -0.06019118 -0.02829785]\n", "\n", "bert/encoder/layer_10/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.00692997 0.00855893 0.00670777 -0.0052475 -0.00017074]\n", "TF: shape: (768,) values: [-0.00692997 0.00855893 0.00670777 -0.0052475 -0.00017074]\n", "\n", "bert/encoder/layer_10/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.01911842 0.04858809 -0.02608485 0.00794924 -0.02246636]\n", "TF: shape: (768, 768) values: [ 0.01911842 0.04858809 -0.02608485 0.00794924 -0.02246636]\n", "\n", "bert/encoder/layer_10/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.0133503 -0.01224133 -0.0051834 -0.00232528 0.00148614]\n", "TF: shape: (768,) values: [-0.0133503 -0.01224133 -0.0051834 -0.00232528 0.00148614]\n", "\n", "bert/encoder/layer_10/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.05904732 0.02616 0.00794104 -0.02889086 -0.03692576]\n", "TF: shape: (768, 768) values: [-0.05904732 0.02616 0.00794104 -0.02889086 -0.03692576]\n", "\n", "bert/encoder/layer_10/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.02089205 0.01458059 0.05217785 0.0324267 0.00907548]\n", "TF: shape: (768,) values: [0.02089205 0.01458059 0.05217785 0.0324267 0.00907548]\n", "\n", "bert/encoder/layer_10/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.10986238 -0.04332284 0.02603893 -0.06236923 0.14469369]\n", "TF: shape: (768,) values: [-0.10986238 -0.04332284 0.02603893 -0.06236923 0.14469369]\n", "\n", "bert/encoder/layer_10/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8515822 0.81392974 0.836747 0.78040504 0.88091415]\n", "TF: shape: (768,) values: [0.8515822 0.81392974 0.836747 0.78040504 0.88091415]\n", "\n", "bert/encoder/layer_10/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [-0.07061081 0.06997397 0.01433633 0.04150929 0.02865192]\n", "TF: shape: (768, 3072) values: [-0.07061081 0.06997397 0.01433633 0.04150929 0.02865192]\n", "\n", "bert/encoder/layer_10/intermediate/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.13879126 -0.06401426 -0.1408043 -0.15043251 -0.10193057]\n", "TF: shape: (3072,) values: [-0.13879126 -0.06401426 -0.1408043 -0.15043251 -0.10193057]\n", "\n", "bert/encoder/layer_10/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [ 0.02918765 0.02609882 -0.02259856 0.01636725 -0.00038442]\n", "TF: shape: (3072, 768) values: [ 0.02918765 0.02609882 -0.02259856 0.01636725 -0.00038442]\n", "\n", "bert/encoder/layer_10/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.01799502 0.10970547 -0.02384165 -0.03350981 0.10491351]\n", "TF: shape: (768,) values: [-0.01799502 0.10970547 -0.02384165 -0.03350981 0.10491351]\n", "\n", "bert/encoder/layer_10/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.00999107 
-0.0217309 -0.0854177 -0.01109101 -0.07902174]\n", "TF: shape: (768,) values: [ 0.00999107 -0.0217309 -0.0854177 -0.01109101 -0.07902174]\n", "\n", "bert/encoder/layer_10/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.8272796 0.8597452 0.79116803 0.81267637 0.8273501 ]\n", "TF: shape: (768,) values: [0.8272796 0.8597452 0.79116803 0.81267637 0.8273501 ]\n", "\n", "bert/encoder/layer_11/attention/self/query/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.04141425 -0.06491017 -0.03202523 0.06226195 0.02193764]\n", "TF: shape: (768, 768) values: [-0.04141425 -0.06491017 -0.03202523 0.06226195 0.02193764]\n", "\n", "bert/encoder/layer_11/attention/self/query/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.0501296 0.11886728 0.2186807 0.08720991 -0.20476632]\n", "TF: shape: (768,) values: [ 0.0501296 0.11886728 0.2186807 0.08720991 -0.20476632]\n", "\n", "bert/encoder/layer_11/attention/self/key/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.02634268 -0.01357682 -0.06076496 0.04210597 0.01783857]\n", "TF: shape: (768, 768) values: [ 0.02634268 -0.01357682 -0.06076496 0.04210597 0.01783857]\n", "\n", "bert/encoder/layer_11/attention/self/key/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.0007798 -0.00065806 -0.00010521 0.00119144 -0.00180091]\n", "TF: shape: (768,) values: [-0.0007798 -0.00065806 -0.00010521 0.00119144 -0.00180091]\n", "\n", "bert/encoder/layer_11/attention/self/value/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.03520973 -0.00678078 -0.02883583 -0.01011515 0.04519828]\n", "TF: shape: (768, 768) values: [ 0.03520973 -0.00678078 -0.02883583 -0.01011515 0.04519828]\n", "\n", "bert/encoder/layer_11/attention/self/value/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.01502306 -0.00530942 0.00023572 0.00205218 -0.00578036]\n", "TF: shape: (768,) values: [ 0.01502306 -0.00530942 0.00023572 0.00205218 -0.00578036]\n", "\n", "bert/encoder/layer_11/attention/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [ 0.02361419 0.03112707 -0.00063031 0.04209773 -0.02434015]\n", "TF: shape: (768, 768) values: [ 0.02361419 0.03112707 -0.00063031 0.04209773 -0.02434015]\n", "\n", "bert/encoder/layer_11/attention/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [ 0.02566087 0.0028438 -0.00475678 0.02149458 -0.01755187]\n", "TF: shape: (768,) values: [ 0.02566087 0.0028438 -0.00475678 0.02149458 -0.01755187]\n", "\n", "bert/encoder/layer_11/attention/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.03134411 0.01207957 -0.04636396 -0.03013046 0.07944281]\n", "TF: shape: (768,) values: [-0.03134411 0.01207957 -0.04636396 -0.03013046 0.07944281]\n", "\n", "bert/encoder/layer_11/attention/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.85203767 0.8020145 0.8554237 0.8150477 0.8441815 ]\n", "TF: shape: (768,) values: [0.85203767 0.8020145 0.8554237 0.8150477 0.8441815 ]\n", "\n", "bert/encoder/layer_11/intermediate/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 3072) values: [ 0.05871898 -0.01124212 0.00206979 -0.04366514 -0.00716808]\n", "TF: shape: (768, 3072) values: [ 0.05871898 -0.01124212 0.00206979 -0.04366514 -0.00716808]\n", "\n", "bert/encoder/layer_11/intermediate/dense/bias\n", "|sum(pt_wts - 
tf_wts)| = 0.0\n", "PT: shape: (3072,) values: [-0.09762521 -0.06175711 -0.05153917 -0.08580919 -0.09734315]\n", "TF: shape: (3072,) values: [-0.09762521 -0.06175711 -0.05153917 -0.08580919 -0.09734315]\n", "\n", "bert/encoder/layer_11/output/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (3072, 768) values: [-0.022382 0.01073206 -0.01357213 0.02484621 0.01403091]\n", "TF: shape: (3072, 768) values: [-0.022382 0.01073206 -0.01357213 0.02484621 0.01403091]\n", "\n", "bert/encoder/layer_11/output/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.06574099 0.04207807 0.01201084 0.00229322 0.05551811]\n", "TF: shape: (768,) values: [-0.06574099 0.04207807 0.01201084 0.00229322 0.05551811]\n", "\n", "bert/encoder/layer_11/output/LayerNorm/beta\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.00634605 -0.01989403 0.04628465 0.01585056 -0.04256899]\n", "TF: shape: (768,) values: [-0.00634605 -0.01989403 0.04628465 0.01585056 -0.04256899]\n", "\n", "bert/encoder/layer_11/output/LayerNorm/gamma\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [0.6384234 0.6300364 0.66570055 0.6126921 0.63756436]\n", "TF: shape: (768,) values: [0.6384234 0.6300364 0.66570055 0.6126921 0.63756436]\n", "\n", "bert/pooler/dense/kernel\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768, 768) values: [-0.00127425 0.00199868 -0.03863145 -0.00139355 0.00691627]\n", "TF: shape: (768, 768) values: [-0.00127425 0.00199868 -0.03863145 -0.00139355 0.00691627]\n", "\n", "bert/pooler/dense/bias\n", "|sum(pt_wts - tf_wts)| = 0.0\n", "PT: shape: (768,) values: [-0.03597581 -0.00389536 0.05181352 0.02224747 -0.00493723]\n", "TF: shape: (768,) values: [-0.03597581 -0.00389536 0.05181352 0.02224747 -0.00493723]\n", "\n" ] } ], "source": [ "tensors_to_transopse = (\n", " \"dense.weight\",\n", " \"attention.self.query\",\n", " \"attention.self.key\",\n", " \"attention.self.value\"\n", ")\n", "var_map = (\n", " ('layer.', 'layer_'),\n", " ('word_embeddings.weight', 'word_embeddings'),\n", " ('position_embeddings.weight', 'position_embeddings'),\n", " ('token_type_embeddings.weight', 'token_type_embeddings'),\n", " ('.', '/'),\n", " ('LayerNorm/weight', 'LayerNorm/gamma'),\n", " ('LayerNorm/bias', 'LayerNorm/beta'),\n", " ('weight', 'kernel')\n", ")\n", "\n", "def to_tf_var_name(name:str):\n", " for patt, repl in iter(var_map):\n", " name = name.replace(patt, repl)\n", " return 'bert/{}'.format(name)\n", "\n", "tf_vars = {v.name: session.run(fetches=v) for v in tf.global_variables()}\n", "pt_vars = {}\n", "for v, T in pt_model.state_dict().items():\n", " T = T.detach().numpy()\n", " if any([x in v for x in tensors_to_transopse]):\n", " T = T.T\n", " pt_vars.update({to_tf_var_name(v): T})\n", "\n", "for var_name in tf_vars:\n", " \n", " pt = pt_vars[var_name.strip(\":0\")]\n", " tf = tf_vars[var_name]\n", "\n", " print(var_name.strip(\":0\"))\n", " \n", " # Assert equivalence\n", " print(\"|sum(pt_wts - tf_wts)| = {}\".format(\n", " np.abs(np.sum(pt - tf, keepdims=False))\n", " ))\n", " assert not np.sum(pt - tf, keepdims=False)\n", " \n", " if len(pt.shape) == 2:\n", " print(\"PT: shape: {0} values: {1}\".format(pt.shape, pt[0, :5]))\n", " print(\"TF: shape: {0} values: {1}\".format(tf.shape, tf[0, :5]))\n", " else:\n", " print(\"PT: shape: {0} values: {1}\".format(pt.shape, pt[:5]))\n", " print(\"TF: shape: {0} values: {1}\".format(tf.shape, tf[:5]))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Compare Layer-12 
Projections" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "MSE: 2.7155439966009e-05\n", "PT-values: [-0.876663 -0.41088238 -0.12200808 0.44941 0.19445966]\n", "TF-values: [-0.8742865 -0.40621698 -0.10585472 0.444904 0.1825743 ]\n" ] } ], "source": [ "# Mean Squared Error (MSE) between last projection of each model\n", "MSE = np.mean((pt_embedding - tf_embedding) ** 2, keepdims=False)\n", "print(\"MSE: {}\".format(MSE))\n", "print(\"PT-values: {}\".format(pt_embedding[0, :5]))\n", "print(\"TF-values: {}\".format(tf_embedding[0, :5]))" ] } ], "metadata": { "kernelspec": { "display_name": "nlp", "language": "python", "name": "nlp" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.8" } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: notebooks/Comparing-TF-and-PT-models-MLM-NSP.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Comparing TensorFlow (original) and PyTorch models\n", "\n", "You can use this small notebook to check the conversion of the model's weights from the TensorFlow model to the PyTorch model. In the following, we compare the weights of the last layer on a simple example (in `input.txt`) but both models returns all the hidden layers so you can check every stage of the model.\n", "\n", "To run this notebook, follow these instructions:\n", "- make sure that your Python environment has both TensorFlow and PyTorch installed,\n", "- download the original TensorFlow implementation,\n", "- download a pre-trained TensorFlow model as indicaded in the TensorFlow implementation readme,\n", "- run the script `convert_tf_checkpoint_to_pytorch.py` as indicated in the `README` to convert the pre-trained TensorFlow model to PyTorch.\n", "\n", "If needed change the relative paths indicated in this notebook (at the beggining of Sections 1 and 2) to point to the relevent models and code." 
] }, { "cell_type": "code", "execution_count": 1, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:26.999106Z", "start_time": "2018-11-16T10:02:26.985709Z" } }, "outputs": [], "source": [ "import os\n", "os.chdir('../')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1/ TensorFlow code" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:27.664528Z", "start_time": "2018-11-16T10:02:27.651019Z" } }, "outputs": [], "source": [ "original_tf_inplem_dir = \"./tensorflow_code/\"\n", "model_dir = \"../google_models/uncased_L-12_H-768_A-12/\"\n", "\n", "vocab_file = model_dir + \"vocab.txt\"\n", "bert_config_file = model_dir + \"bert_config.json\"\n", "init_checkpoint = model_dir + \"bert_model.ckpt\"\n", "\n", "input_file = \"./samples/input.txt\"\n", "max_seq_length = 128\n", "max_predictions_per_seq = 20\n", "\n", "masked_lm_positions = [6]" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:30.202182Z", "start_time": "2018-11-16T10:02:28.112570Z" } }, "outputs": [], "source": [ "import importlib.util\n", "import sys\n", "import tensorflow as tf\n", "import pytorch_pretrained_bert as ppb\n", "\n", "def del_all_flags(FLAGS):\n", " flags_dict = FLAGS._flags() \n", " keys_list = [keys for keys in flags_dict] \n", " for keys in keys_list:\n", " FLAGS.__delattr__(keys)\n", "\n", "del_all_flags(tf.flags.FLAGS)\n", "import tensorflow_code.extract_features as ef\n", "del_all_flags(tf.flags.FLAGS)\n", "import tensorflow_code.modeling as tfm\n", "del_all_flags(tf.flags.FLAGS)\n", "import tensorflow_code.tokenization as tft\n", "del_all_flags(tf.flags.FLAGS)\n", "import tensorflow_code.run_pretraining as rp\n", "del_all_flags(tf.flags.FLAGS)\n", "import tensorflow_code.create_pretraining_data as cpp" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:30.238027Z", "start_time": "2018-11-16T10:02:30.204943Z" }, "code_folding": [ 15 ] }, "outputs": [], "source": [ "import re\n", "class InputExample(object):\n", " \"\"\"A single instance example.\"\"\"\n", "\n", " def __init__(self, tokens, segment_ids, masked_lm_positions,\n", " masked_lm_labels, is_random_next):\n", " self.tokens = tokens\n", " self.segment_ids = segment_ids\n", " self.masked_lm_positions = masked_lm_positions\n", " self.masked_lm_labels = masked_lm_labels\n", " self.is_random_next = is_random_next\n", " def __repr__(self):\n", " return '\\n'.join(k + \":\" + str(v) for k, v in self.__dict__.items())\n", "\n", "\n", "def read_examples(input_file, tokenizer, masked_lm_positions):\n", " \"\"\"Read a list of `InputExample`s from an input file.\"\"\"\n", " examples = []\n", " unique_id = 0\n", " with tf.gfile.GFile(input_file, \"r\") as reader:\n", " while True:\n", " line = reader.readline()\n", " if not line:\n", " break\n", " line = line.strip()\n", " text_a = None\n", " text_b = None\n", " m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n", " if m is None:\n", " text_a = line\n", " else:\n", " text_a = m.group(1)\n", " text_b = m.group(2)\n", " tokens_a = tokenizer.tokenize(text_a)\n", " tokens_b = None\n", " if text_b:\n", " tokens_b = tokenizer.tokenize(text_b)\n", " tokens = tokens_a + tokens_b\n", " masked_lm_labels = []\n", " for m_pos in masked_lm_positions:\n", " masked_lm_labels.append(tokens[m_pos])\n", " tokens[m_pos] = '[MASK]'\n", " examples.append(\n", " InputExample(\n", " tokens = tokens,\n", " segment_ids = [0] * 
len(tokens_a) + [1] * len(tokens_b),\n", " masked_lm_positions = masked_lm_positions,\n", " masked_lm_labels = masked_lm_labels,\n", " is_random_next = False))\n", " unique_id += 1\n", " return examples" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:30.304018Z", "start_time": "2018-11-16T10:02:30.240189Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tokens:['who', 'was', 'jim', 'henson', '?', 'jim', '[MASK]', 'was', 'a', 'puppet', '##eer']\n", "segment_ids:[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]\n", "masked_lm_positions:[6]\n", "masked_lm_labels:['henson']\n", "is_random_next:False\n" ] } ], "source": [ "bert_config = tfm.BertConfig.from_json_file(bert_config_file)\n", "tokenizer = ppb.BertTokenizer(\n", " vocab_file=vocab_file, do_lower_case=True)\n", "examples = read_examples(input_file, tokenizer, masked_lm_positions=masked_lm_positions)\n", "\n", "print(examples[0])" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:33.324167Z", "start_time": "2018-11-16T10:02:33.291909Z" }, "code_folding": [ 16 ] }, "outputs": [], "source": [ "class InputFeatures(object):\n", " \"\"\"A single set of features of data.\"\"\"\n", "\n", " def __init__(self, input_ids, input_mask, segment_ids, masked_lm_positions,\n", " masked_lm_ids, masked_lm_weights, next_sentence_label):\n", " self.input_ids = input_ids\n", " self.input_mask = input_mask\n", " self.segment_ids = segment_ids\n", " self.masked_lm_positions = masked_lm_positions\n", " self.masked_lm_ids = masked_lm_ids\n", " self.masked_lm_weights = masked_lm_weights\n", " self.next_sentence_labels = next_sentence_label\n", "\n", " def __repr__(self):\n", " return '\\n'.join(k + \":\" + str(v) for k, v in self.__dict__.items())\n", "\n", "def pretraining_convert_examples_to_features(instances, tokenizer, max_seq_length,\n", " max_predictions_per_seq):\n", " \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n", " features = []\n", " for (inst_index, instance) in enumerate(instances):\n", " input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n", " input_mask = [1] * len(input_ids)\n", " segment_ids = list(instance.segment_ids)\n", " assert len(input_ids) <= max_seq_length\n", "\n", " while len(input_ids) < max_seq_length:\n", " input_ids.append(0)\n", " input_mask.append(0)\n", " segment_ids.append(0)\n", "\n", " assert len(input_ids) == max_seq_length\n", " assert len(input_mask) == max_seq_length\n", " assert len(segment_ids) == max_seq_length\n", "\n", " masked_lm_positions = list(instance.masked_lm_positions)\n", " masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n", " masked_lm_weights = [1.0] * len(masked_lm_ids)\n", "\n", " while len(masked_lm_positions) < max_predictions_per_seq:\n", " masked_lm_positions.append(0)\n", " masked_lm_ids.append(0)\n", " masked_lm_weights.append(0.0)\n", "\n", " next_sentence_label = 1 if instance.is_random_next else 0\n", "\n", " features.append(\n", " InputFeatures(input_ids, input_mask, segment_ids,\n", " masked_lm_positions, masked_lm_ids,\n", " masked_lm_weights, next_sentence_label))\n", "\n", " if inst_index < 5:\n", " tf.logging.info(\"*** Example ***\")\n", " tf.logging.info(\"tokens: %s\" % \" \".join(\n", " [str(x) for x in instance.tokens]))\n", " tf.logging.info(\"features: %s\" % str(features[-1]))\n", " return features" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "ExecuteTime": { "end_time": 
"2018-11-16T10:02:34.185367Z", "start_time": "2018-11-16T10:02:34.155046Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:*** Example ***\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:34 - INFO - tensorflow - *** Example ***\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:tokens: who was jim henson ? jim [MASK] was a puppet ##eer\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:34 - INFO - tensorflow - tokens: who was jim henson ? jim [MASK] was a puppet ##eer\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:features: input_ids:[2040, 2001, 3958, 27227, 1029, 3958, 103, 2001, 1037, 13997, 11510, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "input_mask:[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "segment_ids:[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "masked_lm_positions:[6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "masked_lm_ids:[27227, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "masked_lm_weights:[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n", "next_sentence_labels:0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:34 - INFO - tensorflow - features: input_ids:[2040, 2001, 3958, 27227, 1029, 3958, 103, 2001, 1037, 13997, 11510, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "input_mask:[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "segment_ids:[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "masked_lm_positions:[6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "masked_lm_ids:[27227, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "masked_lm_weights:[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n", "next_sentence_labels:0\n" ] } ], "source": [ "features = pretraining_convert_examples_to_features(\n", " instances=examples, max_seq_length=max_seq_length, \n", " max_predictions_per_seq=max_predictions_per_seq, tokenizer=tokenizer)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:34.912005Z", "start_time": "2018-11-16T10:02:34.882111Z" } }, "outputs": [], "source": [ "def input_fn_builder(features, seq_length, max_predictions_per_seq, tokenizer):\n", " \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n", "\n", " all_input_ids = []\n", " all_input_mask = []\n", " all_segment_ids = []\n", " all_masked_lm_positions = []\n", " all_masked_lm_ids = []\n", " all_masked_lm_weights = []\n", " all_next_sentence_labels = []\n", "\n", " for feature in features:\n", " all_input_ids.append(feature.input_ids)\n", " all_input_mask.append(feature.input_mask)\n", " all_segment_ids.append(feature.segment_ids)\n", " all_masked_lm_positions.append(feature.masked_lm_positions)\n", " all_masked_lm_ids.append(feature.masked_lm_ids)\n", " all_masked_lm_weights.append(feature.masked_lm_weights)\n", " all_next_sentence_labels.append(feature.next_sentence_labels)\n", "\n", " def input_fn(params):\n", " \"\"\"The actual input function.\"\"\"\n", " batch_size = params[\"batch_size\"]\n", "\n", " num_examples = len(features)\n", "\n", " # This is for demo purposes and does NOT scale to large data sets. We do\n", " # not use Dataset.from_generator() because that uses tf.py_func which is\n", " # not TPU compatible. 
The right way to load data is with TFRecordReader.\n", " d = tf.data.Dataset.from_tensor_slices({\n", " \"input_ids\":\n", " tf.constant(\n", " all_input_ids, shape=[num_examples, seq_length],\n", " dtype=tf.int32),\n", " \"input_mask\":\n", " tf.constant(\n", " all_input_mask,\n", " shape=[num_examples, seq_length],\n", " dtype=tf.int32),\n", " \"segment_ids\":\n", " tf.constant(\n", " all_segment_ids,\n", " shape=[num_examples, seq_length],\n", " dtype=tf.int32),\n", " \"masked_lm_positions\":\n", " tf.constant(\n", " all_masked_lm_positions,\n", " shape=[num_examples, max_predictions_per_seq],\n", " dtype=tf.int32),\n", " \"masked_lm_ids\":\n", " tf.constant(\n", " all_masked_lm_ids,\n", " shape=[num_examples, max_predictions_per_seq],\n", " dtype=tf.int32),\n", " \"masked_lm_weights\":\n", " tf.constant(\n", " all_masked_lm_weights,\n", " shape=[num_examples, max_predictions_per_seq],\n", " dtype=tf.float32),\n", " \"next_sentence_labels\":\n", " tf.constant(\n", " all_next_sentence_labels,\n", " shape=[num_examples, 1],\n", " dtype=tf.int32),\n", " })\n", "\n", " d = d.batch(batch_size=batch_size, drop_remainder=False)\n", " return d\n", "\n", " return input_fn\n" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:35.671603Z", "start_time": "2018-11-16T10:02:35.626167Z" }, "code_folding": [ 64, 77 ] }, "outputs": [], "source": [ "def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n", " num_train_steps, num_warmup_steps, use_tpu,\n", " use_one_hot_embeddings):\n", " \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n", "\n", " def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n", " \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n", "\n", " tf.logging.info(\"*** Features ***\")\n", " for name in sorted(features.keys()):\n", " tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n", "\n", " input_ids = features[\"input_ids\"]\n", " input_mask = features[\"input_mask\"]\n", " segment_ids = features[\"segment_ids\"]\n", " masked_lm_positions = features[\"masked_lm_positions\"]\n", " masked_lm_ids = features[\"masked_lm_ids\"]\n", " masked_lm_weights = features[\"masked_lm_weights\"]\n", " next_sentence_labels = features[\"next_sentence_labels\"]\n", "\n", " is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n", "\n", " model = tfm.BertModel(\n", " config=bert_config,\n", " is_training=is_training,\n", " input_ids=input_ids,\n", " input_mask=input_mask,\n", " token_type_ids=segment_ids,\n", " use_one_hot_embeddings=use_one_hot_embeddings)\n", "\n", " (masked_lm_loss,\n", " masked_lm_example_loss, masked_lm_log_probs) = rp.get_masked_lm_output(\n", " bert_config, model.get_sequence_output(), model.get_embedding_table(),\n", " masked_lm_positions, masked_lm_ids, masked_lm_weights)\n", "\n", " (next_sentence_loss, next_sentence_example_loss,\n", " next_sentence_log_probs) = rp.get_next_sentence_output(\n", " bert_config, model.get_pooled_output(), next_sentence_labels)\n", "\n", " total_loss = masked_lm_loss + next_sentence_loss\n", "\n", " tvars = tf.trainable_variables()\n", "\n", " initialized_variable_names = {}\n", " scaffold_fn = None\n", " if init_checkpoint:\n", " (assignment_map,\n", " initialized_variable_names) = tfm.get_assigment_map_from_checkpoint(\n", " tvars, init_checkpoint)\n", " if use_tpu:\n", "\n", " def tpu_scaffold():\n", " tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n", " return tf.train.Scaffold()\n", "\n", " 
scaffold_fn = tpu_scaffold\n", " else:\n", " tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n", "\n", " tf.logging.info(\"**** Trainable Variables ****\")\n", " for var in tvars:\n", " init_string = \"\"\n", " if var.name in initialized_variable_names:\n", " init_string = \", *INIT_FROM_CKPT*\"\n", " tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n", " init_string)\n", "\n", " output_spec = None\n", " if mode == tf.estimator.ModeKeys.TRAIN:\n", " masked_lm_positions = features[\"masked_lm_positions\"]\n", " masked_lm_ids = features[\"masked_lm_ids\"]\n", " masked_lm_weights = features[\"masked_lm_weights\"]\n", " next_sentence_labels = features[\"next_sentence_labels\"]\n", " train_op = optimization.create_optimizer(\n", " total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n", "\n", " output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n", " mode=mode,\n", " loss=total_loss,\n", " train_op=train_op,\n", " scaffold_fn=scaffold_fn)\n", " elif mode == tf.estimator.ModeKeys.EVAL:\n", " masked_lm_positions = features[\"masked_lm_positions\"]\n", " masked_lm_ids = features[\"masked_lm_ids\"]\n", " masked_lm_weights = features[\"masked_lm_weights\"]\n", " next_sentence_labels = features[\"next_sentence_labels\"]\n", "\n", " def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n", " masked_lm_weights, next_sentence_example_loss,\n", " next_sentence_log_probs, next_sentence_labels):\n", " \"\"\"Computes the loss and accuracy of the model.\"\"\"\n", " masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n", " [-1, masked_lm_log_probs.shape[-1]])\n", " masked_lm_predictions = tf.argmax(\n", " masked_lm_log_probs, axis=-1, output_type=tf.int32)\n", " masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n", " masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n", " masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n", " masked_lm_accuracy = tf.metrics.accuracy(\n", " labels=masked_lm_ids,\n", " predictions=masked_lm_predictions,\n", " weights=masked_lm_weights)\n", " masked_lm_mean_loss = tf.metrics.mean(\n", " values=masked_lm_example_loss, weights=masked_lm_weights)\n", "\n", " next_sentence_log_probs = tf.reshape(\n", " next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n", " next_sentence_predictions = tf.argmax(\n", " next_sentence_log_probs, axis=-1, output_type=tf.int32)\n", " next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n", " next_sentence_accuracy = tf.metrics.accuracy(\n", " labels=next_sentence_labels, predictions=next_sentence_predictions)\n", " next_sentence_mean_loss = tf.metrics.mean(\n", " values=next_sentence_example_loss)\n", "\n", " return {\n", " \"masked_lm_accuracy\": masked_lm_accuracy,\n", " \"masked_lm_loss\": masked_lm_mean_loss,\n", " \"next_sentence_accuracy\": next_sentence_accuracy,\n", " \"next_sentence_loss\": next_sentence_mean_loss,\n", " }\n", "\n", " eval_metrics = (metric_fn, [\n", " masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n", " masked_lm_weights, next_sentence_example_loss,\n", " next_sentence_log_probs, next_sentence_labels\n", " ])\n", " output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n", " mode=mode,\n", " loss=total_loss,\n", " eval_metrics=eval_metrics,\n", " scaffold_fn=scaffold_fn)\n", " elif mode == tf.estimator.ModeKeys.PREDICT:\n", " masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n", " [-1, masked_lm_log_probs.shape[-1]])\n", " masked_lm_predictions = tf.argmax(\n", " masked_lm_log_probs, axis=-1, 
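# flat [batch * max_predictions_per_seq] predicted token ids\n", " 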
output_type=tf.int32)\n", "\n", " next_sentence_log_probs = tf.reshape(\n", " next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n", " next_sentence_predictions = tf.argmax(\n", " next_sentence_log_probs, axis=-1, output_type=tf.int32)\n", "\n", " masked_lm_predictions = tf.reshape(masked_lm_predictions,\n", " [1, masked_lm_positions.shape[-1]])\n", " next_sentence_predictions = tf.reshape(next_sentence_predictions,\n", " [1, 1])\n", "\n", " predictions = {\n", " \"masked_lm_predictions\": masked_lm_predictions,\n", " \"next_sentence_predictions\": next_sentence_predictions\n", " }\n", "\n", " output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n", " mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n", " return output_spec\n", " else:\n", " raise ValueError(\"Only TRAIN, EVAL and PREDICT modes are supported: %s\" % (mode))\n", "\n", " return output_spec\n", "\n", " return model_fn" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:40.328700Z", "start_time": "2018-11-16T10:02:36.289676Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Estimator's model_fn (.model_fn at 0x12a864ae8>) includes params argument, but params are not passed to Estimator.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - WARNING - tensorflow - Estimator's model_fn (.model_fn at 0x12a864ae8>) includes params argument, but params are not passed to Estimator.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Using temporary folder as model directory: /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmp4x8r3x3d\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - WARNING - tensorflow - Using temporary folder as model directory: /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmp4x8r3x3d\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Using config: {'_model_dir': '/var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmp4x8r3x3d', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true\n", "graph_options {\n", " rewrite_options {\n", " meta_optimizer_iterations: ONE\n", " }\n", "}\n", ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': None, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1, '_tpu_config': TPUConfig(iterations_per_loop=2, num_shards=1, num_cores_per_replica=None, per_host_input_for_training=3, tpu_job_name=None, initial_infeed_sleep_secs=None, input_partition_dims=None), '_cluster': None}\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - Using config: {'_model_dir': '/var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmp4x8r3x3d', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true\n", "graph_options {\n", " rewrite_options {\n", " meta_optimizer_iterations: ONE\n", " }\n", "}\n", ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, 
'_log_step_count_steps': None, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1, '_tpu_config': TPUConfig(iterations_per_loop=2, num_shards=1, num_cores_per_replica=None, per_host_input_for_training=3, tpu_job_name=None, initial_infeed_sleep_secs=None, input_partition_dims=None), '_cluster': None}\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Setting TPUConfig.num_shards==1 is an unsupported behavior. Please fix as soon as possible (leaving num_shards as None.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - WARNING - tensorflow - Setting TPUConfig.num_shards==1 is an unsupported behavior. Please fix as soon as possible (leaving num_shards as None.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:_TPUContext: eval_on_tpu True\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - _TPUContext: eval_on_tpu True\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:eval_on_tpu ignored because use_tpu is False.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - WARNING - tensorflow - eval_on_tpu ignored because use_tpu is False.\n" ] } ], "source": [ "is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n", "run_config = tf.contrib.tpu.RunConfig(\n", " master=None,\n", " tpu_config=tf.contrib.tpu.TPUConfig(\n", " num_shards=1,\n", " per_host_input_for_training=is_per_host))\n", "\n", "model_fn = model_fn_builder(\n", " bert_config=bert_config,\n", " init_checkpoint=init_checkpoint,\n", " learning_rate=0,\n", " num_train_steps=1,\n", " num_warmup_steps=1,\n", " use_tpu=False,\n", " use_one_hot_embeddings=False)\n", "\n", "# If TPU is not available, this will fall back to normal Estimator on CPU\n", "# or GPU.\n", "estimator = tf.contrib.tpu.TPUEstimator(\n", " use_tpu=False,\n", " model_fn=model_fn,\n", " config=run_config,\n", " predict_batch_size=1)\n", "\n", "input_fn = input_fn_builder(\n", " features=features, seq_length=max_seq_length, max_predictions_per_seq=max_predictions_per_seq,\n", "tokenizer=tokenizer)" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:46.596956Z", "start_time": "2018-11-16T10:02:40.331008Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Could not find trained model in model_dir: /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmp4x8r3x3d, running initialization to predict.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - Could not find trained model in model_dir: /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmp4x8r3x3d, running initialization to predict.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Calling model_fn.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - Calling model_fn.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Running infer on CPU\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - Running infer on 
CPU\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:*** Features ***\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - *** Features ***\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = input_ids, shape = (?, 128)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - name = input_ids, shape = (?, 128)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = input_mask, shape = (?, 128)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - name = input_mask, shape = (?, 128)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = masked_lm_ids, shape = (?, 20)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - name = masked_lm_ids, shape = (?, 20)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = masked_lm_positions, shape = (?, 20)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - name = masked_lm_positions, shape = (?, 20)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = masked_lm_weights, shape = (?, 20)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - name = masked_lm_weights, shape = (?, 20)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = next_sentence_labels, shape = (?, 1)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - name = next_sentence_labels, shape = (?, 1)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = segment_ids, shape = (?, 128)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:40 - INFO - tensorflow - name = segment_ids, shape = (?, 128)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:**** Trainable Variables ****\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - **** Trainable Variables ****\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/embeddings/word_embeddings:0, shape = (30522, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/embeddings/word_embeddings:0, shape = (30522, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/embeddings/token_type_embeddings:0, shape = (2, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/embeddings/token_type_embeddings:0, shape = (2, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/embeddings/position_embeddings:0, shape = (512, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/embeddings/position_embeddings:0, shape = (512, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/embeddings/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { 
"name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/embeddings/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/embeddings/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/embeddings/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ 
"INFO:tensorflow: name = bert/encoder/layer_0/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_0/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_0/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = 
bert/encoder/layer_1/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = 
bert/encoder/layer_1/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_1/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_1/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/self/value/kernel:0, shape = (768, 
768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": 
"stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_2/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_2/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", 
"output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_3/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_3/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - 
tensorflow - name = bert/encoder/layer_4/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ 
"INFO:tensorflow: name = bert/encoder/layer_4/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_4/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_4/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = 
bert/encoder/layer_5/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = 
bert/encoder/layer_5/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_5/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_5/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_6/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_6/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_6/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_6/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_6/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_6/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = bert/encoder/layer_6/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = bert/encoder/layer_6/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = 
bert/encoder/layer_6/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n" ] }, [... identical stdout/stderr log pairs omitted for the remaining *INIT_FROM_CKPT* variables: the attention query/key/value and output dense kernels (768, 768) with biases (768,), LayerNorm beta/gamma (768,), intermediate dense kernel (768, 3072) with bias (3072,), and output dense kernel (3072, 768) with bias (768,) for each of encoder layers 6 through 11; plus bert/pooler/dense (768, 768)/(768,), the cls/predictions/transform dense (768, 768)/(768,) and LayerNorm (768,) weights, and cls/predictions/output_bias (30522,) ...] { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = cls/seq_relationship/output_weights:0, shape = (2, 768), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = cls/seq_relationship/output_weights:0, shape = (2, 768),
*INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow: name = cls/seq_relationship/output_bias:0, shape = (2,), *INIT_FROM_CKPT*\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - name = cls/seq_relationship/output_bias:0, shape = (2,), *INIT_FROM_CKPT*\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Done calling model_fn.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:43 - INFO - tensorflow - Done calling model_fn.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Graph was finalized.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:44 - INFO - tensorflow - Graph was finalized.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Running local_init_op.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:45 - INFO - tensorflow - Running local_init_op.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Done running local_init_op.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:45 - INFO - tensorflow - Done running local_init_op.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:prediction_loop marked as finished\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:46 - INFO - tensorflow - prediction_loop marked as finished\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:prediction_loop marked as finished\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:02:46 - INFO - tensorflow - prediction_loop marked as finished\n" ] } ], "source": [ "tensorflow_all_out = []\n", "for result in estimator.predict(input_fn, yield_single_examples=True):\n", " tensorflow_all_out.append(result)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:46.634304Z", "start_time": "2018-11-16T10:02:46.598800Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1\n", "2\n", "dict_keys(['masked_lm_predictions', 'next_sentence_predictions'])\n", "masked_lm_predictions [27227 1010 1010 1010 1010 1010 1010 1010 1010 1010 1010 1010\n", " 1010 1010 1010 1010 1010 1010 1010 1010]\n", "predicted token ['henson', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',', ',']\n" ] } ], "source": [ "print(len(tensorflow_all_out))\n", "print(len(tensorflow_all_out[0]))\n", "print(tensorflow_all_out[0].keys())\n", "print(\"masked_lm_predictions\", tensorflow_all_out[0]['masked_lm_predictions'])\n", "print(\"predicted token\", tokenizer.convert_ids_to_tokens(tensorflow_all_out[0]['masked_lm_predictions']))" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:02:46.671229Z", "start_time": "2018-11-16T10:02:46.637102Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensorflow_output: ['henson']\n" ] } ], "source": [ "tensorflow_outputs = tokenizer.convert_ids_to_tokens(tensorflow_all_out[0]['masked_lm_predictions'])[:len(masked_lm_positions)]\n", "print(\"tensorflow_output:\", tensorflow_outputs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2/ PyTorch code" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "ExecuteTime": { "end_time": 
"2018-11-16T10:03:03.556557Z", "start_time": "2018-11-16T10:03:03.519654Z" } }, "outputs": [], "source": [ "from examples import extract_features\n", "from examples.extract_features import *" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:03:03.952710Z", "start_time": "2018-11-16T10:03:03.921917Z" } }, "outputs": [], "source": [ "init_checkpoint_pt = \"../google_models/uncased_L-12_H-768_A-12/pytorch_model.bin\"" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:03:12.307673Z", "start_time": "2018-11-16T10:03:04.439317Z" }, "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "11/16/2018 11:03:05 - INFO - pytorch_pretrained_bert.modeling - loading archive file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz from cache at /Users/thomaswolf/.pytorch_pretrained_bert/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba\n", "11/16/2018 11:03:05 - INFO - pytorch_pretrained_bert.modeling - extracting archive file /Users/thomaswolf/.pytorch_pretrained_bert/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba to temp dir /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmpaqgsm566\n", "11/16/2018 11:03:08 - INFO - pytorch_pretrained_bert.modeling - Model config {\n", " \"attention_probs_dropout_prob\": 0.1,\n", " \"hidden_act\": \"gelu\",\n", " \"hidden_dropout_prob\": 0.1,\n", " \"hidden_size\": 768,\n", " \"initializer_range\": 0.02,\n", " \"intermediate_size\": 3072,\n", " \"max_position_embeddings\": 512,\n", " \"num_attention_heads\": 12,\n", " \"num_hidden_layers\": 12,\n", " \"type_vocab_size\": 2,\n", " \"vocab_size\": 30522\n", "}\n", "\n" ] }, { "data": { "text/plain": [ "BertForPreTraining(\n", " (bert): BertModel(\n", " (embeddings): BertEmbeddings(\n", " (word_embeddings): Embedding(30522, 768)\n", " (position_embeddings): Embedding(512, 768)\n", " (token_type_embeddings): Embedding(2, 768)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (encoder): BertEncoder(\n", " (layer): ModuleList(\n", " (0): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (1): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " 
(LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", [... (2) through (9): BertLayer blocks identical to (0) and (1), omitted for brevity ...] " (10): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): 
Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (11): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " )\n", " )\n", " (pooler): BertPooler(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (activation): Tanh()\n", " )\n", " )\n", " (cls): BertPreTrainingHeads(\n", " (predictions): BertLMPredictionHead(\n", " (transform): BertPredictionHeadTransform(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " )\n", " (decoder): Linear(in_features=768, out_features=30522, bias=False)\n", " )\n", " (seq_relationship): Linear(in_features=768, out_features=2, bias=True)\n", " )\n", ")" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "device = torch.device(\"cpu\")\n", "model = ppb.BertForPreTraining.from_pretrained('bert-base-uncased')\n", "model.to(device)" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:03:12.351625Z", "start_time": "2018-11-16T10:03:12.310736Z" }, "code_folding": [] }, "outputs": [ { "data": { "text/plain": [ "BertForPreTraining(\n", " (bert): BertModel(\n", " (embeddings): BertEmbeddings(\n", " (word_embeddings): Embedding(30522, 768)\n", " (position_embeddings): Embedding(512, 768)\n", " (token_type_embeddings): Embedding(2, 768)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (encoder): BertEncoder(\n", " (layer): ModuleList(\n", " (0): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): 
Dropout(p=0.1)\n", " )\n", " )\n", [... (1) through (8): BertLayer blocks identical to (0), omitted -- this output repeats verbatim the model repr printed by the previous cell ...] " (9): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): 
Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (10): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (11): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " )\n", " )\n", " (pooler): BertPooler(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (activation): Tanh()\n", " )\n", " )\n", " (cls): BertPreTrainingHeads(\n", " (predictions): BertLMPredictionHead(\n", " (transform): BertPredictionHeadTransform(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " )\n", " (decoder): Linear(in_features=768, out_features=30522, bias=False)\n", " )\n", " (seq_relationship): Linear(in_features=768, out_features=2, bias=True)\n", " )\n", ")" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n", "all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n", "all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n", "all_masked_lm_positions = torch.tensor([f.masked_lm_positions for f in features], dtype=torch.long)\n", "\n", "eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_masked_lm_positions)\n", "eval_sampler = SequentialSampler(eval_data)\n", "eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)\n", "\n", "model.eval()" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:03:12.792741Z", "start_time": "2018-11-16T10:03:12.354253Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[ 2040, 2001, 3958, 27227, 1029, 3958, 103, 2001, 1037, 13997,\n", " 11510, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0]])\n", "tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0]])\n", "tensor([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0]])\n", "(1, 20, 30522)\n", "[27227, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010, 1010]\n" ] } ], "source": [ "import numpy as np\n", "pytorch_all_out = []\n", "for input_ids, input_mask, segment_ids, tensor_masked_lm_positions in eval_dataloader:\n", " print(input_ids)\n", " print(input_mask)\n", " print(segment_ids)\n", " input_ids = input_ids.to(device)\n", " input_mask = input_mask.to(device)\n", " segment_ids = segment_ids.to(device)\n", "\n", " prediction_scores, _ = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask)\n", " prediction_scores = prediction_scores[0, tensor_masked_lm_positions].detach().cpu().numpy()\n", " print(prediction_scores.shape)\n", " masked_lm_predictions = np.argmax(prediction_scores, axis=-1).squeeze().tolist()\n", " print(masked_lm_predictions)\n", " pytorch_all_out.append(masked_lm_predictions)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "ExecuteTime": { "end_time": "2018-11-16T10:03:12.828439Z", "start_time": "2018-11-16T10:03:12.795420Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "pytorch_output: ['henson']\n", "tensorflow_output: ['henson']\n" ] } ], "source": [ "pytorch_outputs = tokenizer.convert_ids_to_tokens(pytorch_all_out[0])[:len(masked_lm_positions)]\n", "print(\"pytorch_output:\", pytorch_outputs)\n", "print(\"tensorflow_output:\", tensorflow_outputs)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "hide_input": false, "kernelspec": { "display_name": "Python [default]", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" }, "toc": { "colors": { "hover_highlight": "#DAA520", "running_highlight": "#FF0000", "selected_highlight": "#FFD700" }, "moveMenuLeft": true, "nav_menu": { "height": "48px", "width": "252px" }, "navigate_menu": true, "number_sections": true, "sideBar": true, "threshold": 4, "toc_cell": false, "toc_section_display": "block", "toc_window_display": false } }, "nbformat": 4, "nbformat_minor": 2 } 
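The cells above reduce to a single check: feed the masked sentence "who was jim henson ? jim [MASK] was a puppeteer" through the converted `bert-base-uncased` weights and confirm that PyTorch recovers the same masked token ('henson') that TensorFlow does. Below is a minimal, self-contained sketch of the PyTorch side of that check, using this repo's `pytorch_pretrained_bert` API. The token ids and segment ids mirror the notebook's printed tensors; the variable names and the omission of padding (the notebook pads the batch out to its max sequence length) are illustrative simplifications, not the notebook's exact preprocessing.

```python
# Minimal sketch of the masked-LM check performed above. Padding is omitted
# as a simplification: with no attention_mask passed, the model defaults to
# attending over every position, which is correct for an unpadded input.
import torch
from pytorch_pretrained_bert import BertTokenizer, BertForPreTraining

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForPreTraining.from_pretrained('bert-base-uncased')
model.eval()

# "who was jim henson ? jim [MASK] was a puppet ##eer"
# (ids 2040 2001 3958 27227 1029 3958 103 2001 1037 13997 11510 above)
tokens = ['who', 'was', 'jim', 'henson', '?',
          'jim', '[MASK]', 'was', 'a', 'puppet', '##eer']
input_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])
# Segment 0 = the question, segment 1 = the masked sentence, as in the
# notebook's segment_ids tensor.
segment_ids = torch.tensor([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
masked_position = tokens.index('[MASK]')  # position 6

with torch.no_grad():
    # With no labels passed, BertForPreTraining returns
    # (masked-LM scores, next-sentence scores) instead of a loss.
    prediction_scores, _ = model(input_ids, token_type_ids=segment_ids)

predicted_id = prediction_scores[0, masked_position].argmax().item()
# Should print ['henson'], matching both notebook outputs above.
print(tokenizer.convert_ids_to_tokens([predicted_id]))
```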
================================================ FILE: notebooks/Comparing-TF-and-PT-models-SQuAD.ipynb ================================================ { "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Comparing TensorFlow (original) and PyTorch model on the SQuAD task\n", "\n", "You can use this small notebook to check that the loss computation carries over from the TensorFlow model to the PyTorch model. In the following, we compare the total loss computed by the models starting from identical initializations (position prediction linear layers with weights at 1 and bias at 0).\n", "\n", "To run this notebook, follow these instructions:\n", "- make sure that your Python environment has both TensorFlow and PyTorch installed,\n", "- download the original TensorFlow implementation,\n", "- download a pre-trained TensorFlow model as indicated in the TensorFlow implementation readme,\n", "- run the script `convert_tf_checkpoint_to_pytorch.py` as indicated in the `README` to convert the pre-trained TensorFlow model to PyTorch.\n", "\n", "If needed, change the relative paths indicated in this notebook (at the beginning of Sections 1 and 2) to point to the relevant models and code." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:33.636911Z", "start_time": "2018-11-06T10:11:33.623091Z" } }, "outputs": [], "source": [ "import os\n", "os.chdir('../')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1/ TensorFlow code" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:33.651792Z", "start_time": "2018-11-06T10:11:33.638984Z" } }, "outputs": [], "source": [ "original_tf_inplem_dir = \"./tensorflow_code/\"\n", "model_dir = \"../google_models/uncased_L-12_H-768_A-12/\"\n", "\n", "vocab_file = model_dir + \"vocab.txt\"\n", "bert_config_file = model_dir + \"bert_config.json\"\n", "init_checkpoint = model_dir + \"bert_model.ckpt\"\n", "\n", "input_file = \"../data/squad_data/train-v1.1.json\"\n", "max_seq_length = 384\n", "outside_pos = max_seq_length + 10\n", "doc_stride = 128\n", "max_query_length = 64\n", "max_answer_length = 30\n", "output_dir = \"/tmp/squad_base/\"\n", "learning_rate = 3e-5" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:35.165788Z", "start_time": "2018-11-06T10:11:33.653401Z" } }, "outputs": [], "source": [ "import importlib.util\n", "import sys\n", "\n", "spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/modeling.py')\n", "module = importlib.util.module_from_spec(spec)\n", "spec.loader.exec_module(module)\n", "sys.modules['modeling_tensorflow'] = module\n", "\n", "spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/run_squad.py')\n", "module = importlib.util.module_from_spec(spec)\n", "spec.loader.exec_module(module)\n", "sys.modules['run_squad_tensorflow'] = module\n", "import modeling_tensorflow\n", "from run_squad_tensorflow import *" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:37.494391Z", "start_time": "2018-11-06T10:11:35.168615Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000000\n", "INFO:tensorflow:example_index: 0\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] to whom did the virgin mary allegedly appear in 1858 in lou ##rdes
france ? [SEP] architectural ##ly , the school has a catholic character . atop the main building ' s gold dome is a golden statue of the virgin mary . immediately in front of the main building and facing it , is a copper statue of christ with arms up ##rai ##sed with the legend \" ve ##ni ##te ad me om ##nes \" . next to the main building is the basilica of the sacred heart . immediately behind the basilica is the gr ##otto , a marian place of prayer and reflection . it is a replica of the gr ##otto at lou ##rdes , france where the virgin mary reputed ##ly appeared to saint bern ##ade ##tte so ##ub ##iro ##us in 1858 . at the end of the main drive ( and in a direct line that connects through 3 statues and the gold dome ) , is a simple , modern stone statue of mary . [SEP]\n", "INFO:tensorflow:token_to_orig_map: 17:0 18:0 19:0 20:1 21:2 22:3 23:4 24:5 25:6 26:6 27:7 28:8 29:9 30:10 31:10 32:10 33:11 34:12 35:13 36:14 37:15 38:16 39:17 40:18 41:19 42:20 43:20 44:21 45:22 46:23 47:24 48:25 49:26 50:27 51:28 52:29 53:30 54:30 55:31 56:32 57:33 58:34 59:35 60:36 61:37 62:38 63:39 64:39 65:39 66:40 67:41 68:42 69:43 70:43 71:43 72:43 73:44 74:45 75:46 76:46 77:46 78:46 79:47 80:48 81:49 82:50 83:51 84:52 85:53 86:54 87:55 88:56 89:57 90:58 91:58 92:59 93:60 94:61 95:62 96:63 97:64 98:65 99:65 100:65 101:66 102:67 103:68 104:69 105:70 106:71 107:72 108:72 109:73 110:74 111:75 112:76 113:77 114:78 115:79 116:79 117:80 118:81 119:81 120:81 121:82 122:83 123:84 124:85 125:86 126:87 127:87 128:88 129:89 130:90 131:91 132:91 133:91 134:92 135:92 136:92 137:92 138:93 139:94 140:94 141:95 142:96 143:97 144:98 145:99 146:100 147:101 148:102 149:102 150:103 151:104 152:105 153:106 154:107 155:108 156:109 157:110 158:111 159:112 160:113 161:114 162:115 163:115 164:115 165:116 166:117 167:118 168:118 169:119 170:120 171:121 172:122 173:123 174:123\n", "INFO:tensorflow:token_is_max_context: 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True 172:True 173:True 174:True\n", "INFO:tensorflow:input_ids: 101 2000 3183 2106 1996 6261 2984 9382 3711 1999 8517 1999 10223 26371 2605 1029 102 6549 2135 1010 1996 2082 2038 1037 3234 2839 1012 10234 1996 2364 2311 1005 1055 2751 8514 2003 1037 3585 6231 1997 1996 6261 2984 1012 3202 1999 2392 1997 1996 2364 2311 1998 5307 2009 1010 
2003 1037 6967 6231 1997 4828 2007 2608 2039 14995 6924 2007 1996 5722 1000 2310 3490 2618 4748 2033 18168 5267 1000 1012 2279 2000 1996 2364 2311 2003 1996 13546 1997 1996 6730 2540 1012 3202 2369 1996 13546 2003 1996 24665 23052 1010 1037 14042 2173 1997 7083 1998 9185 1012 2009 2003 1037 15059 1997 1996 24665 23052 2012 10223 26371 1010 2605 2073 1996 6261 2984 22353 2135 2596 2000 3002 16595 9648 4674 2061 12083 9711 2271 1999 8517 1012 2012 1996 2203 1997 1996 2364 3298 1006 1998 1999 1037 3622 2240 2008 8539 2083 1017 11342 1998 1996 2751 8514 1007 1010 2003 1037 3722 1010 2715 2962 6231 1997 2984 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 130\n", "INFO:tensorflow:end_position: 137\n", "INFO:tensorflow:answer: saint bern ##ade ##tte so ##ub ##iro ##us\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000001\n", "INFO:tensorflow:example_index: 1\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] what is in front of the notre dame main building ? [SEP] architectural ##ly , the school has a catholic character . atop the main building ' s gold dome is a golden statue of the virgin mary . immediately in front of the main building and facing it , is a copper statue of christ with arms up ##rai ##sed with the legend \" ve ##ni ##te ad me om ##nes \" . next to the main building is the basilica of the sacred heart . immediately behind the basilica is the gr ##otto , a marian place of prayer and reflection . 
it is a replica of the gr ##otto at lou ##rdes , france where the virgin mary reputed ##ly appeared to saint bern ##ade ##tte so ##ub ##iro ##us in 1858 . at the end of the main drive ( and in a direct line that connects through 3 statues and the gold dome ) , is a simple , modern stone statue of mary . [SEP]\n", "INFO:tensorflow:token_to_orig_map: 13:0 14:0 15:0 16:1 17:2 18:3 19:4 20:5 21:6 22:6 23:7 24:8 25:9 26:10 27:10 28:10 29:11 30:12 31:13 32:14 33:15 34:16 35:17 36:18 37:19 38:20 39:20 40:21 41:22 42:23 43:24 44:25 45:26 46:27 47:28 48:29 49:30 50:30 51:31 52:32 53:33 54:34 55:35 56:36 57:37 58:38 59:39 60:39 61:39 62:40 63:41 64:42 65:43 66:43 67:43 68:43 69:44 70:45 71:46 72:46 73:46 74:46 75:47 76:48 77:49 78:50 79:51 80:52 81:53 82:54 83:55 84:56 85:57 86:58 87:58 88:59 89:60 90:61 91:62 92:63 93:64 94:65 95:65 96:65 97:66 98:67 99:68 100:69 101:70 102:71 103:72 104:72 105:73 106:74 107:75 108:76 109:77 110:78 111:79 112:79 113:80 114:81 115:81 116:81 117:82 118:83 119:84 120:85 121:86 122:87 123:87 124:88 125:89 126:90 127:91 128:91 129:91 130:92 131:92 132:92 133:92 134:93 135:94 136:94 137:95 138:96 139:97 140:98 141:99 142:100 143:101 144:102 145:102 146:103 147:104 148:105 149:106 150:107 151:108 152:109 153:110 154:111 155:112 156:113 157:114 158:115 159:115 160:115 161:116 162:117 163:118 164:118 165:119 166:120 167:121 168:122 169:123 170:123\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_is_max_context: 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True\n", "INFO:tensorflow:input_ids: 101 2054 2003 1999 2392 1997 1996 10289 8214 2364 2311 1029 102 6549 2135 1010 1996 2082 2038 1037 3234 2839 1012 10234 1996 2364 2311 1005 1055 2751 8514 2003 1037 3585 6231 1997 1996 6261 2984 1012 3202 1999 2392 1997 1996 2364 2311 1998 5307 2009 1010 2003 1037 6967 6231 1997 4828 2007 2608 2039 14995 6924 2007 1996 5722 1000 2310 3490 2618 4748 2033 18168 5267 1000 1012 2279 2000 1996 2364 2311 2003 1996 13546 1997 1996 6730 2540 1012 3202 2369 1996 13546 2003 1996 24665 23052 1010 1037 14042 2173 1997 7083 1998 9185 1012 2009 2003 1037 15059 1997 1996 24665 23052 2012 10223 26371 1010 2605 2073 1996 6261 2984 22353 2135 2596 2000 3002 16595 9648 4674 2061 12083 9711 2271 1999 8517 1012 
2012 1996 2203 1997 1996 2364 3298 1006 1998 1999 1037 3622 2240 2008 8539 2083 1017 11342 1998 1996 2751 8514 1007 1010 2003 1037 3722 1010 2715 2962 6231 1997 2984 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 52\n", "INFO:tensorflow:end_position: 56\n", "INFO:tensorflow:answer: a copper statue of christ\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000002\n", "INFO:tensorflow:example_index: 2\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] the basilica of the sacred heart at notre dame is beside to which structure ? [SEP] architectural ##ly , the school has a catholic character . atop the main building ' s gold dome is a golden statue of the virgin mary . immediately in front of the main building and facing it , is a copper statue of christ with arms up ##rai ##sed with the legend \" ve ##ni ##te ad me om ##nes \" . next to the main building is the basilica of the sacred heart . immediately behind the basilica is the gr ##otto , a marian place of prayer and reflection . it is a replica of the gr ##otto at lou ##rdes , france where the virgin mary reputed ##ly appeared to saint bern ##ade ##tte so ##ub ##iro ##us in 1858 . at the end of the main drive ( and in a direct line that connects through 3 statues and the gold dome ) , is a simple , modern stone statue of mary . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 17:0 18:0 19:0 20:1 21:2 22:3 23:4 24:5 25:6 26:6 27:7 28:8 29:9 30:10 31:10 32:10 33:11 34:12 35:13 36:14 37:15 38:16 39:17 40:18 41:19 42:20 43:20 44:21 45:22 46:23 47:24 48:25 49:26 50:27 51:28 52:29 53:30 54:30 55:31 56:32 57:33 58:34 59:35 60:36 61:37 62:38 63:39 64:39 65:39 66:40 67:41 68:42 69:43 70:43 71:43 72:43 73:44 74:45 75:46 76:46 77:46 78:46 79:47 80:48 81:49 82:50 83:51 84:52 85:53 86:54 87:55 88:56 89:57 90:58 91:58 92:59 93:60 94:61 95:62 96:63 97:64 98:65 99:65 100:65 101:66 102:67 103:68 104:69 105:70 106:71 107:72 108:72 109:73 110:74 111:75 112:76 113:77 114:78 115:79 116:79 117:80 118:81 119:81 120:81 121:82 122:83 123:84 124:85 125:86 126:87 127:87 128:88 129:89 130:90 131:91 132:91 133:91 134:92 135:92 136:92 137:92 138:93 139:94 140:94 141:95 142:96 143:97 144:98 145:99 146:100 147:101 148:102 149:102 150:103 151:104 152:105 153:106 154:107 155:108 156:109 157:110 158:111 159:112 160:113 161:114 162:115 163:115 164:115 165:116 166:117 167:118 168:118 169:119 170:120 171:121 172:122 173:123 174:123\n", "INFO:tensorflow:token_is_max_context: 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True 172:True 173:True 174:True\n", "INFO:tensorflow:input_ids: 101 1996 13546 1997 1996 6730 2540 2012 10289 8214 2003 3875 2000 2029 3252 1029 102 6549 2135 1010 1996 2082 2038 1037 3234 2839 1012 10234 1996 2364 2311 1005 1055 2751 8514 2003 1037 3585 6231 1997 1996 6261 2984 1012 3202 1999 2392 1997 1996 2364 2311 1998 5307 2009 1010 2003 1037 6967 6231 1997 4828 2007 2608 2039 14995 6924 2007 1996 5722 1000 2310 3490 2618 4748 2033 18168 5267 1000 1012 2279 2000 1996 2364 2311 2003 1996 13546 1997 1996 6730 2540 1012 3202 2369 1996 13546 2003 1996 24665 23052 1010 1037 14042 2173 1997 7083 1998 9185 1012 2009 2003 1037 15059 1997 1996 24665 23052 2012 10223 26371 1010 2605 2073 1996 6261 2984 22353 2135 2596 2000 3002 16595 9648 4674 2061 12083 9711 2271 1999 8517 1012 2012 1996 2203 1997 1996 2364 3298 1006 1998 1999 1037 3622 2240 2008 8539 2083 1017 11342 1998 1996 2751 8514 1007 1010 2003 1037 3722 1010 2715 2962 6231 1997 2984 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 81\n", "INFO:tensorflow:end_position: 83\n", "INFO:tensorflow:answer: the main building\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000003\n", "INFO:tensorflow:example_index: 3\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] what is the gr ##otto at notre dame ? [SEP] architectural ##ly , the school has a catholic character . atop the main building ' s gold dome is a golden statue of the virgin mary . immediately in front of the main building and facing it , is a copper statue of christ with arms up ##rai ##sed with the legend \" ve ##ni ##te ad me om ##nes \" . next to the main building is the basilica of the sacred heart . immediately behind the basilica is the gr ##otto , a marian place of prayer and reflection . it is a replica of the gr ##otto at lou ##rdes , france where the virgin mary reputed ##ly appeared to saint bern ##ade ##tte so ##ub ##iro ##us in 1858 . at the end of the main drive ( and in a direct line that connects through 3 statues and the gold dome ) , is a simple , modern stone statue of mary . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 11:0 12:0 13:0 14:1 15:2 16:3 17:4 18:5 19:6 20:6 21:7 22:8 23:9 24:10 25:10 26:10 27:11 28:12 29:13 30:14 31:15 32:16 33:17 34:18 35:19 36:20 37:20 38:21 39:22 40:23 41:24 42:25 43:26 44:27 45:28 46:29 47:30 48:30 49:31 50:32 51:33 52:34 53:35 54:36 55:37 56:38 57:39 58:39 59:39 60:40 61:41 62:42 63:43 64:43 65:43 66:43 67:44 68:45 69:46 70:46 71:46 72:46 73:47 74:48 75:49 76:50 77:51 78:52 79:53 80:54 81:55 82:56 83:57 84:58 85:58 86:59 87:60 88:61 89:62 90:63 91:64 92:65 93:65 94:65 95:66 96:67 97:68 98:69 99:70 100:71 101:72 102:72 103:73 104:74 105:75 106:76 107:77 108:78 109:79 110:79 111:80 112:81 113:81 114:81 115:82 116:83 117:84 118:85 119:86 120:87 121:87 122:88 123:89 124:90 125:91 126:91 127:91 128:92 129:92 130:92 131:92 132:93 133:94 134:94 135:95 136:96 137:97 138:98 139:99 140:100 141:101 142:102 143:102 144:103 145:104 146:105 147:106 148:107 149:108 150:109 151:110 152:111 153:112 154:113 155:114 156:115 157:115 158:115 159:116 160:117 161:118 162:118 163:119 164:120 165:121 166:122 167:123 168:123\n", "INFO:tensorflow:token_is_max_context: 11:True 12:True 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True\n", "INFO:tensorflow:input_ids: 101 2054 2003 1996 24665 23052 2012 10289 8214 1029 102 6549 2135 1010 1996 2082 2038 1037 3234 2839 1012 10234 1996 2364 2311 1005 1055 2751 8514 2003 1037 3585 6231 1997 1996 6261 2984 1012 3202 1999 2392 1997 1996 2364 2311 1998 5307 2009 1010 2003 1037 6967 6231 1997 4828 2007 2608 2039 14995 6924 2007 1996 5722 1000 2310 3490 2618 4748 2033 18168 5267 1000 1012 2279 2000 1996 2364 2311 2003 1996 13546 1997 1996 6730 2540 1012 3202 2369 1996 13546 2003 1996 24665 23052 1010 1037 14042 2173 1997 7083 1998 9185 1012 2009 2003 1037 15059 1997 1996 24665 23052 2012 10223 26371 1010 2605 2073 1996 6261 2984 22353 2135 2596 2000 3002 16595 9648 4674 2061 12083 9711 2271 1999 8517 1012 2012 1996 2203 1997 1996 2364 3298 1006 1998 1999 1037 3622 2240 2008 8539 2083 1017 11342 1998 1996 2751 8514 1007 1010 2003 1037 3722 1010 2715 2962 6231 1997 2984 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 95\n", "INFO:tensorflow:end_position: 101\n", "INFO:tensorflow:answer: a marian place of prayer and reflection\n", "INFO:tensorflow:*** Example ***\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:unique_id: 1000000004\n", "INFO:tensorflow:example_index: 4\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] what sits on top of the main building at notre dame ? [SEP] architectural ##ly , the school has a catholic character . atop the main building ' s gold dome is a golden statue of the virgin mary . immediately in front of the main building and facing it , is a copper statue of christ with arms up ##rai ##sed with the legend \" ve ##ni ##te ad me om ##nes \" . next to the main building is the basilica of the sacred heart . immediately behind the basilica is the gr ##otto , a marian place of prayer and reflection . it is a replica of the gr ##otto at lou ##rdes , france where the virgin mary reputed ##ly appeared to saint bern ##ade ##tte so ##ub ##iro ##us in 1858 . at the end of the main drive ( and in a direct line that connects through 3 statues and the gold dome ) , is a simple , modern stone statue of mary . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 14:0 15:0 16:0 17:1 18:2 19:3 20:4 21:5 22:6 23:6 24:7 25:8 26:9 27:10 28:10 29:10 30:11 31:12 32:13 33:14 34:15 35:16 36:17 37:18 38:19 39:20 40:20 41:21 42:22 43:23 44:24 45:25 46:26 47:27 48:28 49:29 50:30 51:30 52:31 53:32 54:33 55:34 56:35 57:36 58:37 59:38 60:39 61:39 62:39 63:40 64:41 65:42 66:43 67:43 68:43 69:43 70:44 71:45 72:46 73:46 74:46 75:46 76:47 77:48 78:49 79:50 80:51 81:52 82:53 83:54 84:55 85:56 86:57 87:58 88:58 89:59 90:60 91:61 92:62 93:63 94:64 95:65 96:65 97:65 98:66 99:67 100:68 101:69 102:70 103:71 104:72 105:72 106:73 107:74 108:75 109:76 110:77 111:78 112:79 113:79 114:80 115:81 116:81 117:81 118:82 119:83 120:84 121:85 122:86 123:87 124:87 125:88 126:89 127:90 128:91 129:91 130:91 131:92 132:92 133:92 134:92 135:93 136:94 137:94 138:95 139:96 140:97 141:98 142:99 143:100 144:101 145:102 146:102 147:103 148:104 149:105 150:106 151:107 152:108 153:109 154:110 155:111 156:112 157:113 158:114 159:115 160:115 161:115 162:116 163:117 164:118 165:118 166:119 167:120 168:121 169:122 170:123 171:123\n", "INFO:tensorflow:token_is_max_context: 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True\n", "INFO:tensorflow:input_ids: 101 2054 7719 2006 2327 1997 1996 2364 2311 2012 10289 8214 1029 102 6549 2135 1010 1996 2082 2038 1037 3234 2839 1012 10234 1996 2364 2311 1005 1055 2751 8514 2003 1037 3585 6231 1997 1996 6261 2984 1012 3202 1999 2392 1997 1996 2364 2311 1998 5307 2009 1010 2003 1037 6967 6231 1997 4828 2007 2608 2039 14995 6924 2007 1996 5722 1000 2310 3490 2618 4748 2033 18168 5267 1000 1012 2279 2000 1996 2364 2311 2003 1996 13546 1997 1996 6730 2540 1012 3202 2369 1996 13546 2003 1996 24665 23052 1010 1037 14042 2173 1997 7083 1998 9185 1012 2009 2003 1037 15059 1997 1996 24665 23052 2012 10223 26371 1010 2605 2073 1996 6261 2984 22353 2135 2596 2000 3002 16595 9648 4674 2061 12083 9711 2271 1999 8517 1012 2012 1996 2203 1997 1996 2364 3298 1006 1998 1999 1037 3622 2240 2008 8539 2083 1017 11342 1998 1996 2751 8514 1007 1010 2003 1037 3722 1010 2715 2962 6231 1997 2984 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 33\n", "INFO:tensorflow:end_position: 39\n", "INFO:tensorflow:answer: a golden statue of the virgin mary\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000005\n", "INFO:tensorflow:example_index: 5\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] when did the scholastic magazine of notre dame begin publishing ? [SEP] as at most other universities , notre dame ' s students run a number of news media outlets . the nine student - run outlets include three newspapers , both a radio and television station , and several magazines and journals . begun as a one - page journal in september 1876 , the scholastic magazine is issued twice monthly and claims to be the oldest continuous collegiate publication in the united states . the other magazine , the jug ##gler , is released twice a year and focuses on student literature and artwork . the dome yearbook is published annually . the newspapers have varying publication interests , with the observer published daily and mainly reporting university and other news , and staffed by students from both notre dame and saint mary ' s college . unlike scholastic and the dome , the observer is an independent publication and does not have a faculty advisor or any editorial oversight from the university . in 1987 , when some students believed that the observer began to show a conservative bias , a liberal newspaper , common sense was published . likewise , in 2003 , when other students believed that the paper showed a liberal bias , the conservative paper irish rover went into production . 
neither paper is published as often as the observer ; however , all three are distributed to all students . finally , in spring 2008 an undergraduate journal for political science research , beyond politics , made its debut . [SEP]\n", "INFO:tensorflow:token_to_orig_map: 13:0 14:1 15:2 16:3 17:4 18:4 19:5 20:6 21:6 22:6 23:7 24:8 25:9 26:10 27:11 28:12 29:13 30:14 31:14 32:15 33:16 34:17 35:17 36:17 37:18 38:19 39:20 40:21 41:21 42:22 43:23 44:24 45:25 46:26 47:27 48:27 49:28 50:29 51:30 52:31 53:32 54:32 55:33 56:34 57:35 58:36 59:36 60:36 61:37 62:38 63:39 64:40 65:40 66:41 67:42 68:43 69:44 70:45 71:46 72:47 73:48 74:49 75:50 76:51 77:52 78:53 79:54 80:55 81:56 82:57 83:58 84:59 85:60 86:60 87:61 88:62 89:63 90:63 91:64 92:65 93:65 94:65 95:66 96:67 97:68 98:69 99:70 100:71 101:72 102:73 103:74 104:75 105:76 106:77 107:77 108:78 109:79 110:80 111:81 112:82 113:83 114:83 115:84 116:85 117:86 118:87 119:88 120:89 121:89 122:90 123:91 124:92 125:93 126:94 127:95 128:96 129:97 130:98 131:99 132:100 133:101 134:101 135:102 136:103 137:104 138:105 139:106 140:107 141:108 142:109 143:110 144:111 145:112 146:112 147:112 148:113 149:113 150:114 151:115 152:116 153:117 154:118 155:118 156:119 157:120 158:121 159:122 160:123 161:124 162:125 163:126 164:127 165:128 166:129 167:130 168:131 169:132 170:133 171:134 172:135 173:136 174:137 175:138 176:138 177:139 178:140 179:140 180:141 181:142 182:143 183:144 184:145 185:146 186:147 187:148 188:149 189:150 190:151 191:152 192:153 193:153 194:154 195:155 196:156 197:156 198:157 199:158 200:159 201:160 202:160 203:161 204:161 205:162 206:163 207:163 208:164 209:165 210:166 211:167 212:168 213:169 214:170 215:171 216:172 217:173 218:174 219:174 220:175 221:176 222:177 223:178 224:179 225:180 226:181 227:182 228:182 229:183 230:184 231:185 232:186 233:187 234:188 235:189 236:190 237:191 238:191 239:192 240:192 241:193 242:194 243:195 244:196 245:197 246:198 247:199 248:199 249:200 250:200 251:201 252:202 253:203 254:204 255:205 256:206 257:207 258:208 259:209 260:210 261:210 262:211 263:212 264:212 265:213 266:214 267:215 268:215\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_is_max_context: 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True 
172:True 173:True 174:True 175:True 176:True 177:True 178:True 179:True 180:True 181:True 182:True 183:True 184:True 185:True 186:True 187:True 188:True 189:True 190:True 191:True 192:True 193:True 194:True 195:True 196:True 197:True 198:True 199:True 200:True 201:True 202:True 203:True 204:True 205:True 206:True 207:True 208:True 209:True 210:True 211:True 212:True 213:True 214:True 215:True 216:True 217:True 218:True 219:True 220:True 221:True 222:True 223:True 224:True 225:True 226:True 227:True 228:True 229:True 230:True 231:True 232:True 233:True 234:True 235:True 236:True 237:True 238:True 239:True 240:True 241:True 242:True 243:True 244:True 245:True 246:True 247:True 248:True 249:True 250:True 251:True 252:True 253:True 254:True 255:True 256:True 257:True 258:True 259:True 260:True 261:True 262:True 263:True 264:True 265:True 266:True 267:True 268:True\n", "INFO:tensorflow:input_ids: 101 2043 2106 1996 24105 2932 1997 10289 8214 4088 4640 1029 102 2004 2012 2087 2060 5534 1010 10289 8214 1005 1055 2493 2448 1037 2193 1997 2739 2865 11730 1012 1996 3157 3076 1011 2448 11730 2421 2093 6399 1010 2119 1037 2557 1998 2547 2276 1010 1998 2195 7298 1998 9263 1012 5625 2004 1037 2028 1011 3931 3485 1999 2244 7326 1010 1996 24105 2932 2003 3843 3807 7058 1998 4447 2000 2022 1996 4587 7142 9234 4772 1999 1996 2142 2163 1012 1996 2060 2932 1010 1996 26536 17420 1010 2003 2207 3807 1037 2095 1998 7679 2006 3076 3906 1998 8266 1012 1996 8514 24803 2003 2405 6604 1012 1996 6399 2031 9671 4772 5426 1010 2007 1996 9718 2405 3679 1998 3701 7316 2118 1998 2060 2739 1010 1998 21121 2011 2493 2013 2119 10289 8214 1998 3002 2984 1005 1055 2267 1012 4406 24105 1998 1996 8514 1010 1996 9718 2003 2019 2981 4772 1998 2515 2025 2031 1037 4513 8619 2030 2151 8368 15709 2013 1996 2118 1012 1999 3055 1010 2043 2070 2493 3373 2008 1996 9718 2211 2000 2265 1037 4603 13827 1010 1037 4314 3780 1010 2691 3168 2001 2405 1012 10655 1010 1999 2494 1010 2043 2060 2493 3373 2008 1996 3259 3662 1037 4314 13827 1010 1996 4603 3259 3493 13631 2253 2046 2537 1012 4445 3259 2003 2405 2004 2411 2004 1996 9718 1025 2174 1010 2035 2093 2024 5500 2000 2035 2493 1012 2633 1010 1999 3500 2263 2019 8324 3485 2005 2576 2671 2470 1010 3458 4331 1010 2081 2049 2834 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 63\n", "INFO:tensorflow:end_position: 64\n", "INFO:tensorflow:answer: september 1876\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000006\n", "INFO:tensorflow:example_index: 6\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] how often is notre dame ' s the jug ##gler published ? [SEP] as at most other universities , notre dame ' s students run a number of news media outlets . the nine student - run outlets include three newspapers , both a radio and television station , and several magazines and journals . begun as a one - page journal in september 1876 , the scholastic magazine is issued twice monthly and claims to be the oldest continuous collegiate publication in the united states . the other magazine , the jug ##gler , is released twice a year and focuses on student literature and artwork . the dome yearbook is published annually . the newspapers have varying publication interests , with the observer published daily and mainly reporting university and other news , and staffed by students from both notre dame and saint mary ' s college . unlike scholastic and the dome , the observer is an independent publication and does not have a faculty advisor or any editorial oversight from the university . in 1987 , when some students believed that the observer began to show a conservative bias , a liberal newspaper , common sense was published . likewise , in 2003 , when other students believed that the paper showed a liberal bias , the conservative paper irish rover went into production . neither paper is published as often as the observer ; however , all three are distributed to all students . finally , in spring 2008 an undergraduate journal for political science research , beyond politics , made its debut . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 14:0 15:1 16:2 17:3 18:4 19:4 20:5 21:6 22:6 23:6 24:7 25:8 26:9 27:10 28:11 29:12 30:13 31:14 32:14 33:15 34:16 35:17 36:17 37:17 38:18 39:19 40:20 41:21 42:21 43:22 44:23 45:24 46:25 47:26 48:27 49:27 50:28 51:29 52:30 53:31 54:32 55:32 56:33 57:34 58:35 59:36 60:36 61:36 62:37 63:38 64:39 65:40 66:40 67:41 68:42 69:43 70:44 71:45 72:46 73:47 74:48 75:49 76:50 77:51 78:52 79:53 80:54 81:55 82:56 83:57 84:58 85:59 86:60 87:60 88:61 89:62 90:63 91:63 92:64 93:65 94:65 95:65 96:66 97:67 98:68 99:69 100:70 101:71 102:72 103:73 104:74 105:75 106:76 107:77 108:77 109:78 110:79 111:80 112:81 113:82 114:83 115:83 116:84 117:85 118:86 119:87 120:88 121:89 122:89 123:90 124:91 125:92 126:93 127:94 128:95 129:96 130:97 131:98 132:99 133:100 134:101 135:101 136:102 137:103 138:104 139:105 140:106 141:107 142:108 143:109 144:110 145:111 146:112 147:112 148:112 149:113 150:113 151:114 152:115 153:116 154:117 155:118 156:118 157:119 158:120 159:121 160:122 161:123 162:124 163:125 164:126 165:127 166:128 167:129 168:130 169:131 170:132 171:133 172:134 173:135 174:136 175:137 176:138 177:138 178:139 179:140 180:140 181:141 182:142 183:143 184:144 185:145 186:146 187:147 188:148 189:149 190:150 191:151 192:152 193:153 194:153 195:154 196:155 197:156 198:156 199:157 200:158 201:159 202:160 203:160 204:161 205:161 206:162 207:163 208:163 209:164 210:165 211:166 212:167 213:168 214:169 215:170 216:171 217:172 218:173 219:174 220:174 221:175 222:176 223:177 224:178 225:179 226:180 227:181 228:182 229:182 230:183 231:184 232:185 233:186 234:187 235:188 236:189 237:190 238:191 239:191 240:192 241:192 242:193 243:194 244:195 245:196 246:197 247:198 248:199 249:199 250:200 251:200 252:201 253:202 254:203 255:204 256:205 257:206 258:207 259:208 260:209 261:210 262:210 263:211 264:212 265:212 266:213 267:214 268:215 269:215\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_is_max_context: 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True 172:True 173:True 174:True 175:True 176:True 177:True 178:True 179:True 180:True 181:True 182:True 183:True 184:True 185:True 186:True 187:True 188:True 189:True 190:True 191:True 192:True 193:True 194:True 195:True 196:True 197:True 
198:True 199:True 200:True 201:True 202:True 203:True 204:True 205:True 206:True 207:True 208:True 209:True 210:True 211:True 212:True 213:True 214:True 215:True 216:True 217:True 218:True 219:True 220:True 221:True 222:True 223:True 224:True 225:True 226:True 227:True 228:True 229:True 230:True 231:True 232:True 233:True 234:True 235:True 236:True 237:True 238:True 239:True 240:True 241:True 242:True 243:True 244:True 245:True 246:True 247:True 248:True 249:True 250:True 251:True 252:True 253:True 254:True 255:True 256:True 257:True 258:True 259:True 260:True 261:True 262:True 263:True 264:True 265:True 266:True 267:True 268:True 269:True\n", "INFO:tensorflow:input_ids: 101 2129 2411 2003 10289 8214 1005 1055 1996 26536 17420 2405 1029 102 2004 2012 2087 2060 5534 1010 10289 8214 1005 1055 2493 2448 1037 2193 1997 2739 2865 11730 1012 1996 3157 3076 1011 2448 11730 2421 2093 6399 1010 2119 1037 2557 1998 2547 2276 1010 1998 2195 7298 1998 9263 1012 5625 2004 1037 2028 1011 3931 3485 1999 2244 7326 1010 1996 24105 2932 2003 3843 3807 7058 1998 4447 2000 2022 1996 4587 7142 9234 4772 1999 1996 2142 2163 1012 1996 2060 2932 1010 1996 26536 17420 1010 2003 2207 3807 1037 2095 1998 7679 2006 3076 3906 1998 8266 1012 1996 8514 24803 2003 2405 6604 1012 1996 6399 2031 9671 4772 5426 1010 2007 1996 9718 2405 3679 1998 3701 7316 2118 1998 2060 2739 1010 1998 21121 2011 2493 2013 2119 10289 8214 1998 3002 2984 1005 1055 2267 1012 4406 24105 1998 1996 8514 1010 1996 9718 2003 2019 2981 4772 1998 2515 2025 2031 1037 4513 8619 2030 2151 8368 15709 2013 1996 2118 1012 1999 3055 1010 2043 2070 2493 3373 2008 1996 9718 2211 2000 2265 1037 4603 13827 1010 1037 4314 3780 1010 2691 3168 2001 2405 1012 10655 1010 1999 2494 1010 2043 2060 2493 3373 2008 1996 3259 3662 1037 4314 13827 1010 1996 4603 3259 3493 13631 2253 2046 2537 1012 4445 3259 2003 2405 2004 2411 2004 1996 9718 1025 2174 1010 2035 2093 2024 5500 2000 2035 2493 1012 2633 1010 1999 3500 2263 2019 8324 3485 2005 2576 2671 2470 1010 3458 4331 1010 2081 2049 2834 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 98\n", "INFO:tensorflow:end_position: 98\n", "INFO:tensorflow:answer: twice\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000007\n", "INFO:tensorflow:example_index: 7\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] what is the daily student paper at notre dame called ? [SEP] as at most other universities , notre dame ' s students run a number of news media outlets . the nine student - run outlets include three newspapers , both a radio and television station , and several magazines and journals . begun as a one - page journal in september 1876 , the scholastic magazine is issued twice monthly and claims to be the oldest continuous collegiate publication in the united states . the other magazine , the jug ##gler , is released twice a year and focuses on student literature and artwork . the dome yearbook is published annually . the newspapers have varying publication interests , with the observer published daily and mainly reporting university and other news , and staffed by students from both notre dame and saint mary ' s college . unlike scholastic and the dome , the observer is an independent publication and does not have a faculty advisor or any editorial oversight from the university . in 1987 , when some students believed that the observer began to show a conservative bias , a liberal newspaper , common sense was published . likewise , in 2003 , when other students believed that the paper showed a liberal bias , the conservative paper irish rover went into production . neither paper is published as often as the observer ; however , all three are distributed to all students . finally , in spring 2008 an undergraduate journal for political science research , beyond politics , made its debut . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 13:0 14:1 15:2 16:3 17:4 18:4 19:5 20:6 21:6 22:6 23:7 24:8 25:9 26:10 27:11 28:12 29:13 30:14 31:14 32:15 33:16 34:17 35:17 36:17 37:18 38:19 39:20 40:21 41:21 42:22 43:23 44:24 45:25 46:26 47:27 48:27 49:28 50:29 51:30 52:31 53:32 54:32 55:33 56:34 57:35 58:36 59:36 60:36 61:37 62:38 63:39 64:40 65:40 66:41 67:42 68:43 69:44 70:45 71:46 72:47 73:48 74:49 75:50 76:51 77:52 78:53 79:54 80:55 81:56 82:57 83:58 84:59 85:60 86:60 87:61 88:62 89:63 90:63 91:64 92:65 93:65 94:65 95:66 96:67 97:68 98:69 99:70 100:71 101:72 102:73 103:74 104:75 105:76 106:77 107:77 108:78 109:79 110:80 111:81 112:82 113:83 114:83 115:84 116:85 117:86 118:87 119:88 120:89 121:89 122:90 123:91 124:92 125:93 126:94 127:95 128:96 129:97 130:98 131:99 132:100 133:101 134:101 135:102 136:103 137:104 138:105 139:106 140:107 141:108 142:109 143:110 144:111 145:112 146:112 147:112 148:113 149:113 150:114 151:115 152:116 153:117 154:118 155:118 156:119 157:120 158:121 159:122 160:123 161:124 162:125 163:126 164:127 165:128 166:129 167:130 168:131 169:132 170:133 171:134 172:135 173:136 174:137 175:138 176:138 177:139 178:140 179:140 180:141 181:142 182:143 183:144 184:145 185:146 186:147 187:148 188:149 189:150 190:151 191:152 192:153 193:153 194:154 195:155 196:156 197:156 198:157 199:158 200:159 201:160 202:160 203:161 204:161 205:162 206:163 207:163 208:164 209:165 210:166 211:167 212:168 213:169 214:170 215:171 216:172 217:173 218:174 219:174 220:175 221:176 222:177 223:178 224:179 225:180 226:181 227:182 228:182 229:183 230:184 231:185 232:186 233:187 234:188 235:189 236:190 237:191 238:191 239:192 240:192 241:193 242:194 243:195 244:196 245:197 246:198 247:199 248:199 249:200 250:200 251:201 252:202 253:203 254:204 255:205 256:206 257:207 258:208 259:209 260:210 261:210 262:211 263:212 264:212 265:213 266:214 267:215 268:215\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_is_max_context: 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True 172:True 173:True 174:True 175:True 176:True 177:True 178:True 179:True 180:True 181:True 182:True 183:True 184:True 185:True 186:True 187:True 188:True 189:True 190:True 191:True 192:True 193:True 194:True 195:True 196:True 197:True 
198:True 199:True 200:True 201:True 202:True 203:True 204:True 205:True 206:True 207:True 208:True 209:True 210:True 211:True 212:True 213:True 214:True 215:True 216:True 217:True 218:True 219:True 220:True 221:True 222:True 223:True 224:True 225:True 226:True 227:True 228:True 229:True 230:True 231:True 232:True 233:True 234:True 235:True 236:True 237:True 238:True 239:True 240:True 241:True 242:True 243:True 244:True 245:True 246:True 247:True 248:True 249:True 250:True 251:True 252:True 253:True 254:True 255:True 256:True 257:True 258:True 259:True 260:True 261:True 262:True 263:True 264:True 265:True 266:True 267:True 268:True\n", "INFO:tensorflow:input_ids: 101 2054 2003 1996 3679 3076 3259 2012 10289 8214 2170 1029 102 2004 2012 2087 2060 5534 1010 10289 8214 1005 1055 2493 2448 1037 2193 1997 2739 2865 11730 1012 1996 3157 3076 1011 2448 11730 2421 2093 6399 1010 2119 1037 2557 1998 2547 2276 1010 1998 2195 7298 1998 9263 1012 5625 2004 1037 2028 1011 3931 3485 1999 2244 7326 1010 1996 24105 2932 2003 3843 3807 7058 1998 4447 2000 2022 1996 4587 7142 9234 4772 1999 1996 2142 2163 1012 1996 2060 2932 1010 1996 26536 17420 1010 2003 2207 3807 1037 2095 1998 7679 2006 3076 3906 1998 8266 1012 1996 8514 24803 2003 2405 6604 1012 1996 6399 2031 9671 4772 5426 1010 2007 1996 9718 2405 3679 1998 3701 7316 2118 1998 2060 2739 1010 1998 21121 2011 2493 2013 2119 10289 8214 1998 3002 2984 1005 1055 2267 1012 4406 24105 1998 1996 8514 1010 1996 9718 2003 2019 2981 4772 1998 2515 2025 2031 1037 4513 8619 2030 2151 8368 15709 2013 1996 2118 1012 1999 3055 1010 2043 2070 2493 3373 2008 1996 9718 2211 2000 2265 1037 4603 13827 1010 1037 4314 3780 1010 2691 3168 2001 2405 1012 10655 1010 1999 2494 1010 2043 2060 2493 3373 2008 1996 3259 3662 1037 4314 13827 1010 1996 4603 3259 3493 13631 2253 2046 2537 1012 4445 3259 2003 2405 2004 2411 2004 1996 9718 1025 2174 1010 2035 2093 2024 5500 2000 2035 2493 1012 2633 1010 1999 3500 2263 2019 8324 3485 2005 2576 2671 2470 1010 3458 4331 1010 2081 2049 2834 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 123\n", "INFO:tensorflow:end_position: 124\n", "INFO:tensorflow:answer: the observer\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000008\n", "INFO:tensorflow:example_index: 8\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] how many student news papers are found at notre dame ? [SEP] as at most other universities , notre dame ' s students run a number of news media outlets . the nine student - run outlets include three newspapers , both a radio and television station , and several magazines and journals . begun as a one - page journal in september 1876 , the scholastic magazine is issued twice monthly and claims to be the oldest continuous collegiate publication in the united states . the other magazine , the jug ##gler , is released twice a year and focuses on student literature and artwork . the dome yearbook is published annually . the newspapers have varying publication interests , with the observer published daily and mainly reporting university and other news , and staffed by students from both notre dame and saint mary ' s college . unlike scholastic and the dome , the observer is an independent publication and does not have a faculty advisor or any editorial oversight from the university . in 1987 , when some students believed that the observer began to show a conservative bias , a liberal newspaper , common sense was published . likewise , in 2003 , when other students believed that the paper showed a liberal bias , the conservative paper irish rover went into production . neither paper is published as often as the observer ; however , all three are distributed to all students . finally , in spring 2008 an undergraduate journal for political science research , beyond politics , made its debut . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 13:0 14:1 15:2 16:3 17:4 18:4 19:5 20:6 21:6 22:6 23:7 24:8 25:9 26:10 27:11 28:12 29:13 30:14 31:14 32:15 33:16 34:17 35:17 36:17 37:18 38:19 39:20 40:21 41:21 42:22 43:23 44:24 45:25 46:26 47:27 48:27 49:28 50:29 51:30 52:31 53:32 54:32 55:33 56:34 57:35 58:36 59:36 60:36 61:37 62:38 63:39 64:40 65:40 66:41 67:42 68:43 69:44 70:45 71:46 72:47 73:48 74:49 75:50 76:51 77:52 78:53 79:54 80:55 81:56 82:57 83:58 84:59 85:60 86:60 87:61 88:62 89:63 90:63 91:64 92:65 93:65 94:65 95:66 96:67 97:68 98:69 99:70 100:71 101:72 102:73 103:74 104:75 105:76 106:77 107:77 108:78 109:79 110:80 111:81 112:82 113:83 114:83 115:84 116:85 117:86 118:87 119:88 120:89 121:89 122:90 123:91 124:92 125:93 126:94 127:95 128:96 129:97 130:98 131:99 132:100 133:101 134:101 135:102 136:103 137:104 138:105 139:106 140:107 141:108 142:109 143:110 144:111 145:112 146:112 147:112 148:113 149:113 150:114 151:115 152:116 153:117 154:118 155:118 156:119 157:120 158:121 159:122 160:123 161:124 162:125 163:126 164:127 165:128 166:129 167:130 168:131 169:132 170:133 171:134 172:135 173:136 174:137 175:138 176:138 177:139 178:140 179:140 180:141 181:142 182:143 183:144 184:145 185:146 186:147 187:148 188:149 189:150 190:151 191:152 192:153 193:153 194:154 195:155 196:156 197:156 198:157 199:158 200:159 201:160 202:160 203:161 204:161 205:162 206:163 207:163 208:164 209:165 210:166 211:167 212:168 213:169 214:170 215:171 216:172 217:173 218:174 219:174 220:175 221:176 222:177 223:178 224:179 225:180 226:181 227:182 228:182 229:183 230:184 231:185 232:186 233:187 234:188 235:189 236:190 237:191 238:191 239:192 240:192 241:193 242:194 243:195 244:196 245:197 246:198 247:199 248:199 249:200 250:200 251:201 252:202 253:203 254:204 255:205 256:206 257:207 258:208 259:209 260:210 261:210 262:211 263:212 264:212 265:213 266:214 267:215 268:215\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_is_max_context: 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True 172:True 173:True 174:True 175:True 176:True 177:True 178:True 179:True 180:True 181:True 182:True 183:True 184:True 185:True 186:True 187:True 188:True 189:True 190:True 191:True 192:True 193:True 194:True 195:True 196:True 197:True 
198:True 199:True 200:True 201:True 202:True 203:True 204:True 205:True 206:True 207:True 208:True 209:True 210:True 211:True 212:True 213:True 214:True 215:True 216:True 217:True 218:True 219:True 220:True 221:True 222:True 223:True 224:True 225:True 226:True 227:True 228:True 229:True 230:True 231:True 232:True 233:True 234:True 235:True 236:True 237:True 238:True 239:True 240:True 241:True 242:True 243:True 244:True 245:True 246:True 247:True 248:True 249:True 250:True 251:True 252:True 253:True 254:True 255:True 256:True 257:True 258:True 259:True 260:True 261:True 262:True 263:True 264:True 265:True 266:True 267:True 268:True\n", "INFO:tensorflow:input_ids: 101 2129 2116 3076 2739 4981 2024 2179 2012 10289 8214 1029 102 2004 2012 2087 2060 5534 1010 10289 8214 1005 1055 2493 2448 1037 2193 1997 2739 2865 11730 1012 1996 3157 3076 1011 2448 11730 2421 2093 6399 1010 2119 1037 2557 1998 2547 2276 1010 1998 2195 7298 1998 9263 1012 5625 2004 1037 2028 1011 3931 3485 1999 2244 7326 1010 1996 24105 2932 2003 3843 3807 7058 1998 4447 2000 2022 1996 4587 7142 9234 4772 1999 1996 2142 2163 1012 1996 2060 2932 1010 1996 26536 17420 1010 2003 2207 3807 1037 2095 1998 7679 2006 3076 3906 1998 8266 1012 1996 8514 24803 2003 2405 6604 1012 1996 6399 2031 9671 4772 5426 1010 2007 1996 9718 2405 3679 1998 3701 7316 2118 1998 2060 2739 1010 1998 21121 2011 2493 2013 2119 10289 8214 1998 3002 2984 1005 1055 2267 1012 4406 24105 1998 1996 8514 1010 1996 9718 2003 2019 2981 4772 1998 2515 2025 2031 1037 4513 8619 2030 2151 8368 15709 2013 1996 2118 1012 1999 3055 1010 2043 2070 2493 3373 2008 1996 9718 2211 2000 2265 1037 4603 13827 1010 1037 4314 3780 1010 2691 3168 2001 2405 1012 10655 1010 1999 2494 1010 2043 2060 2493 3373 2008 1996 3259 3662 1037 4314 13827 1010 1996 4603 3259 3493 13631 2253 2046 2537 1012 4445 3259 2003 2405 2004 2411 2004 1996 9718 1025 2174 1010 2035 2093 2024 5500 2000 2035 2493 1012 2633 1010 1999 3500 2263 2019 8324 3485 2005 2576 2671 2470 1010 3458 4331 1010 2081 2049 2834 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 39\n", "INFO:tensorflow:end_position: 39\n", "INFO:tensorflow:answer: three\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000009\n", "INFO:tensorflow:example_index: 9\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] in what year did the student paper common sense begin publication at notre dame ? [SEP] as at most other universities , notre dame ' s students run a number of news media outlets . the nine student - run outlets include three newspapers , both a radio and television station , and several magazines and journals . begun as a one - page journal in september 1876 , the scholastic magazine is issued twice monthly and claims to be the oldest continuous collegiate publication in the united states . the other magazine , the jug ##gler , is released twice a year and focuses on student literature and artwork . the dome yearbook is published annually . the newspapers have varying publication interests , with the observer published daily and mainly reporting university and other news , and staffed by students from both notre dame and saint mary ' s college . unlike scholastic and the dome , the observer is an independent publication and does not have a faculty advisor or any editorial oversight from the university . in 1987 , when some students believed that the observer began to show a conservative bias , a liberal newspaper , common sense was published . likewise , in 2003 , when other students believed that the paper showed a liberal bias , the conservative paper irish rover went into production . neither paper is published as often as the observer ; however , all three are distributed to all students . finally , in spring 2008 an undergraduate journal for political science research , beyond politics , made its debut . 
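
The feature records logged here all share one fixed-width layout: input_ids holds the word-piece ids of [CLS] question [SEP] passage [SEP], zero-padded out to max_seq_length (384 in these runs); input_mask is 1 over real tokens and 0 over padding; segment_ids is 0 over [CLS], the question, and the first [SEP], then 1 over the passage and its closing [SEP]. A minimal sketch of that packing step, assuming word-piece token lists are already available (a real pipeline would map the tokens to ids with BertTokenizer.convert_tokens_to_ids; the helper name pack_features is illustrative, not from the repo):

def pack_features(query_tokens, doc_tokens, max_seq_length=384):
    """Pack question + passage word pieces into the fixed-width layout above."""
    tokens = ["[CLS]"] + list(query_tokens) + ["[SEP]"]   # segment 0
    segment_ids = [0] * len(tokens)
    tokens += list(doc_tokens) + ["[SEP]"]                # segment 1
    segment_ids += [1] * (len(doc_tokens) + 1)
    input_mask = [1] * len(tokens)                        # 1 over real tokens
    pad = max_seq_length - len(tokens)
    tokens += ["[PAD]"] * pad                             # right-pad to fixed width
    input_mask += [0] * pad
    segment_ids += [0] * pad
    return tokens, input_mask, segment_ids

tokens, input_mask, segment_ids = pack_features(
    "how many student news papers are found at notre dame ?".split(),
    "the nine student - run outlets include three newspapers .".split())
assert len(tokens) == len(input_mask) == len(segment_ids) == 384
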
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 17:0 18:1 19:2 20:3 21:4 22:4 23:5 24:6 25:6 26:6 27:7 28:8 29:9 30:10 31:11 32:12 33:13 34:14 35:14 36:15 37:16 38:17 39:17 40:17 41:18 42:19 43:20 44:21 45:21 46:22 47:23 48:24 49:25 50:26 51:27 52:27 53:28 54:29 55:30 56:31 57:32 58:32 59:33 60:34 61:35 62:36 63:36 64:36 65:37 66:38 67:39 68:40 69:40 70:41 71:42 72:43 73:44 74:45 75:46 76:47 77:48 78:49 79:50 80:51 81:52 82:53 83:54 84:55 85:56 86:57 87:58 88:59 89:60 90:60 91:61 92:62 93:63 94:63 95:64 96:65 97:65 98:65 99:66 100:67 101:68 102:69 103:70 104:71 105:72 106:73 107:74 108:75 109:76 110:77 111:77 112:78 113:79 114:80 115:81 116:82 117:83 118:83 119:84 120:85 121:86 122:87 123:88 124:89 125:89 126:90 127:91 128:92 129:93 130:94 131:95 132:96 133:97 134:98 135:99 136:100 137:101 138:101 139:102 140:103 141:104 142:105 143:106 144:107 145:108 146:109 147:110 148:111 149:112 150:112 151:112 152:113 153:113 154:114 155:115 156:116 157:117 158:118 159:118 160:119 161:120 162:121 163:122 164:123 165:124 166:125 167:126 168:127 169:128 170:129 171:130 172:131 173:132 174:133 175:134 176:135 177:136 178:137 179:138 180:138 181:139 182:140 183:140 184:141 185:142 186:143 187:144 188:145 189:146 190:147 191:148 192:149 193:150 194:151 195:152 196:153 197:153 198:154 199:155 200:156 201:156 202:157 203:158 204:159 205:160 206:160 207:161 208:161 209:162 210:163 211:163 212:164 213:165 214:166 215:167 216:168 217:169 218:170 219:171 220:172 221:173 222:174 223:174 224:175 225:176 226:177 227:178 228:179 229:180 230:181 231:182 232:182 233:183 234:184 235:185 236:186 237:187 238:188 239:189 240:190 241:191 242:191 243:192 244:192 245:193 246:194 247:195 248:196 249:197 250:198 251:199 252:199 253:200 254:200 255:201 256:202 257:203 258:204 259:205 260:206 261:207 262:208 263:209 264:210 265:210 266:211 267:212 268:212 269:213 270:214 271:215 272:215\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_is_max_context: 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True 160:True 161:True 162:True 163:True 164:True 165:True 166:True 167:True 168:True 169:True 170:True 171:True 172:True 173:True 174:True 175:True 176:True 177:True 178:True 179:True 180:True 181:True 182:True 183:True 184:True 185:True 186:True 187:True 188:True 189:True 190:True 191:True 192:True 193:True 194:True 195:True 196:True 197:True 198:True 199:True 200:True 
201:True 202:True 203:True 204:True 205:True 206:True 207:True 208:True 209:True 210:True 211:True 212:True 213:True 214:True 215:True 216:True 217:True 218:True 219:True 220:True 221:True 222:True 223:True 224:True 225:True 226:True 227:True 228:True 229:True 230:True 231:True 232:True 233:True 234:True 235:True 236:True 237:True 238:True 239:True 240:True 241:True 242:True 243:True 244:True 245:True 246:True 247:True 248:True 249:True 250:True 251:True 252:True 253:True 254:True 255:True 256:True 257:True 258:True 259:True 260:True 261:True 262:True 263:True 264:True 265:True 266:True 267:True 268:True 269:True 270:True 271:True 272:True\n", "INFO:tensorflow:input_ids: 101 1999 2054 2095 2106 1996 3076 3259 2691 3168 4088 4772 2012 10289 8214 1029 102 2004 2012 2087 2060 5534 1010 10289 8214 1005 1055 2493 2448 1037 2193 1997 2739 2865 11730 1012 1996 3157 3076 1011 2448 11730 2421 2093 6399 1010 2119 1037 2557 1998 2547 2276 1010 1998 2195 7298 1998 9263 1012 5625 2004 1037 2028 1011 3931 3485 1999 2244 7326 1010 1996 24105 2932 2003 3843 3807 7058 1998 4447 2000 2022 1996 4587 7142 9234 4772 1999 1996 2142 2163 1012 1996 2060 2932 1010 1996 26536 17420 1010 2003 2207 3807 1037 2095 1998 7679 2006 3076 3906 1998 8266 1012 1996 8514 24803 2003 2405 6604 1012 1996 6399 2031 9671 4772 5426 1010 2007 1996 9718 2405 3679 1998 3701 7316 2118 1998 2060 2739 1010 1998 21121 2011 2493 2013 2119 10289 8214 1998 3002 2984 1005 1055 2267 1012 4406 24105 1998 1996 8514 1010 1996 9718 2003 2019 2981 4772 1998 2515 2025 2031 1037 4513 8619 2030 2151 8368 15709 2013 1996 2118 1012 1999 3055 1010 2043 2070 2493 3373 2008 1996 9718 2211 2000 2265 1037 4603 13827 1010 1037 4314 3780 1010 2691 3168 2001 2405 1012 10655 1010 1999 2494 1010 2043 2060 2493 3373 2008 1996 3259 3662 1037 4314 13827 1010 1996 4603 3259 3493 13631 2253 2046 2537 1012 4445 3259 2003 2405 2004 2411 2004 1996 9718 1025 2174 1010 2035 2093 2024 5500 2000 2035 2493 1012 2633 1010 1999 3500 2263 2019 8324 3485 2005 2576 2671 2470 1010 3458 4331 1010 2081 2049 2834 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 182\n", "INFO:tensorflow:end_position: 182\n", "INFO:tensorflow:answer: 1987\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000010\n", "INFO:tensorflow:example_index: 10\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] where is the headquarters of the congregation of the holy cross ? [SEP] the university is the major seat of the congregation of holy cross ( albeit not its official headquarters , which are in rome ) . its main seminary , more ##au seminary , is located on the campus across st . joseph lake from the main building . old college , the oldest building on campus and located near the shore of st . mary lake , houses undergraduate seminar ##ians . retired priests and brothers reside in fatima house ( a former retreat center ) , holy cross house , as well as col ##umb ##a hall near the gr ##otto . the university through the more ##au seminary has ties to theologian frederick bu ##ech ##ner . while not catholic , bu ##ech ##ner has praised writers from notre dame and more ##au seminary created a bu ##ech ##ner prize for preaching . [SEP]\n", "INFO:tensorflow:token_to_orig_map: 14:0 15:1 16:2 17:3 18:4 19:5 20:6 21:7 22:8 23:9 24:10 25:11 26:12 27:12 28:13 29:14 30:15 31:16 32:16 33:17 34:18 35:19 36:20 37:20 38:20 39:21 40:22 41:23 42:23 43:24 44:24 45:25 46:25 47:26 48:27 49:28 50:29 51:30 52:31 53:32 54:32 55:33 56:34 57:35 58:36 59:37 60:38 61:38 62:39 63:40 64:40 65:41 66:42 67:43 68:44 69:45 70:46 71:47 72:48 73:49 74:50 75:51 76:52 77:52 78:53 79:54 80:54 81:55 82:56 83:57 84:57 85:57 86:58 87:59 88:60 89:61 90:62 91:63 92:64 93:65 94:66 95:66 96:67 97:68 98:69 99:69 100:69 101:70 102:71 103:72 104:72 105:73 106:74 107:75 108:76 109:76 110:76 111:77 112:78 113:79 114:80 115:80 116:80 117:81 118:82 119:83 120:84 121:85 122:85 123:86 124:87 125:88 126:89 127:90 128:91 129:92 130:92 131:92 132:92 133:93 134:94 135:95 136:95 137:96 138:96 139:96 140:97 141:98 142:99 143:100 144:101 145:102 146:103 147:104 148:104 149:105 150:106 151:107 152:108 153:108 154:108 155:109 156:110 157:111 158:111\n", "INFO:tensorflow:token_is_max_context: 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 
140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:input_ids: 101 2073 2003 1996 4075 1997 1996 7769 1997 1996 4151 2892 1029 102 1996 2118 2003 1996 2350 2835 1997 1996 7769 1997 4151 2892 1006 12167 2025 2049 2880 4075 1010 2029 2024 1999 4199 1007 1012 2049 2364 8705 1010 2062 4887 8705 1010 2003 2284 2006 1996 3721 2408 2358 1012 3312 2697 2013 1996 2364 2311 1012 2214 2267 1010 1996 4587 2311 2006 3721 1998 2284 2379 1996 5370 1997 2358 1012 2984 2697 1010 3506 8324 18014 7066 1012 3394 8656 1998 3428 13960 1999 27596 2160 1006 1037 2280 7822 2415 1007 1010 4151 2892 2160 1010 2004 2092 2004 8902 25438 2050 2534 2379 1996 24665 23052 1012 1996 2118 2083 1996 2062 4887 8705 2038 7208 2000 17200 5406 20934 15937 3678 1012 2096 2025 3234 1010 20934 15937 3678 2038 5868 4898 2013 10289 8214 1998 2062 4887 8705 2580 1037 20934 15937 3678 3396 2005 17979 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 36\n", "INFO:tensorflow:end_position: 36\n", "INFO:tensorflow:answer: rome\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000011\n", "INFO:tensorflow:example_index: 11\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] what is the primary seminary of the congregation of the holy cross ? 
[SEP] the university is the major seat of the congregation of holy cross ( albeit not its official headquarters , which are in rome ) . its main seminary , more ##au seminary , is located on the campus across st . joseph lake from the main building . old college , the oldest building on campus and located near the shore of st . mary lake , houses undergraduate seminar ##ians . retired priests and brothers reside in fatima house ( a former retreat center ) , holy cross house , as well as col ##umb ##a hall near the gr ##otto . the university through the more ##au seminary has ties to theologian frederick bu ##ech ##ner . while not catholic , bu ##ech ##ner has praised writers from notre dame and more ##au seminary created a bu ##ech ##ner prize for preaching . [SEP]\n", "INFO:tensorflow:token_to_orig_map: 15:0 16:1 17:2 18:3 19:4 20:5 21:6 22:7 23:8 24:9 25:10 26:11 27:12 28:12 29:13 30:14 31:15 32:16 33:16 34:17 35:18 36:19 37:20 38:20 39:20 40:21 41:22 42:23 43:23 44:24 45:24 46:25 47:25 48:26 49:27 50:28 51:29 52:30 53:31 54:32 55:32 56:33 57:34 58:35 59:36 60:37 61:38 62:38 63:39 64:40 65:40 66:41 67:42 68:43 69:44 70:45 71:46 72:47 73:48 74:49 75:50 76:51 77:52 78:52 79:53 80:54 81:54 82:55 83:56 84:57 85:57 86:57 87:58 88:59 89:60 90:61 91:62 92:63 93:64 94:65 95:66 96:66 97:67 98:68 99:69 100:69 101:69 102:70 103:71 104:72 105:72 106:73 107:74 108:75 109:76 110:76 111:76 112:77 113:78 114:79 115:80 116:80 117:80 118:81 119:82 120:83 121:84 122:85 123:85 124:86 125:87 126:88 127:89 128:90 129:91 130:92 131:92 132:92 133:92 134:93 135:94 136:95 137:95 138:96 139:96 140:96 141:97 142:98 143:99 144:100 145:101 146:102 147:103 148:104 149:104 150:105 151:106 152:107 153:108 154:108 155:108 156:109 157:110 158:111 159:111\n", "INFO:tensorflow:token_is_max_context: 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True 157:True 158:True 159:True\n", "INFO:tensorflow:input_ids: 101 2054 2003 1996 3078 8705 1997 1996 7769 1997 1996 4151 2892 1029 102 1996 2118 2003 1996 2350 2835 1997 1996 7769 1997 4151 2892 1006 12167 2025 2049 2880 4075 1010 2029 2024 1999 4199 1007 1012 2049 2364 8705 1010 2062 4887 8705 1010 2003 2284 2006 1996 3721 2408 2358 1012 3312 2697 2013 1996 2364 2311 1012 2214 2267 1010 1996 4587 2311 2006 3721 1998 2284 2379 1996 5370 1997 2358 1012 2984 2697 1010 3506 8324 18014 7066 1012 3394 8656 1998 3428 13960 1999 27596 2160 1006 1037 2280 7822 2415 1007 
1010 4151 2892 2160 1010 2004 2092 2004 8902 25438 2050 2534 2379 1996 24665 23052 1012 1996 2118 2083 1996 2062 4887 8705 2038 7208 2000 17200 5406 20934 15937 3678 1012 2096 2025 3234 1010 20934 15937 3678 2038 5868 4898 2013 10289 8214 1998 2062 4887 8705 2580 1037 20934 15937 3678 3396 2005 17979 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 44\n", "INFO:tensorflow:end_position: 46\n", "INFO:tensorflow:answer: more ##au seminary\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000012\n", "INFO:tensorflow:example_index: 12\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] what is the oldest structure at notre dame ? [SEP] the university is the major seat of the congregation of holy cross ( albeit not its official headquarters , which are in rome ) . its main seminary , more ##au seminary , is located on the campus across st . joseph lake from the main building . old college , the oldest building on campus and located near the shore of st . mary lake , houses undergraduate seminar ##ians . retired priests and brothers reside in fatima house ( a former retreat center ) , holy cross house , as well as col ##umb ##a hall near the gr ##otto . the university through the more ##au seminary has ties to theologian frederick bu ##ech ##ner . while not catholic , bu ##ech ##ner has praised writers from notre dame and more ##au seminary created a bu ##ech ##ner prize for preaching . 
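
token_to_orig_map records, for every passage word piece, the index of the whitespace-separated word it came from; that is why consecutive pieces such as "more ##au" in these records map two feature positions to a single original index. A minimal sketch of building that map during sub-tokenization, where wordpiece_tokenize stands in for BertTokenizer.tokenize and the toy sub-tokenizer below is illustrative only:

def build_token_to_orig_map(doc_words, wordpiece_tokenize, offset):
    """Sub-tokenize each original word; every resulting piece remembers its word."""
    all_pieces, token_to_orig_map = [], {}
    for orig_index, word in enumerate(doc_words):
        for piece in wordpiece_tokenize(word):
            token_to_orig_map[offset + len(all_pieces)] = orig_index
            all_pieces.append(piece)
    return all_pieces, token_to_orig_map

# Toy sub-tokenizer splitting "moreau" the way the logs show ("more", "##au");
# offset is where the passage starts inside the packed feature tokens.
fake = {"moreau": ["more", "##au"]}
pieces, t2o = build_token_to_orig_map(
    ["its", "main", "seminary", ",", "moreau", "seminary"],
    lambda w: fake.get(w, [w]), offset=15)
assert t2o[19] == 4 and t2o[20] == 4   # both pieces of "moreau" -> word 4
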
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 11:0 12:1 13:2 14:3 15:4 16:5 17:6 18:7 19:8 20:9 21:10 22:11 23:12 24:12 25:13 26:14 27:15 28:16 29:16 30:17 31:18 32:19 33:20 34:20 35:20 36:21 37:22 38:23 39:23 40:24 41:24 42:25 43:25 44:26 45:27 46:28 47:29 48:30 49:31 50:32 51:32 52:33 53:34 54:35 55:36 56:37 57:38 58:38 59:39 60:40 61:40 62:41 63:42 64:43 65:44 66:45 67:46 68:47 69:48 70:49 71:50 72:51 73:52 74:52 75:53 76:54 77:54 78:55 79:56 80:57 81:57 82:57 83:58 84:59 85:60 86:61 87:62 88:63 89:64 90:65 91:66 92:66 93:67 94:68 95:69 96:69 97:69 98:70 99:71 100:72 101:72 102:73 103:74 104:75 105:76 106:76 107:76 108:77 109:78 110:79 111:80 112:80 113:80 114:81 115:82 116:83 117:84 118:85 119:85 120:86 121:87 122:88 123:89 124:90 125:91 126:92 127:92 128:92 129:92 130:93 131:94 132:95 133:95 134:96 135:96 136:96 137:97 138:98 139:99 140:100 141:101 142:102 143:103 144:104 145:104 146:105 147:106 148:107 149:108 150:108 151:108 152:109 153:110 154:111 155:111\n", "INFO:tensorflow:token_is_max_context: 11:True 12:True 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True\n", "INFO:tensorflow:input_ids: 101 2054 2003 1996 4587 3252 2012 10289 8214 1029 102 1996 2118 2003 1996 2350 2835 1997 1996 7769 1997 4151 2892 1006 12167 2025 2049 2880 4075 1010 2029 2024 1999 4199 1007 1012 2049 2364 8705 1010 2062 4887 8705 1010 2003 2284 2006 1996 3721 2408 2358 1012 3312 2697 2013 1996 2364 2311 1012 2214 2267 1010 1996 4587 2311 2006 3721 1998 2284 2379 1996 5370 1997 2358 1012 2984 2697 1010 3506 8324 18014 7066 1012 3394 8656 1998 3428 13960 1999 27596 2160 1006 1037 2280 7822 2415 1007 1010 4151 2892 2160 1010 2004 2092 2004 8902 25438 2050 2534 2379 1996 24665 23052 1012 1996 2118 2083 1996 2062 4887 8705 2038 7208 2000 17200 5406 20934 15937 3678 1012 2096 2025 3234 1010 20934 15937 3678 2038 5868 4898 2013 10289 8214 1998 2062 4887 8705 2580 1037 20934 15937 3678 3396 2005 17979 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 59\n", "INFO:tensorflow:end_position: 60\n", "INFO:tensorflow:answer: old college\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000013\n", "INFO:tensorflow:example_index: 13\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] what individuals live at fatima house at notre dame ? [SEP] the university is the major seat of the congregation of holy cross ( albeit not its official headquarters , which are in rome ) . its main seminary , more ##au seminary , is located on the campus across st . joseph lake from the main building . old college , the oldest building on campus and located near the shore of st . mary lake , houses undergraduate seminar ##ians . retired priests and brothers reside in fatima house ( a former retreat center ) , holy cross house , as well as col ##umb ##a hall near the gr ##otto . the university through the more ##au seminary has ties to theologian frederick bu ##ech ##ner . while not catholic , bu ##ech ##ner has praised writers from notre dame and more ##au seminary created a bu ##ech ##ner prize for preaching . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 12:0 13:1 14:2 15:3 16:4 17:5 18:6 19:7 20:8 21:9 22:10 23:11 24:12 25:12 26:13 27:14 28:15 29:16 30:16 31:17 32:18 33:19 34:20 35:20 36:20 37:21 38:22 39:23 40:23 41:24 42:24 43:25 44:25 45:26 46:27 47:28 48:29 49:30 50:31 51:32 52:32 53:33 54:34 55:35 56:36 57:37 58:38 59:38 60:39 61:40 62:40 63:41 64:42 65:43 66:44 67:45 68:46 69:47 70:48 71:49 72:50 73:51 74:52 75:52 76:53 77:54 78:54 79:55 80:56 81:57 82:57 83:57 84:58 85:59 86:60 87:61 88:62 89:63 90:64 91:65 92:66 93:66 94:67 95:68 96:69 97:69 98:69 99:70 100:71 101:72 102:72 103:73 104:74 105:75 106:76 107:76 108:76 109:77 110:78 111:79 112:80 113:80 114:80 115:81 116:82 117:83 118:84 119:85 120:85 121:86 122:87 123:88 124:89 125:90 126:91 127:92 128:92 129:92 130:92 131:93 132:94 133:95 134:95 135:96 136:96 137:96 138:97 139:98 140:99 141:100 142:101 143:102 144:103 145:104 146:104 147:105 148:106 149:107 150:108 151:108 152:108 153:109 154:110 155:111 156:111\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_is_max_context: 12:True 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True\n", "INFO:tensorflow:input_ids: 101 2054 3633 2444 2012 27596 2160 2012 10289 8214 1029 102 1996 2118 2003 1996 2350 2835 1997 1996 7769 1997 4151 2892 1006 12167 2025 2049 2880 4075 1010 2029 2024 1999 4199 1007 1012 2049 2364 8705 1010 2062 4887 8705 1010 2003 2284 2006 1996 3721 2408 2358 1012 3312 2697 2013 1996 2364 2311 1012 2214 2267 1010 1996 4587 2311 2006 3721 1998 2284 2379 1996 5370 1997 2358 1012 2984 2697 1010 3506 8324 18014 7066 1012 3394 8656 1998 3428 13960 1999 27596 2160 1006 1037 2280 7822 2415 1007 1010 4151 2892 2160 1010 2004 2092 2004 8902 25438 2050 2534 2379 1996 24665 23052 1012 1996 2118 2083 1996 2062 4887 8705 2038 7208 2000 17200 5406 20934 15937 3678 1012 2096 2025 3234 1010 20934 15937 3678 2038 5868 4898 2013 10289 8214 1998 2062 4887 8705 2580 1037 20934 15937 3678 3396 2005 17979 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 84\n", "INFO:tensorflow:end_position: 87\n", "INFO:tensorflow:answer: retired priests and brothers\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000014\n", "INFO:tensorflow:example_index: 14\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] which prize did frederick bu ##ech ##ner create ? [SEP] the university is the major seat of the congregation of holy cross ( albeit not its official headquarters , which are in rome ) . its main seminary , more ##au seminary , is located on the campus across st . joseph lake from the main building . old college , the oldest building on campus and located near the shore of st . mary lake , houses undergraduate seminar ##ians . retired priests and brothers reside in fatima house ( a former retreat center ) , holy cross house , as well as col ##umb ##a hall near the gr ##otto . the university through the more ##au seminary has ties to theologian frederick bu ##ech ##ner . while not catholic , bu ##ech ##ner has praised writers from notre dame and more ##au seminary created a bu ##ech ##ner prize for preaching . 
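
token_is_max_context is True at every position in these records because each passage fits in a single window (doc_span_index: 0). When a passage is longer than the window, it is split into overlapping doc spans, and a token that occurs in several spans is scored only in the span where it has the most surrounding context. A minimal sketch of that selection, mirroring the _check_is_max_context helper in examples/run_squad.py (the standalone DocSpan and the assert are illustrative):

import collections

DocSpan = collections.namedtuple("DocSpan", ["start", "length"])

def check_is_max_context(doc_spans, cur_span_index, position):
    """True if `position` gets its maximal context in span `cur_span_index`.

    Each containing span scores the token by
    min(left context, right context) + 0.01 * span length;
    the best-scoring span is the one whose logits are kept at prediction time.
    """
    best_score, best_span_index = None, None
    for span_index, doc_span in enumerate(doc_spans):
        end = doc_span.start + doc_span.length - 1
        if position < doc_span.start or position > end:
            continue
        num_left_context = position - doc_span.start
        num_right_context = end - position
        score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
        if best_score is None or score > best_score:
            best_score, best_span_index = score, span_index
    return cur_span_index == best_span_index

# With a single span covering the whole passage -- the case in the logs above --
# every in-span token is trivially max-context:
spans = [DocSpan(start=0, length=216)]
assert all(check_is_max_context(spans, 0, p) for p in range(216))
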
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 11:0 12:1 13:2 14:3 15:4 16:5 17:6 18:7 19:8 20:9 21:10 22:11 23:12 24:12 25:13 26:14 27:15 28:16 29:16 30:17 31:18 32:19 33:20 34:20 35:20 36:21 37:22 38:23 39:23 40:24 41:24 42:25 43:25 44:26 45:27 46:28 47:29 48:30 49:31 50:32 51:32 52:33 53:34 54:35 55:36 56:37 57:38 58:38 59:39 60:40 61:40 62:41 63:42 64:43 65:44 66:45 67:46 68:47 69:48 70:49 71:50 72:51 73:52 74:52 75:53 76:54 77:54 78:55 79:56 80:57 81:57 82:57 83:58 84:59 85:60 86:61 87:62 88:63 89:64 90:65 91:66 92:66 93:67 94:68 95:69 96:69 97:69 98:70 99:71 100:72 101:72 102:73 103:74 104:75 105:76 106:76 107:76 108:77 109:78 110:79 111:80 112:80 113:80 114:81 115:82 116:83 117:84 118:85 119:85 120:86 121:87 122:88 123:89 124:90 125:91 126:92 127:92 128:92 129:92 130:93 131:94 132:95 133:95 134:96 135:96 136:96 137:97 138:98 139:99 140:100 141:101 142:102 143:103 144:104 145:104 146:105 147:106 148:107 149:108 150:108 151:108 152:109 153:110 154:111 155:111\n", "INFO:tensorflow:token_is_max_context: 11:True 12:True 13:True 14:True 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True\n", "INFO:tensorflow:input_ids: 101 2029 3396 2106 5406 20934 15937 3678 3443 1029 102 1996 2118 2003 1996 2350 2835 1997 1996 7769 1997 4151 2892 1006 12167 2025 2049 2880 4075 1010 2029 2024 1999 4199 1007 1012 2049 2364 8705 1010 2062 4887 8705 1010 2003 2284 2006 1996 3721 2408 2358 1012 3312 2697 2013 1996 2364 2311 1012 2214 2267 1010 1996 4587 2311 2006 3721 1998 2284 2379 1996 5370 1997 2358 1012 2984 2697 1010 3506 8324 18014 7066 1012 3394 8656 1998 3428 13960 1999 27596 2160 1006 1037 2280 7822 2415 1007 1010 4151 2892 2160 1010 2004 2092 2004 8902 25438 2050 2534 2379 1996 24665 23052 1012 1996 2118 2083 1996 2062 4887 8705 2038 7208 2000 17200 5406 20934 15937 3678 1012 2096 2025 3234 1010 20934 15937 3678 2038 5868 4898 2013 10289 8214 1998 2062 4887 8705 2580 1037 20934 15937 3678 3396 2005 17979 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n" ] }, { "name": "stdout", 
"output_type": "stream", "text": [ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 149\n", "INFO:tensorflow:end_position: 154\n", "INFO:tensorflow:answer: bu ##ech ##ner prize for preaching\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000015\n", "INFO:tensorflow:example_index: 15\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] how many bs level degrees are offered in the college of engineering at notre dame ? [SEP] the college of engineering was established in 1920 , however , early courses in civil and mechanical engineering were a part of the college of science since the 1870s . today the college , housed in the fitzpatrick , cu ##shing , and st ##ins ##on - re ##mic ##k halls of engineering , includes five departments of study – aerospace and mechanical engineering , chemical and bio ##mo ##le ##cular engineering , civil engineering and geological sciences , computer science and engineering , and electrical engineering – with eight b . s . degrees offered . additionally , the college offers five - year dual degree programs with the colleges of arts and letters and of business awarding additional b . a . and master of business administration ( mba ) degrees , respectively . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 18:0 19:1 20:2 21:3 22:4 23:5 24:6 25:7 26:7 27:8 28:8 29:9 30:10 31:11 32:12 33:13 34:14 35:15 36:16 37:17 38:18 39:19 40:20 41:21 42:22 43:23 44:24 45:25 46:26 47:26 48:27 49:28 50:29 51:29 52:30 53:31 54:32 55:33 56:33 57:34 58:34 59:34 60:35 61:36 62:36 63:36 64:36 65:36 66:36 67:36 68:37 69:38 70:39 71:39 72:40 73:41 74:42 75:43 76:44 77:45 78:46 79:47 80:48 81:49 82:49 83:50 84:51 85:52 86:52 87:52 88:52 89:53 90:53 91:54 92:55 93:56 94:57 95:58 96:58 97:59 98:60 99:61 100:62 101:62 102:63 103:64 104:65 105:66 106:67 107:68 108:69 109:69 110:69 111:69 112:70 113:71 114:71 115:72 116:72 117:73 118:74 119:75 120:76 121:76 122:76 123:77 124:78 125:79 126:80 127:81 128:82 129:83 130:84 131:85 132:86 133:87 134:88 135:89 136:90 137:91 138:92 139:92 140:92 141:92 142:93 143:94 144:95 145:96 146:97 147:98 148:98 149:98 150:99 151:99 152:100 153:100\n", "INFO:tensorflow:token_is_max_context: 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True\n", "INFO:tensorflow:input_ids: 101 2129 2116 18667 2504 5445 2024 3253 1999 1996 2267 1997 3330 2012 10289 8214 1029 102 1996 2267 1997 3330 2001 2511 1999 4444 1010 2174 1010 2220 5352 1999 2942 1998 6228 3330 2020 1037 2112 1997 1996 2267 1997 2671 2144 1996 14896 1012 2651 1996 2267 1010 7431 1999 1996 26249 1010 12731 12227 1010 1998 2358 7076 2239 1011 2128 7712 2243 9873 1997 3330 1010 2950 2274 7640 1997 2817 1516 13395 1998 6228 3330 1010 5072 1998 16012 5302 2571 15431 3330 1010 2942 3330 1998 9843 4163 1010 3274 2671 1998 3330 1010 1998 5992 3330 1516 2007 2809 1038 1012 1055 1012 5445 3253 1012 5678 1010 1996 2267 4107 2274 1011 2095 7037 3014 3454 2007 1996 6667 1997 2840 1998 4144 1998 1997 2449 21467 3176 1038 1012 1037 1012 1998 3040 1997 2449 3447 1006 15038 1007 5445 1010 4414 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 107\n", "INFO:tensorflow:end_position: 107\n", "INFO:tensorflow:answer: eight\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000016\n", "INFO:tensorflow:example_index: 16\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] in what year was the college of engineering at notre dame formed ? [SEP] the college of engineering was established in 1920 , however , early courses in civil and mechanical engineering were a part of the college of science since the 1870s . today the college , housed in the fitzpatrick , cu ##shing , and st ##ins ##on - re ##mic ##k halls of engineering , includes five departments of study – aerospace and mechanical engineering , chemical and bio ##mo ##le ##cular engineering , civil engineering and geological sciences , computer science and engineering , and electrical engineering – with eight b . s . degrees offered . additionally , the college offers five - year dual degree programs with the colleges of arts and letters and of business awarding additional b . a . and master of business administration ( mba ) degrees , respectively . 
[SEP]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_to_orig_map: 15:0 16:1 17:2 18:3 19:4 20:5 21:6 22:7 23:7 24:8 25:8 26:9 27:10 28:11 29:12 30:13 31:14 32:15 33:16 34:17 35:18 36:19 37:20 38:21 39:22 40:23 41:24 42:25 43:26 44:26 45:27 46:28 47:29 48:29 49:30 50:31 51:32 52:33 53:33 54:34 55:34 56:34 57:35 58:36 59:36 60:36 61:36 62:36 63:36 64:36 65:37 66:38 67:39 68:39 69:40 70:41 71:42 72:43 73:44 74:45 75:46 76:47 77:48 78:49 79:49 80:50 81:51 82:52 83:52 84:52 85:52 86:53 87:53 88:54 89:55 90:56 91:57 92:58 93:58 94:59 95:60 96:61 97:62 98:62 99:63 100:64 101:65 102:66 103:67 104:68 105:69 106:69 107:69 108:69 109:70 110:71 111:71 112:72 113:72 114:73 115:74 116:75 117:76 118:76 119:76 120:77 121:78 122:79 123:80 124:81 125:82 126:83 127:84 128:85 129:86 130:87 131:88 132:89 133:90 134:91 135:92 136:92 137:92 138:92 139:93 140:94 141:95 142:96 143:97 144:98 145:98 146:98 147:99 148:99 149:100 150:100\n", "INFO:tensorflow:token_is_max_context: 15:True 16:True 17:True 18:True 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True\n", "INFO:tensorflow:input_ids: 101 1999 2054 2095 2001 1996 2267 1997 3330 2012 10289 8214 2719 1029 102 1996 2267 1997 3330 2001 2511 1999 4444 1010 2174 1010 2220 5352 1999 2942 1998 6228 3330 2020 1037 2112 1997 1996 2267 1997 2671 2144 1996 14896 1012 2651 1996 2267 1010 7431 1999 1996 26249 1010 12731 12227 1010 1998 2358 7076 2239 1011 2128 7712 2243 9873 1997 3330 1010 2950 2274 7640 1997 2817 1516 13395 1998 6228 3330 1010 5072 1998 16012 5302 2571 15431 3330 1010 2942 3330 1998 9843 4163 1010 3274 2671 1998 3330 1010 1998 5992 3330 1516 2007 2809 1038 1012 1055 1012 5445 3253 1012 5678 1010 1996 2267 4107 2274 1011 2095 7037 3014 3454 2007 1996 6667 1997 2840 1998 4144 1998 1997 2449 21467 3176 1038 1012 1037 1012 1998 3040 1997 2449 3447 1006 15038 1007 5445 1010 4414 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 22\n", "INFO:tensorflow:end_position: 22\n", "INFO:tensorflow:answer: 1920\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000017\n", "INFO:tensorflow:example_index: 17\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] before the creation of the college of engineering similar studies were carried out at which notre dame college ? [SEP] the college of engineering was established in 1920 , however , early courses in civil and mechanical engineering were a part of the college of science since the 1870s . today the college , housed in the fitzpatrick , cu ##shing , and st ##ins ##on - re ##mic ##k halls of engineering , includes five departments of study – aerospace and mechanical engineering , chemical and bio ##mo ##le ##cular engineering , civil engineering and geological sciences , computer science and engineering , and electrical engineering – with eight b . s . degrees offered . additionally , the college offers five - year dual degree programs with the colleges of arts and letters and of business awarding additional b . a . and master of business administration ( mba ) degrees , respectively . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 21:0 22:1 23:2 24:3 25:4 26:5 27:6 28:7 29:7 30:8 31:8 32:9 33:10 34:11 35:12 36:13 37:14 38:15 39:16 40:17 41:18 42:19 43:20 44:21 45:22 46:23 47:24 48:25 49:26 50:26 51:27 52:28 53:29 54:29 55:30 56:31 57:32 58:33 59:33 60:34 61:34 62:34 63:35 64:36 65:36 66:36 67:36 68:36 69:36 70:36 71:37 72:38 73:39 74:39 75:40 76:41 77:42 78:43 79:44 80:45 81:46 82:47 83:48 84:49 85:49 86:50 87:51 88:52 89:52 90:52 91:52 92:53 93:53 94:54 95:55 96:56 97:57 98:58 99:58 100:59 101:60 102:61 103:62 104:62 105:63 106:64 107:65 108:66 109:67 110:68 111:69 112:69 113:69 114:69 115:70 116:71 117:71 118:72 119:72 120:73 121:74 122:75 123:76 124:76 125:76 126:77 127:78 128:79 129:80 130:81 131:82 132:83 133:84 134:85 135:86 136:87 137:88 138:89 139:90 140:91 141:92 142:92 143:92 144:92 145:93 146:94 147:95 148:96 149:97 150:98 151:98 152:98 153:99 154:99 155:100 156:100\n", "INFO:tensorflow:token_is_max_context: 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True 156:True\n", "INFO:tensorflow:input_ids: 101 2077 1996 4325 1997 1996 2267 1997 3330 2714 2913 2020 3344 2041 2012 2029 10289 8214 2267 1029 102 1996 2267 1997 3330 2001 2511 1999 4444 1010 2174 1010 2220 5352 1999 2942 1998 6228 3330 2020 1037 2112 1997 1996 2267 1997 2671 2144 1996 14896 1012 2651 1996 2267 1010 7431 1999 1996 26249 1010 12731 12227 1010 1998 2358 7076 2239 1011 2128 7712 2243 9873 1997 3330 1010 2950 2274 7640 1997 2817 1516 13395 1998 6228 3330 1010 5072 1998 16012 5302 2571 15431 3330 1010 2942 3330 1998 9843 4163 1010 3274 2671 1998 3330 1010 1998 5992 3330 1516 2007 2809 1038 1012 1055 1012 5445 3253 1012 5678 1010 1996 2267 4107 2274 1011 2095 7037 3014 3454 2007 1996 6667 1997 2840 1998 4144 1998 1997 2449 21467 3176 1038 1012 1037 1012 1998 3040 1997 2449 3447 1006 15038 1007 5445 1010 4414 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 43\n", "INFO:tensorflow:end_position: 46\n", "INFO:tensorflow:answer: the college of science\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000018\n", "INFO:tensorflow:example_index: 18\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] how many departments are within the st ##ins ##on - re ##mic ##k hall of engineering ? [SEP] the college of engineering was established in 1920 , however , early courses in civil and mechanical engineering were a part of the college of science since the 1870s . today the college , housed in the fitzpatrick , cu ##shing , and st ##ins ##on - re ##mic ##k halls of engineering , includes five departments of study – aerospace and mechanical engineering , chemical and bio ##mo ##le ##cular engineering , civil engineering and geological sciences , computer science and engineering , and electrical engineering – with eight b . s . degrees offered . additionally , the college offers five - year dual degree programs with the colleges of arts and letters and of business awarding additional b . a . and master of business administration ( mba ) degrees , respectively . 
[SEP]\n", "INFO:tensorflow:token_to_orig_map: 19:0 20:1 21:2 22:3 23:4 24:5 25:6 26:7 27:7 28:8 29:8 30:9 31:10 32:11 33:12 34:13 35:14 36:15 37:16 38:17 39:18 40:19 41:20 42:21 43:22 44:23 45:24 46:25 47:26 48:26 49:27 50:28 51:29 52:29 53:30 54:31 55:32 56:33 57:33 58:34 59:34 60:34 61:35 62:36 63:36 64:36 65:36 66:36 67:36 68:36 69:37 70:38 71:39 72:39 73:40 74:41 75:42 76:43 77:44 78:45 79:46 80:47 81:48 82:49 83:49 84:50 85:51 86:52 87:52 88:52 89:52 90:53 91:53 92:54 93:55 94:56 95:57 96:58 97:58 98:59 99:60 100:61 101:62 102:62 103:63 104:64 105:65 106:66 107:67 108:68 109:69 110:69 111:69 112:69 113:70 114:71 115:71 116:72 117:72 118:73 119:74 120:75 121:76 122:76 123:76 124:77 125:78 126:79 127:80 128:81 129:82 130:83 131:84 132:85 133:86 134:87 135:88 136:89 137:90 138:91 139:92 140:92 141:92 142:92 143:93 144:94 145:95 146:96 147:97 148:98 149:98 150:98 151:99 152:99 153:100 154:100\n", "INFO:tensorflow:token_is_max_context: 19:True 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True\n", "INFO:tensorflow:input_ids: 101 2129 2116 7640 2024 2306 1996 2358 7076 2239 1011 2128 7712 2243 2534 1997 3330 1029 102 1996 2267 1997 3330 2001 2511 1999 4444 1010 2174 1010 2220 5352 1999 2942 1998 6228 3330 2020 1037 2112 1997 1996 2267 1997 2671 2144 1996 14896 1012 2651 1996 2267 1010 7431 1999 1996 26249 1010 12731 12227 1010 1998 2358 7076 2239 1011 2128 7712 2243 9873 1997 3330 1010 2950 2274 7640 1997 2817 1516 13395 1998 6228 3330 1010 5072 1998 16012 5302 2571 15431 3330 1010 2942 3330 1998 9843 4163 1010 3274 2671 1998 3330 1010 1998 5992 3330 1516 2007 2809 1038 1012 1055 1012 5445 3253 1012 5678 1010 1996 2267 4107 2274 1011 2095 7037 3014 3454 2007 1996 6667 1997 2840 1998 4144 1998 1997 2449 21467 3176 1038 1012 1037 1012 1998 3040 1997 2449 3447 1006 15038 1007 5445 1010 4414 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 74\n", "INFO:tensorflow:end_position: 74\n", "INFO:tensorflow:answer: five\n", "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 1000000019\n", "INFO:tensorflow:example_index: 19\n", "INFO:tensorflow:doc_span_index: 0\n", "INFO:tensorflow:tokens: [CLS] the college of science began to offer civil engineering courses beginning at what time at notre dame ? [SEP] the college of engineering was established in 1920 , however , early courses in civil and mechanical engineering were a part of the college of science since the 1870s . today the college , housed in the fitzpatrick , cu ##shing , and st ##ins ##on - re ##mic ##k halls of engineering , includes five departments of study – aerospace and mechanical engineering , chemical and bio ##mo ##le ##cular engineering , civil engineering and geological sciences , computer science and engineering , and electrical engineering – with eight b . s . degrees offered . additionally , the college offers five - year dual degree programs with the colleges of arts and letters and of business awarding additional b . a . and master of business administration ( mba ) degrees , respectively . 
[SEP]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:token_to_orig_map: 20:0 21:1 22:2 23:3 24:4 25:5 26:6 27:7 28:7 29:8 30:8 31:9 32:10 33:11 34:12 35:13 36:14 37:15 38:16 39:17 40:18 41:19 42:20 43:21 44:22 45:23 46:24 47:25 48:26 49:26 50:27 51:28 52:29 53:29 54:30 55:31 56:32 57:33 58:33 59:34 60:34 61:34 62:35 63:36 64:36 65:36 66:36 67:36 68:36 69:36 70:37 71:38 72:39 73:39 74:40 75:41 76:42 77:43 78:44 79:45 80:46 81:47 82:48 83:49 84:49 85:50 86:51 87:52 88:52 89:52 90:52 91:53 92:53 93:54 94:55 95:56 96:57 97:58 98:58 99:59 100:60 101:61 102:62 103:62 104:63 105:64 106:65 107:66 108:67 109:68 110:69 111:69 112:69 113:69 114:70 115:71 116:71 117:72 118:72 119:73 120:74 121:75 122:76 123:76 124:76 125:77 126:78 127:79 128:80 129:81 130:82 131:83 132:84 133:85 134:86 135:87 136:88 137:89 138:90 139:91 140:92 141:92 142:92 143:92 144:93 145:94 146:95 147:96 148:97 149:98 150:98 151:98 152:99 153:99 154:100 155:100\n", "INFO:tensorflow:token_is_max_context: 20:True 21:True 22:True 23:True 24:True 25:True 26:True 27:True 28:True 29:True 30:True 31:True 32:True 33:True 34:True 35:True 36:True 37:True 38:True 39:True 40:True 41:True 42:True 43:True 44:True 45:True 46:True 47:True 48:True 49:True 50:True 51:True 52:True 53:True 54:True 55:True 56:True 57:True 58:True 59:True 60:True 61:True 62:True 63:True 64:True 65:True 66:True 67:True 68:True 69:True 70:True 71:True 72:True 73:True 74:True 75:True 76:True 77:True 78:True 79:True 80:True 81:True 82:True 83:True 84:True 85:True 86:True 87:True 88:True 89:True 90:True 91:True 92:True 93:True 94:True 95:True 96:True 97:True 98:True 99:True 100:True 101:True 102:True 103:True 104:True 105:True 106:True 107:True 108:True 109:True 110:True 111:True 112:True 113:True 114:True 115:True 116:True 117:True 118:True 119:True 120:True 121:True 122:True 123:True 124:True 125:True 126:True 127:True 128:True 129:True 130:True 131:True 132:True 133:True 134:True 135:True 136:True 137:True 138:True 139:True 140:True 141:True 142:True 143:True 144:True 145:True 146:True 147:True 148:True 149:True 150:True 151:True 152:True 153:True 154:True 155:True\n", "INFO:tensorflow:input_ids: 101 1996 2267 1997 2671 2211 2000 3749 2942 3330 5352 2927 2012 2054 2051 2012 10289 8214 1029 102 1996 2267 1997 3330 2001 2511 1999 4444 1010 2174 1010 2220 5352 1999 2942 1998 6228 3330 2020 1037 2112 1997 1996 2267 1997 2671 2144 1996 14896 1012 2651 1996 2267 1010 7431 1999 1996 26249 1010 12731 12227 1010 1998 2358 7076 2239 1011 2128 7712 2243 9873 1997 3330 1010 2950 2274 7640 1997 2817 1516 13395 1998 6228 3330 1010 5072 1998 16012 5302 2571 15431 3330 1010 2942 3330 1998 9843 4163 1010 3274 2671 1998 3330 1010 1998 5992 3330 1516 2007 2809 1038 1012 1055 1012 5445 3253 1012 5678 1010 1996 2267 4107 2274 1011 2095 7037 3014 3454 2007 1996 6667 1997 2840 1998 4144 1998 1997 2449 21467 3176 1038 1012 1037 1012 1998 3040 1997 2449 3447 1006 15038 1007 5445 1010 4414 1012 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:start_position: 47\n", "INFO:tensorflow:end_position: 48\n", "INFO:tensorflow:answer: the 1870s\n" ] } ], "source": [ "bert_config = modeling_tensorflow.BertConfig.from_json_file(bert_config_file)\n", "tokenizer = tokenization.BertTokenizer(\n", " vocab_file=vocab_file, do_lower_case=True)\n", "\n", "eval_examples = read_squad_examples(\n", " input_file=input_file, is_training=True, max_num=16)\n", "\n", "eval_features = convert_examples_to_features(\n", " examples=eval_examples,\n", " tokenizer=tokenizer,\n", " max_seq_length=max_seq_length,\n", " doc_stride=doc_stride,\n", " max_query_length=max_query_length,\n", " is_training=True)\n", "\n", "# You can use that to test the behavior of the models when target are outside of the model input sequence\n", "# for feature in eval_features:\n", "# feature.start_position = outside_pos\n", "# feature.end_position = outside_pos" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:37.525632Z", "start_time": "2018-11-06T10:11:37.498695Z" } }, "outputs": [], "source": [ "eval_unique_id_to_feature = {}\n", "for eval_feature in eval_features:\n", " eval_unique_id_to_feature[eval_feature.unique_id] = eval_feature" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:37.558325Z", "start_time": "2018-11-06T10:11:37.527972Z" } }, "outputs": [], "source": [ "def input_fn_builder(features, seq_length, drop_remainder):\n", " \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n", "\n", " all_unique_ids = []\n", " all_input_ids = []\n", " all_input_mask = []\n", " all_segment_ids = []\n", " all_start_positions = []\n", " all_end_positions = []\n", "\n", " for feature in features:\n", " all_unique_ids.append(feature.unique_id)\n", " all_input_ids.append(feature.input_ids)\n", " all_input_mask.append(feature.input_mask)\n", " all_segment_ids.append(feature.segment_ids)\n", " all_start_positions.append(feature.start_position)\n", " 
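{ "cell_type": "markdown", "metadata": {}, "source": [ "A quick sanity check: the labelled answer span of a feature can be decoded back into WordPiece tokens. This is a minimal sketch (not part of the original comparison), assuming `eval_features` from the cells above; `first_feature` and `span` are just illustrative names:\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Decode the labelled span of the first feature back to text (sanity check).\n", "first_feature = eval_features[0]\n", "span = first_feature.tokens[first_feature.start_position:first_feature.end_position + 1]\n", "print(\" \".join(span).replace(\" ##\", \"\"))\n" ] },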
{ "cell_type": "code", "execution_count": 6, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:37.558325Z", "start_time": "2018-11-06T10:11:37.527972Z" } }, "outputs": [], "source": [ "def input_fn_builder(features, seq_length, drop_remainder):\n", " \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n", "\n", " all_unique_ids = []\n", " all_input_ids = []\n", " all_input_mask = []\n", " all_segment_ids = []\n", " all_start_positions = []\n", " all_end_positions = []\n", "\n", " for feature in features:\n", " all_unique_ids.append(feature.unique_id)\n", " all_input_ids.append(feature.input_ids)\n", " all_input_mask.append(feature.input_mask)\n", " all_segment_ids.append(feature.segment_ids)\n", " all_start_positions.append(feature.start_position)\n", " all_end_positions.append(feature.end_position)\n", "\n", " def input_fn(params):\n", " \"\"\"The actual input function.\"\"\"\n", " batch_size = params[\"batch_size\"]\n", "\n", " num_examples = len(features)\n", "\n", " # This is for demo purposes and does NOT scale to large data sets. We do\n", " # not use Dataset.from_generator() because that uses tf.py_func which is\n", " # not TPU compatible. The right way to load data is with TFRecordReader.\n", " feature_map = {\n", " \"unique_ids\":\n", " tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n", " \"input_ids\":\n", " tf.constant(\n", " all_input_ids, shape=[num_examples, seq_length],\n", " dtype=tf.int32),\n", " \"input_mask\":\n", " tf.constant(\n", " all_input_mask,\n", " shape=[num_examples, seq_length],\n", " dtype=tf.int32),\n", " \"segment_ids\":\n", " tf.constant(\n", " all_segment_ids,\n", " shape=[num_examples, seq_length],\n", " dtype=tf.int32),\n", " \"start_positions\":\n", " tf.constant(\n", " all_start_positions,\n", " shape=[num_examples],\n", " dtype=tf.int32),\n", " \"end_positions\":\n", " tf.constant(\n", " all_end_positions,\n", " shape=[num_examples],\n", " dtype=tf.int32),\n", " }\n", "\n", " d = tf.data.Dataset.from_tensor_slices(feature_map)\n", " d = d.repeat()\n", " d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n", " return d\n", "\n", " return input_fn" ] },
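{ "cell_type": "markdown", "metadata": {}, "source": [ "The closure returned by `input_fn_builder` embeds every feature as a constant, so it only works for small demo sets like this one. `TPUEstimator` normally supplies the `params` dict itself; a minimal, illustrative usage sketch (TF 1.x, names are not from the original notebook):\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Build the input_fn and call it by hand to inspect the batched shapes.\n", "demo_input_fn = input_fn_builder(eval_features, max_seq_length, drop_remainder=True)\n", "demo_dataset = demo_input_fn({\"batch_size\": 1})\n", "print(demo_dataset.output_shapes) # e.g. input_ids -> (1, 384)\n" ] },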
{ "cell_type": "code", "execution_count": 7, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:37.601666Z", "start_time": "2018-11-06T10:11:37.560082Z" } }, "outputs": [], "source": [ "def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n", " num_train_steps, num_warmup_steps, use_tpu,\n", " use_one_hot_embeddings):\n", " \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n", "\n", " def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n", " \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n", "\n", " tf.logging.info(\"*** Features ***\")\n", " for name in sorted(features.keys()):\n", " tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n", "\n", " unique_ids = features[\"unique_ids\"]\n", " input_ids = features[\"input_ids\"]\n", " input_mask = features[\"input_mask\"]\n", " segment_ids = features[\"segment_ids\"]\n", "\n", " is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n", "\n", " (start_logits, end_logits) = create_model(\n", " bert_config=bert_config,\n", " is_training=is_training,\n", " input_ids=input_ids,\n", " input_mask=input_mask,\n", " segment_ids=segment_ids,\n", " use_one_hot_embeddings=use_one_hot_embeddings)\n", "\n", " tvars = tf.trainable_variables()\n", "\n", " initialized_variable_names = {}\n", " scaffold_fn = None\n", " if init_checkpoint:\n", " (assignment_map,\n", " initialized_variable_names) = modeling_tensorflow.get_assigment_map_from_checkpoint(\n", " tvars, init_checkpoint)\n", " if use_tpu:\n", "\n", " def tpu_scaffold():\n", " tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n", " return tf.train.Scaffold()\n", "\n", " scaffold_fn = tpu_scaffold\n", " else:\n", " tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n", "\n", " tf.logging.info(\"**** Trainable Variables ****\")\n", " for var in tvars:\n", " init_string = \"\"\n", " if var.name in initialized_variable_names:\n", " init_string = \", *INIT_FROM_CKPT*\"\n", " tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n", " init_string)\n", "\n", " output_spec = None\n", " if mode == tf.estimator.ModeKeys.TRAIN:\n", " seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]\n", "\n", " def compute_loss(logits, positions):\n", " one_hot_positions = tf.one_hot(\n", " positions, depth=seq_length, dtype=tf.float32)\n", " log_probs = tf.nn.log_softmax(logits, axis=-1)\n", " loss = -tf.reduce_mean(\n", " tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n", " return loss\n", "\n", " start_positions = features[\"start_positions\"]\n", " end_positions = features[\"end_positions\"]\n", "\n", " start_loss = compute_loss(start_logits, start_positions)\n", " end_loss = compute_loss(end_logits, end_positions)\n", "\n", " total_loss = (start_loss + end_loss) / 2.0\n", "\n", " train_op = optimization.create_optimizer(\n", " total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n", "\n", " output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n", " mode=mode,\n", " loss=total_loss,\n", " train_op=train_op,\n", " scaffold_fn=scaffold_fn)\n", " elif mode == tf.estimator.ModeKeys.PREDICT:\n", " batch_size = modeling_tensorflow.get_shape_list(start_logits)[0]\n", " seq_length = modeling_tensorflow.get_shape_list(input_ids)[1]\n", "\n", " def compute_loss(logits, positions):\n", " one_hot_positions = tf.one_hot(\n", " positions, depth=seq_length, dtype=tf.float32)\n", " log_probs = tf.nn.log_softmax(logits, axis=-1)\n", " loss = -tf.reduce_mean(\n", " tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n", " return loss\n", "\n", " start_positions = features[\"start_positions\"]\n", " end_positions = features[\"end_positions\"]\n", "\n", " start_loss = compute_loss(start_logits, start_positions)\n", " end_loss = compute_loss(end_logits, end_positions)\n", "\n", " total_loss = (start_loss + end_loss) / 2.0\n", "\n", " predictions = {\n", " \"unique_ids\": unique_ids,\n", " \"start_logits\": start_logits,\n", " \"end_logits\": end_logits,\n", " \"total_loss\": tf.reshape(total_loss, [batch_size, 1]),\n", " \"start_loss\": tf.reshape(start_loss, [batch_size, 1]),\n", " \"end_loss\": tf.reshape(end_loss, [batch_size, 1]),\n", " }\n", " output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n", " mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n", " else:\n", " raise ValueError(\n", " \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n", "\n", " return output_spec\n", "\n", " return model_fn" ] },
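{ "cell_type": "markdown", "metadata": {}, "source": [ "In `model_fn` above, `compute_loss` is a one-hot cross-entropy over the token positions, and the start and end losses are averaged:\n", "\n", "$$\\\\mathcal{L} = -\\\\frac{1}{2N} \\\\sum_{i=1}^{N} \\\\left( \\\\log p^{start}(s_i) + \\\\log p^{end}(e_i) \\\\right)$$\n", "\n", "where $s_i$ and $e_i$ are the gold start and end token indices of example $i$, $N$ is the batch size, and $p^{start}$, $p^{end}$ are softmaxes over the 384 sequence positions.\n" ] },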
{ "cell_type": "code", "execution_count": 8, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:41.104542Z", "start_time": "2018-11-06T10:11:37.603474Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Estimator's model_fn (<function model_fn_builder.<locals>.model_fn at 0x120df3f28>) includes params argument, but params are not passed to Estimator.\n", "INFO:tensorflow:Using config: {'_model_dir': '/tmp/squad_base/', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': 1000, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true\n", "graph_options {\n", " rewrite_options {\n", " meta_optimizer_iterations: ONE\n", " }\n", "}\n", ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': None, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1, '_tpu_config': TPUConfig(iterations_per_loop=1000, num_shards=8, num_cores_per_replica=None, per_host_input_for_training=3, tpu_job_name=None, initial_infeed_sleep_secs=None, input_partition_dims=None), '_cluster': None}\n", "INFO:tensorflow:_TPUContext: eval_on_tpu True\n", "WARNING:tensorflow:eval_on_tpu ignored because use_tpu is False.\n" ] } ], "source": [ "is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n", "run_config = tf.contrib.tpu.RunConfig(\n", " cluster=None,\n", " master=None,\n", " model_dir=output_dir,\n", " save_checkpoints_steps=1000,\n", " tpu_config=tf.contrib.tpu.TPUConfig(\n", " iterations_per_loop=1000,\n", " num_shards=8,\n", " per_host_input_for_training=is_per_host))\n", "\n", "model_fn = model_fn_builder(\n", " bert_config=bert_config,\n", " init_checkpoint=init_checkpoint,\n", " learning_rate=learning_rate,\n", " num_train_steps=None,\n", " num_warmup_steps=None,\n", " use_tpu=False,\n", " use_one_hot_embeddings=False)\n", "\n", "estimator = tf.contrib.tpu.TPUEstimator(\n", " use_tpu=False,\n", " model_fn=model_fn,\n", " config=run_config,\n", " train_batch_size=12,\n", " predict_batch_size=1)\n", "\n", "predict_input_fn = input_fn_builder(\n", " features=eval_features,\n", " seq_length=max_seq_length,\n", " drop_remainder=True)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:47.857601Z", "start_time": "2018-11-06T10:11:41.106219Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Could not find trained model in model_dir: /tmp/squad_base/, running initialization to predict.\n", "INFO:tensorflow:Calling model_fn.\n", "INFO:tensorflow:Running infer on CPU\n", "INFO:tensorflow:*** Features ***\n", "INFO:tensorflow: name = end_positions, shape = (1,)\n", "INFO:tensorflow: name = input_ids, shape = (1, 384)\n", "INFO:tensorflow: name = input_mask, shape = (1, 384)\n", "INFO:tensorflow: name = segment_ids, shape = (1, 384)\n", "INFO:tensorflow: name = start_positions, shape = (1,)\n", "INFO:tensorflow: name = unique_ids, shape = (1,)\n", "INFO:tensorflow:**** Trainable Variables ****\n", "INFO:tensorflow: name = bert/embeddings/word_embeddings:0, shape = (30522, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/embeddings/token_type_embeddings:0, shape = (2, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/embeddings/position_embeddings:0, shape = (512, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/embeddings/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/embeddings/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/query/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/query/bias:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/key/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/key/bias:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/value/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/self/value/bias:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/output/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/attention/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/intermediate/dense/kernel:0, shape = (768, 3072), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/intermediate/dense/bias:0, shape = (3072,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/output/dense/kernel:0, shape = (3072, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/output/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/output/LayerNorm/beta:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/encoder/layer_0/output/LayerNorm/gamma:0, shape = (768,), *INIT_FROM_CKPT*\n", "[bert/encoder/layer_1 through bert/encoder/layer_11 elided for readability: each repeats the same 16 variables as layer_0, all *INIT_FROM_CKPT*]\n", "INFO:tensorflow: name = bert/pooler/dense/kernel:0, shape = (768, 768), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = bert/pooler/dense/bias:0, shape = (768,), *INIT_FROM_CKPT*\n", "INFO:tensorflow: name = cls/squad/output_weights:0, shape = (2, 768)\n", "INFO:tensorflow: name = cls/squad/output_bias:0, shape = (2,)\n", "INFO:tensorflow:Done calling model_fn.\n", "INFO:tensorflow:Graph was finalized.\n", "INFO:tensorflow:Running local_init_op.\n", "INFO:tensorflow:Done running local_init_op.\n", "INFO:tensorflow:prediction_loop marked as finished\n" ] } ], "source": [ "tensorflow_all_out = []\n", "tensorflow_all_results = []\n", "for result in estimator.predict(predict_input_fn, yield_single_examples=True):\n", " unique_id = int(result[\"unique_ids\"])\n", " eval_feature = eval_unique_id_to_feature[unique_id]\n", " start_logits = result[\"start_logits\"]\n", " end_logits = result[\"end_logits\"]\n", " total_loss = result[\"total_loss\"]\n", " start_loss = result[\"start_loss\"]\n", " end_loss = result[\"end_loss\"]\n", "\n", " output_json = collections.OrderedDict()\n", " output_json[\"linex_index\"] = unique_id\n", " output_json[\"tokens\"] = [token for (i, token) in enumerate(eval_feature.tokens)]\n", " output_json[\"start_logits\"] = [round(float(x), 6) for x in start_logits.flat]\n", " output_json[\"end_logits\"] = [round(float(x), 6) for x in end_logits.flat]\n", " output_json[\"total_loss\"] = [round(float(x), 6) for x in total_loss.flat]\n", " output_json[\"start_loss\"] = [round(float(x), 6) for x in start_loss.flat]\n", " output_json[\"end_loss\"] = [round(float(x), 6) for x in end_loss.flat]\n", " tensorflow_all_out.append(output_json)\n", " tensorflow_all_results.append(RawResult(\n", " unique_id=unique_id,\n", " start_logits=start_logits,\n", " end_logits=end_logits))\n", " break" ] },
(example_index, example) in enumerate(all_examples):\n", " features = example_index_to_features[example_index]\n", "\n", " prelim_predictions = []\n", " for (feature_index, feature) in enumerate(features):\n", " result = unique_id_to_result[feature.unique_id]\n", "\n", " start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n", " end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n", " for start_index in start_indexes:\n", " for end_index in end_indexes:\n", " # We could hypothetically create invalid predictions, e.g., predict\n", " # that the start of the span is in the question. We throw out all\n", " # invalid predictions.\n", " if start_index >= len(feature.tokens):\n", " continue\n", " if end_index >= len(feature.tokens):\n", " continue\n", " if start_index not in feature.token_to_orig_map:\n", " continue\n", " if end_index not in feature.token_to_orig_map:\n", " continue\n", " if not feature.token_is_max_context.get(start_index, False):\n", " continue\n", " if end_index < start_index:\n", " continue\n", " length = end_index - start_index + 1\n", " if length > max_answer_length:\n", " continue\n", " prelim_predictions.append(\n", " _PrelimPrediction(\n", " feature_index=feature_index,\n", " start_index=start_index,\n", " end_index=end_index,\n", " start_logit=result.start_logits[start_index],\n", " end_logit=result.end_logits[end_index]))\n", "\n", " prelim_predictions = sorted(\n", " prelim_predictions,\n", " key=lambda x: (x.start_logit + x.end_logit),\n", " reverse=True)\n", "\n", " _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n", " \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n", "\n", " seen_predictions = {}\n", " nbest = []\n", " for pred in prelim_predictions:\n", " if len(nbest) >= n_best_size:\n", " break\n", " feature = features[pred.feature_index]\n", "\n", " tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n", " orig_doc_start = feature.token_to_orig_map[pred.start_index]\n", " orig_doc_end = feature.token_to_orig_map[pred.end_index]\n", " orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n", " tok_text = \" \".join(tok_tokens)\n", "\n", " # De-tokenize WordPieces that have been split off.\n", " tok_text = tok_text.replace(\" ##\", \"\")\n", " tok_text = tok_text.replace(\"##\", \"\")\n", "\n", " # Clean whitespace\n", " tok_text = tok_text.strip()\n", " tok_text = \" \".join(tok_text.split())\n", " orig_text = \" \".join(orig_tokens)\n", "\n", " final_text = get_final_text(tok_text, orig_text, do_lower_case)\n", " if final_text in seen_predictions:\n", " continue\n", "\n", " seen_predictions[final_text] = True\n", " nbest.append(\n", " _NbestPrediction(\n", " text=final_text,\n", " start_logit=pred.start_logit,\n", " end_logit=pred.end_logit))\n", "\n", " # In very rare edge cases we could have no valid predictions. 
So we\n", " # just create a nonce prediction in this case to avoid failure.\n", " if not nbest:\n", " nbest.append(\n", " _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n", "\n", " assert len(nbest) >= 1\n", "\n", " total_scores = []\n", " for entry in nbest:\n", " total_scores.append(entry.start_logit + entry.end_logit)\n", "\n", " probs = _compute_softmax(total_scores)\n", "\n", " nbest_json = []\n", " for (i, entry) in enumerate(nbest):\n", " output = collections.OrderedDict()\n", " output[\"text\"] = entry.text\n", " output[\"probability\"] = probs[i]\n", " output[\"start_logit\"] = entry.start_logit\n", " output[\"end_logit\"] = entry.end_logit\n", " nbest_json.append(output)\n", "\n", " assert len(nbest_json) >= 1\n", "\n", " all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n", " all_nbest_json[example.qas_id] = nbest_json\n", "\n", " return all_predictions, all_nbest_json" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:47.953205Z", "start_time": "2018-11-06T10:11:47.914751Z" } }, "outputs": [], "source": [ "all_predictions, all_nbest_json = compute_predictions(eval_examples[:1], eval_features[:1], tensorflow_all_results, 20, max_answer_length, True)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:47.994647Z", "start_time": "2018-11-06T10:11:47.955015Z" } }, "outputs": [ { "data": { "text/plain": [ "OrderedDict([('5733be284776f41900661182',\n", " [OrderedDict([('text', 'empty'),\n", " ('probability', 1.0),\n", " ('start_logit', 0.0),\n", " ('end_logit', 0.0)])])])" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "all_nbest_json" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:48.028473Z", "start_time": "2018-11-06T10:11:47.996311Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1\n", "7\n", "odict_keys(['linex_index', 'tokens', 'start_logits', 'end_logits', 'total_loss', 'start_loss', 'end_loss'])\n", "number of tokens 176\n", "number of start_logits 384\n", "shape of end_logits 384\n" ] } ], "source": [ "print(len(tensorflow_all_out))\n", "print(len(tensorflow_all_out[0]))\n", "print(tensorflow_all_out[0].keys())\n", "print(\"number of tokens\", len(tensorflow_all_out[0]['tokens']))\n", "print(\"number of start_logits\", len(tensorflow_all_out[0]['start_logits']))\n", "print(\"shape of end_logits\", len(tensorflow_all_out[0]['end_logits']))" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:48.060658Z", "start_time": "2018-11-06T10:11:48.030289Z" } }, "outputs": [], "source": [ "tensorflow_outputs = [tensorflow_all_out[0]['start_logits'], tensorflow_all_out[0]['end_logits'],\n", " tensorflow_all_out[0]['total_loss'], tensorflow_all_out[0]['start_loss'],\n", " tensorflow_all_out[0]['end_loss']]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2/ PyTorch code" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:48.478814Z", "start_time": "2018-11-06T10:11:48.062585Z" } }, "outputs": [], "source": [ "import modeling\n", "from run_squad import *" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:48.512607Z", "start_time": "2018-11-06T10:11:48.480729Z" } }, "outputs": [], "source": [ "init_checkpoint_pt 
= \"../google_models/uncased_L-12_H-768_A-12/pytorch_model.bin\"" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:51.023405Z", "start_time": "2018-11-06T10:11:48.514306Z" }, "scrolled": true }, "outputs": [ { "data": { "text/plain": [ "tensor([0., 0.])" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "device = torch.device(\"cpu\")\n", "model = modeling.BertForQuestionAnswering(bert_config)\n", "model.bert.load_state_dict(torch.load(init_checkpoint_pt, map_location='cpu'))\n", "model.to(device)\n", "model.qa_outputs.weight.data.fill_(1.0)\n", "model.qa_outputs.bias.data.zero_()" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:51.079364Z", "start_time": "2018-11-06T10:11:51.028228Z" }, "code_folding": [] }, "outputs": [], "source": [ "all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n", "all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n", "all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n", "all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n", "all_start_positions = torch.tensor([[f.start_position] for f in eval_features], dtype=torch.long)\n", "all_end_positions = torch.tensor([[f.end_position] for f in eval_features], dtype=torch.long)\n", "\n", "eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,\n", " all_start_positions, all_end_positions, all_example_index)\n", "eval_sampler = SequentialSampler(eval_data)\n", "eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)\n", "\n", "model.eval()\n", "None" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:51.114686Z", "start_time": "2018-11-06T10:11:51.081474Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[torch.Size([1, 384]), torch.Size([1, 384]), torch.Size([1, 384]), torch.Size([1, 1]), torch.Size([1, 1]), torch.Size([1])]\n" ] }, { "data": { "text/plain": [ "torch.Size([1, 1])" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "batch = iter(eval_dataloader).next()\n", "input_ids, input_mask, segment_ids, start_positions, end_positions, example_index = batch\n", "print([t.shape for t in batch])\n", "start_positions.size()" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "ExecuteTime": { "end_time": "2018-11-06T10:11:52.298367Z", "start_time": "2018-11-06T10:11:51.116219Z" } }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Evaluating: 0%| | 0/270 [00:00\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mspec\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mimportlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutil\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspec_from_file_location\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'*'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moriginal_tf_inplem_dir\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m'/extract_features_tensorflow.py'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mimportlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutil\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodule_from_spec\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mspec\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mspec\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexec_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodules\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'extract_features_tensorflow'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/bert/lib/python3.6/importlib/_bootstrap_external.py\u001b[0m in \u001b[0;36mexec_module\u001b[0;34m(self, module)\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/bert/lib/python3.6/importlib/_bootstrap.py\u001b[0m in \u001b[0;36m_call_with_frames_removed\u001b[0;34m(f, *args, **kwds)\u001b[0m\n", "\u001b[0;32m~/Documents/Thomas/Code/HF/BERT/pytorch-pretrained-BERT/tensorflow_code/extract_features_tensorflow.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0mFLAGS\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflags\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0mflags\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDEFINE_string\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"input_file\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0mflags\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDEFINE_string\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"output_file\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/bert/lib/python3.6/site-packages/tensorflow/python/platform/flags.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;34m'Use of the keyword argument names (flag_name, default_value, '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 57\u001b[0m 'docstring) is deprecated, please use (name, default, help) instead.')\n\u001b[0;32m---> 58\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moriginal_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 59\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 60\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mtf_decorator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmake_decorator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moriginal_function\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 
"\u001b[0;32m~/miniconda3/envs/bert/lib/python3.6/site-packages/absl/flags/_defines.py\u001b[0m in \u001b[0;36mDEFINE_string\u001b[0;34m(name, default, help, flag_values, **args)\u001b[0m\n\u001b[1;32m 239\u001b[0m \u001b[0mparser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_argument_parser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mArgumentParser\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[0mserializer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_argument_parser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mArgumentSerializer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 241\u001b[0;31m \u001b[0mDEFINE\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparser\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdefault\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhelp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mflag_values\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mserializer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 242\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/bert/lib/python3.6/site-packages/absl/flags/_defines.py\u001b[0m in \u001b[0;36mDEFINE\u001b[0;34m(parser, name, default, help, flag_values, serializer, module_name, **args)\u001b[0m\n\u001b[1;32m 80\u001b[0m \"\"\"\n\u001b[1;32m 81\u001b[0m DEFINE_flag(_flag.Flag(parser, serializer, name, default, help, **args),\n\u001b[0;32m---> 82\u001b[0;31m flag_values, module_name)\n\u001b[0m\u001b[1;32m 83\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/bert/lib/python3.6/site-packages/absl/flags/_defines.py\u001b[0m in \u001b[0;36mDEFINE_flag\u001b[0;34m(flag, flag_values, module_name)\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0;31m# Copying the reference to flag_values prevents pychecker warnings.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0mfv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflag_values\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 104\u001b[0;31m \u001b[0mfv\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mflag\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflag\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 105\u001b[0m \u001b[0;31m# Tell flag_values who's defining the flag.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/bert/lib/python3.6/site-packages/absl/flags/_flagvalues.py\u001b[0m in \u001b[0;36m__setitem__\u001b[0;34m(self, name, flag)\u001b[0m\n\u001b[1;32m 427\u001b[0m \u001b[0;31m# module is simply being imported a subsequent time.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 428\u001b[0m \u001b[0;32mreturn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 429\u001b[0;31m \u001b[0;32mraise\u001b[0m 
\u001b[0m_exceptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDuplicateFlagError\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfrom_flag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 430\u001b[0m \u001b[0mshort_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mflag\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshort_name\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 431\u001b[0m \u001b[0;31m# If a new flag overrides an old one, we need to cleanup the old flag's\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mDuplicateFlagError\u001b[0m: The flag 'input_file' is defined twice. First from *, Second from *. Description from first occurrence: (no help available)" ] } ], "source": [ "import importlib.util\n", "import sys\n", "\n", "spec = importlib.util.spec_from_file_location('*', original_tf_inplem_dir + '/extract_features_tensorflow.py')\n", "module = importlib.util.module_from_spec(spec)\n", "spec.loader.exec_module(module)\n", "sys.modules['extract_features_tensorflow'] = module\n", "\n", "from extract_features_tensorflow import *" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T14:58:05.650987Z", "start_time": "2018-11-15T14:58:05.541620Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:*** Example ***\n", "INFO:tensorflow:unique_id: 0\n", "INFO:tensorflow:tokens: [CLS] who was jim henson ? [SEP] jim henson was a puppet ##eer [SEP]\n", "INFO:tensorflow:input_ids: 101 2040 2001 3958 27227 1029 102 3958 27227 2001 1037 13997 11510 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", "INFO:tensorflow:input_type_ids: 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n" ] } ], "source": [ "layer_indexes = list(range(12))\n", "bert_config = modeling.BertConfig.from_json_file(bert_config_file)\n", "tokenizer = tokenization.FullTokenizer(\n", " vocab_file=vocab_file, do_lower_case=True)\n", "examples = read_examples(input_file)\n", "\n", "features = convert_examples_to_features(\n", " examples=examples, seq_length=max_seq_length, tokenizer=tokenizer)\n", "unique_id_to_feature = {}\n", "for feature in features:\n", " unique_id_to_feature[feature.unique_id] = feature" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T14:58:11.562443Z", "start_time": "2018-11-15T14:58:08.036485Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Estimator's model_fn (.model_fn at 0x11ea7f1e0>) includes params argument, but params are not passed to Estimator.\n", "WARNING:tensorflow:Using temporary folder as model directory: 
/var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmphs4_nsq9\n", "INFO:tensorflow:Using config: {'_model_dir': '/var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmphs4_nsq9', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true\n", "graph_options {\n", " rewrite_options {\n", " meta_optimizer_iterations: ONE\n", " }\n", "}\n", ", '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': None, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': , '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1, '_tpu_config': TPUConfig(iterations_per_loop=2, num_shards=1, num_cores_per_replica=None, per_host_input_for_training=3, tpu_job_name=None, initial_infeed_sleep_secs=None, input_partition_dims=None), '_cluster': None}\n", "WARNING:tensorflow:Setting TPUConfig.num_shards==1 is an unsupported behavior. Please fix as soon as possible (leaving num_shards as None.\n", "INFO:tensorflow:_TPUContext: eval_on_tpu True\n", "WARNING:tensorflow:eval_on_tpu ignored because use_tpu is False.\n" ] } ], "source": [ "is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n", "run_config = tf.contrib.tpu.RunConfig(\n", " master=None,\n", " tpu_config=tf.contrib.tpu.TPUConfig(\n", " num_shards=1,\n", " per_host_input_for_training=is_per_host))\n", "\n", "model_fn = model_fn_builder(\n", " bert_config=bert_config,\n", " init_checkpoint=init_checkpoint,\n", " layer_indexes=layer_indexes,\n", " use_tpu=False,\n", " use_one_hot_embeddings=False)\n", "\n", "# If TPU is not available, this will fall back to normal Estimator on CPU\n", "# or GPU.\n", "estimator = tf.contrib.tpu.TPUEstimator(\n", " use_tpu=False,\n", " model_fn=model_fn,\n", " config=run_config,\n", " predict_batch_size=1)\n", "\n", "input_fn = input_fn_builder(\n", " features=features, seq_length=max_seq_length)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T14:58:21.736543Z", "start_time": "2018-11-15T14:58:16.723829Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Could not find trained model in model_dir: /var/folders/yx/cw8n_njx3js5jksyw_qlp8p00000gn/T/tmphs4_nsq9, running initialization to predict.\n", "INFO:tensorflow:Calling model_fn.\n", "INFO:tensorflow:Running infer on CPU\n", "INFO:tensorflow:Done calling model_fn.\n", "INFO:tensorflow:Graph was finalized.\n", "INFO:tensorflow:Running local_init_op.\n", "INFO:tensorflow:Done running local_init_op.\n", "extracting layer 0\n", "extracting layer 1\n", "extracting layer 2\n", "extracting layer 3\n", "extracting layer 4\n", "extracting layer 5\n", "extracting layer 6\n", "extracting layer 7\n", "extracting layer 8\n", "extracting layer 9\n", "extracting layer 10\n", "extracting layer 11\n", "INFO:tensorflow:prediction_loop marked as finished\n", "INFO:tensorflow:prediction_loop marked as finished\n" ] } ], "source": [ "tensorflow_all_out = []\n", "for result in estimator.predict(input_fn, yield_single_examples=True):\n", " unique_id = int(result[\"unique_id\"])\n", " feature = unique_id_to_feature[unique_id]\n", " output_json = collections.OrderedDict()\n", " output_json[\"linex_index\"] = unique_id\n", " 
tensorflow_all_out_features = []\n", " # for (i, token) in enumerate(feature.tokens):\n", " all_layers = []\n", " for (j, layer_index) in enumerate(layer_indexes):\n", " print(\"extracting layer {}\".format(j))\n", " layer_output = result[\"layer_output_%d\" % j]\n", " layers = collections.OrderedDict()\n", " layers[\"index\"] = layer_index\n", " layers[\"values\"] = layer_output\n", " all_layers.append(layers)\n", " tensorflow_out_features = collections.OrderedDict()\n", " tensorflow_out_features[\"layers\"] = all_layers\n", " tensorflow_all_out_features.append(tensorflow_out_features)\n", "\n", " output_json[\"features\"] = tensorflow_all_out_features\n", " tensorflow_all_out.append(output_json)" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T14:58:23.970714Z", "start_time": "2018-11-15T14:58:23.931930Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1\n", "2\n", "odict_keys(['linex_index', 'features'])\n", "number of tokens 1\n", "number of layers 12\n" ] }, { "data": { "text/plain": [ "(128, 768)" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "print(len(tensorflow_all_out))\n", "print(len(tensorflow_all_out[0]))\n", "print(tensorflow_all_out[0].keys())\n", "print(\"number of tokens\", len(tensorflow_all_out[0]['features']))\n", "print(\"number of layers\", len(tensorflow_all_out[0]['features'][0]['layers']))\n", "tensorflow_all_out[0]['features'][0]['layers'][0]['values'].shape" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T14:58:25.547012Z", "start_time": "2018-11-15T14:58:25.516076Z" } }, "outputs": [], "source": [ "tensorflow_outputs = list(tensorflow_all_out[0]['features'][0]['layers'][t]['values'] for t in layer_indexes)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2/ PyTorch code" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "os.chdir('./examples')" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:03:49.528679Z", "start_time": "2018-11-15T15:03:49.497697Z" } }, "outputs": [], "source": [ "import extract_features\n", "import pytorch_pretrained_bert as ppb\n", "from extract_features import *" ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:18.001177Z", "start_time": "2018-11-15T15:21:17.970369Z" } }, "outputs": [], "source": [ "init_checkpoint_pt = \"../../google_models/uncased_L-12_H-768_A-12/\"" ] }, { "cell_type": "code", "execution_count": 26, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:20.893669Z", "start_time": "2018-11-15T15:21:18.786623Z" }, "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "11/15/2018 16:21:18 - INFO - pytorch_pretrained_bert.modeling - loading archive file ../../google_models/uncased_L-12_H-768_A-12/\n", "11/15/2018 16:21:18 - INFO - pytorch_pretrained_bert.modeling - Model config {\n", " \"attention_probs_dropout_prob\": 0.1,\n", " \"hidden_act\": \"gelu\",\n", " \"hidden_dropout_prob\": 0.1,\n", " \"hidden_size\": 768,\n", " \"initializer_range\": 0.02,\n", " \"intermediate_size\": 3072,\n", " \"max_position_embeddings\": 512,\n", " \"num_attention_heads\": 12,\n", " \"num_hidden_layers\": 12,\n", " \"type_vocab_size\": 2,\n", " \"vocab_size\": 30522\n", "}\n", "\n" ] }, { "data": { "text/plain": [ "BertModel(\n", " 
(embeddings): BertEmbeddings(\n", " (word_embeddings): Embedding(30522, 768)\n", " (position_embeddings): Embedding(512, 768)\n", " (token_type_embeddings): Embedding(2, 768)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (encoder): BertEncoder(\n", " (layer): ModuleList(\n", " (0): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (1): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (2): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (3): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", 
" (4): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (5): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (6): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (7): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (8): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): 
Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (9): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (10): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (11): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " )\n", " )\n", " (pooler): BertPooler(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (activation): Tanh()\n", " )\n", ")" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "device = torch.device(\"cpu\")\n", "model = ppb.BertModel.from_pretrained(init_checkpoint_pt)\n", "model.to(device)" ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:26.963427Z", "start_time": "2018-11-15T15:21:26.922494Z" }, "code_folding": [] }, "outputs": [ { "data": { "text/plain": [ "BertModel(\n", 
" (embeddings): BertEmbeddings(\n", " (word_embeddings): Embedding(30522, 768)\n", " (position_embeddings): Embedding(512, 768)\n", " (token_type_embeddings): Embedding(2, 768)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (encoder): BertEncoder(\n", " (layer): ModuleList(\n", " (0): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (1): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (2): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (3): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " 
)\n", " (4): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (5): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (6): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (7): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (8): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): 
Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (9): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (10): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (11): BertLayer(\n", " (attention): BertAttention(\n", " (self): BertSelfAttention(\n", " (query): Linear(in_features=768, out_features=768, bias=True)\n", " (key): Linear(in_features=768, out_features=768, bias=True)\n", " (value): Linear(in_features=768, out_features=768, bias=True)\n", " (dropout): Dropout(p=0.1)\n", " )\n", " (output): BertSelfOutput(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " (intermediate): BertIntermediate(\n", " (dense): Linear(in_features=768, out_features=3072, bias=True)\n", " )\n", " (output): BertOutput(\n", " (dense): Linear(in_features=3072, out_features=768, bias=True)\n", " (LayerNorm): BertLayerNorm()\n", " (dropout): Dropout(p=0.1)\n", " )\n", " )\n", " )\n", " )\n", " (pooler): BertPooler(\n", " (dense): Linear(in_features=768, out_features=768, bias=True)\n", " (activation): Tanh()\n", " )\n", ")" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n", "all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n", "all_input_type_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long)\n", "all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n", "\n", "eval_data = 
TensorDataset(all_input_ids, all_input_mask, all_input_type_ids, all_example_index)\n", "eval_sampler = SequentialSampler(eval_data)\n", "eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)\n", "\n", "model.eval()" ] }, { "cell_type": "code", "execution_count": 28, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:30.718724Z", "start_time": "2018-11-15T15:21:30.329205Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[ 101, 2040, 2001, 3958, 27227, 1029, 102, 3958, 27227, 2001,\n", " 1037, 13997, 11510, 102, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0]])\n", "tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", " 0, 0, 0, 0, 0, 0, 0, 0]])\n", "tensor([0])\n", "layer 0 0\n", "layer 1 1\n", "layer 2 2\n", "layer 3 3\n", "layer 4 4\n", "layer 5 5\n", "layer 6 6\n", "layer 7 7\n", "layer 8 8\n", "layer 9 9\n", "layer 10 10\n", "layer 11 11\n" ] } ], "source": [ "layer_indexes = list(range(12))\n", "\n", "pytorch_all_out = []\n", "for input_ids, input_mask, input_type_ids, example_indices in eval_dataloader:\n", " print(input_ids)\n", " print(input_mask)\n", " print(example_indices)\n", " input_ids = input_ids.to(device)\n", " input_mask = input_mask.to(device)\n", "\n", " all_encoder_layers, _ = model(input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)\n", "\n", " for b, example_index in enumerate(example_indices):\n", " feature = features[example_index.item()]\n", " unique_id = int(feature.unique_id)\n", " # feature = unique_id_to_feature[unique_id]\n", " output_json = collections.OrderedDict()\n", " output_json[\"linex_index\"] = unique_id\n", " all_out_features = []\n", " # for (i, token) in enumerate(feature.tokens):\n", " all_layers = []\n", " for (j, layer_index) in enumerate(layer_indexes):\n", " print(\"layer\", j, layer_index)\n", " layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()\n", " layer_output = layer_output[b]\n", " layers = collections.OrderedDict()\n", " layers[\"index\"] = layer_index\n", " layer_output = layer_output\n", " layers[\"values\"] = layer_output if not isinstance(layer_output, (int, float)) else [layer_output]\n", " all_layers.append(layers)\n", "\n", " out_features = collections.OrderedDict()\n", " out_features[\"layers\"] = all_layers\n", " all_out_features.append(out_features)\n", " output_json[\"features\"] = all_out_features\n", " pytorch_all_out.append(output_json)" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:35.703615Z", "start_time": "2018-11-15T15:21:35.666150Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1\n", "2\n", "odict_keys(['linex_index', 'features'])\n", "number of tokens 1\n", "number of layers 12\n", "hidden_size 128\n" ] }, { "data": { 
"text/plain": [ "(128, 768)" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "print(len(pytorch_all_out))\n", "print(len(pytorch_all_out[0]))\n", "print(pytorch_all_out[0].keys())\n", "print(\"number of tokens\", len(pytorch_all_out))\n", "print(\"number of layers\", len(pytorch_all_out[0]['features'][0]['layers']))\n", "print(\"hidden_size\", len(pytorch_all_out[0]['features'][0]['layers'][0]['values']))\n", "pytorch_all_out[0]['features'][0]['layers'][0]['values'].shape" ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:36.999073Z", "start_time": "2018-11-15T15:21:36.966762Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(128, 768)\n", "(128, 768)\n" ] } ], "source": [ "pytorch_outputs = list(pytorch_all_out[0]['features'][0]['layers'][t]['values'] for t in layer_indexes)\n", "print(pytorch_outputs[0].shape)\n", "print(pytorch_outputs[1].shape)" ] }, { "cell_type": "code", "execution_count": 31, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:37.936522Z", "start_time": "2018-11-15T15:21:37.905269Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(128, 768)\n", "(128, 768)\n" ] } ], "source": [ "print(tensorflow_outputs[0].shape)\n", "print(tensorflow_outputs[1].shape)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3/ Comparing the standard deviation on the last layer of both models" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:39.437137Z", "start_time": "2018-11-15T15:21:39.406150Z" } }, "outputs": [], "source": [ "import numpy as np" ] }, { "cell_type": "code", "execution_count": 33, "metadata": { "ExecuteTime": { "end_time": "2018-11-15T15:21:40.181870Z", "start_time": "2018-11-15T15:21:40.137023Z" } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "shape tensorflow layer, shape pytorch layer, standard deviation\n", "((128, 768), (128, 768), 1.5258875e-07)\n", "((128, 768), (128, 768), 2.342731e-07)\n", "((128, 768), (128, 768), 2.801949e-07)\n", "((128, 768), (128, 768), 3.5904986e-07)\n", "((128, 768), (128, 768), 4.2842768e-07)\n", "((128, 768), (128, 768), 5.127951e-07)\n", "((128, 768), (128, 768), 6.14668e-07)\n", "((128, 768), (128, 768), 7.063922e-07)\n", "((128, 768), (128, 768), 7.906173e-07)\n", "((128, 768), (128, 768), 8.475192e-07)\n", "((128, 768), (128, 768), 8.975489e-07)\n", "((128, 768), (128, 768), 4.1671223e-07)\n" ] } ], "source": [ "print('shape tensorflow layer, shape pytorch layer, standard deviation')\n", "print('\\n'.join(list(str((np.array(tensorflow_outputs[i]).shape,\n", " np.array(pytorch_outputs[i]).shape, \n", " np.sqrt(np.mean((np.array(tensorflow_outputs[i]) - np.array(pytorch_outputs[i]))**2.0)))) for i in range(12))))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "hide_input": false, "kernelspec": { "display_name": "Python [default]", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.7" }, "toc": { "colors": { "hover_highlight": "#DAA520", "running_highlight": "#FF0000", "selected_highlight": "#FFD700" }, "moveMenuLeft": true, "nav_menu": { "height": "48px", "width": "252px" }, 
"navigate_menu": true, "number_sections": true, "sideBar": true, "threshold": 4, "toc_cell": false, "toc_section_display": "block", "toc_window_display": false } }, "nbformat": 4, "nbformat_minor": 2 } ================================================ FILE: pytorch_pretrained_bert/__init__.py ================================================ __version__ = "0.6.2" from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer from .tokenization_openai import OpenAIGPTTokenizer from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus) from .tokenization_gpt2 import GPT2Tokenizer from .modeling import (BertConfig, BertModel, BertForPreTraining, BertForMaskedLM, BertForNextSentencePrediction, BertForSequenceClassification, BertForMultipleChoice, BertForTokenClassification, BertForQuestionAnswering, load_tf_weights_in_bert) from .modeling_openai import (OpenAIGPTConfig, OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, load_tf_weights_in_openai_gpt) from .modeling_transfo_xl import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl) from .modeling_gpt2 import (GPT2Config, GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, GPT2MultipleChoiceHead, load_tf_weights_in_gpt2) from .optimization import BertAdam from .optimization_openai import OpenAIAdam from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path, WEIGHTS_NAME, CONFIG_NAME ================================================ FILE: pytorch_pretrained_bert/__main__.py ================================================ # coding: utf8 def main(): import sys if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [ "convert_tf_checkpoint_to_pytorch", "convert_openai_checkpoint", "convert_transfo_xl_checkpoint", "convert_gpt2_checkpoint", ]: print( "Should be used as one of: \n" ">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n" ">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n" ">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n" ">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`") else: if sys.argv[1] == "convert_tf_checkpoint_to_pytorch": try: from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. 
Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if len(sys.argv) != 5: # pylint: disable=line-too-long print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`") else: PYTORCH_DUMP_OUTPUT = sys.argv.pop() TF_CONFIG = sys.argv.pop() TF_CHECKPOINT = sys.argv.pop() convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "convert_openai_checkpoint": from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: OPENAI_GPT_CONFIG = sys.argv[4] else: OPENAI_GPT_CONFIG = "" convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH, OPENAI_GPT_CONFIG, PYTORCH_DUMP_OUTPUT) elif sys.argv[1] == "convert_transfo_xl_checkpoint": try: from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch except ImportError: print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise if 'ckpt' in sys.argv[2].lower(): TF_CHECKPOINT = sys.argv[2] TF_DATASET_FILE = "" else: TF_DATASET_FILE = sys.argv[2] TF_CHECKPOINT = "" PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: TF_CONFIG = sys.argv[4] else: TF_CONFIG = "" convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE) else: try: from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch except ImportError: print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, " "In that case, it requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise TF_CHECKPOINT = sys.argv[2] PYTORCH_DUMP_OUTPUT = sys.argv[3] if len(sys.argv) == 5: TF_CONFIG = sys.argv[4] else: TF_CONFIG = "" convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT) if __name__ == '__main__': main() ================================================ FILE: pytorch_pretrained_bert/convert_gpt2_checkpoint_to_pytorch.py ================================================ # coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Convert OpenAI GPT checkpoint.""" from __future__ import absolute_import, division, print_function import argparse from io import open import torch from pytorch_pretrained_bert.modeling_gpt2 import (CONFIG_NAME, WEIGHTS_NAME, GPT2Config, GPT2Model, load_tf_weights_in_gpt2) def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path): # Construct model if gpt2_config_file == "": config = GPT2Config() else: config = GPT2Config(gpt2_config_file) model = GPT2Model(config) # Load weights from numpy load_tf_weights_in_gpt2(model, gpt2_checkpoint_path) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME print("Save PyTorch model to {}".format(pytorch_weights_dump_path)) torch.save(model.state_dict(), pytorch_weights_dump_path) print("Save configuration file to {}".format(pytorch_config_dump_path)) with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--gpt2_checkpoint_path", default = None, type = str, required = True, help = "Path the TensorFlow checkpoint path.") parser.add_argument("--pytorch_dump_folder_path", default = None, type = str, required = True, help = "Path to the output PyTorch model.") parser.add_argument("--gpt2_config_file", default = "", type = str, help = "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture.") args = parser.parse_args() convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path) ================================================ FILE: pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py ================================================ # coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Convert OpenAI GPT checkpoint.""" from __future__ import absolute_import, division, print_function import argparse from io import open import torch from pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt) def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path): # Construct model if openai_config_file == "": config = OpenAIGPTConfig() else: config = OpenAIGPTConfig(openai_config_file) model = OpenAIGPTModel(config) # Load weights from numpy load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME print("Save PyTorch model to {}".format(pytorch_weights_dump_path)) torch.save(model.state_dict(), pytorch_weights_dump_path) print("Save configuration file to {}".format(pytorch_config_dump_path)) with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(config.to_json_string()) if __name__ == "__main__": parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--openai_checkpoint_folder_path", default = None, type = str, required = True, help = "Path the TensorFlow checkpoint path.") parser.add_argument("--pytorch_dump_folder_path", default = None, type = str, required = True, help = "Path to the output PyTorch model.") parser.add_argument("--openai_config_file", default = "", type = str, help = "An optional config json file corresponding to the pre-trained OpenAI model. \n" "This specifies the model architecture.") args = parser.parse_args() convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path) ================================================ FILE: pytorch_pretrained_bert/convert_pytorch_checkpoint_to_tf.py ================================================ # coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint.""" import os import argparse import torch import numpy as np import tensorflow as tf from pytorch_pretrained_bert.modeling import BertModel def convert_pytorch_checkpoint_to_tf(model:BertModel, ckpt_dir:str, model_name:str): """ :param model:BertModel Pytorch model instance to be converted :param ckpt_dir: Tensorflow model directory :param model_name: model name :return: Currently supported HF models: Y BertModel N BertForMaskedLM N BertForPreTraining N BertForMultipleChoice N BertForNextSentencePrediction N BertForSequenceClassification N BertForQuestionAnswering """ tensors_to_transopse = ( "dense.weight", "attention.self.query", "attention.self.key", "attention.self.value" ) var_map = ( ('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel') ) if not os.path.isdir(ckpt_dir): os.makedirs(ckpt_dir) session = tf.Session() state_dict = model.state_dict() tf_vars = [] def to_tf_var_name(name:str): for patt, repl in iter(var_map): name = name.replace(patt, repl) return 'bert/{}'.format(name) def assign_tf_var(tensor:np.ndarray, name:str): tmp_var = tf.Variable(initial_value=tensor) tf_var = tf.get_variable(dtype=tmp_var.dtype, shape=tmp_var.shape, name=name) op = tf.assign(ref=tf_var, value=tmp_var) session.run(tf.variables_initializer([tmp_var, tf_var])) session.run(fetches=[op, tf_var]) return tf_var for var_name in state_dict: tf_name = to_tf_var_name(var_name) torch_tensor = state_dict[var_name].numpy() if any([x in var_name for x in tensors_to_transopse]): torch_tensor = torch_tensor.T tf_tensor = assign_tf_var(tensor=torch_tensor, name=tf_name) tf_vars.append(tf_tensor) print("{0}{1}initialized".format(tf_name, " " * (60 - len(tf_name)))) saver = tf.train.Saver(tf_vars) saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt")) def main(raw_args=None): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased") parser.add_argument("--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model") parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/.bin") parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model") args = parser.parse_args(raw_args) model = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir ) convert_pytorch_checkpoint_to_tf( model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name ) if __name__ == "__main__": main() ================================================ FILE: pytorch_pretrained_bert/convert_tf_checkpoint_to_pytorch.py ================================================ # coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import re
import argparse
import tensorflow as tf
import torch
import numpy as np

from pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining, load_tf_weights_in_bert

def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--tf_checkpoint_path",
                        default = None,
                        type = str,
                        required = True,
                        help = "Path to the TensorFlow checkpoint.")
    parser.add_argument("--bert_config_file",
                        default = None,
                        type = str,
                        required = True,
                        help = "The config json file corresponding to the pre-trained BERT model. \n"
                               "This specifies the model architecture.")
    parser.add_argument("--pytorch_dump_path",
                        default = None,
                        type = str,
                        required = True,
                        help = "Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                     args.bert_config_file,
                                     args.pytorch_dump_path)

================================================
FILE: pytorch_pretrained_bert/convert_transfo_xl_checkpoint_to_pytorch.py
================================================
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""

from __future__ import absolute_import, division, print_function

import argparse
import os
import sys
from io import open

import torch

import pytorch_pretrained_bert.tokenization_transfo_xl as data_utils
from pytorch_pretrained_bert.modeling_transfo_xl import (CONFIG_NAME,
                                                         WEIGHTS_NAME,
                                                         TransfoXLConfig,
                                                         TransfoXLLMHeadModel,
                                                         load_tf_weights_in_transfo_xl)
from pytorch_pretrained_bert.tokenization_transfo_xl import (CORPUS_NAME,
                                                             VOCAB_NAME)

if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle

# We do this to be able to load python 2 datasets pickles
# See e.g.
# https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils

def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
                                             transfo_xl_config_file,
                                             pytorch_dump_folder_path,
                                             transfo_xl_dataset_file):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_NAME
        print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print("Save dataset to {}".format(pytorch_dataset_dump_path))
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig(transfo_xl_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path",
                        default = None,
                        type = str,
                        required = True,
                        help = "Path to the folder to store the PyTorch model or dataset/vocab.")
    parser.add_argument("--tf_checkpoint_path",
                        default = "",
                        type = str,
                        help = "An optional path to a TensorFlow checkpoint to be converted.")
    parser.add_argument("--transfo_xl_config_file",
                        default = "",
                        type = str,
                        help = "An optional config json file corresponding to the pre-trained Transformer-XL model. \n"
                               "This specifies the model architecture.")
    parser.add_argument("--transfo_xl_dataset_file",
                        default = "",
                        type = str,
                        help = "An optional dataset file to be converted into a vocabulary.")
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                             args.transfo_xl_config_file,
                                             args.pytorch_dump_folder_path,
                                             args.transfo_xl_dataset_file)

================================================
FILE: pytorch_pretrained_bert/file_utils.py
================================================
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
""" from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import logging import os import shutil import tempfile import fnmatch from functools import wraps from hashlib import sha256 import sys from io import open import boto3 import requests from botocore.exceptions import ClientError from tqdm import tqdm try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join( os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))) default_cache_path = os.path.join(torch_cache_home, 'pytorch_pretrained_bert') try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse try: from pathlib import Path PYTORCH_PRETRAINED_BERT_CACHE = Path( os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)) except (AttributeError, ImportError): PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) CONFIG_NAME = "config.json" WEIGHTS_NAME = "pytorch_model.bin" logger = logging.getLogger(__name__) # pylint: disable=invalid-name def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. """ url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename, cache_dir=None): """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': # File, but it doesn't exist. 
raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def split_s3_path(url): """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise EnvironmentError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url): """Check ETag on S3 object.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url, temp_file): """Pull a file directly from S3.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def http_get(url, temp_file): req = requests.get(url, stream=True) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url, cache_dir=None): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.exists(cache_dir): os.makedirs(cache_dir) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: try: response = requests.head(url, allow_redirects=True) if response.status_code != 200: etag = None else: etag = response.headers.get("ETag") except EnvironmentError: etag = None if sys.version_info[0] == 2 and etag is not None: etag = etag.decode('utf-8') filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # If we don't have a connection (etag is None) and can't identify the file # try to get the last downloaded one if not os.path.exists(cache_path) and etag is None: matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*') matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files)) if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. 
with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: output_string = json.dumps(meta) if sys.version_info[0] == 2 and isinstance(output_string, str): output_string = unicode(output_string, 'utf-8') # The beauty of python 2 meta_file.write(output_string) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename): ''' Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. ''' collection = set() with open(filename, 'r', encoding='utf-8') as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path, dot=True, lower=True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext ================================================ FILE: pytorch_pretrained_bert/modeling.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
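# (Editor's note -- illustrative sketch, not part of the original file.) The file_utils
# helpers above are what this module uses to resolve pretrained archives, e.g.:
#
#     from pytorch_pretrained_bert.file_utils import cached_path
#     local_path = cached_path("https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz")
#
# A URL is downloaded into the cache and its local path returned; an existing local
# path is returned unchanged.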
"""PyTorch BERT model.""" from __future__ import absolute_import, division, print_function, unicode_literals import copy import json import logging import math import os import shutil import tarfile import tempfile import sys from io import open import torch from torch import nn from torch.nn import CrossEntropyLoss from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", } BERT_CONFIG_NAME = 'bert_config.json' TF_WEIGHTS_NAME = 'model.ckpt' def load_tf_weights_in_bert(model, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split('/') # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["adam_v", "adam_m", "global_step"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') elif l[0] == 'squad': pointer = getattr(pointer, 'classifier') else: try: pointer = getattr(pointer, l[0]) except AttributeError: print("Skipping {}".format("/".join(name))) continue if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def gelu(x): """Implementation of the gelu activation function. 
    """Implementation of the gelu activation function.
        For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
        Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def swish(x):
    return x * torch.sigmoid(x)


ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}


class BertConfig(object):
    """Configuration class to store the configuration of a `BertModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12):
        """Constructs BertConfig.

        Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads for each attention layer in
                the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
                layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this model might
                ever be used with. Typically set this to something large just in case
                (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
                `BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
                initializing all weight matrices.
            layer_norm_eps: The epsilon used by LayerNorm.
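        Example usage (editor's illustrative sketch, mirroring the arguments above):

        ```python
        # built from an int vocabulary size plus the keyword defaults above
        config = BertConfig(vocab_size_or_config_json_file=32000)

        # ...or with every field read from a json config file
        config = BertConfig("bert_config.json")
        ```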
""" if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string()) try: from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm except ImportError: logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, entity_pos_seg=None, entity_span1_pos=None, entity_span2_pos=None, token_type_ids=None): seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) if entity_pos_seg is None: entity_pos_seg = torch.zeros_like(input_ids) if entity_span1_pos is None: entity_span1_pos = torch.zeros_like(input_ids) if entity_span2_pos is None: entity_span2_pos = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) """ pos embedding 0 """ # '0','1','2' for non-entity, entity1, and entity2 entity_seg_pos_num = 3 hidden_size = words_embeddings.shape[2] entity_seg_pos_embeddings_func = nn.Embedding(entity_seg_pos_num, hidden_size) entity_seg_pos_embeddings_func = entity_seg_pos_embeddings_func.cuda() entity_seg_pos_embeddings = entity_seg_pos_embeddings_func(entity_pos_seg) """ pos embedding 1 """ num_embeddings = 2 * (seq_length - 1) + 1 entity_span_pos_embeddings_func = nn.Embedding(num_embeddings, hidden_size) entity_span_pos_embeddings_func = entity_span_pos_embeddings_func.cuda() try: pass #print(entity_span1_pos) #entity_span1_pos_embeddings = entity_span_pos_embeddings_func(entity_span1_pos) #print(entity_span2_pos) #entity_span2_pos_embeddings = entity_span_pos_embeddings_func(entity_span2_pos) except: import pdb;pdb.set_trace() """ Different feature strategy """ # 0 #embeddings = words_embeddings + position_embeddings + token_type_embeddings + entity_seg_pos_embeddings # 0 #embeddings = words_embeddings + token_type_embeddings + entity_seg_pos_embeddings # 1 embeddings = words_embeddings + position_embeddings + token_type_embeddings # 0 #embeddings = words_embeddings + position_embeddings + token_type_embeddings+entity_span1_pos_embeddings+entity_span2_pos_embeddings # 0 #embeddings = words_embeddings + token_type_embeddings+entity_span1_pos_embeddings+entity_span2_pos_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = 
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in the BertModel forward() function)
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


class BertSelfOutput(nn.Module):
    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        self_output = self.self(input_tensor, attention_mask)
        attention_output = self.output(self_output, input_tensor)
        return attention_output


class BertIntermediate(nn.Module):
    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    def
__init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states, attention_mask): attention_output = self.attention(hidden_states, attention_mask) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() layer = BertLayer(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): all_encoder_layers = [] for layer_module in self.layer: hidden_states = layer_module(hidden_states, attention_mask) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if not output_all_encoded_layers: all_encoder_layers.append(hidden_states) return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
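        # (Editor's note) Weight tying: `decoder.weight` is assigned the word-embedding matrix
        # itself below, so the MLM head scores hidden states against the transposed input
        # embedding table and only the per-token `bias` introduces new parameters.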
        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
                                 bert_model_embedding_weights.size(0),
                                 bias=False)
        self.decoder.weight = bert_model_embedding_weights
        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states) + self.bias
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertOnlyMLMHead, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    def __init__(self, config):
        super(BertOnlyNSPHead, self).__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    def __init__(self, config, bert_model_embedding_weights):
        super(BertPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
        Download and cache the pre-trained model file if needed.

        Params:
            pretrained_model_name_or_path: either:
                - a str with the name of a pre-trained model to load selected in the list of:
                    . `bert-base-uncased`
                    . `bert-large-uncased`
                    . `bert-base-cased`
                    . `bert-large-cased`
                    . `bert-base-multilingual-uncased`
                    . `bert-base-multilingual-cased`
                    . `bert-base-chinese`
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
                - a path or url to a pretrained model archive containing:
                    . `bert_config.json` a configuration file for the model
                    . `model.chkpt` a TensorFlow checkpoint
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
            *inputs, **kwargs: additional input for the specific Bert class
                (ex: num_labels for BertForSequenceClassification)
        """
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        cache_dir = kwargs.get('cache_dir', None)
        kwargs.pop('cache_dir', None)
        from_tf = kwargs.get('from_tf', False)
        kwargs.pop('from_tf', None)

        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            archive_file = pretrained_model_name_or_path
        # redirect to the cache, if necessary
        try:
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
                    archive_file))
            return None
        if resolved_archive_file == archive_file:
            logger.info("loading archive file {}".format(archive_file))
        else:
            logger.info("loading archive file {} from cache at {}".format(
                archive_file, resolved_archive_file))
        tempdir = None
        if os.path.isdir(resolved_archive_file) or from_tf:
            serialization_dir = resolved_archive_file
        else:
            # Extract archive to temp dir
            tempdir = tempfile.mkdtemp()
            logger.info("extracting archive file {} to temp dir {}".format(
                resolved_archive_file, tempdir))
            with tarfile.open(resolved_archive_file, 'r:gz') as archive:
                archive.extractall(tempdir)
            serialization_dir = tempdir
        # Load config
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(config_file):
            # Backward compatibility with old naming format
            config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)
        config = BertConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
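        # (Editor's note -- illustrative sketch, not part of the original file.) Typical call,
        # per the docstring above:
        #     model = BertModel.from_pretrained('bert-base-uncased')
        # optionally with `cache_dir=...`, `state_dict=...`, or `from_tf=True`.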
        model = cls(config, *inputs, **kwargs)
        if state_dict is None and not from_tf:
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
            state_dict = torch.load(weights_path, map_location='cpu')
        if tempdir:
            # Clean up temp dir
            shutil.rmtree(tempdir)
        if from_tf:
            # Directly load from a TensorFlow checkpoint
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
            return load_tf_weights_in_bert(model, weights_path)
        # Load from a PyTorch state_dict
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
                model.__class__.__name__, "\n\t".join(error_msgs)))
        return model


class BertModel(BertPreTrainedModel):
    """BERT model ("Bidirectional Encoder Representations from Transformers").

    Params:
        config: a BertConfig class instance with the configuration to build a new model

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
            input sequence length in the current batch. It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.

    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by the `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states
                at the end of each attention block (i.e.
12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLS`) to train on the Next-Sentence task (see BERT's paper). Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = modeling.BertModel(config=config) all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def forward(self, input_ids, entity_seg_pos = None, entity_span1_pos=None, entity_span2_pos=None, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) if entity_seg_pos is None: entity_seg_pos = torch.zeros_like(input_ids) if entity_span1_pos is None: entity_span1_pos = torch.zeros_like(input_ids) if entity_span2_pos is None: entity_span2_pos = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, entity_seg_pos, entity_span1_pos, entity_span2_pos, token_type_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class BertForPreTraining(BertPreTrainedModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. 
Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None): # keyword args: this fork's BertModel.forward takes entity inputs before token_type_ids sequence_output, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, output_all_encoded_layers=False) prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss else: return prediction_scores, seq_relationship_score class BertForMaskedLM(BertPreTrainedModel): """BERT model with the masked language modeling head. This module comprises the BERT model followed by the masked language modeling head.
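For orientation, greedy token predictions can be read off the MLM logits with an argmax over the vocabulary axis; a shape-only sketch with random stand-in logits:

```python
import torch

logits = torch.randn(2, 3, 32000)      # [batch_size, sequence_length, vocab_size]
predicted_ids = logits.argmax(dim=-1)  # [batch_size, sequence_length], one vocab id per position
```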
Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] Outputs: if `masked_lm_labels` is not `None`: Outputs the masked language modeling loss. if `masked_lm_labels` is `None`: Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForMaskedLM(config) masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None): # keyword args: this fork's BertModel.forward takes entity inputs before token_type_ids sequence_output, _ = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, output_all_encoded_layers=False) prediction_scores = self.cls(sequence_output) if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) return masked_lm_loss else: return prediction_scores class BertForNextSentencePrediction(BertPreTrainedModel): """BERT model with next sentence prediction head. This module comprises the BERT model followed by the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1].
It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `next_sentence_label` is not `None`: Outputs the next sentence classification loss. if `next_sentence_label` is `None`: Outputs the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForNextSentencePrediction(config) seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None): # keyword args: this fork's BertModel.forward takes entity inputs before token_type_ids _, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, output_all_encoded_layers=False) seq_relationship_score = self.cls(pooled_output) if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) return next_sentence_loss else: return seq_relationship_score class BertForSequenceClassification(BertPreTrainedModel): """BERT model for classification. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary. Items in the batch should begin with the special "[CLS]" token. (see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels].
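Note that in this repository the forward pass additionally takes entity-marker tensors (`entity_mask`, `entity_seg_pos`, `entity_span1_pos`, `entity_span2_pos`). A hedged sketch of plausible 0/1 encodings for one sequence of length 8 with entity mentions at positions 2-3 and 5-6 follows; the exact construction lives in the data-processing scripts (e.g. `tacred_run_classifier.py`), so treat these values as illustrative assumptions:

```python
import torch

entity_seg_pos   = torch.tensor([[0, 0, 1, 0, 0, 1, 0, 0]])  # assumed: 1 at each entity start marker
entity_mask      = torch.tensor([[0, 0, 1, 1, 0, 1, 1, 0]])  # assumed: 1 over all entity tokens
entity_span1_pos = torch.tensor([[0, 0, 1, 1, 0, 0, 0, 0]])  # assumed: 1 over the first mention
entity_span2_pos = torch.tensor([[0, 0, 0, 0, 0, 1, 1, 0]])  # assumed: 1 over the second mention
```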
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForSequenceClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels): super(BertForSequenceClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.layernorm = nn.LayerNorm(config.hidden_size) self.layernorm_concat = nn.LayerNorm(config.hidden_size * 2) max_seq_length = 128 #self.layernorm_concat = nn.LayerNorm(config.hidden_size + max_seq_length * 2) self.layernorm_span = nn.LayerNorm(max_seq_length) self.relu = nn.ReLU() self.classifier = nn.Linear(config.hidden_size, num_labels) self.classifier_concat = nn.Linear(config.hidden_size * 2, num_labels) #self.classifier_concat = nn.Linear(config.hidden_size + max_seq_length * 2, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, entity_mask=None, entity_seg_pos=None, entity_span1_pos=None, entity_span2_pos=None, labels=None): encoded_layers, pooled_output = self.bert(input_ids, entity_seg_pos, entity_span1_pos, entity_span2_pos, token_type_ids, attention_mask, output_all_encoded_layers=False) batch_size, max_seq_length = entity_mask.shape[0], entity_mask.shape[1] diag_entity_mask_ = [] for i in range(batch_size): diag_entity_mask_.append(torch.diag(entity_mask[i]).cpu().numpy()) diag_entity_mask = torch.tensor(diag_entity_mask_).cuda() diag_entity_seg_pos_ = [] for i in range(batch_size): diag_entity_seg_pos_.append(torch.diag(entity_seg_pos[i]).cpu().numpy()) diag_entity_seg_pos = torch.tensor(diag_entity_seg_pos_, dtype=torch.float).cuda() # Get the embeddings of all entity tokens #batch_entity_emb = torch.matmul(diag_entity_mask, encoded_layers) # Get the embeddings at the entity start markers batch_entity_emb = torch.matmul(diag_entity_seg_pos, encoded_layers) """ Strategy 0: concatenate the start-of-entity marker embeddings. Bug: TODO, the per-example flattened marker embeddings come out with inconsistent lengths ([1536, 768, 1536, 1536, ...]) when the number of markers varies
""" concat_tag = 0 if concat_tag == 1: entity_marker_emb_ = [] for i in range(batch_size): marker_index = entity_seg_pos[i] per_encoded_layer = encoded_layers[i] entity_marker_emb_.append(torch.index_select(per_encoded_layer, 0, torch.nonzero(marker_index).view(-1)).view(-1).detach().cpu().numpy()) entity_emb_output = torch.tensor(entity_marker_emb_).cuda() entity_emb_output = self.dropout(entity_emb_output) """ Strategy 1: sum all the emb of entity """ entity_emb_output = batch_entity_emb.sum(dim=1) entity_emb_output = self.dropout(entity_emb_output) """ Strategy 2: pooling the emb of entity get the max value along the embedding axis """ #pooling = nn.MaxPool1d(kernel_size=max_seq_length, stride=1) #entity_emb_output = pooling( batch_entity_emb.permute(0,2,1) ).squeeze() #entity_emb_output = self.dropout(entity_emb_output) """ Strategy 3: mention pooling + position embedding """ #entity_span1_pos = self.layernorm_span(entity_span1_pos) #entity_span2_pos = self.layernorm_span(entity_span2_pos) #entity_span_concat = torch.cat((entity_span1_pos,entity_span2_pos),1) #entity_emb_output = torch.cat((entity_span_concat, entity_emb_output),1) """ Strategy TODO """ #entity_emb_output = self.layernorm(entity_emb_output) #entity_emb_output = self.layernorm_concat(entity_emb_output) #entity_emb_output = self.relu(entity_emb_output) #import pdb;pdb.set_trace() representation = entity_emb_output # Classifier without concat embedding[hidden_size] logits = self.classifier(representation) # Classifier with concat embedding[hidden_size * 2] #logits = self.classifier_concat(representation) # Classifier with [CLS] #pooled_output = self.dropout(pooled_output) #logits = self.classifier(pooled_output) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForMultipleChoice(BertPreTrainedModel): """BERT model for multiple choice tasks. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_choices`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_choices]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_choices = 2 model = BertForMultipleChoice(config, num_choices) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_choices): super(BertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None # keyword args: this fork's BertModel.forward takes entity inputs before token_type_ids _, pooled_output = self.bert(flat_input_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return loss else: return reshaped_logits class BertForTokenClassification(BertPreTrainedModel): """BERT model for token-level classification. This module is composed of the BERT model with a linear layer on top of the full hidden state of the last layer. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
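When an attention mask is supplied, only unpadded positions contribute to the loss; a minimal sketch of the selection the forward pass performs (random stand-in logits):

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels, seq_len = 2, 4
logits = torch.randn(1, seq_len, num_labels)
labels = torch.tensor([[1, 0, 1, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0]])  # last position is padding

active = attention_mask.view(-1) == 1
loss = CrossEntropyLoss()(logits.view(-1, num_labels)[active], labels.view(-1)[active])
```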
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForTokenClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels): super(BertForTokenClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): # keyword args: this fork's BertModel.forward takes entity inputs before token_type_ids sequence_output, _ = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, output_all_encoded_layers=False) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return logits, loss else: return logits class BertForQuestionAnswering(BertPreTrainedModel): """BERT model for Question Answering (span extraction). This module is composed of the BERT model with a linear layer on top of the sequence output that computes start_logits and end_logits Params: `config`: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and positions outside of the sequence are not taken into account for computing the loss. `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and positions outside of the sequence are not taken into account for computing the loss. Outputs: if `start_positions` and `end_positions` are not `None`: Outputs the total_loss which is the average of the CrossEntropy losses for the start and end token positions.
if `start_positions` or `end_positions` is `None`: Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end position tokens of shape [batch_size, sequence_length]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForQuestionAnswering(config) start_logits, end_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.bert = BertModel(config) # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version # self.dropout = nn.Dropout(config.hidden_dropout_prob) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None): # keyword args: this fork's BertModel.forward takes entity inputs before token_type_ids sequence_output, _ = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, output_all_encoded_layers=False) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split adds a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits ================================================ FILE: pytorch_pretrained_bert/modeling_gpt2.py ================================================ # coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
"""PyTorch OpenAI GPT-2 model.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import copy import json import logging import math import os import sys from io import open import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from torch.nn.parameter import Parameter from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME from .modeling import BertLayerNorm as LayerNorm logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin"} PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json"} def prune_conv1d_layer(layer, index, dim=1): """ Prune a Conv1D layer (a model parameters) to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if dim == 0: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path): """ Load tf checkpoints in a pytorch model """ try: import re import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise tf_path = os.path.abspath(gpt2_checkpoint_path) print("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array.squeeze()) for name, array in zip(names, arrays): name = name[6:] # skip "model/" name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'w' or l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'wpe' or l[0] == 'wte': pointer = getattr(pointer, l[0]) pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) class GPT2Config(object): """Configuration class to store the configuration of a `GPT2Model`. 
""" def __init__( self, vocab_size_or_config_json_file=50257, n_special=0, n_positions=1024, n_ctx=1024, n_embd=768, n_layer=12, n_head=12, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, predict_special_tokens=True ): """Constructs GPT2Config. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `GPT2Model` or a configuration json file. n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...) n_positions: Number of positional embeddings. n_ctx: Size of the causal mask (usually same as n_positions). n_embd: Dimensionality of the embeddings and hidden states. n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. layer_norm_epsilon: epsilon to use in the layer norm layers resid_pdrop: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attn_pdrop: The dropout ratio for the attention probabilities. embd_pdrop: The dropout ratio for the embeddings. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. predict_special_tokens: should we predict special tokens (when the model has a LM head) """ if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.n_special = n_special self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.predict_special_tokens = predict_special_tokens else: raise ValueError( "First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)" ) @property def total_tokens_embeddings(self): return self.vocab_size + self.n_special @classmethod def from_dict(cls, json_object): """Constructs a `GPT2Config` from a Python dictionary of parameters.""" config = GPT2Config(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `GPT2Config` from a json file of parameters.""" with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string()) class Conv1D(nn.Module): def __init__(self, nf, nx): super(Conv1D, self).__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = Parameter(w) self.bias = Parameter(torch.zeros(nf)) def forward(self, x): size_out = 
x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class Attention(nn.Module): def __init__(self, nx, n_ctx, config, scale=False, output_attentions=False, keep_multihead_output=False): super(Attention, self).__init__() n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implem] assert n_state % config.n_head == 0 self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)) self.n_head = config.n_head self.split_size = n_state self.scale = scale self.output_attentions = output_attentions self.keep_multihead_output = keep_multihead_output self.multihead_output = None self.c_attn = Conv1D(n_state * 3, nx) self.c_proj = Conv1D(n_state, nx) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.n_head, self.split_size // self.n_head) for head in heads: mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads)) self.n_head = self.n_head - len(heads) def _attn(self, q, k, v, head_mask=None): w = torch.matmul(q, k) if self.scale: w = w / math.sqrt(v.size(-1)) nd, ns = w.size(-2), w.size(-1) b = self.bias[:, :, ns-nd:ns, :ns] w = w * b - 1e4 * (1 - b) w = nn.Softmax(dim=-1)(w) w = self.attn_dropout(w) # Mask heads if we want to if head_mask is not None: w = w * head_mask if self.output_attentions: return w, torch.matmul(w, v) return torch.matmul(w, v) def merge_heads(self, x): x = x.permute(0, 2, 1, 3).contiguous() new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),) return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states def split_heads(self, x, k=False): new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head) x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states if k: return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length) else: return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) def forward(self, x, layer_past=None, head_mask=None): x = self.c_attn(x) query, key, value = x.split(self.split_size, dim=2) query = self.split_heads(query) key = self.split_heads(key, k=True) value = self.split_heads(value) if layer_past is not None: past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below key = torch.cat((past_key, key), dim=-1) value = torch.cat((past_value, value), dim=-2) present = torch.stack((key.transpose(-2, -1), value)) # transpose to have same shapes for stacking a = self._attn(query, key, value, head_mask) if self.keep_multihead_output: self.multihead_output = a self.multihead_output.retain_grad() if self.output_attentions: attentions, a = a a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a) if self.output_attentions: return attentions, a, present return a, present class MLP(nn.Module): def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd) super(MLP, self).__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, nx) self.c_proj = Conv1D(nx, n_state) self.act = gelu 
self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, x): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) return self.dropout(h2) class Block(nn.Module): def __init__(self, n_ctx, config, scale=False, output_attentions=False, keep_multihead_output=False): super(Block, self).__init__() nx = config.n_embd self.output_attentions = output_attentions self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon) self.attn = Attention(nx, n_ctx, config, scale, output_attentions, keep_multihead_output) self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon) self.mlp = MLP(4 * nx, config) def forward(self, x, layer_past=None, head_mask=None): output_attn = self.attn(self.ln_1(x), layer_past=layer_past, head_mask=head_mask) if self.output_attentions: attentions, a, present = output_attn else: a, present = output_attn x = x + a m = self.mlp(self.ln_2(x)) x = x + m if self.output_attentions: return attentions, x, present return x, present class GPT2LMHead(nn.Module): """ Language Model Head for the transformer """ def __init__(self, model_embeddings_weights, config): super(GPT2LMHead, self).__init__() self.n_embd = config.n_embd self.vocab_size = config.vocab_size self.predict_special_tokens = config.predict_special_tokens embed_shape = model_embeddings_weights.shape self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False) self.set_embeddings_weights(model_embeddings_weights) def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True): self.predict_special_tokens = predict_special_tokens self.decoder.weight = model_embeddings_weights # Tied weights def forward(self, hidden_state): lm_logits = self.decoder(hidden_state) if not self.predict_special_tokens: lm_logits = lm_logits[..., :self.vocab_size] return lm_logits class GPT2MultipleChoiceHead(nn.Module): """ Classifier Head for the transformer """ def __init__(self, config): super(GPT2MultipleChoiceHead, self).__init__() self.n_embd = config.n_embd self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation self.linear = nn.Linear(config.n_embd, 1) nn.init.normal_(self.linear.weight, std=0.02) nn.init.normal_(self.linear.bias, 0) def forward(self, hidden_states, mc_token_ids): # Classification logits # hidden_state (bsz, num_choices, seq_length, hidden_size) # mc_token_ids (bsz, num_choices) mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1)) # (bsz, num_choices, 1, hidden_size) multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2) # (bsz, num_choices, hidden_size) multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2) multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1) # (bsz, num_choices) return multiple_choice_logits class GPT2PreTrainedModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(GPT2PreTrainedModel, self).__init__() if not isinstance(config, GPT2Config): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `GPT2Config`. " "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ ) ) self.config = config def init_weights(self, module): """ Initialize the weights.
""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): """ Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `gpt2` - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance - a path or url to a pretrained model archive containing: . `gpt2_config.json` a configuration file for the model . a TensorFlow checkpoint with trained weights from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional input for the specific GPT2 class """ state_dict = kwargs.get('state_dict', None) kwargs.pop('state_dict', None) cache_dir = kwargs.get('cache_dir', None) kwargs.pop('cache_dir', None) from_tf = kwargs.get('from_tf', False) kwargs.pop('from_tf', None) num_special_tokens = kwargs.get('num_special_tokens', None) kwargs.pop('num_special_tokens', None) if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find file {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, archive_file ) ) return None try: resolved_config_file = cached_path(config_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download pretrained model configuration file.".format( config_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). 
" "We assumed '{}' was a path or url but couldn't find file {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, config_file ) ) return None if resolved_archive_file == archive_file and resolved_config_file == config_file: logger.info("loading weights file {}".format(archive_file)) logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) logger.info("loading configuration file {} from cache at {}".format( config_file, resolved_config_file)) # Load config config = GPT2Config.from_json_file(resolved_config_file) logger.info("Model config {}".format(config)) # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint (stored as NumPy array) return load_tf_weights_in_gpt2(model, resolved_archive_file) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if key.endswith(".g"): new_key = key[:-2] + ".weight" elif key.endswith(".b"): new_key = key[:-2] + ".bias" elif key.endswith(".w"): new_key = key[:-2] + ".weight" if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs ) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") start_model = model if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()): start_model = model.transformer load(start_model, prefix="") if len(missing_keys) > 0: logger.info( "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys) ) if len(unexpected_keys) > 0: logger.info( "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys) ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs)) ) # Add additional embeddings for special tokens if needed # This step also make sure we are still sharing the output and input embeddings after loading weights model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special) return model class GPT2Model(GPT2PreTrainedModel): """OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners"). GPT-2 use a single embedding matrix to store the word and special embeddings. Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]... Special tokens need to be trained during the fine-tuning if you use them. The number of special embeddings can be controled using the `set_num_special_tokens(num_special_tokens)` function. The embeddings are ordered as follow in the token embeddings matrice: [0, ---------------------- ... 
-> word embeddings config.vocab_size - 1, ______________________ config.vocab_size, ... -> special embeddings config.vocab_size + config.n_special - 1] ______________________ where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is: total_tokens_embeddings = config.vocab_size + config.n_special You should use the associated indices to index the embeddings. Params: `config`: a GPT2Config class instance with the configuration to build a new model `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. This can be used to compute head importance metrics. Default: False Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length] where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[ `position_ids`: an optional torch.LongTensor with the same shape as input_ids with the position indices (selected in the range [0, config.n_positions - 1[. `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids You can use it to add a third type of embedding to each input token in the sequence (the previous two being the word and position embeddings). The input, position and token_type embeddings are summed inside the Transformer before the first self-attention block. `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states (key and values in the attention blocks) to speed up sequential decoding (this is the presents output of the model, cf. below). `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked. Outputs a tuple consisting of: `hidden_states`: a list of all the encoded-hidden-states in the model (length of the list: number of layers + 1 for the output of the embeddings) as torch.FloatTensor of size [batch_size, sequence_length, hidden_size] (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids) `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as torch.FloatTensors. They can be reused to speed up sequential decoding.
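The `past`/`presents` mechanism exists so that sequential decoding does not recompute keys and values for tokens already processed. A hedged usage sketch (assumes `model` is a `GPT2Model` built as in the example below and `prompt_ids` is a [batch_size, prompt_len] LongTensor; the fed-back token id is arbitrary):

```python
import torch

# First pass over the whole prompt; cache the per-layer keys/values
hidden_states, presents = model(prompt_ids)
# Then feed one new token at a time, reusing the cache
next_ids = torch.LongTensor([[42]])                       # arbitrary next token id
hidden_states, presents = model(next_ids, past=presents)  # only the new token is processed
```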
Example usage: ```python # Already been converted into BPE token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) config = modeling_gpt2.GPT2Config() model = modeling_gpt2.GPT2Model(config) hidden_states, presents = model(input_ids) ``` """ def __init__(self, config, output_attentions=False, keep_multihead_output=False): super(GPT2Model, self).__init__(config) self.output_attentions = output_attentions self.wte = nn.Embedding(config.total_tokens_embeddings, config.n_embd) self.wpe = nn.Embedding(config.n_positions, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) block = Block(config.n_ctx, config, scale=True, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output) self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)]) self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) self.apply(self.init_weights) def set_num_special_tokens(self, num_special_tokens): " Update input embeddings with a new embedding matrix if needed " if self.config.n_special == num_special_tokens: return # Update config self.config.n_special = num_special_tokens # Build new embeddings and initialize all new embeddings (in particular the special tokens) old_embed = self.wte self.wte = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd) self.wte.to(old_embed.weight.device) self.init_weights(self.wte) # Copy word embeddings from the previous weights self.wte.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :] def prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) def get_multihead_outputs(self): """ Gather all multi-head outputs.
Return: list (layers) of multihead module outputs with gradients """ return [h.attn.multihead_output for h in self.h] def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None, head_mask=None): if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = past[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1) # expand, not expand_as: broadcast the mask to all layers elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility else: head_mask = [None] * self.config.n_layer input_shape = input_ids.size() input_ids = input_ids.view(-1, input_ids.size(-1)) position_ids = position_ids.view(-1, position_ids.size(-1)) inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) token_type_embeds = self.wte(token_type_ids) else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) presents = [] all_attentions = [] all_hidden_states = [] for i, (block, layer_past) in enumerate(zip(self.h, past)): all_hidden_states.append(hidden_states.view(*output_shape)) outputs = block(hidden_states, layer_past, head_mask[i]) if self.output_attentions: attentions, hidden_states, present = outputs all_attentions.append(attentions) else: hidden_states, present = outputs presents.append(present) hidden_states = self.ln_f(hidden_states) all_hidden_states.append(hidden_states.view(*output_shape)) if self.output_attentions: return all_attentions, all_hidden_states, presents return all_hidden_states, presents class GPT2LMHeadModel(GPT2PreTrainedModel): """OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners"). Params: `config`: a GPT2Config class instance with the configuration to build a new model `output_attentions`: If True, also output attention weights computed by the model at each layer. Default: False `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. This can be used to compute head importance metrics. Default: False Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length] where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[ `position_ids`: an optional torch.LongTensor with the same shape as input_ids with the position indices (selected in the range [0, config.n_positions - 1[. `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids You can use it to add a third type of embedding to each input token in the sequence (the previous two being the word and position embeddings).
The input, position and token_type embeddings are summed inside the Transformer before the first self-attention block. `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states (key and values in the attention blocks) to speed up sequential decoding (this is the presents output of the model, cf. below). `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked. Outputs: if `lm_labels` is not `None`: Outputs the language modeling loss. else a tuple: `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size] (or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids) `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as torch.FloatTensors. They can be reused to speed up sequential decoding. Example usage: ```python # Already been converted into BPE token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) config = modeling_gpt2.GPT2Config() model = modeling_gpt2.GPT2LMHeadModel(config) lm_logits, presents = model(input_ids) ``` """ def __init__(self, config, output_attentions=False, keep_multihead_output=False): super(GPT2LMHeadModel, self).__init__(config) self.transformer = GPT2Model(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output) self.lm_head = GPT2LMHead(self.transformer.wte.weight, config) self.apply(self.init_weights) def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True): """ Update input and output embeddings with a new embedding matrix. Make sure we are sharing the embeddings """ self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens self.transformer.set_num_special_tokens(num_special_tokens) self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens) def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None, head_mask=None): transformer_output = self.transformer(input_ids, position_ids, token_type_ids, past, head_mask) if self.transformer.output_attentions: all_attentions, hidden_states, presents = transformer_output else: hidden_states, presents = transformer_output hidden_states = hidden_states[-1] lm_logits = self.lm_head(hidden_states) if lm_labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = lm_labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=-1) loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) return loss if self.transformer.output_attentions: return all_attentions, lm_logits, presents return lm_logits, presents class GPT2DoubleHeadsModel(GPT2PreTrainedModel): """OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").
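The multiple-choice head selects one hidden state per choice using `mc_token_ids`; a minimal shape-only sketch of the gather it performs (mirroring `GPT2MultipleChoiceHead.forward` above, with random stand-in hidden states):

```python
import torch

bsz, num_choices, seq_len, hidden = 1, 2, 3, 8
hidden_states = torch.randn(bsz, num_choices, seq_len, hidden)
mc_token_ids = torch.tensor([[2, 1]])  # index of the classification token in each choice
index = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden)
picked = hidden_states.gather(2, index).squeeze(2)  # [bsz, num_choices, hidden]
```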
    Params:
        `config`: a GPT2Config class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
            indices selected in the range [0, config.vocab_size[
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., config.vocab_size]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
            (key and values in the attention blocks) to speed up sequential decoding
            (this is the presents output of the model, cf. below).
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
            `presents`: a list of pre-computed hidden-states (key and values in each attention block) as torch.FloatTensors.
                They can be reused to speed up sequential decoding.
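    For instance, the `presents` returned for a prefix can be fed back as `past` so that later calls
    only process the new tokens (a minimal sketch, not part of the original docstring, shown here
    with the single-head GPT2LMHeadModel documented above):
    ```python
    config = modeling_gpt2.GPT2Config()
    model = modeling_gpt2.GPT2LMHeadModel(config)
    prefix = torch.LongTensor([[31, 51, 99]])
    lm_logits, past = model(prefix)                 # full pass over the prefix
    next_token = torch.LongTensor([[15]])
    lm_logits, past = model(next_token, past=past)  # incremental pass, reusing cached keys/values
    ```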
    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)

    config = modeling_gpt2.GPT2Config()

    model = modeling_gpt2.GPT2DoubleHeadsModel(config)
    lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
    ```
    """

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config, output_attentions=output_attentions,
                                     keep_multihead_output=keep_multihead_output)
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
        self.multiple_choice_head = GPT2MultipleChoiceHead(config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with a new embedding matrix.
            Make sure we are sharing the embeddings.
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None,
                position_ids=None, past=None, head_mask=None):
        transformer_output = self.transformer(input_ids, position_ids, token_type_ids, past, head_mask)
        if self.transformer.output_attentions:
            all_attentions, hidden_states, presents = transformer_output
        else:
            hidden_states, presents = transformer_output
        hidden_states = hidden_states[-1]
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
        losses = []
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        if self.transformer.output_attentions:
            return all_attentions, lm_logits, mc_logits, presents
        return lm_logits, mc_logits, presents


================================================
FILE: pytorch_pretrained_bert/modeling_openai.py
================================================
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import copy import json import logging import math import os import sys from io import open import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from torch.nn.parameter import Parameter from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME from .modeling import BertLayerNorm as LayerNorm from .modeling_gpt2 import prune_conv1d_layer logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"} PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"} def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path): """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here) """ import re import numpy as np print("Loading weights...") names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8')) shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8')) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)] # This was used when we had a single embedding matrix for positions and tokens # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0) # del init_params[1] init_params = [arr.squeeze() for arr in init_params] try: assert model.tokens_embed.weight.shape == init_params[1].shape assert model.positions_embed.weight.shape == init_params[0].shape except AssertionError as e: e.args += (model.tokens_embed.weight.shape, init_params[1].shape) e.args += (model.positions_embed.weight.shape, init_params[0].shape) raise model.tokens_embed.weight.data = torch.from_numpy(init_params[1]) model.positions_embed.weight.data = torch.from_numpy(init_params[0]) names.pop(0) # Pop position and token embedding arrays init_params.pop(0) init_params.pop(0) for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]): name = name[6:] # skip "model/" assert name[-2:] == ":0" name = name[:-2] name = name.split('/') pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+\d+', m_name): l = re.split(r'(\d+)', m_name) else: l = [m_name] if l[0] == 'g': pointer = getattr(pointer, 'weight') elif l[0] == 'b': pointer = getattr(pointer, 'bias') elif l[0] == 'w': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def gelu(x): return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) def swish(x): return x * torch.sigmoid(x) ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu} class OpenAIGPTConfig(object): """Configuration class to store the configuration of a `OpenAIGPTModel`. 
""" def __init__( self, vocab_size_or_config_json_file=40478, n_special=0, n_positions=512, n_ctx=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, predict_special_tokens=True ): """Constructs OpenAIGPTConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `OpenAIGPTModel` or a configuration json file. n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...) n_positions: Number of positional embeddings. n_ctx: Size of the causal mask (usually same as n_positions). n_embd: Dimensionality of the embeddings and hidden states. n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. afn: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. resid_pdrop: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attn_pdrop: The dropout ratio for the attention probabilities. embd_pdrop: The dropout ratio for the embeddings. layer_norm_epsilon: epsilon to use in the layer norm layers initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. predict_special_tokens: should we predict special tokens (when the model has a LM head) """ if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.n_special = n_special self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.afn = afn self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.predict_special_tokens = predict_special_tokens else: raise ValueError( "First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)" ) @property def total_tokens_embeddings(self): return self.vocab_size + self.n_special @classmethod def from_dict(cls, json_object): """Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters.""" config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `OpenAIGPTConfig` from a json file of parameters.""" with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string()) class Conv1D(nn.Module): def __init__(self, nf, rf, nx): 
super(Conv1D, self).__init__() self.rf = rf self.nf = nf if rf == 1: # faster 1x1 conv w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = Parameter(w) self.bias = Parameter(torch.zeros(nf)) else: # was used to train LM raise NotImplementedError def forward(self, x): if self.rf == 1: size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) else: raise NotImplementedError return x class Attention(nn.Module): def __init__(self, nx, n_ctx, config, scale=False, output_attentions=False, keep_multihead_output=False): super(Attention, self).__init__() n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implem] assert n_state % config.n_head == 0 self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)) self.n_head = config.n_head self.split_size = n_state self.scale = scale self.output_attentions = output_attentions self.keep_multihead_output = keep_multihead_output self.multihead_output = None self.c_attn = Conv1D(n_state * 3, 1, nx) self.c_proj = Conv1D(n_state, 1, nx) self.attn_dropout = nn.Dropout(config.attn_pdrop) self.resid_dropout = nn.Dropout(config.resid_pdrop) def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.n_head, self.split_size // self.n_head) for head in heads: mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)]) # Prune conv1d layers self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) # Update hyper params self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads)) self.n_head = self.n_head - len(heads) def _attn(self, q, k, v, head_mask=None): w = torch.matmul(q, k) if self.scale: w = w / math.sqrt(v.size(-1)) # w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights # XD: self.b may be larger than w, so we need to crop it b = self.bias[:, :, : w.size(-2), : w.size(-1)] w = w * b + -1e9 * (1 - b) w = nn.Softmax(dim=-1)(w) w = self.attn_dropout(w) # Mask heads if we want to if head_mask is not None: w = w * head_mask if self.output_attentions: return w, torch.matmul(w, v) return torch.matmul(w, v) def merge_heads(self, x): x = x.permute(0, 2, 1, 3).contiguous() new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),) return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states def split_heads(self, x, k=False): new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head) x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states if k: return x.permute(0, 2, 3, 1) else: return x.permute(0, 2, 1, 3) def forward(self, x, head_mask=None): x = self.c_attn(x) query, key, value = x.split(self.split_size, dim=2) query = self.split_heads(query) key = self.split_heads(key, k=True) value = self.split_heads(value) a = self._attn(query, key, value, head_mask) if self.keep_multihead_output: self.multihead_output = a self.multihead_output.retain_grad() if self.output_attentions: attentions, a = a a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a) if self.output_attentions: return attentions, a return a class MLP(nn.Module): def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd) super(MLP, self).__init__() nx = config.n_embd self.c_fc = Conv1D(n_state, 1, nx) self.c_proj = 
Conv1D(nx, 1, n_state) self.act = ACT_FNS[config.afn] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, x): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) return self.dropout(h2) class Block(nn.Module): def __init__(self, n_ctx, config, scale=False, output_attentions=False, keep_multihead_output=False): super(Block, self).__init__() nx = config.n_embd self.output_attentions = output_attentions self.attn = Attention(nx, n_ctx, config, scale, output_attentions, keep_multihead_output) self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon) self.mlp = MLP(4 * nx, config) self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon) def forward(self, x, head_mask=None): a = self.attn(x, head_mask=head_mask) if self.output_attentions: attentions, a = a n = self.ln_1(x + a) m = self.mlp(n) h = self.ln_2(n + m) if self.output_attentions: return attentions, h return h class OpenAIGPTLMHead(nn.Module): """ Language Model Head for the transformer """ def __init__(self, model_embeddings_weights, config): super(OpenAIGPTLMHead, self).__init__() self.n_embd = config.n_embd self.vocab_size = config.vocab_size self.predict_special_tokens = config.predict_special_tokens embed_shape = model_embeddings_weights.shape self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False) self.set_embeddings_weights(model_embeddings_weights) def set_embeddings_weights(self, model_embeddings_weights, predict_special_tokens=True): self.predict_special_tokens = predict_special_tokens embed_shape = model_embeddings_weights.shape self.decoder.weight = model_embeddings_weights # Tied weights def forward(self, hidden_state): lm_logits = self.decoder(hidden_state) if not self.predict_special_tokens: lm_logits = lm_logits[..., :self.vocab_size] return lm_logits class OpenAIGPTMultipleChoiceHead(nn.Module): """ Classifier Head for the transformer """ def __init__(self, config): super(OpenAIGPTMultipleChoiceHead, self).__init__() self.n_embd = config.n_embd self.dropout = nn.Dropout2d(config.resid_pdrop) # To reproduce the noise_shape parameter of TF implementation self.linear = nn.Linear(config.n_embd, 1) nn.init.normal_(self.linear.weight, std=0.02) nn.init.normal_(self.linear.bias, 0) def forward(self, hidden_states, mc_token_ids): # Classification logits # hidden_state (bsz, num_choices, seq_length, hidden_size) # mc_token_ids (bsz, num_choices) mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1)) # (bsz, num_choices, 1, hidden_size) multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2) # (bsz, num_choices, hidden_size) multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2) multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1) # (bsz, num_choices) return multiple_choice_logits class OpenAIGPTPreTrainedModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(OpenAIGPTPreTrainedModel, self).__init__() if not isinstance(config, OpenAIGPTConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. " "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ ) ) self.config = config def init_weights(self, module): """ Initialize the weights. 
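            Linear and Embedding weights are drawn from a normal distribution with std
            config.initializer_range; LayerNorm weights are reset to 1.0 and biases to 0.0,
            and Linear biases are zeroed (see the body below).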
""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name_or_path, num_special_tokens=None, *inputs, **kwargs): """ Instantiate a OpenAIGPTPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `openai-gpt` - a path or url to a pretrained model archive containing: . `openai_gpt_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance - a path or url to a pretrained model archive containing: . `openai-gpt-config.json` a configuration file for the model . a series of NumPy files containing OpenAI TensorFlow trained weights from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional input for the specific OpenAI-GPT class """ state_dict = kwargs.get('state_dict', None) kwargs.pop('state_dict', None) cache_dir = kwargs.get('cache_dir', None) kwargs.pop('cache_dir', None) from_tf = kwargs.get('from_tf', False) kwargs.pop('from_tf', None) if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find file {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, archive_file ) ) return None try: resolved_config_file = cached_path(config_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download pretrained model configuration file.".format( config_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). 
" "We assumed '{}' was a path or url but couldn't find file {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, config_file ) ) return None if resolved_archive_file == archive_file and resolved_config_file == config_file: logger.info("loading weights file {}".format(archive_file)) logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) logger.info("loading configuration file {} from cache at {}".format( config_file, resolved_config_file)) # Load config config = OpenAIGPTConfig.from_json_file(resolved_config_file) logger.info("Model config {}".format(config)) # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint (stored as NumPy array) return load_tf_weights_in_openai_gpt(model, resolved_archive_file) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if key.endswith(".g"): new_key = key[:-2] + ".weight" elif key.endswith(".b"): new_key = key[:-2] + ".bias" elif key.endswith(".w"): new_key = key[:-2] + ".weight" if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs ) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") start_model = model if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()): start_model = model.transformer load(start_model, prefix="") if len(missing_keys) > 0: logger.info( "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys) ) if len(unexpected_keys) > 0: logger.info( "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys) ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs)) ) # Add additional embeddings for special tokens if needed # This step also make sure we are still sharing the output and input embeddings after loading weights model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special) return model class OpenAIGPTModel(OpenAIGPTPreTrainedModel): """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training"). OpenAI GPT use a single embedding matrix to store the word and special embeddings. Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]... Special tokens need to be trained during the fine-tuning if you use them. The number of special embeddings can be controled using the `set_num_special_tokens(num_special_tokens)` function. The embeddings are ordered as follow in the token embeddings matrice: [0, ---------------------- ... 
                                                      -> word embeddings
         config.vocab_size - 1,                      ______________________
         config.vocab_size,
         ...                                          -> special embeddings
         config.vocab_size + config.n_special - 1]   ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        `config`: an OpenAIGPTConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first self-attention block.
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        `hidden_states`: a list of all the encoded hidden states in the model (length of the list: number of layers + 1 for the output of the embeddings)
            as torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ...
d_n are the dimension of input_ids) Example usage: ```python # Already been converted into BPE token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) config = modeling_openai.OpenAIGPTConfig() model = modeling_openai.OpenAIGPTModel(config) hidden_states = model(input_ids) ``` """ def __init__(self, config, output_attentions=False, keep_multihead_output=False): super(OpenAIGPTModel, self).__init__(config) self.output_attentions = output_attentions self.tokens_embed = nn.Embedding(config.total_tokens_embeddings, config.n_embd) self.positions_embed = nn.Embedding(config.n_positions, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) block = Block(config.n_ctx, config, scale=True, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output) self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)]) self.apply(self.init_weights) def set_num_special_tokens(self, num_special_tokens): " Update input embeddings with new embedding matrice if needed " if self.config.n_special == num_special_tokens: return # Update config self.config.n_special = num_special_tokens # Build new embeddings and initialize all new embeddings (in particular the special tokens) old_embed = self.tokens_embed self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd) self.tokens_embed.to(old_embed.weight.device) self.init_weights(self.tokens_embed) # Copy word embeddings from the previous weights self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :] def prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) def get_multihead_outputs(self): """ Gather all multi-head outputs. 
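            Only meaningful when the model was instantiated with `keep_multihead_output=True`;
            otherwise each stored output is None.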
        Return: list (layers) of multihead module outputs with gradients
        """
        return [h.attn.multihead_output for h in self.h]

    def forward(self, input_ids, position_ids=None, token_type_ids=None, head_mask=None):
        if position_ids is None:
            # This was used when we had a single embedding matrix for position and token embeddings
            # start = self.config.vocab_size + self.config.n_special
            # end = start + input_ids.size(-1)
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        position_ids = position_ids.view(-1, position_ids.size(-1))

        inputs_embeds = self.tokens_embed(input_ids)
        position_embeds = self.positions_embed(position_ids)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
            token_type_embeds = self.tokens_embed(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        all_attentions = []
        all_hidden_states = [hidden_states.view(*output_shape)]
        for i, block in enumerate(self.h):
            outputs = block(hidden_states, head_mask[i])
            if self.output_attentions:
                attentions, hidden_states = outputs
                all_attentions.append(attentions)
            else:
                hidden_states = outputs
            all_hidden_states.append(hidden_states.view(*output_shape))

        if self.output_attentions:
            return all_attentions, all_hidden_states
        return all_hidden_states


class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                          ----------------------
         ...                                          -> word embeddings
         config.vocab_size - 1,                      ______________________
         config.vocab_size,
         ...                                          -> special embeddings
         config.vocab_size + config.n_special - 1]   ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        `config`: an OpenAIGPTConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer.
            Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., vocab_size]
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `lm_labels` is not `None`:
            Outputs the language modeling loss.
        else:
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTLMHeadModel(config)
    lm_logits = model(input_ids)
    ```
    """

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(OpenAIGPTLMHeadModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions,
                                          keep_multihead_output=keep_multihead_output)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with a new embedding matrix.
            Make sure we are sharing the embeddings.
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, head_mask=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids, head_mask)
        if self.transformer.output_attentions:
            all_attentions, hidden_states = hidden_states
        hidden_states = hidden_states[-1]
        lm_logits = self.lm_head(hidden_states)
        if lm_labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            return loss
        if self.transformer.output_attentions:
            return all_attentions, lm_logits
        return lm_logits


class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    """OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").

    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
    Special tokens need to be trained during the fine-tuning if you use them.
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.

    The embeddings are ordered as follows in the token embeddings matrix:
        [0,                                          ----------------------
         ...                                          -> word embeddings
         config.vocab_size - 1,                      ______________________
         config.vocab_size,
         ...                                          -> special embeddings
         config.vocab_size + config.n_special - 1]   ______________________

    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
        total_tokens_embeddings = config.vocab_size + config.n_special
    You should use the associated indices to index the embeddings.

    Params:
        `config`: an OpenAIGPTConfig class instance with the configuration to build a new model
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
            indices selected in the range [0, total_tokens_embeddings[
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
            with the position indices (selected in the range [0, config.n_positions - 1[).
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
            You can use it to add a third type of embedding to each input token in the sequence
            (the previous two being the word and position embeddings).
            The input, position and token_type embeddings are summed inside the Transformer before the first self-attention block.
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
            is only computed for the labels set in [0, ..., total_tokens_embeddings]
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_choices].
        `head_mask`: an optional torch.Tensor of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1.
            It's a mask to be used to nullify some heads of the transformer. 1.0 => head is not masked, 0.0 => head is masked.

    Outputs:
        if `lm_labels` and `multiple_choice_labels` are not `None`:
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
        else: a tuple with
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]

    Example usage:
    ```python
    # Already been converted into BPE token ids
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choices, seq length)
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)

    config = modeling_openai.OpenAIGPTConfig()

    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
    ```
    """

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
        self.transformer = OpenAIGPTModel(config, output_attentions=output_attentions,
                                          keep_multihead_output=keep_multihead_output)
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
        self.apply(self.init_weights)

    def set_num_special_tokens(self, num_special_tokens, predict_special_tokens=True):
        """ Update input and output embeddings with a new embedding matrix.
            Make sure we are sharing the embeddings.
        """
        self.config.predict_special_tokens = self.transformer.config.predict_special_tokens = predict_special_tokens
        self.transformer.set_num_special_tokens(num_special_tokens)
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight, predict_special_tokens=predict_special_tokens)

    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None,
                position_ids=None, head_mask=None):
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids, head_mask)
        if self.transformer.output_attentions:
            all_attentions, hidden_states = hidden_states
        hidden_states = hidden_states[-1]
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
        losses = []
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
        if losses:
            return losses
        if self.transformer.output_attentions:
            return all_attentions, lm_logits, mc_logits
        return lm_logits, mc_logits


================================================
FILE: pytorch_pretrained_bert/modeling_transfo_xl.py
================================================
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Transformer XL model.
Adapted from https://github.com/kimiyoung/transformer-xl. In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py """ from __future__ import absolute_import, division, print_function, unicode_literals import os import copy import json import math import logging import collections import sys from io import open import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import CrossEntropyLoss from torch.nn.parameter import Parameter from .modeling import BertLayerNorm as LayerNorm from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-pytorch_model.bin", } PRETRAINED_CONFIG_ARCHIVE_MAP = { 'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-config.json", } TF_WEIGHTS_NAME = 'model.ckpt' def build_tf_to_pytorch_map(model, config): """ A map of modules from TF to PyTorch. This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """ tf_to_pt_map = {} if hasattr(model, 'transformer'): # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax tf_to_pt_map.update({ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight, "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias}) for i, (out_l, proj_l, tie_proj) in enumerate(zip( model.crit.out_layers, model.crit.out_projs, config.tie_projs)): layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i if config.tie_weight: tf_to_pt_map.update({ layer_str + 'b': out_l.bias}) else: raise NotImplementedError # I don't think this is implemented in the TF code tf_to_pt_map.update({ layer_str + 'lookup_table': out_l.weight, layer_str + 'b': out_l.bias}) if not tie_proj: tf_to_pt_map.update({ layer_str + 'proj': proj_l }) # Now load the rest of the transformer model = model.transformer # Embeddings for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)): layer_str = "transformer/adaptive_embed/cutoff_%d/" % i tf_to_pt_map.update({ layer_str + 'lookup_table': embed_l.weight, layer_str + 'proj_W': proj_l }) # Transformer blocks for i, b in enumerate(model.layers): layer_str = "transformer/layer_%d/" % i tf_to_pt_map.update({ layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight, layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight, layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight, layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight, layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias, layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight, layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias, }) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] for b in model.layers: r_r_list.append(b.dec_attn.r_r_bias) r_w_list.append(b.dec_attn.r_w_bias) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] tf_to_pt_map.update({ 'transformer/r_r_bias': r_r_list, 'transformer/r_w_bias': r_w_list}) return tf_to_pt_map def 
load_tf_weights_in_transfo_xl(model, config, tf_path): """ Load tf checkpoints in a pytorch model """ try: import numpy as np import tensorflow as tf except ImportError: print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise # Build TF to PyTorch weights loading map tf_to_pt_map = build_tf_to_pytorch_map(model, config) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) tf_weights[name] = array for name, pointer in tf_to_pt_map.items(): assert name in tf_weights array = tf_weights[name] # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if 'kernel' in name or 'proj' in name: array = np.transpose(array) if ('r_r_bias' in name or 'r_w_bias' in name) and len(pointer) > 1: # Here we will split the TF weigths assert len(pointer) == array.shape[0] for i, p_i in enumerate(pointer): arr_i = array[i, ...] try: assert p_i.shape == arr_i.shape except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise print("Initialize PyTorch weight {} for layer {}".format(name, i)) p_i.data = torch.from_numpy(arr_i) else: try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + '/Adam', None) tf_weights.pop(name + '/Adam_1', None) print("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys()))) return model class TransfoXLConfig(object): """Configuration class to store the configuration of a `TransfoXLModel`. """ def __init__(self, vocab_size_or_config_json_file=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, tgt_len=128, ext_len=0, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, tie_weight=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02): """Constructs TransfoXLConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `TransfoXLModel` or a configuration json file. cutoffs: cutoffs for the adaptive softmax d_model: Dimensionality of the model's hidden states. d_embed: Dimensionality of the embeddings d_head: Dimensionality of the model's heads. div_val: divident value for adapative input and softmax pre_lnorm: apply LayerNorm to the input instead of the output d_inner: Inner dimension in FF n_layer: Number of hidden layers in the Transformer encoder. n_head: Number of attention heads for each attention layer in the Transformer encoder. tgt_len: number of tokens to predict ext_len: length of the extended context mem_len: length of the retained previous heads same_length: use the same attn length for all tokens proj_share_all_but_first: True to share all but first projs, False not to share. attn_type: attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al. 
clamp_len: use the same pos embeddings after clamp_len sample_softmax: number of samples in sampled softmax adaptive: use adaptive softmax tie_weight: tie the word embedding and softmax weights dropout: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. dropatt: The dropout ratio for the attention probabilities. untie_r: untie relative position biases embd_pdrop: The dropout ratio for the embeddings. init: parameter initializer to use init_range: parameters initialized by U(-init_range, init_range). proj_init_std: parameters initialized by N(0, init_std) init_std: parameters initialized by N(0, init_std) """ if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 and isinstance(vocab_size_or_config_json_file, unicode)): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.n_token = vocab_size_or_config_json_file self.cutoffs = [] self.cutoffs.extend(cutoffs) self.tie_weight = tie_weight if proj_share_all_but_first: self.tie_projs = [False] + [True] * len(self.cutoffs) else: self.tie_projs = [False] + [False] * len(self.cutoffs) self.d_model = d_model self.d_embed = d_embed self.d_head = d_head self.d_inner = d_inner self.div_val = div_val self.pre_lnorm = pre_lnorm self.n_layer = n_layer self.n_head = n_head self.tgt_len = tgt_len self.ext_len = ext_len self.mem_len = mem_len self.same_length = same_length self.attn_type = attn_type self.clamp_len = clamp_len self.sample_softmax = sample_softmax self.adaptive = adaptive self.dropout = dropout self.dropatt = dropatt self.untie_r = untie_r self.init = init self.init_range = init_range self.proj_init_std = proj_init_std self.init_std = init_std else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `TransfoXLConfig` from a Python dictionary of parameters.""" config = TransfoXLConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `TransfoXLConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """ Save this instance to a json file.""" with open(json_file_path, "w", encoding='utf-8') as writer: writer.write(self.to_json_string()) class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:,None,:].expand(-1, bsz, -1) else: return pos_emb[:,None,:] class PositionwiseFF(nn.Module): def __init__(self, d_model, 
d_inner, dropout, pre_lnorm=False): super(PositionwiseFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Linear(d_model, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout), ) self.layer_norm = LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: ##### layer normalization + positionwise feed-forward core_out = self.CoreNet(self.layer_norm(inp)) ##### residual connection output = core_out + inp else: ##### positionwise feed-forward core_out = self.CoreNet(inp) ##### residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False, r_r_bias=None, r_w_bias=None): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.q_net = nn.Linear(d_model, n_head * d_head, bias=False) self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm if r_r_bias is None or r_w_bias is None: # Biases are not shared self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head)) else: self.r_r_bias = r_r_bias self.r_w_bias = r_w_bias def forward(self, h, attn_mask=None, mems=None): ##### multihead attention # [hlen x bsz x n_head x d_head] if mems is not None: c = torch.cat([mems, h], 0) else: c = h if self.pre_lnorm: ##### layer normalization c = self.layer_norm(c) head_q = self.q_net(h) head_k, head_v = torch.chunk(self.kv_net(c), 2, -1) head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head) head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head) head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head) # [qlen x klen x bsz x n_head] attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k)) attn_score.mul_(self.scale) if attn_mask is not None and attn_mask.any().item(): if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf')) # [qlen x klen x bsz x n_head] attn_prob = F.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) # [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head] attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v)) attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) ##### linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: ##### residual connection output = h + attn_out else: ##### residual connection + layer normalization output = self.layer_norm(h + attn_out) return output class RelMultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False, r_r_bias=None, r_w_bias=None): super(RelMultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = 
nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm if r_r_bias is None or r_w_bias is None: # Biases are not shared self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head)) else: self.r_r_bias = r_r_bias self.r_w_bias = r_w_bias def _parallelogram_mask(self, h, w, left=False): mask = torch.ones((h, w)).byte() m = min(h, w) mask[:m,:m] = torch.triu(mask[:m,:m]) mask[-m:,-m:] = torch.tril(mask[-m:,-m:]) if left: return mask else: return mask.flip(0) def _shift(self, x, qlen, klen, mask, left=False): if qlen > 1: zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)), device=x.device, dtype=x.dtype) else: zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype) if left: mask = mask.flip(1) x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1) else: x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1) x = x_padded.masked_select(mask[:,:,None,None]) \ .view(qlen, klen, x.size(2), x.size(3)) return x def _rel_shift(self, x, zero_triu=False): zero_pad_shape = (x.size(0), 1) + x.size()[2:] zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=1) x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:] x_padded = x_padded.view(*x_padded_shape) x = x_padded[1:].view_as(x) if zero_triu: ones = torch.ones((x.size(0), x.size(1))) x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None] return x def forward(self, w, r, attn_mask=None, mems=None): raise NotImplementedError class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs) self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False) def forward(self, w, r, attn_mask=None, mems=None): qlen, rlen, bsz = w.size(0), r.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head #### compute attention score rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head rr_head_q = w_head_q + self.r_r_bias BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head BD = self._rel_shift(BD) # [qlen x klen x bsz x n_head] attn_score = AC + BD attn_score.mul_(self.scale) #### compute attention probability if attn_mask is not None and attn_mask.any().item(): if attn_mask.dim() == 2: attn_score = attn_score.float().masked_fill( attn_mask[None,:,:,None], -1e30).type_as(attn_score) elif attn_mask.dim() == 3: attn_score = 
attn_score.float().masked_fill( attn_mask[:,:,:,None], -1e30).type_as(attn_score) # [qlen x klen x bsz x n_head] attn_prob = F.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) #### compute attention vector attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v)) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) ##### linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: ##### residual connection output = w + attn_out else: ##### residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class RelLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs) def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None): # r_emb: [klen, n_head, d_head], used for term B # r_w_bias: [n_head, d_head], used for term C # r_bias: [klen, n_head], used for term D qlen, bsz = w.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) if klen > r_emb.size(0): r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1) r_emb = torch.cat([r_emb_pad, r_emb], 0) r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1) r_bias = torch.cat([r_bias_pad, r_bias], 0) else: r_emb = r_emb[-klen:] r_bias = r_bias[-klen:] #### compute attention score rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head BD = self._rel_shift(B_ + D_) # [qlen x klen x bsz x n_head] attn_score = AC + BD attn_score.mul_(self.scale) #### compute attention probability if attn_mask is not None and attn_mask.any().item(): if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf')) # [qlen x klen x bsz x n_head] attn_prob = F.softmax(attn_score, dim=1) attn_prob = self.dropatt(attn_prob) #### compute attention vector attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v)) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) ##### linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: ##### residual connection output = w + attn_out else: ##### residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class DecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(DecoderLayer, self).__init__() self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, 
pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelLearnableDecoderLayer, self).__init__() self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelPartialLearnableDecoderLayer, self).__init__() self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, r, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): super(AdaptiveEmbedding, self).__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append( nn.Embedding(n_token, d_embed, sparse=sample_softmax>0) ) if d_proj != d_embed: self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed))) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i)) self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i))) def forward(self, inp): if self.div_val == 1: embed = self.emb_layers[0](inp) if self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = self.emb_layers[i](inp_i) emb_i = F.linear(emb_i, self.emb_projs[i]) emb_flat.index_copy_(0, indices_i, emb_i) embed_shape = inp.size() + (self.d_proj,) embed = emb_flat.view(embed_shape) embed.mul_(self.emb_scale) return embed class TransfoXLPreTrainedModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(TransfoXLPreTrainedModel, self).__init__() if not isinstance(config, TransfoXLConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `TransfoXLConfig`.
" "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) self.config = config def init_weight(self, weight): if self.config.init == 'uniform': nn.init.uniform_(weight, -self.config.init_range, self.config.init_range) elif self.config.init == 'normal': nn.init.normal_(weight, 0.0, self.config.init_std) def init_bias(self, bias): nn.init.constant_(bias, 0.0) def init_weights(self, m): """ Initialize the weights. """ classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: self.init_weight(m.weight) if hasattr(m, 'bias') and m.bias is not None: self.init_bias(m.bias) elif classname.find('AdaptiveEmbedding') != -1: if hasattr(m, 'emb_projs'): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): self.init_weight(m.weight) elif classname.find('ProjectedAdaptiveLogSoftmax') != -1: if hasattr(m, 'cluster_weight') and m.cluster_weight is not None: self.init_weight(m.cluster_weight) if hasattr(m, 'cluster_bias') and m.cluster_bias is not None: self.init_bias(m.cluster_bias) if hasattr(m, 'out_projs'): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, self.config.init_std) if hasattr(m, 'bias') and m.bias is not None: self.init_bias(m.bias) elif classname.find('TransformerLM') != -1: if hasattr(m, 'r_emb'): self.init_weight(m.r_emb) if hasattr(m, 'r_w_bias'): self.init_weight(m.r_w_bias) if hasattr(m, 'r_r_bias'): self.init_weight(m.r_r_bias) if hasattr(m, 'r_bias'): self.init_bias(m.r_bias) def set_num_special_tokens(self, num_special_tokens): pass @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): """ Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `transfo-xl-wt103` - a path or url to a pretrained model archive containing: . `transfo_xl_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a TransfoXLModel instance - a path or url to a pretrained model archive containing: . `transfo_xl_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. 
state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models *inputs, **kwargs: additional input for the specific TransformerXL class """ state_dict = kwargs.get('state_dict', None) kwargs.pop('state_dict', None) cache_dir = kwargs.get('cache_dir', None) kwargs.pop('cache_dir', None) from_tf = kwargs.get('from_tf', False) kwargs.pop('from_tf', None) if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download pretrained weights.".format( archive_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find file {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, archive_file ) ) return None try: resolved_config_file = cached_path(config_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_CONFIG_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download pretrained model configuration file.".format( config_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find file {} " "at this path or url.".format( pretrained_model_name_or_path, ", ".join(PRETRAINED_CONFIG_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, config_file ) ) return None if resolved_archive_file == archive_file and resolved_config_file == config_file: logger.info("loading weights file {}".format(archive_file)) logger.info("loading configuration file {}".format(config_file)) else: logger.info("loading weights file {} from cache at {}".format( archive_file, resolved_archive_file)) logger.info("loading configuration file {} from cache at {}".format( config_file, resolved_config_file)) # Load config config = TransfoXLConfig.from_json_file(resolved_config_file) logger.info("Model config {}".format(config)) # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None and not from_tf: state_dict = torch.load(resolved_archive_file, map_location='cpu') if from_tf: # Directly load from a TensorFlow checkpoint return load_tf_weights_in_transfo_xl(model, config, pretrained_model_name_or_path) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') start_prefix = '' if not hasattr(model, 'transformer') and any(s.startswith('transformer.') for s in state_dict.keys()): start_prefix = 'transformer.'
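A note on the prefix juggling above: `_load_from_state_dict` is applied module by module with a growing key prefix, and `start_prefix` lets a checkpoint whose keys carry a leading `transformer.` be loaded into a bare model. Below is a minimal, self-contained sketch of that recursion on a toy module; the `transformer.`-prefixed keys are hypothetical stand-ins for a real checkpoint, which in practice would come from `TransfoXLModel.from_pretrained('transfo-xl-wt103')`.

```python
import torch
import torch.nn as nn

# Toy checkpoint whose keys carry a 'transformer.' prefix the model lacks.
state_dict = {'transformer.0.weight': torch.zeros(4, 4),
              'transformer.0.bias': torch.zeros(4)}
model = nn.Sequential(nn.Linear(4, 4))

missing_keys, unexpected_keys, error_msgs = [], [], []

def load(module, prefix=''):
    # Same recursion as above: each submodule consumes the keys under its prefix.
    module._load_from_state_dict(state_dict, prefix, {}, True,
                                 missing_keys, unexpected_keys, error_msgs)
    for name, child in module._modules.items():
        if child is not None:
            load(child, prefix + name + '.')

load(model, prefix='transformer.')    # the start_prefix trick
print(missing_keys, unexpected_keys)  # [] [] when every key matches
```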
load(model, prefix=start_prefix) if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( model.__class__.__name__, "\n\t".join(error_msgs))) # Make sure we are still sharing the input and output embeddings if hasattr(model, 'tie_weights'): model.tie_weights() return model class TransfoXLModel(TransfoXLPreTrainedModel): """Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"). Transformer XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that: - you don't need to specify positioning embeddings indices - the tokens in the vocabulary have to be sorted by decreasing frequency. Params: config: a TransfoXLConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the token indices selected in the range [0, self.config.n_token[ `mems`: optional memory of hidden states from previous forward passes as a list (num layers) of hidden states at the entry of each layer each hidden state has shape [self.config.mem_len, bsz, self.config.d_model] Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target` Outputs: A tuple of (last_hidden_state, new_mems) `last_hidden_state`: the encoded-hidden-states at the top of the model as a torch.FloatTensor of size [batch_size, sequence_length, self.config.d_model] `new_mems`: list (num layers) of updated mem states at the entry of each layer each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model] Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target` Example usage: ```python # Already been converted into BPE token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]]) config = TransfoXLConfig() model = TransfoXLModel(config) last_hidden_state, new_mems = model(input_ids) # Another time on input_ids_next using the memory: last_hidden_state, new_mems = model(input_ids_next, mems=new_mems) ``` """ def __init__(self, config): super(TransfoXLModel, self).__init__(config) self.n_token = config.n_token self.d_embed = config.d_embed self.d_model = config.d_model self.n_head = config.n_head self.d_head = config.d_head self.word_emb = AdaptiveEmbedding(config.n_token, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val) self.drop = nn.Dropout(config.dropout) self.n_layer = config.n_layer self.tgt_len = config.tgt_len self.mem_len = config.mem_len self.ext_len = config.ext_len self.max_klen = config.tgt_len + config.ext_len + config.mem_len self.attn_type = config.attn_type if not config.untie_r: self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head)) self.layers = nn.ModuleList() if config.attn_type == 0: # the default attention for i in range(config.n_layer): self.layers.append( RelPartialLearnableDecoderLayer( config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if config.untie_r else self.r_w_bias, r_r_bias=None if config.untie_r else self.r_r_bias) ) elif config.attn_type == 1: # learnable embeddings for i in range(config.n_layer): self.layers.append( RelLearnableDecoderLayer( config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len, dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if config.untie_r else self.r_w_bias, r_r_bias=None if config.untie_r else self.r_r_bias) ) elif config.attn_type in [2, 3]: # absolute embeddings for i in range(config.n_layer): self.layers.append( DecoderLayer( config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout, dropatt=config.dropatt, pre_lnorm=config.pre_lnorm, r_w_bias=None if config.untie_r else self.r_w_bias, r_r_bias=None if config.untie_r else self.r_r_bias) ) self.same_length = config.same_length self.clamp_len = config.clamp_len if self.attn_type == 0: # default attention self.pos_emb = PositionalEmbedding(self.d_model) elif self.attn_type == 1: # learnable self.r_emb = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.n_head, self.d_head)) self.r_bias = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.n_head)) elif self.attn_type == 2: # absolute standard self.pos_emb = PositionalEmbedding(self.d_model) elif self.attn_type == 3: # absolute deeper SA self.r_emb = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.n_head, self.d_head)) self.apply(self.init_weights) def backward_compatible(self): self.sample_softmax = -1 def reset_length(self, tgt_len, ext_len, mem_len): self.tgt_len = tgt_len self.mem_len = mem_len self.ext_len = ext_len def init_mems(self, data): if self.mem_len > 0: mems = [] param = next(self.parameters()) for i in range(self.n_layer): empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model, dtype=param.dtype, device=param.device) mems.append(empty) return mems else: return None def _update_mems(self, hids, mems, qlen, mlen): # does not deal with None if mems is None: return None # mems is not None assert len(hids) == len(mems), 'len(hids) != len(mems)' # There are `mlen + qlen` steps that can be cached into mems # For the next step, the last `ext_len` of the `qlen` tokens # will be used as the extended context. Hence, we only cache # the tokens from `mlen + qlen - self.ext_len - self.mem_len` # to `mlen + qlen - self.ext_len`. 
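To make the caching window described in the comment above concrete, here is a tiny worked example (editor's sketch with hypothetical sizes) evaluating the same `beg_idx`/`end_idx` formulas used in the method body:

```python
mem_len, ext_len = 4, 0   # hypothetical settings
mlen, qlen = 4, 3         # 4 cached steps, 3 freshly computed tokens

end_idx = mlen + max(0, qlen - 0 - ext_len)  # 4 + 3 = 7
beg_idx = max(0, end_idx - mem_len)          # 7 - 4 = 3

# Of the mlen + qlen = 7 available steps, positions [3, 7) survive,
# i.e. the most recent mem_len steps become the new memory.
print(beg_idx, end_idx)  # 3 7
```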
with torch.no_grad(): new_mems = [] end_idx = mlen + max(0, qlen - 0 - self.ext_len) beg_idx = max(0, end_idx - self.mem_len) for i in range(len(hids)): cat = torch.cat([mems[i], hids[i]], dim=0) new_mems.append(cat[beg_idx:end_idx].detach()) return new_mems def _forward(self, dec_inp, mems=None): qlen, bsz = dec_inp.size() word_emb = self.word_emb(dec_inp) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen if self.same_length: all_ones = word_emb.new_ones(qlen, klen) mask_len = klen - self.mem_len if mask_len > 0: mask_shift_len = qlen - mask_len else: mask_shift_len = qlen dec_attn_mask = (torch.triu(all_ones, 1+mlen) + torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1 else: dec_attn_mask = torch.triu( word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None] hids = [] if self.attn_type == 0: # default pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb) pos_emb = self.drop(pos_emb) for i, layer in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] core_out = layer(core_out, pos_emb, dec_attn_mask=dec_attn_mask, mems=mems_i) elif self.attn_type == 1: # learnable core_out = self.drop(word_emb) for i, layer in enumerate(self.layers): hids.append(core_out) if self.clamp_len > 0: r_emb = self.r_emb[i][-self.clamp_len :] r_bias = self.r_bias[i][-self.clamp_len :] else: r_emb, r_bias = self.r_emb[i], self.r_bias[i] mems_i = None if mems is None else mems[i] core_out = layer(core_out, r_emb, self.r_w_bias[i], r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i) elif self.attn_type == 2: # absolute pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb + pos_emb[-qlen:]) for i, layer in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] if mems_i is not None and i == 0: mems_i += pos_emb[:mlen] core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i) elif self.attn_type == 3: core_out = self.drop(word_emb) for i, layer in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] if mems_i is not None and mlen > 0: cur_emb = self.r_emb[i][:-qlen] cur_size = cur_emb.size(0) if cur_size < mlen: cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1) cur_emb = torch.cat([cur_emb_pad, cur_emb], 0) else: cur_emb = cur_emb[-mlen:] mems_i += cur_emb.view(mlen, 1, -1) core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1) core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i) core_out = self.drop(core_out) new_mems = self._update_mems(hids, mems, mlen, qlen) # note: arguments arrive swapped relative to the (qlen, mlen) signature; equivalent when ext_len == 0 return core_out, new_mems def forward(self, input_ids, mems=None): """ Params: input_ids :: [bsz, len] mems :: optional mems from previous forward passes (or init_mems) list (num layers) of mem states at the entry of each layer shape :: [self.config.mem_len, bsz, self.config.d_model] Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target` Returns: tuple (last_hidden, new_mems) where: new_mems: list (num layers) of mem states at the entry of each layer shape :: [self.config.mem_len, bsz, self.config.d_model] last_hidden: output of the last layer: shape :: [bsz, len, self.config.d_model] """ # the original code for Transformer-XL used shapes
[len, bsz] but we want a unified interface in the library # so we transpose here from shape [bsz, len] to shape [len, bsz] input_ids = input_ids.transpose(0, 1).contiguous() if mems is None: mems = self.init_mems(input_ids) last_hidden, new_mems = self._forward(input_ids, mems=mems) # We transpose back here to shape [bsz, len, hidden_dim] last_hidden = last_hidden.transpose(0, 1).contiguous() return (last_hidden, new_mems) class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): """Transformer XL model ("Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"). This model adds an (adaptive) softmax head on top of the TransfoXLModel. Transformer XL uses relative positioning (with sinusoidal patterns) and adaptive softmax inputs, which means that: - you don't need to specify positioning embeddings indices - the tokens in the vocabulary have to be sorted by decreasing frequency. Call self.tie_weights() if you update/load the weights of the transformer to keep the weights tied. Params: config: a TransfoXLConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the token indices selected in the range [0, self.config.n_token[ `target`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the target token indices selected in the range [0, self.config.n_token[ `mems`: an optional memory of hidden states from previous forward passes as a list (num layers) of hidden states at the entry of each layer each hidden state has shape [self.config.mem_len, bsz, self.config.d_model] Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target` Outputs: A tuple of (softmax_output, new_mems) `softmax_output`: output of the (adaptive) softmax: if target is not None: Negative log likelihood of shape [batch_size, sequence_length] else: log probabilities of tokens, shape [batch_size, sequence_length, n_tokens] `new_mems`: list (num layers) of updated mem states at the entry of each layer each mem state is a torch.FloatTensor of size [self.config.mem_len, batch_size, self.config.d_model] Note that the first two dimensions are transposed in `mems` with regard to `input_ids` and `target` Example usage: ```python # Already been converted into BPE token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_ids_next = torch.LongTensor([[53, 21, 1], [64, 23, 100]]) config = TransfoXLConfig() model = TransfoXLLMHeadModel(config) softmax_output, new_mems = model(input_ids) # Another time on input_ids_next using the memory: softmax_output, new_mems = model(input_ids_next, mems=new_mems) ``` """ def __init__(self, config): super(TransfoXLLMHeadModel, self).__init__(config) self.transformer = TransfoXLModel(config) self.sample_softmax = config.sample_softmax # use sampled softmax if config.sample_softmax > 0: self.out_layer = nn.Linear(config.d_model, config.n_token) self.sampler = LogUniformSampler(config.n_token, config.sample_softmax) # use adaptive softmax (including standard softmax) else: self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val) self.apply(self.init_weights) self.tie_weights() def tie_weights(self): """ Run this to be sure output and input (adaptive) softmax weights are tied """ # sampled softmax if self.sample_softmax > 0: if self.config.tie_weight: self.out_layer.weight = self.transformer.word_emb.weight # adaptive softmax (including standard softmax) else:
if self.config.tie_weight: for i in range(len(self.crit.out_layers)): self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight if self.config.tie_projs: for i, tie_proj in enumerate(self.config.tie_projs): if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0] elif tie_proj and self.config.div_val != 1: self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i] def reset_length(self, tgt_len, ext_len, mem_len): self.transformer.reset_length(tgt_len, ext_len, mem_len) def init_mems(self, data): return self.transformer.init_mems(data) def forward(self, input_ids, target=None, mems=None): """ Params: input_ids :: [bsz, len] target :: [bsz, len] Returns: tuple(softmax_output, new_mems) where: new_mems: list (num layers) of hidden states at the entry of each layer shape :: [mem_len, bsz, self.config.d_model] :: Warning: shapes are transposed here with regard to input_ids softmax_output: output of the (adaptive) softmax: if target is not None: Negative log likelihood of shape :: [bsz, len] else: log probabilities of tokens, shape :: [bsz, len, n_tokens] """ bsz = input_ids.size(0) tgt_len = input_ids.size(1) last_hidden, new_mems = self.transformer(input_ids, mems) pred_hid = last_hidden[:, -tgt_len:] if self.sample_softmax > 0 and self.training: assert self.config.tie_weight logit = sample_logits(self.transformer.word_emb, self.out_layer.bias, target, pred_hid, self.sampler) softmax_output = -F.log_softmax(logit, -1)[:, :, 0] else: softmax_output = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target) if target is None: softmax_output = softmax_output.view(bsz, tgt_len, -1) else: softmax_output = softmax_output.view(bsz, tgt_len) # We transpose back return (softmax_output, new_mems) ================================================ FILE: pytorch_pretrained_bert/modeling_transfo_xl_utilities.py ================================================ # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for PyTorch Transformer XL model. Directly adapted from https://github.com/kimiyoung/transformer-xl.
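Before moving into the utilities, here is a hedged, end-to-end sketch of the two model classes above on consecutive segments, with the memory threaded through. The hyper-parameters are tiny and arbitrary, the model is freshly initialized (so the values are meaningless; only shapes and the mems plumbing are demonstrated), and the token ids are chosen so that every adaptive-softmax cluster receives at least two hits, since the `nonzero().squeeze()` pattern in the adaptive embedding/softmax assumes non-singleton index sets.

```python
import torch
from pytorch_pretrained_bert.modeling_transfo_xl import TransfoXLConfig, TransfoXLLMHeadModel

config = TransfoXLConfig(vocab_size_or_config_json_file=128, cutoffs=[20, 60],
                         d_model=32, d_embed=32, n_head=2, d_head=16,
                         d_inner=64, n_layer=2, div_val=2, mem_len=8)
model = TransfoXLLMHeadModel(config)
model.eval()

# [bsz, len]; ids picked so each cluster ([0,20), [20,60), [60,128)) is hit twice+.
seg1 = torch.tensor([[0, 3, 25, 33, 70], [8, 15, 41, 50, 90]])
seg2 = torch.tensor([[1, 2, 30, 40, 100], [5, 9, 35, 45, 110]])

with torch.no_grad():
    log_probs, mems = model(seg1)                    # no target -> log probs [2, 5, 128]
    nll, mems = model(seg2, target=seg2, mems=mems)  # target -> per-token NLL [2, 5]
print(log_probs.shape, nll.shape)
```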
""" from collections import defaultdict import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # CUDA_MAJOR = int(torch.version.cuda.split('.')[0]) # CUDA_MINOR = int(torch.version.cuda.split('.')[1]) class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False): super(ProjectedAdaptiveLogSoftmax, self).__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers = nn.ModuleList() self.out_projs = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append( nn.Parameter(torch.Tensor(d_proj, d_embed)) ) else: self.out_projs.append(None) self.out_layers.append(nn.Linear(d_embed, n_token)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.out_projs.append( nn.Parameter(torch.Tensor(d_proj, d_emb_i)) ) self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx)) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj): if proj is None: logit = F.linear(hidden, weight, bias=bias) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: proj_hid = F.linear(hidden, proj.t().contiguous()) logit = F.linear(proj_hid, weight, bias=bias) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def forward(self, hidden, target=None, keep_order=False): ''' Params: hidden :: [len*bsz x d_proj] target :: [len*bsz] Return: if target is None: out :: [len*bsz] Negative log likelihood else: out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary We could replace this implementation by the native PyTorch one if their's had an option to set bias on all clusters in the native one. 
here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138 ''' if target is not None: target = target.view(-1) if hidden.size(0) != target.size(0): raise RuntimeError('Input and target should have the same size ' 'in the batch dimension.') if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) if target is not None: out = -F.log_softmax(logit, dim=-1) \ .gather(1, target.unsqueeze(1)).squeeze(1) else: out = F.log_softmax(logit, dim=-1) else: # construct weights and biases weights, biases = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0] head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) if target is None: out = hidden.new_empty((head_logit.size(0), self.n_token)) else: out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] if target is not None: mask_i = (target >= l_idx) & (target < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i = target.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) hidden_i = hidden.index_select(0, indices_i) else: hidden_i = hidden if i == 0: if target is not None: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i] tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster if target is not None: logprob_i = head_logprob_i[:, cluster_prob_idx] \ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i out[:, l_idx:r_idx] = logprob_i if target is not None: if (hasattr(self, 'keep_order') and self.keep_order) or keep_order: out.index_copy_(0, indices_i, -logprob_i) else: out[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return out def log_prob(self, hidden): r""" Computes log probabilities for all :math:`n\_classes` From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py Args: hidden (Tensor): a minibatch of examples Returns: log-probabilities for each class :math:`c` in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
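A quick shape check of the two `forward` code paths above (editor's sketch; the layer is constructed standalone here, without the weight initialization the model classes normally apply, so the numbers are arbitrary and only the shapes matter; targets are chosen so every cluster gets at least two hits):

```python
import torch
from pytorch_pretrained_bert.modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax

n_token, d_embed, d_proj = 100, 16, 16
crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj,
                                   cutoffs=[30, 70], div_val=2)

hidden = torch.randn(8, d_proj)                        # [len*bsz, d_proj]
target = torch.tensor([0, 5, 10, 35, 40, 75, 80, 90])  # 2+ hits per cluster

nll = crit(hidden, target)   # per-position NLL, shape [8]
log_probs = crit(hidden)     # full distribution, shape [8, 100]
print(nll.shape, log_probs.shape)
```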
Shape: - Input: :math:`(N, in\_features)` - Output: :math:`(N, n\_classes)` """ if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) return F.log_softmax(logit, dim=-1) else: # construct weights and biases weights, biases = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0] head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) out = hidden.new_empty((head_logit.size(0), self.n_token)) head_logprob = F.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1] if i == 0: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i] tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i # this cluster's log-prob column in the head, as in forward() out[:, start_idx:stop_idx] = logprob_i return out class LogUniformSampler(object): def __init__(self, range_max, n_sample): """ Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` expected count can be approximated by 1 - (1 - p)^n and we use a numerically stable version -expm1(num_tries * log1p(-p)) Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run """ with torch.no_grad(): self.range_max = range_max log_indices = torch.arange(1., range_max+2., 1.).log_() self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1] # print('P', self.dist.numpy().tolist()[-30:]) self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float() self.n_sample = n_sample def sample(self, labels): """ labels: [b1, b2] Return true_log_probs: [b1, b2] samp_log_probs: [n_sample] neg_samples: [n_sample] """ # neg_samples = torch.empty(0).long() n_sample = self.n_sample n_tries = 2 * n_sample with torch.no_grad(): neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique() device = labels.device neg_samples = neg_samples.to(device) true_log_probs = self.log_q[labels].to(device) samp_log_probs = self.log_q[neg_samples].to(device) return true_log_probs, samp_log_probs, neg_samples def sample_logits(embedding, bias, labels, inputs, sampler): """ embedding: an nn.Embedding layer bias: [n_vocab] labels: [b1, b2] inputs: [b1, b2, n_emb] sampler: you may use a LogUniformSampler Return logits: [b1, b2, 1 + n_sample] """ true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels) n_sample = neg_samples.size(0) b1, b2 = labels.size(0), labels.size(1) all_ids = torch.cat([labels.view(-1), neg_samples]) all_w = embedding(all_ids) true_w = all_w[: -n_sample].view(b1, b2, -1) sample_w = all_w[- n_sample:].view(n_sample, -1) all_b = bias[all_ids] true_b = all_b[: -n_sample].view(b1, b2) sample_b = all_b[- n_sample:] hit = (labels[:, :,
None] == neg_samples).detach() true_logits = torch.einsum('ijk,ijk->ij', [true_w, inputs]) + true_b - true_log_probs sample_logits = torch.einsum('lk,ijk->ijl', [sample_w, inputs]) + sample_b - samp_log_probs sample_logits.masked_fill_(hit, -1e30) logits = torch.cat([true_logits[:, :, None], sample_logits], -1) return logits # class LogUniformSampler(object): # def __init__(self, range_max, unique=False): # """ # Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py # `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` # """ # self.range_max = range_max # log_indices = torch.arange(1., range_max+2., 1.).log_() # self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1] # self.unique = unique # if self.unique: # self.exclude_mask = torch.ByteTensor(range_max).fill_(0) # def sample(self, n_sample, labels): # pos_sample, new_labels = labels.unique(return_inverse=True) # n_pos_sample = pos_sample.size(0) # n_neg_sample = n_sample - n_pos_sample # if self.unique: # self.exclude_mask.index_fill_(0, pos_sample, 1) # sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0) # self.exclude_mask.index_fill_(0, pos_sample, 0) # else: # sample_dist = self.dist # neg_sample = torch.multinomial(sample_dist, n_neg_sample) # sample = torch.cat([pos_sample, neg_sample]) # sample_prob = self.dist[sample] # return new_labels, sample, sample_prob if __name__ == '__main__': S, B = 3, 4 n_vocab = 10000 n_sample = 5 H = 32 labels = torch.LongTensor(S, B).random_(0, n_vocab) # sampler = LogUniformSampler(n_vocab, unique=False) # new_labels, sample, sample_prob = sampler.sample(n_sample, labels) sampler = LogUniformSampler(n_vocab, n_sample)#, unique=True) # true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels) # print('true_probs', true_probs.numpy().tolist()) # print('samp_probs', samp_probs.numpy().tolist()) # print('neg_samples', neg_samples.numpy().tolist()) # print('sum', torch.sum(sampler.dist).item()) # assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item() embedding = nn.Embedding(n_vocab, H) bias = torch.zeros(n_vocab) inputs = torch.Tensor(S, B, H).normal_() logits = sample_logits(embedding, bias, labels, inputs, sampler) print('logits', logits.detach().numpy().tolist()) print('logits shape', logits.size()) ================================================ FILE: pytorch_pretrained_bert/optimization.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
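Closing out the sampled-softmax utilities above: the proposal distribution used by `LogUniformSampler` can be checked directly by evaluating the documented formula `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`:

```python
import torch

range_max = 10000
log_indices = torch.arange(1., range_max + 2., 1.).log_()
dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]

print(dist.sum().item())   # ~1.0: a proper distribution over the vocabulary
print(dist[:3].tolist())   # frequent (low) ids carry most of the mass
```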
"""PyTorch optimization for BERT model.""" import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ import logging import abc import sys logger = logging.getLogger(__name__) if sys.version_info >= (3, 4): ABC = abc.ABC else: ABC = abc.ABCMeta('ABC', (), {}) class _LRSchedule(ABC): """ Parent of all LRSchedules here. """ warn_t_total = False # is set to True for schedules where progressing beyond t_total steps doesn't make sense def __init__(self, warmup=0.002, t_total=-1, **kw): """ :param warmup: what fraction of t_total steps will be used for linear warmup :param t_total: how many training steps (updates) are planned :param kw: """ super(_LRSchedule, self).__init__(**kw) if t_total < 0: logger.warning("t_total value of {} results in schedule not being applied".format(t_total)) if not 0.0 <= warmup < 1.0 and not warmup == -1: raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) warmup = max(warmup, 0.) self.warmup, self.t_total = float(warmup), float(t_total) self.warned_for_t_total_at_progress = -1 def get_lr(self, step, nowarn=False): """ :param step: which of t_total steps we're on :param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps :return: learning rate multiplier for current update """ if self.t_total < 0: return 1. progress = float(step) / self.t_total ret = self.get_lr_(progress) # warning for exceeding t_total (only active with warmup_linear if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress: logger.warning( "Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly." .format(ret, self.__class__.__name__)) self.warned_for_t_total_at_progress = progress # end warning return ret @abc.abstractmethod def get_lr_(self, progress): """ :param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress :return: learning rate multiplier for current update """ return 1. class ConstantLR(_LRSchedule): def get_lr_(self, progress): return 1. class WarmupCosineSchedule(_LRSchedule): """ Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps. Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve. If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup. """ warn_t_total = True def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw): """ :param warmup: see LRSchedule :param t_total: see LRSchedule :param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1. :param kw: """ super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw) self.cycles = cycles def get_lr_(self, progress): if progress < self.warmup: return progress / self.warmup else: progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress)) class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule): """ Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps. If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying learning rate (with hard restarts). 
""" def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw): super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw) assert(cycles >= 1.) def get_lr_(self, progress): if progress < self.warmup: return progress / self.warmup else: progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1))) return ret class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule): """ All training progress is divided in `cycles` (default=1.) parts of equal length. Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1., followed by a learning rate decreasing from 1. to 0. following a cosine curve. """ def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw): assert(warmup * cycles < 1.) warmup = warmup * cycles if warmup >= 0 else warmup super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw) def get_lr_(self, progress): progress = progress * self.cycles % 1. if progress < self.warmup: return progress / self.warmup else: progress = (progress - self.warmup) / (1 - self.warmup) # progress after warmup ret = 0.5 * (1. + math.cos(math.pi * progress)) return ret class WarmupConstantSchedule(_LRSchedule): """ Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps. Keeps learning rate equal to 1. after warmup. """ def get_lr_(self, progress): if progress < self.warmup: return progress / self.warmup return 1. class WarmupLinearSchedule(_LRSchedule): """ Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps. Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps. """ warn_t_total = True def get_lr_(self, progress): if progress < self.warmup: return progress / self.warmup return max((progress - 1.) / (self.warmup - 1.), 0.) SCHEDULES = { None: ConstantLR, "none": ConstantLR, "warmup_cosine": WarmupCosineSchedule, "warmup_constant": WarmupConstantSchedule, "warmup_linear": WarmupLinearSchedule } class BertAdam(Optimizer): """Implements BERT version of Adam algorithm with weight decay fix. Params: lr: learning rate warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 t_total: total number of training steps for the learning rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1 schedule: schedule to use for the warmup (see above). Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below). If `None` or `'none'`, learning rate is always kept constant. Default : `'warmup_linear'` betas: Adams betas. Default: (0.9, 0.999) e: Adams epsilon. Default: 1e-6 weight_decay: Weight decay. Default: 0.01 max_grad_norm: Maximum norm for the gradients (-1 means no clipping). 
Default: 1.0 """ def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', betas=(0.9, 0.999), e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs): if lr is not required and lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES: raise ValueError("Invalid schedule parameter: {}".format(schedule)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {} - should be in [0.0, 1.0[".format(betas[1])) if not e >= 0.0: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) # initialize schedule object if not isinstance(schedule, _LRSchedule): schedule_type = SCHEDULES[schedule] schedule = schedule_type(warmup=warmup, t_total=t_total) else: if warmup != -1 or t_total != -1: logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. " "Please specify custom warmup and t_total in _LRSchedule object.") defaults = dict(lr=lr, schedule=schedule, betas=betas, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(BertAdam, self).__init__(params, defaults) def get_lr(self): lr = [] for group in self.param_groups: for p in group['params']: state = self.state[p] if len(state) == 0: return [0] lr_scheduled = group['lr'] lr_scheduled *= group['schedule'].get_lr(state['step']) lr.append(lr_scheduled) return lr def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['next_m'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['next_v'] = torch.zeros_like(p.data) next_m, next_v = state['next_m'], state['next_v'] beta1, beta2 = group['betas'] # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm']) # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group['e']) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. 
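In practice the decoupled decay described in the comment above (the `step()` body continues below) is paired with parameter groups so that biases and LayerNorm weights are exempt, as in this repo's example scripts. A hedged sketch; `model` here is a stand-in module and `num_train_steps` an assumed constant:

```python
import torch.nn as nn
from pytorch_pretrained_bert.optimization import BertAdam

model = nn.Linear(8, 2)   # stand-in for a BERT model
num_train_steps = 1000    # assumed: dataset size * epochs / batch size

no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
named = list(model.named_parameters())
grouped_parameters = [
    {'params': [p for n, p in named if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},
    {'params': [p for n, p in named if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]
optimizer = BertAdam(grouped_parameters, lr=5e-5,
                     warmup=0.1, t_total=num_train_steps)
```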
if group['weight_decay'] > 0.0: update += group['weight_decay'] * p.data lr_scheduled = group['lr'] lr_scheduled *= group['schedule'].get_lr(state['step']) update_with_lr = lr_scheduled * update p.data.add_(-update_with_lr) state['step'] += 1 # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 # No bias correction # bias_correction1 = 1 - beta1 ** state['step'] # bias_correction2 = 1 - beta2 ** state['step'] return loss ================================================ FILE: pytorch_pretrained_bert/optimization_openai.py ================================================ # coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for OpenAI GPT model.""" import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ import logging from .optimization import SCHEDULES, _LRSchedule, WarmupCosineWithWarmupRestartsSchedule, \ WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule, WarmupLinearSchedule, WarmupConstantSchedule logger = logging.getLogger(__name__) class OpenAIAdam(Optimizer): """Implements Open AI version of Adam algorithm with weight decay fix. """ def __init__(self, params, lr=required, schedule='warmup_linear', warmup=-1, t_total=-1, betas=(0.9, 0.999), e=1e-8, weight_decay=0, vector_l2=False, max_grad_norm=-1, **kwargs): if lr is not required and lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES: raise ValueError("Invalid schedule parameter: {}".format(schedule)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {} - should be in [0.0, 1.0[".format(betas[1])) if not e >= 0.0: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) # initialize schedule object if not isinstance(schedule, _LRSchedule): schedule_type = SCHEDULES[schedule] schedule = schedule_type(warmup=warmup, t_total=t_total) else: if warmup != -1 or t_total != -1: logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. " "Please specify custom warmup and t_total in _LRSchedule object.") defaults = dict(lr=lr, schedule=schedule, betas=betas, e=e, weight_decay=weight_decay, vector_l2=vector_l2, max_grad_norm=max_grad_norm) super(OpenAIAdam, self).__init__(params, defaults) def get_lr(self): lr = [] for group in self.param_groups: for p in group['params']: state = self.state[p] if len(state) == 0: return [0] lr_scheduled = group['lr'] lr_scheduled *= group['schedule'].get_lr(state['step']) lr.append(lr_scheduled) return lr def step(self, closure=None): """Performs a single optimization step. 
Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm']) # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) denom = exp_avg_sq.sqrt().add_(group['e']) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] lr_scheduled = group['lr'] lr_scheduled *= group['schedule'].get_lr(state['step']) step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 p.data.addcdiv_(-step_size, exp_avg, denom) # Add weight decay at the end (fixed version) if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0: p.data.add_(-lr_scheduled * group['weight_decay'], p.data) return loss ================================================ FILE: pytorch_pretrained_bert/tokenization.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
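For orientation, a minimal usage sketch of the two optimizers above; the model, data, and hyperparameters are illustrative placeholders, and the no-decay grouping mirrors the pattern used in this repository's example scripts:

import torch
from pytorch_pretrained_bert.optimization import BertAdam

model = torch.nn.Linear(10, 2)  # stand-in for a BERT model
# Biases and LayerNorm weights are conventionally excluded from weight decay.
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters, lr=5e-5, warmup=0.1, t_total=1000)
for _ in range(3):  # stand-in training loop
    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

OpenAIAdam is used the same way; the visible differences are that it applies bias correction and, by default, decays only matrix-shaped parameters unless vector_l2 is set.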
"""Tokenization classes.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import logging import os import unicodedata from io import open from .file_utils import cached_path logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, } VOCAB_NAME = 'vocab.txt' def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r", encoding="utf-8") as reader: while True: token = reader.readline() if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BertTokenizer. Args: vocab_file: Path to a one-wordpiece-per-line vocabulary file do_lower_case: Whether to lower case the input Only has an effect when do_wordpiece_only=False do_basic_tokenize: Whether to do basic tokenization before wordpiece. max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the minimum of this value (if specified) and the underlying BERT model's sequence length. never_split: List of tokens which will never be split during tokenization. Only has an effect when do_wordpiece_only=False """ if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.do_basic_tokenize = do_basic_tokenize if do_basic_tokenize: self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text, entity_pos=None): split_tokens = [] if entity_pos: entity0_start = entity_pos[0][0] entity0_end = entity_pos[0][1] entity1_start = entity_pos[1][0] entity1_end = entity_pos[1][1] if self.do_basic_tokenize: basic_tokens = text.split() basic_tokens = [word.lower() for word in basic_tokens] """ BasicTokenizer would split 'high-order' into three tokens: ['high', '-', 'order'], so plain whitespace splitting is used instead """ #for i, token in enumerate(self.basic_tokenizer.tokenize(text)): for i, token in enumerate(basic_tokens): wordpiece_tokens = self.wordpiece_tokenizer.tokenize(token) if entity_pos: if i == entity0_start: entity_pos[0][0] = len(split_tokens) if i == entity0_end: entity_pos[0][1] = len(split_tokens) if i == entity1_start: entity_pos[1][0] = len(split_tokens) if i == entity1_end: entity_pos[1][1] = len(split_tokens) for sub_token in wordpiece_tokens: split_tokens.append(sub_token) else: split_tokens = self.wordpiece_tokenizer.tokenize(text) if entity_pos is None: return split_tokens else: return split_tokens, entity_pos def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: logger.warning( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids into wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file.""" index = 0 if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_NAME) with open(vocab_file, "w", encoding="utf-8") as writer: for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!".format(vocab_file)) index = token_index writer.write(token + u'\n') index += 1 return vocab_file @classmethod def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a BertTokenizer from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True): logger.warning("The pre-trained model you are loading is a cased model but you have not set " "`do_lower_case` to False.
We are setting `do_lower_case=False` for you but " "you may want to check this behavior.") kwargs['do_lower_case'] = False elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True): logger.warning("The pre-trained model you are loading is an uncased model but you have set " "`do_lower_case` to False. We are setting `do_lower_case=True` for you " "but you may want to check this behavior.") kwargs['do_lower_case'] = True else: vocab_file = pretrained_model_name_or_path if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case self.never_split = never_split def tokenize(self, text): """Tokenizes a piece of text.""" text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
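# (An aside on the entity-aware BertTokenizer.tokenize above, sketched with made-up values. Given
#     text = "Bill Gates founded Microsoft Corp"
#     entity_pos = [[0, 1], [3, 4]]   # word-level [start, end] spans of the two entities
# WordPiece may split a single word into several pieces, so each stored boundary i is rewritten to len(split_tokens) at the moment word i is reached, turning the word-level spans into wordpiece-level spans usable for entity markers.)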
text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case and token not in self.never_split: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" if text in self.never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False ================================================ FILE: pytorch_pretrained_bert/tokenization_gpt2.py ================================================ # coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import json import logging import os import regex as re from io import open try: from functools import lru_cache except ImportError: # Just a dummy decorator to get the checks to run on python2 # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now. 
def lru_cache(): return lambda func: func from .file_utils import cached_path logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json", 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json", } PRETRAINED_MERGES_ARCHIVE_MAP = { 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt", 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt", } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'gpt2': 1024, } VOCAB_NAME = 'vocab.json' MERGES_NAME = 'merges.txt' SPECIAL_TOKENS_NAME = 'special_tokens.txt' @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. """ _chr = unichr if sys.version_info[0] == 2 else chr bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [_chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class GPT2Tokenizer(object): """ GPT-2 BPE tokenizer. Peculiarities: - Byte-level BPE """ @classmethod def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a GPT2Tokenizer from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path] special_tokens_file = None else: vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME) merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME) special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME) if not os.path.exists(special_tokens_file): special_tokens_file = None else: logger.info("loading special tokens file {}".format(special_tokens_file)) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download vocabulary.".format( vocab_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). 
" "We assumed '{}' was a path or url but couldn't find files {} and {} " "at this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, vocab_file, merges_file)) return None if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file: logger.info("loading vocabulary file {}".format(vocab_file)) logger.info("loading merges file {}".format(merges_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) logger.info("loading merges file {} from cache at {}".format( merges_file, resolved_merges_file)) if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. if special_tokens_file and 'special_tokens' not in kwargs: special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1] else: special_tokens = kwargs.pop('special_tokens', []) tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs) return tokenizer def __init__(self, vocab_file, merges_file, errors='replace', special_tokens=None, max_len=None): self.max_len = max_len if max_len is not None else int(1e12) self.encoder = json.load(open(vocab_file)) self.decoder = {v:k for k,v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v:k for k, v in self.byte_encoder.items()} bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_data] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") self.special_tokens = {} self.special_tokens_decoder = {} self.set_special_tokens(special_tokens) def __len__(self): return len(self.encoder) + len(self.special_tokens) def set_special_tokens(self, special_tokens): """ Add a list of additional tokens to the encoder. The additional tokens are indexed starting from the last index of the current vocabulary in the order of the `special_tokens` list. 
""" if not special_tokens: self.special_tokens = {} self.special_tokens_decoder = {} return self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens)) self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()} logger.info("Special tokens {}".format(self.special_tokens)) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word)-1 and word[i+1] == second: new_word.append(first+second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) self.cache[token] = word return word def tokenize(self, text): """ Tokenize a string. """ bpe_tokens = [] for token in re.findall(self.pat, text): if sys.version_info[0] == 2: token = ''.join(self.byte_encoder[ord(b)] for b in token) else: token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' ')) return bpe_tokens def convert_tokens_to_ids(self, tokens): """ Converts a sequence of tokens into ids using the vocab. """ ids = [] if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)): if tokens in self.special_tokens: return self.special_tokens[tokens] else: return self.encoder.get(tokens, 0) for token in tokens: if token in self.special_tokens: ids.append(self.special_tokens[token]) else: ids.append(self.encoder.get(token, 0)) if len(ids) > self.max_len: logger.warning( "Token indices sequence length is longer than the specified maximum " " sequence length for this OpenAI GPT model ({} > {}). 
Running this" " sequence through the model will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids, skip_special_tokens=False): """Converts a sequence of ids in BPE tokens using the vocab.""" tokens = [] for i in ids: if i in self.special_tokens_decoder: if not skip_special_tokens: tokens.append(self.special_tokens_decoder[i]) else: tokens.append(self.decoder[i]) return tokens def encode(self, text): return self.convert_tokens_to_ids(self.tokenize(text)) def decode(self, tokens, skip_special_tokens=False, clean_up_tokenization_spaces=True): text = ''.join(self.convert_ids_to_tokens(tokens, skip_special_tokens=skip_special_tokens)) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors) if clean_up_tokenization_spaces: text = text.replace('', '') text = text.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',' ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't" ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re") return text def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary and merge files to a directory.""" if not os.path.isdir(vocab_path): logger.error("Vocabulary path ({}) should be a directory".format(vocab_path)) return vocab_file = os.path.join(vocab_path, VOCAB_NAME) merge_file = os.path.join(vocab_path, MERGES_NAME) special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write(u'#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file)) index = token_index writer.write(' '.join(bpe_tokens) + u'\n') index += 1 index = len(self.encoder) with open(special_tokens_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(special_tokens_file)) index = token_index writer.write(token + u'\n') index += 1 return vocab_file, merge_file, special_tokens_file ================================================ FILE: pytorch_pretrained_bert/tokenization_openai.py ================================================ # coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tokenization classes for OpenAI GPT.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import json import logging import os import re import sys from io import open from tqdm import tqdm from .file_utils import cached_path from .tokenization import BasicTokenizer logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json", } PRETRAINED_MERGES_ARCHIVE_MAP = { 'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt", } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'openai-gpt': 512, } VOCAB_NAME = 'vocab.json' MERGES_NAME = 'merges.txt' SPECIAL_TOKENS_NAME = 'special_tokens.txt' def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def text_standardize(text): """ fixes some issues the spacy tokenizer had on books corpus also does some whitespace standardization """ text = text.replace('—', '-') text = text.replace('–', '-') text = text.replace('―', '-') text = text.replace('…', '...') text = text.replace('´', "'") text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text) text = re.sub(r'\s*\n\s*', ' \n ', text) text = re.sub(r'[^\S\n]+', ' ', text) return text.strip() class OpenAIGPTTokenizer(object): """ BPE tokenizer. Peculiarities: - lower case all inputs - uses SpaCy tokenizer and ftfy for pre-BPE tokenization if they are installed, fallback to BERT's BasicTokenizer if not. - argument special_tokens and function set_special_tokens: can be used to add additional symbols (ex: "__classify__") to a vocabulary. """ @classmethod def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path] special_tokens_file = None else: vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME) merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME) special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME) if not os.path.exists(special_tokens_file): special_tokens_file = None else: logger.info("loading special tokens file {}".format(special_tokens_file)) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download vocabulary.".format( vocab_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). 
" "We assumed '{}' was a path or url but couldn't find files {} and {} " "at this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, vocab_file, merges_file)) return None if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file: logger.info("loading vocabulary file {}".format(vocab_file)) logger.info("loading merges file {}".format(merges_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) logger.info("loading merges file {} from cache at {}".format( merges_file, resolved_merges_file)) if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. if special_tokens_file and 'special_tokens' not in kwargs: special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1] else: special_tokens = kwargs.pop('special_tokens', []) tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs) return tokenizer def __init__(self, vocab_file, merges_file, special_tokens=None, max_len=None): try: import ftfy import spacy self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat']) self.fix_text = ftfy.fix_text except ImportError: logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.") self.nlp = BasicTokenizer(do_lower_case=True, never_split=special_tokens if special_tokens is not None else []) self.fix_text = None self.max_len = max_len if max_len is not None else int(1e12) self.encoder = json.load(open(vocab_file, encoding="utf-8")) self.decoder = {v:k for k,v in self.encoder.items()} merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} self.special_tokens = {} self.special_tokens_decoder = {} self.set_special_tokens(special_tokens) def __len__(self): return len(self.encoder) + len(self.special_tokens) def set_special_tokens(self, special_tokens): """ Add a list of additional tokens to the encoder. The additional tokens are indexed starting from the last index of the current vocabulary in the order of the `special_tokens` list. 
""" if not special_tokens: self.special_tokens = {} self.special_tokens_decoder = {} return self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens)) self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()} if self.fix_text is None: # Using BERT's BasicTokenizer: we can update the tokenizer self.nlp.never_split = special_tokens logger.info("Special tokens {}".format(self.special_tokens)) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '',) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token+'' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word)-1 and word[i+1] == second: new_word.append(first+second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n ': word = '\n' self.cache[token] = word return word def tokenize(self, text): """ Tokenize a string. """ split_tokens = [] if self.fix_text is None: # Using BERT's BasicTokenizer text = self.nlp.tokenize(text) for token in text: split_tokens.extend([t for t in self.bpe(token).split(' ')]) else: # Using SpaCy & ftfy (original tokenization process of OpenAI GPT) text = self.nlp(text_standardize(self.fix_text(text))) for token in text: split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')]) return split_tokens def convert_tokens_to_ids(self, tokens): """ Converts a sequence of tokens into ids using the vocab. """ ids = [] if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)): if tokens in self.special_tokens: return self.special_tokens[tokens] else: return self.encoder.get(tokens, 0) for token in tokens: if token in self.special_tokens: ids.append(self.special_tokens[token]) else: ids.append(self.encoder.get(token, 0)) if len(ids) > self.max_len: logger.warning( "Token indices sequence length is longer than the specified maximum " " sequence length for this OpenAI GPT model ({} > {}). 
Running this" " sequence through the model will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids, skip_special_tokens=False): """Converts a sequence of ids in BPE tokens using the vocab.""" tokens = [] for i in ids: if i in self.special_tokens_decoder: if not skip_special_tokens: tokens.append(self.special_tokens_decoder[i]) else: tokens.append(self.decoder[i]) return tokens def encode(self, text): return self.convert_tokens_to_ids(self.tokenize(text)) def decode(self, ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): """Converts a sequence of ids in a string.""" tokens = self.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens) out_string = ''.join(tokens).replace('', ' ').strip() if clean_up_tokenization_spaces: out_string = out_string.replace('', '') out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',' ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't" ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re") return out_string def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary and merge files to a directory.""" if not os.path.isdir(vocab_path): logger.error("Vocabulary path ({}) should be a directory".format(vocab_path)) return vocab_file = os.path.join(vocab_path, VOCAB_NAME) merge_file = os.path.join(vocab_path, MERGES_NAME) special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write(u'#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file)) index = token_index writer.write(' '.join(bpe_tokens) + u'\n') index += 1 index = len(self.encoder) with open(special_tokens_file, 'w', encoding='utf-8') as writer: for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(special_tokens_file)) index = token_index writer.write(token + u'\n') index += 1 return vocab_file, merge_file, special_tokens_file ================================================ FILE: pytorch_pretrained_bert/tokenization_transfo_xl.py ================================================ # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. 
""" from __future__ import (absolute_import, division, print_function, unicode_literals) import glob import logging import os import sys from collections import Counter, OrderedDict from io import open import unicodedata import torch import numpy as np from .file_utils import cached_path if sys.version_info[0] == 2: import cPickle as pickle else: import pickle logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin", } VOCAB_NAME = 'vocab.bin' PRETRAINED_CORPUS_ARCHIVE_MAP = { 'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin", } CORPUS_NAME = 'corpus.bin' class TransfoXLTokenizer(object): """ Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl """ @classmethod def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a TransfoXLTokenizer. The TransfoXLTokenizer. """ if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] else: if os.path.isdir(pretrained_model_name_or_path): vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME) else: vocab_file = pretrained_model_name_or_path # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except EnvironmentError: if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: logger.error( "Couldn't reach server at '{}' to download vocabulary.".format( vocab_file)) else: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find files {} " "at this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) # Instantiate tokenizer. 
tokenizer = cls(*inputs, **kwargs) vocab_dict = torch.load(resolved_vocab_file) for key, value in vocab_dict.items(): tokenizer.__dict__[key] = value return tokenizer def __init__(self, special=[], min_freq=0, max_size=None, lower_case=False, delimiter=None, vocab_file=None, never_split=("<unk>", "<eos>", "<formula>")): self.counter = Counter() self.special = special self.min_freq = min_freq self.max_size = max_size self.lower_case = lower_case self.delimiter = delimiter self.vocab_file = vocab_file self.never_split = never_split def count_file(self, path, verbose=False, add_eos=False): if verbose: print('counting file {} ...'.format(path)) assert os.path.exists(path) sents = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos) self.counter.update(symbols) sents.append(symbols) return sents def count_sents(self, sents, verbose=False): """ sents : a list of sentences, each a list of tokenized symbols """ if verbose: print('counting {} sents ...'.format(len(sents))) for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) self.counter.update(symbols) def _build_from_file(self, vocab_file): self.idx2sym = [] self.sym2idx = OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) if '<UNK>' in self.sym2idx: self.unk_idx = self.sym2idx['<UNK>'] elif '<unk>' in self.sym2idx: self.unk_idx = self.sym2idx['<unk>'] else: raise ValueError('No <unk> token in vocabulary') def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file.""" index = 0 if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_NAME) torch.save(self.__dict__, vocab_file) return vocab_file def build_vocab(self): if self.vocab_file: print('building vocab from {}'.format(self.vocab_file)) self._build_from_file(self.vocab_file) print('final vocab size {}'.format(len(self))) else: print('building vocab with min_freq={}, max_size={}'.format( self.min_freq, self.max_size)) self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: self.add_special(sym) for sym, cnt in self.counter.most_common(self.max_size): if cnt < self.min_freq: break self.add_symbol(sym) print('final vocab size {} from {} unique tokens'.format( len(self), len(self.counter))) def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False): if verbose: print('encoding file {} ...'.format(path)) assert os.path.exists(path) encoded = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def encode_sents(self, sents, ordered=False, verbose=False): if verbose: print('encoding {} sents ...'.format(len(sents))) encoded = [] for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def add_special(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym]) def add_symbol(self, sym): if sym not in
self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 def get_sym(self, idx): assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx) return self.idx2sym[idx] def get_idx(self, sym): if sym in self.sym2idx: return self.sym2idx[sym] else: # print('encounter unk {}'.format(sym)) # assert '<eos>' not in sym if hasattr(self, 'unk_idx'): return self.sym2idx.get(sym, self.unk_idx) # Backward compatibility with pre-trained models elif '<unk>' in self.sym2idx: return self.sym2idx['<unk>'] elif '<UNK>' in self.sym2idx: return self.sym2idx['<UNK>'] else: raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement') def convert_ids_to_tokens(self, indices): """Converts a sequence of indices into symbols using the vocab.""" return [self.get_sym(idx) for idx in indices] def convert_tokens_to_ids(self, symbols): """Converts a sequence of symbols into ids using the vocab.""" return [self.get_idx(sym) for sym in symbols] def convert_to_tensor(self, symbols): return torch.LongTensor(self.convert_tokens_to_ids(symbols)) def decode(self, indices, exclude=None): """Converts a sequence of indices into a string.""" if exclude is None: return ' '.join([self.get_sym(idx) for idx in indices]) else: return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude]) def __len__(self): return len(self.idx2sym) def tokenize(self, line, add_eos=False, add_double_eos=False): line = line.strip() # convert to lower case if self.lower_case: line = line.lower() # empty delimiter '' will evaluate False if self.delimiter == '': symbols = line else: symbols = line.split(self.delimiter) if add_double_eos: # lm1b return ['<S>'] + symbols + ['<S>'] elif add_eos: return symbols + ['<eos>'] else: return symbols class LMOrderedIterator(object): def __init__(self, data, bsz, bptt, device='cpu', ext_len=None): """ data -- LongTensor -- the LongTensor is strictly ordered """ self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device # Work out how cleanly we can divide the dataset into bsz parts. self.n_step = data.size(0) // bsz # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data.narrow(0, 0, self.n_step * bsz) # Evenly divide the data across the bsz batches. self.data = data.view(bsz, -1).t().contiguous().to(device) # Number of mini-batches self.n_batch = (self.n_step + self.bptt - 1) // self.bptt def get_batch(self, i, bptt=None): if bptt is None: bptt = self.bptt seq_len = min(bptt, self.data.size(0) - 1 - i) end_idx = i + seq_len beg_idx = max(0, i - self.ext_len) data = self.data[beg_idx:end_idx] target = self.data[i+1:i+1+seq_len] data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) return data_out, target_out, seq_len def get_fixlen_iter(self, start=0): for i in range(start, self.data.size(0) - 1, self.bptt): yield self.get_batch(i) def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3): max_len = self.bptt + max_deviation * std i = start while True: bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
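# (A worked example of the ordered batching above, with toy numbers: a stream of 10 tokens with bsz=2 gives n_step=5, so self.data has shape [5, 2], where column 0 holds tokens 0..4 and column 1 holds tokens 5..9; get_batch(i) then returns [bsz, seq_len] inputs together with targets shifted one position ahead.)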
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std)))) data, target, seq_len = self.get_batch(i, bptt) i += seq_len yield data, target, seq_len if i >= self.data.size(0) - 2: break def __iter__(self): return self.get_fixlen_iter() class LMShuffledIterator(object): def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False): """ data -- list[LongTensor] -- there is no order among the LongTensors """ self.data = data self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self): # index iterator epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \ else np.array(range(len(self.data))) # sentence iterator for idx in epoch_indices: yield self.data[idx] def stream_iterator(self, sent_stream): # streams for each data in the batch streams = [None] * self.bsz data = torch.LongTensor(self.bptt, self.bsz) target = torch.LongTensor(self.bptt, self.bsz) n_retain = 0 while True: # data : [n_retain+bptt x bsz] # target : [bptt x bsz] data[n_retain:].fill_(-1) target.fill_(-1) valid_batch = True for i in range(self.bsz): n_filled = 0 try: while n_filled < self.bptt: if streams[i] is None or len(streams[i]) <= 1: streams[i] = next(sent_stream) # number of new tokens to fill in n_new = min(len(streams[i]) - 1, self.bptt - n_filled) # first n_retain tokens are retained from last batch data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \ streams[i][:n_new] target[n_filled:n_filled+n_new, i] = \ streams[i][1:n_new+1] streams[i] = streams[i][n_new:] n_filled += n_new except StopIteration: valid_batch = False break if not valid_batch: return data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) yield data_out, target_out, self.bptt n_retain = min(data.size(0), self.ext_len) if n_retain > 0: data[:n_retain] = data[-n_retain:] data.resize_(n_retain + self.bptt, data.size(1)) def __iter__(self): # sent_stream is an iterator sent_stream = self.get_sent_stream() for batch in self.stream_iterator(sent_stream): yield batch class LMMultiFileIterator(LMShuffledIterator): def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None, shuffle=False): self.paths = paths self.vocab = vocab self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self, path): sents = self.vocab.encode_file(path, add_double_eos=True) if self.shuffle: np.random.shuffle(sents) sent_stream = iter(sents) return sent_stream def __iter__(self): if self.shuffle: np.random.shuffle(self.paths) for path in self.paths: # sent_stream is an iterator sent_stream = self.get_sent_stream(path) for batch in self.stream_iterator(sent_stream): yield batch class TransfoXLCorpus(object): @classmethod def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a pre-processed corpus. """ vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP: corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path] else: corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME) # redirect to the cache, if necessary try: resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Corpus '{}' was not found in corpus list ({}). 
" "We assumed '{}' was a path or url but couldn't find files {} " "at this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, corpus_file)) return None if resolved_corpus_file == corpus_file: logger.info("loading corpus file {}".format(corpus_file)) else: logger.info("loading corpus file {} from cache at {}".format( corpus_file, resolved_corpus_file)) # Instantiate tokenizer. corpus = cls(*inputs, **kwargs) corpus_dict = torch.load(resolved_corpus_file) for key, value in corpus_dict.items(): corpus.__dict__[key] = value corpus.vocab = vocab if corpus.train is not None: corpus.train = torch.tensor(corpus.train, dtype=torch.long) if corpus.valid is not None: corpus.valid = torch.tensor(corpus.valid, dtype=torch.long) if corpus.test is not None: corpus.test = torch.tensor(corpus.test, dtype=torch.long) return corpus def __init__(self, *args, **kwargs): self.vocab = TransfoXLTokenizer(*args, **kwargs) self.dataset = None self.train = None self.valid = None self.test = None def build_corpus(self, path, dataset): self.dataset = dataset if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']: self.vocab.count_file(os.path.join(path, 'train.txt')) self.vocab.count_file(os.path.join(path, 'valid.txt')) self.vocab.count_file(os.path.join(path, 'test.txt')) elif self.dataset == 'wt103': self.vocab.count_file(os.path.join(path, 'train.txt')) elif self.dataset == 'lm1b': train_path_pattern = os.path.join( path, '1-billion-word-language-modeling-benchmark-r13output', 'training-monolingual.tokenized.shuffled', 'news.en-*') train_paths = glob.glob(train_path_pattern) # the vocab will load from file when build_vocab() is called self.vocab.build_vocab() if self.dataset in ['ptb', 'wt2', 'wt103']: self.train = self.vocab.encode_file( os.path.join(path, 'train.txt'), ordered=True) self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=True) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=True) elif self.dataset in ['enwik8', 'text8']: self.train = self.vocab.encode_file( os.path.join(path, 'train.txt'), ordered=True, add_eos=False) self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=True, add_eos=False) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=True, add_eos=False) elif self.dataset == 'lm1b': self.train = train_paths self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True) def get_iterator(self, split, *args, **kwargs): if split == 'train': if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(self.train, *args, **kwargs) elif self.dataset == 'lm1b': kwargs['shuffle'] = True data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs) elif split in ['valid', 'test']: data = self.valid if split == 'valid' else self.test if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(data, *args, **kwargs) elif self.dataset == 'lm1b': data_iter = LMShuffledIterator(data, *args, **kwargs) return data_iter def get_lm_corpus(datadir, dataset): fn = os.path.join(datadir, 'cache.pt') fn_pickle = os.path.join(datadir, 'cache.pkl') if os.path.exists(fn): print('Loading cached dataset...') corpus = torch.load(fn_pickle) elif os.path.exists(fn): print('Loading cached dataset from pickle...') with open(fn, 
"rb") as fp: corpus = pickle.load(fp) else: print('Producing dataset {}...'.format(dataset)) kwargs = {} if dataset in ['wt103', 'wt2']: kwargs['special'] = [''] kwargs['lower_case'] = False elif dataset == 'ptb': kwargs['special'] = [''] kwargs['lower_case'] = True elif dataset == 'lm1b': kwargs['special'] = [] kwargs['lower_case'] = False kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt') elif dataset in ['enwik8', 'text8']: pass corpus = TransfoXLCorpus(datadir, dataset, **kwargs) torch.save(corpus, fn) return corpus ================================================ FILE: requirements.txt ================================================ # PyTorch torch>=0.4.1 # progress bars in model download and training scripts tqdm # Accessing files from S3 directly. boto3 # Used for downloading models over HTTP requests # For OpenAI GPT regex ================================================ FILE: samples/input.txt ================================================ Who was Jim Henson ? ||| Jim Henson was a puppeteer ================================================ FILE: samples/sample_text.txt ================================================ This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত Text should be one-sentence-per-line, with empty lines between documents. This sample text is public domain and was randomly selected from Project Guttenberg. The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors. Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity. Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them. "Cass" Beard had risen early that morning, but not with a view to discovery. A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets. The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency. This was nearly opposite. Mr. Cassius crossed the highway, and stopped suddenly. Something glittered in the nearest red pool before him. Gold, surely! But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring. Looking at it more attentively, he saw that it bore the inscription, "May to Cass." Like most of his fellow gold-seekers, Cass was superstitious. The fountain of classic wisdom, Hypatia herself. As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge. From my youth I felt in me a soul above the matter-entangled herd. She revealed to me the glorious fact, that I am a spark of Divinity itself. A fallen star, I am, sir!' 
continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's. There is a philosophic pleasure in opening one's treasures to the modest young. Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street. Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide; but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind. Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now. His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert; while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts. At last they reached the quay at the opposite end of the street; and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers. He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him. ================================================ FILE: setup.py ================================================ """ Simple checklist from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py To create the package for pypi. 1. Change the version in __init__.py and setup.py. 2. Commit these changes with the message: "Release: VERSION" 3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " Push the tag to git: git push --tags origin master 4. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory. (this will build a wheel for the python version you use to build it - make sure you use python 3.x). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp. 5. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r pypitest (pypi suggests using twine as other methods upload files via plaintext.) Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi allennlp 6. Upload the final version to actual pypi: twine upload dist/* -r pypi 7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
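In short, steps 4-6 condense to the following command sequence (a recap of the above, assuming twine is installed and your ~/.pypirc defines the pypitest and pypi repositories): "python setup.py bdist_wheel" and "python setup.py sdist" to build, "twine upload dist/* -r pypitest" to rehearse against the test server, then "twine upload dist/* -r pypi" to publish.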
""" from io import open from setuptools import find_packages, setup setup( name="pytorch_pretrained_bert", version="0.6.2", author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors, Open AI team Authors", author_email="thomas@huggingface.co", description="PyTorch version of Google AI BERT model with script to load Google pre-trained models", long_description=open("README.md", "r", encoding='utf-8').read(), long_description_content_type="text/markdown", keywords='BERT NLP deep learning google', license='Apache', url="https://github.com/huggingface/pytorch-pretrained-BERT", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=['torch>=0.4.1', 'numpy', 'boto3', 'requests', 'tqdm', 'regex'], entry_points={ 'console_scripts': [ "pytorch_pretrained_bert=pytorch_pretrained_bert.__main__:main", ] }, # python_requires='>=3.5.0', tests_require=['pytest'], classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], ) ================================================ FILE: tests/conftest.py ================================================ # content of conftest.py import pytest def pytest_addoption(parser): parser.addoption( "--runslow", action="store_true", default=False, help="run slow tests" ) def pytest_collection_modifyitems(config, items): if config.getoption("--runslow"): # --runslow given in cli: do not skip slow tests return skip_slow = pytest.mark.skip(reason="need --runslow option to run") for item in items: if "slow" in item.keywords: item.add_marker(skip_slow) ================================================ FILE: tests/modeling_gpt2_test.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest import json import random import shutil import pytest import torch from pytorch_pretrained_bert import (GPT2Config, GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel) from pytorch_pretrained_bert.modeling_gpt2 import PRETRAINED_MODEL_ARCHIVE_MAP class GPT2ModelTest(unittest.TestCase): class GPT2ModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_position_ids=True, use_token_type_ids=True, use_labels=True, vocab_size=99, n_special=1, n_positions=33, n_embd=32, n_layer=5, n_head=4, n_choices=3, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_position_ids = use_position_ids self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.n_special = n_special self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_choices = n_choices self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): total_num_tokens = self.vocab_size + self.n_special input_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_num_tokens) position_ids = None if self.use_position_ids: position_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.n_positions) token_type_ids = None if self.use_token_type_ids: total_voc = self.vocab_size token_type_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_voc) mc_labels = None lm_labels = None mc_token_ids = None if self.use_labels: mc_labels = GPT2ModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size) lm_labels = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.num_labels) mc_token_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices], self.seq_length) config = GPT2Config( vocab_size_or_config_json_file=self.vocab_size, n_special=self.n_special, n_positions=self.n_positions, n_embd=self.n_embd, n_layer=self.n_layer, n_head=self.n_head, initializer_range=self.initializer_range) return (config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids) def create_gpt2_model(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = GPT2Model(config) model.eval() hidden_states, presents = model(input_ids, position_ids, token_type_ids) outputs = { "hidden_states": hidden_states, "presents": presents, } return outputs def check_gpt2_model_output(self, result): self.parent.assertEqual(len(result["hidden_states"]), self.n_layer + 1) self.parent.assertListEqual( list(result["hidden_states"][0].size()), [self.batch_size, self.n_choices, self.seq_length, self.n_embd]) def create_gpt2_lm_head(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = GPT2LMHeadModel(config) model.eval() loss = model(input_ids, position_ids, token_type_ids, lm_labels) lm_logits, presents = model(input_ids, position_ids, token_type_ids) outputs = { "loss": loss, "lm_logits": lm_logits, "presents": presents, } return outputs def create_gpt2_lm_head_with_output_attention(self, config, input_ids, 
token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = GPT2LMHeadModel(config, output_attentions=True) model.eval() loss = model(input_ids, position_ids, token_type_ids, lm_labels) attentions, lm_logits, presents = model(input_ids, position_ids, token_type_ids) outputs = { "loss": loss, "lm_logits": lm_logits, "presents": presents, "attentions": attentions, } return outputs def check_gpt2_lm_head_output(self, result): total_voc = self.n_special + self.vocab_size self.parent.assertListEqual( list(result["lm_logits"].size()), [self.batch_size, self.n_choices, self.seq_length, total_voc]) self.parent.assertEqual(self.n_layer, len(result["presents"])) self.parent.assertListEqual( list(result["presents"][0].size()), [2, self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) def check_gpt2_lm_head_loss_output(self, result): self.parent.assertListEqual( list(result["loss"].size()), []) def create_gpt2_double_heads(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = GPT2DoubleHeadsModel(config) model.eval() loss = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels, token_type_ids=token_type_ids, position_ids=position_ids) lm_logits, mc_logits, presents = model(input_ids, mc_token_ids, position_ids=position_ids, token_type_ids=token_type_ids) outputs = { "loss": loss, "lm_logits": lm_logits, "mc_logits": mc_logits, "presents": presents, } return outputs def create_gpt2_double_heads_with_output_attention(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = GPT2DoubleHeadsModel(config, output_attentions=True) model.eval() loss = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels, token_type_ids=token_type_ids, position_ids=position_ids) attentions, lm_logits, mc_logits, presents = model(input_ids, mc_token_ids, position_ids=position_ids, token_type_ids=token_type_ids) outputs = { "loss": loss, "lm_logits": lm_logits, "mc_logits": mc_logits, "presents": presents, "attentions": attentions, } return outputs def check_gpt2_double_heads_output(self, result): total_voc = self.n_special + self.vocab_size self.parent.assertListEqual( list(result["lm_logits"].size()), [self.batch_size, self.n_choices, self.seq_length, total_voc]) self.parent.assertListEqual( list(result["mc_logits"].size()), [self.batch_size, self.n_choices]) def check_gpt2_double_heads_loss_output(self, result): self.parent.assertListEqual( [list(l.size()) for l in result["loss"]], [[], []]) def create_and_check_gpt2_for_headmasking(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): for model_class in (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel): model = model_class(config=config, keep_multihead_output=True) model.eval() head_mask = torch.ones(self.n_layer, self.n_head).to(input_ids.device) head_mask[0, 1:-1] = 0.0 # Mask all but the first and last heads on the first layer head_mask[-1, 1:] = 0.0 # Mask all but the first head on the last layer if isinstance(model, GPT2DoubleHeadsModel): output = model(input_ids, mc_token_ids, head_mask=head_mask) else: output = model(input_ids, head_mask=head_mask) if isinstance(model, GPT2Model): output = sum(t.sum() for t in output[0]) elif isinstance(output, (list, tuple)): output = sum(t.sum() for t in output[:-1]) output = output.sum() output.backward() multihead_outputs = (model if isinstance(model, GPT2Model) else model.transformer).get_multihead_outputs() 
self.parent.assertEqual(len(multihead_outputs), self.n_layer) self.parent.assertListEqual( list(multihead_outputs[0].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertEqual( len(multihead_outputs[0][:, 1:(self.n_head-1), :, :].nonzero()), 0) self.parent.assertEqual( len(multihead_outputs[0][:, 0, :, :].nonzero()), self.batch_size * self.n_choices * self.seq_length * self.n_embd // self.n_head) self.parent.assertEqual( len(multihead_outputs[0][:, self.n_head-1, :, :].nonzero()), self.batch_size * self.n_choices * self.seq_length * self.n_embd // self.n_head) self.parent.assertListEqual( list(multihead_outputs[1].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertEqual( len(multihead_outputs[1].nonzero()), multihead_outputs[1].numel()) self.parent.assertListEqual( list(multihead_outputs[-1].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertEqual( len(multihead_outputs[-1][:, 1:, :, :].nonzero()), 0) self.parent.assertEqual( len(multihead_outputs[-1][:, 0, :, :].nonzero()), self.batch_size * self.n_choices * self.seq_length * self.n_embd // self.n_head) def create_and_check_gpt2_for_head_pruning(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): for model_class in (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel): model = model_class(config=config, keep_multihead_output=True) model.eval() transformer = model if isinstance(model, GPT2Model) else model.transformer heads_to_prune = {0: list(range(1, self.n_head)), -1: [0]} transformer.prune_heads(heads_to_prune) if isinstance(model, GPT2DoubleHeadsModel): output = model(input_ids, mc_token_ids) else: output = model(input_ids) if isinstance(model, GPT2Model): output = sum(t.sum() for t in output[0]) elif isinstance(output, (list, tuple)): output = sum(t.sum() for t in output[:-1]) output = output.sum() output.backward() multihead_outputs = transformer.get_multihead_outputs() self.parent.assertEqual(len(multihead_outputs), self.n_layer) self.parent.assertListEqual( list(multihead_outputs[0].size()), [self.batch_size * self.n_choices, 1, self.seq_length, self.n_embd // self.n_head]) self.parent.assertListEqual( list(multihead_outputs[1].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertListEqual( list(multihead_outputs[-1].size()), [self.batch_size * self.n_choices, self.n_head-1, self.seq_length, self.n_embd // self.n_head]) def test_default(self): self.run_tester(GPT2ModelTest.GPT2ModelTester(self)) def test_config_to_json_string(self): config = GPT2Config(vocab_size_or_config_json_file=99, n_embd=37) obj = json.loads(config.to_json_string()) self.assertEqual(obj["vocab_size"], 99) self.assertEqual(obj["n_embd"], 37) def test_config_to_json_file(self): config_first = GPT2Config(vocab_size_or_config_json_file=99, n_embd=37) json_file_path = "/tmp/config.json" config_first.to_json_file(json_file_path) config_second = GPT2Config.from_json_file(json_file_path) os.remove(json_file_path) self.assertEqual(config_second.to_dict(), config_first.to_dict()) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/pytorch_pretrained_bert_test/" for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: model = GPT2Model.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) 
self.assertIsNotNone(model) def run_tester(self, tester): config_and_inputs = tester.prepare_config_and_inputs() output_result = tester.create_gpt2_model(*config_and_inputs) tester.check_gpt2_model_output(output_result) output_result = tester.create_gpt2_lm_head(*config_and_inputs) tester.check_gpt2_lm_head_output(output_result) tester.check_gpt2_lm_head_loss_output(output_result) output_result = tester.create_gpt2_double_heads(*config_and_inputs) tester.check_gpt2_double_heads_output(output_result) tester.check_gpt2_double_heads_loss_output(output_result) tester.create_and_check_gpt2_for_headmasking(*config_and_inputs) tester.create_and_check_gpt2_for_head_pruning(*config_and_inputs) @classmethod def ids_tensor(cls, shape, vocab_size, rng=None, name=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous() if __name__ == "__main__": unittest.main() ================================================ FILE: tests/modeling_openai_test.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
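# A stand-alone sketch of the head-masking convention exercised by the
# create_and_check_*_for_headmasking methods in this suite: a (n_layer, n_head)
# mask of ones and zeros, where a zero entry silences that attention head's
# output. The helper name and default sizes are illustrative; the mask values
# mirror the tests below.
import torch

def example_head_mask(n_layer=5, n_head=4):
    head_mask = torch.ones(n_layer, n_head)
    head_mask[0, 1:-1] = 0.0  # first layer: keep only the first and last heads
    head_mask[-1, 1:] = 0.0   # last layer: keep only the first head
    return head_mask
# usage, as in the tests: output = model(input_ids, head_mask=example_head_mask())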
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest import json import random import shutil import pytest import torch from pytorch_pretrained_bert import (OpenAIGPTConfig, OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel) from pytorch_pretrained_bert.modeling_openai import PRETRAINED_MODEL_ARCHIVE_MAP class OpenAIGPTModelTest(unittest.TestCase): class OpenAIGPTModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_position_ids=True, use_token_type_ids=True, use_labels=True, vocab_size=99, n_special=1, n_positions=33, n_embd=32, n_layer=5, n_head=4, n_choices=3, afn="gelu", resid_pdrop=0.1, attn_pdrop=0.1, embd_pdrop=0.1, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_position_ids = use_position_ids self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.n_special = n_special self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.afn = afn self.n_choices = n_choices self.resid_pdrop = resid_pdrop self.attn_pdrop = attn_pdrop self.embd_pdrop = embd_pdrop self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): input_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.vocab_size) position_ids = None if self.use_position_ids: position_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.n_positions) token_type_ids = None if self.use_token_type_ids: total_voc = self.vocab_size + self.n_special token_type_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_voc) mc_labels = None lm_labels = None mc_token_ids = None if self.use_labels: mc_labels = OpenAIGPTModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size) lm_labels = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.num_labels) mc_token_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices], self.seq_length) config = OpenAIGPTConfig( vocab_size_or_config_json_file=self.vocab_size, n_positions=self.n_positions, n_special=self.n_special, n_embd=self.n_embd, n_layer=self.n_layer, n_head=self.n_head, afn=self.afn, resid_pdrop=self.resid_pdrop, attn_pdrop=self.attn_pdrop, embd_pdrop=self.embd_pdrop, initializer_range=self.initializer_range) return (config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids) def create_openai_model(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = OpenAIGPTModel(config) model.eval() hidden_states = model(input_ids, position_ids, token_type_ids) outputs = { "hidden_states": hidden_states, } return outputs def check_openai_model_output(self, result): self.parent.assertEqual(len(result["hidden_states"]), self.n_layer + 1) self.parent.assertListEqual( list(result["hidden_states"][0].size()), [self.batch_size, self.n_choices, self.seq_length, self.n_embd]) def create_openai_lm_head(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = OpenAIGPTLMHeadModel(config) model.eval() loss = model(input_ids, 
position_ids, token_type_ids, lm_labels) lm_logits = model(input_ids, position_ids, token_type_ids) outputs = { "loss": loss, "lm_logits": lm_logits, } return outputs def check_openai_lm_head_output(self, result): total_voc = self.n_special + self.vocab_size self.parent.assertListEqual( list(result["lm_logits"].size()), [self.batch_size, self.n_choices, self.seq_length, total_voc]) def check_openai_lm_head_loss_output(self, result): self.parent.assertListEqual( list(result["loss"].size()), []) def create_openai_double_heads(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = OpenAIGPTDoubleHeadsModel(config) model.eval() loss = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels, token_type_ids=token_type_ids, position_ids=position_ids) lm_logits, mc_logits = model(input_ids, mc_token_ids, position_ids=position_ids, token_type_ids=token_type_ids) outputs = { "loss": loss, "lm_logits": lm_logits, "mc_logits": mc_logits, } return outputs def check_openai_double_heads_output(self, result): total_voc = self.n_special + self.vocab_size self.parent.assertListEqual( list(result["lm_logits"].size()), [self.batch_size, self.n_choices, self.seq_length, total_voc]) self.parent.assertListEqual( list(result["mc_logits"].size()), [self.batch_size, self.n_choices]) def check_openai_double_heads_loss_output(self, result): self.parent.assertListEqual( [list(l.size()) for l in result["loss"]], [[], []]) def create_and_check_openai_for_headmasking(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): for model_class in (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel): model = model_class(config=config, keep_multihead_output=True) model.eval() head_mask = torch.ones(self.n_layer, self.n_head).to(input_ids.device) head_mask[0, 1:-1] = 0.0 # Mask all but the first and last heads on the first layer head_mask[-1, 1:] = 0.0 # Mask all but the first head on the last layer if isinstance(model, OpenAIGPTDoubleHeadsModel): output = model(input_ids, mc_token_ids, head_mask=head_mask) else: output = model(input_ids, head_mask=head_mask) if isinstance(model, OpenAIGPTModel): output = sum(t.sum() for t in output[0]) elif isinstance(output, (list, tuple)): output = sum(t.sum() for t in output) output = output.sum() output.backward() multihead_outputs = (model if isinstance(model, OpenAIGPTModel) else model.transformer).get_multihead_outputs() self.parent.assertEqual(len(multihead_outputs), self.n_layer) self.parent.assertListEqual( list(multihead_outputs[0].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertEqual( len(multihead_outputs[0][:, 1:(self.n_head-1), :, :].nonzero()), 0) self.parent.assertEqual( len(multihead_outputs[0][:, 0, :, :].nonzero()), self.batch_size * self.n_choices * self.seq_length * self.n_embd // self.n_head) self.parent.assertEqual( len(multihead_outputs[0][:, self.n_head-1, :, :].nonzero()), self.batch_size * self.n_choices * self.seq_length * self.n_embd // self.n_head) self.parent.assertListEqual( list(multihead_outputs[1].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertEqual( len(multihead_outputs[1].nonzero()), multihead_outputs[1].numel()) self.parent.assertListEqual( list(multihead_outputs[-1].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertEqual( 
len(multihead_outputs[-1][:, 1:, :, :].nonzero()), 0) self.parent.assertEqual( len(multihead_outputs[-1][:, 0, :, :].nonzero()), self.batch_size * self.n_choices * self.seq_length * self.n_embd // self.n_head) def create_and_check_openai_for_head_pruning(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): for model_class in (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel): model = model_class(config=config, keep_multihead_output=True) model.eval() transformer = model if isinstance(model, OpenAIGPTModel) else model.transformer heads_to_prune = {0: list(range(1, self.n_head)), -1: [0]} transformer.prune_heads(heads_to_prune) if isinstance(model, OpenAIGPTDoubleHeadsModel): output = model(input_ids, mc_token_ids) else: output = model(input_ids) if isinstance(model, OpenAIGPTModel): output = sum(t.sum() for t in output[0]) elif isinstance(output, (list, tuple)): output = sum(t.sum() for t in output) output = output.sum() output.backward() multihead_outputs = transformer.get_multihead_outputs() self.parent.assertEqual(len(multihead_outputs), self.n_layer) self.parent.assertListEqual( list(multihead_outputs[0].size()), [self.batch_size * self.n_choices, 1, self.seq_length, self.n_embd // self.n_head]) self.parent.assertListEqual( list(multihead_outputs[1].size()), [self.batch_size * self.n_choices, self.n_head, self.seq_length, self.n_embd // self.n_head]) self.parent.assertListEqual( list(multihead_outputs[-1].size()), [self.batch_size * self.n_choices, self.n_head-1, self.seq_length, self.n_embd // self.n_head]) def test_default(self): self.run_tester(OpenAIGPTModelTest.OpenAIGPTModelTester(self)) def test_config_to_json_string(self): config = OpenAIGPTConfig(vocab_size_or_config_json_file=99, n_embd=37) obj = json.loads(config.to_json_string()) self.assertEqual(obj["vocab_size"], 99) self.assertEqual(obj["n_embd"], 37) def test_config_to_json_file(self): config_first = OpenAIGPTConfig(vocab_size_or_config_json_file=99, n_embd=37) json_file_path = "/tmp/config.json" config_first.to_json_file(json_file_path) config_second = OpenAIGPTConfig.from_json_file(json_file_path) os.remove(json_file_path) self.assertEqual(config_second.to_dict(), config_first.to_dict()) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/pytorch_pretrained_bert_test/" for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: model = OpenAIGPTModel.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(model) def run_tester(self, tester): config_and_inputs = tester.prepare_config_and_inputs() output_result = tester.create_openai_model(*config_and_inputs) tester.check_openai_model_output(output_result) output_result = tester.create_openai_lm_head(*config_and_inputs) tester.check_openai_lm_head_output(output_result) tester.check_openai_lm_head_loss_output(output_result) output_result = tester.create_openai_double_heads(*config_and_inputs) tester.check_openai_double_heads_output(output_result) tester.check_openai_double_heads_loss_output(output_result) tester.create_and_check_openai_for_headmasking(*config_and_inputs) tester.create_and_check_openai_for_head_pruning(*config_and_inputs) @classmethod def ids_tensor(cls, shape, vocab_size, rng=None, name=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) 
return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous() if __name__ == "__main__": unittest.main() ================================================ FILE: tests/modeling_test.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest import json import random import shutil import pytest import torch from pytorch_pretrained_bert import (BertConfig, BertModel, BertForMaskedLM, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertForMultipleChoice) from pytorch_pretrained_bert.modeling import PRETRAINED_MODEL_ARCHIVE_MAP class BertModelTest(unittest.TestCase): class BertModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = BertModelTest.ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = BertModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = BertModelTest.ids_tensor([self.batch_size], self.num_choices) config = BertConfig( vocab_size_or_config_json_file=self.vocab_size, 
hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def check_loss_output(self, result): self.parent.assertListEqual( list(result["loss"].size()), []) def create_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertModel(config=config) model.eval() all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) outputs = { "sequence_output": all_encoder_layers[-1], "pooled_output": pooled_output, "all_encoder_layers": all_encoder_layers, } return outputs def check_bert_model_output(self, result): self.parent.assertListEqual( [size for layer in result["all_encoder_layers"] for size in layer.size()], [self.batch_size, self.seq_length, self.hidden_size] * self.num_hidden_layers) self.parent.assertListEqual( list(result["sequence_output"].size()), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size]) def create_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForMaskedLM(config=config) model.eval() loss = model(input_ids, token_type_ids, input_mask, token_labels) prediction_scores = model(input_ids, token_type_ids, input_mask) outputs = { "loss": loss, "prediction_scores": prediction_scores, } return outputs def check_bert_for_masked_lm_output(self, result): self.parent.assertListEqual( list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]) def create_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForNextSentencePrediction(config=config) model.eval() loss = model(input_ids, token_type_ids, input_mask, sequence_labels) seq_relationship_score = model(input_ids, token_type_ids, input_mask) outputs = { "loss": loss, "seq_relationship_score": seq_relationship_score, } return outputs def check_bert_for_next_sequence_prediction_output(self, result): self.parent.assertListEqual( list(result["seq_relationship_score"].size()), [self.batch_size, 2]) def create_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForPreTraining(config=config) model.eval() loss = model(input_ids, token_type_ids, input_mask, token_labels, sequence_labels) prediction_scores, seq_relationship_score = model(input_ids, token_type_ids, input_mask) outputs = { "loss": loss, "prediction_scores": prediction_scores, "seq_relationship_score": seq_relationship_score, } return outputs def check_bert_for_pretraining_output(self, result): self.parent.assertListEqual( list(result["prediction_scores"].size()), [self.batch_size, self.seq_length, self.vocab_size]) self.parent.assertListEqual( list(result["seq_relationship_score"].size()), [self.batch_size, 2]) def create_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels): model = BertForQuestionAnswering(config=config) model.eval() loss = model(input_ids, token_type_ids, input_mask, sequence_labels, sequence_labels) start_logits, end_logits = model(input_ids, token_type_ids, input_mask) outputs = { "loss": loss, "start_logits": start_logits, "end_logits": end_logits, } return outputs def check_bert_for_question_answering_output(self, result): self.parent.assertListEqual( list(result["start_logits"].size()), [self.batch_size, self.seq_length]) self.parent.assertListEqual( list(result["end_logits"].size()), [self.batch_size, self.seq_length]) def create_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForSequenceClassification(config=config, num_labels=self.num_labels) model.eval() loss = model(input_ids, token_type_ids, input_mask, sequence_labels) logits = model(input_ids, token_type_ids, input_mask) outputs = { "loss": loss, "logits": logits, } return outputs def check_bert_for_sequence_classification_output(self, result): self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.num_labels]) def create_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForTokenClassification(config=config, num_labels=self.num_labels) model.eval() loss = model(input_ids, token_type_ids, input_mask, token_labels) logits = model(input_ids, token_type_ids, input_mask) outputs = { "loss": loss, "logits": logits, } return outputs def check_bert_for_token_classification_output(self, result): self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.seq_length, self.num_labels]) def create_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForMultipleChoice(config=config, num_choices=self.num_choices) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() loss = model(multiple_choice_inputs_ids, multiple_choice_token_type_ids, multiple_choice_input_mask, choice_labels) logits = model(multiple_choice_inputs_ids, multiple_choice_token_type_ids, multiple_choice_input_mask) outputs = { "loss": loss, "logits": logits, } return outputs def check_bert_for_multiple_choice(self, result): self.parent.assertListEqual( list(result["logits"].size()), [self.batch_size, self.num_choices]) def create_and_check_bert_for_attentions(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): for model_class in (BertModel, BertForMaskedLM, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification): if model_class in [BertForSequenceClassification, BertForTokenClassification]: model = model_class(config=config, num_labels=self.num_labels, output_attentions=True) else: model = model_class(config=config, output_attentions=True) model.eval() output = model(input_ids, token_type_ids, input_mask) attentions = output[0] self.parent.assertEqual(len(attentions), self.num_hidden_layers) self.parent.assertListEqual( list(attentions[0].size()), [self.batch_size, self.num_attention_heads, self.seq_length, 
self.seq_length]) def create_and_check_bert_for_headmasking(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): for model_class in (BertModel, BertForMaskedLM, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification): if model_class in [BertForSequenceClassification, BertForTokenClassification]: model = model_class(config=config, num_labels=self.num_labels, keep_multihead_output=True) else: model = model_class(config=config, keep_multihead_output=True) model.eval() head_mask = torch.ones(self.num_hidden_layers, self.num_attention_heads).to(input_ids.device) head_mask[0, 1:-1] = 0.0 # Mask all but the first and last heads on the first layer head_mask[-1, 1:] = 0.0 # Mask all but the first head on the last layer output = model(input_ids, token_type_ids, input_mask, head_mask=head_mask) if isinstance(model, BertModel): output = sum(t.sum() for t in output[0]) elif isinstance(output, (list, tuple)): output = sum(t.sum() for t in output) output = output.sum() output.backward() multihead_outputs = (model if isinstance(model, BertModel) else model.bert).get_multihead_outputs() self.parent.assertEqual(len(multihead_outputs), self.num_hidden_layers) self.parent.assertListEqual( list(multihead_outputs[0].size()), [self.batch_size, self.num_attention_heads, self.seq_length, self.hidden_size // self.num_attention_heads]) self.parent.assertEqual( len(multihead_outputs[0][:, 1:(self.num_attention_heads-1), :, :].nonzero()), 0) self.parent.assertEqual( len(multihead_outputs[0][:, 0, :, :].nonzero()), self.batch_size * self.seq_length * self.hidden_size // self.num_attention_heads) self.parent.assertEqual( len(multihead_outputs[0][:, self.num_attention_heads-1, :, :].nonzero()), self.batch_size * self.seq_length * self.hidden_size // self.num_attention_heads) self.parent.assertListEqual( list(multihead_outputs[1].size()), [self.batch_size, self.num_attention_heads, self.seq_length, self.hidden_size // self.num_attention_heads]) self.parent.assertEqual( len(multihead_outputs[1].nonzero()), multihead_outputs[1].numel()) self.parent.assertListEqual( list(multihead_outputs[-1].size()), [self.batch_size, self.num_attention_heads, self.seq_length, self.hidden_size // self.num_attention_heads]) self.parent.assertEqual( len(multihead_outputs[-1][:, 1:, :, :].nonzero()), 0) self.parent.assertEqual( len(multihead_outputs[-1][:, 0, :, :].nonzero()), self.batch_size * self.seq_length * self.hidden_size // self.num_attention_heads) def create_and_check_bert_for_head_pruning(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): for model_class in (BertModel, BertForMaskedLM, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification): if model_class in [BertForSequenceClassification, BertForTokenClassification]: model = model_class(config=config, num_labels=self.num_labels, keep_multihead_output=True) else: model = model_class(config=config, keep_multihead_output=True) model.eval() bert_model = model if isinstance(model, BertModel) else model.bert heads_to_prune = {0: list(range(1, self.num_attention_heads)), -1: [0]} bert_model.prune_heads(heads_to_prune) output = model(input_ids, token_type_ids, input_mask) if isinstance(model, BertModel): output = sum(t.sum() for t in output[0]) elif isinstance(output, (list, tuple)): output = sum(t.sum() for t in output) output = 
output.sum() output.backward() multihead_outputs = bert_model.get_multihead_outputs() self.parent.assertEqual(len(multihead_outputs), self.num_hidden_layers) self.parent.assertListEqual( list(multihead_outputs[0].size()), [self.batch_size, 1, self.seq_length, self.hidden_size // self.num_attention_heads]) self.parent.assertListEqual( list(multihead_outputs[1].size()), [self.batch_size, self.num_attention_heads, self.seq_length, self.hidden_size // self.num_attention_heads]) self.parent.assertListEqual( list(multihead_outputs[-1].size()), [self.batch_size, self.num_attention_heads-1, self.seq_length, self.hidden_size // self.num_attention_heads]) def test_default(self): self.run_tester(BertModelTest.BertModelTester(self)) def test_config_to_json_string(self): config = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37) obj = json.loads(config.to_json_string()) self.assertEqual(obj["vocab_size"], 99) self.assertEqual(obj["hidden_size"], 37) def test_config_to_json_file(self): config_first = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37) json_file_path = "/tmp/config.json" config_first.to_json_file(json_file_path) config_second = BertConfig.from_json_file(json_file_path) os.remove(json_file_path) self.assertEqual(config_second.to_dict(), config_first.to_dict()) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/pytorch_pretrained_bert_test/" for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: model = BertModel.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(model) def run_tester(self, tester): config_and_inputs = tester.prepare_config_and_inputs() output_result = tester.create_bert_model(*config_and_inputs) tester.check_bert_model_output(output_result) output_result = tester.create_bert_for_masked_lm(*config_and_inputs) tester.check_bert_for_masked_lm_output(output_result) tester.check_loss_output(output_result) output_result = tester.create_bert_for_next_sequence_prediction(*config_and_inputs) tester.check_bert_for_next_sequence_prediction_output(output_result) tester.check_loss_output(output_result) output_result = tester.create_bert_for_pretraining(*config_and_inputs) tester.check_bert_for_pretraining_output(output_result) tester.check_loss_output(output_result) output_result = tester.create_bert_for_question_answering(*config_and_inputs) tester.check_bert_for_question_answering_output(output_result) tester.check_loss_output(output_result) output_result = tester.create_bert_for_sequence_classification(*config_and_inputs) tester.check_bert_for_sequence_classification_output(output_result) tester.check_loss_output(output_result) output_result = tester.create_bert_for_token_classification(*config_and_inputs) tester.check_bert_for_token_classification_output(output_result) tester.check_loss_output(output_result) output_result = tester.create_bert_for_multiple_choice(*config_and_inputs) tester.check_bert_for_multiple_choice(output_result) tester.check_loss_output(output_result) tester.create_and_check_bert_for_attentions(*config_and_inputs) tester.create_and_check_bert_for_headmasking(*config_and_inputs) tester.create_and_check_bert_for_head_pruning(*config_and_inputs) @classmethod def ids_tensor(cls, shape, vocab_size, rng=None, name=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) 
return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous() if __name__ == "__main__": unittest.main() ================================================ FILE: tests/modeling_transfo_xl_test.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest import json import random import shutil import pytest import torch from pytorch_pretrained_bert import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel) from pytorch_pretrained_bert.modeling_transfo_xl import PRETRAINED_MODEL_ARCHIVE_MAP class TransfoXLModelTest(unittest.TestCase): class TransfoXLModelTester(object): def __init__(self, parent, batch_size=13, seq_length=7, mem_len=30, clamp_len=15, is_training=True, use_labels=True, vocab_size=99, cutoffs=[10, 50, 80], d_model=32, d_embed=32, n_head=4, d_head=8, d_inner=128, div_val=2, n_layer=5, scope=None, seed=1): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.mem_len = mem_len self.clamp_len = clamp_len self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.cutoffs = cutoffs self.d_model = d_model self.d_embed = d_embed self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.div_val = div_val self.n_layer = n_layer self.scope = scope self.seed = seed def prepare_config_and_inputs(self): input_ids_1 = TransfoXLModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids_2 = TransfoXLModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size) lm_labels = None if self.use_labels: lm_labels = TransfoXLModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = TransfoXLConfig( vocab_size_or_config_json_file=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.d_model, d_embed=self.d_embed, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.n_layer) return (config, input_ids_1, input_ids_2, lm_labels) def set_seed(self): random.seed(self.seed) torch.manual_seed(self.seed) def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLModel(config) model.eval() hidden_states_1, mems_1 = model(input_ids_1) hidden_states_2, mems_2 = model(input_ids_2, mems_1) outputs = { "hidden_states_1": hidden_states_1, "mems_1": mems_1, "hidden_states_2": hidden_states_2, "mems_2": mems_2, } return outputs def check_transfo_xl_model_output(self, result): self.parent.assertListEqual( list(result["hidden_states_1"].size()), [self.batch_size, self.seq_length, self.d_model]) self.parent.assertListEqual( list(result["hidden_states_2"].size()), [self.batch_size, self.seq_length, self.d_model]) self.parent.assertListEqual( list(list(mem.size()) for mem in result["mems_1"]), [[self.mem_len, 
self.batch_size, self.d_model]] * self.n_layer) self.parent.assertListEqual( list(list(mem.size()) for mem in result["mems_2"]), [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer) def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLLMHeadModel(config) model.eval() loss_1, mems_1a = model(input_ids_1, target=lm_labels) lm_logits_1, mems_1b = model(input_ids_1) loss_2, mems_2a = model(input_ids_2, target=lm_labels, mems=mems_1a) lm_logits_2, mems_2b = model(input_ids_2, mems=mems_1b) outputs = { "loss_1": loss_1, "mems_1a": mems_1a, "lm_logits_1": lm_logits_1, "mems_1b": mems_1b, "loss_2": loss_2, "mems_2a": mems_2a, "lm_logits_2": lm_logits_2, "mems_2b": mems_2b, } return outputs def check_transfo_xl_lm_head_output(self, result): self.parent.assertListEqual( list(result["loss_1"].size()), [self.batch_size, self.seq_length]) self.parent.assertListEqual( list(result["lm_logits_1"].size()), [self.batch_size, self.seq_length, self.vocab_size]) self.parent.assertListEqual( list(list(mem.size()) for mem in result["mems_1a"]), [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer) self.parent.assertListEqual( list(list(mem.size()) for mem in result["mems_1b"]), [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer) self.parent.assertListEqual( list(mem[~torch.isnan(mem)].sum() for mem in result["mems_1a"]), list(mem[~torch.isnan(mem)].sum() for mem in result["mems_1b"])) self.parent.assertListEqual( list(result["loss_2"].size()), [self.batch_size, self.seq_length]) self.parent.assertListEqual( list(result["lm_logits_2"].size()), [self.batch_size, self.seq_length, self.vocab_size]) self.parent.assertListEqual( list(list(mem.size()) for mem in result["mems_2a"]), [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer) self.parent.assertListEqual( list(list(mem.size()) for mem in result["mems_2b"]), [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer) self.parent.assertListEqual( list(mem[~torch.isnan(mem)].sum() for mem in result["mems_2a"]), list(mem[~torch.isnan(mem)].sum() for mem in result["mems_2b"])) def test_default(self): self.run_tester(TransfoXLModelTest.TransfoXLModelTester(self)) def test_config_to_json_string(self): config = TransfoXLConfig(vocab_size_or_config_json_file=96, d_embed=37) obj = json.loads(config.to_json_string()) self.assertEqual(obj["n_token"], 96) self.assertEqual(obj["d_embed"], 37) def test_config_to_json_file(self): config_first = TransfoXLConfig(vocab_size_or_config_json_file=96, d_embed=37) json_file_path = "/tmp/config.json" config_first.to_json_file(json_file_path) config_second = TransfoXLConfig.from_json_file(json_file_path) os.remove(json_file_path) self.assertEqual(config_second.to_dict(), config_first.to_dict()) @pytest.mark.slow def test_model_from_pretrained(self): cache_dir = "/tmp/pytorch_pretrained_bert_test/" for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: model = TransfoXLModel.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(model) def run_tester(self, tester): config_and_inputs = tester.prepare_config_and_inputs() tester.set_seed() output_result = tester.create_transfo_xl_model(*config_and_inputs) tester.check_transfo_xl_model_output(output_result) tester.set_seed() output_result = tester.create_transfo_xl_lm_head(*config_and_inputs) tester.check_transfo_xl_lm_head_output(output_result) @classmethod def ids_tensor(cls, shape, vocab_size, rng=None, name=None): """Creates a random int32 tensor 
of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous() if __name__ == "__main__": unittest.main() ================================================ FILE: tests/optimization_test.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import torch from pytorch_pretrained_bert import BertAdam from pytorch_pretrained_bert import OpenAIAdam from pytorch_pretrained_bert.optimization import ConstantLR, WarmupLinearSchedule, WarmupConstantSchedule, \ WarmupCosineWithWarmupRestartsSchedule, WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule import numpy as np class OptimizationTest(unittest.TestCase): def assertListAlmostEqual(self, list1, list2, tol): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol) def test_adam(self): w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True) target = torch.tensor([0.4, 0.2, -0.5]) criterion = torch.nn.MSELoss() # No warmup, constant schedule, no gradient clipping optimizer = BertAdam(params=[w], lr=2e-1, weight_decay=0.0, max_grad_norm=-1) for _ in range(100): loss = criterion(w, target) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_() self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) class ScheduleInitTest(unittest.TestCase): def test_bert_sched_init(self): m = torch.nn.Linear(50, 50) optim = BertAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule=None) self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR)) optim = BertAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule="none") self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR)) optim = BertAdam(m.parameters(), lr=0.001, warmup=.01, t_total=1000) self.assertTrue(isinstance(optim.param_groups[0]["schedule"], WarmupLinearSchedule)) # shouldn't fail def test_openai_sched_init(self): m = torch.nn.Linear(50, 50) optim = OpenAIAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule=None) self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR)) optim = OpenAIAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule="none") self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR)) optim = OpenAIAdam(m.parameters(), lr=0.001, warmup=.01, t_total=1000) self.assertTrue(isinstance(optim.param_groups[0]["schedule"], WarmupLinearSchedule)) # shouldn't fail class WarmupCosineWithRestartsTest(unittest.TestCase): def test_it(self): m = WarmupCosineWithWarmupRestartsSchedule(warmup=0.05, t_total=1000., cycles=5) x = np.arange(0, 1000) y = [m.get_lr(xe) for xe in x] y = np.asarray(y) expected_zeros = y[[0, 200, 400, 600, 800]] print(expected_zeros) expected_ones = y[[50, 250, 450, 650, 850]] print(expected_ones) self.assertTrue(np.allclose(expected_ones, 1)) self.assertTrue(np.allclose(expected_zeros, 0)) if __name__ == "__main__": unittest.main() ================================================ FILE: tests/tokenization_gpt2_test.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals import os import unittest import json import shutil import pytest from pytorch_pretrained_bert.tokenization_gpt2 import GPT2Tokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP class GPT2TokenizationTest(unittest.TestCase): def test_full_tokenizer(self): """ Adapted from Sennrich et al. 

================================================
FILE: tests/tokenization_openai_test.py
================================================
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest
import json
import shutil
import pytest

from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP


class OpenAIGPTTokenizationTest(unittest.TestCase):

    def test_full_tokenizer(self):
        """ Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
                 "w</w>", "r</w>", "t</w>",
                 "lo", "low", "er</w>",
                 "low</w>", "lowest</w>", "newer</w>", "wider</w>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        with open("/tmp/openai_tokenizer_vocab_test.json", "w") as fp:
            fp.write(json.dumps(vocab_tokens))
            vocab_file = fp.name
        with open("/tmp/openai_tokenizer_merges_test.txt", "w") as fp:
            fp.write("\n".join(merges))
            merges_file = fp.name

        tokenizer = OpenAIGPTTokenizer(vocab_file, merges_file, special_tokens=["<unk>", "<pad>"])
        os.remove(vocab_file)
        os.remove(merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(input_tokens),
            input_bpe_tokens)

        vocab_file, merges_file, special_tokens_file = tokenizer.save_vocabulary(vocab_path="/tmp/")
        tokenizer_2 = OpenAIGPTTokenizer.from_pretrained("/tmp/")
        os.remove(vocab_file)
        os.remove(merges_file)
        os.remove(special_tokens_file)

        self.assertListEqual(
            [tokenizer.encoder, tokenizer.decoder, tokenizer.bpe_ranks,
             tokenizer.special_tokens, tokenizer.special_tokens_decoder],
            [tokenizer_2.encoder, tokenizer_2.decoder, tokenizer_2.bpe_ranks,
             tokenizer_2.special_tokens, tokenizer_2.special_tokens_decoder])

    @pytest.mark.slow
    def test_tokenizer_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
            tokenizer = OpenAIGPTTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(tokenizer)


if __name__ == '__main__':
    unittest.main()
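
Note the difference from the GPT-2 test above: this BPE vocabulary marks word-final pieces with a "</w>" suffix, which is why the toy vocabulary splits "lower" into "low" + "er</w>". A hedged sketch of the same call against the published "openai-gpt" vocabulary follows; the actual output depends on the downloaded merges, so the comment states only the toy-vocab result.

from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
# With the toy vocab in test_full_tokenizer this yields ["low", "er</w>"];
# the published merges may collapse "lower" into fewer pieces.
print(tokenizer.tokenize("lower"))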

================================================
FILE: tests/tokenization_test.py
================================================
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest
from io import open
import shutil
import pytest

from pytorch_pretrained_bert.tokenization import (BasicTokenizer,
                                                  BertTokenizer,
                                                  WordpieceTokenizer,
                                                  _is_control, _is_punctuation,
                                                  _is_whitespace, PRETRAINED_VOCAB_ARCHIVE_MAP)


class TokenizationTest(unittest.TestCase):

    def test_full_tokenizer(self):
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un",
            "runn", "##ing", ","
        ]
        with open("/tmp/bert_tokenizer_test.txt", "w", encoding='utf-8') as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
            vocab_file = vocab_writer.name

        tokenizer = BertTokenizer(vocab_file)
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

        vocab_file = tokenizer.save_vocabulary(vocab_path="/tmp/")
        tokenizer.from_pretrained(vocab_file)
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    @pytest.mark.slow
    def test_tokenizer_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
            tokenizer = BertTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(tokenizer)

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(u"ah\u535A\u63A8zz"),
            [u"ah", u"\u535A", u"\u63A8", u"zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
            ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
            ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un",
            "runn", "##ing"
        ]

        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab)

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(
            tokenizer.tokenize("unwanted running"),
            ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(
            tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(u" "))
        self.assertTrue(_is_whitespace(u"\t"))
        self.assertTrue(_is_whitespace(u"\r"))
        self.assertTrue(_is_whitespace(u"\n"))
        self.assertTrue(_is_whitespace(u"\u00A0"))

        self.assertFalse(_is_whitespace(u"A"))
        self.assertFalse(_is_whitespace(u"-"))

    def test_is_control(self):
        self.assertTrue(_is_control(u"\u0005"))

        self.assertFalse(_is_control(u"A"))
        self.assertFalse(_is_control(u" "))
        self.assertFalse(_is_control(u"\t"))
        self.assertFalse(_is_control(u"\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation(u"-"))
        self.assertTrue(_is_punctuation(u"$"))
        self.assertTrue(_is_punctuation(u"`"))
        self.assertTrue(_is_punctuation(u"."))

        self.assertFalse(_is_punctuation(u"A"))
        self.assertFalse(_is_punctuation(u" "))


if __name__ == '__main__':
    unittest.main()
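
For comparison, a minimal sketch of the wordpiece flow these tests exercise, run against the published "bert-base-uncased" vocabulary (a network fetch; this is not a file in the repository):

from pytorch_pretrained_bert import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
tokens = tokenizer.tokenize("unwanted running")  # "##" marks wordpiece continuations
ids = tokenizer.convert_tokens_to_ids(tokens)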

================================================
FILE: tests/tokenization_transfo_xl_test.py
================================================
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest
from io import open
import shutil
import pytest

from pytorch_pretrained_bert.tokenization_transfo_xl import TransfoXLTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP


class TransfoXLTokenizationTest(unittest.TestCase):

    def test_full_tokenizer(self):
        vocab_tokens = [
            "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ","
        ]
        with open("/tmp/transfo_xl_tokenizer_test.txt", "w", encoding='utf-8') as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
            vocab_file = vocab_writer.name

        tokenizer = TransfoXLTokenizer(vocab_file=vocab_file, lower_case=True)
        tokenizer.build_vocab()
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

        vocab_file = tokenizer.save_vocabulary(vocab_path="/tmp/")
        tokenizer.from_pretrained(vocab_file)
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo ! how \n Are yoU ? "),
            ["hello", "!", "how", "are", "you", "?"])
"), ["HeLLo", "!", "how", "Are", "yoU", "?"]) def test_wordpiece_tokenizer(self): vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing" ] vocab = {} for (i, token) in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab) self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual( tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual( tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) def test_is_whitespace(self): self.assertTrue(_is_whitespace(u" ")) self.assertTrue(_is_whitespace(u"\t")) self.assertTrue(_is_whitespace(u"\r")) self.assertTrue(_is_whitespace(u"\n")) self.assertTrue(_is_whitespace(u"\u00A0")) self.assertFalse(_is_whitespace(u"A")) self.assertFalse(_is_whitespace(u"-")) def test_is_control(self): self.assertTrue(_is_control(u"\u0005")) self.assertFalse(_is_control(u"A")) self.assertFalse(_is_control(u" ")) self.assertFalse(_is_control(u"\t")) self.assertFalse(_is_control(u"\r")) def test_is_punctuation(self): self.assertTrue(_is_punctuation(u"-")) self.assertTrue(_is_punctuation(u"$")) self.assertTrue(_is_punctuation(u"`")) self.assertTrue(_is_punctuation(u".")) self.assertFalse(_is_punctuation(u"A")) self.assertFalse(_is_punctuation(u" ")) if __name__ == '__main__': unittest.main() ================================================ FILE: tests/tokenization_transfo_xl_test.py ================================================ # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals import os import unittest from io import open import shutil import pytest from pytorch_pretrained_bert.tokenization_transfo_xl import TransfoXLTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP class TransfoXLTokenizationTest(unittest.TestCase): def test_full_tokenizer(self): vocab_tokens = [ "", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", "," ] with open("/tmp/transfo_xl_tokenizer_test.txt", "w", encoding='utf-8') as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) vocab_file = vocab_writer.name tokenizer = TransfoXLTokenizer(vocab_file=vocab_file, lower_case=True) tokenizer.build_vocab() os.remove(vocab_file) tokens = tokenizer.tokenize(u" UNwanted , running") self.assertListEqual(tokens, ["", "unwanted", ",", "running"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7]) vocab_file = tokenizer.save_vocabulary(vocab_path="/tmp/") tokenizer.from_pretrained(vocab_file) os.remove(vocab_file) tokens = tokenizer.tokenize(u" UNwanted , running") self.assertListEqual(tokens, ["", "unwanted", ",", "running"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7]) def test_full_tokenizer_lower(self): tokenizer = TransfoXLTokenizer(lower_case=True) self.assertListEqual( tokenizer.tokenize(u" \tHeLLo ! how \n Are yoU ? 
"), ["hello", "!", "how", "are", "you", "?"]) def test_full_tokenizer_no_lower(self): tokenizer = TransfoXLTokenizer(lower_case=False) self.assertListEqual( tokenizer.tokenize(u" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]) @pytest.mark.slow def test_tokenizer_from_pretrained(self): cache_dir = "/tmp/pytorch_pretrained_bert_test/" for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]: tokenizer = TransfoXLTokenizer.from_pretrained(model_name, cache_dir=cache_dir) shutil.rmtree(cache_dir) self.assertIsNotNone(tokenizer) if __name__ == '__main__': unittest.main()