{"repo": "NVIDIA/NeMo", "pull_number": 162, "instance_id": "NVIDIA__NeMo-162", "issue_numbers": "", "base_commit": "f5f09838b96ab48f40d97c100fbcfc5b7f1ac59e", "patch": "diff --git a/collections/nemo_nlp/nemo_nlp/data/data_layers.py b/collections/nemo_nlp/nemo_nlp/data/data_layers.py\n--- a/collections/nemo_nlp/nemo_nlp/data/data_layers.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/data_layers.py\n@@ -683,9 +683,9 @@ def _collate_fn(self, x):\n [np.stack(x, axis=0) for x in components]\n src_ids = torch.Tensor(src_ids).long().to(self._device)\n src_segment_ids = torch.Tensor(src_segment_ids).long().to(self._device)\n- src_mask = torch.Tensor(src_mask).float().to(self._device)\n+ src_mask = torch.Tensor(src_mask).long().to(self._device)\n tgt_ids = torch.Tensor(tgt_ids).long().to(self._device)\n- tgt_mask = torch.Tensor(tgt_mask).float().to(self._device)\n+ tgt_mask = torch.Tensor(tgt_mask).long().to(self._device)\n sent_ids = torch.Tensor(sent_ids).long().to(self._device)\n return src_ids, src_segment_ids, src_mask, tgt_ids, tgt_mask, sent_ids\n \ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py b/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/bert_pretraining.py\n@@ -249,7 +249,7 @@ def truncate_seq_pair(a, b, max_num_tokens):\n \n input_ids, output_mask = self.mask_ids(output_ids)\n \n- input_mask = np.zeros(self.max_seq_length, dtype=np.float32)\n+ input_mask = np.zeros(self.max_seq_length, dtype=np.long)\n input_mask[:len(input_ids)] = 1\n \n input_type_ids = np.zeros(self.max_seq_length, dtype=np.int)\n@@ -263,7 +263,7 @@ def truncate_seq_pair(a, b, max_num_tokens):\n \n # TODO: wrap the return value with () for consistent style.\n return np.array(input_ids), input_type_ids,\\\n- np.array(input_mask, dtype=np.float32), np.array(output_ids),\\\n+ np.array(input_mask, dtype=np.long), np.array(output_ids),\\\n np.array(output_mask, dtype=np.float32), is_next\n \n def mask_ids(self, ids):\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py b/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/glue.py\n@@ -55,7 +55,7 @@ def __getitem__(self, idx):\n feature = self.features[idx]\n return (np.array(feature.input_ids),\n np.array(feature.segment_ids),\n- np.array(feature.input_mask, dtype=np.float32),\n+ np.array(feature.input_mask, dtype=np.long),\n np.array(feature.label_id))\n \n \ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py b/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/joint_intent_slot.py\n@@ -214,7 +214,7 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx]),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n self.all_intents[idx],\n@@ -263,6 +263,6 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n 
np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]))\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py b/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/punctuation_capitalization.py\n@@ -386,7 +386,7 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n np.array(self.punct_all_labels[idx]),\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py b/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/sentence_classification.py\n@@ -115,7 +115,7 @@ def __getitem__(self, idx):\n \n return (np.array(feature.input_ids),\n np.array(feature.segment_ids),\n- np.array(feature.input_mask, dtype=np.float32),\n+ np.array(feature.input_mask, dtype=np.long),\n feature.sent_label)\n \n def convert_sequences_to_features(self,\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py b/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py\n--- a/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/datasets/token_classification.py\n@@ -333,7 +333,7 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n np.array(self.all_labels[idx]))\n@@ -377,6 +377,6 @@ def __len__(self):\n def __getitem__(self, idx):\n return (np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n- np.array(self.all_input_mask[idx], dtype=np.float32),\n+ np.array(self.all_input_mask[idx], dtype=np.long),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]))\ndiff --git a/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py b/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py\n--- a/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/tokenizers/bert_tokenizer.py\n@@ -1,5 +1,5 @@\n from .tokenizer_spec import TokenizerSpec\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n import re\n \n \ndiff --git a/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py b/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py\n--- a/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py\n+++ b/collections/nemo_nlp/nemo_nlp/data/tokenizers/gpt2_tokenizer.py\n@@ -1,5 +1,5 @@\n from .tokenizer_spec import TokenizerSpec\n-from pytorch_transformers import GPT2Tokenizer\n+from transformers import GPT2Tokenizer\n \n \n class NemoGPT2Tokenizer(TokenizerSpec):\ndiff --git a/collections/nemo_nlp/nemo_nlp/huggingface/bert.py b/collections/nemo_nlp/nemo_nlp/huggingface/bert.py\n--- a/collections/nemo_nlp/nemo_nlp/huggingface/bert.py\n+++ 
b/collections/nemo_nlp/nemo_nlp/huggingface/bert.py\n@@ -1,10 +1,10 @@\n # Copyright (c) 2019 NVIDIA Corporation\n from typing import Optional, List\n \n-from pytorch_transformers import (BertConfig,\n- BertModel,\n- BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n- BERT_PRETRAINED_CONFIG_ARCHIVE_MAP)\n+from transformers import (BertConfig,\n+ BertModel,\n+ BERT_PRETRAINED_MODEL_ARCHIVE_MAP,\n+ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP)\n \n from nemo.backends.pytorch.nm import TrainableNM\n from nemo.core.neural_modules import PretrainedModelInfo\n@@ -18,7 +18,7 @@\n class BERT(TrainableNM):\n \"\"\"\n BERT wraps around the Huggingface implementation of BERT from their\n- pytorch-transformers repository for easy use within NeMo.\n+ transformers repository for easy use within NeMo.\n \n Args:\n pretrained_model_name (str): If using a pretrained model, this should\ndiff --git a/collections/nemo_nlp/setup.py b/collections/nemo_nlp/setup.py\n--- a/collections/nemo_nlp/setup.py\n+++ b/collections/nemo_nlp/setup.py\n@@ -25,7 +25,7 @@\n 'python-dateutil<2.8.1,>=2.1',\n 'boto3',\n 'unidecode',\n- 'pytorch-transformers',\n+ 'transformers',\n 'matplotlib',\n 'h5py',\n 'youtokentome'\ndiff --git a/examples/nlp/joint_intent_slot_infer.py b/examples/nlp/joint_intent_slot_infer.py\n--- a/examples/nlp/joint_intent_slot_infer.py\n+++ b/examples/nlp/joint_intent_slot_infer.py\n@@ -2,7 +2,7 @@\n import os\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n from sklearn.metrics import confusion_matrix, classification_report\n \n import nemo\ndiff --git a/examples/nlp/joint_intent_slot_infer_b1.py b/examples/nlp/joint_intent_slot_infer_b1.py\n--- a/examples/nlp/joint_intent_slot_infer_b1.py\n+++ b/examples/nlp/joint_intent_slot_infer_b1.py\n@@ -1,7 +1,7 @@\n import argparse\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n \n import nemo\n import nemo_nlp\ndiff --git a/examples/nlp/joint_intent_slot_with_bert.py b/examples/nlp/joint_intent_slot_with_bert.py\n--- a/examples/nlp/joint_intent_slot_with_bert.py\n+++ b/examples/nlp/joint_intent_slot_with_bert.py\n@@ -3,7 +3,7 @@\n import os\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n \n import nemo\n from nemo.utils.lr_policies import get_lr_policy\ndiff --git a/examples/nlp/sentence_classification_with_bert.py b/examples/nlp/sentence_classification_with_bert.py\n--- a/examples/nlp/sentence_classification_with_bert.py\n+++ b/examples/nlp/sentence_classification_with_bert.py\n@@ -2,7 +2,7 @@\n import math\n \n import numpy as np\n-from pytorch_transformers import BertTokenizer\n+from transformers import BertTokenizer\n from torch import nn\n import torch\n \ndiff --git a/nemo/nemo/backends/pytorch/nm.py b/nemo/nemo/backends/pytorch/nm.py\n--- a/nemo/nemo/backends/pytorch/nm.py\n+++ b/nemo/nemo/backends/pytorch/nm.py\n@@ -36,7 +36,7 @@ def __init__(self, **kwargs):\n nn.Module.__init__(self) # For PyTorch API\n self._device = get_cuda_device(self.placement)\n \n- def __call__(self, force_pt=False, *input, **kwargs):\n+ def __call__(self, *input, force_pt=False, **kwargs):\n pt_call = len(input) > 0 or force_pt\n if pt_call:\n return nn.Module.__call__(self, *input, **kwargs)\ndiff --git a/scripts/get_decoder_params_from_bert.py b/scripts/get_decoder_params_from_bert.py\n--- a/scripts/get_decoder_params_from_bert.py\n+++ b/scripts/get_decoder_params_from_bert.py\n@@ -1,6 +1,6 @@\n 
import torch\n-from pytorch_transformers import BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n-from pytorch_transformers.file_utils import cached_path\n+from transformers import BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n+from transformers.file_utils import cached_path\n import argparse\n \n state_dict_mappings = {\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2019-12-03T01:19:14Z"}