| python_code (string, 0–187k chars) | repo_name (string, 8–46 chars) | file_path (string, 6–135 chars) |
|---|---|---|
from typing import Dict
from allennlp.data import DatasetReader, Instance, TokenIndexer
from allennlp.data.data_loaders import MultiProcessDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import SingleIdTokenIndexer
from alle... | allennlp-guide-master | exercises/part2/reading-data/data_loader_setup.py |
# To create fields, simply pass the data to the constructor.
# NOTE: Don't worry about the token_indexers too much for now. We have a whole
# chapter on why TextFields are set up this way, and how they work.
tokens = [Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")]
token_indexers: Dict[str, TokenInd... | allennlp-guide-master | exercises/part2/reading-data/fields_source.py |
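The snippet above is cut off at the ellipsis; a minimal self-contained sketch of the same field-construction pattern (the `"pos"` label is illustrative, not from the source):

```python
from typing import Dict

from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token

tokens = [Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")]
token_indexers: Dict[str, TokenIndexer] = {"tokens": SingleIdTokenIndexer()}

# A TextField pairs the tokens with the indexers that will later map them to ids.
text_field = TextField(tokens, token_indexers=token_indexers)
# A LabelField holds a single categorical label.
label_field = LabelField("pos")
```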
reader = MyDatasetReader()
vocab = Vocabulary.from_instances(reader.read("path_to_data"))
print("Default:")
data_loader = MultiProcessDataLoader(reader, "path_to_data", batch_size=4)
data_loader.index_with(vocab)
for batch in data_loader:
print(batch)
print("Shuffle, and drop last batch if incomplete:")
data_load... | allennlp-guide-master | exercises/part2/reading-data/data_loader_basic.py |
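The second loader construction is truncated; a sketch of what the elided call presumably looks like, using `MultiProcessDataLoader`'s `shuffle` and `drop_last` keyword arguments:

```python
# Shuffle instances each epoch and drop the final incomplete batch.
data_loader = MultiProcessDataLoader(
    reader, "path_to_data", batch_size=4, shuffle=True, drop_last=True
)
data_loader.index_with(vocab)
for batch in data_loader:
    print(batch)
```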
from collections import Counter, defaultdict
from typing import Dict
from allennlp.data.fields import TextField, LabelField, SequenceLabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
| allennlp-guide-master | exercises/part2/reading-data/fields_setup.py |
# Create fields and instances
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="tokens")
}
text_field_pos = TextField(
[Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")],
token_indexers=token_indexers,
)
text_field_neg = TextField(
[Token("Such")... | allennlp-guide-master | exercises/part2/reading-data/vocabulary_count_source.py |
from typing import Dict
from allennlp.data.instance import Instance
from allennlp.data.fields import TextField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
| allennlp-guide-master | exercises/part2/reading-data/vocabulary_count_setup.py |
from typing import Dict
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
| allennlp-guide-master | exercises/part2/reading-data/vocabulary_creation_setup.py |
# Create fields and instances
# We will use the namespace 'tokens' to map tokens to integers. This is the
# default value, but we are passing it here explicitly to make it clear.
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="tokens")
}
text_field_pos = TextField(
[Token... | allennlp-guide-master | exercises/part2/reading-data/vocabulary_creation_source.py |
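A sketch of what vocabulary creation with explicit namespaces then looks like; `instances` stands in for a hypothetical list of `Instance`s built from the fields above:

```python
# 'instances' is a hypothetical list of Instances built from the fields above.
vocab = Vocabulary.from_instances(instances)
# Tokens and labels live in separate namespaces, so their id spaces don't collide.
print(vocab.get_token_index("movie", namespace="tokens"))
print(vocab.get_vocab_size("tokens"))
```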
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import Field, LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
| allennlp-guide-master | exercises/part2/reading-data/dataset_reader_basic_setup.py |
# Create Fields
tokens = [Token("The"), Token("best"), Token("movie"), Token("ever"), Token("!")]
token_indexers: Dict[str, TokenIndexer] = {"tokens": SingleIdTokenIndexer()}
text_field = TextField(tokens, token_indexers=token_indexers)
label_field = LabelField("pos")
sequence_label_field = SequenceLabelField(
["... | allennlp-guide-master | exercises/part2/reading-data/instances_source.py |
reader = MyDatasetReader()
vocab = Vocabulary.from_instances(reader.read("path_to_data"))
print("Using the BucketBatchSampler:")
# The sorting_keys argument is unnecessary here, because the sampler will
# automatically detect that 'tokens' is the right sorting key, but we are
# including it in our example for complete... | allennlp-guide-master | exercises/part2/reading-data/data_loader_bucket.py |
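The loader construction is elided; a sketch using the standard `BucketBatchSampler`, which groups instances of similar length together to minimize padding:

```python
from allennlp.data.samplers import BucketBatchSampler

data_loader = MultiProcessDataLoader(
    reader,
    "path_to_data",
    batch_sampler=BucketBatchSampler(batch_size=4, sorting_keys=["tokens"]),
)
data_loader.index_with(vocab)
for batch in data_loader:
    print(batch)
```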
# Splits text into words (instead of wordpieces or characters).
tokenizer: Tokenizer = WhitespaceTokenizer()
# Represents each token with a single ID from a vocabulary.
token_indexer: TokenIndexer = SingleIdTokenIndexer(namespace="token_vocab")
vocab = Vocabulary()
vocab.add_tokens_to_namespace(
["This", "is", "s... | allennlp-guide-master | exercises/part2/representing-text-as-features/token_indexers_simple.py |
# This is what gets created by TextField.as_tensor with a SingleIdTokenIndexer
# and a TokenCharactersIndexer; see the code snippet above. This time we're using
# more intuitive names for the indexers and embedders.
token_tensor = {
"tokens": {"tokens": torch.LongTensor([[2, 4, 3, 5]])},
"token_characters": {
... | allennlp-guide-master | exercises/part2/representing-text-as-features/token_embedders_combined.py |
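A sketch of embedding such a combined tensor: a plain `Embedding` for the single ids plus a character-level CNN wrapped in `TokenCharactersEncoder`. All dimensions below are illustrative assumptions:

```python
from allennlp.modules.seq2vec_encoders import CnnEncoder
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding, TokenCharactersEncoder

embedding = Embedding(num_embeddings=10, embedding_dim=3)
character_embedding = Embedding(num_embeddings=15, embedding_dim=4)
cnn_encoder = CnnEncoder(embedding_dim=4, num_filters=5, ngram_filter_sizes=(3,))
token_encoder = TokenCharactersEncoder(character_embedding, cnn_encoder)

# The keys must match the indexer names that produced token_tensor.
embedder = BasicTextFieldEmbedder(
    token_embedders={"tokens": embedding, "token_characters": token_encoder}
)
embedded_tokens = embedder(token_tensor)  # (1, sequence_length, 3 + 5)
```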
# Splits text into words (instead of wordpieces or characters).
tokenizer: Tokenizer = WhitespaceTokenizer()
# Represents each token with both an id from a vocabulary and a sequence of
# characters.
token_indexers: Dict[str, TokenIndexer] = {
"tokens": SingleIdTokenIndexer(namespace="token_vocab"),
"token_char... | allennlp-guide-master | exercises/part2/representing-text-as-features/token_indexers_combined.py |
# This is what gets created by TextField.as_tensor with a SingleIdTokenIndexer;
# Note that we added the batch dimension at the front. You choose the 'indexer1'
# name when you configure your data processing code.
token_tensor = {"indexer1": {"tokens": torch.LongTensor([[1, 3, 2, 9, 4, 3]])}}
# You would typically ge... | allennlp-guide-master | exercises/part2/representing-text-as-features/token_embedders_simple.py |
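A sketch of the matching embedder, whose key must line up with the `'indexer1'` name in the tensor dict (dimensions are illustrative):

```python
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding

# num_embeddings must cover the largest id in the tensor.
embedding = Embedding(num_embeddings=10, embedding_dim=3)
embedder = BasicTextFieldEmbedder(token_embedders={"indexer1": embedding})
print(embedder(token_tensor).shape)  # (1, 6, 3)
```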
import warnings
from typing import Dict
import torch
from allennlp.data import Token, Vocabulary, TokenIndexer, Tokenizer
from allennlp.data.fields import ListField, TextField
from allennlp.data.token_indexers import (
SingleIdTokenIndexer,
TokenCharactersIndexer,
ELMoTokenCharactersIndexer,
Pretrained... | allennlp-guide-master | exercises/part2/representing-text-as-features/setup.py |
# Splits text into words (instead of wordpieces or characters). For ELMo, you can
# just use any word-level tokenizer that you like, though for best results you
# should use the same tokenizer that was used with ELMo, which is an older version
# of spacy. We're using a whitespace tokenizer here for ease of demonstrat... | allennlp-guide-master | exercises/part2/representing-text-as-features/token_indexers_contextual.py |
# It's easiest to get ELMo input by just running the data code. See the
# exercise above for an explanation of this code.
tokenizer: Tokenizer = WhitespaceTokenizer()
token_indexer: TokenIndexer = ELMoTokenCharactersIndexer()
vocab = Vocabulary()
text = "This is some text."
tokens = tokenizer.tokenize(text)
print("ELM... | allennlp-guide-master | exercises/part2/representing-text-as-features/token_embedders_contextual.py |
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
import torch
# This is what gets created by TextField.as_tensor with a SingleIdTokenIndexer;
# see the exercises above.
token_tensor = {"tokens": {"tokens... | allennlp-guide-master | exercises/part2/representing-text-as-features/pretrained_embedding.py |
# We're following the logic from the "Combining multiple TokenIndexers" example
# above.
tokenizer = SpacyTokenizer(pos_tags=True)
vocab = Vocabulary()
vocab.add_tokens_to_namespace(
["This", "is", "some", "text", "."], namespace="token_vocab"
)
vocab.add_tokens_to_namespace(
["T", "h", "i", "s", " ", "o", "m"... | allennlp-guide-master | exercises/part2/representing-text-as-features/interacting_with_tensors.py |
# This pattern is typically used in cases where your input data is already
# tokenized, so we're showing that here.
text_tokens = ["This", "is", "some", "frandibulous", "text", "."]
tokens = [Token(x) for x in text_tokens]
print(tokens)
# We're using a very small transformer here so that it runs quickly in binder. You... | allennlp-guide-master | exercises/part2/representing-text-as-features/mismatched_tokenization.py |
import torch
from allennlp.modules.seq2vec_encoders import (
Seq2VecEncoder,
CnnEncoder,
LstmSeq2VecEncoder,
)
batch_size = 8
sequence_length = 10
input_size = 5
hidden_size = 2
x = torch.rand(batch_size, sequence_length, input_size)
mask = torch.ones(batch_size, sequence_length)
print("shape of input:", ... | allennlp-guide-master | exercises/part2/common-architectures/seq2vec.py |
# Create an instance with multiple spans
tokens = [
Token(token)
for token in ["I", "shot", "an", "elephant", "in", "my", "pajamas", "."]
]
token_indexers: Dict[str, TokenIndexer] = {"tokens": SingleIdTokenIndexer()}
text_field = TextField(tokens, token_indexers=token_indexers)
spans = [(2, 3), (5, 6)] # ('an... | allennlp-guide-master | exercises/part2/common-architectures/span_source.py |
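A sketch of wrapping those spans as fields; `SpanField` indices are inclusive and refer to positions in the `TextField` (the field names are illustrative):

```python
span_fields = ListField([SpanField(start, end, text_field) for start, end in spans])
instance = Instance({"tokens": text_field, "spans": span_fields})
```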
embedding_dim1 = 8
embedding_dim2 = 16
sequence_length = 10
# Attention
attention: Attention
# dot product attention only allows vector/matrix of the same size
vector = torch.rand(
(
1,
embedding_dim1,
)
)
matrix = torch.rand((1, sequence_length, embedding_dim1))
attention = DotProductAttentio... | allennlp-guide-master | exercises/part2/common-architectures/attention_source.py |
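A sketch of applying the attention module; the output is a softmax-normalized distribution over the `sequence_length` rows of `matrix`:

```python
attention = DotProductAttention()
weights = attention(vector, matrix)
print(weights.shape)  # (1, sequence_length)
print(weights.sum())  # ~1.0, since the scores are normalized by default
```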
from typing import Dict
import torch
from allennlp.data import Batch, Instance, Token, Vocabulary
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
from allennlp.data.fields import TextField, ListField, SpanField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenInde... | allennlp-guide-master | exercises/part2/common-architectures/span_setup.py |
import torch
from allennlp.modules.attention import (
Attention,
DotProductAttention,
BilinearAttention,
LinearAttention,
)
from allennlp.modules.matrix_attention import (
MatrixAttention,
DotProductMatrixAttention,
BilinearMatrixAttention,
LinearMatrixAttention,
)
from allennlp.nn impor... | allennlp-guide-master | exercises/part2/common-architectures/attention_setup.py |
import torch
from allennlp.modules.seq2seq_encoders import (
Seq2SeqEncoder,
PassThroughEncoder,
LstmSeq2SeqEncoder,
)
batch_size = 8
sequence_length = 10
input_size = 5
hidden_size = 2
x = torch.rand(batch_size, sequence_length, input_size)
mask = torch.ones(batch_size, sequence_length)
print("shape of i... | allennlp-guide-master | exercises/part2/common-architectures/seq2seq.py |
import torch
from allennlp.nn.initializers import ConstantInitializer
from allennlp.nn.regularizers import L1Regularizer, L2Regularizer, RegularizerApplicator
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(2, 3)
self.linear2 = torch.nn.Lin... | allennlp-guide-master | exercises/part2/building-your-model/model_regularization.py |
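A sketch of applying regularizers by parameter-name regex; the names match the `Net` above, and the returned penalty would be added to the loss during training:

```python
applicator = RegularizerApplicator(
    regexes=[
        ("linear1.weight", L1Regularizer(alpha=0.01)),
        ("linear2.weight", L2Regularizer(alpha=0.01)),
    ]
)
net = Net()
print(applicator(net))  # scalar penalty tensor
```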
import json
import os
import tempfile
from copy import deepcopy
from typing import Dict, Iterable, List
import torch
from allennlp.common import JsonDict
from allennlp.common.params import Params
from allennlp.data import (
DataLoader,
DatasetReader,
Field,
Instance,
TextFieldTensors,
Vocabular... | allennlp-guide-master | exercises/part2/building-your-model/setup_model_io.py |
# Create a toy model that just prints tensors passed to forward
class ToyModel(Model):
def __init__(self, vocab: Vocabulary):
super().__init__(vocab)
# Note that the signature of forward() needs to match that of field names
def forward(
self, tokens: TextFieldTensors, label: torch.Tensor = ... | allennlp-guide-master | exercises/part2/building-your-model/model_forward.py |
CONFIG = """
{
"dataset_reader" : {
"type": "classification-tsv",
"token_indexers": {
"tokens": {
"type": "single_id"
}
}
},
"train_data_path": "quick_start/data/movie_review/train.tsv",
"validation_data_path": "quick_start/data/movie_revie... | allennlp-guide-master | exercises/part2/building-your-model/model_io.py |
import torch
from allennlp.nn.initializers import (
InitializerApplicator,
XavierUniformInitializer,
ConstantInitializer,
NormalInitializer,
)
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear1 = torch.nn.Linear(2, 3)
self.linear2 = torch.nn.Lin... | allennlp-guide-master | exercises/part2/building-your-model/model_init.py |
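A sketch of applying initializers by parameter-name regex; `applicator(net)` rewrites the matching parameters in place:

```python
applicator = InitializerApplicator(
    regexes=[
        ("linear1.weight", XavierUniformInitializer()),
        (".*bias", ConstantInitializer(val=0.0)),
    ]
)
net = Net()
applicator(net)
print(net.linear1.weight, net.linear1.bias)
```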
from typing import Dict
import torch
import numpy
from allennlp.data import Instance, Token, Vocabulary
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import TextField, LabelField
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.data.token_indexers import To... | allennlp-guide-master | exercises/part2/building-your-model/setup_model_forward.py |
# Create a toy model that just returns a random distribution over labels
class ToyModel(Model):
def __init__(self, vocab: Vocabulary):
super().__init__(vocab)
def forward(
self, tokens: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
# Simply generate rand... | allennlp-guide-master | exercises/part2/building-your-model/model_prediction.py |
import json
from allennlp.common import FromParams, Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, mean: float, variance: float):
self.mean = mean
self.variance = variance
class ModelWithGaussian(F... | allennlp-guide-master | exercises/part2/using-config-files/extras_basic.py |
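A sketch of the `FromParams` round trip this exercise demonstrates:

```python
params = Params({"mean": 0.0, "variance": 1.0})
gaussian = Gaussian.from_params(params)
print(gaussian.mean, gaussian.variance)  # 0.0 1.0
```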
import json
from typing import List
from allennlp.common import Registrable, Params
class Count(Registrable):
def __init__(self, count: int):
self.count = count
@classmethod
def from_list_of_ints(cls, int_list: List[int]):
return cls(len(int_list))
@classmethod
def from_list_of_s... | allennlp-guide-master | exercises/part2/using-config-files/multiple_constructors.py |
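A sketch of registering one of the alternative constructors under its own name and dispatching to it via the `type` key; the registration call shown here is hypothetical (the guide would use the decorator form on the class):

```python
# Hypothetical post-hoc registration; equivalent to a decorator on Count.
Count.register("from_list_of_ints", constructor="from_list_of_ints")(Count)

params = Params({"type": "from_list_of_ints", "int_list": [1, 2, 3]})
count = Count.from_params(params)
print(count.count)  # 3
```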
import json
from allennlp.common import FromParams, Params, Registrable, Lazy
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, vocab: Vocabulary, mean: float, variance: float):
self.vocab = vocab
self.mean = mean
self.variance = variance
print(f"... | allennlp-guide-master | exercises/part2/using-config-files/lazy_good.py |
import json
from allennlp.common import FromParams, Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, vocab: Vocabulary, mean: float, variance: float):
self.vocab = vocab
self.mean = mean
self.v... | allennlp-guide-master | exercises/part2/using-config-files/extras_recursive.py |
import json
from allennlp.common import FromParams, Params
class BaseGaussian(FromParams):
def __init__(self, mean: float, variance: float):
self.mean = mean
self.variance = variance
class MyGaussian(BaseGaussian):
def __init__(self, name: str, **kwargs):
super().__init__(**kwargs)
... | allennlp-guide-master | exercises/part2/using-config-files/kwargs.py |
import json
from allennlp.common import FromParams, Params, Lazy
from allennlp.data import Vocabulary
class Gaussian(FromParams):
def __init__(self, vocab: Vocabulary, mean: float, variance: float):
self.vocab = vocab
self.mean = mean
self.variance = variance
print(f"Gaussian got ... | allennlp-guide-master | exercises/part2/using-config-files/lazy_bad.py |
from typing import Dict, Iterable, List
import torch
from allennlp.data import DatasetReader, Instance, Vocabulary, TextFieldTensors
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, ... | allennlp-guide-master | exercises/part1/training-and-prediction/model_setup.py |
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(senten... | allennlp-guide-master | exercises/part1/training-and-prediction/prediction_source.py |
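A sketch of using the predictor, assuming a trained `model` and the `dataset_reader` from the earlier exercises, and a model whose output dict contains a `'probs'` entry:

```python
predictor = SentenceClassifierPredictor(model, dataset_reader)
output = predictor.predict("A good movie!")
print(output["probs"])
```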
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.... | allennlp-guide-master | exercises/part1/training-and-prediction/evaluation_setup.py |
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import Field, LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
| allennlp-guide-master | exercises/part1/training-and-prediction/dataset_reader_setup.py |
class ClassificationTsvReader(DatasetReader):
def __init__(
self,
tokenizer: Tokenizer = None,
token_indexers: Dict[str, TokenIndexer] = None,
max_tokens: int = None,
**kwargs
):
super().__init__(**kwargs)
self.tokenizer = tokenizer or WhitespaceTokenizer(... | allennlp-guide-master | exercises/part1/training-and-prediction/dataset_reader_source.py |
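The reader's methods are elided above; a sketch of what they typically look like for this exercise, assuming one `text<TAB>label` pair per line and relying on the setup snippet's imports. These would live inside the `ClassificationTsvReader` class body:

```python
# Sketch of ClassificationTsvReader's elided methods.
def text_to_instance(self, text: str, label: str = None) -> Instance:
    tokens = self.tokenizer.tokenize(text)
    if self.max_tokens:
        tokens = tokens[: self.max_tokens]
    fields: Dict[str, Field] = {"text": TextField(tokens, self.token_indexers)}
    if label:
        fields["label"] = LabelField(label)
    return Instance(fields)

def _read(self, file_path: str) -> Iterable[Instance]:
    with open(file_path, "r") as lines:
        for line in lines:
            text, sentiment = line.strip().split("\t")
            yield self.text_to_instance(text, sentiment)
```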
config = {
"dataset_reader": {
"type": "classification-tsv",
"token_indexers": {"tokens": {"type": "single_id"}},
},
"train_data_path": "quick_start/data/movie_review/train.tsv",
"validation_data_path": "quick_start/data/movie_review/dev.tsv",
"model": {
"type": "simple_class... | allennlp-guide-master | exercises/part1/training-and-prediction/config_source.py |
def run_training_loop():
dataset_reader = build_dataset_reader()
train_data, dev_data = read_data(dataset_reader)
vocab = build_vocab(train_data + dev_data)
model = build_model(vocab)
train_loader, dev_loader = build_data_loaders(train_data, dev_data)
train_loader.index_with(vocab)
dev_lo... | allennlp-guide-master | exercises/part1/training-and-prediction/training_source.py |
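The helper functions are elided; a sketch of one of them, `build_data_loaders`, using `SimpleDataLoader` and relying on the setup snippet's imports:

```python
def build_data_loaders(
    train_data: List[Instance], dev_data: List[Instance]
) -> Tuple[DataLoader, DataLoader]:
    train_loader = SimpleDataLoader(train_data, batch_size=8, shuffle=True)
    dev_loader = SimpleDataLoader(dev_data, batch_size=8, shuffle=False)
    return train_loader, dev_loader
```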
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.common.util import JsonDict
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields i... | allennlp-guide-master | exercises/part1/training-and-prediction/prediction_setup.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import allennlp
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField... | allennlp-guide-master | exercises/part1/training-and-prediction/training_setup.py |
# We've copied the training loop from an earlier example, with updated model
# code, above in the Setup section. We run the training loop to get a trained
# model.
model, dataset_reader = run_training_loop()
# Now we can evaluate the model on a new dataset.
test_data = list(dataset_reader.read("quick_start/data/movie_... | allennlp-guide-master | exercises/part1/training-and-prediction/evaluation_source.py |
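A sketch of the elided evaluation call, using AllenNLP's `evaluate` utility:

```python
from allennlp.training.util import evaluate

data_loader = SimpleDataLoader(test_data, batch_size=8)
data_loader.index_with(model.vocab)
results = evaluate(model, data_loader)
print(results)  # e.g. {'accuracy': ..., 'loss': ...}
```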
import tempfile
import json
from typing import Dict, Iterable, List
import torch
from allennlp.data import DatasetReader, Instance, Vocabulary, TextFieldTensors
from allennlp.data.fields import LabelField, TextField, Field
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.t... | allennlp-guide-master | exercises/part1/training-and-prediction/config_setup.py |
class SimpleClassifier(Model):
def __init__(
self, vocab: Vocabulary, embedder: TextFieldEmbedder, encoder: Seq2VecEncoder
):
super().__init__(vocab)
self.embedder = embedder
self.encoder = encoder
num_labels = vocab.get_vocab_size("labels")
self.classifier = torc... | allennlp-guide-master | exercises/part1/training-and-prediction/model_source.py |
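The `forward()` body is elided; a sketch following the embed, mask, encode, classify recipe this model implements. It would live inside `SimpleClassifier`, relying on the setup snippet's imports:

```python
def forward(
    self, text: TextFieldTensors, label: torch.Tensor = None
) -> Dict[str, torch.Tensor]:
    embedded_text = self.embedder(text)               # (batch, num_tokens, emb_dim)
    mask = util.get_text_field_mask(text)             # (batch, num_tokens)
    encoded_text = self.encoder(embedded_text, mask)  # (batch, enc_dim)
    logits = self.classifier(encoded_text)            # (batch, num_labels)
    output = {"probs": torch.nn.functional.softmax(logits, dim=-1)}
    if label is not None:
        output["loss"] = torch.nn.functional.cross_entropy(logits, label)
    return output
```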
import glob
from dataclasses import dataclass
import re
from typing import Optional, Iterator
OUTPUT_DIR = "_exercises_test/"
CODEBLOCK_RE = re.compile(r"<codeblock source=\"([^\"]+)\"( setup=\"([^\"]+)\")?>")
@dataclass
class CodeExercise:
source: str
setup: Optional[str] = None
def find_code_exercises(... | allennlp-guide-master | scripts/build_exercise_tests.py |
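A sketch of what `CODEBLOCK_RE` captures on a guide source line (the paths below are made up):

```python
line = '<codeblock source="part2/reading-data/fields" setup="part2/reading-data/setup">'
m = CODEBLOCK_RE.search(line)
print(m.group(1))  # part2/reading-data/fields
print(m.group(3))  # part2/reading-data/setup
```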
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.common.util import JsonDict
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields i... | allennlp-guide-master | quick_start/predict.py |
allennlp-guide-master | quick_start/__init__.py | |
import tempfile
from typing import Dict, Iterable, List, Tuple
import allennlp
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField... | allennlp-guide-master | quick_start/train.py |
import tempfile
from typing import Dict, Iterable, List, Tuple
import torch
from allennlp.data import (
DataLoader,
DatasetReader,
Instance,
Vocabulary,
TextFieldTensors,
)
from allennlp.data.data_loaders import SimpleDataLoader
from allennlp.data.fields import LabelField, TextField
from allennlp.... | allennlp-guide-master | quick_start/evaluate.py |
from .dataset_readers import *
from .models import *
from .predictors import *
| allennlp-guide-master | quick_start/my_text_classifier/__init__.py |
from .classification_tsv import ClassificationTsvReader
| allennlp-guide-master | quick_start/my_text_classifier/dataset_readers/__init__.py |
from typing import Dict, Iterable, List
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import LabelField, TextField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, WhitespaceTokenizer
@DatasetReader.regist... | allennlp-guide-master | quick_start/my_text_classifier/dataset_readers/classification_tsv.py |
from .sentence_classifier_predictor import SentenceClassifierPredictor
| allennlp-guide-master | quick_start/my_text_classifier/predictors/__init__.py |
from allennlp.common import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors import Predictor
@Predictor.register("sentence_classifier")
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return s... | allennlp-guide-master | quick_start/my_text_classifier/predictors/sentence_classifier_predictor.py |
from .simple_classifier import SimpleClassifier
| allennlp-guide-master | quick_start/my_text_classifier/models/__init__.py |
from typing import Dict
import torch
from allennlp.data import Vocabulary
from allennlp.data import TextFieldTensors
from allennlp.models import Model
from allennlp.modules import TextFieldEmbedder, Seq2VecEncoder
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register(... | allennlp-guide-master | quick_start/my_text_classifier/models/simple_classifier.py |
from typing import Dict, Optional
from overrides import overrides
import torch
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, util
... | contrastive-explanations-main | allennlp_lib/encoder_classifier.py |
from typing import List, Dict
import numpy
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
@Predictor.register("textual_entailment_fixed")
class TextualEntailm... | contrastive-explanations-main | allennlp_lib/nli_predictor.py |
from typing import List, Dict
import numpy
import logging
from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
logger = logging.getLogger(__name__)
@Predictor.regi... | contrastive-explanations-main | allennlp_lib/bios_masked_predictor.py |
from copy import deepcopy
from typing import List, Dict
from overrides import overrides
import numpy
import json
from nltk.tree import Tree
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
from allennlp.data.fields import LabelField
@Pr... | contrastive-explanations-main | allennlp_lib/jsonl_predictor.py |
import itertools
from typing import Dict, Optional
import json
import logging
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, LabelField, MetadataField
from alle... | contrastive-explanations-main | allennlp_lib/mnli.py |
if __name__ == '__main__':
import argparse
from os import listdir
import os
import json
import re
from os.path import isfile, join
    import numpy as np
from allennlp.common.util import import_module_and_submodules as import_submodules
from allennlp.models.archival imp... | contrastive-explanations-main | scripts/cache_linear_classifier.py |
if __name__ == '__main__':
import argparse
from os import listdir
import os
from nltk.tree import Tree
import json
import re
from os.path import isfile, join
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-path', action='store')
pars... | contrastive-explanations-main | scripts/cache_encodings.py |
if __name__ == '__main__':
import argparse
import json
import numpy as np
    import os
import pandas as pd
import spacy
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
nlp = English()
tokenizer = Tokenizer(nlp.vocab)
par... | contrastive-explanations-main | scripts/mnli_concepts.py |
import json
import csv
import os
import pickle
for split in ["train", "dev", "test"]:
path1 = f"data/bios/{split}.pickle"
path2 = f"data/bios/{split}.jsonl"
def find_idx(d: dict):
with_gender, without_gender = d["hard_text"], d["text_without_gender"]
masked_gender = []
with_gender... | contrastive-explanations-main | scripts/bios_pickle_to_jsonl.py |
if __name__ == '__main__':
import argparse
import json
    import numpy as np
import os
parser = argparse.ArgumentParser()
parser.add_argument('--data-path', action='store')
parser.add_argument('--concept-path', action='store')
args = parser.parse_args()
if not os.path.... | contrastive-explanations-main | scripts/bios_concepts.py |
'''
ELMo usage example to write biLM embeddings for an entire dataset to
a file.
'''
import os
import h5py
from bilm import dump_bilm_embeddings
# Our small dataset.
raw_context = [
'Pretrained biLMs compute representations useful for NLP tasks .',
'They give state of the art performance for many tasks .'
]
t... | bilm-tf-master | usage_cached.py |
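The dump call itself is elided; a sketch of the typical invocation, with placeholder paths standing in for the pretrained biLM files:

```python
# Placeholder paths; the options/weights ship with a pretrained biLM.
vocab_file = 'vocab.txt'
options_file = 'options.json'
weight_file = 'weights.hdf5'

# Write the dataset to disk, one whitespace-tokenized sentence per line.
dataset_file = 'dataset.txt'
with open(dataset_file, 'w') as fout:
    for sentence in raw_context:
        fout.write(sentence + '\n')

embedding_file = 'elmo_embeddings.hdf5'
dump_bilm_embeddings(
    vocab_file, dataset_file, options_file, weight_file, embedding_file
)
```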
#!/usr/bin/python
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='bilm',
version='0.1.post5',
url='http://github.com/allenai/bilm-tf',
packages=setuptools.find_packages(),
tests_require=[],
zip_safe=False,
entry_points='',
d... | bilm-tf-master | setup.py |
'''
ELMo usage example with character inputs.
Below, we show usage for SQuAD where each input example consists of both
a question and a paragraph of context.
'''
import tensorflow as tf
import os
from bilm import Batcher, BidirectionalLanguageModel, weight_layers
# Location of pretrained LM. Here we use the test fi... | bilm-tf-master | usage_character.py |
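A sketch of the character-input pipeline this example builds, in TF1 style with placeholder paths:

```python
# Placeholder paths to the pretrained biLM files.
batcher = Batcher('vocab.txt', 50)  # 50 = max characters per token
context_character_ids = tf.placeholder('int32', shape=(None, None, 50))

bilm = BidirectionalLanguageModel('options.json', 'weights.hdf5')
context_embeddings_op = bilm(context_character_ids)
elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ids = batcher.batch_sentences([['Pretrained', 'biLMs', 'are', 'useful', '.']])
    vectors = sess.run(
        elmo_context_input['weighted_op'],
        feed_dict={context_character_ids: ids},
    )
```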
'''
ELMo usage example with pre-computed and cached context independent
token representations
Below, we show usage for SQuAD where each input example consists of both
a question and a paragraph of context.
'''
import tensorflow as tf
import os
from bilm import TokenBatcher, BidirectionalLanguageModel, weight_layers, ... | bilm-tf-master | usage_token.py |
import argparse
import numpy as np
from bilm.training import train, load_options_latest_checkpoint, load_vocab
from bilm.data import BidirectionalLMDataset
def main(args):
# load the vocab
vocab = load_vocab(args.vocab_file, 50)
# define the options
batch_size = 128 # batch size for each GPU
... | bilm-tf-master | bin/train_elmo.py |
import argparse
import numpy as np
from bilm.training import train, load_options_latest_checkpoint, load_vocab
from bilm.data import LMDataset, BidirectionalLMDataset
def main(args):
options, ckpt_file = load_options_latest_checkpoint(args.save_dir)
if 'char_cnn' in options:
max_word_length = optio... | bilm-tf-master | bin/restart.py |
import argparse
from bilm.training import dump_weights as dw
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', help='Location of checkpoint files')
parser.add_argument('--outfile', help='Output hdf5 file with weights')
args = parser.parse_args()
dw... | bilm-tf-master | bin/dump_weights.py |
import argparse
from bilm.training import test, load_options_latest_checkpoint, load_vocab
from bilm.data import LMDataset, BidirectionalLMDataset
def main(args):
options, ckpt_file = load_options_latest_checkpoint(args.save_dir)
# load the vocab
if 'char_cnn' in options:
max_word_length = optio... | bilm-tf-master | bin/run_test.py |
import tensorflow as tf
def weight_layers(name, bilm_ops, l2_coef=None,
use_top_only=False, do_layer_norm=False):
'''
Weight the layers of a biLM with trainable scalar weights to
compute ELMo representations.
For each output layer, this returns two ops. The first computes
a... | bilm-tf-master | bilm/elmo.py |
from .data import Batcher, TokenBatcher
from .model import BidirectionalLanguageModel, dump_token_embeddings, \
dump_bilm_embeddings
from .elmo import weight_layers
| bilm-tf-master | bilm/__init__.py |
import numpy as np
import tensorflow as tf
import h5py
import json
import re
from .data import UnicodeCharsVocabulary, Batcher
DTYPE = 'float32'
DTYPE_INT = 'int64'
class BidirectionalLanguageModel(object):
def __init__(
self,
options_file: str,
weight_file: str,
... | bilm-tf-master | bilm/model.py |
'''
Train and test bidirectional language models.
'''
import os
import time
import json
import re
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
from .data import Vocabulary, UnicodeCharsVocabulary, InvalidNumberOfCharacters
DTYPE = 'float32'
DTYPE... | bilm-tf-master | bilm/training.py |
# originally based on https://github.com/tensorflow/models/tree/master/lm_1b
import glob
import random
import numpy as np
from typing import List
class Vocabulary(object):
'''
A token vocabulary. Holds a map from token to ids and provides
a method for encoding text to a sequence of ids.
'''
def... | bilm-tf-master | bilm/data.py |
import unittest
import os
import json
import numpy as np
import tensorflow as tf
from bilm.model import BidirectionalLanguageModel
from bilm.data import Batcher
from bilm.elmo import weight_layers
FIXTURES = 'tests/fixtures/model/'
class TestWeightedLayers(unittest.TestCase):
def tearDown(self):
tf.r... | bilm-tf-master | tests/test_elmo.py |
import unittest
import os
import json
import h5py
import tempfile
import shutil
import numpy as np
import tensorflow as tf
from bilm.model import BidirectionalLanguageModel, dump_token_embeddings
from bilm.data import Batcher, TokenBatcher
FIXTURES = 'tests/fixtures/model/'
def _load_sentences_embeddings():
#... | bilm-tf-master | tests/test_model.py |
import unittest
import os
import shutil
import tempfile
import tensorflow as tf
import numpy as np
from bilm.training import train, test, load_vocab, \
load_options_latest_checkpoint
from bilm.data import LMDataset, BidirectionalLMDataset
FIXTURES = 'tests/fixtures/train/'
class Te... | bilm-tf-master | tests/test_training.py |
import unittest
import tempfile
import os
import numpy as np
from bilm.data import UnicodeCharsVocabulary, Vocabulary, \
Batcher, TokenBatcher, LMDataset, BidirectionalLMDataset
DATA_FIXTURES = 'tests/fixtures/data/'
TRAIN_FIXTURES = 'tests/fixtures/train/'
class TestVocabulary(unittest.TestCase):
def setU... | bilm-tf-master | tests/test_data.py |
import argparse
import functools
import os
os.environ['MKL_THREADING_LAYER'] = 'GNU'
from omegaconf import OmegaConf
from src.lightning.trainers.moco2_trainer import MocoV2Trainer
# set default of print to flush
# print = functools.partial(print, flush=True)
def train(conf_path):
conf = OmegaConf.loa... | CSR-main | train_csr.py |
import argparse
import json
import os
import numpy as np
def create_table(args):
metric_dir = args.metrics_dir
success = []
num_no_change_energy = 0
prop_fixed_strict = []
energy_prop = []
num_changed = []
atomic_success_walkthrough= []
precision_w = []
atomic_success_unshuffle ... | CSR-main | aggrigate_metrics.py |
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact.base_abstractions.sensor import SensorSuite
from allenact.algorithms.onpolicy_sync.storage import RolloutStorage
from ray.util.queue import Queue
import time
import n... | CSR-main | runner_cache_trajectories.py |
from ray.util.queue import Queue
from src.simulation.rearrangement_args import RearrangementArgs
from src.simulation.agent_roomr import AgentRoomr
from src.shared.constants import (IMAGE_SIZE, TEST_ROOM_IDS, TRAIN_ROOM_IDS,
VAL_ROOM_IDS)
from pytorch_lightning import seed_everything
im... | CSR-main | runner_eval_rearrangement.py |
import atexit
import os
import platform
import re
import shlex
import subprocess
import tempfile
# Turning off automatic black formatting for this script as it breaks quotes.
# fmt: off
def pci_records():
records = []
command = shlex.split("lspci -vmm")
output = subprocess.check_output(command).decode()... | CSR-main | scripts/startx.py |
CSR-main | src/__init__.py | |
from typing import Any, List
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from src.shared.utils import render_confusion_matrix
from torch.utils.data.data... | CSR-main | src/lightning/custom_callbacks.py |
import os
import random
import numpy as np
import pytorch_lightning as pl
import torch
import wandb
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins import DDPPlugi... | CSR-main | src/lightning/trainers/moco2_trainer.py |
CSR-main | src/lightning/trainers/__init__.py |