Code stringlengths 103 85.9k | Summary listlengths 0 94 |
|---|---|
Please provide a description of the function:def detailed_tokens(tokenizer, text):
node = tokenizer.parseToNode(text)
node = node.next # first node is beginning of sentence and empty, skip it
words = []
while node.posid != 0:
surface = node.surface
base = surface # a default value... | [
"Format Mecab output into a nice data structure, based on Janome."
] |
def symlink_to(orig, dest):
    """Create a symlink. Used for model shortcut links.

    orig (unicode / Path): The origin path.
    dest (unicode / Path): The destination path of the symlink.
    """
    if not is_windows:
        orig.symlink_to(dest)
        return
    # On Windows, symlinks are created via the ``mklink`` cmd builtin, which
    # requires shell=True to be resolved.
    import subprocess

    subprocess.check_call(
        ["mklink", "/d", path2str(orig), path2str(dest)], shell=True
    )
"Create a symlink. Used for model shortcut links.\n\n orig (unicode / Path): The origin path.\n dest (unicode / Path): The destination path of the symlink.\n "
] |
def symlink_remove(link):
    """Remove a symlink. Used for model shortcut links.

    link (unicode / Path): The path to the symlink.
    """
    # https://stackoverflow.com/q/26554135/6400719
    link_str = path2str(link)
    if os.path.isdir(link_str) and is_windows:
        # Directory symlinks on Windows (Py2.7) must be removed with rmdir.
        os.rmdir(link_str)
    else:
        os.unlink(link_str)
"Remove a symlink. Used for model shortcut links.\n\n link (unicode / Path): The path to the symlink.\n "
] |
Please provide a description of the function:def is_config(python2=None, python3=None, windows=None, linux=None, osx=None):
return (
python2 in (None, is_python2)
and python3 in (None, is_python3)
and windows in (None, is_windows)
and linux in (None, is_linux)
and osx in... | [
"Check if a specific configuration of Python version and operating system\n matches the user's setup. Mostly used to display targeted error messages.\n\n python2 (bool): spaCy is executed with Python 2.x.\n python3 (bool): spaCy is executed with Python 3.x.\n windows (bool): spaCy is executed on Windows... |
Please provide a description of the function:def import_file(name, loc):
loc = path2str(loc)
if is_python_pre_3_5:
import imp
return imp.load_source(name, loc)
else:
import importlib.util
spec = importlib.util.spec_from_file_location(name, str(loc))
module = im... | [
"Import module from a file. Used to load models from a directory.\n\n name (unicode): Name of module to load.\n loc (unicode / Path): Path to the file.\n RETURNS: The loaded module.\n "
] |
Please provide a description of the function:def unescape_unicode(string):
if string is None:
return string
# We only want to unescape the unicode, so we first must protect the other
# backslashes.
string = string.replace("\\", "\\\\")
# Now we remove that protection for the unicode.
... | [
"Python2.7's re module chokes when compiling patterns that have ranges\n between escaped unicode codepoints if the two codepoints are unrecognised\n in the unicode database. For instance:\n\n re.compile('[\\\\uAA77-\\\\uAA79]').findall(\"hello\")\n\n Ends up matching every character (on Python 2). T... |
Please provide a description of the function:def get_lang_class(lang):
global LANGUAGES
# Check if an entry point is exposed for the language code
entry_point = get_entry_point("spacy_languages", lang)
if entry_point is not None:
LANGUAGES[lang] = entry_point
return entry_point
... | [
"Import and load a Language class.\n\n lang (unicode): Two-letter language code, e.g. 'en'.\n RETURNS (Language): Language class.\n "
] |
Please provide a description of the function:def load_model(name, **overrides):
data_path = get_data_path()
if not data_path or not data_path.exists():
raise IOError(Errors.E049.format(path=path2str(data_path)))
if isinstance(name, basestring_): # in data dir / shortcut
if name in set(... | [
"Load a model from a shortcut link, package or data path.\n\n name (unicode): Package name, shortcut link or model path.\n **overrides: Specific overrides, like pipeline components to disable.\n RETURNS (Language): `Language` class with the loaded model.\n "
] |
def load_model_from_link(name, **overrides):
    """Load a model from a shortcut link, or directory in spaCy data path.

    name (unicode): Name of the shortcut link.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS (Language): `Language` class with the loaded model.
    """
    init_path = get_data_path() / name / "__init__.py"
    try:
        cls = import_file(name, init_path)
    except AttributeError:
        # The link exists but does not point at a valid model package.
        raise IOError(Errors.E051.format(name=name))
    return cls.load(**overrides)
"Load a model from a shortcut link, or directory in spaCy data path."
] |
def load_model_from_package(name, **overrides):
    """Load a model from an installed package.

    name (unicode): Importable package name of the model.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS: Whatever the package's ``load()`` entry point returns.
    """
    module = importlib.import_module(name)
    return module.load(**overrides)
"Load a model from an installed package."
] |
Please provide a description of the function:def load_model_from_path(model_path, meta=False, **overrides):
if not meta:
meta = get_model_meta(model_path)
cls = get_lang_class(meta["lang"])
nlp = cls(meta=meta, **overrides)
pipeline = meta.get("pipeline", [])
disable = overrides.get("di... | [
"Load a model from a data directory path. Creates Language class with\n pipeline from meta.json and then calls from_disk() with path."
] |
Please provide a description of the function:def load_model_from_init_py(init_file, **overrides):
model_path = Path(init_file).parent
meta = get_model_meta(model_path)
data_dir = "%s_%s-%s" % (meta["lang"], meta["name"], meta["version"])
data_path = model_path / data_dir
if not model_path.exist... | [
"Helper function to use in the `load()` method of a model package's\n __init__.py.\n\n init_file (unicode): Path to model's __init__.py, i.e. `__file__`.\n **overrides: Specific overrides, like pipeline components to disable.\n RETURNS (Language): `Language` class with loaded model.\n "
] |
Please provide a description of the function:def get_model_meta(path):
model_path = ensure_path(path)
if not model_path.exists():
raise IOError(Errors.E052.format(path=path2str(model_path)))
meta_path = model_path / "meta.json"
if not meta_path.is_file():
raise IOError(Errors.E053.f... | [
"Get model meta.json from a directory path and validate its contents.\n\n path (unicode or Path): Path to model directory.\n RETURNS (dict): The model's meta data.\n "
] |
Please provide a description of the function:def get_package_path(name):
name = name.lower() # use lowercase version to be safe
# Here we're importing the module just to find it. This is worryingly
# indirect, but it's otherwise very difficult to find the package.
pkg = importlib.import_module(nam... | [
"Get the path to an installed package.\n\n name (unicode): Package name.\n RETURNS (Path): Path to installed package.\n "
] |
def get_entry_points(key):
    """Get registered entry points from other packages for a given key, e.g.
    'spacy_factories' and return them as a dictionary, keyed by name.

    key (unicode): Entry point name.
    RETURNS (dict): Entry points, keyed by name.
    """
    return {ep.name: ep.load() for ep in pkg_resources.iter_entry_points(key)}
"Get registered entry points from other packages for a given key, e.g.\n 'spacy_factories' and return them as a dictionary, keyed by name.\n\n key (unicode): Entry point name.\n RETURNS (dict): Entry points, keyed by name.\n "
] |
def get_entry_point(key, value):
    """Check if a registered entry point is available for a given name and
    load it. Otherwise, return None.

    key (unicode): Entry point name.
    value (unicode): Name of entry point to load.
    RETURNS: The loaded entry point or None.
    """
    matches = (ep for ep in pkg_resources.iter_entry_points(key) if ep.name == value)
    entry_point = next(matches, None)
    if entry_point is not None:
        return entry_point.load()
"Check if registered entry point is available for a given name and\n load it. Otherwise, return None.\n\n key (unicode): Entry point name.\n value (unicode): Name of entry point to load.\n RETURNS: The loaded entry point or None.\n "
] |
Please provide a description of the function:def is_in_jupyter():
# https://stackoverflow.com/a/39662359/6400719
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
except NameError:
return Fal... | [
"Check if user is running spaCy from a Jupyter notebook by detecting the\n IPython kernel. Mainly used for the displaCy visualizer.\n RETURNS (bool): True if in Jupyter, False if not.\n "
] |
def compile_suffix_regex(entries):
    """Compile a sequence of suffix rules into a regex object.

    entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
    RETURNS (regex object): The regex object, to be used for Tokenizer.suffix_search.
    """
    # Anchor each non-blank rule at the end of the string, then join the
    # rules into a single alternation.
    anchored = [rule + "$" for rule in entries if rule.strip()]
    return re.compile("|".join(anchored))
"Compile a sequence of suffix rules into a regex object.\n\n entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.\n RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search.\n "
] |
def compile_infix_regex(entries):
    """Compile a sequence of infix rules into a regex object.

    entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
    RETURNS (regex object): The regex object, to be used for Tokenizer.infix_finditer.
    """
    # Drop blank rules, then combine the rest into a single alternation.
    valid_rules = (rule for rule in entries if rule.strip())
    return re.compile("|".join(valid_rules))
"Compile a sequence of infix rules into a regex object.\n\n entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.\n RETURNS (regex object): The regex object. to be used for Tokenizer.infix_finditer.\n "
] |
Please provide a description of the function:def update_exc(base_exceptions, *addition_dicts):
exc = dict(base_exceptions)
for additions in addition_dicts:
for orth, token_attrs in additions.items():
if not all(isinstance(attr[ORTH], unicode_) for attr in token_attrs):
r... | [
"Update and validate tokenizer exceptions. Will overwrite exceptions.\n\n base_exceptions (dict): Base exceptions.\n *addition_dicts (dict): Exceptions to add to the base dict, in order.\n RETURNS (dict): Combined tokenizer exceptions.\n "
] |
Please provide a description of the function:def expand_exc(excs, search, replace):
def _fix_token(token, search, replace):
fixed = dict(token)
fixed[ORTH] = fixed[ORTH].replace(search, replace)
return fixed
new_excs = dict(excs)
for token_string, tokens in excs.items():
... | [
"Find string in tokenizer exceptions, duplicate entry and replace string.\n For example, to add additional versions with typographic apostrophes.\n\n excs (dict): Tokenizer exceptions.\n search (unicode): String to find and replace.\n replace (unicode): Replacement.\n RETURNS (dict): Combined tokeniz... |
Please provide a description of the function:def minibatch(items, size=8):
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = list(itertools.islice(items, int(batch_size)))
... | [
"Iterate over batches of items. `size` may be an iterator,\n so that batch-size can vary on each step.\n "
] |
def compounding(start, stop, compound):
    """Yield an infinite series of compounding values. Each time the
    generator is called, a value is produced by multiplying the previous
    value by the compound rate, clipped so it never passes ``stop``.

    EXAMPLE:
        >>> sizes = compounding(1., 10., 1.5)
        >>> assert next(sizes) == 1.
        >>> assert next(sizes) == 1 * 1.5
    """
    # Whether the series grows or shrinks is fixed by start vs. stop.
    decreasing = start > stop

    def clip(value):
        if decreasing:
            return max(value, stop)
        return min(value, stop)

    curr = float(start)
    while True:
        yield clip(curr)
        curr *= compound
"Yield an infinite series of compounding values. Each time the\n generator is called, a value is produced by multiplying the previous\n value by the compound rate.\n\n EXAMPLE:\n >>> sizes = compounding(1., 10., 1.5)\n >>> assert next(sizes) == 1.\n >>> assert next(sizes) == 1 * 1.5\n >... |
def stepping(start, stop, steps):
    """Yield an infinite series of values that step from a start value to a
    final value over some number of steps. Each step is (stop-start)/steps.
    After the final value is reached, the generator keeps yielding it.
    """
    step_size = (stop - start) / steps
    # max() caps a descending series at stop; min() caps an ascending one.
    clip = max if start > stop else min
    curr = float(start)
    while True:
        yield clip(curr, stop)
        curr += step_size
"Yield an infinite series of values that step from a start value to a\n final value over some number of steps. Each step is (stop-start)/steps.\n\n After the final value is reached, the generator continues yielding that\n value.\n\n EXAMPLE:\n >>> sizes = stepping(1., 200., 100)\n >>> assert n... |
def decaying(start, stop, decay):
    """Yield an infinite series of linearly decaying values, floored at
    ``stop``; each step subtracts ``decay`` from the previous value.
    """
    curr = float(start)
    while True:
        yield curr if curr > stop else stop
        curr -= decay
"Yield an infinite series of linearly decaying values."
] |
Please provide a description of the function:def minibatch_by_words(items, size, tuples=True, count_words=len):
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = []
whi... | [
"Create minibatches of a given number of words."
] |
Please provide a description of the function:def itershuffle(iterable, bufsize=1000):
iterable = iter(iterable)
buf = []
try:
while True:
for i in range(random.randint(1, bufsize - len(buf))):
buf.append(next(iterable))
random.shuffle(buf)
for... | [
"Shuffle an iterator. This works by holding `bufsize` items back\n and yielding them sometime later. Obviously, this is not unbiased –\n but should be good enough for batching. Larger bufsize means less bias.\n From https://gist.github.com/andres-erbsen/1307752\n\n iterable (iterable): Iterator to shuff... |
Please provide a description of the function:def validate_json(data, validator):
errors = []
for err in sorted(validator.iter_errors(data), key=lambda e: e.path):
if err.path:
err_path = "[{}]".format(" -> ".join([str(p) for p in err.path]))
else:
err_path = ""
... | [
"Validate data against a given JSON schema (see https://json-schema.org).\n\n data: JSON-serializable data to validate.\n validator (jsonschema.DraftXValidator): The validator.\n RETURNS (list): A list of error messages, if available.\n "
] |
Please provide a description of the function:def get_serialization_exclude(serializers, exclude, kwargs):
exclude = list(exclude)
# Split to support file names like meta.json
options = [name.split(".")[0] for name in serializers]
for key, value in kwargs.items():
if key in ("vocab",) and va... | [
"Helper function to validate serialization args and manage transition from\n keyword arguments (pre v2.1) to exclude argument.\n "
] |
def labels(self):
    """All labels present in the match patterns.

    RETURNS (tuple): The string labels.

    DOCS: https://spacy.io/api/entityruler#labels
    """
    # Combine labels from both token patterns and phrase patterns.
    combined = set(self.token_patterns) | set(self.phrase_patterns)
    return tuple(combined)
"All labels present in the match patterns.\n\n RETURNS (set): The string labels.\n\n DOCS: https://spacy.io/api/entityruler#labels\n "
] |
Please provide a description of the function:def patterns(self):
all_patterns = []
for label, patterns in self.token_patterns.items():
for pattern in patterns:
all_patterns.append({"label": label, "pattern": pattern})
for label, patterns in self.phrase_patter... | [
"Get all patterns that were added to the entity ruler.\n\n RETURNS (list): The original patterns, one dictionary per pattern.\n\n DOCS: https://spacy.io/api/entityruler#patterns\n "
] |
Please provide a description of the function:def add_patterns(self, patterns):
for entry in patterns:
label = entry["label"]
pattern = entry["pattern"]
if isinstance(pattern, basestring_):
self.phrase_patterns[label].append(self.nlp(pattern))
... | [
"Add patterns to the entitiy ruler. A pattern can either be a token\n pattern (list of dicts) or a phrase pattern (string). For example:\n {'label': 'ORG', 'pattern': 'Apple'}\n {'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}\n\n patterns (list): The patterns to a... |
def from_bytes(self, patterns_bytes, **kwargs):
    """Load the entity ruler from a bytestring.

    patterns_bytes (bytes): The bytestring to load.
    **kwargs: Other config parameters, mostly for consistency.
    RETURNS (EntityRuler): The loaded entity ruler.

    DOCS: https://spacy.io/api/entityruler#from_bytes
    """
    self.add_patterns(srsly.msgpack_loads(patterns_bytes))
    return self
"Load the entity ruler from a bytestring.\n\n patterns_bytes (bytes): The bytestring to load.\n **kwargs: Other config paramters, mostly for consistency.\n RETURNS (EntityRuler): The loaded entity ruler.\n\n DOCS: https://spacy.io/api/entityruler#from_bytes\n "
] |
def from_disk(self, path, **kwargs):
    """Load the entity ruler from a file. Expects a file containing
    newline-delimited JSON (JSONL) with one entry per line.

    path (unicode / Path): The JSONL file to load.
    **kwargs: Other config parameters, mostly for consistency.
    RETURNS (EntityRuler): The loaded entity ruler.
    """
    # Normalize the path and force a .jsonl extension before reading.
    jsonl_path = ensure_path(path).with_suffix(".jsonl")
    self.add_patterns(srsly.read_jsonl(jsonl_path))
    return self
"Load the entity ruler from a file. Expects a file containing\n newline-delimited JSON (JSONL) with one entry per line.\n\n path (unicode / Path): The JSONL file to load.\n **kwargs: Other config paramters, mostly for consistency.\n RETURNS (EntityRuler): The loaded entity ruler.\n\n ... |
def to_disk(self, path, **kwargs):
    """Save the entity ruler patterns to a file. The patterns will be
    saved as newline-delimited JSON (JSONL).

    path (unicode / Path): The JSONL file to write.
    **kwargs: Other config parameters, mostly for consistency.
    """
    # Normalize the path and force a .jsonl extension before writing.
    jsonl_path = ensure_path(path).with_suffix(".jsonl")
    srsly.write_jsonl(jsonl_path, self.patterns)
"Save the entity ruler patterns to a directory. The patterns will be\n saved as newline-delimited JSON (JSONL).\n\n path (unicode / Path): The JSONL file to load.\n **kwargs: Other config paramters, mostly for consistency.\n RETURNS (EntityRuler): The loaded entity ruler.\n\n DOCS... |
Please provide a description of the function:def read_data(
nlp,
conllu_file,
text_file,
raw_text=True,
oracle_segments=False,
max_doc_length=None,
limit=None,
):
if not raw_text and not oracle_segments:
raise ValueError("At least one of raw_text or oracle_segments must be T... | [
"Read the CONLLU format into (Doc, GoldParse) tuples. If raw_text=True,\n include Doc objects created using nlp.make_doc and then aligned against\n the gold-standard sequences. If oracle_segments=True, include Doc objects\n created from the gold-standard segments. At least one must be True."
] |
Please provide a description of the function:def golds_to_gold_tuples(docs, golds):
tuples = []
for doc, gold in zip(docs, golds):
text = doc.text
ids, words, tags, heads, labels, iob = zip(*gold.orig_annot)
sents = [((ids, words, tags, heads, labels, iob), [])]
tuples.appen... | [
"Get out the annoying 'tuples' format used by begin_training, given the\n GoldParse objects."
] |
Please provide a description of the function:def like_num(text):
text = (
text.replace(",", "")
.replace(".", "")
.replace("،", "")
.replace("٫", "")
.replace("/", "")
)
if text.isdigit():
return True
if text in _num_words:
return True
if ... | [
"\n check if text resembles a number\n "
] |
def merge_bytes(binder_strings):
    """Concatenate multiple serialized binders into one byte string.

    binder_strings (iterable): Byte strings produced by Binder.to_bytes().
    RETURNS (bytes): A single serialized binder holding all annotations.
    """
    merged = None
    for byte_string in binder_strings:
        current = Binder().from_bytes(byte_string)
        if merged is None:
            merged = current
        else:
            merged.merge(current)
    # NOTE(review): like the original, this raises AttributeError if
    # binder_strings is empty (merged stays None).
    return merged.to_bytes()
"Concatenate multiple serialized binders into one byte string."
] |
Please provide a description of the function:def add(self, doc):
array = doc.to_array(self.attrs)
if len(array.shape) == 1:
array = array.reshape((array.shape[0], 1))
self.tokens.append(array)
spaces = doc.to_array(SPACY)
assert array.shape[0] == spaces.shape... | [
"Add a doc's annotations to the binder for serialization."
] |
Please provide a description of the function:def get_docs(self, vocab):
for string in self.strings:
vocab[string]
orth_col = self.attrs.index(ORTH)
for tokens, spaces in zip(self.tokens, self.spaces):
words = [vocab.strings[orth] for orth in tokens[:, orth_col]]
... | [
"Recover Doc objects from the annotations, using the given vocab."
] |
def merge(self, other):
    """Extend the annotations of this binder with the annotations from
    another binder. Both binders must serialize the same attrs.
    """
    assert self.attrs == other.attrs
    self.tokens += other.tokens
    self.spaces += other.spaces
    self.strings |= other.strings
"Extend the annotations of this binder with the annotations from another."
] |
Please provide a description of the function:def to_bytes(self):
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape
lengths = [len(tokens) for tokens in self.tokens]
msg = {
"attrs": self.attrs,
"tokens": numpy.vstack(self.tokens).... | [
"Serialize the binder's annotations into a byte string."
] |
Please provide a description of the function:def from_bytes(self, string):
msg = srsly.msgpack_loads(gzip.decompress(string))
self.attrs = msg["attrs"]
self.strings = set(msg["strings"])
lengths = numpy.fromstring(msg["lengths"], dtype="int32")
flat_spaces = numpy.fromst... | [
"Deserialize the binder's annotations from a byte string."
] |
Please provide a description of the function:def load_data(limit=0, split=0.8):
# Partition off part of the train data for evaluation
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
cats = [{"POSITIVE":... | [
"Load data from the IMDB dataset."
] |
Please provide a description of the function:def package(input_dir, output_dir, meta_path=None, create_meta=False, force=False):
msg = Printer()
input_path = util.ensure_path(input_dir)
output_path = util.ensure_path(output_dir)
meta_path = util.ensure_path(meta_path)
if not input_path or not i... | [
"\n Generate Python package for model data, including meta and required\n installation files. A new directory will be created in the specified\n output directory, and model data will be copied over. If --create-meta is\n set and a meta.json already exists in the output directory, the existing\n value... |
Please provide a description of the function:def is_base_form(self, univ_pos, morphology=None):
morphology = {} if morphology is None else morphology
others = [key for key in morphology
if key not in (POS, 'Number', 'POS', 'VerbForm', 'Tense')]
if univ_pos == 'noun' an... | [
"\n Check whether we're dealing with an uninflected paradigm, so we can\n avoid lemmatization entirely.\n "
] |
Please provide a description of the function:def main(model=None, new_model_name="animal", output_dir=None, n_iter=30):
random.seed(0)
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # c... | [
"Set up the pipeline and entity recognizer, and train the new entity."
] |
Please provide a description of the function:def conll_ner2json(input_data, **kwargs):
delimit_docs = "-DOCSTART- -X- O O"
output_docs = []
for doc in input_data.strip().split(delimit_docs):
doc = doc.strip()
if not doc:
continue
output_doc = []
for sent in d... | [
"\n Convert files in the CoNLL-2003 NER format into JSON format for use with\n train cli.\n "
] |
Please provide a description of the function:def main(lang="en", output_dir=None, n_iter=25):
nlp = spacy.blank(lang)
# add the tagger to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
tagger = nlp.create_pipe("tagger")
# Add the tags. This needs to be done be... | [
"Create a new model, set up the pipeline and train the tagger. In order to\n train the tagger with a custom tag map, we're creating a new Language\n instance with a custom vocab.\n "
] |
Please provide a description of the function:def load_textcat_data(limit=0):
# Partition off part of the train data for evaluation
train_data, eval_data = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
eval_texts, eva... | [
"Load data from the IMDB dataset."
] |
Please provide a description of the function:def init_model(
lang,
output_dir,
freqs_loc=None,
clusters_loc=None,
jsonl_loc=None,
vectors_loc=None,
prune_vectors=-1,
):
if jsonl_loc is not None:
if freqs_loc is not None or clusters_loc is not None:
settings = ["-... | [
"\n Create a new model from raw data, like word frequencies, Brown clusters\n and word vectors. If vectors are provided in Word2Vec format, they can\n be either a .txt or zipped as a .zip or .tar.gz.\n "
] |
Please provide a description of the function:def open_file(loc):
loc = ensure_path(loc)
if tarfile.is_tarfile(str(loc)):
return tarfile.open(str(loc), "r:gz")
elif loc.parts[-1].endswith("gz"):
return (line.decode("utf8") for line in gzip.open(str(loc), "r"))
elif loc.parts[-1].ends... | [
"Handle .gz, .tar.gz or unzipped files"
] |
Please provide a description of the function:def main(model=None, output_dir=None, n_iter=100):
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("C... | [
"Load the model, set up the pipeline and train the entity recognizer."
] |
Please provide a description of the function:def pretrain(
texts_loc,
vectors_model,
output_dir,
width=96,
depth=4,
embed_rows=2000,
loss_func="cosine",
use_vectors=False,
dropout=0.2,
n_iter=1000,
batch_size=3000,
max_length=500,
min_length=5,
seed=0,
n_save_... | [
"\n Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components,\n using an approximate language-modelling objective. Specifically, we load\n pre-trained vectors, and train a component like a CNN, BiLSTM, etc to predict\n vectors which match the pre-trained ones. The weights are saved to a di... |
Please provide a description of the function:def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
predictions, backprop = model.begin_update(docs, drop=drop)
loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)
backprop(gradients, sgd=optimizer)
# Don't want to ... | [
"Perform an update over a single batch of documents.\n\n docs (iterable): A batch of `Doc` objects.\n drop (float): The droput rate.\n optimizer (callable): An optimizer.\n RETURNS loss: A float for the loss.\n "
] |
Please provide a description of the function:def get_vectors_loss(ops, docs, prediction, objective="L2"):
# The simplest way to implement this would be to vstack the
# token.vector values, but that's a bit inefficient, especially on GPU.
# Instead we fetch the index into the vectors table for each of o... | [
"Compute a mean-squared error loss between the documents' vectors and\n the prediction.\n\n Note that this is ripe for customization! We could compute the vectors\n in some other word, e.g. with an LSTM language model, or use some other\n type of objective.\n "
] |
Please provide a description of the function:def create_pretraining_model(nlp, tok2vec):
output_size = nlp.vocab.vectors.data.shape[1]
output_layer = chain(
LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0)
)
# This is annoying, but the parser etc have the flatten step after
... | [
"Define a network for the pretraining. We simply add an output layer onto\n the tok2vec input model. The tok2vec input model needs to be a model that\n takes a batch of Doc objects (as a list), and returns a list of arrays.\n Each array in the output needs to have one row per token in the doc.\n "
] |
Please provide a description of the function:def _smart_round(figure, width=10, max_decimal=4):
n_digits = len(str(int(figure)))
n_decimal = width - (n_digits + 1)
if n_decimal <= 1:
return str(int(figure))
else:
n_decimal = min(n_decimal, max_decimal)
format_str = "%." + st... | [
"Round large numbers as integers, smaller numbers as decimals."
] |
Please provide a description of the function:def noun_chunks(obj):
# It follows the logic of the noun chunks finder of English language,
# adjusted to some Greek language special characteristics.
# obj tag corrects some DEP tagger mistakes.
# Further improvement of the models will eliminate the nee... | [
"\n Detect base noun phrases. Works on both Doc and Span.\n "
] |
Please provide a description of the function:def get_ext_args(**kwargs):
default = kwargs.get("default")
getter = kwargs.get("getter")
setter = kwargs.get("setter")
method = kwargs.get("method")
if getter is None and setter is not None:
raise ValueError(Errors.E089)
valid_opts = ("d... | [
"Validate and convert arguments. Reused in Doc, Token and Span."
] |
Please provide a description of the function:def is_writable_attr(ext):
default, method, getter, setter = ext
# Extension is writable if it has a setter (getter + setter), if it has a
# default value (or, if its default value is none, none of the other values
# should be set).
if setter is not ... | [
"Check if an extension attribute is writable.\n ext (tuple): The (default, getter, setter, method) tuple available via\n {Doc,Span,Token}.get_extension.\n RETURNS (bool): Whether the attribute is writable.\n "
] |
Please provide a description of the function:def is_new_osx():
name = distutils.util.get_platform()
if sys.platform != "darwin":
return False
elif name.startswith("macosx-10"):
minor_version = int(name.split("-")[1].split(".")[1])
if minor_version >= 7:
return True
... | [
"Check whether we're on OSX >= 10.10"
] |
Please provide a description of the function:def get_position_label(i, words, tags, heads, labels, ents):
if len(words) < 20:
return "short-doc"
elif i == 0:
return "first-word"
elif i < 10:
return "early-word"
elif i < 20:
return "mid-word"
elif i == len(words) ... | [
"Return labels indicating the position of the word in the document.\n "
] |
Please provide a description of the function:def download(model, direct=False, *pip_args):
dl_tpl = "{m}-{v}/{m}-{v}.tar.gz#egg={m}=={v}"
if direct:
components = model.split("-")
model_name = "".join(components[:-1])
version = components[-1]
dl = download_model(dl_tpl.format... | [
"\n Download compatible model from default download path using pip. Model\n can be shortcut, model name or, if --direct flag is set, full model name\n with version. For direct downloads, the compatibility check will be skipped.\n "
] |
Please provide a description of the function:def convert(
input_file,
output_dir="-",
file_type="json",
n_sents=1,
morphology=False,
converter="auto",
lang=None,
):
msg = Printer()
input_path = Path(input_file)
if file_type not in FILE_TYPES:
msg.fail(
"U... | [
"\n Convert files into JSON format for use with train command and other\n experiment management functions. If no output_dir is specified, the data\n is written to stdout, so you can pipe them forward to a JSON file:\n $ spacy convert some_file.conllu > some_file.json\n "
] |
Please provide a description of the function:def load_model(modelname, add_sentencizer=False):
loading_start = time.time()
nlp = spacy.load(modelname)
if add_sentencizer:
nlp.add_pipe(nlp.create_pipe('sentencizer'))
loading_end = time.time()
loading_time = loading_end - loading_start
... | [
" Load a specific spaCy model "
] |
Please provide a description of the function:def load_default_model_sentencizer(lang):
loading_start = time.time()
lang_class = get_lang_class(lang)
nlp = lang_class()
nlp.add_pipe(nlp.create_pipe('sentencizer'))
loading_end = time.time()
loading_time = loading_end - loading_start
retur... | [
" Load a generic spaCy model and add the sentencizer for sentence tokenization"
] |
def get_freq_tuples(my_list, print_total_threshold):
    """Turn a list of errors into frequency-sorted tuples thresholded by a
    certain total number.

    my_list (list): Tokens to count.
    print_total_threshold (int): Keep at most this many of the most frequent.
    RETURNS (list): (token, count) tuples, most frequent first.
    """
    counts = {}
    for token in my_list:
        counts[token] = counts.get(token, 0) + 1
    ranked = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[:print_total_threshold]
" Turn a list of errors into frequency-sorted tuples thresholded by a certain total number "
] |
Please provide a description of the function:def _contains_blinded_text(stats_xml):
tree = ET.parse(stats_xml)
root = tree.getroot()
total_tokens = int(root.find('size/total/tokens').text)
unique_lemmas = int(root.find('lemmas').get('unique'))
# assume the corpus is largely blinded when there ... | [
" Heuristic to determine whether the treebank has blinded texts or not "
] |
Please provide a description of the function:def fetch_all_treebanks(ud_dir, languages, corpus, best_per_language):
all_treebanks = dict()
treebank_size = dict()
for l in languages:
all_treebanks[l] = []
treebank_size[l] = 0
for treebank_dir in ud_dir.iterdir():
if treebank... | [
"\" Fetch the txt files for all treebanks for a given set of languages "
] |
Please provide a description of the function:def run_single_eval(nlp, loading_time, print_name, text_path, gold_ud, tmp_output_path, out_file, print_header,
check_parse, print_freq_tasks):
with text_path.open(mode='r', encoding='utf-8') as f:
flat_text = f.read()
# STEP 1: toke... | [
"\" Run an evaluation of a model nlp on a certain specified treebank "
] |
Please provide a description of the function:def run_all_evals(models, treebanks, out_file, check_parse, print_freq_tasks):
print_header = True
for tb_lang, treebank_list in treebanks.items():
print()
print("Language", tb_lang)
for text_path in treebank_list:
print(" Ev... | [
"\" Run an evaluation for each language with its specified models and treebanks "
] |
Please provide a description of the function:def main(out_path, ud_dir, check_parse=False, langs=ALL_LANGUAGES, exclude_trained_models=False, exclude_multi=False,
hide_freq=False, corpus='train', best_per_language=False):
languages = [lang.strip() for lang in langs.split(",")]
print_freq_tasks = ... | [
"\"\n Assemble all treebanks and models to run evaluations with.\n When setting check_parse to True, the default models will not be evaluated as they don't have parsing functionality\n "
] |
Please provide a description of the function:def noun_chunks(obj):
# this iterator extracts spans headed by NOUNs starting from the left-most
# syntactic dependent until the NOUN itself for close apposition and
# measurement construction, the span is sometimes extended to the right of
# the NOUN. E... | [
"\n Detect base noun phrases from a dependency parse. Works on both Doc and Span.\n "
] |
Please provide a description of the function:def with_cpu(ops, model):
model.to_cpu()
def with_cpu_forward(inputs, drop=0.0):
cpu_outputs, backprop = model.begin_update(_to_cpu(inputs), drop=drop)
gpu_outputs = _to_device(ops, cpu_outputs)
def with_cpu_backprop(d_outputs, sgd=None... | [
"Wrap a model that should run on CPU, transferring inputs and outputs\n as necessary."
] |
Please provide a description of the function:def build_simple_cnn_text_classifier(tok2vec, nr_class, exclusive_classes=False, **cfg):
with Model.define_operators({">>": chain}):
if exclusive_classes:
output_layer = Softmax(nr_class, tok2vec.nO)
else:
output_layer = (
... | [
"\n Build a simple CNN text classifier, given a token-to-vector model as inputs.\n If exclusive_classes=True, a softmax non-linearity is applied, so that the\n outputs sum to 1. If exclusive_classes=False, a logistic non-linearity\n is applied instead, so that outputs are in the range [0, 1].\n "
] |
Please provide a description of the function:def concatenate_lists(*layers, **kwargs): # pragma: no cover
if not layers:
return noop()
drop_factor = kwargs.get("drop_factor", 1.0)
ops = layers[0].ops
layers = [chain(layer, flatten) for layer in layers]
concat = concatenate(*layers)
... | [
"Compose two or more models `f`, `g`, etc, such that their outputs are\n concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))`\n "
] |
Please provide a description of the function:def masked_language_model(vocab, model, mask_prob=0.15):
random_words = _RandomWords(vocab)
def mlm_forward(docs, drop=0.0):
mask, docs = _apply_mask(docs, random_words, mask_prob=mask_prob)
mask = model.ops.asarray(mask).reshape((mask.shape[0]... | [
"Convert a model into a BERT-style masked language model"
] |
Please provide a description of the function:def begin_training(self, _=tuple(), pipeline=None, sgd=None, **kwargs):
if self.model is True:
self.model = self.Model(pipeline[0].model.nO)
link_vectors_to_models(self.vocab)
if sgd is None:
sgd = self.create_opti... | [
"Allocate model, using width from tensorizer in pipeline.\n\n gold_tuples (iterable): Gold-standard training data.\n pipeline (list): The pipeline the model is part of.\n "
] |
Please provide a description of the function:def render(self, parsed, page=False, minify=False):
# Create a random ID prefix to make sure parses don't receive the
# same ID, even if they're identical
id_prefix = uuid.uuid4().hex
rendered = []
for i, p in enumerate(parsed... | [
"Render complete markup.\n\n parsed (list): Dependency parses to render.\n page (bool): Render parses wrapped as full HTML page.\n minify (bool): Minify HTML markup.\n RETURNS (unicode): Rendered SVG or HTML markup.\n "
] |
Please provide a description of the function:def render_svg(self, render_id, words, arcs):
self.levels = self.get_levels(arcs)
self.highest_level = len(self.levels)
self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke
self.width = self.offset_x + len(words)... | [
"Render SVG.\n\n render_id (int): Unique ID, typically index of document.\n words (list): Individual words and their tags.\n arcs (list): Individual arcs and their start, end, direction and label.\n RETURNS (unicode): Rendered SVG markup.\n "
] |
Please provide a description of the function:def render_word(self, text, tag, i):
y = self.offset_y + self.word_spacing
x = self.offset_x + i * self.distance
if self.direction == "rtl":
x = self.width - x
html_text = escape_html(text)
return TPL_DEP_WORDS.for... | [
"Render individual word.\n\n text (unicode): Word text.\n tag (unicode): Part-of-speech tag.\n i (int): Unique ID, typically word index.\n RETURNS (unicode): Rendered SVG markup.\n "
] |
Please provide a description of the function:def render_arrow(self, label, start, end, direction, i):
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
... | [
"Render individual arrow.\n\n label (unicode): Dependency label.\n start (int): Index of start word.\n end (int): Index of end word.\n direction (unicode): Arrow direction, 'left' or 'right'.\n i (int): Unique ID, typically arrow index.\n RETURNS (unicode): Rendered SVG mar... |
Please provide a description of the function:def get_arc(self, x_start, y, y_curve, x_end):
template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
if self.compact:
template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
return template.format(x=x_start, y=y, c=y_curve, e=x_end) | [
"Render individual arc.\n\n x_start (int): X-coordinate of arrow start point.\n y (int): Y-coordinate of arrow start and end point.\n y_curve (int): Y-corrdinate of Cubic Bézier y_curve point.\n x_end (int): X-coordinate of arrow end point.\n RETURNS (unicode): Definition of the a... |
Please provide a description of the function:def get_arrowhead(self, direction, x, y, end):
if direction == "left":
pos1, pos2, pos3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
else:
pos1, pos2, pos3 = (
end,
end + self.arrow... | [
"Render individual arrow head.\n\n direction (unicode): Arrow direction, 'left' or 'right'.\n x (int): X-coordinate of arrow start point.\n y (int): Y-coordinate of arrow start and end point.\n end (int): X-coordinate of arrow end point.\n RETURNS (unicode): Definition of the arro... |
Please provide a description of the function:def get_levels(self, arcs):
levels = set(map(lambda arc: arc["end"] - arc["start"], arcs))
return sorted(list(levels)) | [
"Calculate available arc height \"levels\".\n Used to calculate arrow heights dynamically and without wasting space.\n\n args (list): Individual arcs and their start, end, direction and label.\n RETURNS (list): Arc levels sorted from lowest to highest.\n "
] |
Please provide a description of the function:def render(self, parsed, page=False, minify=False):
rendered = []
for i, p in enumerate(parsed):
if i == 0:
settings = p.get("settings", {})
self.direction = settings.get("direction", DEFAULT_DIR)
... | [
"Render complete markup.\n\n parsed (list): Dependency parses to render.\n page (bool): Render parses wrapped as full HTML page.\n minify (bool): Minify HTML markup.\n RETURNS (unicode): Rendered HTML markup.\n "
] |
Please provide a description of the function:def render_ents(self, text, spans, title):
markup = ""
offset = 0
for span in spans:
label = span["label"]
start = span["start"]
end = span["end"]
entity = escape_html(text[start:end])
... | [
"Render entities in text.\n\n text (unicode): Original text.\n spans (list): Individual entity spans and their start, end and label.\n title (unicode or None): Document title set in Doc.user_data['title'].\n "
] |
Please provide a description of the function:def merge_noun_chunks(doc):
if not doc.is_parsed:
return doc
with doc.retokenize() as retokenizer:
for np in doc.noun_chunks:
attrs = {"tag": np.root.tag, "dep": np.root.dep}
retokenizer.merge(np, attrs=attrs)
return d... | [
"Merge noun chunks into a single token.\n\n doc (Doc): The Doc object.\n RETURNS (Doc): The Doc object with merged noun chunks.\n\n DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks\n "
] |
Please provide a description of the function:def merge_entities(doc):
with doc.retokenize() as retokenizer:
for ent in doc.ents:
attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
retokenizer.merge(ent, attrs=attrs)
return doc | [
"Merge entities into a single token.\n\n doc (Doc): The Doc object.\n RETURNS (Doc): The Doc object with merged entities.\n\n DOCS: https://spacy.io/api/pipeline-functions#merge_entities\n "
] |
Please provide a description of the function:def merge_subtokens(doc, label="subtok"):
merger = Matcher(doc.vocab)
merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}])
matches = merger(doc)
spans = [doc[start : end + 1] for _, start, end in matches]
with doc.retokenize() as retokenizer:
... | [
"Merge subtokens into a single token.\n\n doc (Doc): The Doc object.\n label (unicode): The subtoken dependency label.\n RETURNS (Doc): The Doc object with merged subtokens.\n\n DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens\n "
] |
Please provide a description of the function:def train(
lang,
output_path,
train_path,
dev_path,
raw_text=None,
base_model=None,
pipeline="tagger,parser,ner",
vectors=None,
n_iter=30,
n_early_stopping=None,
n_examples=0,
use_gpu=-1,
version="0.0.0",
meta_path=None... | [
"\n Train or update a spaCy model. Requires data to be formatted in spaCy's\n JSON format. To convert data from other formats, use the `spacy convert`\n command.\n "
] |
Please provide a description of the function:def _score_for_model(meta):
mean_acc = list()
pipes = meta["pipeline"]
acc = meta["accuracy"]
if "tagger" in pipes:
mean_acc.append(acc["tags_acc"])
if "parser" in pipes:
mean_acc.append((acc["uas"] + acc["las"]) / 2)
if "ner" in ... | [
" Returns mean score between tasks in pipeline that can be used for early stopping. "
] |
Please provide a description of the function:def _load_pretrained_tok2vec(nlp, loc):
with loc.open("rb") as file_:
weights_data = file_.read()
loaded = []
for name, component in nlp.pipeline:
if hasattr(component, "model") and hasattr(component.model, "tok2vec"):
component.t... | [
"Load pre-trained weights for the 'token-to-vector' part of the component\n models, which is typically a CNN. See 'spacy pretrain'. Experimental.\n "
] |
Please provide a description of the function:def conllu2json(input_data, n_sents=10, use_morphology=False, lang=None):
# by @dvsrepo, via #11 explosion/spacy-dev-resources
# by @katarkor
docs = []
sentences = []
conll_tuples = read_conllx(input_data, use_morphology=use_morphology)
checked_f... | [
"\n Convert conllu files into JSON format for use with train cli.\n use_morphology parameter enables appending morphology to tags, which is\n useful for languages such as Spanish, where UD tags are not so rich.\n\n Extract NER tags if available and convert them so that they follow\n BILUO and the Wik... |
Please provide a description of the function:def is_ner(tag):
tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag)
if tag_match:
return True
elif tag == "O":
return True
else:
return False | [
"\n Check the 10th column of the first token to determine if the file contains\n NER tags\n "
] |
Please provide a description of the function:def simplify_tags(iob):
new_iob = []
for tag in iob:
tag_match = re.match("([A-Z_]+)-([A-Z_]+)", tag)
if tag_match:
prefix = tag_match.group(1)
suffix = tag_match.group(2)
if suffix == "GPE_LOC":
... | [
"\n Simplify tags obtained from the dataset in order to follow Wikipedia\n scheme (PER, LOC, ORG, MISC). 'PER', 'LOC' and 'ORG' keep their tags, while\n 'GPE_LOC' is simplified to 'LOC', 'GPE_ORG' to 'ORG' and all remaining tags to\n 'MISC'.\n "
] |
Please provide a description of the function:def info(model=None, markdown=False, silent=False):
msg = Printer()
if model:
if util.is_package(model):
model_path = util.get_package_path(model)
else:
model_path = util.get_data_path() / model
meta_path = model_p... | [
"\n Print info about spaCy installation. If a model shortcut link is\n speficied as an argument, print model information. Flag --markdown\n prints details in Markdown for easy copy-pasting to GitHub issues.\n "
] |
Please provide a description of the function:def print_markdown(data, title=None):
markdown = []
for key, value in data.items():
if isinstance(value, basestring_) and Path(value).exists():
continue
markdown.append("* **{}:** {}".format(key, unicode_(value)))
if title:
... | [
"Print data in GitHub-flavoured Markdown format for issues etc.\n\n data (dict or list of tuples): Label/value pairs.\n title (unicode or None): Title, will be rendered as headline 2.\n "
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.