text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_logging( log_dir=tempfile.gettempdir(), format="[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s", level=logging.INFO, ):
"""Configures logging to output to the provided log_dir. Will use a nested directory whose name is the current timestamp. :param log_dir: The directory to store logs in. :type log_dir: str :param format: The logging format string to use. :type format: str :param level: The logging level to use, e.g., logging.INFO. """ |
if not Meta.log_path:
# Generate a new directory using the log_dir, if it doesn't exist
dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
log_path = os.path.join(log_dir, dt)
if not os.path.exists(log_path):
os.makedirs(log_path)
# Configure the logger using the provided path
logging.basicConfig(
format=format,
level=level,
handlers=[
logging.FileHandler(os.path.join(log_path, "fonduer.log")),
logging.StreamHandler(),
],
)
# Notify user of log location
logger.info(f"Setting logging directory to: {log_path}")
Meta.log_path = log_path
else:
logger.info(
f"Logging was already initialized to use {Meta.log_path}. "
"To configure logging manually, call fonduer.init_logging before "
"initialiting Meta."
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_meta(conn_string):
"""Update Meta class.""" |
url = urlparse(conn_string)
Meta.conn_string = conn_string
Meta.DBNAME = url.path[1:]
Meta.DBUSER = url.username
Meta.DBPWD = url.password
Meta.DBHOST = url.hostname
Meta.DBPORT = url.port
Meta.postgres = url.scheme.startswith("postgresql") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init(cls, conn_string=None):
"""Return the unique Meta class.""" |
    # Only (re)connect when a connection string is supplied; otherwise the
    # previously configured connection (if any) is kept.
    if conn_string:
        _update_meta(conn_string)
        # We initialize the engine within the models module because models'
        # schema can depend on which data types are supported by the engine
        Meta.Session = new_sessionmaker()
        # The engine is the sessionmaker's bind argument.
        Meta.engine = Meta.Session.kw["bind"]
        logger.info(
            f"Connecting user:{Meta.DBUSER} "
            f"to {Meta.DBHOST}:{Meta.DBPORT}/{Meta.DBNAME}"
        )
        # Create the storage schema for all declared model classes.
        Meta._init_db()
    # Fall back to default logging configuration if none was set up yet.
    if not Meta.log_path:
        init_logging()
    return cls |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_db(cls):
""" Initialize the storage schema. This call must be performed after all classes that extend Base are declared to ensure the storage schema is initialized. """ |
    # This list of import defines which SQLAlchemy classes will be
    # initialized when Meta.init() is called. If a sqlalchemy class is not
    # imported before the call to create_all(), it will not be created.
    import fonduer.candidates.models # noqa
    import fonduer.features.models # noqa
    import fonduer.learning.models # noqa
    import fonduer.parser.models # noqa
    import fonduer.supervision.models # noqa
    import fonduer.utils.models # noqa
    logger.info("Initializing the storage schema")
    # Create every table registered on the declarative Base (no-op for
    # tables that already exist).
    Meta.Base.metadata.create_all(Meta.engine) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def model_installed(name):
"""Check if spaCy language model is installed. From https://github.com/explosion/spaCy/blob/master/spacy/util.py :param name: :return: """ |
data_path = util.get_data_path()
if not data_path or not data_path.exists():
raise IOError(f"Can't find spaCy data path: {data_path}")
if name in {d.name for d in data_path.iterdir()}:
return True
if Spacy.is_package(name): # installed as package
return True
if Path(name).exists(): # path to model data directory
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_lang_model(self):
""" Load spaCy language model or download if model is available and not installed. Currenty supported spaCy languages en English (50MB) de German (645MB) fr French (1.33GB) es Spanish (377MB) :return: """ |
if self.lang in self.languages:
if not Spacy.model_installed(self.lang):
download(self.lang)
model = spacy.load(self.lang)
elif self.lang in self.alpha_languages:
language_module = importlib.import_module(f"spacy.lang.{self.lang}")
language_method = getattr(language_module, self.alpha_languages[self.lang])
model = language_method()
self.model = model |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def enrich_sentences_with_NLP(self, all_sentences):
""" Enrich a list of fonduer Sentence objects with NLP features. We merge and process the text of all Sentences for higher efficiency. :param all_sentences: List of fonduer Sentence objects for one document :return: """ |
    # Only languages with full pipeline support can be enriched.
    if not self.has_NLP_support():
        raise NotImplementedError(
            f"Language {self.lang} not available in spacy beyond tokenization"
        )
    if len(all_sentences) == 0:
        return  # Nothing to parse
    # The rule-based sentencizer would re-split our sentences; replace it
    # with a custom boundary detector that preserves the existing splits.
    if self.model.has_pipe("sentencizer"):
        self.model.remove_pipe("sentencizer")
        self.logger.debug(
            f"Removed sentencizer ('sentencizer') from model. "
            f"Now in pipeline: {self.model.pipe_names}"
        )
    if self.model.has_pipe("sentence_boundary_detector"):
        self.model.remove_pipe(name="sentence_boundary_detector")
    self.model.add_pipe(
        set_custom_boundary, before="parser", name="sentence_boundary_detector"
    )
    # Batch sentences so each merged text stays under spaCy's char limit.
    sentence_batches = self._split_sentences_by_char_limit(
        all_sentences, self.model.max_length
    )
    # TODO: We could do this in parallel. Test speedup in the future
    for sentence_batch in sentence_batches:
        custom_tokenizer = TokenPreservingTokenizer(self.model.vocab)
        # we circumvent redundant tokenization by using a custom
        # tokenizer that directly uses the already separated words
        # of each sentence as tokens
        doc = custom_tokenizer(sentence_batch)
        doc.user_data = sentence_batch
        for name, proc in self.model.pipeline:  # iterate over components in order
            doc = proc(doc)
        try:
            assert doc.is_parsed
        except Exception:
            self.logger.exception(f"{doc} was not parsed")
        # Copy the spaCy annotations back onto the fonduer Sentence objects.
        for sent, current_sentence_obj in zip(doc.sents, sentence_batch):
            parts = defaultdict(list)
            for i, token in enumerate(sent):
                parts["lemmas"].append(token.lemma_)
                parts["pos_tags"].append(token.tag_)
                parts["ner_tags"].append(
                    token.ent_type_ if token.ent_type_ else "O"
                )
                # Dependency heads are 1-indexed within the sentence;
                # 0 marks the root token (token.head is token).
                head_idx = (
                    0 if token.head is token else token.head.i - sent[0].i + 1
                )
                parts["dep_parents"].append(head_idx)
                parts["dep_labels"].append(token.dep_)
            current_sentence_obj.pos_tags = parts["pos_tags"]
            current_sentence_obj.lemmas = parts["lemmas"]
            current_sentence_obj.ner_tags = parts["ner_tags"]
            current_sentence_obj.dep_parents = parts["dep_parents"]
            current_sentence_obj.dep_labels = parts["dep_labels"]
            yield current_sentence_obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_sentences(self, text):
""" Split input text into sentences that match CoreNLP's default format, but are not yet processed. :param text: The text of the parent paragraph of the sentences :return: """ |
    # Swap the custom boundary detector (if present) for the fast
    # rule-based sentencizer; only sentence splits are needed here.
    if self.model.has_pipe("sentence_boundary_detector"):
        self.model.remove_pipe(name="sentence_boundary_detector")
    if not self.model.has_pipe("sentencizer"):
        sentencizer = self.model.create_pipe("sentencizer")  # add sentencizer
        self.model.add_pipe(sentencizer)
    try:
        doc = self.model(text, disable=["parser", "tagger", "ner"])
    except ValueError:
        # temporarily increase the character limit of spacy
        # 'Probably safe' according to spacy, as no parser or NER is used
        previous_max_length = self.model.max_length
        self.model.max_length = 100_000_000
        self.logger.warning(
            f"Temporarily increased spacy maximum "
            f"character limit to {self.model.max_length} to split sentences."
        )
        doc = self.model(text, disable=["parser", "tagger", "ner"])
        self.model.max_length = previous_max_length
        self.logger.warning(
            f"Spacy maximum "
            f"character limit set back to {self.model.max_length}."
        )
    # Mark the doc as parsed so doc.sents is accessible downstream.
    # NOTE(review): assumes a spaCy version where is_parsed is writable.
    doc.is_parsed = True
    position = 0
    for sent in doc.sents:
        parts = defaultdict(list)
        # Shadows the `text` parameter deliberately; only the sentence
        # text is needed from here on.
        text = sent.text
        for i, token in enumerate(sent):
            parts["words"].append(str(token))
            parts["lemmas"].append(token.lemma_)
            parts["pos_tags"].append(token.pos_)
            parts["ner_tags"].append("")  # placeholder for later NLP parsing
            parts["char_offsets"].append(token.idx)
            parts["abs_char_offsets"].append(token.idx)
            parts["dep_parents"].append(0)  # placeholder for later NLP parsing
            parts["dep_labels"].append("")  # placeholder for later NLP parsing
        # make char_offsets relative to start of sentence
        parts["char_offsets"] = [
            p - parts["char_offsets"][0] for p in parts["char_offsets"]
        ]
        parts["position"] = position
        parts["text"] = text
        position += 1
        yield parts |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _setup_model_loss(self, lr):
""" Setup loss and optimizer for PyTorch model. """ |
    # Setup loss (created lazily, only once per model instance)
    if not hasattr(self, "loss"):
        # Soft cross entropy supports probabilistic (non-one-hot) targets.
        self.loss = SoftCrossEntropyLoss()
    # Setup optimizer (also created lazily, only once)
    if not hasattr(self, "optimizer"):
        self.optimizer = optim.Adam(self.parameters(), lr=lr) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_marginals(self, session, X, training=False):
"""Save the predicted marginal probabilities for the Candidates X. :param session: The database session to use. :param X: Input data. :param training: If True, these are training marginals / labels; else they are saved as end model predictions. :type training: bool """ |
save_marginals(session, X, self.marginals(X), training=training) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X, b=0.5, pos_label=1, return_probs=False):
"""Return numpy array of class predictions for X based on predicted marginal probabilities. :param X: Input data. :param b: Decision boundary *for binary setting only*. :type b: float :param pos_label: Positive class index *for binary setting only*. Default: 1 :type pos_label: int :param return_probs: If True, return predict probability. Default: False :type return_probs: bool """ |
    # Optionally convert raw input into the model's feature format.
    if self._check_input(X):
        X = self._preprocess_data(X)
    Y_prob = self.marginals(X)
    # Multi-class case: pick the argmax class (classes are 1-indexed).
    if self.cardinality > 2:
        Y_pred = Y_prob.argmax(axis=1) + 1
        if return_probs:
            return Y_pred, Y_prob
        else:
            return Y_pred
    # Binary case: threshold the positive-class probability at b.
    if pos_label not in [1, 2]:
        raise ValueError("pos_label must have values in {1,2}.")
    self.logger.info(f"Using positive label class {pos_label} with threshold {b}")
    # 3 - pos_label maps 1 -> 2 and 2 -> 1 (the complementary class).
    Y_pred = np.array(
        [pos_label if p[pos_label - 1] > b else 3 - pos_label for p in Y_prob]
    )
    if return_probs:
        return Y_pred, Y_prob
    else:
        return Y_pred |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, model_file, save_dir, verbose=True):
"""Save current model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool """ |
# Check existence of model saving directory and create if does not exist.
if not os.path.exists(save_dir):
os.makedirs(save_dir)
params = {
"model": self.state_dict(),
"cardinality": self.cardinality,
"name": self.name,
"config": self.settings,
}
try:
torch.save(params, f"{save_dir}/{model_file}")
except BaseException:
self.logger.warning("Saving failed... continuing anyway.")
if verbose:
self.logger.info(f"[{self.name}] Model saved as {model_file} in {save_dir}") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, model_file, save_dir, verbose=True):
"""Load model from file and rebuild the model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool """ |
if not os.path.exists(save_dir):
self.logger.error("Loading failed... Directory does not exist.")
try:
checkpoint = torch.load(f"{save_dir}/{model_file}")
except BaseException:
self.logger.error(
f"Loading failed... Cannot load model from {save_dir}/{model_file}"
)
self.load_state_dict(checkpoint["model"])
self.settings = checkpoint["config"]
self.cardinality = checkpoint["cardinality"]
self.name = checkpoint["name"]
if verbose:
self.logger.info(
f"[{self.name}] Model loaded as {model_file} in {save_dir}"
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_parent_tag(mention):
"""Return the HTML tag of the Mention's parent. These may be tags such as 'p', 'h2', 'table', 'div', etc. If a candidate is passed in, only the tag of its first Mention is returned. :param mention: The Mention to evaluate :rtype: string """ |
span = _to_span(mention)
i = _get_node(span.sentence)
return str(i.getparent().tag) if i.getparent() is not None else None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_prev_sibling_tags(mention):
"""Return the HTML tag of the Mention's previous siblings. Previous siblings are Mentions which are at the same level in the HTML tree as the given mention, but are declared before the given mention. If a candidate is passed in, only the previous siblings of its first Mention are considered in the calculation. :param mention: The Mention to evaluate :rtype: list of strings """ |
span = _to_span(mention)
prev_sibling_tags = []
i = _get_node(span.sentence)
while i.getprevious() is not None:
prev_sibling_tags.insert(0, str(i.getprevious().tag))
i = i.getprevious()
return prev_sibling_tags |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_next_sibling_tags(mention):
"""Return the HTML tag of the Mention's next siblings. Next siblings are Mentions which are at the same level in the HTML tree as the given mention, but are declared after the given mention. If a candidate is passed in, only the next siblings of its last Mention are considered in the calculation. :param mention: The Mention to evaluate :rtype: list of strings """ |
span = _to_span(mention)
next_sibling_tags = []
i = _get_node(span.sentence)
while i.getnext() is not None:
next_sibling_tags.append(str(i.getnext().tag))
i = i.getnext()
return next_sibling_tags |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ancestor_class_names(mention):
"""Return the HTML classes of the Mention's ancestors. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings """ |
span = _to_span(mention)
class_names = []
i = _get_node(span.sentence)
while i is not None:
class_names.insert(0, str(i.get("class")))
i = i.getparent()
return class_names |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ancestor_tag_names(mention):
"""Return the HTML tag of the Mention's ancestors. For example, ['html', 'body', 'p']. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings """ |
span = _to_span(mention)
tag_names = []
i = _get_node(span.sentence)
while i is not None:
tag_names.insert(0, str(i.tag))
i = i.getparent()
return tag_names |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ancestor_id_names(mention):
"""Return the HTML id's of the Mention's ancestors. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings """ |
span = _to_span(mention)
id_names = []
i = _get_node(span.sentence)
while i is not None:
id_names.insert(0, str(i.get("id")))
i = i.getparent()
return id_names |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def common_ancestor(c):
"""Return the path to the root that is shared between a binary-Mention Candidate. In particular, this is the common path of HTML tags. :param c: The binary-Mention Candidate to evaluate :rtype: list of strings """ |
span1 = _to_span(c[0])
span2 = _to_span(c[1])
ancestor1 = np.array(span1.sentence.xpath.split("/"))
ancestor2 = np.array(span2.sentence.xpath.split("/"))
min_len = min(ancestor1.size, ancestor2.size)
return list(ancestor1[: np.argmin(ancestor1[:min_len] == ancestor2[:min_len])]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_hidden(self, batch_size):
"""Initiate the initial state. :param batch_size: batch size. :type batch_size: int :return: Initial state of LSTM :rtype: pair of torch.Tensors of shape (num_layers * num_directions, batch_size, hidden_size) """ |
b = 2 if self.bidirectional else 1
if self.use_cuda:
return (
torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden).cuda(),
torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden).cuda(),
)
else:
return (
torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden),
torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden),
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_scalar(self, name, value, step):
"""Log a scalar variable.""" |
    # Forward the value to the underlying summary writer (TensorBoard-style).
    self.writer.add_scalar(name, value, step) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mention_to_tokens(mention, token_type="words", lowercase=False):
""" Extract tokens from the mention :param mention: mention object. :param token_type: token type that wants to extract. :type token_type: str :param lowercase: use lowercase or not. :type lowercase: bool :return: The token list. :rtype: list """ |
tokens = mention.context.sentence.__dict__[token_type]
return [w.lower() if lowercase else w for w in tokens] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mark_sentence(s, args):
"""Insert markers around relation arguments in word sequence :param s: list of tokens in sentence. :type s: list to relation arguments :type args: list :return: The marked sentence. :rtype: list Example: Then Barack married Michelle. -> Then ~~[[1 Barack 1]]~~ married ~~[[2 Michelle 2]]~~. """ |
marks = sorted([y for m in args for y in mark(*m)], reverse=True)
x = list(s)
for k, v in marks:
x.insert(k, v)
return x |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pad_batch(batch, max_len=0, type="int"):
"""Pad the batch into matrix :param batch: The data for padding. :type batch: list of word index sequences :param max_len: Max length of sequence of padding. :type max_len: int :param type: mask value type. :type type: str :return: The padded matrix and correspoing mask matrix. :rtype: pair of torch.Tensors with shape (batch_size, max_sent_len) """ |
batch_size = len(batch)
max_sent_len = int(np.max([len(x) for x in batch]))
if max_len > 0 and max_len < max_sent_len:
max_sent_len = max_len
if type == "float":
idx_matrix = np.zeros((batch_size, max_sent_len), dtype=np.float32)
else:
idx_matrix = np.zeros((batch_size, max_sent_len), dtype=np.int)
for idx1, i in enumerate(batch):
for idx2, j in enumerate(i):
if idx2 >= max_sent_len:
break
idx_matrix[idx1, idx2] = j
idx_matrix = torch.tensor(idx_matrix)
mask_matrix = torch.tensor(torch.eq(idx_matrix.data, 0))
return idx_matrix, mask_matrix |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _generate(self):
"""Parses a file or directory of files into a set of ``Document`` objects.""" |
doc_count = 0
for fp in self.all_files:
for doc in self._get_docs_for_path(fp):
yield doc
doc_count += 1
if doc_count >= self.max_docs:
return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_horz_aligned(c):
"""Return True if all the components of c are horizontally aligned. Horizontal alignment means that the bounding boxes of each Mention of c shares a similar y-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean """ |
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_horz_aligned(
bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))
)
for i in range(len(c))
]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_vert_aligned(c):
"""Return true if all the components of c are vertically aligned. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean """ |
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned(
bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))
)
for i in range(len(c))
]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_vert_aligned_left(c):
"""Return true if all components are vertically aligned on their left border. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the left border of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean """ |
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned_left(
bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))
)
for i in range(len(c))
]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_vert_aligned_right(c):
"""Return true if all components vertically aligned on their right border. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the right border of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean """ |
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned_right(
bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))
)
for i in range(len(c))
]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_vert_aligned_center(c):
"""Return true if all the components are vertically aligned on their center. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the center of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean """ |
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_vert_aligned_center(
bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0]))
)
for i in range(len(c))
]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_page(c):
"""Return true if all the components of c are on the same page of the document. Page numbers are based on the PDF rendering of the document. If a PDF file is provided, it is used. Otherwise, if only a HTML/XML document is provided, a PDF is created and then used to determine the page number of a Mention. :param c: The candidate to evaluate :rtype: boolean """ |
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_from_span(_to_span(c[i])).page
== bbox_from_span(_to_span(c[0])).page
for i in range(len(c))
]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_horz_ngrams( mention, attrib="words", n_min=1, n_max=1, lower=True, from_sentence=True ):
"""Return all ngrams which are visually horizontally aligned with the Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention to evaluate :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :param from_sentence: If True, returns ngrams from any horizontally aligned Sentences, rather than just horizontally aligned ngrams themselves. :rtype: a *generator* of ngrams """ |
spans = _to_spans(mention)
for span in spans:
for ngram in _get_direction_ngrams(
"horz", span, attrib, n_min, n_max, lower, from_sentence
):
yield ngram |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_page_vert_percentile( mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT ):
"""Return which percentile from the TOP in the page the Mention is located in. Percentile is calculated where the top of the page is 0.0, and the bottom of the page is 1.0. For example, a Mention in at the top 1/4 of the page will have a percentile of 0.25. Page width and height are based on pt values:: Letter 612x792 Tabloid 792x1224 Ledger 1224x792 Legal 612x1008 Statement 396x612 Executive 540x720 A0 2384x3371 A1 1685x2384 A2 1190x1684 A3 842x1190 A4 595x842 A4Small 595x842 A5 420x595 B4 729x1032 B5 516x729 Folio 612x936 Quarto 610x780 10x14 720x1008 and should match the source documents. Letter size is used by default. Note that if a candidate is passed in, only the vertical percentil of its first Mention is returned. :param mention: The Mention to evaluate :param page_width: The width of the page. Default to Letter paper width. :param page_height: The heigh of the page. Default to Letter paper height. :rtype: float in [0.0, 1.0] """ |
span = _to_span(mention)
return bbox_from_span(span).top / page_height |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_page_horz_percentile( mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT ):
"""Return which percentile from the LEFT in the page the Mention is located in. Percentile is calculated where the left of the page is 0.0, and the right of the page is 1.0. Page width and height are based on pt values:: Letter 612x792 Tabloid 792x1224 Ledger 1224x792 Legal 612x1008 Statement 396x612 Executive 540x720 A0 2384x3371 A1 1685x2384 A2 1190x1684 A3 842x1190 A4 595x842 A4Small 595x842 A5 420x595 B4 729x1032 B5 516x729 Folio 612x936 Quarto 610x780 10x14 720x1008 and should match the source documents. Letter size is used by default. Note that if a candidate is passed in, only the vertical percentile of its first Mention is returned. :param c: The Mention to evaluate :param page_width: The width of the page. Default to Letter paper width. :param page_height: The heigh of the page. Default to Letter paper height. :rtype: float in [0.0, 1.0] """ |
span = _to_span(mention)
return bbox_from_span(span).left / page_width |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_visual_aligned_lemmas(mention):
"""Return a generator of the lemmas aligned visually with the Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention to evaluate. :rtype: a *generator* of lemmas """ |
spans = _to_spans(mention)
for span in spans:
sentence = span.sentence
doc = sentence.document
# cache features for the entire document
_preprocess_visual_features(doc)
for aligned_lemma in sentence._aligned_lemmas:
yield aligned_lemma |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def camel_to_under(name):
""" Converts camel-case string to lowercase string separated by underscores. Written by epost (http://stackoverflow.com/questions/1175208). :param name: String to be converted :return: new String with camel-case converted to lowercase, underscored """ |
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_as_dict(x):
"""Return an object as a dictionary of its attributes.""" |
if isinstance(x, dict):
return x
else:
try:
return x._asdict()
except AttributeError:
return x.__dict__ |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_st(self, doc_loader, **kwargs):
"""Run the UDF single-threaded, optionally with progress bar""" |
    # One UDF instance handles every document in this process.
    udf = self.udf_class(**self.udf_init_kwargs)
    # Run single-thread
    for doc in doc_loader:
        if self.pb is not None:
            self.pb.update(1)
        # Stage every object the UDF produces for this document.
        udf.session.add_all(y for y in udf.apply(doc, **kwargs))
    # Commit session and close progress bar if applicable
    udf.session.commit() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_mt(self, doc_loader, parallelism, **kwargs):
"""Run the UDF multi-threaded using python multiprocessing""" |
    # Multiprocessing workers each open their own DB connection, which
    # this implementation only supports on PostgreSQL.
    if not Meta.postgres:
        raise ValueError("Fonduer must use PostgreSQL as a database backend.")
    # Feeder: stream documents into the shared queue, then append the
    # terminal sentinel so workers know input is exhausted.
    def fill_input_queue(in_queue, doc_loader, terminal_signal):
        for doc in doc_loader:
            in_queue.put(doc)
        in_queue.put(terminal_signal)
    # Create an input queue to feed documents to UDF workers
    manager = Manager()
    in_queue = manager.Queue()
    # Use an output queue to track multiprocess progress
    out_queue = JoinableQueue()
    total_count = len(doc_loader)
    # Start UDF Processes
    for i in range(parallelism):
        udf = self.udf_class(
            in_queue=in_queue,
            out_queue=out_queue,
            worker_id=i,
            **self.udf_init_kwargs,
        )
        udf.apply_kwargs = kwargs
        self.udfs.append(udf)
    # Start the UDF processes, and then join on their completion
    for udf in self.udfs:
        udf.start()
    # Fill input queue with documents (in a separate process so this one
    # can drain the output queue concurrently)
    terminal_signal = UDF.QUEUE_CLOSED
    in_queue_filler = Process(
        target=fill_input_queue, args=(in_queue, doc_loader, terminal_signal)
    )
    in_queue_filler.start()
    # Drain the output queue until every document is accounted for.
    count_parsed = 0
    while count_parsed < total_count:
        y = out_queue.get()
        # Update progress bar whenever an item has been processed
        if y == UDF.TASK_DONE:
            count_parsed += 1
            if self.pb is not None:
                self.pb.update(1)
        else:
            raise ValueError("Got non-sentinal output.")
    in_queue_filler.join()
    # Re-add the sentinel so any worker still blocked on the queue can
    # observe it and exit cleanly.
    in_queue.put(UDF.QUEUE_CLOSED)
    for udf in self.udfs:
        udf.join()
    # Terminate and flush the processes
    for udf in self.udfs:
        udf.terminate()
    self.udfs = [] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def candidate(cls):
"""The ``Candidate``.""" |
# One-to-many relationship to the generic Candidate row. The backref name
# is this subclass's snake_case name pluralized with "s", and deleting a
# Candidate cascades to its subclass-specific rows.
return relationship(
    "Candidate",
    backref=backref(
        camel_to_under(cls.__name__) + "s",
        cascade="all, delete-orphan",
        cascade_backrefs=False,
    ),
    cascade_backrefs=False,
)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_document(c):
"""Return True if all Mentions in the given candidate are from the same Document. :param c: The candidate whose Mentions are being compared :rtype: boolean """ |
return all(
_to_span(c[i]).sentence.document is not None
and _to_span(c[i]).sentence.document == _to_span(c[0]).sentence.document
for i in range(len(c))
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_table(c):
"""Return True if all Mentions in the given candidate are from the same Table. :param c: The candidate whose Mentions are being compared :rtype: boolean """ |
return all(
_to_span(c[i]).sentence.is_tabular()
and _to_span(c[i]).sentence.table == _to_span(c[0]).sentence.table
for i in range(len(c))
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_row(c):
"""Return True if all Mentions in the given candidate are from the same Row. :param c: The candidate whose Mentions are being compared :rtype: boolean """ |
return same_table(c) and all(
is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_col(c):
"""Return True if all Mentions in the given candidate are from the same Col. :param c: The candidate whose Mentions are being compared :rtype: boolean """ |
return same_table(c) and all(
is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_tabular_aligned(c):
"""Return True if all Mentions in the given candidate are from the same Row or Col. :param c: The candidate whose Mentions are being compared :rtype: boolean """ |
return same_table(c) and (
is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
or is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence)
for i in range(len(c))
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_cell(c):
"""Return True if all Mentions in the given candidate are from the same Cell. :param c: The candidate whose Mentions are being compared :rtype: boolean """ |
return all(
_to_span(c[i]).sentence.cell is not None
and _to_span(c[i]).sentence.cell == _to_span(c[0]).sentence.cell
for i in range(len(c))
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_sentence(c):
"""Return True if all Mentions in the given candidate are from the same Sentence. :param c: The candidate whose Mentions are being compared :rtype: boolean """ |
return all(
_to_span(c[i]).sentence is not None
and _to_span(c[i]).sentence == _to_span(c[0]).sentence
for i in range(len(c))
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_max_col_num(mention):
"""Return the largest column number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its last Mention. :rtype: integer or None """ |
span = _to_span(mention, idx=-1)
if span.sentence.is_tabular():
return span.sentence.cell.col_end
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_min_col_num(mention):
"""Return the lowest column number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :rtype: integer or None """ |
span = _to_span(mention)
if span.sentence.is_tabular():
return span.sentence.cell.col_start
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_min_row_num(mention):
"""Return the lowest row number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :rtype: integer or None """ |
span = _to_span(mention)
if span.sentence.is_tabular():
return span.sentence.cell.row_start
else:
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_sentence_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True):
"""Get the ngrams that are in the Sentence of the given Mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Sentence is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ |
spans = _to_spans(mention)
for span in spans:
for ngram in get_left_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
for ngram in get_right_ngrams(
span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_neighbor_sentence_ngrams( mention, d=1, attrib="words", n_min=1, n_max=1, lower=True ):
"""Get the ngrams that are in the neighoring Sentences of the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose neighbor Sentences are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ |
spans = _to_spans(mention)
for span in spans:
for ngram in chain.from_iterable(
[
tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
)
for sentence in span.sentence.document.sentences
if abs(sentence.position - span.sentence.position) <= d
and sentence != span.sentence
]
):
yield ngram |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cell_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True):
"""Get the ngrams that are in the Cell of the given mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Cell is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ |
spans = _to_spans(mention)
for span in spans:
for ngram in get_sentence_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
):
yield ngram
if span.sentence.is_tabular():
for ngram in chain.from_iterable(
[
tokens_to_ngrams(
getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
)
for sentence in _get_table_cells(span.sentence.table)[
span.sentence.cell
]
if sentence != span.sentence
]
):
yield ngram |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_neighbor_cell_ngrams( mention, dist=1, directions=False, attrib="words", n_min=1, n_max=1, lower=True ):
""" Get the ngrams from all Cells that are within a given Cell distance in one direction from the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. If `directions=True``, each ngram will be returned with a direction in {'UP', 'DOWN', 'LEFT', 'RIGHT'}. :param mention: The Mention whose neighbor Cells are being searched :param dist: The Cell distance within which a neighbor Cell must be to be considered :param directions: A Boolean expressing whether or not to return the direction of each ngram :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams (or (ngram, direction) tuples if directions=True) """ |
# TODO: Fix this to be more efficient (optimize with SQL query)
spans = _to_spans(mention)
for span in spans:
    # Always emit the span's own sentence ngrams first.
    for ngram in get_sentence_ngrams(
        span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower
    ):
        yield ngram
    if span.sentence.is_tabular():
        root_cell = span.sentence.cell
        # Candidate neighbors: every sentence row- or column-aligned with
        # the root cell.
        for sentence in chain.from_iterable(
            [
                _get_aligned_sentences(root_cell, "row"),
                _get_aligned_sentences(root_cell, "col"),
            ]
        ):
            # Signed axis distances from the root cell (sign encodes
            # direction; absolute=False keeps the sign).
            row_diff = min_row_diff(sentence, root_cell, absolute=False)
            col_diff = min_col_diff(sentence, root_cell, absolute=False)
            # Keep cells offset along exactly one axis and within `dist`:
            # (row_diff or col_diff)        -> not the root cell itself
            # not (row_diff and col_diff)   -> not diagonal
            if (
                (row_diff or col_diff)
                and not (row_diff and col_diff)
                and abs(row_diff) + abs(col_diff) <= dist
            ):
                if directions:
                    direction = ""
                    if col_diff == 0:
                        # Positive row_diff means the neighbor is above.
                        if 0 < row_diff and row_diff <= dist:
                            direction = "UP"
                        elif 0 > row_diff and row_diff >= -dist:
                            direction = "DOWN"
                    elif row_diff == 0:
                        # Positive col_diff means the neighbor is to the right.
                        if 0 < col_diff and col_diff <= dist:
                            direction = "RIGHT"
                        elif 0 > col_diff and col_diff >= -dist:
                            direction = "LEFT"
                    for ngram in tokens_to_ngrams(
                        getattr(sentence, attrib),
                        n_min=n_min,
                        n_max=n_max,
                        lower=lower,
                    ):
                        yield (ngram, direction)
                else:
                    for ngram in tokens_to_ngrams(
                        getattr(sentence, attrib),
                        n_min=n_min,
                        n_max=n_max,
                        lower=lower,
                    ):
                        yield ngram
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_col_ngrams( mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True ):
"""Get the ngrams from all Cells that are in the same column as the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose column Cells are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param spread: The number of cols left and right to also consider "aligned". :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ |
spans = _to_spans(mention)
for span in spans:
for ngram in _get_axis_ngrams(
span,
axis="col",
attrib=attrib,
n_min=n_min,
n_max=n_max,
spread=spread,
lower=lower,
):
yield ngram |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_aligned_ngrams( mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True ):
"""Get the ngrams from all Cells in the same row or column as the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose row and column Cells are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param spread: The number of rows/cols above/below/left/right to also consider "aligned". :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ |
spans = _to_spans(mention)
for span in spans:
for ngram in get_row_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower
):
yield ngram
for ngram in get_col_ngrams(
span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower
):
yield ngram |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_head_ngrams(mention, axis=None, attrib="words", n_min=1, n_max=1, lower=True):
"""Get the ngrams from the cell in the head of the row or column. More specifically, this returns the ngrams in the leftmost cell in a row and/or the ngrams in the topmost cell in the column, depending on the axis parameter. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose head Cells are being returned :param axis: Which axis {'row', 'col'} to search. If None, then both row and col are searched. :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ |
spans = _to_spans(mention)
# Search both axes unless a single axis was requested.
axes = (axis,) if axis else ("row", "col")
for span in spans:
    if span.sentence.is_tabular():
        for axis in axes:
            # Skip when the span is itself in the head cell of this axis
            # (row head <=> col_start == 0, col head <=> row_start == 0).
            # NOTE(review): `return` ends the ENTIRE generator here, not
            # just this axis/span — looks like it may have been meant as
            # `continue`; confirm intended behavior before changing.
            if getattr(span.sentence, _other_axis(axis) + "_start") == 0:
                return
            for sentence in getattr(
                _get_head_cell(span.sentence.cell, axis), "sentences", []
            ):
                for ngram in tokens_to_ngrams(
                    getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower
                ):
                    yield ngram
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_table_cells(table):
"""Helper function with caching for table cells and the cells' sentences. This function significantly improves the speed of `get_row_ngrams` primarily by reducing the number of queries that are made (which were previously the bottleneck. Rather than taking a single mention, then its sentence, then its table, then all the cells in the table, then all the sentences in each cell, and performing operations on that series of queries, this performs a single query for all the sentences in a table and returns all of the cells and the cells sentences directly. :param table: the Table object to cache. """ |
sent_map = defaultdict(list)
for sent in table.sentences:
if sent.is_tabular():
sent_map[sent.cell].append(sent)
return sent_map |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forward(self, input, target):
""" Calculate the loss :param input: prediction logits :param target: target probabilities :return: loss """ |
n, k = input.shape
losses = input.new_zeros(n)
for i in range(k):
cls_idx = input.new_full((n,), i, dtype=torch.long)
loss = F.cross_entropy(input, cls_idx, reduction="none")
if self.weight is not None:
loss = loss * self.weight[i]
losses += target[:, i].float() * loss
if self.reduction == "mean":
losses = losses.mean()
elif self.reduction == "sum":
losses = losses.sum()
elif self.reduction != "none":
raise ValueError(f"Unrecognized reduction: {self.reduction}")
return losses |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bbox_horz_aligned(box1, box2):
""" Returns true if the vertical center point of either span is within the vertical range of the other """ |
if not (box1 and box2):
return False
# NEW: any overlap counts
# return box1.top <= box2.bottom and box2.top <= box1.bottom
box1_top = box1.top + 1.5
box2_top = box2.top + 1.5
box1_bottom = box1.bottom - 1.5
box2_bottom = box2.bottom - 1.5
return not (box1_top > box2_bottom or box2_top > box1_bottom) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bbox_vert_aligned(box1, box2):
""" Returns true if the horizontal center point of either span is within the horizontal range of the other """ |
if not (box1 and box2):
return False
# NEW: any overlap counts
# return box1.left <= box2.right and box2.left <= box1.right
box1_left = box1.left + 1.5
box2_left = box2.left + 1.5
box1_right = box1.right - 1.5
box2_right = box2.right - 1.5
return not (box1_left > box2_right or box2_left > box1_right) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bbox_vert_aligned_left(box1, box2):
""" Returns true if the left boundary of both boxes is within 2 pts """ |
if not (box1 and box2):
return False
return abs(box1.left - box2.left) <= 2 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bbox_vert_aligned_right(box1, box2):
""" Returns true if the right boundary of both boxes is within 2 pts """ |
if not (box1 and box2):
return False
return abs(box1.right - box2.right) <= 2 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bbox_vert_aligned_center(box1, box2):
""" Returns true if the center of both boxes is within 5 pts """ |
if not (box1 and box2):
return False
return abs(((box1.right + box1.left) / 2.0) - ((box2.right + box2.left) / 2.0)) <= 5 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_subspan(self, m, span):
""" Tests if mention m is subspan of span, where span is defined specific to mention type. """ |
return (
m.sentence.id == span[0]
and m.char_start >= span[1]
and m.char_end <= span[2]
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_span(self, m):
""" Gets a tuple that identifies a span for the specific mention class that m belongs to. """ |
return (m.sentence.id, m.char_start, m.char_end) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _is_subspan(self, m, span):
"""Tests if mention m does exist""" |
return m.figure.document.id == span[0] and m.figure.position == span[1] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_span(self, m):
""" Gets a tuple that identifies a figure for the specific mention class that m belongs to. """ |
return (m.figure.document.id, m.figure.position) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def candidate_subclass( class_name, args, table_name=None, cardinality=None, values=None ):
""" Creates and returns a Candidate subclass with provided argument names, which are Context type. Creates the table in DB if does not exist yet. Import using: .. code-block:: python from fonduer.candidates.models import candidate_subclass :param class_name: The name of the class, should be "camel case" e.g. NewCandidate :param args: A list of names of constituent arguments, which refer to the Contexts--representing mentions--that comprise the candidate :param table_name: The name of the corresponding table in DB; if not provided, is converted from camel case by default, e.g. new_candidate :param cardinality: The cardinality of the variable corresponding to the Candidate. By default is 2 i.e. is a binary value, e.g. is or is not a true mention. """ |
if table_name is None:
    table_name = camel_to_under(class_name)
# If cardinality and values are None, default to binary classification
if cardinality is None and values is None:
    values = [True, False]
    cardinality = 2
# Else use values if present, and validate proper input
elif values is not None:
    if cardinality is not None and len(values) != cardinality:
        raise ValueError("Number of values must match cardinality.")
    if None in values:
        raise ValueError("`None` is a protected value.")
    # Note that bools are instances of ints in Python...
    if any([isinstance(v, int) and not isinstance(v, bool) for v in values]):
        raise ValueError(
            (
                "Default usage of values is consecutive integers."
                "Leave values unset if trying to define values as integers."
            )
        )
    cardinality = len(values)
# If cardinality is specified but not values, fill in with ints
elif cardinality is not None:
    values = list(range(cardinality))
# Memoize: an identical spec returns the cached class; a conflicting spec
# for the same name is an error.
class_spec = (args, table_name, cardinality, values)
if class_name in candidate_subclasses:
    if class_spec == candidate_subclasses[class_name][1]:
        return candidate_subclasses[class_name][0]
    else:
        raise ValueError(
            f"Candidate subclass {class_name} "
            f"already exists in memory with incompatible "
            f"specification: {candidate_subclasses[class_name][1]}"
        )
else:
    # Set the class attributes == the columns in the database
    class_attribs = {
        # Declares name for storage table
        "__tablename__": table_name,
        # Connects candidate_subclass records to generic Candidate records
        "id": Column(
            Integer,
            ForeignKey("candidate.id", ondelete="CASCADE"),
            primary_key=True,
        ),
        # Store values & cardinality information in the class only
        "values": values,
        "cardinality": cardinality,
        # Polymorphism information for SQLAlchemy
        "__mapper_args__": {"polymorphic_identity": table_name},
        # Helper method to get argument names
        "__argnames__": [_.__tablename__ for _ in args],
        "mentions": args,
    }
    # Link every candidate row back to its source Document.
    class_attribs["document_id"] = Column(
        Integer, ForeignKey("document.id", ondelete="CASCADE")
    )
    class_attribs["document"] = relationship(
        "Document",
        backref=backref(table_name + "s", cascade="all, delete-orphan"),
        foreign_keys=class_attribs["document_id"],
    )
    # Create named arguments, i.e. the entity mentions comprising the
    # relation mention.
    unique_args = []
    for arg in args:
        # Primary arguments are constituent Contexts, and their ids
        class_attribs[arg.__tablename__ + "_id"] = Column(
            Integer, ForeignKey(arg.__tablename__ + ".id", ondelete="CASCADE")
        )
        class_attribs[arg.__tablename__] = relationship(
            arg.__name__,
            backref=backref(
                table_name + "_" + arg.__tablename__ + "s",
                cascade_backrefs=False,
                cascade="all, delete-orphan",
            ),
            cascade_backrefs=False,
            foreign_keys=class_attribs[arg.__tablename__ + "_id"],
        )
        unique_args.append(class_attribs[arg.__tablename__ + "_id"])
    # Add unique constraints to the arguments
    # (the same tuple of mentions may appear in only one candidate).
    class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),)
    # Create class
    C = type(class_name, (Candidate,), class_attribs)
    # Create table in DB
    if not Meta.engine.dialect.has_table(Meta.engine, table_name):
        C.__table__.create(bind=Meta.engine)
    candidate_subclasses[class_name] = C, class_spec
    return C
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply(self, docs, split=0, clear=True, parallelism=None, progress_bar=True):
"""Run the CandidateExtractor. :Example: To extract candidates from a set of training documents using 4 cores:: candidate_extractor.apply(train_docs, split=0, parallelism=4) :param docs: Set of documents to extract from. :param split: Which split to assign the extracted Candidates to. :type split: int :param clear: Whether or not to clear the existing Candidates beforehand. :type clear: bool :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the CandidateExtractor if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool """ |
# Thin delegate: UDFRunner.apply handles clearing, (multi)processing,
# and the progress bar.
super(CandidateExtractor, self).apply(
    docs,
    split=split,
    clear=clear,
    parallelism=parallelism,
    progress_bar=progress_bar,
)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear(self, split):
"""Delete Candidates of each class initialized with the CandidateExtractor from given split the database. :param split: Which split to clear. :type split: int """ |
for candidate_class in self.candidate_classes:
    logger.info(
        f"Clearing table {candidate_class.__tablename__} (split {split})"
    )
    # Deleting the generic Candidate rows for this subclass type and split;
    # the FK ON DELETE CASCADE removes the subclass-specific rows.
    query = self.session.query(Candidate).filter(
        Candidate.type == candidate_class.__tablename__
    )
    query.filter(Candidate.split == split).delete(synchronize_session="fetch")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_all(self, split):
"""Delete ALL Candidates from given split the database. :param split: Which split to clear. :type split: int """ |
logger.info("Clearing ALL Candidates.")
# Delete every Candidate in the split, regardless of candidate class.
query = self.session.query(Candidate).filter(Candidate.split == split)
query.delete(synchronize_session="fetch")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_candidates(self, docs=None, split=0, sort=False):
"""Return a list of lists of the candidates associated with this extractor. Each list of the return will contain the candidates for one of the candidate classes associated with the CandidateExtractor. :param docs: If provided, return candidates from these documents from all splits. :type docs: list, tuple of ``Documents``. :param split: If docs is None, then return all the candidates from this split. :type split: int :param sort: If sort is True, then return all candidates sorted by stable_id. :type sort: bool :return: Candidates for each candidate_class. :rtype: List of lists of ``Candidates``. """ |
result = []
if docs:
docs = docs if isinstance(docs, (list, tuple)) else [docs]
# Get cands from all splits
for candidate_class in self.candidate_classes:
cands = (
self.session.query(candidate_class)
.filter(candidate_class.document_id.in_([doc.id for doc in docs]))
.order_by(candidate_class.id)
.all()
)
if sort:
cands = sorted(
cands,
key=lambda x: " ".join(
[x[i][0].get_stable_id() for i in range(len(x))]
),
)
result.append(cands)
else:
for candidate_class in self.candidate_classes:
# Filter by candidate_ids in a particular split
sub_query = (
self.session.query(Candidate.id)
.filter(Candidate.split == split)
.subquery()
)
cands = (
self.session.query(candidate_class)
.filter(candidate_class.id.in_(sub_query))
.order_by(candidate_class.id)
.all()
)
if sort:
cands = sorted(
cands,
key=lambda x: " ".join(
[x[i][0].get_stable_id() for i in range(len(x))]
),
)
result.append(cands)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset_parameters(self):
"""Reinitiate the weight parameters. """ |
# Uniform init in [-1/sqrt(d), 1/sqrt(d)] for the embedding weights and bias.
bound = 1.0 / math.sqrt(self.num_features)
self.weight.weight.data.uniform_(-bound, bound)
if self.bias is not None:
    self.bias.data.uniform_(-bound, bound)
# Keep the padding embedding pinned at zero.
if self.padding_idx is not None:
    self.weight.weight.data[self.padding_idx].fill_(0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_marginals(session, X, marginals, training=True):
"""Save marginal probabilities for a set of Candidates to db. :param X: A list of arbitrary objects with candidate ids accessible via a .id attrib :param marginals: A dense M x K matrix of marginal probabilities, where K is the cardinality of the candidates, OR a M-dim list/array if K=2. :param training: If True, these are training marginals / labels; else they are saved as end model predictions. """ |
logger = logging.getLogger(__name__)
# Make sure that we are working with a numpy array
try:
    shape = marginals.shape
except Exception:
    marginals = np.array(marginals)
    shape = marginals.shape
# Handle binary input as M x 1-dim array; assume elements represent
# positive (k=1) class values
if len(shape) == 1:
    marginals = np.vstack([1 - marginals, marginals]).T
# Only add values for classes k=1,...,K (class 0's probability is implied
# by the others, so it is not stored).
marginal_tuples = []
for i in range(shape[0]):
    # NOTE: `shape` is the pre-vstack shape, so for 1-D input
    # len(shape) == 1 and k iterates over {1} only — which matches the
    # stacked (M, 2) layout built above.
    for k in range(1, shape[1] if len(shape) > 1 else 2):
        if marginals[i, k] > 0:
            marginal_tuples.append((i, k, marginals[i, k]))
# NOTE: This will delete all existing marginals of type `training`
session.query(Marginal).filter(Marginal.training == training).delete(
    synchronize_session="fetch"
)
# Prepare bulk INSERT query
q = Marginal.__table__.insert()
# Prepare values
insert_vals = []
for i, k, p in marginal_tuples:
    cid = X[i].id
    insert_vals.append(
        {
            "candidate_id": cid,
            "training": training,
            "value": k,
            # We cast p in case its a numpy type, which psycopg2 does not handle
            "probability": float(p),
        }
    )
# Execute update
session.execute(q, insert_vals)
session.commit()
logger.info(f"Saved {len(marginals)} marginals")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compile_entity_feature_generator():
    """Build a generator function producing relation features for a mention.

    The returned callable accepts an xml root and a list of indexes for a
    mention, and generates the corresponding relation features.
    """
    basic_attribs = ["lemma", "dep_label"]
    mention = Mention(0)
    # Basic relation feature templates.
    templates = [
        [Indicator(mention, attrib) for attrib in basic_attribs],
        Indicator(mention, "dep_label,lemma"),
        # Ngram lemmas along the path towards the root (up to 3 parents)
        # and over the mention's children.
        Ngrams(Parents(mention, 3), "lemma", (1, 3)),
        Ngrams(Children(mention), "lemma", (1, 3)),
        # Features over the mention's left and right siblings.
        [LeftNgrams(LeftSiblings(mention), attrib) for attrib in basic_attribs],
        [RightNgrams(RightSiblings(mention), attrib) for attrib in basic_attribs],
    ]
    # Compile the templates into a single feature-generating function.
    return Compile(templates).apply_mention
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ddlib_feats(span, context, idxs):
    """Minimalist port of generic mention features from ddlib.

    Features are computed once per span and cached in the module-level
    ``unary_ddlib_feats`` dict, keyed by the span's stable id.
    """
    if span.stable_id not in unary_ddlib_feats:
        # First time we see this span: compute and cache its feature set.
        feats = set()
        feats.update(_get_seq_features(context, idxs))
        feats.update(_get_window_features(context, idxs))
        unary_ddlib_feats[span.stable_id] = feats
    yield from unary_ddlib_feats[span.stable_id]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_cand_values(candidate, key_table):
    """Return the candidate's annotation collection matching the key table."""
    # NOTE: Import just before checking to avoid circular imports.
    from fonduer.features.models import FeatureKey
    from fonduer.supervision.models import GoldLabelKey, LabelKey

    # Dispatch table mapping each key class to the candidate attribute
    # that stores the corresponding annotations.
    attribute_for = {
        FeatureKey: "features",
        LabelKey: "labels",
        GoldLabelKey: "gold_labels",
    }
    if key_table not in attribute_for:
        raise ValueError(f"{key_table} is not a valid key table.")
    return getattr(candidate, attribute_for[key_table])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _batch_postgres_query(table, records):
"""Break the list into chunks that can be processed as a single statement. Postgres query cannot be too long or it will fail. See: https://dba.stackexchange.com/questions/131399/is-there-a-maximum- length-constraint-for-a-postgres-query :param records: The full list of records to batch. :type records: iterable :param table: The sqlalchemy table. :return: A generator of lists of records. """ |
if not records:
return
POSTGRESQL_MAX = 0x3FFFFFFF
# Create preamble and measure its length
preamble = (
"INSERT INTO "
+ table.__tablename__
+ " ("
+ ", ".join(records[0].keys())
+ ") VALUES ("
+ ", ".join(["?"] * len(records[0].keys()))
+ ")\n"
)
start = 0
end = 0
total_len = len(preamble)
while end < len(records):
record_len = sum([len(str(v)) for v in records[end].values()])
# Pre-increment to include the end element in the slice
end += 1
if total_len + record_len >= POSTGRESQL_MAX:
logger.debug(f"Splitting query due to length ({total_len} chars).")
yield records[start:end]
start = end
# Reset the total query length
total_len = len(preamble)
else:
total_len += record_len
yield records[start:end] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_sparse_matrix_keys(session, key_table):
    """Return all rows of the given key table, ordered by key name."""
    ordered = session.query(key_table).order_by(key_table.name)
    return ordered.all()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def batch_upsert_records(session, table, records):
    """Batch upsert records into postgresql database.

    :param session: The database session.
    :param table: The sqlalchemy model class whose table is upserted into.
    :param records: Iterable of dicts; each is expected to carry "keys" and
        "values" entries (see the ``set_`` mapping below).
    """
    if not records:
        return
    # Split into size-bounded batches so each statement stays under the
    # PostgreSQL query-length limit.
    for record_batch in _batch_postgres_query(table, records):
        stmt = insert(table.__table__)
        # On primary-key conflict, overwrite the stored keys/values with
        # the incoming row's (classic upsert).
        # NOTE(review): `stmt.excluded.get(...)` — confirm this resolves to
        # the EXCLUDED pseudo-row's columns as intended for this
        # sqlalchemy version.
        stmt = stmt.on_conflict_do_update(
            constraint=table.__table__.primary_key,
            set_={
                "keys": stmt.excluded.get("keys"),
                "values": stmt.excluded.get("values"),
            },
        )
        session.execute(stmt, record_batch)
        session.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_docs_from_split(session, candidate_classes, split):
    """Return the set of documents containing candidates in the given split."""
    # Candidate ids belonging to the requested split.
    split_cand_ids = (
        session.query(Candidate.id).filter(Candidate.split == split).subquery()
    )
    documents = set()
    for candidate_class in candidate_classes:
        matches = (
            session.query(candidate_class)
            .filter(candidate_class.id.in_(split_cand_ids))
            .all()
        )
        for candidate in matches:
            documents.add(candidate.document)
    return documents
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_mapping(session, table, candidates, generator, key_map):
    """Generate map of keys and values for the candidate from the generator.

    :param session: The database session.
    :param table: The table we will be inserting into (i.e. Feature or Label).
    :param candidates: The candidates to get mappings for.
    :param generator: A generator yielding (candidate_id, key, value) tuples.
    :param key_map: A mutable dict which values will be added to as
        {key: {candidate_class_names}}.
    :type key_map: Dict
    :return: Generator of dictionaries of
        {"candidate_id": _, "keys": _, "values": _}
    :rtype: generator of dict
    """
    for candidate in candidates:
        # Seed with the values already stored in the DB for this candidate.
        try:
            stored = (
                session.query(table)
                .filter(table.candidate_id == candidate.id)
                .one()
            )
            mapping = dict(zip(stored.keys, stored.values))
        except NoResultFound:
            mapping = {}
        # Merge in the freshly generated non-zero values.
        for _, key, value in generator(candidate):
            if value != 0:
                mapping[key] = value
        # Record which candidate class each key was observed on.
        tablename = candidate.__class__.__tablename__
        for key in mapping:
            key_map.setdefault(key, set()).add(tablename)
        yield {
            "candidate_id": candidate.id,
            "keys": list(mapping.keys()),
            "values": list(mapping.values()),
        }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cands_list_from_split(session, candidate_classes, doc, split):
    """Return one list of candidates per candidate class for this document.

    If ``split`` is ALL_SPLITS, candidates from every split are included;
    otherwise only candidates from the specified split.
    """
    results = []
    for candidate_class in candidate_classes:
        query = session.query(candidate_class).filter(
            candidate_class.document_id == doc.id
        )
        # Restrict to the requested split unless all splits were asked for.
        if split != ALL_SPLITS:
            query = query.filter(candidate_class.split == split)
        results.append(query.all())
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drop_all_keys(session, key_table, candidate_classes):
    """Bulk drop annotation keys for all the candidate_classes in the table.

    Rather than directly dropping the keys, this removes the
    candidate_classes specified for the given keys only. If all
    candidate_classes are removed for a key, the key is dropped.

    :param session: The database session to operate in.
    :param key_table: The sqlalchemy class to insert into.
    :param candidate_classes: A list of candidate classes to drop.
    """
    if not candidate_classes:
        return
    # Work in terms of table names, since that is what key rows store.
    candidate_classes = set([c.__tablename__ for c in candidate_classes])
    # Select all rows that contain ANY of the candidate_classes
    all_rows = (
        session.query(key_table)
        .filter(
            key_table.candidate_classes.overlap(cast(candidate_classes, ARRAY(String)))
        )
        .all()
    )
    to_delete = set()
    to_update = []
    for row in all_rows:
        # Remove the selected candidate_classes. If empty, mark for deletion.
        row.candidate_classes = list(
            set(row.candidate_classes) - set(candidate_classes)
        )
        if len(row.candidate_classes) == 0:
            to_delete.add(row.name)
        else:
            to_update.append(
                {"name": row.name, "candidate_classes": row.candidate_classes}
            )
    # Perform all deletes
    if to_delete:
        query = session.query(key_table).filter(key_table.name.in_(to_delete))
        query.delete(synchronize_session="fetch")
    # Perform all updates: upsert rows whose candidate_classes list shrank
    if to_update:
        for batch in _batch_postgres_query(key_table, to_update):
            stmt = insert(key_table.__table__)
            stmt = stmt.on_conflict_do_update(
                constraint=key_table.__table__.primary_key,
                set_={
                    "name": stmt.excluded.get("name"),
                    "candidate_classes": stmt.excluded.get("candidate_classes"),
                },
            )
            session.execute(stmt, batch)
    session.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drop_keys(session, key_table, keys):
    """Bulk drop annotation keys to the specified table.

    Rather than directly dropping the keys, this removes the
    candidate_classes specified for the given keys only. If all
    candidate_classes are removed for a key, the key is dropped.

    :param session: The database session to operate in.
    :param key_table: The sqlalchemy class to insert into.
    :param keys: A map of {name: [candidate_classes]}.
    """
    # Do nothing if empty
    if not keys:
        return
    for key_batch in _batch_postgres_query(
        key_table, [{"name": k[0], "candidate_classes": k[1]} for k in keys.items()]
    ):
        # Fetch the current DB rows for the keys in this batch.
        all_rows = (
            session.query(key_table)
            .filter(key_table.name.in_([key["name"] for key in key_batch]))
            .all()
        )
        to_delete = set()
        to_update = []
        # All candidate classes will be the same for all keys, so just look at one
        candidate_classes = key_batch[0]["candidate_classes"]
        for row in all_rows:
            # Remove the selected candidate_classes. If empty, mark for deletion.
            row.candidate_classes = list(
                set(row.candidate_classes) - set(candidate_classes)
            )
            if len(row.candidate_classes) == 0:
                to_delete.add(row.name)
            else:
                to_update.append(
                    {"name": row.name, "candidate_classes": row.candidate_classes}
                )
        # Perform all deletes
        if to_delete:
            query = session.query(key_table).filter(key_table.name.in_(to_delete))
            query.delete(synchronize_session="fetch")
        # Perform all updates: upsert rows whose candidate_classes shrank
        if to_update:
            stmt = insert(key_table.__table__)
            stmt = stmt.on_conflict_do_update(
                constraint=key_table.__table__.primary_key,
                set_={
                    "name": stmt.excluded.get("name"),
                    "candidate_classes": stmt.excluded.get("candidate_classes"),
                },
            )
            session.execute(stmt, to_update)
        session.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upsert_keys(session, key_table, keys):
    """Bulk add annotation keys to the specified table.

    :param session: The database session to operate in.
    :param key_table: The sqlalchemy class to insert into.
    :param keys: A map of {name: [candidate_classes]}.
    """
    # Do nothing if empty
    if not keys:
        return
    for key_batch in _batch_postgres_query(
        key_table, [{"name": k[0], "candidate_classes": k[1]} for k in keys.items()]
    ):
        stmt = insert(key_table.__table__)
        # On primary-key conflict, overwrite the existing row's fields with
        # the incoming values (upsert).
        stmt = stmt.on_conflict_do_update(
            constraint=key_table.__table__.primary_key,
            set_={
                "name": stmt.excluded.get("name"),
                "candidate_classes": stmt.excluded.get("candidate_classes"),
            },
        )
        # NOTE(review): this retries forever and only logs failures at
        # debug level; a persistent error (e.g. schema mismatch) will spin
        # indefinitely — consider bounding the retries.
        while True:
            try:
                session.execute(stmt, key_batch)
                session.commit()
                break
            except Exception as e:
                logger.debug(e)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, docs=None, split=0, lfs=None, parallelism=None, progress_bar=True):
    """Update the labels of the specified candidates based on the provided LFs.

    :param docs: If provided, apply the updated LFs to all the candidates
        in these documents.
    :param split: If docs is None, apply the updated LFs to the candidates
        in this particular split.
    :param lfs: A list of lists of labeling functions to update. Each list
        should correspond with the candidate_classes used to initialize the
        Labeler.
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the Labeler if
        it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    if lfs is None:
        raise ValueError("Please provide a list of lists of labeling functions.")
    if len(lfs) != len(self.candidate_classes):
        raise ValueError("Please provide LFs for each candidate class.")
    # Updating is simply a non-clearing, training-mode apply.
    run_kwargs = {
        "docs": docs,
        "split": split,
        "lfs": lfs,
        "train": True,
        "clear": False,
        "parallelism": parallelism,
        "progress_bar": progress_bar,
    }
    self.apply(**run_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply(
    self,
    docs=None,
    split=0,
    train=False,
    lfs=None,
    clear=True,
    parallelism=None,
    progress_bar=True,
):
    """Apply the labels of the specified candidates based on the provided LFs.

    :param docs: If provided, apply the LFs to all the candidates in these
        documents.
    :param split: If docs is None, apply the LFs to the candidates in this
        particular split.
    :type split: int
    :param train: Whether or not to update the global key set of labels and
        the labels of candidates.
    :type train: bool
    :param lfs: A list of lists of labeling functions to apply. Each list
        should correspond with the candidate_classes used to initialize the
        Labeler.
    :type lfs: list of lists
    :param clear: Whether or not to clear the labels table before applying
        these LFs.
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the Labeler if
        it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    :raises ValueError: If labeling functions are not provided for each
        candidate class.
    """
    if lfs is None:
        raise ValueError("Please provide a list of labeling functions.")
    if len(lfs) != len(self.candidate_classes):
        raise ValueError("Please provide LFs for each candidate class.")
    self.lfs = lfs
    if docs:
        # Explicit docs were given: label their candidates across all splits.
        target_docs = docs
        split = ALL_SPLITS
    else:
        # Otherwise only grab the docs containing candidates from the split.
        target_docs = get_docs_from_split(
            self.session, self.candidate_classes, split
        )
    super(Labeler, self).apply(
        target_docs,
        split=split,
        train=train,
        lfs=self.lfs,
        clear=clear,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
    # Needed to sync the bulk operations
    self.session.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear(self, train, split, lfs=None):
    """Delete Labels of each class from the database.

    :param train: Whether or not to clear the LabelKeys.
    :type train: bool
    :param split: Which split of candidates to clear labels from.
    :type split: int
    :param lfs: This parameter is ignored.
    """
    logger.info(f"Clearing Labels (split {split})")
    # Restrict the delete to labels of candidates in the requested split.
    split_cand_ids = (
        self.session.query(Candidate.id).filter(Candidate.split == split).subquery()
    )
    (
        self.session.query(Label)
        .filter(Label.candidate_id.in_(split_cand_ids))
        .delete(synchronize_session="fetch")
    )
    # In training mode, also drop all old annotation keys.
    if train:
        logger.debug(f"Clearing all LabelKeys from {self.candidate_classes}...")
        drop_all_keys(self.session, LabelKey, self.candidate_classes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_all(self):
    """Delete every Label and LabelKey from the database."""
    logger.info("Clearing ALL Labels and LabelKeys.")
    for model in (Label, LabelKey):
        self.session.query(model).delete(synchronize_session="fetch")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _f_gen(self, c):
"""Convert lfs into a generator of id, name, and labels. In particular, catch verbose values and convert to integer ones. """ |
lf_idx = self.candidate_classes.index(c.__class__)
labels = lambda c: [(c.id, lf.__name__, lf(c)) for lf in self.lfs[lf_idx]]
for cid, lf_key, label in labels(c):
# Note: We assume if the LF output is an int, it is already
# mapped correctly
if isinstance(label, int):
yield cid, lf_key, label
# None is a protected LF output value corresponding to 0,
# representing LF abstaining
elif label is None:
yield cid, lf_key, 0
elif label in c.values:
if c.cardinality > 2:
yield cid, lf_key, c.values.index(label) + 1
# Note: Would be nice to not special-case here, but for
# consistency we leave binary LF range as {-1,0,1}
else:
val = 1 if c.values.index(label) == 0 else -1
yield cid, lf_key, val
else:
raise ValueError(
f"Can't parse label value {label} for candidate values {c.values}"
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def arrow_get(string):
    '''Parse a datetime string, tolerating ICS-style compact ISO 8601.

    ICS uses ISO 8601 without dashes or colons, i.e. not ISO 8601 at all,
    so fall back to an explicit format keyed by string length.
    '''
    # Normalize slash-separated dates to dash-separated.
    normalized = string.replace('/', '-') if '/' in string else string
    if '-' in normalized:
        # Proper ISO 8601 — let arrow handle it directly.
        return arrow.get(normalized)
    # Compact form: strip a trailing Z and pick the format by length.
    compact = normalized.rstrip('Z')
    return arrow.get(compact, DATE_FORMATS[len(compact)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_duration(line):
    """ Return a timedelta object from a string in the DURATION property format

    Accepts RFC 5545 durations such as ``P1DT2H3M4S``, ``-P1W`` or ``PT30M``.

    :param line: The DURATION string to parse.
    :returns: The parsed :class:`datetime.timedelta`.
    :raises parse.ParseError: if the string is malformed.
    """
    DAYS, SECS = {'D': 1, 'W': 7}, {'S': 1, 'M': 60, 'H': 3600}
    # Reject empty input up front instead of crashing on line[0].
    if not line:
        raise parse.ParseError()
    sign, i = 1, 0
    if line[i] in '-+':
        if line[i] == '-':
            sign = -1
        i += 1
    # The (optionally signed) duration must start with the designator 'P'.
    if i >= len(line) or line[i] != 'P':
        raise parse.ParseError()
    i += 1
    days, secs = 0, 0
    while i < len(line):
        if line[i] == 'T':
            # 'T' separates the date part from the time part.
            i += 1
            if i == len(line):
                break
        j = i
        # Bound the digit scan so input that ends with digits (e.g. "P12")
        # raises ParseError instead of IndexError.
        while j < len(line) and line[j].isdigit():
            j += 1
        if i == j or j == len(line):
            raise parse.ParseError()
        val = int(line[i:j])
        if line[j] in DAYS:
            days += val * DAYS[line[j]]
            # Each unit designator may only appear once.
            DAYS.pop(line[j])
        elif line[j] in SECS:
            secs += val * SECS[line[j]]
            SECS.pop(line[j])
        else:
            raise parse.ParseError()
        i = j + 1
    return timedelta(sign * days, sign * secs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timedelta_to_duration(dt):
    """ Return a string according to the DURATION property format from a
    timedelta object

    :param dt: The :class:`datetime.timedelta` to format.
    :returns: An RFC 5545 DURATION string (e.g. "P1W2DT3H").
        Sub-second precision (microseconds) is not represented.
    """
    res = ''
    days, secs = dt.days, dt.seconds
    # timedelta normalizes negatives as (negative days, non-negative
    # seconds); without this, e.g. timedelta(-1) would render as the
    # nonsensical "P-1W6D". Emit a leading '-' and format the magnitude.
    if days < 0:
        res = '-'
        dt = -dt
        days, secs = dt.days, dt.seconds
    res += 'P'
    if days // 7:
        res += str(days // 7) + 'W'
        days %= 7
    if days:
        res += str(days) + 'D'
    if secs:
        res += 'T'
        if secs // 3600:
            res += str(secs // 3600) + 'H'
            secs %= 3600
        if secs // 60:
            res += str(secs // 60) + 'M'
            secs %= 60
        if secs:
            res += str(secs) + 'S'
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def end(self):
    """Get or set the end of the event.

    |  Will return an :class:`Arrow` object.
    |  May be set to anything that :func:`Arrow.get` understands.
    |  If set to a non null value, removes any already existing duration.
    |  Setting to None will have unexpected behavior if begin is not None.
    |  Must not be set to a value earlier than self.begin.
    """
    if self._duration:
        # Duration-defined end: offset from the beginning.
        return self.begin + self._duration
    if self._end_time:
        # Explicit end time (same value for all-day and timed events).
        return self._end_time
    if self._begin:
        # No explicit end: all-day events last one day, others are instants.
        return self._begin + timedelta(days=1) if self.all_day else self._begin
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def duration(self):
    """Get or set the duration of the event.

    |  Will return a timedelta object.
    |  May be set to anything that timedelta() understands.
    |  May be set with a dict ({"days":2, "hours":6}).
    |  If set to a non null value, removes any already existing end time.
    """
    if self._duration:
        # An explicitly stored duration wins.
        return self._duration
    computed_end = self.end
    if computed_end:
        # The `end` getter already accounts for all-day events.
        return computed_end - self.begin
    # Event has neither start, nor end, nor duration.
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_all_day(self):
    """Transforms self to an all-day event.

    The event will span all the days from the begin to the end day.
    """
    if self.all_day:
        # Already an all-day event; nothing to do.
        return
    first_day = self.begin.floor('day')
    last_day = self.end.floor('day')
    self._begin = first_day
    # A single-day event needs no explicit end time.
    if first_day == last_day:
        self._end_time = None
    else:
        self._end_time = last_day + timedelta(days=1)
    self._duration = None
    self._begin_precision = 'day'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join(self, other, *args, **kwarg):
    """Create a new event covering the time range of two intersecting events.

    All extra parameters are passed to the Event constructor.

    :param other: the other event
    :returns: a new Event instance
    :raises ValueError: if the two events do not intersect
    """
    event = Event(*args, **kwarg)
    if not self.intersects(other):
        raise ValueError(
            'Cannot join {} with {}: they don\'t intersect.'.format(self, other))
    # Take the earlier begin and the later end of the two events.
    event.begin = other.begin if self.starts_within(other) else self.begin
    event.end = other.end if self.ends_within(other) else self.end
    return event
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.