id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
229,900
HazyResearch/fonduer
src/fonduer/candidates/mentions.py
MentionExtractor.apply
def apply(self, docs, clear=True, parallelism=None, progress_bar=True):
    """Run the MentionExtractor.

    :Example: To extract mentions from a set of training documents using
        4 cores::

            mention_extractor.apply(train_docs, parallelism=4)

    :param docs: Set of documents to extract from.
    :param clear: Whether or not to clear the existing Mentions beforehand.
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the
        MentionExtractor if it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # All the real work happens in the parent UDF runner; this override
    # exists to present a Mention-specific docstring and signature.
    super(MentionExtractor, self).apply(
        docs, clear=clear, parallelism=parallelism, progress_bar=progress_bar
    )
python
def apply(self, docs, clear=True, parallelism=None, progress_bar=True): super(MentionExtractor, self).apply( docs, clear=clear, parallelism=parallelism, progress_bar=progress_bar )
[ "def", "apply", "(", "self", ",", "docs", ",", "clear", "=", "True", ",", "parallelism", "=", "None", ",", "progress_bar", "=", "True", ")", ":", "super", "(", "MentionExtractor", ",", "self", ")", ".", "apply", "(", "docs", ",", "clear", "=", "clear...
Run the MentionExtractor. :Example: To extract mentions from a set of training documents using 4 cores:: mention_extractor.apply(train_docs, parallelism=4) :param docs: Set of documents to extract from. :param clear: Whether or not to clear the existing Mentions beforehand. :type clear: bool :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the MentionExtractor if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool
[ "Run", "the", "MentionExtractor", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/mentions.py#L421-L443
229,901
HazyResearch/fonduer
src/fonduer/candidates/mentions.py
MentionExtractor.clear
def clear(self):
    """Delete Mentions of each class in the extractor from the given split."""
    # Collect the candidate table names whose candidate_subclasses are
    # built from any mention class handled by this extractor.
    affected_cand_tables = set()
    for subclass_info in candidate_subclasses.values():
        mention_list = subclass_info[1][0]
        cand_tablename = subclass_info[1][1]
        for mention_subclass in mention_list:
            if mention_subclass in self.mention_classes:
                affected_cand_tables.add(cand_tablename)

    # First, clear all the Mentions. This will cascade and remove the
    # mention_subclasses and corresponding candidate_subclasses.
    for mention_class in self.mention_classes:
        logger.info(f"Clearing table: {mention_class.__tablename__}")
        self.session.query(Mention).filter_by(
            type=mention_class.__tablename__
        ).delete(synchronize_session="fetch")

    # Next, clear the Candidates. This is done manually because we have
    # no cascading relationship from candidate_subclass to Candidate.
    for cand_subclass in affected_cand_tables:
        logger.info(f"Cascading to clear table: {cand_subclass}")
        self.session.query(Candidate).filter_by(type=cand_subclass).delete(
            synchronize_session="fetch"
        )
python
def clear(self): # Create set of candidate_subclasses associated with each mention_subclass cand_subclasses = set() for mentions, tablename in [ (_[1][0], _[1][1]) for _ in candidate_subclasses.values() ]: for mention in mentions: if mention in self.mention_classes: cand_subclasses.add(tablename) # First, clear all the Mentions. This will cascade and remove the # mention_subclasses and corresponding candidate_subclasses. for mention_class in self.mention_classes: logger.info(f"Clearing table: {mention_class.__tablename__}") self.session.query(Mention).filter_by( type=mention_class.__tablename__ ).delete(synchronize_session="fetch") # Next, clear the Candidates. This is done manually because we have # no cascading relationship from candidate_subclass to Candidate. for cand_subclass in cand_subclasses: logger.info(f"Cascading to clear table: {cand_subclass}") self.session.query(Candidate).filter_by(type=cand_subclass).delete( synchronize_session="fetch" )
[ "def", "clear", "(", "self", ")", ":", "# Create set of candidate_subclasses associated with each mention_subclass", "cand_subclasses", "=", "set", "(", ")", "for", "mentions", ",", "tablename", "in", "[", "(", "_", "[", "1", "]", "[", "0", "]", ",", "_", "[",...
Delete Mentions of each class in the extractor from the given split.
[ "Delete", "Mentions", "of", "each", "class", "in", "the", "extractor", "from", "the", "given", "split", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/mentions.py#L445-L471
229,902
HazyResearch/fonduer
src/fonduer/candidates/mentions.py
MentionExtractor.clear_all
def clear_all(self):
    """Delete all Mentions from given split the database."""
    logger.info("Clearing ALL Mentions.")
    # Remove every Mention regardless of its mention class.
    self.session.query(Mention).delete(synchronize_session="fetch")
    # With no Mentions, there should be no Candidates also
    self.session.query(Candidate).delete(synchronize_session="fetch")
    logger.info("Cleared ALL Mentions (and Candidates).")
python
def clear_all(self): logger.info("Clearing ALL Mentions.") self.session.query(Mention).delete(synchronize_session="fetch") # With no Mentions, there should be no Candidates also self.session.query(Candidate).delete(synchronize_session="fetch") logger.info("Cleared ALL Mentions (and Candidates).")
[ "def", "clear_all", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Clearing ALL Mentions.\"", ")", "self", ".", "session", ".", "query", "(", "Mention", ")", ".", "delete", "(", "synchronize_session", "=", "\"fetch\"", ")", "# With no Mentions, there shou...
Delete all Mentions from given split the database.
[ "Delete", "all", "Mentions", "from", "given", "split", "the", "database", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/mentions.py#L473-L480
229,903
HazyResearch/fonduer
src/fonduer/candidates/mentions.py
MentionExtractor.get_mentions
def get_mentions(self, docs=None, sort=False):
    """Return a list of lists of the mentions associated with this extractor.

    Each inner list holds the Mentions for one of the mention classes
    associated with the MentionExtractor.

    :param docs: If provided, return Mentions from these documents. Else,
        return all Mentions.
    :param sort: If sort is True, then return all Mentions sorted by
        stable_id.
    :type sort: bool
    :return: Mentions for each mention_class.
    :rtype: List of lists.
    """
    # Precompute the document id filter once, if documents were given.
    doc_ids = None
    if docs:
        doc_list = docs if isinstance(docs, (list, tuple)) else [docs]
        doc_ids = [doc.id for doc in doc_list]

    results = []
    for mention_class in self.mention_classes:
        query = self.session.query(mention_class)
        if doc_ids is not None:
            # Get cands from all splits for the requested documents.
            query = query.filter(mention_class.document_id.in_(doc_ids))
        mentions = query.order_by(mention_class.id).all()
        if sort:
            mentions = sorted(mentions, key=lambda x: x[0].get_stable_id())
        results.append(mentions)
    return results
python
def get_mentions(self, docs=None, sort=False): result = [] if docs: docs = docs if isinstance(docs, (list, tuple)) else [docs] # Get cands from all splits for mention_class in self.mention_classes: mentions = ( self.session.query(mention_class) .filter(mention_class.document_id.in_([doc.id for doc in docs])) .order_by(mention_class.id) .all() ) if sort: mentions = sorted(mentions, key=lambda x: x[0].get_stable_id()) result.append(mentions) else: for mention_class in self.mention_classes: mentions = ( self.session.query(mention_class).order_by(mention_class.id).all() ) if sort: mentions = sorted(mentions, key=lambda x: x[0].get_stable_id()) result.append(mentions) return result
[ "def", "get_mentions", "(", "self", ",", "docs", "=", "None", ",", "sort", "=", "False", ")", ":", "result", "=", "[", "]", "if", "docs", ":", "docs", "=", "docs", "if", "isinstance", "(", "docs", ",", "(", "list", ",", "tuple", ")", ")", "else",...
Return a list of lists of the mentions associated with this extractor. Each list of the return will contain the Mentions for one of the mention classes associated with the MentionExtractor. :param docs: If provided, return Mentions from these documents. Else, return all Mentions. :param sort: If sort is True, then return all Mentions sorted by stable_id. :type sort: bool :return: Mentions for each mention_class. :rtype: List of lists.
[ "Return", "a", "list", "of", "lists", "of", "the", "mentions", "associated", "with", "this", "extractor", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/mentions.py#L482-L517
229,904
HazyResearch/fonduer
src/fonduer/candidates/mentions.py
MentionExtractorUDF.apply
def apply(self, doc, clear, **kwargs):
    """Extract mentions from the given Document.

    :param doc: A document to process.
    :param clear: Whether or not to clear the existing database entries.
    """
    # Reattach doc with the current session or DetachedInstanceError happens
    doc = self.session.merge(doc)
    for class_idx, mention_class in enumerate(self.mention_classes):
        pending_contexts = defaultdict(list)

        # Generate TemporaryContexts that are children of the context using
        # the mention_space and filtered by the Matcher
        self.child_context_set.clear()
        matcher = self.matchers[class_idx]
        space = self.mention_spaces[class_idx]
        for temp_context in matcher.apply(space.apply(doc)):
            record = temp_context._load_id_or_insert(self.session)
            if record:
                pending_contexts[temp_context._get_table()].append(record)
            self.child_context_set.add(temp_context)

        # Bulk insert temporary contexts
        for table, records in pending_contexts.items():
            self.session.execute(insert(table.__table__).values(records))

        # Generates and persists mentions
        mention_args = {"document_id": doc.id}
        for child_context in self.child_context_set:
            # Assemble mention arguments
            for arg_name in mention_class.__argnames__:
                mention_args[arg_name + "_id"] = child_context.id

            # Checking for existence
            if not clear:
                q = select([mention_class.id])
                for key, value in list(mention_args.items()):
                    q = q.where(getattr(mention_class, key) == value)
                if self.session.execute(q).first() is not None:
                    continue

            # Add Mention to session
            yield mention_class(**mention_args)
python
def apply(self, doc, clear, **kwargs): # Reattach doc with the current session or DetachedInstanceError happens doc = self.session.merge(doc) # Iterate over each mention class for i, mention_class in enumerate(self.mention_classes): tc_to_insert = defaultdict(list) # Generate TemporaryContexts that are children of the context using # the mention_space and filtered by the Matcher self.child_context_set.clear() for tc in self.matchers[i].apply(self.mention_spaces[i].apply(doc)): rec = tc._load_id_or_insert(self.session) if rec: tc_to_insert[tc._get_table()].append(rec) self.child_context_set.add(tc) # Bulk insert temporary contexts for table, records in tc_to_insert.items(): stmt = insert(table.__table__).values(records) self.session.execute(stmt) # Generates and persists mentions mention_args = {"document_id": doc.id} for child_context in self.child_context_set: # Assemble mention arguments for arg_name in mention_class.__argnames__: mention_args[arg_name + "_id"] = child_context.id # Checking for existence if not clear: q = select([mention_class.id]) for key, value in list(mention_args.items()): q = q.where(getattr(mention_class, key) == value) mention_id = self.session.execute(q).first() if mention_id is not None: continue # Add Mention to session yield mention_class(**mention_args)
[ "def", "apply", "(", "self", ",", "doc", ",", "clear", ",", "*", "*", "kwargs", ")", ":", "# Reattach doc with the current session or DetachedInstanceError happens", "doc", "=", "self", ".", "session", ".", "merge", "(", "doc", ")", "# Iterate over each mention clas...
Extract mentions from the given Document. :param doc: A document to process. :param clear: Whether or not to clear the existing database entries.
[ "Extract", "mentions", "from", "the", "given", "Document", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/mentions.py#L545-L588
229,905
HazyResearch/fonduer
src/fonduer/parser/simple_tokenizer.py
SimpleTokenizer.parse
def parse(self, contents):
    """Parse the document.

    Splits ``contents`` on ``self.delim``, whitespace-tokenizes each
    non-empty chunk, and yields one dict of token-level fields per chunk.
    POS/NER/lemma/dependency fields are emitted empty — this tokenizer
    performs no linguistic analysis.

    :param contents: The text contents of the document.
    :rtype: a *generator* of tokenized text.
    """
    # NOTE(review): the original kept an ``i`` counter that was
    # incremented but never read; it has been removed.
    for text in contents.split(self.delim):
        if not text.strip():
            continue
        words = text.split()
        # Offsets assume tokens are re-joined with single spaces below.
        char_offsets = [0] + [
            int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1]
        ]
        text = " ".join(words)
        yield {
            "text": text,
            "words": words,
            "pos_tags": [""] * len(words),
            "ner_tags": [""] * len(words),
            "lemmas": [""] * len(words),
            "dep_parents": [0] * len(words),
            "dep_labels": [""] * len(words),
            "char_offsets": char_offsets,
            "abs_char_offsets": char_offsets,
        }
python
def parse(self, contents): i = 0 for text in contents.split(self.delim): if not len(text.strip()): continue words = text.split() char_offsets = [0] + [ int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1] ] text = " ".join(words) yield { "text": text, "words": words, "pos_tags": [""] * len(words), "ner_tags": [""] * len(words), "lemmas": [""] * len(words), "dep_parents": [0] * len(words), "dep_labels": [""] * len(words), "char_offsets": char_offsets, "abs_char_offsets": char_offsets, } i += 1
[ "def", "parse", "(", "self", ",", "contents", ")", ":", "i", "=", "0", "for", "text", "in", "contents", ".", "split", "(", "self", ".", "delim", ")", ":", "if", "not", "len", "(", "text", ".", "strip", "(", ")", ")", ":", "continue", "words", "...
Parse the document. :param contents: The text contents of the document. :rtype: a *generator* of tokenized text.
[ "Parse", "the", "document", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/simple_tokenizer.py#L13-L39
229,906
HazyResearch/fonduer
src/fonduer/features/feature_libs/structural_features.py
strlib_unary_features
def strlib_unary_features(span):
    """Structural-related features for a single span."""
    # Only sentences backed by HTML structure produce structural features.
    if not span.sentence.is_structural():
        return

    yield f"TAG_{get_tag(span)}", DEF_VALUE

    for attr in get_attributes(span):
        yield f"HTML_ATTR_{attr}", DEF_VALUE

    yield f"PARENT_TAG_{get_parent_tag(span)}", DEF_VALUE

    preceding = get_prev_sibling_tags(span)
    if preceding:
        yield f"PREV_SIB_TAG_{preceding[-1]}", DEF_VALUE
        yield f"NODE_POS_{len(preceding) + 1}", DEF_VALUE
    else:
        yield "FIRST_NODE", DEF_VALUE

    following = get_next_sibling_tags(span)
    if following:
        yield f"NEXT_SIB_TAG_{following[0]}", DEF_VALUE
    else:
        yield "LAST_NODE", DEF_VALUE

    yield f"ANCESTOR_CLASS_[{' '.join(get_ancestor_class_names(span))}]", DEF_VALUE
    yield f"ANCESTOR_TAG_[{' '.join(get_ancestor_tag_names(span))}]", DEF_VALUE
    yield f"ANCESTOR_ID_[{' '.join(get_ancestor_id_names(span))}]", DEF_VALUE
python
def strlib_unary_features(span): if not span.sentence.is_structural(): return yield f"TAG_{get_tag(span)}", DEF_VALUE for attr in get_attributes(span): yield f"HTML_ATTR_{attr}", DEF_VALUE yield f"PARENT_TAG_{get_parent_tag(span)}", DEF_VALUE prev_tags = get_prev_sibling_tags(span) if len(prev_tags): yield f"PREV_SIB_TAG_{prev_tags[-1]}", DEF_VALUE yield f"NODE_POS_{len(prev_tags) + 1}", DEF_VALUE else: yield "FIRST_NODE", DEF_VALUE next_tags = get_next_sibling_tags(span) if len(next_tags): yield f"NEXT_SIB_TAG_{next_tags[0]}", DEF_VALUE else: yield "LAST_NODE", DEF_VALUE yield f"ANCESTOR_CLASS_[{' '.join(get_ancestor_class_names(span))}]", DEF_VALUE yield f"ANCESTOR_TAG_[{' '.join(get_ancestor_tag_names(span))}]", DEF_VALUE yield f"ANCESTOR_ID_[{' '.join(get_ancestor_id_names(span))}]", DEF_VALUE
[ "def", "strlib_unary_features", "(", "span", ")", ":", "if", "not", "span", ".", "sentence", ".", "is_structural", "(", ")", ":", "return", "yield", "f\"TAG_{get_tag(span)}\"", ",", "DEF_VALUE", "for", "attr", "in", "get_attributes", "(", "span", ")", ":", "...
Structural-related features for a single span
[ "Structural", "-", "related", "features", "for", "a", "single", "span" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/features/feature_libs/structural_features.py#L70-L101
229,907
HazyResearch/fonduer
src/fonduer/utils/utils_parser.py
build_node
def build_node(type, name, content):
    """Wrap up content into an html node.

    NOTE: ``type`` shadows the builtin of the same name; the parameter name
    is kept for backward compatibility with existing keyword callers.

    :param type: content type (e.g., doc, section, text, figure)
    :type type: str
    :param name: content name (e.g., the name of the section)
    :type name: str
    :param content: actual content
    :type content: str
    :return: new String with content in html format, or None if ``type``
        is not one of the recognized types.
    """
    if type == "doc":
        return f"<html>{content}</html>"
    if type == "section":
        return f"<section name='{name}'>{content}</section>"
    if type == "text":
        return f"<p name='{name}'>{content}</p>"
    if type == "figure":
        return f"<img name='{name}' src='{content}'/>"
python
def build_node(type, name, content): if type == "doc": return f"<html>{content}</html>" if type == "section": return f"<section name='{name}'>{content}</section>" if type == "text": return f"<p name='{name}'>{content}</p>" if type == "figure": return f"<img name='{name}' src='{content}'/>"
[ "def", "build_node", "(", "type", ",", "name", ",", "content", ")", ":", "if", "type", "==", "\"doc\"", ":", "return", "f\"<html>{content}</html>\"", "if", "type", "==", "\"section\"", ":", "return", "f\"<section name='{name}'>{content}</section>\"", "if", "type", ...
Wrap up content in to a html node. :param type: content type (e.g., doc, section, text, figure) :type path: str :param name: content name (e.g., the name of the section) :type path: str :param name: actual content :type path: str :return: new String with content in html format
[ "Wrap", "up", "content", "in", "to", "a", "html", "node", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_parser.py#L1-L20
229,908
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/utils.py
_to_span
def _to_span(x, idx=0):
    """Convert a Candidate, Mention, or Span to a span."""
    # Guard-clause style: check each accepted type in turn.
    if isinstance(x, Candidate):
        return x[idx].context
    if isinstance(x, Mention):
        return x.context
    if isinstance(x, TemporarySpanMention):
        return x
    raise ValueError(f"{type(x)} is an invalid argument type")
python
def _to_span(x, idx=0): if isinstance(x, Candidate): return x[idx].context elif isinstance(x, Mention): return x.context elif isinstance(x, TemporarySpanMention): return x else: raise ValueError(f"{type(x)} is an invalid argument type")
[ "def", "_to_span", "(", "x", ",", "idx", "=", "0", ")", ":", "if", "isinstance", "(", "x", ",", "Candidate", ")", ":", "return", "x", "[", "idx", "]", ".", "context", "elif", "isinstance", "(", "x", ",", "Mention", ")", ":", "return", "x", ".", ...
Convert a Candidate, Mention, or Span to a span.
[ "Convert", "a", "Candidate", "Mention", "or", "Span", "to", "a", "span", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/utils.py#L9-L18
229,909
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/utils.py
_to_spans
def _to_spans(x):
    """Convert a Candidate, Mention, or Span to a list of spans."""
    # Guard-clause style: check each accepted type in turn.
    if isinstance(x, Candidate):
        return [_to_span(m) for m in x]
    if isinstance(x, Mention):
        return [x.context]
    if isinstance(x, TemporarySpanMention):
        return [x]
    raise ValueError(f"{type(x)} is an invalid argument type")
python
def _to_spans(x): if isinstance(x, Candidate): return [_to_span(m) for m in x] elif isinstance(x, Mention): return [x.context] elif isinstance(x, TemporarySpanMention): return [x] else: raise ValueError(f"{type(x)} is an invalid argument type")
[ "def", "_to_spans", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "Candidate", ")", ":", "return", "[", "_to_span", "(", "m", ")", "for", "m", "in", "x", "]", "elif", "isinstance", "(", "x", ",", "Mention", ")", ":", "return", "[", "x", ...
Convert a Candidate, Mention, or Span to a list of spans.
[ "Convert", "a", "Candidate", "Mention", "or", "Span", "to", "a", "list", "of", "spans", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/utils.py#L22-L31
229,910
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/utils.py
get_matches
def get_matches(lf, candidate_set, match_values=(1, -1)):
    """Return a list of candidates that are matched by a particular LF.

    A simple helper function to see how many matches (non-zero by default)
    an LF gets.

    :param lf: The labeling function to apply to the candidate_set
    :param candidate_set: The set of candidates to evaluate
    :param match_values: An optional collection of the values to consider
        as matched. ``(1, -1)`` by default.
    :rtype: a list of candidates
    """
    logger = logging.getLogger(__name__)
    # NOTE: the default was changed from a mutable list literal to a tuple
    # to avoid the shared-mutable-default pitfall; membership semantics
    # are unchanged.
    matches = [c for c in candidate_set if lf(c) in match_values]
    logger.info(f"{len(matches)} matches")
    return matches
python
def get_matches(lf, candidate_set, match_values=[1, -1]): logger = logging.getLogger(__name__) matches = [] for c in candidate_set: label = lf(c) if label in match_values: matches.append(c) logger.info(f"{len(matches)} matches") return matches
[ "def", "get_matches", "(", "lf", ",", "candidate_set", ",", "match_values", "=", "[", "1", ",", "-", "1", "]", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "matches", "=", "[", "]", "for", "c", "in", "candidate_set", ":...
Return a list of candidates that are matched by a particular LF. A simple helper function to see how many matches (non-zero by default) an LF gets. :param lf: The labeling function to apply to the candidate_set :param candidate_set: The set of candidates to evaluate :param match_values: An option list of the values to consider as matched. [1, -1] by default. :rtype: a list of candidates
[ "Return", "a", "list", "of", "candidates", "that", "are", "matched", "by", "a", "particular", "LF", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/utils.py#L60-L79
229,911
HazyResearch/fonduer
src/fonduer/features/featurizer.py
Featurizer.update
def update(self, docs=None, split=0, parallelism=None, progress_bar=True):
    """Update the features of the specified candidates.

    :param docs: If provided, apply features to all the candidates in these
        documents.
    :param split: If docs is None, apply features to the candidates in this
        particular split.
    :type split: int
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the Featurizer if
        it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    # An update is just an apply() in training mode without clearing.
    self.apply(
        docs=docs,
        split=split,
        train=True,
        clear=False,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
python
def update(self, docs=None, split=0, parallelism=None, progress_bar=True): self.apply( docs=docs, split=split, train=True, clear=False, parallelism=parallelism, progress_bar=progress_bar, )
[ "def", "update", "(", "self", ",", "docs", "=", "None", ",", "split", "=", "0", ",", "parallelism", "=", "None", ",", "progress_bar", "=", "True", ")", ":", "self", ".", "apply", "(", "docs", "=", "docs", ",", "split", "=", "split", ",", "train", ...
Update the features of the specified candidates. :param docs: If provided, apply features to all the candidates in these documents. :param split: If docs is None, apply features to the candidates in this particular split. :type split: int :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the Featurizer if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool
[ "Update", "the", "features", "of", "the", "specified", "candidates", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/features/featurizer.py#L44-L67
229,912
HazyResearch/fonduer
src/fonduer/features/featurizer.py
Featurizer.apply
def apply(
    self,
    docs=None,
    split=0,
    train=False,
    clear=True,
    parallelism=None,
    progress_bar=True,
):
    """Apply features to the specified candidates.

    :param docs: If provided, apply features to all the candidates in these
        documents.
    :param split: If docs is None, apply features to the candidates in this
        particular split.
    :type split: int
    :param train: Whether or not to update the global key set of features
        and the features of candidates.
    :type train: bool
    :param clear: Whether or not to clear the features table before applying
        features.
    :type clear: bool
    :param parallelism: How many threads to use for extraction. This will
        override the parallelism value used to initialize the Featurizer if
        it is provided.
    :type parallelism: int
    :param progress_bar: Whether or not to display a progress bar. The
        progress bar is measured per document.
    :type progress_bar: bool
    """
    if docs:
        # Call apply on the specified docs for all splits
        split = ALL_SPLITS
        target_docs = docs
    else:
        # Only grab the docs containing candidates from the given split.
        target_docs = get_docs_from_split(
            self.session, self.candidate_classes, split
        )
    super(Featurizer, self).apply(
        target_docs,
        split=split,
        train=train,
        clear=clear,
        parallelism=parallelism,
        progress_bar=progress_bar,
    )
    # Needed to sync the bulk operations
    self.session.commit()
python
def apply( self, docs=None, split=0, train=False, clear=True, parallelism=None, progress_bar=True, ): if docs: # Call apply on the specified docs for all splits split = ALL_SPLITS super(Featurizer, self).apply( docs, split=split, train=train, clear=clear, parallelism=parallelism, progress_bar=progress_bar, ) # Needed to sync the bulk operations self.session.commit() else: # Only grab the docs containing candidates from the given split. split_docs = get_docs_from_split( self.session, self.candidate_classes, split ) super(Featurizer, self).apply( split_docs, split=split, train=train, clear=clear, parallelism=parallelism, progress_bar=progress_bar, ) # Needed to sync the bulk operations self.session.commit()
[ "def", "apply", "(", "self", ",", "docs", "=", "None", ",", "split", "=", "0", ",", "train", "=", "False", ",", "clear", "=", "True", ",", "parallelism", "=", "None", ",", "progress_bar", "=", "True", ",", ")", ":", "if", "docs", ":", "# Call apply...
Apply features to the specified candidates. :param docs: If provided, apply features to all the candidates in these documents. :param split: If docs is None, apply features to the candidates in this particular split. :type split: int :param train: Whether or not to update the global key set of features and the features of candidates. :type train: bool :param clear: Whether or not to clear the features table before applying features. :type clear: bool :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the Featurizer if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool
[ "Apply", "features", "to", "the", "specified", "candidates", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/features/featurizer.py#L69-L126
229,913
HazyResearch/fonduer
src/fonduer/features/featurizer.py
Featurizer.drop_keys
def drop_keys(self, keys, candidate_classes=None):
    """Drop the specified keys from FeatureKeys.

    :param keys: A list of FeatureKey names to delete.
    :type keys: list, tuple
    :param candidate_classes: A list of the Candidates to drop the key for.
        If None, drops the keys for all candidate classes associated with
        this Featurizer.
    :type candidate_classes: list, tuple
    """
    # Normalize ``keys`` to a list.
    if not isinstance(keys, (list, tuple)):
        keys = [keys]

    if candidate_classes:
        # Normalize ``candidate_classes`` to a list.
        if not isinstance(candidate_classes, (list, tuple)):
            candidate_classes = [candidate_classes]

        # Ensure only candidate classes associated with the featurizer
        # are used.
        candidate_classes = [
            _.__tablename__
            for _ in candidate_classes
            if _ in self.candidate_classes
        ]

        if len(candidate_classes) == 0:
            logger.warning(
                "You didn't specify valid candidate classes for this featurizer."
            )
            return
    else:
        # If unspecified, just use all candidate classes
        candidate_classes = [_.__tablename__ for _ in self.candidate_classes]

    # Map every key to the full set of affected candidate tables for the
    # module-level drop_keys utility.
    key_map = {key: set(candidate_classes) for key in keys}
    drop_keys(self.session, FeatureKey, key_map)
python
def drop_keys(self, keys, candidate_classes=None): # Make sure keys is iterable keys = keys if isinstance(keys, (list, tuple)) else [keys] # Make sure candidate_classes is iterable if candidate_classes: candidate_classes = ( candidate_classes if isinstance(candidate_classes, (list, tuple)) else [candidate_classes] ) # Ensure only candidate classes associated with the featurizer # are used. candidate_classes = [ _.__tablename__ for _ in candidate_classes if _ in self.candidate_classes ] if len(candidate_classes) == 0: logger.warning( "You didn't specify valid candidate classes for this featurizer." ) return # If unspecified, just use all candidate classes else: candidate_classes = [_.__tablename__ for _ in self.candidate_classes] # build dict for use by utils key_map = dict() for key in keys: key_map[key] = set(candidate_classes) drop_keys(self.session, FeatureKey, key_map)
[ "def", "drop_keys", "(", "self", ",", "keys", ",", "candidate_classes", "=", "None", ")", ":", "# Make sure keys is iterable", "keys", "=", "keys", "if", "isinstance", "(", "keys", ",", "(", "list", ",", "tuple", ")", ")", "else", "[", "keys", "]", "# Ma...
Drop the specified keys from FeatureKeys. :param keys: A list of FeatureKey names to delete. :type keys: list, tuple :param candidate_classes: A list of the Candidates to drop the key for. If None, drops the keys for all candidate classes associated with this Featurizer. :type candidate_classes: list, tuple
[ "Drop", "the", "specified", "keys", "from", "FeatureKeys", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/features/featurizer.py#L128-L171
229,914
HazyResearch/fonduer
src/fonduer/features/featurizer.py
Featurizer.clear
def clear(self, train=False, split=0): """Delete Features of each class from the database. :param train: Whether or not to clear the FeatureKeys :type train: bool :param split: Which split of candidates to clear features from. :type split: int """ # Clear Features for the candidates in the split passed in. logger.info(f"Clearing Features (split {split})") sub_query = ( self.session.query(Candidate.id).filter(Candidate.split == split).subquery() ) query = self.session.query(Feature).filter(Feature.candidate_id.in_(sub_query)) query.delete(synchronize_session="fetch") # Delete all old annotation keys if train: logger.debug(f"Clearing all FeatureKeys from {self.candidate_classes}...") drop_all_keys(self.session, FeatureKey, self.candidate_classes)
python
def clear(self, train=False, split=0): # Clear Features for the candidates in the split passed in. logger.info(f"Clearing Features (split {split})") sub_query = ( self.session.query(Candidate.id).filter(Candidate.split == split).subquery() ) query = self.session.query(Feature).filter(Feature.candidate_id.in_(sub_query)) query.delete(synchronize_session="fetch") # Delete all old annotation keys if train: logger.debug(f"Clearing all FeatureKeys from {self.candidate_classes}...") drop_all_keys(self.session, FeatureKey, self.candidate_classes)
[ "def", "clear", "(", "self", ",", "train", "=", "False", ",", "split", "=", "0", ")", ":", "# Clear Features for the candidates in the split passed in.", "logger", ".", "info", "(", "f\"Clearing Features (split {split})\"", ")", "sub_query", "=", "(", "self", ".", ...
Delete Features of each class from the database. :param train: Whether or not to clear the FeatureKeys :type train: bool :param split: Which split of candidates to clear features from. :type split: int
[ "Delete", "Features", "of", "each", "class", "from", "the", "database", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/features/featurizer.py#L181-L201
229,915
HazyResearch/fonduer
src/fonduer/features/featurizer.py
Featurizer.clear_all
def clear_all(self): """Delete all Features.""" logger.info("Clearing ALL Features and FeatureKeys.") self.session.query(Feature).delete(synchronize_session="fetch") self.session.query(FeatureKey).delete(synchronize_session="fetch")
python
def clear_all(self): logger.info("Clearing ALL Features and FeatureKeys.") self.session.query(Feature).delete(synchronize_session="fetch") self.session.query(FeatureKey).delete(synchronize_session="fetch")
[ "def", "clear_all", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Clearing ALL Features and FeatureKeys.\"", ")", "self", ".", "session", ".", "query", "(", "Feature", ")", ".", "delete", "(", "synchronize_session", "=", "\"fetch\"", ")", "self", ".", ...
Delete all Features.
[ "Delete", "all", "Features", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/features/featurizer.py#L203-L207
229,916
HazyResearch/fonduer
src/fonduer/utils/config.py
_merge
def _merge(x, y): """Merge two nested dictionaries. Overwrite values in x with values in y.""" merged = {**x, **y} xkeys = x.keys() for key in xkeys: if isinstance(x[key], dict) and key in y: merged[key] = _merge(x[key], y[key]) return merged
python
def _merge(x, y): merged = {**x, **y} xkeys = x.keys() for key in xkeys: if isinstance(x[key], dict) and key in y: merged[key] = _merge(x[key], y[key]) return merged
[ "def", "_merge", "(", "x", ",", "y", ")", ":", "merged", "=", "{", "*", "*", "x", ",", "*", "*", "y", "}", "xkeys", "=", "x", ".", "keys", "(", ")", "for", "key", "in", "xkeys", ":", "if", "isinstance", "(", "x", "[", "key", "]", ",", "di...
Merge two nested dictionaries. Overwrite values in x with values in y.
[ "Merge", "two", "nested", "dictionaries", ".", "Overwrite", "values", "in", "x", "with", "values", "in", "y", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/config.py#L55-L65
229,917
HazyResearch/fonduer
src/fonduer/utils/config.py
get_config
def get_config(path=os.getcwd()): """Search for settings file in root of project and its parents.""" config = default tries = 0 current_dir = path while current_dir and tries < MAX_CONFIG_SEARCH_DEPTH: potential_path = os.path.join(current_dir, ".fonduer-config.yaml") if os.path.exists(potential_path): with open(potential_path, "r") as f: config = _merge(config, yaml.safe_load(f)) logger.debug(f"Loading Fonduer config from {potential_path}.") break new_dir = os.path.split(current_dir)[0] if current_dir == new_dir: logger.debug("Unable to find config file. Using defaults.") break current_dir = new_dir tries += 1 return config
python
def get_config(path=os.getcwd()): config = default tries = 0 current_dir = path while current_dir and tries < MAX_CONFIG_SEARCH_DEPTH: potential_path = os.path.join(current_dir, ".fonduer-config.yaml") if os.path.exists(potential_path): with open(potential_path, "r") as f: config = _merge(config, yaml.safe_load(f)) logger.debug(f"Loading Fonduer config from {potential_path}.") break new_dir = os.path.split(current_dir)[0] if current_dir == new_dir: logger.debug("Unable to find config file. Using defaults.") break current_dir = new_dir tries += 1 return config
[ "def", "get_config", "(", "path", "=", "os", ".", "getcwd", "(", ")", ")", ":", "config", "=", "default", "tries", "=", "0", "current_dir", "=", "path", "while", "current_dir", "and", "tries", "<", "MAX_CONFIG_SEARCH_DEPTH", ":", "potential_path", "=", "os...
Search for settings file in root of project and its parents.
[ "Search", "for", "settings", "file", "in", "root", "of", "project", "and", "its", "parents", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/config.py#L68-L88
229,918
HazyResearch/fonduer
src/fonduer/candidates/models/temporary_context.py
TemporaryContext._load_id_or_insert
def _load_id_or_insert(self, session): """Load the id of the temporary context if it exists or return insert args. As a side effect, this also inserts the Context object for the stableid. :return: The record of the temporary context to insert. :rtype: dict """ if self.id is None: stable_id = self.get_stable_id() # Check if exists id = session.execute( select([Context.id]).where(Context.stable_id == stable_id) ).first() # If not, insert if id is None: self.id = session.execute( Context.__table__.insert(), {"type": self._get_table().__tablename__, "stable_id": stable_id}, ).inserted_primary_key[0] insert_args = self._get_insert_args() insert_args["id"] = self.id return insert_args else: self.id = id[0]
python
def _load_id_or_insert(self, session): if self.id is None: stable_id = self.get_stable_id() # Check if exists id = session.execute( select([Context.id]).where(Context.stable_id == stable_id) ).first() # If not, insert if id is None: self.id = session.execute( Context.__table__.insert(), {"type": self._get_table().__tablename__, "stable_id": stable_id}, ).inserted_primary_key[0] insert_args = self._get_insert_args() insert_args["id"] = self.id return insert_args else: self.id = id[0]
[ "def", "_load_id_or_insert", "(", "self", ",", "session", ")", ":", "if", "self", ".", "id", "is", "None", ":", "stable_id", "=", "self", ".", "get_stable_id", "(", ")", "# Check if exists", "id", "=", "session", ".", "execute", "(", "select", "(", "[", ...
Load the id of the temporary context if it exists or return insert args. As a side effect, this also inserts the Context object for the stableid. :return: The record of the temporary context to insert. :rtype: dict
[ "Load", "the", "id", "of", "the", "temporary", "context", "if", "it", "exists", "or", "return", "insert", "args", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/models/temporary_context.py#L27-L52
229,919
HazyResearch/fonduer
src/fonduer/learning/disc_models/logistic_regression.py
LogisticRegression._build_model
def _build_model(self): """ Build model. """ if "input_dim" not in self.settings: raise ValueError("Model parameter input_dim cannot be None.") self.linear = nn.Linear( self.settings["input_dim"], self.cardinality, self.settings["bias"] )
python
def _build_model(self): if "input_dim" not in self.settings: raise ValueError("Model parameter input_dim cannot be None.") self.linear = nn.Linear( self.settings["input_dim"], self.cardinality, self.settings["bias"] )
[ "def", "_build_model", "(", "self", ")", ":", "if", "\"input_dim\"", "not", "in", "self", ".", "settings", ":", "raise", "ValueError", "(", "\"Model parameter input_dim cannot be None.\"", ")", "self", ".", "linear", "=", "nn", ".", "Linear", "(", "self", ".",...
Build model.
[ "Build", "model", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/logistic_regression.py#L107-L117
229,920
HazyResearch/fonduer
src/fonduer/parser/parser.py
Parser.apply
def apply( self, doc_loader, pdf_path=None, clear=True, parallelism=None, progress_bar=True ): """Run the Parser. :param doc_loader: An iteratable of ``Documents`` to parse. Typically, one of Fonduer's document preprocessors. :param pdf_path: The path to the PDF documents, if any. This path will override the one used in initialization, if provided. :param clear: Whether or not to clear the labels table before applying these LFs. :type clear: bool :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the Labeler if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool """ super(Parser, self).apply( doc_loader, pdf_path=pdf_path, clear=clear, parallelism=parallelism, progress_bar=progress_bar, )
python
def apply( self, doc_loader, pdf_path=None, clear=True, parallelism=None, progress_bar=True ): super(Parser, self).apply( doc_loader, pdf_path=pdf_path, clear=clear, parallelism=parallelism, progress_bar=progress_bar, )
[ "def", "apply", "(", "self", ",", "doc_loader", ",", "pdf_path", "=", "None", ",", "clear", "=", "True", ",", "parallelism", "=", "None", ",", "progress_bar", "=", "True", ")", ":", "super", "(", "Parser", ",", "self", ")", ".", "apply", "(", "doc_lo...
Run the Parser. :param doc_loader: An iteratable of ``Documents`` to parse. Typically, one of Fonduer's document preprocessors. :param pdf_path: The path to the PDF documents, if any. This path will override the one used in initialization, if provided. :param clear: Whether or not to clear the labels table before applying these LFs. :type clear: bool :param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the Labeler if it is provided. :type parallelism: int :param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document. :type progress_bar: bool
[ "Run", "the", "Parser", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L85-L111
229,921
HazyResearch/fonduer
src/fonduer/parser/parser.py
Parser.get_last_documents
def get_last_documents(self): """Return the most recently parsed list of ``Documents``. :rtype: A list of the most recently parsed ``Documents`` ordered by name. """ return ( self.session.query(Document) .filter(Document.name.in_(self.last_docs)) .order_by(Document.name) .all() )
python
def get_last_documents(self): return ( self.session.query(Document) .filter(Document.name.in_(self.last_docs)) .order_by(Document.name) .all() )
[ "def", "get_last_documents", "(", "self", ")", ":", "return", "(", "self", ".", "session", ".", "query", "(", "Document", ")", ".", "filter", "(", "Document", ".", "name", ".", "in_", "(", "self", ".", "last_docs", ")", ")", ".", "order_by", "(", "Do...
Return the most recently parsed list of ``Documents``. :rtype: A list of the most recently parsed ``Documents`` ordered by name.
[ "Return", "the", "most", "recently", "parsed", "list", "of", "Documents", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L120-L130
229,922
HazyResearch/fonduer
src/fonduer/parser/parser.py
Parser.get_documents
def get_documents(self): """Return all the parsed ``Documents`` in the database. :rtype: A list of all ``Documents`` in the database ordered by name. """ return self.session.query(Document).order_by(Document.name).all()
python
def get_documents(self): return self.session.query(Document).order_by(Document.name).all()
[ "def", "get_documents", "(", "self", ")", ":", "return", "self", ".", "session", ".", "query", "(", "Document", ")", ".", "order_by", "(", "Document", ".", "name", ")", ".", "all", "(", ")" ]
Return all the parsed ``Documents`` in the database. :rtype: A list of all ``Documents`` in the database ordered by name.
[ "Return", "all", "the", "parsed", "Documents", "in", "the", "database", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L132-L137
229,923
HazyResearch/fonduer
src/fonduer/parser/parser.py
ParserUDF._valid_pdf
def _valid_pdf(self, path, filename): """Verify that the file exists and has a PDF extension.""" # If path is file, but not PDF. if os.path.isfile(path) and path.lower().endswith(".pdf"): return True else: full_path = os.path.join(path, filename) if os.path.isfile(full_path) and full_path.lower().endswith(".pdf"): return True elif os.path.isfile(os.path.join(path, filename + ".pdf")): return True elif os.path.isfile(os.path.join(path, filename + ".PDF")): return True return False
python
def _valid_pdf(self, path, filename): # If path is file, but not PDF. if os.path.isfile(path) and path.lower().endswith(".pdf"): return True else: full_path = os.path.join(path, filename) if os.path.isfile(full_path) and full_path.lower().endswith(".pdf"): return True elif os.path.isfile(os.path.join(path, filename + ".pdf")): return True elif os.path.isfile(os.path.join(path, filename + ".PDF")): return True return False
[ "def", "_valid_pdf", "(", "self", ",", "path", ",", "filename", ")", ":", "# If path is file, but not PDF.", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", "and", "path", ".", "lower", "(", ")", ".", "endswith", "(", "\".pdf\"", ")", ":", "r...
Verify that the file exists and has a PDF extension.
[ "Verify", "that", "the", "file", "exists", "and", "has", "a", "PDF", "extension", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L262-L276
229,924
HazyResearch/fonduer
src/fonduer/parser/parser.py
ParserUDF._parse_figure
def _parse_figure(self, node, state): """Parse the figure node. :param node: The lxml img node to parse :param state: The global state necessary to place the node in context of the document as a whole. """ if node.tag not in ["img", "figure"]: return state # Process the Figure stable_id = ( f"{state['document'].name}" f"::" f"{'figure'}" f":" f"{state['figure']['idx']}" ) # Set name for Figure name = node.attrib["name"] if "name" in node.attrib else None # img within a Figure get's processed in the parent Figure if node.tag == "img" and isinstance(state["parent"][node], Figure): return state # NOTE: We currently do NOT support nested figures. parts = {} parent = state["parent"][node] if isinstance(parent, Section): parts["section"] = parent elif isinstance(parent, Cell): parts["section"] = parent.table.section parts["cell"] = parent else: logger.warning(f"Figure is nested within {state['parent'][node]}") return state parts["document"] = state["document"] parts["stable_id"] = stable_id parts["name"] = name parts["position"] = state["figure"]["idx"] # If processing a raw img if node.tag == "img": # Create the Figure entry in the DB parts["url"] = node.get("src") state["context"][node] = Figure(**parts) elif node.tag == "figure": # Pull the image from a child img node, if one exists imgs = [child for child in node if child.tag == "img"] if len(imgs) > 1: logger.warning("Figure contains multiple images.") # Right now we don't support multiple URLs in the Figure context # As a workaround, just ignore the outer Figure and allow processing # of the individual images. We ignore the accompanying figcaption # by marking it as visited. captions = [child for child in node if child.tag == "figcaption"] state["visited"].update(captions) return state img = imgs[0] state["visited"].add(img) # Create the Figure entry in the DB parts["url"] = img.get("src") state["context"][node] = Figure(**parts) state["figure"]["idx"] += 1 return state
python
def _parse_figure(self, node, state): if node.tag not in ["img", "figure"]: return state # Process the Figure stable_id = ( f"{state['document'].name}" f"::" f"{'figure'}" f":" f"{state['figure']['idx']}" ) # Set name for Figure name = node.attrib["name"] if "name" in node.attrib else None # img within a Figure get's processed in the parent Figure if node.tag == "img" and isinstance(state["parent"][node], Figure): return state # NOTE: We currently do NOT support nested figures. parts = {} parent = state["parent"][node] if isinstance(parent, Section): parts["section"] = parent elif isinstance(parent, Cell): parts["section"] = parent.table.section parts["cell"] = parent else: logger.warning(f"Figure is nested within {state['parent'][node]}") return state parts["document"] = state["document"] parts["stable_id"] = stable_id parts["name"] = name parts["position"] = state["figure"]["idx"] # If processing a raw img if node.tag == "img": # Create the Figure entry in the DB parts["url"] = node.get("src") state["context"][node] = Figure(**parts) elif node.tag == "figure": # Pull the image from a child img node, if one exists imgs = [child for child in node if child.tag == "img"] if len(imgs) > 1: logger.warning("Figure contains multiple images.") # Right now we don't support multiple URLs in the Figure context # As a workaround, just ignore the outer Figure and allow processing # of the individual images. We ignore the accompanying figcaption # by marking it as visited. captions = [child for child in node if child.tag == "figcaption"] state["visited"].update(captions) return state img = imgs[0] state["visited"].add(img) # Create the Figure entry in the DB parts["url"] = img.get("src") state["context"][node] = Figure(**parts) state["figure"]["idx"] += 1 return state
[ "def", "_parse_figure", "(", "self", ",", "node", ",", "state", ")", ":", "if", "node", ".", "tag", "not", "in", "[", "\"img\"", ",", "\"figure\"", "]", ":", "return", "state", "# Process the Figure", "stable_id", "=", "(", "f\"{state['document'].name}\"", "...
Parse the figure node. :param node: The lxml img node to parse :param state: The global state necessary to place the node in context of the document as a whole.
[ "Parse", "the", "figure", "node", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L406-L476
229,925
HazyResearch/fonduer
src/fonduer/parser/parser.py
ParserUDF._parse_paragraph
def _parse_paragraph(self, node, state): """Parse a Paragraph of the node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole. """ # Both Paragraphs will share the same parent parent = ( state["context"][node] if node in state["context"] else state["parent"][node] ) # Set name for Paragraph name = node.attrib["name"] if "name" in node.attrib else None for field in ["text", "tail"]: text = getattr(node, field) text = text.strip() if text and self.strip else text # Skip if "" or None if not text: continue # Run RegEx replacements for (rgx, replace) in self.replacements: text = rgx.sub(replace, text) # Process the Paragraph stable_id = ( f"{state['document'].name}" f"::" f"{'paragraph'}" f":" f"{state['paragraph']['idx']}" ) parts = {} parts["stable_id"] = stable_id parts["name"] = name parts["document"] = state["document"] parts["position"] = state["paragraph"]["idx"] if isinstance(parent, Caption): if parent.table: parts["section"] = parent.table.section elif parent.figure: parts["section"] = parent.figure.section parts["caption"] = parent elif isinstance(parent, Cell): parts["section"] = parent.table.section parts["cell"] = parent elif isinstance(parent, Section): parts["section"] = parent elif isinstance(parent, Figure): # occurs with text in the tail of an img parts["section"] = parent.section elif isinstance(parent, Table): # occurs with text in the tail of a table parts["section"] = parent.section else: raise NotImplementedError( f"Para '{text}' parent must be Section, Caption, or Cell, " f"not {parent}" ) # Create the entry in the DB paragraph = Paragraph(**parts) state["paragraph"]["idx"] += 1 state["paragraph"]["text"] = text state["paragraph"]["field"] = field yield from self._parse_sentence(paragraph, node, state)
python
def _parse_paragraph(self, node, state): # Both Paragraphs will share the same parent parent = ( state["context"][node] if node in state["context"] else state["parent"][node] ) # Set name for Paragraph name = node.attrib["name"] if "name" in node.attrib else None for field in ["text", "tail"]: text = getattr(node, field) text = text.strip() if text and self.strip else text # Skip if "" or None if not text: continue # Run RegEx replacements for (rgx, replace) in self.replacements: text = rgx.sub(replace, text) # Process the Paragraph stable_id = ( f"{state['document'].name}" f"::" f"{'paragraph'}" f":" f"{state['paragraph']['idx']}" ) parts = {} parts["stable_id"] = stable_id parts["name"] = name parts["document"] = state["document"] parts["position"] = state["paragraph"]["idx"] if isinstance(parent, Caption): if parent.table: parts["section"] = parent.table.section elif parent.figure: parts["section"] = parent.figure.section parts["caption"] = parent elif isinstance(parent, Cell): parts["section"] = parent.table.section parts["cell"] = parent elif isinstance(parent, Section): parts["section"] = parent elif isinstance(parent, Figure): # occurs with text in the tail of an img parts["section"] = parent.section elif isinstance(parent, Table): # occurs with text in the tail of a table parts["section"] = parent.section else: raise NotImplementedError( f"Para '{text}' parent must be Section, Caption, or Cell, " f"not {parent}" ) # Create the entry in the DB paragraph = Paragraph(**parts) state["paragraph"]["idx"] += 1 state["paragraph"]["text"] = text state["paragraph"]["field"] = field yield from self._parse_sentence(paragraph, node, state)
[ "def", "_parse_paragraph", "(", "self", ",", "node", ",", "state", ")", ":", "# Both Paragraphs will share the same parent", "parent", "=", "(", "state", "[", "\"context\"", "]", "[", "node", "]", "if", "node", "in", "state", "[", "\"context\"", "]", "else", ...
Parse a Paragraph of the node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole.
[ "Parse", "a", "Paragraph", "of", "the", "node", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L582-L653
229,926
HazyResearch/fonduer
src/fonduer/parser/parser.py
ParserUDF._parse_section
def _parse_section(self, node, state): """Parse a Section of the node. Note that this implementation currently creates a Section at the beginning of the document and creates Section based on tag of node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole. """ if node.tag not in ["html", "section"]: return state # Add a Section stable_id = ( f"{state['document'].name}" f"::" f"{'section'}" f":" f"{state['section']['idx']}" ) # Set name for Section name = node.attrib["name"] if "name" in node.attrib else None state["context"][node] = Section( document=state["document"], name=name, stable_id=stable_id, position=state["section"]["idx"], ) state["section"]["idx"] += 1 return state
python
def _parse_section(self, node, state): if node.tag not in ["html", "section"]: return state # Add a Section stable_id = ( f"{state['document'].name}" f"::" f"{'section'}" f":" f"{state['section']['idx']}" ) # Set name for Section name = node.attrib["name"] if "name" in node.attrib else None state["context"][node] = Section( document=state["document"], name=name, stable_id=stable_id, position=state["section"]["idx"], ) state["section"]["idx"] += 1 return state
[ "def", "_parse_section", "(", "self", ",", "node", ",", "state", ")", ":", "if", "node", ".", "tag", "not", "in", "[", "\"html\"", ",", "\"section\"", "]", ":", "return", "state", "# Add a Section", "stable_id", "=", "(", "f\"{state['document'].name}\"", "f\...
Parse a Section of the node. Note that this implementation currently creates a Section at the beginning of the document and creates Section based on tag of node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole.
[ "Parse", "a", "Section", "of", "the", "node", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L655-L688
229,927
HazyResearch/fonduer
src/fonduer/parser/parser.py
ParserUDF._parse_caption
def _parse_caption(self, node, state): """Parse a Caption of the node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole. """ if node.tag not in ["caption", "figcaption"]: # captions used in Tables return state # Add a Caption parent = state["parent"][node] stable_id = ( f"{state['document'].name}" f"::" f"{'caption'}" f":" f"{state['caption']['idx']}" ) # Set name for Section name = node.attrib["name"] if "name" in node.attrib else None if isinstance(parent, Table): state["context"][node] = Caption( document=state["document"], table=parent, figure=None, stable_id=stable_id, name=name, position=state["caption"]["idx"], ) elif isinstance(parent, Figure): state["context"][node] = Caption( document=state["document"], table=None, figure=parent, stable_id=stable_id, name=name, position=state["caption"]["idx"], ) else: raise NotImplementedError("Caption must be a child of Table or Figure.") state["caption"]["idx"] += 1 return state
python
def _parse_caption(self, node, state): if node.tag not in ["caption", "figcaption"]: # captions used in Tables return state # Add a Caption parent = state["parent"][node] stable_id = ( f"{state['document'].name}" f"::" f"{'caption'}" f":" f"{state['caption']['idx']}" ) # Set name for Section name = node.attrib["name"] if "name" in node.attrib else None if isinstance(parent, Table): state["context"][node] = Caption( document=state["document"], table=parent, figure=None, stable_id=stable_id, name=name, position=state["caption"]["idx"], ) elif isinstance(parent, Figure): state["context"][node] = Caption( document=state["document"], table=None, figure=parent, stable_id=stable_id, name=name, position=state["caption"]["idx"], ) else: raise NotImplementedError("Caption must be a child of Table or Figure.") state["caption"]["idx"] += 1 return state
[ "def", "_parse_caption", "(", "self", ",", "node", ",", "state", ")", ":", "if", "node", ".", "tag", "not", "in", "[", "\"caption\"", ",", "\"figcaption\"", "]", ":", "# captions used in Tables", "return", "state", "# Add a Caption", "parent", "=", "state", ...
Parse a Caption of the node. :param node: The lxml node to parse :param state: The global state necessary to place the node in context of the document as a whole.
[ "Parse", "a", "Caption", "of", "the", "node", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L690-L735
229,928
HazyResearch/fonduer
src/fonduer/parser/parser.py
ParserUDF._parse_node
def _parse_node(self, node, state): """Entry point for parsing all node types. :param node: The lxml HTML node to parse :param state: The global state necessary to place the node in context of the document as a whole. :rtype: a *generator* of Sentences """ # Processing on entry of node state = self._parse_section(node, state) state = self._parse_figure(node, state) if self.tabular: state = self._parse_table(node, state) state = self._parse_caption(node, state) yield from self._parse_paragraph(node, state)
python
def _parse_node(self, node, state): # Processing on entry of node state = self._parse_section(node, state) state = self._parse_figure(node, state) if self.tabular: state = self._parse_table(node, state) state = self._parse_caption(node, state) yield from self._parse_paragraph(node, state)
[ "def", "_parse_node", "(", "self", ",", "node", ",", "state", ")", ":", "# Processing on entry of node", "state", "=", "self", ".", "_parse_section", "(", "node", ",", "state", ")", "state", "=", "self", ".", "_parse_figure", "(", "node", ",", "state", ")"...
Entry point for parsing all node types. :param node: The lxml HTML node to parse :param state: The global state necessary to place the node in context of the document as a whole. :rtype: a *generator* of Sentences
[ "Entry", "point", "for", "parsing", "all", "node", "types", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L737-L755
229,929
HazyResearch/fonduer
src/fonduer/parser/parser.py
ParserUDF.parse
def parse(self, document, text): """Depth-first search over the provided tree. Implemented as an iterative procedure. The structure of the state needed to parse each node is also defined in this function. :param document: the Document context :param text: the structured text of the document (e.g. HTML) :rtype: a *generator* of Sentences. """ stack = [] root = lxml.html.fromstring(text) # flattens children of node that are in the 'flatten' list if self.flatten: lxml.etree.strip_tags(root, self.flatten) # Assign the text, which was stripped of the 'flatten'-tags, to the document document.text = lxml.etree.tostring(root, encoding="unicode") # This dictionary contain the global state necessary to parse a # document and each context element. This reflects the relationships # defined in parser/models. This contains the state necessary to create # the respective Contexts within the document. state = { "visited": set(), "parent": {}, # map of parent[child] = node used to discover child "context": {}, # track the Context of each node (context['td'] = Cell) "root": root, "document": document, "section": {"idx": 0}, "paragraph": {"idx": 0}, "figure": {"idx": 0}, "caption": {"idx": 0}, "table": {"idx": 0}, "sentence": {"idx": 0, "abs_offset": 0}, } # NOTE: Currently the helper functions directly manipulate the state # rather than returning a modified copy. # Iterative Depth-First Search stack.append(root) state["parent"][root] = document state["context"][root] = document tokenized_sentences = [] while stack: node = stack.pop() if node not in state["visited"]: state["visited"].add(node) # mark as visited # Process if self.lingual: tokenized_sentences += [y for y in self._parse_node(node, state)] else: yield from self._parse_node(node, state) # NOTE: This reversed() order is to ensure that the iterative # DFS matches the order that would be produced by a recursive # DFS implementation. 
for child in reversed(node): # Skip nodes that are comments or blacklisted if child.tag is lxml.etree.Comment or ( self.blacklist and child.tag in self.blacklist ): continue stack.append(child) # store the parent of the node, which is either the parent # Context, or if the parent did not create a Context, then # use the node's parent Context. state["parent"][child] = ( state["context"][node] if node in state["context"] else state["parent"][node] ) if self.lingual: yield from self.enrich_tokenized_sentences_with_nlp(tokenized_sentences)
python
def parse(self, document, text): stack = [] root = lxml.html.fromstring(text) # flattens children of node that are in the 'flatten' list if self.flatten: lxml.etree.strip_tags(root, self.flatten) # Assign the text, which was stripped of the 'flatten'-tags, to the document document.text = lxml.etree.tostring(root, encoding="unicode") # This dictionary contain the global state necessary to parse a # document and each context element. This reflects the relationships # defined in parser/models. This contains the state necessary to create # the respective Contexts within the document. state = { "visited": set(), "parent": {}, # map of parent[child] = node used to discover child "context": {}, # track the Context of each node (context['td'] = Cell) "root": root, "document": document, "section": {"idx": 0}, "paragraph": {"idx": 0}, "figure": {"idx": 0}, "caption": {"idx": 0}, "table": {"idx": 0}, "sentence": {"idx": 0, "abs_offset": 0}, } # NOTE: Currently the helper functions directly manipulate the state # rather than returning a modified copy. # Iterative Depth-First Search stack.append(root) state["parent"][root] = document state["context"][root] = document tokenized_sentences = [] while stack: node = stack.pop() if node not in state["visited"]: state["visited"].add(node) # mark as visited # Process if self.lingual: tokenized_sentences += [y for y in self._parse_node(node, state)] else: yield from self._parse_node(node, state) # NOTE: This reversed() order is to ensure that the iterative # DFS matches the order that would be produced by a recursive # DFS implementation. for child in reversed(node): # Skip nodes that are comments or blacklisted if child.tag is lxml.etree.Comment or ( self.blacklist and child.tag in self.blacklist ): continue stack.append(child) # store the parent of the node, which is either the parent # Context, or if the parent did not create a Context, then # use the node's parent Context. 
state["parent"][child] = ( state["context"][node] if node in state["context"] else state["parent"][node] ) if self.lingual: yield from self.enrich_tokenized_sentences_with_nlp(tokenized_sentences)
[ "def", "parse", "(", "self", ",", "document", ",", "text", ")", ":", "stack", "=", "[", "]", "root", "=", "lxml", ".", "html", ".", "fromstring", "(", "text", ")", "# flattens children of node that are in the 'flatten' list", "if", "self", ".", "flatten", ":...
Depth-first search over the provided tree. Implemented as an iterative procedure. The structure of the state needed to parse each node is also defined in this function. :param document: the Document context :param text: the structured text of the document (e.g. HTML) :rtype: a *generator* of Sentences.
[ "Depth", "-", "first", "search", "over", "the", "provided", "tree", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/parser.py#L757-L836
229,930
HazyResearch/fonduer
src/fonduer/meta.py
init_logging
def init_logging( log_dir=tempfile.gettempdir(), format="[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s", level=logging.INFO, ): """Configures logging to output to the provided log_dir. Will use a nested directory whose name is the current timestamp. :param log_dir: The directory to store logs in. :type log_dir: str :param format: The logging format string to use. :type format: str :param level: The logging level to use, e.g., logging.INFO. """ if not Meta.log_path: # Generate a new directory using the log_dir, if it doesn't exist dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") log_path = os.path.join(log_dir, dt) if not os.path.exists(log_path): os.makedirs(log_path) # Configure the logger using the provided path logging.basicConfig( format=format, level=level, handlers=[ logging.FileHandler(os.path.join(log_path, "fonduer.log")), logging.StreamHandler(), ], ) # Notify user of log location logger.info(f"Setting logging directory to: {log_path}") Meta.log_path = log_path else: logger.info( f"Logging was already initialized to use {Meta.log_path}. " "To configure logging manually, call fonduer.init_logging before " "initialiting Meta." )
python
def init_logging( log_dir=tempfile.gettempdir(), format="[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s", level=logging.INFO, ): if not Meta.log_path: # Generate a new directory using the log_dir, if it doesn't exist dt = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") log_path = os.path.join(log_dir, dt) if not os.path.exists(log_path): os.makedirs(log_path) # Configure the logger using the provided path logging.basicConfig( format=format, level=level, handlers=[ logging.FileHandler(os.path.join(log_path, "fonduer.log")), logging.StreamHandler(), ], ) # Notify user of log location logger.info(f"Setting logging directory to: {log_path}") Meta.log_path = log_path else: logger.info( f"Logging was already initialized to use {Meta.log_path}. " "To configure logging manually, call fonduer.init_logging before " "initialiting Meta." )
[ "def", "init_logging", "(", "log_dir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "format", "=", "\"[%(asctime)s][%(levelname)s] %(name)s:%(lineno)s - %(message)s\"", ",", "level", "=", "logging", ".", "INFO", ",", ")", ":", "if", "not", "Meta", ".", "lo...
Configures logging to output to the provided log_dir. Will use a nested directory whose name is the current timestamp. :param log_dir: The directory to store logs in. :type log_dir: str :param format: The logging format string to use. :type format: str :param level: The logging level to use, e.g., logging.INFO.
[ "Configures", "logging", "to", "output", "to", "the", "provided", "log_dir", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/meta.py#L16-L57
229,931
HazyResearch/fonduer
src/fonduer/meta.py
_update_meta
def _update_meta(conn_string): """Update Meta class.""" url = urlparse(conn_string) Meta.conn_string = conn_string Meta.DBNAME = url.path[1:] Meta.DBUSER = url.username Meta.DBPWD = url.password Meta.DBHOST = url.hostname Meta.DBPORT = url.port Meta.postgres = url.scheme.startswith("postgresql")
python
def _update_meta(conn_string): url = urlparse(conn_string) Meta.conn_string = conn_string Meta.DBNAME = url.path[1:] Meta.DBUSER = url.username Meta.DBPWD = url.password Meta.DBHOST = url.hostname Meta.DBPORT = url.port Meta.postgres = url.scheme.startswith("postgresql")
[ "def", "_update_meta", "(", "conn_string", ")", ":", "url", "=", "urlparse", "(", "conn_string", ")", "Meta", ".", "conn_string", "=", "conn_string", "Meta", ".", "DBNAME", "=", "url", ".", "path", "[", "1", ":", "]", "Meta", ".", "DBUSER", "=", "url",...
Update Meta class.
[ "Update", "Meta", "class", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/meta.py#L92-L101
229,932
HazyResearch/fonduer
src/fonduer/meta.py
Meta.init
def init(cls, conn_string=None): """Return the unique Meta class.""" if conn_string: _update_meta(conn_string) # We initialize the engine within the models module because models' # schema can depend on which data types are supported by the engine Meta.Session = new_sessionmaker() Meta.engine = Meta.Session.kw["bind"] logger.info( f"Connecting user:{Meta.DBUSER} " f"to {Meta.DBHOST}:{Meta.DBPORT}/{Meta.DBNAME}" ) Meta._init_db() if not Meta.log_path: init_logging() return cls
python
def init(cls, conn_string=None): if conn_string: _update_meta(conn_string) # We initialize the engine within the models module because models' # schema can depend on which data types are supported by the engine Meta.Session = new_sessionmaker() Meta.engine = Meta.Session.kw["bind"] logger.info( f"Connecting user:{Meta.DBUSER} " f"to {Meta.DBHOST}:{Meta.DBPORT}/{Meta.DBNAME}" ) Meta._init_db() if not Meta.log_path: init_logging() return cls
[ "def", "init", "(", "cls", ",", "conn_string", "=", "None", ")", ":", "if", "conn_string", ":", "_update_meta", "(", "conn_string", ")", "# We initialize the engine within the models module because models'", "# schema can depend on which data types are supported by the engine", ...
Return the unique Meta class.
[ "Return", "the", "unique", "Meta", "class", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/meta.py#L125-L142
229,933
HazyResearch/fonduer
src/fonduer/meta.py
Meta._init_db
def _init_db(cls): """ Initialize the storage schema. This call must be performed after all classes that extend Base are declared to ensure the storage schema is initialized. """ # This list of import defines which SQLAlchemy classes will be # initialized when Meta.init() is called. If a sqlalchemy class is not # imported before the call to create_all(), it will not be created. import fonduer.candidates.models # noqa import fonduer.features.models # noqa import fonduer.learning.models # noqa import fonduer.parser.models # noqa import fonduer.supervision.models # noqa import fonduer.utils.models # noqa logger.info("Initializing the storage schema") Meta.Base.metadata.create_all(Meta.engine)
python
def _init_db(cls): # This list of import defines which SQLAlchemy classes will be # initialized when Meta.init() is called. If a sqlalchemy class is not # imported before the call to create_all(), it will not be created. import fonduer.candidates.models # noqa import fonduer.features.models # noqa import fonduer.learning.models # noqa import fonduer.parser.models # noqa import fonduer.supervision.models # noqa import fonduer.utils.models # noqa logger.info("Initializing the storage schema") Meta.Base.metadata.create_all(Meta.engine)
[ "def", "_init_db", "(", "cls", ")", ":", "# This list of import defines which SQLAlchemy classes will be", "# initialized when Meta.init() is called. If a sqlalchemy class is not", "# imported before the call to create_all(), it will not be created.", "import", "fonduer", ".", "candidates", ...
Initialize the storage schema. This call must be performed after all classes that extend Base are declared to ensure the storage schema is initialized.
[ "Initialize", "the", "storage", "schema", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/meta.py#L145-L162
229,934
HazyResearch/fonduer
src/fonduer/parser/spacy_parser.py
Spacy.model_installed
def model_installed(name): """Check if spaCy language model is installed. From https://github.com/explosion/spaCy/blob/master/spacy/util.py :param name: :return: """ data_path = util.get_data_path() if not data_path or not data_path.exists(): raise IOError(f"Can't find spaCy data path: {data_path}") if name in {d.name for d in data_path.iterdir()}: return True if Spacy.is_package(name): # installed as package return True if Path(name).exists(): # path to model data directory return True return False
python
def model_installed(name): data_path = util.get_data_path() if not data_path or not data_path.exists(): raise IOError(f"Can't find spaCy data path: {data_path}") if name in {d.name for d in data_path.iterdir()}: return True if Spacy.is_package(name): # installed as package return True if Path(name).exists(): # path to model data directory return True return False
[ "def", "model_installed", "(", "name", ")", ":", "data_path", "=", "util", ".", "get_data_path", "(", ")", "if", "not", "data_path", "or", "not", "data_path", ".", "exists", "(", ")", ":", "raise", "IOError", "(", "f\"Can't find spaCy data path: {data_path}\"", ...
Check if spaCy language model is installed. From https://github.com/explosion/spaCy/blob/master/spacy/util.py :param name: :return:
[ "Check", "if", "spaCy", "language", "model", "is", "installed", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/spacy_parser.py#L87-L104
229,935
HazyResearch/fonduer
src/fonduer/parser/spacy_parser.py
Spacy.load_lang_model
def load_lang_model(self): """ Load spaCy language model or download if model is available and not installed. Currenty supported spaCy languages en English (50MB) de German (645MB) fr French (1.33GB) es Spanish (377MB) :return: """ if self.lang in self.languages: if not Spacy.model_installed(self.lang): download(self.lang) model = spacy.load(self.lang) elif self.lang in self.alpha_languages: language_module = importlib.import_module(f"spacy.lang.{self.lang}") language_method = getattr(language_module, self.alpha_languages[self.lang]) model = language_method() self.model = model
python
def load_lang_model(self): if self.lang in self.languages: if not Spacy.model_installed(self.lang): download(self.lang) model = spacy.load(self.lang) elif self.lang in self.alpha_languages: language_module = importlib.import_module(f"spacy.lang.{self.lang}") language_method = getattr(language_module, self.alpha_languages[self.lang]) model = language_method() self.model = model
[ "def", "load_lang_model", "(", "self", ")", ":", "if", "self", ".", "lang", "in", "self", ".", "languages", ":", "if", "not", "Spacy", ".", "model_installed", "(", "self", ".", "lang", ")", ":", "download", "(", "self", ".", "lang", ")", "model", "="...
Load spaCy language model or download if model is available and not installed. Currenty supported spaCy languages en English (50MB) de German (645MB) fr French (1.33GB) es Spanish (377MB) :return:
[ "Load", "spaCy", "language", "model", "or", "download", "if", "model", "is", "available", "and", "not", "installed", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/spacy_parser.py#L106-L128
229,936
HazyResearch/fonduer
src/fonduer/parser/spacy_parser.py
Spacy.enrich_sentences_with_NLP
def enrich_sentences_with_NLP(self, all_sentences): """ Enrich a list of fonduer Sentence objects with NLP features. We merge and process the text of all Sentences for higher efficiency. :param all_sentences: List of fonduer Sentence objects for one document :return: """ if not self.has_NLP_support(): raise NotImplementedError( f"Language {self.lang} not available in spacy beyond tokenization" ) if len(all_sentences) == 0: return # Nothing to parse if self.model.has_pipe("sentencizer"): self.model.remove_pipe("sentencizer") self.logger.debug( f"Removed sentencizer ('sentencizer') from model. " f"Now in pipeline: {self.model.pipe_names}" ) if self.model.has_pipe("sentence_boundary_detector"): self.model.remove_pipe(name="sentence_boundary_detector") self.model.add_pipe( set_custom_boundary, before="parser", name="sentence_boundary_detector" ) sentence_batches = self._split_sentences_by_char_limit( all_sentences, self.model.max_length ) # TODO: We could do this in parallel. Test speedup in the future for sentence_batch in sentence_batches: custom_tokenizer = TokenPreservingTokenizer(self.model.vocab) # we circumvent redundant tokenization by using a custom # tokenizer that directly uses the already separated words # of each sentence as tokens doc = custom_tokenizer(sentence_batch) doc.user_data = sentence_batch for name, proc in self.model.pipeline: # iterate over components in order doc = proc(doc) try: assert doc.is_parsed except Exception: self.logger.exception(f"{doc} was not parsed") for sent, current_sentence_obj in zip(doc.sents, sentence_batch): parts = defaultdict(list) for i, token in enumerate(sent): parts["lemmas"].append(token.lemma_) parts["pos_tags"].append(token.tag_) parts["ner_tags"].append( token.ent_type_ if token.ent_type_ else "O" ) head_idx = ( 0 if token.head is token else token.head.i - sent[0].i + 1 ) parts["dep_parents"].append(head_idx) parts["dep_labels"].append(token.dep_) current_sentence_obj.pos_tags = parts["pos_tags"] 
current_sentence_obj.lemmas = parts["lemmas"] current_sentence_obj.ner_tags = parts["ner_tags"] current_sentence_obj.dep_parents = parts["dep_parents"] current_sentence_obj.dep_labels = parts["dep_labels"] yield current_sentence_obj
python
def enrich_sentences_with_NLP(self, all_sentences): if not self.has_NLP_support(): raise NotImplementedError( f"Language {self.lang} not available in spacy beyond tokenization" ) if len(all_sentences) == 0: return # Nothing to parse if self.model.has_pipe("sentencizer"): self.model.remove_pipe("sentencizer") self.logger.debug( f"Removed sentencizer ('sentencizer') from model. " f"Now in pipeline: {self.model.pipe_names}" ) if self.model.has_pipe("sentence_boundary_detector"): self.model.remove_pipe(name="sentence_boundary_detector") self.model.add_pipe( set_custom_boundary, before="parser", name="sentence_boundary_detector" ) sentence_batches = self._split_sentences_by_char_limit( all_sentences, self.model.max_length ) # TODO: We could do this in parallel. Test speedup in the future for sentence_batch in sentence_batches: custom_tokenizer = TokenPreservingTokenizer(self.model.vocab) # we circumvent redundant tokenization by using a custom # tokenizer that directly uses the already separated words # of each sentence as tokens doc = custom_tokenizer(sentence_batch) doc.user_data = sentence_batch for name, proc in self.model.pipeline: # iterate over components in order doc = proc(doc) try: assert doc.is_parsed except Exception: self.logger.exception(f"{doc} was not parsed") for sent, current_sentence_obj in zip(doc.sents, sentence_batch): parts = defaultdict(list) for i, token in enumerate(sent): parts["lemmas"].append(token.lemma_) parts["pos_tags"].append(token.tag_) parts["ner_tags"].append( token.ent_type_ if token.ent_type_ else "O" ) head_idx = ( 0 if token.head is token else token.head.i - sent[0].i + 1 ) parts["dep_parents"].append(head_idx) parts["dep_labels"].append(token.dep_) current_sentence_obj.pos_tags = parts["pos_tags"] current_sentence_obj.lemmas = parts["lemmas"] current_sentence_obj.ner_tags = parts["ner_tags"] current_sentence_obj.dep_parents = parts["dep_parents"] current_sentence_obj.dep_labels = parts["dep_labels"] yield current_sentence_obj
[ "def", "enrich_sentences_with_NLP", "(", "self", ",", "all_sentences", ")", ":", "if", "not", "self", ".", "has_NLP_support", "(", ")", ":", "raise", "NotImplementedError", "(", "f\"Language {self.lang} not available in spacy beyond tokenization\"", ")", "if", "len", "(...
Enrich a list of fonduer Sentence objects with NLP features. We merge and process the text of all Sentences for higher efficiency. :param all_sentences: List of fonduer Sentence objects for one document :return:
[ "Enrich", "a", "list", "of", "fonduer", "Sentence", "objects", "with", "NLP", "features", ".", "We", "merge", "and", "process", "the", "text", "of", "all", "Sentences", "for", "higher", "efficiency", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/spacy_parser.py#L130-L198
229,937
HazyResearch/fonduer
src/fonduer/parser/spacy_parser.py
Spacy.split_sentences
def split_sentences(self, text): """ Split input text into sentences that match CoreNLP's default format, but are not yet processed. :param text: The text of the parent paragraph of the sentences :return: """ if self.model.has_pipe("sentence_boundary_detector"): self.model.remove_pipe(name="sentence_boundary_detector") if not self.model.has_pipe("sentencizer"): sentencizer = self.model.create_pipe("sentencizer") # add sentencizer self.model.add_pipe(sentencizer) try: doc = self.model(text, disable=["parser", "tagger", "ner"]) except ValueError: # temporary increase character limit of spacy # 'Probably save' according to spacy, as no parser or NER is used previous_max_length = self.model.max_length self.model.max_length = 100_000_000 self.logger.warning( f"Temporarily increased spacy maximum " f"character limit to {self.model.max_length} to split sentences." ) doc = self.model(text, disable=["parser", "tagger", "ner"]) self.model.max_length = previous_max_length self.logger.warning( f"Spacy maximum " f"character limit set back to {self.model.max_length}." ) doc.is_parsed = True position = 0 for sent in doc.sents: parts = defaultdict(list) text = sent.text for i, token in enumerate(sent): parts["words"].append(str(token)) parts["lemmas"].append(token.lemma_) parts["pos_tags"].append(token.pos_) parts["ner_tags"].append("") # placeholder for later NLP parsing parts["char_offsets"].append(token.idx) parts["abs_char_offsets"].append(token.idx) parts["dep_parents"].append(0) # placeholder for later NLP parsing parts["dep_labels"].append("") # placeholder for later NLP parsing # make char_offsets relative to start of sentence parts["char_offsets"] = [ p - parts["char_offsets"][0] for p in parts["char_offsets"] ] parts["position"] = position parts["text"] = text position += 1 yield parts
python
def split_sentences(self, text): if self.model.has_pipe("sentence_boundary_detector"): self.model.remove_pipe(name="sentence_boundary_detector") if not self.model.has_pipe("sentencizer"): sentencizer = self.model.create_pipe("sentencizer") # add sentencizer self.model.add_pipe(sentencizer) try: doc = self.model(text, disable=["parser", "tagger", "ner"]) except ValueError: # temporary increase character limit of spacy # 'Probably save' according to spacy, as no parser or NER is used previous_max_length = self.model.max_length self.model.max_length = 100_000_000 self.logger.warning( f"Temporarily increased spacy maximum " f"character limit to {self.model.max_length} to split sentences." ) doc = self.model(text, disable=["parser", "tagger", "ner"]) self.model.max_length = previous_max_length self.logger.warning( f"Spacy maximum " f"character limit set back to {self.model.max_length}." ) doc.is_parsed = True position = 0 for sent in doc.sents: parts = defaultdict(list) text = sent.text for i, token in enumerate(sent): parts["words"].append(str(token)) parts["lemmas"].append(token.lemma_) parts["pos_tags"].append(token.pos_) parts["ner_tags"].append("") # placeholder for later NLP parsing parts["char_offsets"].append(token.idx) parts["abs_char_offsets"].append(token.idx) parts["dep_parents"].append(0) # placeholder for later NLP parsing parts["dep_labels"].append("") # placeholder for later NLP parsing # make char_offsets relative to start of sentence parts["char_offsets"] = [ p - parts["char_offsets"][0] for p in parts["char_offsets"] ] parts["position"] = position parts["text"] = text position += 1 yield parts
[ "def", "split_sentences", "(", "self", ",", "text", ")", ":", "if", "self", ".", "model", ".", "has_pipe", "(", "\"sentence_boundary_detector\"", ")", ":", "self", ".", "model", ".", "remove_pipe", "(", "name", "=", "\"sentence_boundary_detector\"", ")", "if",...
Split input text into sentences that match CoreNLP's default format, but are not yet processed. :param text: The text of the parent paragraph of the sentences :return:
[ "Split", "input", "text", "into", "sentences", "that", "match", "CoreNLP", "s", "default", "format", "but", "are", "not", "yet", "processed", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/spacy_parser.py#L212-L270
229,938
HazyResearch/fonduer
src/fonduer/learning/classifier.py
Classifier._setup_model_loss
def _setup_model_loss(self, lr): """ Setup loss and optimizer for PyTorch model. """ # Setup loss if not hasattr(self, "loss"): self.loss = SoftCrossEntropyLoss() # Setup optimizer if not hasattr(self, "optimizer"): self.optimizer = optim.Adam(self.parameters(), lr=lr)
python
def _setup_model_loss(self, lr): # Setup loss if not hasattr(self, "loss"): self.loss = SoftCrossEntropyLoss() # Setup optimizer if not hasattr(self, "optimizer"): self.optimizer = optim.Adam(self.parameters(), lr=lr)
[ "def", "_setup_model_loss", "(", "self", ",", "lr", ")", ":", "# Setup loss", "if", "not", "hasattr", "(", "self", ",", "\"loss\"", ")", ":", "self", ".", "loss", "=", "SoftCrossEntropyLoss", "(", ")", "# Setup optimizer", "if", "not", "hasattr", "(", "sel...
Setup loss and optimizer for PyTorch model.
[ "Setup", "loss", "and", "optimizer", "for", "PyTorch", "model", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/classifier.py#L74-L84
229,939
HazyResearch/fonduer
src/fonduer/learning/classifier.py
Classifier.save_marginals
def save_marginals(self, session, X, training=False): """Save the predicted marginal probabilities for the Candidates X. :param session: The database session to use. :param X: Input data. :param training: If True, these are training marginals / labels; else they are saved as end model predictions. :type training: bool """ save_marginals(session, X, self.marginals(X), training=training)
python
def save_marginals(self, session, X, training=False): save_marginals(session, X, self.marginals(X), training=training)
[ "def", "save_marginals", "(", "self", ",", "session", ",", "X", ",", "training", "=", "False", ")", ":", "save_marginals", "(", "session", ",", "X", ",", "self", ".", "marginals", "(", "X", ")", ",", "training", "=", "training", ")" ]
Save the predicted marginal probabilities for the Candidates X. :param session: The database session to use. :param X: Input data. :param training: If True, these are training marginals / labels; else they are saved as end model predictions. :type training: bool
[ "Save", "the", "predicted", "marginal", "probabilities", "for", "the", "Candidates", "X", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/classifier.py#L364-L374
229,940
HazyResearch/fonduer
src/fonduer/learning/classifier.py
Classifier.predict
def predict(self, X, b=0.5, pos_label=1, return_probs=False): """Return numpy array of class predictions for X based on predicted marginal probabilities. :param X: Input data. :param b: Decision boundary *for binary setting only*. :type b: float :param pos_label: Positive class index *for binary setting only*. Default: 1 :type pos_label: int :param return_probs: If True, return predict probability. Default: False :type return_probs: bool """ if self._check_input(X): X = self._preprocess_data(X) Y_prob = self.marginals(X) if self.cardinality > 2: Y_pred = Y_prob.argmax(axis=1) + 1 if return_probs: return Y_pred, Y_prob else: return Y_pred if pos_label not in [1, 2]: raise ValueError("pos_label must have values in {1,2}.") self.logger.info(f"Using positive label class {pos_label} with threshold {b}") Y_pred = np.array( [pos_label if p[pos_label - 1] > b else 3 - pos_label for p in Y_prob] ) if return_probs: return Y_pred, Y_prob else: return Y_pred
python
def predict(self, X, b=0.5, pos_label=1, return_probs=False): if self._check_input(X): X = self._preprocess_data(X) Y_prob = self.marginals(X) if self.cardinality > 2: Y_pred = Y_prob.argmax(axis=1) + 1 if return_probs: return Y_pred, Y_prob else: return Y_pred if pos_label not in [1, 2]: raise ValueError("pos_label must have values in {1,2}.") self.logger.info(f"Using positive label class {pos_label} with threshold {b}") Y_pred = np.array( [pos_label if p[pos_label - 1] > b else 3 - pos_label for p in Y_prob] ) if return_probs: return Y_pred, Y_prob else: return Y_pred
[ "def", "predict", "(", "self", ",", "X", ",", "b", "=", "0.5", ",", "pos_label", "=", "1", ",", "return_probs", "=", "False", ")", ":", "if", "self", ".", "_check_input", "(", "X", ")", ":", "X", "=", "self", ".", "_preprocess_data", "(", "X", ")...
Return numpy array of class predictions for X based on predicted marginal probabilities. :param X: Input data. :param b: Decision boundary *for binary setting only*. :type b: float :param pos_label: Positive class index *for binary setting only*. Default: 1 :type pos_label: int :param return_probs: If True, return predict probability. Default: False :type return_probs: bool
[ "Return", "numpy", "array", "of", "class", "predictions", "for", "X", "based", "on", "predicted", "marginal", "probabilities", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/classifier.py#L376-L411
229,941
HazyResearch/fonduer
src/fonduer/learning/classifier.py
Classifier.save
def save(self, model_file, save_dir, verbose=True): """Save current model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool """ # Check existence of model saving directory and create if does not exist. if not os.path.exists(save_dir): os.makedirs(save_dir) params = { "model": self.state_dict(), "cardinality": self.cardinality, "name": self.name, "config": self.settings, } try: torch.save(params, f"{save_dir}/{model_file}") except BaseException: self.logger.warning("Saving failed... continuing anyway.") if verbose: self.logger.info(f"[{self.name}] Model saved as {model_file} in {save_dir}")
python
def save(self, model_file, save_dir, verbose=True): # Check existence of model saving directory and create if does not exist. if not os.path.exists(save_dir): os.makedirs(save_dir) params = { "model": self.state_dict(), "cardinality": self.cardinality, "name": self.name, "config": self.settings, } try: torch.save(params, f"{save_dir}/{model_file}") except BaseException: self.logger.warning("Saving failed... continuing anyway.") if verbose: self.logger.info(f"[{self.name}] Model saved as {model_file} in {save_dir}")
[ "def", "save", "(", "self", ",", "model_file", ",", "save_dir", ",", "verbose", "=", "True", ")", ":", "# Check existence of model saving directory and create if does not exist.", "if", "not", "os", ".", "path", ".", "exists", "(", "save_dir", ")", ":", "os", "....
Save current model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool
[ "Save", "current", "model", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/classifier.py#L488-L516
229,942
HazyResearch/fonduer
src/fonduer/learning/classifier.py
Classifier.load
def load(self, model_file, save_dir, verbose=True): """Load model from file and rebuild the model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool """ if not os.path.exists(save_dir): self.logger.error("Loading failed... Directory does not exist.") try: checkpoint = torch.load(f"{save_dir}/{model_file}") except BaseException: self.logger.error( f"Loading failed... Cannot load model from {save_dir}/{model_file}" ) self.load_state_dict(checkpoint["model"]) self.settings = checkpoint["config"] self.cardinality = checkpoint["cardinality"] self.name = checkpoint["name"] if verbose: self.logger.info( f"[{self.name}] Model loaded as {model_file} in {save_dir}" )
python
def load(self, model_file, save_dir, verbose=True): if not os.path.exists(save_dir): self.logger.error("Loading failed... Directory does not exist.") try: checkpoint = torch.load(f"{save_dir}/{model_file}") except BaseException: self.logger.error( f"Loading failed... Cannot load model from {save_dir}/{model_file}" ) self.load_state_dict(checkpoint["model"]) self.settings = checkpoint["config"] self.cardinality = checkpoint["cardinality"] self.name = checkpoint["name"] if verbose: self.logger.info( f"[{self.name}] Model loaded as {model_file} in {save_dir}" )
[ "def", "load", "(", "self", ",", "model_file", ",", "save_dir", ",", "verbose", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "save_dir", ")", ":", "self", ".", "logger", ".", "error", "(", "\"Loading failed... Directory does...
Load model from file and rebuild the model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool
[ "Load", "model", "from", "file", "and", "rebuild", "the", "model", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/classifier.py#L518-L547
229,943
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
get_parent_tag
def get_parent_tag(mention): """Return the HTML tag of the Mention's parent. These may be tags such as 'p', 'h2', 'table', 'div', etc. If a candidate is passed in, only the tag of its first Mention is returned. :param mention: The Mention to evaluate :rtype: string """ span = _to_span(mention) i = _get_node(span.sentence) return str(i.getparent().tag) if i.getparent() is not None else None
python
def get_parent_tag(mention): span = _to_span(mention) i = _get_node(span.sentence) return str(i.getparent().tag) if i.getparent() is not None else None
[ "def", "get_parent_tag", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "i", "=", "_get_node", "(", "span", ".", "sentence", ")", "return", "str", "(", "i", ".", "getparent", "(", ")", ".", "tag", ")", "if", "i", ".", "getp...
Return the HTML tag of the Mention's parent. These may be tags such as 'p', 'h2', 'table', 'div', etc. If a candidate is passed in, only the tag of its first Mention is returned. :param mention: The Mention to evaluate :rtype: string
[ "Return", "the", "HTML", "tag", "of", "the", "Mention", "s", "parent", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/structural.py#L54-L65
229,944
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
get_prev_sibling_tags
def get_prev_sibling_tags(mention): """Return the HTML tag of the Mention's previous siblings. Previous siblings are Mentions which are at the same level in the HTML tree as the given mention, but are declared before the given mention. If a candidate is passed in, only the previous siblings of its first Mention are considered in the calculation. :param mention: The Mention to evaluate :rtype: list of strings """ span = _to_span(mention) prev_sibling_tags = [] i = _get_node(span.sentence) while i.getprevious() is not None: prev_sibling_tags.insert(0, str(i.getprevious().tag)) i = i.getprevious() return prev_sibling_tags
python
def get_prev_sibling_tags(mention): span = _to_span(mention) prev_sibling_tags = [] i = _get_node(span.sentence) while i.getprevious() is not None: prev_sibling_tags.insert(0, str(i.getprevious().tag)) i = i.getprevious() return prev_sibling_tags
[ "def", "get_prev_sibling_tags", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "prev_sibling_tags", "=", "[", "]", "i", "=", "_get_node", "(", "span", ".", "sentence", ")", "while", "i", ".", "getprevious", "(", ")", "is", "not",...
Return the HTML tag of the Mention's previous siblings. Previous siblings are Mentions which are at the same level in the HTML tree as the given mention, but are declared before the given mention. If a candidate is passed in, only the previous siblings of its first Mention are considered in the calculation. :param mention: The Mention to evaluate :rtype: list of strings
[ "Return", "the", "HTML", "tag", "of", "the", "Mention", "s", "previous", "siblings", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/structural.py#L68-L85
229,945
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
get_next_sibling_tags
def get_next_sibling_tags(mention): """Return the HTML tag of the Mention's next siblings. Next siblings are Mentions which are at the same level in the HTML tree as the given mention, but are declared after the given mention. If a candidate is passed in, only the next siblings of its last Mention are considered in the calculation. :param mention: The Mention to evaluate :rtype: list of strings """ span = _to_span(mention) next_sibling_tags = [] i = _get_node(span.sentence) while i.getnext() is not None: next_sibling_tags.append(str(i.getnext().tag)) i = i.getnext() return next_sibling_tags
python
def get_next_sibling_tags(mention): span = _to_span(mention) next_sibling_tags = [] i = _get_node(span.sentence) while i.getnext() is not None: next_sibling_tags.append(str(i.getnext().tag)) i = i.getnext() return next_sibling_tags
[ "def", "get_next_sibling_tags", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "next_sibling_tags", "=", "[", "]", "i", "=", "_get_node", "(", "span", ".", "sentence", ")", "while", "i", ".", "getnext", "(", ")", "is", "not", "...
Return the HTML tag of the Mention's next siblings. Next siblings are Mentions which are at the same level in the HTML tree as the given mention, but are declared after the given mention. If a candidate is passed in, only the next siblings of its last Mention are considered in the calculation. :param mention: The Mention to evaluate :rtype: list of strings
[ "Return", "the", "HTML", "tag", "of", "the", "Mention", "s", "next", "siblings", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/structural.py#L88-L105
229,946
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
get_ancestor_class_names
def get_ancestor_class_names(mention): """Return the HTML classes of the Mention's ancestors. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings """ span = _to_span(mention) class_names = [] i = _get_node(span.sentence) while i is not None: class_names.insert(0, str(i.get("class"))) i = i.getparent() return class_names
python
def get_ancestor_class_names(mention): span = _to_span(mention) class_names = [] i = _get_node(span.sentence) while i is not None: class_names.insert(0, str(i.get("class"))) i = i.getparent() return class_names
[ "def", "get_ancestor_class_names", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "class_names", "=", "[", "]", "i", "=", "_get_node", "(", "span", ".", "sentence", ")", "while", "i", "is", "not", "None", ":", "class_names", ".",...
Return the HTML classes of the Mention's ancestors. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings
[ "Return", "the", "HTML", "classes", "of", "the", "Mention", "s", "ancestors", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/structural.py#L108-L123
229,947
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
get_ancestor_tag_names
def get_ancestor_tag_names(mention): """Return the HTML tag of the Mention's ancestors. For example, ['html', 'body', 'p']. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings """ span = _to_span(mention) tag_names = [] i = _get_node(span.sentence) while i is not None: tag_names.insert(0, str(i.tag)) i = i.getparent() return tag_names
python
def get_ancestor_tag_names(mention): span = _to_span(mention) tag_names = [] i = _get_node(span.sentence) while i is not None: tag_names.insert(0, str(i.tag)) i = i.getparent() return tag_names
[ "def", "get_ancestor_tag_names", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "tag_names", "=", "[", "]", "i", "=", "_get_node", "(", "span", ".", "sentence", ")", "while", "i", "is", "not", "None", ":", "tag_names", ".", "in...
Return the HTML tag of the Mention's ancestors. For example, ['html', 'body', 'p']. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings
[ "Return", "the", "HTML", "tag", "of", "the", "Mention", "s", "ancestors", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/structural.py#L126-L141
229,948
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
get_ancestor_id_names
def get_ancestor_id_names(mention): """Return the HTML id's of the Mention's ancestors. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings """ span = _to_span(mention) id_names = [] i = _get_node(span.sentence) while i is not None: id_names.insert(0, str(i.get("id"))) i = i.getparent() return id_names
python
def get_ancestor_id_names(mention): span = _to_span(mention) id_names = [] i = _get_node(span.sentence) while i is not None: id_names.insert(0, str(i.get("id"))) i = i.getparent() return id_names
[ "def", "get_ancestor_id_names", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "id_names", "=", "[", "]", "i", "=", "_get_node", "(", "span", ".", "sentence", ")", "while", "i", "is", "not", "None", ":", "id_names", ".", "inser...
Return the HTML id's of the Mention's ancestors. If a candidate is passed in, only the ancestors of its first Mention are returned. :param mention: The Mention to evaluate :rtype: list of strings
[ "Return", "the", "HTML", "id", "s", "of", "the", "Mention", "s", "ancestors", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/structural.py#L144-L159
229,949
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/structural.py
common_ancestor
def common_ancestor(c): """Return the path to the root that is shared between a binary-Mention Candidate. In particular, this is the common path of HTML tags. :param c: The binary-Mention Candidate to evaluate :rtype: list of strings """ span1 = _to_span(c[0]) span2 = _to_span(c[1]) ancestor1 = np.array(span1.sentence.xpath.split("/")) ancestor2 = np.array(span2.sentence.xpath.split("/")) min_len = min(ancestor1.size, ancestor2.size) return list(ancestor1[: np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
python
def common_ancestor(c): span1 = _to_span(c[0]) span2 = _to_span(c[1]) ancestor1 = np.array(span1.sentence.xpath.split("/")) ancestor2 = np.array(span2.sentence.xpath.split("/")) min_len = min(ancestor1.size, ancestor2.size) return list(ancestor1[: np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
[ "def", "common_ancestor", "(", "c", ")", ":", "span1", "=", "_to_span", "(", "c", "[", "0", "]", ")", "span2", "=", "_to_span", "(", "c", "[", "1", "]", ")", "ancestor1", "=", "np", ".", "array", "(", "span1", ".", "sentence", ".", "xpath", ".", ...
Return the path to the root that is shared between a binary-Mention Candidate. In particular, this is the common path of HTML tags. :param c: The binary-Mention Candidate to evaluate :rtype: list of strings
[ "Return", "the", "path", "to", "the", "root", "that", "is", "shared", "between", "a", "binary", "-", "Mention", "Candidate", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/structural.py#L162-L175
229,950
HazyResearch/fonduer
src/fonduer/learning/disc_models/modules/rnn.py
RNN.init_hidden
def init_hidden(self, batch_size): """Initiate the initial state. :param batch_size: batch size. :type batch_size: int :return: Initial state of LSTM :rtype: pair of torch.Tensors of shape (num_layers * num_directions, batch_size, hidden_size) """ b = 2 if self.bidirectional else 1 if self.use_cuda: return ( torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden).cuda(), torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden).cuda(), ) else: return ( torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden), torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden), )
python
def init_hidden(self, batch_size): b = 2 if self.bidirectional else 1 if self.use_cuda: return ( torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden).cuda(), torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden).cuda(), ) else: return ( torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden), torch.zeros(self.num_layers * b, batch_size, self.lstm_hidden), )
[ "def", "init_hidden", "(", "self", ",", "batch_size", ")", ":", "b", "=", "2", "if", "self", ".", "bidirectional", "else", "1", "if", "self", ".", "use_cuda", ":", "return", "(", "torch", ".", "zeros", "(", "self", ".", "num_layers", "*", "b", ",", ...
Initiate the initial state. :param batch_size: batch size. :type batch_size: int :return: Initial state of LSTM :rtype: pair of torch.Tensors of shape (num_layers * num_directions, batch_size, hidden_size)
[ "Initiate", "the", "initial", "state", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/modules/rnn.py#L138-L158
229,951
HazyResearch/fonduer
src/fonduer/utils/logging/tensorboard_writer.py
TensorBoardLogger.add_scalar
def add_scalar(self, name, value, step): """Log a scalar variable.""" self.writer.add_scalar(name, value, step)
python
def add_scalar(self, name, value, step): self.writer.add_scalar(name, value, step)
[ "def", "add_scalar", "(", "self", ",", "name", ",", "value", ",", "step", ")", ":", "self", ".", "writer", ".", "add_scalar", "(", "name", ",", "value", ",", "step", ")" ]
Log a scalar variable.
[ "Log", "a", "scalar", "variable", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/logging/tensorboard_writer.py#L12-L14
229,952
HazyResearch/fonduer
src/fonduer/learning/disc_models/utils.py
mention_to_tokens
def mention_to_tokens(mention, token_type="words", lowercase=False): """ Extract tokens from the mention :param mention: mention object. :param token_type: token type that wants to extract. :type token_type: str :param lowercase: use lowercase or not. :type lowercase: bool :return: The token list. :rtype: list """ tokens = mention.context.sentence.__dict__[token_type] return [w.lower() if lowercase else w for w in tokens]
python
def mention_to_tokens(mention, token_type="words", lowercase=False): tokens = mention.context.sentence.__dict__[token_type] return [w.lower() if lowercase else w for w in tokens]
[ "def", "mention_to_tokens", "(", "mention", ",", "token_type", "=", "\"words\"", ",", "lowercase", "=", "False", ")", ":", "tokens", "=", "mention", ".", "context", ".", "sentence", ".", "__dict__", "[", "token_type", "]", "return", "[", "w", ".", "lower",...
Extract tokens from the mention :param mention: mention object. :param token_type: token type that wants to extract. :type token_type: str :param lowercase: use lowercase or not. :type lowercase: bool :return: The token list. :rtype: list
[ "Extract", "tokens", "from", "the", "mention" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/utils.py#L38-L52
229,953
HazyResearch/fonduer
src/fonduer/learning/disc_models/utils.py
mark_sentence
def mark_sentence(s, args): """Insert markers around relation arguments in word sequence :param s: list of tokens in sentence. :type s: list :param args: list of triples (l, h, idx) as per @_mark(...) corresponding to relation arguments :type args: list :return: The marked sentence. :rtype: list Example: Then Barack married Michelle. -> Then ~~[[1 Barack 1]]~~ married ~~[[2 Michelle 2]]~~. """ marks = sorted([y for m in args for y in mark(*m)], reverse=True) x = list(s) for k, v in marks: x.insert(k, v) return x
python
def mark_sentence(s, args): marks = sorted([y for m in args for y in mark(*m)], reverse=True) x = list(s) for k, v in marks: x.insert(k, v) return x
[ "def", "mark_sentence", "(", "s", ",", "args", ")", ":", "marks", "=", "sorted", "(", "[", "y", "for", "m", "in", "args", "for", "y", "in", "mark", "(", "*", "m", ")", "]", ",", "reverse", "=", "True", ")", "x", "=", "list", "(", "s", ")", ...
Insert markers around relation arguments in word sequence :param s: list of tokens in sentence. :type s: list :param args: list of triples (l, h, idx) as per @_mark(...) corresponding to relation arguments :type args: list :return: The marked sentence. :rtype: list Example: Then Barack married Michelle. -> Then ~~[[1 Barack 1]]~~ married ~~[[2 Michelle 2]]~~.
[ "Insert", "markers", "around", "relation", "arguments", "in", "word", "sequence" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/utils.py#L72-L91
229,954
HazyResearch/fonduer
src/fonduer/learning/disc_models/utils.py
pad_batch
def pad_batch(batch, max_len=0, type="int"): """Pad the batch into matrix :param batch: The data for padding. :type batch: list of word index sequences :param max_len: Max length of sequence of padding. :type max_len: int :param type: mask value type. :type type: str :return: The padded matrix and correspoing mask matrix. :rtype: pair of torch.Tensors with shape (batch_size, max_sent_len) """ batch_size = len(batch) max_sent_len = int(np.max([len(x) for x in batch])) if max_len > 0 and max_len < max_sent_len: max_sent_len = max_len if type == "float": idx_matrix = np.zeros((batch_size, max_sent_len), dtype=np.float32) else: idx_matrix = np.zeros((batch_size, max_sent_len), dtype=np.int) for idx1, i in enumerate(batch): for idx2, j in enumerate(i): if idx2 >= max_sent_len: break idx_matrix[idx1, idx2] = j idx_matrix = torch.tensor(idx_matrix) mask_matrix = torch.tensor(torch.eq(idx_matrix.data, 0)) return idx_matrix, mask_matrix
python
def pad_batch(batch, max_len=0, type="int"): batch_size = len(batch) max_sent_len = int(np.max([len(x) for x in batch])) if max_len > 0 and max_len < max_sent_len: max_sent_len = max_len if type == "float": idx_matrix = np.zeros((batch_size, max_sent_len), dtype=np.float32) else: idx_matrix = np.zeros((batch_size, max_sent_len), dtype=np.int) for idx1, i in enumerate(batch): for idx2, j in enumerate(i): if idx2 >= max_sent_len: break idx_matrix[idx1, idx2] = j idx_matrix = torch.tensor(idx_matrix) mask_matrix = torch.tensor(torch.eq(idx_matrix.data, 0)) return idx_matrix, mask_matrix
[ "def", "pad_batch", "(", "batch", ",", "max_len", "=", "0", ",", "type", "=", "\"int\"", ")", ":", "batch_size", "=", "len", "(", "batch", ")", "max_sent_len", "=", "int", "(", "np", ".", "max", "(", "[", "len", "(", "x", ")", "for", "x", "in", ...
Pad the batch into matrix :param batch: The data for padding. :type batch: list of word index sequences :param max_len: Max length of sequence of padding. :type max_len: int :param type: mask value type. :type type: str :return: The padded matrix and correspoing mask matrix. :rtype: pair of torch.Tensors with shape (batch_size, max_sent_len)
[ "Pad", "the", "batch", "into", "matrix" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/utils.py#L94-L123
229,955
HazyResearch/fonduer
src/fonduer/parser/preprocessors/doc_preprocessor.py
DocPreprocessor._generate
def _generate(self): """Parses a file or directory of files into a set of ``Document`` objects.""" doc_count = 0 for fp in self.all_files: for doc in self._get_docs_for_path(fp): yield doc doc_count += 1 if doc_count >= self.max_docs: return
python
def _generate(self): doc_count = 0 for fp in self.all_files: for doc in self._get_docs_for_path(fp): yield doc doc_count += 1 if doc_count >= self.max_docs: return
[ "def", "_generate", "(", "self", ")", ":", "doc_count", "=", "0", "for", "fp", "in", "self", ".", "all_files", ":", "for", "doc", "in", "self", ".", "_get_docs_for_path", "(", "fp", ")", ":", "yield", "doc", "doc_count", "+=", "1", "if", "doc_count", ...
Parses a file or directory of files into a set of ``Document`` objects.
[ "Parses", "a", "file", "or", "directory", "of", "files", "into", "a", "set", "of", "Document", "objects", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/preprocessors/doc_preprocessor.py#L25-L33
229,956
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
is_horz_aligned
def is_horz_aligned(c): """Return True if all the components of c are horizontally aligned. Horizontal alignment means that the bounding boxes of each Mention of c shares a similar y-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean """ return all( [ _to_span(c[i]).sentence.is_visual() and bbox_horz_aligned( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
python
def is_horz_aligned(c): return all( [ _to_span(c[i]).sentence.is_visual() and bbox_horz_aligned( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
[ "def", "is_horz_aligned", "(", "c", ")", ":", "return", "all", "(", "[", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_visual", "(", ")", "and", "bbox_horz_aligned", "(", "bbox_from_span", "(", "_to_span", "(", "c", "[", "i", "]...
Return True if all the components of c are horizontally aligned. Horizontal alignment means that the bounding boxes of each Mention of c shares a similar y-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean
[ "Return", "True", "if", "all", "the", "components", "of", "c", "are", "horizontally", "aligned", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L37-L54
229,957
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
is_vert_aligned
def is_vert_aligned(c): """Return true if all the components of c are vertically aligned. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean """ return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
python
def is_vert_aligned(c): return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
[ "def", "is_vert_aligned", "(", "c", ")", ":", "return", "all", "(", "[", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_visual", "(", ")", "and", "bbox_vert_aligned", "(", "bbox_from_span", "(", "_to_span", "(", "c", "[", "i", "]...
Return true if all the components of c are vertically aligned. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. :param c: The candidate to evaluate :rtype: boolean
[ "Return", "true", "if", "all", "the", "components", "of", "c", "are", "vertically", "aligned", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L58-L75
229,958
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
is_vert_aligned_left
def is_vert_aligned_left(c): """Return true if all components are vertically aligned on their left border. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the left border of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean """ return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_left( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
python
def is_vert_aligned_left(c): return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_left( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
[ "def", "is_vert_aligned_left", "(", "c", ")", ":", "return", "all", "(", "[", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_visual", "(", ")", "and", "bbox_vert_aligned_left", "(", "bbox_from_span", "(", "_to_span", "(", "c", "[", ...
Return true if all components are vertically aligned on their left border. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the left border of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean
[ "Return", "true", "if", "all", "components", "are", "vertically", "aligned", "on", "their", "left", "border", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L79-L98
229,959
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
is_vert_aligned_right
def is_vert_aligned_right(c): """Return true if all components vertically aligned on their right border. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the right border of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean """ return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_right( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
python
def is_vert_aligned_right(c): return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_right( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
[ "def", "is_vert_aligned_right", "(", "c", ")", ":", "return", "all", "(", "[", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_visual", "(", ")", "and", "bbox_vert_aligned_right", "(", "bbox_from_span", "(", "_to_span", "(", "c", "[",...
Return true if all components vertically aligned on their right border. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the right border of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean
[ "Return", "true", "if", "all", "components", "vertically", "aligned", "on", "their", "right", "border", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L102-L121
229,960
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
is_vert_aligned_center
def is_vert_aligned_center(c): """Return true if all the components are vertically aligned on their center. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the center of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean """ return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_center( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
python
def is_vert_aligned_center(c): return all( [ _to_span(c[i]).sentence.is_visual() and bbox_vert_aligned_center( bbox_from_span(_to_span(c[i])), bbox_from_span(_to_span(c[0])) ) for i in range(len(c)) ] )
[ "def", "is_vert_aligned_center", "(", "c", ")", ":", "return", "all", "(", "[", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_visual", "(", ")", "and", "bbox_vert_aligned_center", "(", "bbox_from_span", "(", "_to_span", "(", "c", "[...
Return true if all the components are vertically aligned on their center. Vertical alignment means that the bounding boxes of each Mention of c shares a similar x-axis value in the visual rendering of the document. In this function the similarity of the x-axis value is based on the center of their bounding boxes. :param c: The candidate to evaluate :rtype: boolean
[ "Return", "true", "if", "all", "the", "components", "are", "vertically", "aligned", "on", "their", "center", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L125-L144
229,961
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
same_page
def same_page(c): """Return true if all the components of c are on the same page of the document. Page numbers are based on the PDF rendering of the document. If a PDF file is provided, it is used. Otherwise, if only a HTML/XML document is provided, a PDF is created and then used to determine the page number of a Mention. :param c: The candidate to evaluate :rtype: boolean """ return all( [ _to_span(c[i]).sentence.is_visual() and bbox_from_span(_to_span(c[i])).page == bbox_from_span(_to_span(c[0])).page for i in range(len(c)) ] )
python
def same_page(c): return all( [ _to_span(c[i]).sentence.is_visual() and bbox_from_span(_to_span(c[i])).page == bbox_from_span(_to_span(c[0])).page for i in range(len(c)) ] )
[ "def", "same_page", "(", "c", ")", ":", "return", "all", "(", "[", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_visual", "(", ")", "and", "bbox_from_span", "(", "_to_span", "(", "c", "[", "i", "]", ")", ")", ".", "page", ...
Return true if all the components of c are on the same page of the document. Page numbers are based on the PDF rendering of the document. If a PDF file is provided, it is used. Otherwise, if only a HTML/XML document is provided, a PDF is created and then used to determine the page number of a Mention. :param c: The candidate to evaluate :rtype: boolean
[ "Return", "true", "if", "all", "the", "components", "of", "c", "are", "on", "the", "same", "page", "of", "the", "document", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L148-L165
229,962
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
get_horz_ngrams
def get_horz_ngrams( mention, attrib="words", n_min=1, n_max=1, lower=True, from_sentence=True ): """Return all ngrams which are visually horizontally aligned with the Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention to evaluate :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :param from_sentence: If True, returns ngrams from any horizontally aligned Sentences, rather than just horizontally aligned ngrams themselves. :rtype: a *generator* of ngrams """ spans = _to_spans(mention) for span in spans: for ngram in _get_direction_ngrams( "horz", span, attrib, n_min, n_max, lower, from_sentence ): yield ngram
python
def get_horz_ngrams( mention, attrib="words", n_min=1, n_max=1, lower=True, from_sentence=True ): spans = _to_spans(mention) for span in spans: for ngram in _get_direction_ngrams( "horz", span, attrib, n_min, n_max, lower, from_sentence ): yield ngram
[ "def", "get_horz_ngrams", "(", "mention", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "lower", "=", "True", ",", "from_sentence", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention", ")", "for", ...
Return all ngrams which are visually horizontally aligned with the Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention to evaluate :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :param from_sentence: If True, returns ngrams from any horizontally aligned Sentences, rather than just horizontally aligned ngrams themselves. :rtype: a *generator* of ngrams
[ "Return", "all", "ngrams", "which", "are", "visually", "horizontally", "aligned", "with", "the", "Mention", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L168-L189
229,963
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
get_page_vert_percentile
def get_page_vert_percentile( mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT ): """Return which percentile from the TOP in the page the Mention is located in. Percentile is calculated where the top of the page is 0.0, and the bottom of the page is 1.0. For example, a Mention in at the top 1/4 of the page will have a percentile of 0.25. Page width and height are based on pt values:: Letter 612x792 Tabloid 792x1224 Ledger 1224x792 Legal 612x1008 Statement 396x612 Executive 540x720 A0 2384x3371 A1 1685x2384 A2 1190x1684 A3 842x1190 A4 595x842 A4Small 595x842 A5 420x595 B4 729x1032 B5 516x729 Folio 612x936 Quarto 610x780 10x14 720x1008 and should match the source documents. Letter size is used by default. Note that if a candidate is passed in, only the vertical percentil of its first Mention is returned. :param mention: The Mention to evaluate :param page_width: The width of the page. Default to Letter paper width. :param page_height: The heigh of the page. Default to Letter paper height. :rtype: float in [0.0, 1.0] """ span = _to_span(mention) return bbox_from_span(span).top / page_height
python
def get_page_vert_percentile( mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT ): span = _to_span(mention) return bbox_from_span(span).top / page_height
[ "def", "get_page_vert_percentile", "(", "mention", ",", "page_width", "=", "DEFAULT_WIDTH", ",", "page_height", "=", "DEFAULT_HEIGHT", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "return", "bbox_from_span", "(", "span", ")", ".", "top", "/", "page_...
Return which percentile from the TOP in the page the Mention is located in. Percentile is calculated where the top of the page is 0.0, and the bottom of the page is 1.0. For example, a Mention in at the top 1/4 of the page will have a percentile of 0.25. Page width and height are based on pt values:: Letter 612x792 Tabloid 792x1224 Ledger 1224x792 Legal 612x1008 Statement 396x612 Executive 540x720 A0 2384x3371 A1 1685x2384 A2 1190x1684 A3 842x1190 A4 595x842 A4Small 595x842 A5 420x595 B4 729x1032 B5 516x729 Folio 612x936 Quarto 610x780 10x14 720x1008 and should match the source documents. Letter size is used by default. Note that if a candidate is passed in, only the vertical percentil of its first Mention is returned. :param mention: The Mention to evaluate :param page_width: The width of the page. Default to Letter paper width. :param page_height: The heigh of the page. Default to Letter paper height. :rtype: float in [0.0, 1.0]
[ "Return", "which", "percentile", "from", "the", "TOP", "in", "the", "page", "the", "Mention", "is", "located", "in", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L285-L326
229,964
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
get_page_horz_percentile
def get_page_horz_percentile( mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT ): """Return which percentile from the LEFT in the page the Mention is located in. Percentile is calculated where the left of the page is 0.0, and the right of the page is 1.0. Page width and height are based on pt values:: Letter 612x792 Tabloid 792x1224 Ledger 1224x792 Legal 612x1008 Statement 396x612 Executive 540x720 A0 2384x3371 A1 1685x2384 A2 1190x1684 A3 842x1190 A4 595x842 A4Small 595x842 A5 420x595 B4 729x1032 B5 516x729 Folio 612x936 Quarto 610x780 10x14 720x1008 and should match the source documents. Letter size is used by default. Note that if a candidate is passed in, only the vertical percentile of its first Mention is returned. :param c: The Mention to evaluate :param page_width: The width of the page. Default to Letter paper width. :param page_height: The heigh of the page. Default to Letter paper height. :rtype: float in [0.0, 1.0] """ span = _to_span(mention) return bbox_from_span(span).left / page_width
python
def get_page_horz_percentile( mention, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT ): span = _to_span(mention) return bbox_from_span(span).left / page_width
[ "def", "get_page_horz_percentile", "(", "mention", ",", "page_width", "=", "DEFAULT_WIDTH", ",", "page_height", "=", "DEFAULT_HEIGHT", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "return", "bbox_from_span", "(", "span", ")", ".", "left", "/", "page...
Return which percentile from the LEFT in the page the Mention is located in. Percentile is calculated where the left of the page is 0.0, and the right of the page is 1.0. Page width and height are based on pt values:: Letter 612x792 Tabloid 792x1224 Ledger 1224x792 Legal 612x1008 Statement 396x612 Executive 540x720 A0 2384x3371 A1 1685x2384 A2 1190x1684 A3 842x1190 A4 595x842 A4Small 595x842 A5 420x595 B4 729x1032 B5 516x729 Folio 612x936 Quarto 610x780 10x14 720x1008 and should match the source documents. Letter size is used by default. Note that if a candidate is passed in, only the vertical percentile of its first Mention is returned. :param c: The Mention to evaluate :param page_width: The width of the page. Default to Letter paper width. :param page_height: The heigh of the page. Default to Letter paper height. :rtype: float in [0.0, 1.0]
[ "Return", "which", "percentile", "from", "the", "LEFT", "in", "the", "page", "the", "Mention", "is", "located", "in", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L329-L369
229,965
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/visual.py
get_visual_aligned_lemmas
def get_visual_aligned_lemmas(mention): """Return a generator of the lemmas aligned visually with the Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention to evaluate. :rtype: a *generator* of lemmas """ spans = _to_spans(mention) for span in spans: sentence = span.sentence doc = sentence.document # cache features for the entire document _preprocess_visual_features(doc) for aligned_lemma in sentence._aligned_lemmas: yield aligned_lemma
python
def get_visual_aligned_lemmas(mention): spans = _to_spans(mention) for span in spans: sentence = span.sentence doc = sentence.document # cache features for the entire document _preprocess_visual_features(doc) for aligned_lemma in sentence._aligned_lemmas: yield aligned_lemma
[ "def", "get_visual_aligned_lemmas", "(", "mention", ")", ":", "spans", "=", "_to_spans", "(", "mention", ")", "for", "span", "in", "spans", ":", "sentence", "=", "span", ".", "sentence", "doc", "=", "sentence", ".", "document", "# cache features for the entire d...
Return a generator of the lemmas aligned visually with the Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention to evaluate. :rtype: a *generator* of lemmas
[ "Return", "a", "generator", "of", "the", "lemmas", "aligned", "visually", "with", "the", "Mention", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/visual.py#L429-L445
229,966
HazyResearch/fonduer
src/fonduer/utils/utils.py
camel_to_under
def camel_to_under(name): """ Converts camel-case string to lowercase string separated by underscores. Written by epost (http://stackoverflow.com/questions/1175208). :param name: String to be converted :return: new String with camel-case converted to lowercase, underscored """ s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
python
def camel_to_under(name): s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
[ "def", "camel_to_under", "(", "name", ")", ":", "s1", "=", "re", ".", "sub", "(", "\"(.)([A-Z][a-z]+)\"", ",", "r\"\\1_\\2\"", ",", "name", ")", "return", "re", ".", "sub", "(", "\"([a-z0-9])([A-Z])\"", ",", "r\"\\1_\\2\"", ",", "s1", ")", ".", "lower", ...
Converts camel-case string to lowercase string separated by underscores. Written by epost (http://stackoverflow.com/questions/1175208). :param name: String to be converted :return: new String with camel-case converted to lowercase, underscored
[ "Converts", "camel", "-", "case", "string", "to", "lowercase", "string", "separated", "by", "underscores", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils.py#L5-L15
229,967
HazyResearch/fonduer
src/fonduer/utils/utils.py
get_as_dict
def get_as_dict(x): """Return an object as a dictionary of its attributes.""" if isinstance(x, dict): return x else: try: return x._asdict() except AttributeError: return x.__dict__
python
def get_as_dict(x): if isinstance(x, dict): return x else: try: return x._asdict() except AttributeError: return x.__dict__
[ "def", "get_as_dict", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "return", "x", "else", ":", "try", ":", "return", "x", ".", "_asdict", "(", ")", "except", "AttributeError", ":", "return", "x", ".", "__dict__" ]
Return an object as a dictionary of its attributes.
[ "Return", "an", "object", "as", "a", "dictionary", "of", "its", "attributes", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils.py#L18-L26
229,968
HazyResearch/fonduer
src/fonduer/utils/udf.py
UDFRunner._apply_st
def _apply_st(self, doc_loader, **kwargs): """Run the UDF single-threaded, optionally with progress bar""" udf = self.udf_class(**self.udf_init_kwargs) # Run single-thread for doc in doc_loader: if self.pb is not None: self.pb.update(1) udf.session.add_all(y for y in udf.apply(doc, **kwargs)) # Commit session and close progress bar if applicable udf.session.commit()
python
def _apply_st(self, doc_loader, **kwargs): udf = self.udf_class(**self.udf_init_kwargs) # Run single-thread for doc in doc_loader: if self.pb is not None: self.pb.update(1) udf.session.add_all(y for y in udf.apply(doc, **kwargs)) # Commit session and close progress bar if applicable udf.session.commit()
[ "def", "_apply_st", "(", "self", ",", "doc_loader", ",", "*", "*", "kwargs", ")", ":", "udf", "=", "self", ".", "udf_class", "(", "*", "*", "self", ".", "udf_init_kwargs", ")", "# Run single-thread", "for", "doc", "in", "doc_loader", ":", "if", "self", ...
Run the UDF single-threaded, optionally with progress bar
[ "Run", "the", "UDF", "single", "-", "threaded", "optionally", "with", "progress", "bar" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/udf.py#L80-L92
229,969
HazyResearch/fonduer
src/fonduer/utils/udf.py
UDFRunner._apply_mt
def _apply_mt(self, doc_loader, parallelism, **kwargs): """Run the UDF multi-threaded using python multiprocessing""" if not Meta.postgres: raise ValueError("Fonduer must use PostgreSQL as a database backend.") def fill_input_queue(in_queue, doc_loader, terminal_signal): for doc in doc_loader: in_queue.put(doc) in_queue.put(terminal_signal) # Create an input queue to feed documents to UDF workers manager = Manager() in_queue = manager.Queue() # Use an output queue to track multiprocess progress out_queue = JoinableQueue() total_count = len(doc_loader) # Start UDF Processes for i in range(parallelism): udf = self.udf_class( in_queue=in_queue, out_queue=out_queue, worker_id=i, **self.udf_init_kwargs, ) udf.apply_kwargs = kwargs self.udfs.append(udf) # Start the UDF processes, and then join on their completion for udf in self.udfs: udf.start() # Fill input queue with documents terminal_signal = UDF.QUEUE_CLOSED in_queue_filler = Process( target=fill_input_queue, args=(in_queue, doc_loader, terminal_signal) ) in_queue_filler.start() count_parsed = 0 while count_parsed < total_count: y = out_queue.get() # Update progress bar whenever an item has been processed if y == UDF.TASK_DONE: count_parsed += 1 if self.pb is not None: self.pb.update(1) else: raise ValueError("Got non-sentinal output.") in_queue_filler.join() in_queue.put(UDF.QUEUE_CLOSED) for udf in self.udfs: udf.join() # Terminate and flush the processes for udf in self.udfs: udf.terminate() self.udfs = []
python
def _apply_mt(self, doc_loader, parallelism, **kwargs): if not Meta.postgres: raise ValueError("Fonduer must use PostgreSQL as a database backend.") def fill_input_queue(in_queue, doc_loader, terminal_signal): for doc in doc_loader: in_queue.put(doc) in_queue.put(terminal_signal) # Create an input queue to feed documents to UDF workers manager = Manager() in_queue = manager.Queue() # Use an output queue to track multiprocess progress out_queue = JoinableQueue() total_count = len(doc_loader) # Start UDF Processes for i in range(parallelism): udf = self.udf_class( in_queue=in_queue, out_queue=out_queue, worker_id=i, **self.udf_init_kwargs, ) udf.apply_kwargs = kwargs self.udfs.append(udf) # Start the UDF processes, and then join on their completion for udf in self.udfs: udf.start() # Fill input queue with documents terminal_signal = UDF.QUEUE_CLOSED in_queue_filler = Process( target=fill_input_queue, args=(in_queue, doc_loader, terminal_signal) ) in_queue_filler.start() count_parsed = 0 while count_parsed < total_count: y = out_queue.get() # Update progress bar whenever an item has been processed if y == UDF.TASK_DONE: count_parsed += 1 if self.pb is not None: self.pb.update(1) else: raise ValueError("Got non-sentinal output.") in_queue_filler.join() in_queue.put(UDF.QUEUE_CLOSED) for udf in self.udfs: udf.join() # Terminate and flush the processes for udf in self.udfs: udf.terminate() self.udfs = []
[ "def", "_apply_mt", "(", "self", ",", "doc_loader", ",", "parallelism", ",", "*", "*", "kwargs", ")", ":", "if", "not", "Meta", ".", "postgres", ":", "raise", "ValueError", "(", "\"Fonduer must use PostgreSQL as a database backend.\"", ")", "def", "fill_input_queu...
Run the UDF multi-threaded using python multiprocessing
[ "Run", "the", "UDF", "multi", "-", "threaded", "using", "python", "multiprocessing" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/udf.py#L94-L154
229,970
HazyResearch/fonduer
src/fonduer/utils/models/annotation.py
AnnotationMixin.candidate
def candidate(cls): """The ``Candidate``.""" return relationship( "Candidate", backref=backref( camel_to_under(cls.__name__) + "s", cascade="all, delete-orphan", cascade_backrefs=False, ), cascade_backrefs=False, )
python
def candidate(cls): return relationship( "Candidate", backref=backref( camel_to_under(cls.__name__) + "s", cascade="all, delete-orphan", cascade_backrefs=False, ), cascade_backrefs=False, )
[ "def", "candidate", "(", "cls", ")", ":", "return", "relationship", "(", "\"Candidate\"", ",", "backref", "=", "backref", "(", "camel_to_under", "(", "cls", ".", "__name__", ")", "+", "\"s\"", ",", "cascade", "=", "\"all, delete-orphan\"", ",", "cascade_backre...
The ``Candidate``.
[ "The", "Candidate", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/models/annotation.py#L79-L89
229,971
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
same_document
def same_document(c): """Return True if all Mentions in the given candidate are from the same Document. :param c: The candidate whose Mentions are being compared :rtype: boolean """ return all( _to_span(c[i]).sentence.document is not None and _to_span(c[i]).sentence.document == _to_span(c[0]).sentence.document for i in range(len(c)) )
python
def same_document(c): return all( _to_span(c[i]).sentence.document is not None and _to_span(c[i]).sentence.document == _to_span(c[0]).sentence.document for i in range(len(c)) )
[ "def", "same_document", "(", "c", ")", ":", "return", "all", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "document", "is", "not", "None", "and", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "document", ...
Return True if all Mentions in the given candidate are from the same Document. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Document", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L21-L31
229,972
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
same_table
def same_table(c): """Return True if all Mentions in the given candidate are from the same Table. :param c: The candidate whose Mentions are being compared :rtype: boolean """ return all( _to_span(c[i]).sentence.is_tabular() and _to_span(c[i]).sentence.table == _to_span(c[0]).sentence.table for i in range(len(c)) )
python
def same_table(c): return all( _to_span(c[i]).sentence.is_tabular() and _to_span(c[i]).sentence.table == _to_span(c[0]).sentence.table for i in range(len(c)) )
[ "def", "same_table", "(", "c", ")", ":", "return", "all", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "is_tabular", "(", ")", "and", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "table", "==", "_to_span...
Return True if all Mentions in the given candidate are from the same Table. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Table", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L34-L44
229,973
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
same_row
def same_row(c): """Return True if all Mentions in the given candidate are from the same Row. :param c: The candidate whose Mentions are being compared :rtype: boolean """ return same_table(c) and all( is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) for i in range(len(c)) )
python
def same_row(c): return same_table(c) and all( is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) for i in range(len(c)) )
[ "def", "same_row", "(", "c", ")", ":", "return", "same_table", "(", "c", ")", "and", "all", "(", "is_row_aligned", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ",", "_to_span", "(", "c", "[", "0", "]", ")", ".", "sentence", ")"...
Return True if all Mentions in the given candidate are from the same Row. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Row", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L47-L56
229,974
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
same_col
def same_col(c): """Return True if all Mentions in the given candidate are from the same Col. :param c: The candidate whose Mentions are being compared :rtype: boolean """ return same_table(c) and all( is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) for i in range(len(c)) )
python
def same_col(c): return same_table(c) and all( is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) for i in range(len(c)) )
[ "def", "same_col", "(", "c", ")", ":", "return", "same_table", "(", "c", ")", "and", "all", "(", "is_col_aligned", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ",", "_to_span", "(", "c", "[", "0", "]", ")", ".", "sentence", ")"...
Return True if all Mentions in the given candidate are from the same Col. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Col", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L59-L68
229,975
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
is_tabular_aligned
def is_tabular_aligned(c): """Return True if all Mentions in the given candidate are from the same Row or Col. :param c: The candidate whose Mentions are being compared :rtype: boolean """ return same_table(c) and ( is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) or is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) for i in range(len(c)) )
python
def is_tabular_aligned(c): return same_table(c) and ( is_col_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) or is_row_aligned(_to_span(c[i]).sentence, _to_span(c[0]).sentence) for i in range(len(c)) )
[ "def", "is_tabular_aligned", "(", "c", ")", ":", "return", "same_table", "(", "c", ")", "and", "(", "is_col_aligned", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ",", "_to_span", "(", "c", "[", "0", "]", ")", ".", "sentence", ")...
Return True if all Mentions in the given candidate are from the same Row or Col. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Row", "or", "Col", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L71-L81
229,976
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
same_cell
def same_cell(c): """Return True if all Mentions in the given candidate are from the same Cell. :param c: The candidate whose Mentions are being compared :rtype: boolean """ return all( _to_span(c[i]).sentence.cell is not None and _to_span(c[i]).sentence.cell == _to_span(c[0]).sentence.cell for i in range(len(c)) )
python
def same_cell(c): return all( _to_span(c[i]).sentence.cell is not None and _to_span(c[i]).sentence.cell == _to_span(c[0]).sentence.cell for i in range(len(c)) )
[ "def", "same_cell", "(", "c", ")", ":", "return", "all", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "cell", "is", "not", "None", "and", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", ".", "cell", "==", "_to...
Return True if all Mentions in the given candidate are from the same Cell. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Cell", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L84-L94
229,977
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
same_sentence
def same_sentence(c): """Return True if all Mentions in the given candidate are from the same Sentence. :param c: The candidate whose Mentions are being compared :rtype: boolean """ return all( _to_span(c[i]).sentence is not None and _to_span(c[i]).sentence == _to_span(c[0]).sentence for i in range(len(c)) )
python
def same_sentence(c): return all( _to_span(c[i]).sentence is not None and _to_span(c[i]).sentence == _to_span(c[0]).sentence for i in range(len(c)) )
[ "def", "same_sentence", "(", "c", ")", ":", "return", "all", "(", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", "is", "not", "None", "and", "_to_span", "(", "c", "[", "i", "]", ")", ".", "sentence", "==", "_to_span", "(", "c", "[", ...
Return True if all Mentions in the given candidate are from the same Sentence. :param c: The candidate whose Mentions are being compared :rtype: boolean
[ "Return", "True", "if", "all", "Mentions", "in", "the", "given", "candidate", "are", "from", "the", "same", "Sentence", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L97-L107
229,978
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_max_col_num
def get_max_col_num(mention): """Return the largest column number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its last Mention. :rtype: integer or None """ span = _to_span(mention, idx=-1) if span.sentence.is_tabular(): return span.sentence.cell.col_end else: return None
python
def get_max_col_num(mention): span = _to_span(mention, idx=-1) if span.sentence.is_tabular(): return span.sentence.cell.col_end else: return None
[ "def", "get_max_col_num", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ",", "idx", "=", "-", "1", ")", "if", "span", ".", "sentence", ".", "is_tabular", "(", ")", ":", "return", "span", ".", "sentence", ".", "cell", ".", "col_en...
Return the largest column number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its last Mention. :rtype: integer or None
[ "Return", "the", "largest", "column", "number", "that", "a", "Mention", "occupies", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L110-L121
229,979
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_min_col_num
def get_min_col_num(mention): """Return the lowest column number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :rtype: integer or None """ span = _to_span(mention) if span.sentence.is_tabular(): return span.sentence.cell.col_start else: return None
python
def get_min_col_num(mention): span = _to_span(mention) if span.sentence.is_tabular(): return span.sentence.cell.col_start else: return None
[ "def", "get_min_col_num", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "if", "span", ".", "sentence", ".", "is_tabular", "(", ")", ":", "return", "span", ".", "sentence", ".", "cell", ".", "col_start", "else", ":", "return", ...
Return the lowest column number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :rtype: integer or None
[ "Return", "the", "lowest", "column", "number", "that", "a", "Mention", "occupies", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L124-L135
229,980
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_min_row_num
def get_min_row_num(mention): """Return the lowest row number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :rtype: integer or None """ span = _to_span(mention) if span.sentence.is_tabular(): return span.sentence.cell.row_start else: return None
python
def get_min_row_num(mention): span = _to_span(mention) if span.sentence.is_tabular(): return span.sentence.cell.row_start else: return None
[ "def", "get_min_row_num", "(", "mention", ")", ":", "span", "=", "_to_span", "(", "mention", ")", "if", "span", ".", "sentence", ".", "is_tabular", "(", ")", ":", "return", "span", ".", "sentence", ".", "cell", ".", "row_start", "else", ":", "return", ...
Return the lowest row number that a Mention occupies. :param mention: The Mention to evaluate. If a candidate is given, default to its first Mention. :rtype: integer or None
[ "Return", "the", "lowest", "row", "number", "that", "a", "Mention", "occupies", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L138-L149
229,981
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_sentence_ngrams
def get_sentence_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True): """Get the ngrams that are in the Sentence of the given Mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Sentence is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ spans = _to_spans(mention) for span in spans: for ngram in get_left_ngrams( span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram for ngram in get_right_ngrams( span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram
python
def get_sentence_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True): spans = _to_spans(mention) for span in spans: for ngram in get_left_ngrams( span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram for ngram in get_right_ngrams( span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram
[ "def", "get_sentence_ngrams", "(", "mention", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "lower", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention", ")", "for", "span", "in", "spans", ":", "fo...
Get the ngrams that are in the Sentence of the given Mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Sentence is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams
[ "Get", "the", "ngrams", "that", "are", "in", "the", "Sentence", "of", "the", "given", "Mention", "not", "including", "itself", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L152-L174
229,982
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_neighbor_sentence_ngrams
def get_neighbor_sentence_ngrams( mention, d=1, attrib="words", n_min=1, n_max=1, lower=True ): """Get the ngrams that are in the neighoring Sentences of the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose neighbor Sentences are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ spans = _to_spans(mention) for span in spans: for ngram in chain.from_iterable( [ tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower ) for sentence in span.sentence.document.sentences if abs(sentence.position - span.sentence.position) <= d and sentence != span.sentence ] ): yield ngram
python
def get_neighbor_sentence_ngrams( mention, d=1, attrib="words", n_min=1, n_max=1, lower=True ): spans = _to_spans(mention) for span in spans: for ngram in chain.from_iterable( [ tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower ) for sentence in span.sentence.document.sentences if abs(sentence.position - span.sentence.position) <= d and sentence != span.sentence ] ): yield ngram
[ "def", "get_neighbor_sentence_ngrams", "(", "mention", ",", "d", "=", "1", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "lower", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention", ")", "for", "s...
Get the ngrams that are in the neighoring Sentences of the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose neighbor Sentences are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams
[ "Get", "the", "ngrams", "that", "are", "in", "the", "neighoring", "Sentences", "of", "the", "given", "Mention", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L177-L203
229,983
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_cell_ngrams
def get_cell_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True): """Get the ngrams that are in the Cell of the given mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Cell is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ spans = _to_spans(mention) for span in spans: for ngram in get_sentence_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram if span.sentence.is_tabular(): for ngram in chain.from_iterable( [ tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower ) for sentence in _get_table_cells(span.sentence.table)[ span.sentence.cell ] if sentence != span.sentence ] ): yield ngram
python
def get_cell_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True): spans = _to_spans(mention) for span in spans: for ngram in get_sentence_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram if span.sentence.is_tabular(): for ngram in chain.from_iterable( [ tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower ) for sentence in _get_table_cells(span.sentence.table)[ span.sentence.cell ] if sentence != span.sentence ] ): yield ngram
[ "def", "get_cell_ngrams", "(", "mention", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "lower", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention", ")", "for", "span", "in", "spans", ":", "for", ...
Get the ngrams that are in the Cell of the given mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Cell is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams
[ "Get", "the", "ngrams", "that", "are", "in", "the", "Cell", "of", "the", "given", "mention", "not", "including", "itself", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L206-L236
229,984
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_neighbor_cell_ngrams
def get_neighbor_cell_ngrams( mention, dist=1, directions=False, attrib="words", n_min=1, n_max=1, lower=True ): """ Get the ngrams from all Cells that are within a given Cell distance in one direction from the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. If `directions=True``, each ngram will be returned with a direction in {'UP', 'DOWN', 'LEFT', 'RIGHT'}. :param mention: The Mention whose neighbor Cells are being searched :param dist: The Cell distance within which a neighbor Cell must be to be considered :param directions: A Boolean expressing whether or not to return the direction of each ngram :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams (or (ngram, direction) tuples if directions=True) """ # TODO: Fix this to be more efficient (optimize with SQL query) spans = _to_spans(mention) for span in spans: for ngram in get_sentence_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram if span.sentence.is_tabular(): root_cell = span.sentence.cell for sentence in chain.from_iterable( [ _get_aligned_sentences(root_cell, "row"), _get_aligned_sentences(root_cell, "col"), ] ): row_diff = min_row_diff(sentence, root_cell, absolute=False) col_diff = min_col_diff(sentence, root_cell, absolute=False) if ( (row_diff or col_diff) and not (row_diff and col_diff) and abs(row_diff) + abs(col_diff) <= dist ): if directions: direction = "" if col_diff == 0: if 0 < row_diff and row_diff <= dist: direction = "UP" elif 0 > row_diff and row_diff >= -dist: direction = "DOWN" elif row_diff == 0: if 0 < col_diff and col_diff <= dist: direction = "RIGHT" elif 0 > col_diff and col_diff >= -dist: direction = "LEFT" for ngram in tokens_to_ngrams( getattr(sentence, attrib), 
n_min=n_min, n_max=n_max, lower=lower, ): yield (ngram, direction) else: for ngram in tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower, ): yield ngram
python
def get_neighbor_cell_ngrams( mention, dist=1, directions=False, attrib="words", n_min=1, n_max=1, lower=True ): # TODO: Fix this to be more efficient (optimize with SQL query) spans = _to_spans(mention) for span in spans: for ngram in get_sentence_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram if span.sentence.is_tabular(): root_cell = span.sentence.cell for sentence in chain.from_iterable( [ _get_aligned_sentences(root_cell, "row"), _get_aligned_sentences(root_cell, "col"), ] ): row_diff = min_row_diff(sentence, root_cell, absolute=False) col_diff = min_col_diff(sentence, root_cell, absolute=False) if ( (row_diff or col_diff) and not (row_diff and col_diff) and abs(row_diff) + abs(col_diff) <= dist ): if directions: direction = "" if col_diff == 0: if 0 < row_diff and row_diff <= dist: direction = "UP" elif 0 > row_diff and row_diff >= -dist: direction = "DOWN" elif row_diff == 0: if 0 < col_diff and col_diff <= dist: direction = "RIGHT" elif 0 > col_diff and col_diff >= -dist: direction = "LEFT" for ngram in tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower, ): yield (ngram, direction) else: for ngram in tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower, ): yield ngram
[ "def", "get_neighbor_cell_ngrams", "(", "mention", ",", "dist", "=", "1", ",", "directions", "=", "False", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "lower", "=", "True", ")", ":", "# TODO: Fix this to be more ef...
Get the ngrams from all Cells that are within a given Cell distance in one direction from the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. If `directions=True``, each ngram will be returned with a direction in {'UP', 'DOWN', 'LEFT', 'RIGHT'}. :param mention: The Mention whose neighbor Cells are being searched :param dist: The Cell distance within which a neighbor Cell must be to be considered :param directions: A Boolean expressing whether or not to return the direction of each ngram :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams (or (ngram, direction) tuples if directions=True)
[ "Get", "the", "ngrams", "from", "all", "Cells", "that", "are", "within", "a", "given", "Cell", "distance", "in", "one", "direction", "from", "the", "given", "Mention", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L239-L309
229,985
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_col_ngrams
def get_col_ngrams( mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True ): """Get the ngrams from all Cells that are in the same column as the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose column Cells are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param spread: The number of cols left and right to also consider "aligned". :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ spans = _to_spans(mention) for span in spans: for ngram in _get_axis_ngrams( span, axis="col", attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower, ): yield ngram
python
def get_col_ngrams( mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True ): spans = _to_spans(mention) for span in spans: for ngram in _get_axis_ngrams( span, axis="col", attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower, ): yield ngram
[ "def", "get_col_ngrams", "(", "mention", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "spread", "=", "[", "0", ",", "0", "]", ",", "lower", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention", ...
Get the ngrams from all Cells that are in the same column as the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose column Cells are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param spread: The number of cols left and right to also consider "aligned". :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams
[ "Get", "the", "ngrams", "from", "all", "Cells", "that", "are", "in", "the", "same", "column", "as", "the", "given", "Mention", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L341-L367
229,986
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_aligned_ngrams
def get_aligned_ngrams( mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True ): """Get the ngrams from all Cells in the same row or column as the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose row and column Cells are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param spread: The number of rows/cols above/below/left/right to also consider "aligned". :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ spans = _to_spans(mention) for span in spans: for ngram in get_row_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower ): yield ngram for ngram in get_col_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower ): yield ngram
python
def get_aligned_ngrams( mention, attrib="words", n_min=1, n_max=1, spread=[0, 0], lower=True ): spans = _to_spans(mention) for span in spans: for ngram in get_row_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower ): yield ngram for ngram in get_col_ngrams( span, attrib=attrib, n_min=n_min, n_max=n_max, spread=spread, lower=lower ): yield ngram
[ "def", "get_aligned_ngrams", "(", "mention", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "spread", "=", "[", "0", ",", "0", "]", ",", "lower", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention...
Get the ngrams from all Cells in the same row or column as the given Mention. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose row and column Cells are being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param spread: The number of rows/cols above/below/left/right to also consider "aligned". :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams
[ "Get", "the", "ngrams", "from", "all", "Cells", "in", "the", "same", "row", "or", "column", "as", "the", "given", "Mention", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L370-L396
229,987
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
get_head_ngrams
def get_head_ngrams(mention, axis=None, attrib="words", n_min=1, n_max=1, lower=True): """Get the ngrams from the cell in the head of the row or column. More specifically, this returns the ngrams in the leftmost cell in a row and/or the ngrams in the topmost cell in the column, depending on the axis parameter. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose head Cells are being returned :param axis: Which axis {'row', 'col'} to search. If None, then both row and col are searched. :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ spans = _to_spans(mention) axes = (axis,) if axis else ("row", "col") for span in spans: if span.sentence.is_tabular(): for axis in axes: if getattr(span.sentence, _other_axis(axis) + "_start") == 0: return for sentence in getattr( _get_head_cell(span.sentence.cell, axis), "sentences", [] ): for ngram in tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower ): yield ngram
python
def get_head_ngrams(mention, axis=None, attrib="words", n_min=1, n_max=1, lower=True): spans = _to_spans(mention) axes = (axis,) if axis else ("row", "col") for span in spans: if span.sentence.is_tabular(): for axis in axes: if getattr(span.sentence, _other_axis(axis) + "_start") == 0: return for sentence in getattr( _get_head_cell(span.sentence.cell, axis), "sentences", [] ): for ngram in tokens_to_ngrams( getattr(sentence, attrib), n_min=n_min, n_max=n_max, lower=lower ): yield ngram
[ "def", "get_head_ngrams", "(", "mention", ",", "axis", "=", "None", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "lower", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention", ")", "axes", "=", "...
Get the ngrams from the cell in the head of the row or column. More specifically, this returns the ngrams in the leftmost cell in a row and/or the ngrams in the topmost cell in the column, depending on the axis parameter. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose head Cells are being returned :param axis: Which axis {'row', 'col'} to search. If None, then both row and col are searched. :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams
[ "Get", "the", "ngrams", "from", "the", "cell", "in", "the", "head", "of", "the", "row", "or", "column", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L399-L429
229,988
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
_get_table_cells
def _get_table_cells(table): """Helper function with caching for table cells and the cells' sentences. This function significantly improves the speed of `get_row_ngrams` primarily by reducing the number of queries that are made (which were previously the bottleneck. Rather than taking a single mention, then its sentence, then its table, then all the cells in the table, then all the sentences in each cell, and performing operations on that series of queries, this performs a single query for all the sentences in a table and returns all of the cells and the cells sentences directly. :param table: the Table object to cache. :return: an iterator of (Cell, [Sentence._asdict(), ...]) tuples. """ sent_map = defaultdict(list) for sent in table.sentences: if sent.is_tabular(): sent_map[sent.cell].append(sent) return sent_map
python
def _get_table_cells(table): sent_map = defaultdict(list) for sent in table.sentences: if sent.is_tabular(): sent_map[sent.cell].append(sent) return sent_map
[ "def", "_get_table_cells", "(", "table", ")", ":", "sent_map", "=", "defaultdict", "(", "list", ")", "for", "sent", "in", "table", ".", "sentences", ":", "if", "sent", ".", "is_tabular", "(", ")", ":", "sent_map", "[", "sent", ".", "cell", "]", ".", ...
Helper function with caching for table cells and the cells' sentences. This function significantly improves the speed of `get_row_ngrams` primarily by reducing the number of queries that are made (which were previously the bottleneck. Rather than taking a single mention, then its sentence, then its table, then all the cells in the table, then all the sentences in each cell, and performing operations on that series of queries, this performs a single query for all the sentences in a table and returns all of the cells and the cells sentences directly. :param table: the Table object to cache. :return: an iterator of (Cell, [Sentence._asdict(), ...]) tuples.
[ "Helper", "function", "with", "caching", "for", "table", "cells", "and", "the", "cells", "sentences", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L444-L462
229,989
HazyResearch/fonduer
src/fonduer/learning/disc_models/modules/loss.py
SoftCrossEntropyLoss.forward
def forward(self, input, target): """ Calculate the loss :param input: prediction logits :param target: target probabilities :return: loss """ n, k = input.shape losses = input.new_zeros(n) for i in range(k): cls_idx = input.new_full((n,), i, dtype=torch.long) loss = F.cross_entropy(input, cls_idx, reduction="none") if self.weight is not None: loss = loss * self.weight[i] losses += target[:, i].float() * loss if self.reduction == "mean": losses = losses.mean() elif self.reduction == "sum": losses = losses.sum() elif self.reduction != "none": raise ValueError(f"Unrecognized reduction: {self.reduction}") return losses
python
def forward(self, input, target): n, k = input.shape losses = input.new_zeros(n) for i in range(k): cls_idx = input.new_full((n,), i, dtype=torch.long) loss = F.cross_entropy(input, cls_idx, reduction="none") if self.weight is not None: loss = loss * self.weight[i] losses += target[:, i].float() * loss if self.reduction == "mean": losses = losses.mean() elif self.reduction == "sum": losses = losses.sum() elif self.reduction != "none": raise ValueError(f"Unrecognized reduction: {self.reduction}") return losses
[ "def", "forward", "(", "self", ",", "input", ",", "target", ")", ":", "n", ",", "k", "=", "input", ".", "shape", "losses", "=", "input", ".", "new_zeros", "(", "n", ")", "for", "i", "in", "range", "(", "k", ")", ":", "cls_idx", "=", "input", "....
Calculate the loss :param input: prediction logits :param target: target probabilities :return: loss
[ "Calculate", "the", "loss" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/disc_models/modules/loss.py#L28-L54
229,990
HazyResearch/fonduer
src/fonduer/utils/utils_visual.py
bbox_horz_aligned
def bbox_horz_aligned(box1, box2): """ Returns true if the vertical center point of either span is within the vertical range of the other """ if not (box1 and box2): return False # NEW: any overlap counts # return box1.top <= box2.bottom and box2.top <= box1.bottom box1_top = box1.top + 1.5 box2_top = box2.top + 1.5 box1_bottom = box1.bottom - 1.5 box2_bottom = box2.bottom - 1.5 return not (box1_top > box2_bottom or box2_top > box1_bottom)
python
def bbox_horz_aligned(box1, box2): if not (box1 and box2): return False # NEW: any overlap counts # return box1.top <= box2.bottom and box2.top <= box1.bottom box1_top = box1.top + 1.5 box2_top = box2.top + 1.5 box1_bottom = box1.bottom - 1.5 box2_bottom = box2.bottom - 1.5 return not (box1_top > box2_bottom or box2_top > box1_bottom)
[ "def", "bbox_horz_aligned", "(", "box1", ",", "box2", ")", ":", "if", "not", "(", "box1", "and", "box2", ")", ":", "return", "False", "# NEW: any overlap counts", "# return box1.top <= box2.bottom and box2.top <= box1.bottom", "box1_top", "=", "box1", ".", "top", ...
Returns true if the vertical center point of either span is within the vertical range of the other
[ "Returns", "true", "if", "the", "vertical", "center", "point", "of", "either", "span", "is", "within", "the", "vertical", "range", "of", "the", "other" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_visual.py#L36-L49
229,991
HazyResearch/fonduer
src/fonduer/utils/utils_visual.py
bbox_vert_aligned
def bbox_vert_aligned(box1, box2): """ Returns true if the horizontal center point of either span is within the horizontal range of the other """ if not (box1 and box2): return False # NEW: any overlap counts # return box1.left <= box2.right and box2.left <= box1.right box1_left = box1.left + 1.5 box2_left = box2.left + 1.5 box1_right = box1.right - 1.5 box2_right = box2.right - 1.5 return not (box1_left > box2_right or box2_left > box1_right)
python
def bbox_vert_aligned(box1, box2): if not (box1 and box2): return False # NEW: any overlap counts # return box1.left <= box2.right and box2.left <= box1.right box1_left = box1.left + 1.5 box2_left = box2.left + 1.5 box1_right = box1.right - 1.5 box2_right = box2.right - 1.5 return not (box1_left > box2_right or box2_left > box1_right)
[ "def", "bbox_vert_aligned", "(", "box1", ",", "box2", ")", ":", "if", "not", "(", "box1", "and", "box2", ")", ":", "return", "False", "# NEW: any overlap counts", "# return box1.left <= box2.right and box2.left <= box1.right", "box1_left", "=", "box1", ".", "left",...
Returns true if the horizontal center point of either span is within the horizontal range of the other
[ "Returns", "true", "if", "the", "horizontal", "center", "point", "of", "either", "span", "is", "within", "the", "horizontal", "range", "of", "the", "other" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_visual.py#L59-L72
229,992
HazyResearch/fonduer
src/fonduer/utils/utils_visual.py
bbox_vert_aligned_left
def bbox_vert_aligned_left(box1, box2): """ Returns true if the left boundary of both boxes is within 2 pts """ if not (box1 and box2): return False return abs(box1.left - box2.left) <= 2
python
def bbox_vert_aligned_left(box1, box2): if not (box1 and box2): return False return abs(box1.left - box2.left) <= 2
[ "def", "bbox_vert_aligned_left", "(", "box1", ",", "box2", ")", ":", "if", "not", "(", "box1", "and", "box2", ")", ":", "return", "False", "return", "abs", "(", "box1", ".", "left", "-", "box2", ".", "left", ")", "<=", "2" ]
Returns true if the left boundary of both boxes is within 2 pts
[ "Returns", "true", "if", "the", "left", "boundary", "of", "both", "boxes", "is", "within", "2", "pts" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_visual.py#L79-L85
229,993
HazyResearch/fonduer
src/fonduer/utils/utils_visual.py
bbox_vert_aligned_right
def bbox_vert_aligned_right(box1, box2): """ Returns true if the right boundary of both boxes is within 2 pts """ if not (box1 and box2): return False return abs(box1.right - box2.right) <= 2
python
def bbox_vert_aligned_right(box1, box2): if not (box1 and box2): return False return abs(box1.right - box2.right) <= 2
[ "def", "bbox_vert_aligned_right", "(", "box1", ",", "box2", ")", ":", "if", "not", "(", "box1", "and", "box2", ")", ":", "return", "False", "return", "abs", "(", "box1", ".", "right", "-", "box2", ".", "right", ")", "<=", "2" ]
Returns true if the right boundary of both boxes is within 2 pts
[ "Returns", "true", "if", "the", "right", "boundary", "of", "both", "boxes", "is", "within", "2", "pts" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_visual.py#L88-L94
229,994
HazyResearch/fonduer
src/fonduer/utils/utils_visual.py
bbox_vert_aligned_center
def bbox_vert_aligned_center(box1, box2): """ Returns true if the center of both boxes is within 5 pts """ if not (box1 and box2): return False return abs(((box1.right + box1.left) / 2.0) - ((box2.right + box2.left) / 2.0)) <= 5
python
def bbox_vert_aligned_center(box1, box2): if not (box1 and box2): return False return abs(((box1.right + box1.left) / 2.0) - ((box2.right + box2.left) / 2.0)) <= 5
[ "def", "bbox_vert_aligned_center", "(", "box1", ",", "box2", ")", ":", "if", "not", "(", "box1", "and", "box2", ")", ":", "return", "False", "return", "abs", "(", "(", "(", "box1", ".", "right", "+", "box1", ".", "left", ")", "/", "2.0", ")", "-", ...
Returns true if the center of both boxes is within 5 pts
[ "Returns", "true", "if", "the", "center", "of", "both", "boxes", "is", "within", "5", "pts" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/utils_visual.py#L97-L103
229,995
HazyResearch/fonduer
src/fonduer/candidates/matchers.py
_NgramMatcher._is_subspan
def _is_subspan(self, m, span): """ Tests if mention m is subspan of span, where span is defined specific to mention type. """ return ( m.sentence.id == span[0] and m.char_start >= span[1] and m.char_end <= span[2] )
python
def _is_subspan(self, m, span): return ( m.sentence.id == span[0] and m.char_start >= span[1] and m.char_end <= span[2] )
[ "def", "_is_subspan", "(", "self", ",", "m", ",", "span", ")", ":", "return", "(", "m", ".", "sentence", ".", "id", "==", "span", "[", "0", "]", "and", "m", ".", "char_start", ">=", "span", "[", "1", "]", "and", "m", ".", "char_end", "<=", "spa...
Tests if mention m is subspan of span, where span is defined specific to mention type.
[ "Tests", "if", "mention", "m", "is", "subspan", "of", "span", "where", "span", "is", "defined", "specific", "to", "mention", "type", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/matchers.py#L95-L104
229,996
HazyResearch/fonduer
src/fonduer/candidates/matchers.py
_NgramMatcher._get_span
def _get_span(self, m): """ Gets a tuple that identifies a span for the specific mention class that m belongs to. """ return (m.sentence.id, m.char_start, m.char_end)
python
def _get_span(self, m): return (m.sentence.id, m.char_start, m.char_end)
[ "def", "_get_span", "(", "self", ",", "m", ")", ":", "return", "(", "m", ".", "sentence", ".", "id", ",", "m", ".", "char_start", ",", "m", ".", "char_end", ")" ]
Gets a tuple that identifies a span for the specific mention class that m belongs to.
[ "Gets", "a", "tuple", "that", "identifies", "a", "span", "for", "the", "specific", "mention", "class", "that", "m", "belongs", "to", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/matchers.py#L106-L111
229,997
HazyResearch/fonduer
src/fonduer/candidates/matchers.py
_FigureMatcher._is_subspan
def _is_subspan(self, m, span): """Tests if mention m does exist""" return m.figure.document.id == span[0] and m.figure.position == span[1]
python
def _is_subspan(self, m, span): return m.figure.document.id == span[0] and m.figure.position == span[1]
[ "def", "_is_subspan", "(", "self", ",", "m", ",", "span", ")", ":", "return", "m", ".", "figure", ".", "document", ".", "id", "==", "span", "[", "0", "]", "and", "m", ".", "figure", ".", "position", "==", "span", "[", "1", "]" ]
Tests if mention m does exist
[ "Tests", "if", "mention", "m", "does", "exist" ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/matchers.py#L472-L474
229,998
HazyResearch/fonduer
src/fonduer/candidates/matchers.py
_FigureMatcher._get_span
def _get_span(self, m): """ Gets a tuple that identifies a figure for the specific mention class that m belongs to. """ return (m.figure.document.id, m.figure.position)
python
def _get_span(self, m): return (m.figure.document.id, m.figure.position)
[ "def", "_get_span", "(", "self", ",", "m", ")", ":", "return", "(", "m", ".", "figure", ".", "document", ".", "id", ",", "m", ".", "figure", ".", "position", ")" ]
Gets a tuple that identifies a figure for the specific mention class that m belongs to.
[ "Gets", "a", "tuple", "that", "identifies", "a", "figure", "for", "the", "specific", "mention", "class", "that", "m", "belongs", "to", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/matchers.py#L476-L481
229,999
HazyResearch/fonduer
src/fonduer/candidates/models/candidate.py
candidate_subclass
def candidate_subclass( class_name, args, table_name=None, cardinality=None, values=None ): """ Creates and returns a Candidate subclass with provided argument names, which are Context type. Creates the table in DB if does not exist yet. Import using: .. code-block:: python from fonduer.candidates.models import candidate_subclass :param class_name: The name of the class, should be "camel case" e.g. NewCandidate :param args: A list of names of constituent arguments, which refer to the Contexts--representing mentions--that comprise the candidate :param table_name: The name of the corresponding table in DB; if not provided, is converted from camel case by default, e.g. new_candidate :param cardinality: The cardinality of the variable corresponding to the Candidate. By default is 2 i.e. is a binary value, e.g. is or is not a true mention. """ if table_name is None: table_name = camel_to_under(class_name) # If cardinality and values are None, default to binary classification if cardinality is None and values is None: values = [True, False] cardinality = 2 # Else use values if present, and validate proper input elif values is not None: if cardinality is not None and len(values) != cardinality: raise ValueError("Number of values must match cardinality.") if None in values: raise ValueError("`None` is a protected value.") # Note that bools are instances of ints in Python... if any([isinstance(v, int) and not isinstance(v, bool) for v in values]): raise ValueError( ( "Default usage of values is consecutive integers." "Leave values unset if trying to define values as integers." 
) ) cardinality = len(values) # If cardinality is specified but not values, fill in with ints elif cardinality is not None: values = list(range(cardinality)) class_spec = (args, table_name, cardinality, values) if class_name in candidate_subclasses: if class_spec == candidate_subclasses[class_name][1]: return candidate_subclasses[class_name][0] else: raise ValueError( f"Candidate subclass {class_name} " f"already exists in memory with incompatible " f"specification: {candidate_subclasses[class_name][1]}" ) else: # Set the class attributes == the columns in the database class_attribs = { # Declares name for storage table "__tablename__": table_name, # Connects candidate_subclass records to generic Candidate records "id": Column( Integer, ForeignKey("candidate.id", ondelete="CASCADE"), primary_key=True, ), # Store values & cardinality information in the class only "values": values, "cardinality": cardinality, # Polymorphism information for SQLAlchemy "__mapper_args__": {"polymorphic_identity": table_name}, # Helper method to get argument names "__argnames__": [_.__tablename__ for _ in args], "mentions": args, } class_attribs["document_id"] = Column( Integer, ForeignKey("document.id", ondelete="CASCADE") ) class_attribs["document"] = relationship( "Document", backref=backref(table_name + "s", cascade="all, delete-orphan"), foreign_keys=class_attribs["document_id"], ) # Create named arguments, i.e. the entity mentions comprising the # relation mention. 
unique_args = [] for arg in args: # Primary arguments are constituent Contexts, and their ids class_attribs[arg.__tablename__ + "_id"] = Column( Integer, ForeignKey(arg.__tablename__ + ".id", ondelete="CASCADE") ) class_attribs[arg.__tablename__] = relationship( arg.__name__, backref=backref( table_name + "_" + arg.__tablename__ + "s", cascade_backrefs=False, cascade="all, delete-orphan", ), cascade_backrefs=False, foreign_keys=class_attribs[arg.__tablename__ + "_id"], ) unique_args.append(class_attribs[arg.__tablename__ + "_id"]) # Add unique constraints to the arguments class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),) # Create class C = type(class_name, (Candidate,), class_attribs) # Create table in DB if not Meta.engine.dialect.has_table(Meta.engine, table_name): C.__table__.create(bind=Meta.engine) candidate_subclasses[class_name] = C, class_spec return C
python
def candidate_subclass( class_name, args, table_name=None, cardinality=None, values=None ): if table_name is None: table_name = camel_to_under(class_name) # If cardinality and values are None, default to binary classification if cardinality is None and values is None: values = [True, False] cardinality = 2 # Else use values if present, and validate proper input elif values is not None: if cardinality is not None and len(values) != cardinality: raise ValueError("Number of values must match cardinality.") if None in values: raise ValueError("`None` is a protected value.") # Note that bools are instances of ints in Python... if any([isinstance(v, int) and not isinstance(v, bool) for v in values]): raise ValueError( ( "Default usage of values is consecutive integers." "Leave values unset if trying to define values as integers." ) ) cardinality = len(values) # If cardinality is specified but not values, fill in with ints elif cardinality is not None: values = list(range(cardinality)) class_spec = (args, table_name, cardinality, values) if class_name in candidate_subclasses: if class_spec == candidate_subclasses[class_name][1]: return candidate_subclasses[class_name][0] else: raise ValueError( f"Candidate subclass {class_name} " f"already exists in memory with incompatible " f"specification: {candidate_subclasses[class_name][1]}" ) else: # Set the class attributes == the columns in the database class_attribs = { # Declares name for storage table "__tablename__": table_name, # Connects candidate_subclass records to generic Candidate records "id": Column( Integer, ForeignKey("candidate.id", ondelete="CASCADE"), primary_key=True, ), # Store values & cardinality information in the class only "values": values, "cardinality": cardinality, # Polymorphism information for SQLAlchemy "__mapper_args__": {"polymorphic_identity": table_name}, # Helper method to get argument names "__argnames__": [_.__tablename__ for _ in args], "mentions": args, } class_attribs["document_id"] = 
Column( Integer, ForeignKey("document.id", ondelete="CASCADE") ) class_attribs["document"] = relationship( "Document", backref=backref(table_name + "s", cascade="all, delete-orphan"), foreign_keys=class_attribs["document_id"], ) # Create named arguments, i.e. the entity mentions comprising the # relation mention. unique_args = [] for arg in args: # Primary arguments are constituent Contexts, and their ids class_attribs[arg.__tablename__ + "_id"] = Column( Integer, ForeignKey(arg.__tablename__ + ".id", ondelete="CASCADE") ) class_attribs[arg.__tablename__] = relationship( arg.__name__, backref=backref( table_name + "_" + arg.__tablename__ + "s", cascade_backrefs=False, cascade="all, delete-orphan", ), cascade_backrefs=False, foreign_keys=class_attribs[arg.__tablename__ + "_id"], ) unique_args.append(class_attribs[arg.__tablename__ + "_id"]) # Add unique constraints to the arguments class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),) # Create class C = type(class_name, (Candidate,), class_attribs) # Create table in DB if not Meta.engine.dialect.has_table(Meta.engine, table_name): C.__table__.create(bind=Meta.engine) candidate_subclasses[class_name] = C, class_spec return C
[ "def", "candidate_subclass", "(", "class_name", ",", "args", ",", "table_name", "=", "None", ",", "cardinality", "=", "None", ",", "values", "=", "None", ")", ":", "if", "table_name", "is", "None", ":", "table_name", "=", "camel_to_under", "(", "class_name",...
Creates and returns a Candidate subclass with provided argument names, which are Context type. Creates the table in DB if does not exist yet. Import using: .. code-block:: python from fonduer.candidates.models import candidate_subclass :param class_name: The name of the class, should be "camel case" e.g. NewCandidate :param args: A list of names of constituent arguments, which refer to the Contexts--representing mentions--that comprise the candidate :param table_name: The name of the corresponding table in DB; if not provided, is converted from camel case by default, e.g. new_candidate :param cardinality: The cardinality of the variable corresponding to the Candidate. By default is 2 i.e. is a binary value, e.g. is or is not a true mention.
[ "Creates", "and", "returns", "a", "Candidate", "subclass", "with", "provided", "argument", "names", "which", "are", "Context", "type", ".", "Creates", "the", "table", "in", "DB", "if", "does", "not", "exist", "yet", "." ]
4520f86a716f03dcca458a9f4bddac75b4e7068f
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/candidates/models/candidate.py#L67-L189