repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
pluralize
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the plural of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if pos == NOUN: for a, b in plural_inflections: if w.endswith(a): return w[:-len(a)] + b # Default rules (baseline = 69%). if w.startswith("ge"): return w if w.endswith("gie"): return w if w.endswith("e"): return w + "n" if w.endswith("ien"): return w[:-2] + "um" if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")): return w if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")): return w + "en" if w.endswith("in"): return w + "nen" if w.endswith("nis"): return w + "se" if w.endswith(("eld", "ild", "ind")): return w + "er" if w.endswith("o"): return w + "s" if w.endswith("a"): return w[:-1] + "en" # Inflect common umlaut vowels: Kopf => Köpfe. if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")): umlaut = w[-3] umlaut = umlaut.replace("a", u"ä") umlaut = umlaut.replace("o", u"ö") umlaut = umlaut.replace("u", u"ü") return w[:-3] + umlaut + w[-2:] + "e" for a, b in ( ("ag", u"äge"), ("ann", u"änner"), ("aum", u"äume"), ("aus", u"äuser"), ("zug", u"züge")): if w.endswith(a): return w[:-len(a)] + b return w + "e" return w
python
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the plural of a given word.
        The inflection is based on probability rather than gender and role.
    """
    # NOTE(review): the mutable default `custom={}` is shared across calls;
    # it is only read here, never mutated, so it is safe — kept to preserve
    # the public signature.
    w = word.lower().capitalize()
    # The custom-override lookup uses the caller's original spelling,
    # not the normalized `w`.
    if word in custom:
        return custom[word]
    if pos == NOUN:
        # Exception table first: module-level (suffix, replacement) pairs.
        for a, b in plural_inflections:
            if w.endswith(a):
                return w[:-len(a)] + b
        # Default rules (baseline = 69%).
        # The order of these checks matters: the first match wins.
        if w.startswith("ge"):
            return w
        if w.endswith("gie"):
            return w
        if w.endswith("e"):
            return w + "n"
        # NOTE(review): -ien => -um maps to a singular-looking form
        # (e.g. Kriterien => Kriterium); kept as in the upstream rules —
        # confirm against pattern.de before changing.
        if w.endswith("ien"):
            return w[:-2] + "um"
        # Suffixes whose plural equals the singular.
        if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")):
            return w
        if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
            return w + "en"
        if w.endswith("in"):
            return w + "nen"
        if w.endswith("nis"):
            return w + "se"
        if w.endswith(("eld", "ild", "ind")):
            return w + "er"
        if w.endswith("o"):
            return w + "s"
        if w.endswith("a"):
            return w[:-1] + "en"
        # Inflect common umlaut vowels: Kopf => Köpfe.
        # Only the third-from-last character is umlauted; the last two
        # characters are kept and -e is appended.
        if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
            umlaut = w[-3]
            umlaut = umlaut.replace("a", u"ä")
            umlaut = umlaut.replace("o", u"ö")
            umlaut = umlaut.replace("u", u"ü")
            return w[:-3] + umlaut + w[-2:] + "e"
        # Irregular umlauted endings: Mann => Männer, Haus => Häuser, ...
        for a, b in (
          ("ag", u"äge"),
          ("ann", u"änner"),
          ("aum", u"äume"),
          ("aus", u"äuser"),
          ("zug", u"züge")):
            if w.endswith(a):
                return w[:-len(a)] + b
        # Fallback for nouns: append -e.
        return w + "e"
    # Non-nouns are returned capitalized but otherwise unchanged.
    return w
[ "def", "pluralize", "(", "word", ",", "pos", "=", "NOUN", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ",", "custom", "=", "{", "}", ")", ":", "w", "=", "word", ".", "lower", "(", ")", ".", "capitalize", "(", ")", "if", "word", "in"...
Returns the plural of a given word. The inflection is based on probability rather than gender and role.
[ "Returns", "the", "plural", "of", "a", "given", "word", ".", "The", "inflection", "is", "based", "on", "probability", "rather", "than", "gender", "and", "role", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L212-L262
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
singularize
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}): """ Returns the singular of a given word. The inflection is based on probability rather than gender and role. """ w = word.lower().capitalize() if word in custom: return custom[word] if word in singular: return singular[word] if pos == NOUN: for a, b in singular_inflections: if w.endswith(a): return w[:-len(a)] + b # Default rule: strip known plural suffixes (baseline = 51%). for suffix in ("nen", "en", "n", "e", "er", "s"): if w.endswith(suffix): w = w[:-len(suffix)] break # Corrections (these add about 1% accuracy): if w.endswith(("rr", "rv", "nz")): return w + "e" return w return w
python
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """ Returns the singular of a given word.
        The inflection is based on probability rather than gender and role.
    """
    normalized = word.lower().capitalize()
    # Caller-supplied overrides and the known-singular lexicon are checked
    # against the original spelling, before any rules run.
    if word in custom:
        return custom[word]
    if word in singular:
        return singular[word]
    if pos != NOUN:
        return normalized
    # Irregular endings from the module-level inflection table.
    for ending, replacement in singular_inflections:
        if normalized.endswith(ending):
            return normalized[:-len(ending)] + replacement
    # Default rule: strip the first matching plural suffix (baseline = 51%).
    for plural_suffix in ("nen", "en", "n", "e", "er", "s"):
        if normalized.endswith(plural_suffix):
            normalized = normalized[:-len(plural_suffix)]
            break
    # Corrections (these add about 1% accuracy):
    if normalized.endswith(("rr", "rv", "nz")):
        return normalized + "e"
    return normalized
[ "def", "singularize", "(", "word", ",", "pos", "=", "NOUN", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ",", "custom", "=", "{", "}", ")", ":", "w", "=", "word", ".", "lower", "(", ")", ".", "capitalize", "(", ")", "if", "word", "i...
Returns the singular of a given word. The inflection is based on probability rather than gender and role.
[ "Returns", "the", "singular", "of", "a", "given", "word", ".", "The", "inflection", "is", "based", "on", "probability", "rather", "than", "gender", "and", "role", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L317-L339
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
attributive
def attributive(adjective, gender=MALE, role=SUBJECT, article=None): """ For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive). """ w, g, c, a = \ adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None if w in adjective_attributive: return adjective_attributive[w] if a is None \ or a in ("mir", "dir", "ihm") \ or a in ("ein", "etwas", "mehr") \ or a.startswith(("all", "mehrer", "wenig", "viel")): return w + adjectives_strong.get((g, c), "") if a.startswith(("ein", "kein")) \ or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")): return w + adjectives_mixed.get((g, c), "") if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \ or a.startswith(( "derselb", "derjenig", "jed", "jeglich", "jen", "manch", "dies", "solch", "welch")): return w + adjectives_weak.get((g, c), "") # Default to strong inflection. return w + adjectives_strong.get((g, c), "")
python
def attributive(adjective, gender=MALE, role=SUBJECT, article=None):
    """ For a predicative adjective, returns the attributive form (lowercase).
        In German, the attributive is formed with -e, -em, -en, -er or -es,
        depending on gender (masculine, feminine, neuter or plural)
        and role (nominative, accusative, dative, genitive).
    """
    # w = lowercased adjective, g = first letter of gender ("m"/"f"/...),
    # c = first three letters of role (case), a = lowercased article or None.
    w, g, c, a = \
        adjective.lower(), gender[:1].lower(), role[:3].lower(), article and article.lower() or None
    # Irregular adjectives are looked up directly.
    if w in adjective_attributive:
        return adjective_attributive[w]
    # Strong declension: no article, or an article that carries no ending
    # information (personal pronouns, quantity words).
    if a is None \
    or a in ("mir", "dir", "ihm") \
    or a in ("ein", "etwas", "mehr") \
    or a.startswith(("all", "mehrer", "wenig", "viel")):
        return w + adjectives_strong.get((g, c), "")
    # Mixed declension: indefinite article ein/kein and possessives.
    # NOTE(review): `a` has already been lowercased, so the "Ihr" prefix can
    # never match here; harmless, kept for parity with upstream.
    if a.startswith(("ein", "kein")) \
    or a.startswith(("mein", "dein", "sein", "ihr", "Ihr", "unser", "euer")):
        return w + adjectives_mixed.get((g, c), "")
    # Weak declension: definite article and demonstratives.
    if a in ("arm", "alt", "all", "der", "die", "das", "den", "dem", "des") \
    or a.startswith((
      "derselb", "derjenig", "jed", "jeglich", "jen", "manch",
      "dies", "solch", "welch")):
        return w + adjectives_weak.get((g, c), "")
    # Default to strong inflection.
    return w + adjectives_strong.get((g, c), "")
[ "def", "attributive", "(", "adjective", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ",", "article", "=", "None", ")", ":", "w", ",", "g", ",", "c", ",", "a", "=", "adjective", ".", "lower", "(", ")", ",", "gender", "[", ":", "1", "...
For a predicative adjective, returns the attributive form (lowercase). In German, the attributive is formed with -e, -em, -en, -er or -es, depending on gender (masculine, feminine, neuter or plural) and role (nominative, accusative, dative, genitive).
[ "For", "a", "predicative", "adjective", "returns", "the", "attributive", "form", "(", "lowercase", ")", ".", "In", "German", "the", "attributive", "is", "formed", "with", "-", "e", "-", "em", "-", "en", "-", "er", "or", "-", "es", "depending", "on", "g...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L517-L541
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
predicative
def predicative(adjective): """ Returns the predicative adjective (lowercase). In German, the attributive form preceding a noun is always used: "ein kleiner Junge" => strong, masculine, nominative, "eine schöne Frau" => mixed, feminine, nominative, "der kleine Prinz" => weak, masculine, nominative, etc. The predicative is useful for lemmatization. """ w = adjective.lower() if len(w) > 3: for suffix in ("em", "en", "er", "es", "e"): if w.endswith(suffix): b = w[:max(-len(suffix), -(len(w)-3))] if b.endswith("bl"): # plausibles => plausibel b = b[:-1] + "el" if b.endswith("pr"): # propres => proper b = b[:-1] + "er" return b return w
python
def predicative(adjective):
    """ Returns the predicative adjective (lowercase).
        In German, the attributive form preceding a noun is always used:
        "ein kleiner Junge" => strong, masculine, nominative,
        "eine schöne Frau" => mixed, feminine, nominative,
        "der kleine Prinz" => weak, masculine, nominative, etc.
        The predicative is useful for lemmatization.
    """
    lowered = adjective.lower()
    # Words of three characters or fewer carry no declension suffix.
    if len(lowered) <= 3:
        return lowered
    for ending in ("em", "en", "er", "es", "e"):
        if not lowered.endswith(ending):
            continue
        # Strip the suffix, but never past the third character of the word.
        cut = max(-len(ending), 3 - len(lowered))
        base = lowered[:cut]
        if base.endswith("bl"):  # plausibles => plausibel
            base = base[:-1] + "el"
        if base.endswith("pr"):  # propres => proper
            base = base[:-1] + "er"
        return base
    return lowered
[ "def", "predicative", "(", "adjective", ")", ":", "w", "=", "adjective", ".", "lower", "(", ")", "if", "len", "(", "w", ")", ">", "3", ":", "for", "suffix", "in", "(", "\"em\"", ",", "\"en\"", ",", "\"er\"", ",", "\"es\"", ",", "\"e\"", ")", ":",...
Returns the predicative adjective (lowercase). In German, the attributive form preceding a noun is always used: "ein kleiner Junge" => strong, masculine, nominative, "eine schöne Frau" => mixed, feminine, nominative, "der kleine Prinz" => weak, masculine, nominative, etc. The predicative is useful for lemmatization.
[ "Returns", "the", "predicative", "adjective", "(", "lowercase", ")", ".", "In", "German", "the", "attributive", "form", "preceding", "a", "noun", "is", "always", "used", ":", "ein", "kleiner", "Junge", "=", ">", "strong", "masculine", "nominative", "eine", "...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L543-L561
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
grade
def grade(adjective, suffix=COMPARATIVE): """ Returns the comparative or superlative form of the given (inflected) adjective. """ b = predicative(adjective) # groß => großt, schön => schönst if suffix == SUPERLATIVE and b.endswith(("s", u"ß")): suffix = suffix[1:] # große => großere, schönes => schöneres return adjective[:len(b)] + suffix + adjective[len(b):]
python
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given (inflected) adjective.
    """
    base = predicative(adjective)
    # groß => großt, schön => schönst: drop the leading -s of the superlative
    # suffix when the base already ends in a sibilant.
    if suffix == SUPERLATIVE and base.endswith(("s", u"ß")):
        suffix = suffix[1:]
    # große => großere, schönes => schöneres: insert the grading suffix
    # between the base and the original declension ending.
    stem, inflection = adjective[:len(base)], adjective[len(base):]
    return stem + suffix + inflection
[ "def", "grade", "(", "adjective", ",", "suffix", "=", "COMPARATIVE", ")", ":", "b", "=", "predicative", "(", "adjective", ")", "# groß => großt, schön => schönst", "if", "suffix", "==", "SUPERLATIVE", "and", "b", ".", "endswith", "(", "(", "\"s\"", ",", "u\"...
Returns the comparative or superlative form of the given (inflected) adjective.
[ "Returns", "the", "comparative", "or", "superlative", "form", "of", "the", "given", "(", "inflected", ")", "adjective", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L568-L576
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
Verbs.find_lemma
def find_lemma(self, verb): """ Returns the base form of the given inflected verb, using a rule-based approach. """ v = verb.lower() # Common prefixes: be-finden and emp-finden probably inflect like finden. if not (v.startswith("ge") and v.endswith("t")): # Probably gerund. for prefix in prefixes: if v.startswith(prefix) and v[len(prefix):] in self.inflections: return prefix + self.inflections[v[len(prefix):]] # Common sufixes: setze nieder => niedersetzen. b, suffix = " " in v and v.split()[:2] or (v, "") # Infinitive -ln: trommeln. if b.endswith(("ln", "rn")): return b # Lemmatize regular inflections. for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"): if b.endswith(x): b = b[:-len(x)]; break # Subjunctive: hielte => halten, schnitte => schneiden. for x, y in ( ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"), ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"), (u"iß", u"eiß"), ( "it", "eid"), ( "oss", "iess"), (u"öss", "iess")): if b.endswith(x): b = b[:-len(x)] + y; break b = b.replace("eeiss", "eiss") b = b.replace("eeid", "eit") # Subjunctive: wechselte => wechseln if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS): b = b + "e" # abknallst != abknalln => abknallen if b.endswith(("hl", "ll", "ul", "eil")): b = b + "e" # Strip ge- from (likely) gerund: if b.startswith("ge") and v.endswith("t"): b = b[2:] # Corrections (these add about 1.5% accuracy): if b.endswith(("lnde", "rnde")): b = b[:-3] if b.endswith(("ae", "al", u"öe", u"üe")): b = b.rstrip("e") + "te" if b.endswith(u"äl"): b = b + "e" return suffix + b + "n"
python
def find_lemma(self, verb):
    """ Returns the base form of the given inflected verb, using a rule-based approach.
    """
    v = verb.lower()
    # Common prefixes: be-finden and emp-finden probably inflect like finden.
    if not (v.startswith("ge") and v.endswith("t")): # Probably gerund.
        for prefix in prefixes:
            if v.startswith(prefix) and v[len(prefix):] in self.inflections:
                return prefix + self.inflections[v[len(prefix):]]
    # Common sufixes: setze nieder => niedersetzen.
    # If the verb contains a space, the second token is a separable
    # prefix that will be re-attached at the end.
    b, suffix = " " in v and v.split()[:2] or (v, "")
    # Infinitive -ln: trommeln.
    if b.endswith(("ln", "rn")):
        return b
    # Lemmatize regular inflections (longest suffixes first; first match wins).
    for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"):
        if b.endswith(x):
            b = b[:-len(x)]; break
    # Subjunctive: hielte => halten, schnitte => schneiden.
    for x, y in (
      ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"),
      ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"),
      (u"iß", u"eiß"), ( "it", "eid"), ( "oss", "iess"), (u"öss", "iess")):
        if b.endswith(x):
            b = b[:-len(x)] + y; break
    # Undo double-vowel artifacts produced by the substitutions above.
    b = b.replace("eeiss", "eiss")
    b = b.replace("eeid", "eit")
    # Subjunctive: wechselte => wechseln
    if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS):
        b = b + "e"
    # abknallst != abknalln => abknallen
    if b.endswith(("hl", "ll", "ul", "eil")):
        b = b + "e"
    # Strip ge- from (likely) gerund:
    if b.startswith("ge") and v.endswith("t"):
        b = b[2:]
    # Corrections (these add about 1.5% accuracy):
    if b.endswith(("lnde", "rnde")):
        b = b[:-3]
    if b.endswith(("ae", "al", u"öe", u"üe")):
        b = b.rstrip("e") + "te"
    if b.endswith(u"äl"):
        b = b + "e"
    # Re-attach any separable prefix and the infinitive -n.
    return suffix + b + "n"
[ "def", "find_lemma", "(", "self", ",", "verb", ")", ":", "v", "=", "verb", ".", "lower", "(", ")", "# Common prefixes: be-finden and emp-finden probably inflect like finden.", "if", "not", "(", "v", ".", "startswith", "(", "\"ge\"", ")", "and", "v", ".", "ends...
Returns the base form of the given inflected verb, using a rule-based approach.
[ "Returns", "the", "base", "form", "of", "the", "given", "inflected", "verb", "using", "a", "rule", "-", "based", "approach", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L372-L413
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
Verbs.find_lexeme
def find_lexeme(self, verb): """ For a regular verb (base form), returns the forms using a rule-based approach. """ v = verb.lower() # Stem = infinitive minus -en, -ln, -rn. b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v))) # Split common prefixes. x, x1, x2 = "", "", "" for prefix in prefix_separable: if v.startswith(prefix): b, x = b[len(prefix):], prefix x1 = (" " + x).rstrip() x2 = x + "ge" break # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest. pl = b.endswith("el") and b[:-2]+"l" or b # Present tense 1pl -el: handeln => wir handeln pw = v.endswith(("ln", "rn")) and v or b+"en" # Present tense ending in -d or -t gets -e: pr = b.endswith(("d", "t")) and b+"e" or b # Present tense 2sg gets -st, unless stem ends with -s or -z. p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st" # Present participle: spiel + -end, arbeiten + -d: pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end" # Past tense regular: pt = encode_sz(pr) + "t" # Past participle: haushalten => hausgehalten ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt ge = x and x+"ge"+pt or ge # Present subjunctive: stem + -e, -est, -en, -et: s1 = encode_sz(pl) # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et: s2 = encode_sz(pt) # Construct the lexeme: lexeme = a = [ v, pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp, # present pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past b+"e"+x1, pr+"t"+x1, x+pw, # imperative s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1, # subjunctive I s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1 # subjunctive II ] # Encode Eszett (ß) and attempt to retrieve from the lexicon. # Decode Eszett for present and imperative. if encode_sz(v) in self: a = self[encode_sz(v)] a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:] # Since the lexicon does not contain imperative for all verbs, don't simply return it. 
# Instead, update the rule-based lexeme with inflections from the lexicon. return [a[i] or lexeme[i] for i in range(len(a))]
python
def find_lexeme(self, verb):
    """ For a regular verb (base form), returns the forms using a rule-based approach.
    """
    v = verb.lower()
    # Stem = infinitive minus -en, -ln, -rn.
    b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
    # Split common prefixes.
    # x = separable prefix, x1 = " " + prefix (appended to finite forms),
    # x2 = prefix + "ge".
    # NOTE(review): b0 and x2 are assigned but not used below; kept for
    # parity with upstream.
    x, x1, x2 = "", "", ""
    for prefix in prefix_separable:
        if v.startswith(prefix):
            b, x = b[len(prefix):], prefix
            x1 = (" " + x).rstrip()
            x2 = x + "ge"
            break
    # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
    pl = b.endswith("el") and b[:-2]+"l" or b
    # Present tense 1pl -el: handeln => wir handeln
    pw = v.endswith(("ln", "rn")) and v or b+"en"
    # Present tense ending in -d or -t gets -e:
    pr = b.endswith(("d", "t")) and b+"e" or b
    # Present tense 2sg gets -st, unless stem ends with -s or -z.
    p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st"
    # Present participle: spiel + -end, arbeiten + -d:
    pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end"
    # Past tense regular:
    pt = encode_sz(pr) + "t"
    # Past participle: haushalten => hausgehalten
    ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt
    ge = x and x+"ge"+pt or ge
    # Present subjunctive: stem + -e, -est, -en, -et:
    s1 = encode_sz(pl)
    # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
    s2 = encode_sz(pt)
    # Construct the lexeme:
    lexeme = a = [
        v,
        pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp,             # present
        pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past
        b+"e"+x1, pr+"t"+x1, x+pw,                                     # imperative
        s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1,                # subjunctive I
        s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1                 # subjunctive II
    ]
    # Encode Eszett (ß) and attempt to retrieve from the lexicon.
    # Decode Eszett for present and imperative.
    if encode_sz(v) in self:
        a = self[encode_sz(v)]
        a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:]
    # Since the lexicon does not contain imperative for all verbs, don't simply return it.
    # Instead, update the rule-based lexeme with inflections from the lexicon.
    return [a[i] or lexeme[i] for i in range(len(a))]
[ "def", "find_lexeme", "(", "self", ",", "verb", ")", ":", "v", "=", "verb", ".", "lower", "(", ")", "# Stem = infinitive minus -en, -ln, -rn.", "b", "=", "b0", "=", "re", ".", "sub", "(", "\"en$\"", ",", "\"\"", ",", "re", ".", "sub", "(", "\"ln$\"", ...
For a regular verb (base form), returns the forms using a rule-based approach.
[ "For", "a", "regular", "verb", "(", "base", "form", ")", "returns", "the", "forms", "using", "a", "rule", "-", "based", "approach", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L415-L464
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/inflect.py
Verbs.tenses
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ tenses = _Verbs.tenses(self, verb, parse) if len(tenses) == 0: # auswirkte => wirkte aus for prefix in prefix_separable: if verb.startswith(prefix): tenses = _Verbs.tenses(self, verb[len(prefix):] + " " + prefix, parse) break return tenses
python
def tenses(self, verb, parse=True):
    """ Returns a list of possible tenses for the given inflected verb. """
    found = _Verbs.tenses(self, verb, parse)
    if not found:
        # auswirkte => wirkte aus: retry with the separable prefix
        # moved behind the verb.
        for prefix in prefix_separable:
            if verb.startswith(prefix):
                rearranged = verb[len(prefix):] + " " + prefix
                found = _Verbs.tenses(self, rearranged, parse)
                break
    return found
[ "def", "tenses", "(", "self", ",", "verb", ",", "parse", "=", "True", ")", ":", "tenses", "=", "_Verbs", ".", "tenses", "(", "self", ",", "verb", ",", "parse", ")", "if", "len", "(", "tenses", ")", "==", "0", ":", "# auswirkte => wirkte aus", "for", ...
Returns a list of possible tenses for the given inflected verb.
[ "Returns", "a", "list", "of", "possible", "tenses", "for", "the", "given", "inflected", "verb", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/inflect.py#L466-L476
train
markuskiller/textblob-de
textblob_de/classifiers.py
_get_words_from_dataset
def _get_words_from_dataset(dataset): """Return a set of all words in a dataset. :param dataset: A list of tuples of the form ``(words, label)`` where ``words`` is either a string of a list of tokens. """ # Words may be either a string or a list of tokens. Return an iterator # of tokens accordingly def tokenize(words): if isinstance(words, basestring): return word_tokenize(words, include_punc=False) else: return words all_words = chain.from_iterable(tokenize(words) for words, _ in dataset) return set(all_words)
python
def _get_words_from_dataset(dataset):
    """Return a set of all words in a dataset.

    :param dataset: A list of tuples of the form ``(words, label)`` where
        ``words`` is either a string or a list of tokens.
    """
    def tokenize(words):
        # A raw string is tokenized; an already-tokenized list passes through.
        if isinstance(words, basestring):
            return word_tokenize(words, include_punc=False)
        return words
    return set(chain.from_iterable(tokenize(words) for words, _ in dataset))
[ "def", "_get_words_from_dataset", "(", "dataset", ")", ":", "# Words may be either a string or a list of tokens. Return an iterator", "# of tokens accordingly", "def", "tokenize", "(", "words", ")", ":", "if", "isinstance", "(", "words", ",", "basestring", ")", ":", "retu...
Return a set of all words in a dataset. :param dataset: A list of tuples of the form ``(words, label)`` where ``words`` is either a string of a list of tokens.
[ "Return", "a", "set", "of", "all", "words", "in", "a", "dataset", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L57-L72
train
markuskiller/textblob-de
textblob_de/classifiers.py
basic_extractor
def basic_extractor(document, train_set): """A basic document feature extractor that returns a dict indicating what words in ``train_set`` are contained in ``document``. :param document: The text to extract features from. Can be a string or an iterable. :param list train_set: Training data set, a list of tuples of the form ``(words, label)``. """ word_features = _get_words_from_dataset(train_set) tokens = _get_document_tokens(document) features = dict(((u'contains({0})'.format(word), (word in tokens)) for word in word_features)) return features
python
def basic_extractor(document, train_set):
    """A basic document feature extractor that returns a dict indicating what
    words in ``train_set`` are contained in ``document``.

    :param document: The text to extract features from. Can be a string or an
        iterable.
    :param list train_set: Training data set, a list of tuples of the form
        ``(words, label)``.
    """
    vocabulary = _get_words_from_dataset(train_set)
    tokens = _get_document_tokens(document)
    # One boolean "contains(word)" feature per word in the training vocabulary.
    return {u'contains({0})'.format(word): (word in tokens)
            for word in vocabulary}
[ "def", "basic_extractor", "(", "document", ",", "train_set", ")", ":", "word_features", "=", "_get_words_from_dataset", "(", "train_set", ")", "tokens", "=", "_get_document_tokens", "(", "document", ")", "features", "=", "dict", "(", "(", "(", "u'contains({0})'", ...
A basic document feature extractor that returns a dict indicating what words in ``train_set`` are contained in ``document``. :param document: The text to extract features from. Can be a string or an iterable. :param list train_set: Training data set, a list of tuples of the form ``(words, label)``.
[ "A", "basic", "document", "feature", "extractor", "that", "returns", "a", "dict", "indicating", "what", "words", "in", "train_set", "are", "contained", "in", "document", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L84-L97
train
markuskiller/textblob-de
textblob_de/classifiers.py
contains_extractor
def contains_extractor(document): """A basic document feature extractor that returns a dict of words that the document contains.""" tokens = _get_document_tokens(document) features = dict((u'contains({0})'.format(w), True) for w in tokens) return features
python
def contains_extractor(document):
    """A basic document feature extractor that returns a dict of words that the
    document contains."""
    return {u'contains({0})'.format(token): True
            for token in _get_document_tokens(document)}
[ "def", "contains_extractor", "(", "document", ")", ":", "tokens", "=", "_get_document_tokens", "(", "document", ")", "features", "=", "dict", "(", "(", "u'contains({0})'", ".", "format", "(", "w", ")", ",", "True", ")", "for", "w", "in", "tokens", ")", "...
A basic document feature extractor that returns a dict of words that the document contains.
[ "A", "basic", "document", "feature", "extractor", "that", "returns", "a", "dict", "of", "words", "that", "the", "document", "contains", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L100-L105
train
markuskiller/textblob-de
textblob_de/classifiers.py
BaseClassifier._read_data
def _read_data(self, dataset, format=None): """Reads a data file and returns and iterable that can be used as testing or training data.""" # Attempt to detect file format if "format" isn't specified if not format: format_class = formats.detect(dataset) else: if format not in formats.AVAILABLE.keys(): raise ValueError("'{0}' format not supported.".format(format)) format_class = formats.AVAILABLE[format] return format_class(dataset).to_iterable()
python
def _read_data(self, dataset, format=None):
    """Reads a data file and returns an iterable that can be used as testing or
    training data."""
    if format:
        # An explicit format must be one of the registered ones.
        if format not in formats.AVAILABLE:
            raise ValueError("'{0}' format not supported.".format(format))
        format_class = formats.AVAILABLE[format]
    else:
        # Attempt to detect file format if "format" isn't specified.
        format_class = formats.detect(dataset)
    return format_class(dataset).to_iterable()
[ "def", "_read_data", "(", "self", ",", "dataset", ",", "format", "=", "None", ")", ":", "# Attempt to detect file format if \"format\" isn't specified", "if", "not", "format", ":", "format_class", "=", "formats", ".", "detect", "(", "dataset", ")", "else", ":", ...
Reads a data file and returns and iterable that can be used as testing or training data.
[ "Reads", "a", "data", "file", "and", "returns", "and", "iterable", "that", "can", "be", "used", "as", "testing", "or", "training", "data", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L138-L148
train
markuskiller/textblob-de
textblob_de/classifiers.py
BaseClassifier.extract_features
def extract_features(self, text): """Extracts features from a body of text. :rtype: dictionary of features """ # Feature extractor may take one or two arguments try: return self.feature_extractor(text, self.train_set) except (TypeError, AttributeError): return self.feature_extractor(text)
python
def extract_features(self, text):
    """Extracts features from a body of text.

    :rtype: dictionary of features
    """
    # The configured extractor may expect (text, train_set) or just (text);
    # fall back to the single-argument form when the first call fails.
    try:
        features = self.feature_extractor(text, self.train_set)
    except (TypeError, AttributeError):
        features = self.feature_extractor(text)
    return features
[ "def", "extract_features", "(", "self", ",", "text", ")", ":", "# Feature extractor may take one or two arguments", "try", ":", "return", "self", ".", "feature_extractor", "(", "text", ",", "self", ".", "train_set", ")", "except", "(", "TypeError", ",", "Attribute...
Extracts features from a body of text. :rtype: dictionary of features
[ "Extracts", "features", "from", "a", "body", "of", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L167-L177
train
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.train
def train(self, *args, **kwargs): """Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. .. versionadded:: 0.6.2 :rtype: A classifier """ try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) return self.classifier except AttributeError: raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.")
python
def train(self, *args, **kwargs): """Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. .. versionadded:: 0.6.2 :rtype: A classifier """ try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) return self.classifier except AttributeError: raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.")
[ "def", "train", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "classifier", "=", "self", ".", "nltk_class", ".", "train", "(", "self", ".", "train_features", ",", "*", "args", ",", "*", "*", "kwargs", ...
Train the classifier with a labeled feature set and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. .. versionadded:: 0.6.2 :rtype: A classifier
[ "Train", "the", "classifier", "with", "a", "labeled", "feature", "set", "and", "return", "the", "classifier", ".", "Takes", "the", "same", "arguments", "as", "the", "wrapped", "NLTK", "class", ".", "This", "method", "is", "implicitly", "called", "when", "cal...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L224-L242
train
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.classify
def classify(self, text): """Classifies the text. :param str text: A string of text. """ text_features = self.extract_features(text) return self.classifier.classify(text_features)
python
def classify(self, text): """Classifies the text. :param str text: A string of text. """ text_features = self.extract_features(text) return self.classifier.classify(text_features)
[ "def", "classify", "(", "self", ",", "text", ")", ":", "text_features", "=", "self", ".", "extract_features", "(", "text", ")", "return", "self", ".", "classifier", ".", "classify", "(", "text_features", ")" ]
Classifies the text. :param str text: A string of text.
[ "Classifies", "the", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L248-L255
train
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.accuracy
def accuracy(self, test_set, format=None): """Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format. """ if isinstance(test_set, basestring): # test_set is a filename test_data = self._read_data(test_set) else: # test_set is a list of tuples test_data = test_set test_features = [(self.extract_features(d), c) for d, c in test_data] return nltk.classify.accuracy(self.classifier, test_features)
python
def accuracy(self, test_set, format=None): """Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format. """ if isinstance(test_set, basestring): # test_set is a filename test_data = self._read_data(test_set) else: # test_set is a list of tuples test_data = test_set test_features = [(self.extract_features(d), c) for d, c in test_data] return nltk.classify.accuracy(self.classifier, test_features)
[ "def", "accuracy", "(", "self", ",", "test_set", ",", "format", "=", "None", ")", ":", "if", "isinstance", "(", "test_set", ",", "basestring", ")", ":", "# test_set is a filename", "test_data", "=", "self", ".", "_read_data", "(", "test_set", ")", "else", ...
Compute the accuracy on a test set. :param test_set: A list of tuples of the form ``(text, label)``, or a filename. :param format: If ``test_set`` is a filename, the file format, e.g. ``"csv"`` or ``"json"``. If ``None``, will attempt to detect the file format.
[ "Compute", "the", "accuracy", "on", "a", "test", "set", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L257-L272
train
markuskiller/textblob-de
textblob_de/classifiers.py
NLTKClassifier.update
def update(self, new_data, *args, **kwargs): '''Update the classifier with new training data and re-trains the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``. ''' self.train_set += new_data self.train_features = [(self.extract_features(d), c) for d, c in self.train_set] try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) except AttributeError: # Descendant has not defined nltk_class raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.") return True
python
def update(self, new_data, *args, **kwargs): '''Update the classifier with new training data and re-trains the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``. ''' self.train_set += new_data self.train_features = [(self.extract_features(d), c) for d, c in self.train_set] try: self.classifier = self.nltk_class.train(self.train_features, *args, **kwargs) except AttributeError: # Descendant has not defined nltk_class raise ValueError("NLTKClassifier must have a nltk_class" " variable that is not None.") return True
[ "def", "update", "(", "self", ",", "new_data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "train_set", "+=", "new_data", "self", ".", "train_features", "=", "[", "(", "self", ".", "extract_features", "(", "d", ")", ",", "c", ...
Update the classifier with new training data and re-trains the classifier. :param new_data: New data as a list of tuples of the form ``(text, label)``.
[ "Update", "the", "classifier", "with", "new", "training", "data", "and", "re", "-", "trains", "the", "classifier", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L274-L290
train
markuskiller/textblob-de
textblob_de/classifiers.py
NaiveBayesClassifier.prob_classify
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ text_features = self.extract_features(text) return self.classifier.prob_classify(text_features)
python
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ text_features = self.extract_features(text) return self.classifier.prob_classify(text_features)
[ "def", "prob_classify", "(", "self", ",", "text", ")", ":", "text_features", "=", "self", ".", "extract_features", "(", "text", ")", "return", "self", ".", "classifier", ".", "prob_classify", "(", "text_features", ")" ]
Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = NaiveBayesClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist
[ "Return", "the", "label", "probability", "distribution", "for", "classifying", "a", "string", "of", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L312-L330
train
markuskiller/textblob-de
textblob_de/classifiers.py
PositiveNaiveBayesClassifier.train
def train(self, *args, **kwargs): """Train the classifier with a labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. :rtype: A classifier """ self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior) return self.classifier
python
def train(self, *args, **kwargs): """Train the classifier with a labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. :rtype: A classifier """ self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior) return self.classifier
[ "def", "train", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "classifier", "=", "self", ".", "nltk_class", ".", "train", "(", "self", ".", "positive_features", ",", "self", ".", "unlabeled_features", ",", "self", ".", ...
Train the classifier with a labeled and unlabeled feature sets and return the classifier. Takes the same arguments as the wrapped NLTK class. This method is implicitly called when calling ``classify`` or ``accuracy`` methods and is included only to allow passing in arguments to the ``train`` method of the wrapped NLTK class. :rtype: A classifier
[ "Train", "the", "classifier", "with", "a", "labeled", "and", "unlabeled", "feature", "sets", "and", "return", "the", "classifier", ".", "Takes", "the", "same", "arguments", "as", "the", "wrapped", "NLTK", "class", ".", "This", "method", "is", "implicitly", "...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L453-L466
train
markuskiller/textblob-de
textblob_de/classifiers.py
PositiveNaiveBayesClassifier.update
def update(self, new_positive_data=None, new_unlabeled_data=None, positive_prob_prior=0.5, *args, **kwargs): '''Update the classifier with new data and re-trains the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_data: List of new, unlabeled strings. ''' self.positive_prob_prior = positive_prob_prior if new_positive_data: self.positive_set += new_positive_data self.positive_features += [self.extract_features(d) for d in new_positive_data] if new_unlabeled_data: self.unlabeled_set += new_unlabeled_data self.unlabeled_features += [self.extract_features(d) for d in new_unlabeled_data] self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior, *args, **kwargs) return True
python
def update(self, new_positive_data=None, new_unlabeled_data=None, positive_prob_prior=0.5, *args, **kwargs): '''Update the classifier with new data and re-trains the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_data: List of new, unlabeled strings. ''' self.positive_prob_prior = positive_prob_prior if new_positive_data: self.positive_set += new_positive_data self.positive_features += [self.extract_features(d) for d in new_positive_data] if new_unlabeled_data: self.unlabeled_set += new_unlabeled_data self.unlabeled_features += [self.extract_features(d) for d in new_unlabeled_data] self.classifier = self.nltk_class.train(self.positive_features, self.unlabeled_features, self.positive_prob_prior, *args, **kwargs) return True
[ "def", "update", "(", "self", ",", "new_positive_data", "=", "None", ",", "new_unlabeled_data", "=", "None", ",", "positive_prob_prior", "=", "0.5", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "positive_prob_prior", "=", "positive_prob_...
Update the classifier with new data and re-trains the classifier. :param new_positive_data: List of new, labeled strings. :param new_unlabeled_data: List of new, unlabeled strings.
[ "Update", "the", "classifier", "with", "new", "data", "and", "re", "-", "trains", "the", "classifier", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L468-L490
train
markuskiller/textblob-de
textblob_de/classifiers.py
MaxEntClassifier.prob_classify
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ feats = self.extract_features(text) return self.classifier.prob_classify(feats)
python
def prob_classify(self, text): """Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist """ feats = self.extract_features(text) return self.classifier.prob_classify(feats)
[ "def", "prob_classify", "(", "self", ",", "text", ")", ":", "feats", "=", "self", ".", "extract_features", "(", "text", ")", "return", "self", ".", "classifier", ".", "prob_classify", "(", "feats", ")" ]
Return the label probability distribution for classifying a string of text. Example: :: >>> classifier = MaxEntClassifier(train_data) >>> prob_dist = classifier.prob_classify("I feel happy this morning.") >>> prob_dist.max() 'positive' >>> prob_dist.prob("positive") 0.7 :rtype: nltk.probability.DictionaryProbDist
[ "Return", "the", "label", "probability", "distribution", "for", "classifying", "a", "string", "of", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/classifiers.py#L497-L515
train
markuskiller/textblob-de
textblob_de/lemmatizers.py
PatternParserLemmatizer.lemmatize
def lemmatize(self, text): """Return a list of (lemma, tag) tuples. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] parsed_sentences = self._parse_text(text) _lemmalist = [] for s in parsed_sentences: tokens = s.split() for i, t in enumerate(tokens): #: Filter empty tokens from the parser output (Issue #5) #: This only happens if parser input is improperly tokenized #: e.g. if there are empty strings in the list of tokens ['A', '', '.'] if t.startswith('/'): continue w, tag, phrase, role, lemma = t.split('/') # The lexicon uses Swiss spelling: "ss" instead of "ß". lemma = lemma.replace(u"ß", "ss") # Reverse previous replacement lemma = lemma.strip().replace("forwardslash", "/") if w[0].isupper() and i > 0: lemma = lemma.title() elif tag.startswith("N") and i == 0: lemma = lemma.title() # Todo: Check if it makes sense to treat '/' as punctuation # (especially for sentiment analysis it might be interesting # to treat it as OR ('oder')). if w in string.punctuation or lemma == '/': continue else: lemma = lemma _lemmalist.append((lemma, tag)) return _lemmalist
python
def lemmatize(self, text): """Return a list of (lemma, tag) tuples. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] parsed_sentences = self._parse_text(text) _lemmalist = [] for s in parsed_sentences: tokens = s.split() for i, t in enumerate(tokens): #: Filter empty tokens from the parser output (Issue #5) #: This only happens if parser input is improperly tokenized #: e.g. if there are empty strings in the list of tokens ['A', '', '.'] if t.startswith('/'): continue w, tag, phrase, role, lemma = t.split('/') # The lexicon uses Swiss spelling: "ss" instead of "ß". lemma = lemma.replace(u"ß", "ss") # Reverse previous replacement lemma = lemma.strip().replace("forwardslash", "/") if w[0].isupper() and i > 0: lemma = lemma.title() elif tag.startswith("N") and i == 0: lemma = lemma.title() # Todo: Check if it makes sense to treat '/' as punctuation # (especially for sentiment analysis it might be interesting # to treat it as OR ('oder')). if w in string.punctuation or lemma == '/': continue else: lemma = lemma _lemmalist.append((lemma, tag)) return _lemmalist
[ "def", "lemmatize", "(", "self", ",", "text", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "[", "]", "parsed_sentences", "=", "self", ".", "_parse_text", "(", "text", ")", "_lemmal...
Return a list of (lemma, tag) tuples. :param str text: A string.
[ "Return", "a", "list", "of", "(", "lemma", "tag", ")", "tuples", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/lemmatizers.py#L43-L80
train
markuskiller/textblob-de
textblob_de/lemmatizers.py
PatternParserLemmatizer._parse_text
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param str text: A string. """ # Fix for issue #1 text = text.replace("/", " FORWARDSLASH ") _tokenized = " ".join(self.tokenizer.tokenize(text)) parsed_text = pattern_parse(_tokenized, tokenize=False, lemmata=True) return parsed_text.split('\n')
python
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param str text: A string. """ # Fix for issue #1 text = text.replace("/", " FORWARDSLASH ") _tokenized = " ".join(self.tokenizer.tokenize(text)) parsed_text = pattern_parse(_tokenized, tokenize=False, lemmata=True) return parsed_text.split('\n')
[ "def", "_parse_text", "(", "self", ",", "text", ")", ":", "# Fix for issue #1", "text", "=", "text", ".", "replace", "(", "\"/\"", ",", "\" FORWARDSLASH \"", ")", "_tokenized", "=", "\" \"", ".", "join", "(", "self", ".", "tokenizer", ".", "tokenize", "(",...
Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/LEMMA (separated by a forward slash '/') :param str text: A string.
[ "Parse", "text", "(", "string", ")", "and", "return", "list", "of", "parsed", "sentences", "(", "strings", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/lemmatizers.py#L82-L96
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
_match
def _match(string, pattern): """ Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression. """ p = pattern try: if p[:1] == WILDCARD and (p[-1:] == WILDCARD and p[1:-1] in string or string.endswith(p[1:])): return True if p[-1:] == WILDCARD and not p[-2:-1] == "\\" and string.startswith(p[:-1]): return True if p == string: return True if WILDCARD in p[1:-1]: p = p.split(WILDCARD) return string.startswith(p[0]) and string.endswith(p[-1]) except: # For performance, calling isinstance() last is 10% faster for plain strings. if isinstance(p, regexp): return p.search(string) is not None return False
python
def _match(string, pattern): """ Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression. """ p = pattern try: if p[:1] == WILDCARD and (p[-1:] == WILDCARD and p[1:-1] in string or string.endswith(p[1:])): return True if p[-1:] == WILDCARD and not p[-2:-1] == "\\" and string.startswith(p[:-1]): return True if p == string: return True if WILDCARD in p[1:-1]: p = p.split(WILDCARD) return string.startswith(p[0]) and string.endswith(p[-1]) except: # For performance, calling isinstance() last is 10% faster for plain strings. if isinstance(p, regexp): return p.search(string) is not None return False
[ "def", "_match", "(", "string", ",", "pattern", ")", ":", "p", "=", "pattern", "try", ":", "if", "p", "[", ":", "1", "]", "==", "WILDCARD", "and", "(", "p", "[", "-", "1", ":", "]", "==", "WILDCARD", "and", "p", "[", "1", ":", "-", "1", "]"...
Returns True if the pattern matches the given word string. The pattern can include a wildcard (*front, back*, *both*, in*side), or it can be a compiled regular expression.
[ "Returns", "True", "if", "the", "pattern", "matches", "the", "given", "word", "string", ".", "The", "pattern", "can", "include", "a", "wildcard", "(", "*", "front", "back", "*", "*", "both", "*", "in", "*", "side", ")", "or", "it", "can", "be", "a", ...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L101-L121
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
unique
def unique(iterable): """ Returns a list copy in which each item occurs only once (in-order). """ seen = set() return [x for x in iterable if x not in seen and not seen.add(x)]
python
def unique(iterable): """ Returns a list copy in which each item occurs only once (in-order). """ seen = set() return [x for x in iterable if x not in seen and not seen.add(x)]
[ "def", "unique", "(", "iterable", ")", ":", "seen", "=", "set", "(", ")", "return", "[", "x", "for", "x", "in", "iterable", "if", "x", "not", "in", "seen", "and", "not", "seen", ".", "add", "(", "x", ")", "]" ]
Returns a list copy in which each item occurs only once (in-order).
[ "Returns", "a", "list", "copy", "in", "which", "each", "item", "occurs", "only", "once", "(", "in", "-", "order", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L127-L131
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
product
def product(*args, **kwargs): """ Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")] """ p = [[]] for iterable in map(tuple, args) * kwargs.get("repeat", 1): p = [x + [y] for x in p for y in iterable] for p in p: yield tuple(p)
python
def product(*args, **kwargs): """ Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")] """ p = [[]] for iterable in map(tuple, args) * kwargs.get("repeat", 1): p = [x + [y] for x in p for y in iterable] for p in p: yield tuple(p)
[ "def", "product", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "p", "=", "[", "[", "]", "]", "for", "iterable", "in", "map", "(", "tuple", ",", "args", ")", "*", "kwargs", ".", "get", "(", "\"repeat\"", ",", "1", ")", ":", "p", "=", ...
Yields all permutations with replacement: list(product("cat", repeat=2)) => [("c", "c"), ("c", "a"), ("c", "t"), ("a", "c"), ("a", "a"), ("a", "t"), ("t", "c"), ("t", "a"), ("t", "t")]
[ "Yields", "all", "permutations", "with", "replacement", ":", "list", "(", "product", "(", "cat", "repeat", "=", "2", "))", "=", ">", "[", "(", "c", "c", ")", "(", "c", "a", ")", "(", "c", "t", ")", "(", "a", "c", ")", "(", "a", "a", ")", "(...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L144-L161
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
variations
def variations(iterable, optional=lambda x: False): """ Returns all possible variations of a sequence with optional items. """ # For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?")) # defines a sequence where constraint A and B are optional: # [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C")] iterable = tuple(iterable) # Create a boolean sequence where True means optional: # ("A?", "B?", "C") => [True, True, False] o = [optional(x) for x in iterable] # Find all permutations of the boolean sequence: # [True, False, True], [True, False, False], [False, False, True], [False, False, False]. # Map to sequences of constraints whose index in the boolean sequence yields True. a = set() for p in product([False, True], repeat=sum(o)): p = list(p) v = [b and (b and p.pop(0)) for b in o] v = tuple(iterable[i] for i in xrange(len(v)) if not v[i]) a.add(v) # Longest-first. return sorted(a, cmp=lambda x, y: len(y) - len(x))
python
def variations(iterable, optional=lambda x: False): """ Returns all possible variations of a sequence with optional items. """ # For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?")) # defines a sequence where constraint A and B are optional: # [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C")] iterable = tuple(iterable) # Create a boolean sequence where True means optional: # ("A?", "B?", "C") => [True, True, False] o = [optional(x) for x in iterable] # Find all permutations of the boolean sequence: # [True, False, True], [True, False, False], [False, False, True], [False, False, False]. # Map to sequences of constraints whose index in the boolean sequence yields True. a = set() for p in product([False, True], repeat=sum(o)): p = list(p) v = [b and (b and p.pop(0)) for b in o] v = tuple(iterable[i] for i in xrange(len(v)) if not v[i]) a.add(v) # Longest-first. return sorted(a, cmp=lambda x, y: len(y) - len(x))
[ "def", "variations", "(", "iterable", ",", "optional", "=", "lambda", "x", ":", "False", ")", ":", "# For example: variations([\"A?\", \"B?\", \"C\"], optional=lambda s: s.endswith(\"?\"))", "# defines a sequence where constraint A and B are optional:", "# [(\"A?\", \"B?\", \"C\"), (\"...
Returns all possible variations of a sequence with optional items.
[ "Returns", "all", "possible", "variations", "of", "a", "sequence", "with", "optional", "items", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L167-L187
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
compile
def compile(pattern, *args, **kwargs): """ Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts). """ id, p = repr(pattern) + repr(args), pattern if id in _cache and not kwargs: return _cache[id] if isinstance(pattern, basestring): p = Pattern.fromstring(pattern, *args, **kwargs) if isinstance(pattern, regexp): p = Pattern([Constraint(words=[pattern], taxonomy=kwargs.get("taxonomy", TAXONOMY))], *args, **kwargs) if len(_cache) > _CACHE_SIZE: _cache.clear() if isinstance(p, Pattern) and not kwargs: _cache[id] = p if isinstance(p, Pattern): return p else: raise TypeError("can't compile '%s' object" % pattern.__class__.__name__)
python
def compile(pattern, *args, **kwargs): """ Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts). """ id, p = repr(pattern) + repr(args), pattern if id in _cache and not kwargs: return _cache[id] if isinstance(pattern, basestring): p = Pattern.fromstring(pattern, *args, **kwargs) if isinstance(pattern, regexp): p = Pattern([Constraint(words=[pattern], taxonomy=kwargs.get("taxonomy", TAXONOMY))], *args, **kwargs) if len(_cache) > _CACHE_SIZE: _cache.clear() if isinstance(p, Pattern) and not kwargs: _cache[id] = p if isinstance(p, Pattern): return p else: raise TypeError("can't compile '%s' object" % pattern.__class__.__name__)
[ "def", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "id", ",", "p", "=", "repr", "(", "pattern", ")", "+", "repr", "(", "args", ")", ",", "pattern", "if", "id", "in", "_cache", "and", "not", "kwargs", ":", "re...
Returns a Pattern from the given string or regular expression. Recently compiled patterns are kept in cache (if they do not use taxonomies, which are mutable dicts).
[ "Returns", "a", "Pattern", "from", "the", "given", "string", "or", "regular", "expression", ".", "Recently", "compiled", "patterns", "are", "kept", "in", "cache", "(", "if", "they", "do", "not", "use", "taxonomies", "which", "are", "mutable", "dicts", ")", ...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L927-L946
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
scan
def scan(pattern, string, *args, **kwargs): """ Returns True if pattern.search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it. """ return compile(pattern, *args, **kwargs).scan(string)
python
def scan(pattern, string, *args, **kwargs): """ Returns True if pattern.search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it. """ return compile(pattern, *args, **kwargs).scan(string)
[ "def", "scan", "(", "pattern", ",", "string", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "scan", "(", "string", ")" ]
Returns True if pattern.search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it.
[ "Returns", "True", "if", "pattern", ".", "search", "(", "Sentence", "(", "string", "))", "may", "yield", "matches", ".", "If", "is", "often", "faster", "to", "scan", "prior", "to", "creating", "a", "Sentence", "and", "searching", "it", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L948-L952
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
match
def match(pattern, sentence, *args, **kwargs): """ Returns the first match found in the given sentence, or None. """ return compile(pattern, *args, **kwargs).match(sentence)
python
def match(pattern, sentence, *args, **kwargs): """ Returns the first match found in the given sentence, or None. """ return compile(pattern, *args, **kwargs).match(sentence)
[ "def", "match", "(", "pattern", ",", "sentence", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "match", "(", "sentence", ")" ]
Returns the first match found in the given sentence, or None.
[ "Returns", "the", "first", "match", "found", "in", "the", "given", "sentence", "or", "None", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L954-L957
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
search
def search(pattern, sentence, *args, **kwargs): """ Returns a list of all matches found in the given sentence. """ return compile(pattern, *args, **kwargs).search(sentence)
python
def search(pattern, sentence, *args, **kwargs): """ Returns a list of all matches found in the given sentence. """ return compile(pattern, *args, **kwargs).search(sentence)
[ "def", "search", "(", "pattern", ",", "sentence", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "compile", "(", "pattern", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "search", "(", "sentence", ")" ]
Returns a list of all matches found in the given sentence.
[ "Returns", "a", "list", "of", "all", "matches", "found", "in", "the", "given", "sentence", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L959-L962
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
odict.push
def push(self, kv): """ Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict. """ if kv[0] in self: self.__delitem__(kv[0]) self.__setitem__(kv[0], kv[1])
python
def push(self, kv): """ Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict. """ if kv[0] in self: self.__delitem__(kv[0]) self.__setitem__(kv[0], kv[1])
[ "def", "push", "(", "self", ",", "kv", ")", ":", "if", "kv", "[", "0", "]", "in", "self", ":", "self", ".", "__delitem__", "(", "kv", "[", "0", "]", ")", "self", ".", "__setitem__", "(", "kv", "[", "0", "]", ",", "kv", "[", "1", "]", ")" ]
Adds a new item from the given (key, value)-tuple. If the key exists, pushes the updated item to the head of the dict.
[ "Adds", "a", "new", "item", "from", "the", "given", "(", "key", "value", ")", "-", "tuple", ".", "If", "the", "key", "exists", "pushes", "the", "updated", "item", "to", "the", "head", "of", "the", "dict", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L211-L217
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.append
def append(self, term, type=None, value=None): """ Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200") """ term = self._normalize(term) type = self._normalize(type) self.setdefault(term, (odict(), odict()))[0].push((type, True)) self.setdefault(type, (odict(), odict()))[1].push((term, True)) self._values[term] = value
python
def append(self, term, type=None, value=None): """ Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200") """ term = self._normalize(term) type = self._normalize(type) self.setdefault(term, (odict(), odict()))[0].push((type, True)) self.setdefault(type, (odict(), odict()))[1].push((term, True)) self._values[term] = value
[ "def", "append", "(", "self", ",", "term", ",", "type", "=", "None", ",", "value", "=", "None", ")", ":", "term", "=", "self", ".", "_normalize", "(", "term", ")", "type", "=", "self", ".", "_normalize", "(", "type", ")", "self", ".", "setdefault",...
Appends the given term to the taxonomy and tags it as the given type. Optionally, a disambiguation value can be supplied. For example: taxonomy.append("many", "quantity", "50-200")
[ "Appends", "the", "given", "term", "to", "the", "taxonomy", "and", "tags", "it", "as", "the", "given", "type", ".", "Optionally", "a", "disambiguation", "value", "can", "be", "supplied", ".", "For", "example", ":", "taxonomy", ".", "append", "(", "many", ...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L308-L317
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.classify
def classify(self, term, **kwargs): """ Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers. """ term = self._normalize(term) if dict.__contains__(self, term): return self[term][0].keys()[-1] # If the term is not in the dictionary, check the classifiers. # Returns the first term in the list returned by a classifier. for classifier in self.classifiers: # **kwargs are useful if the classifier requests extra information, # for example the part-of-speech tag. v = classifier.parents(term, **kwargs) if v: return v[0]
python
def classify(self, term, **kwargs): """ Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers. """ term = self._normalize(term) if dict.__contains__(self, term): return self[term][0].keys()[-1] # If the term is not in the dictionary, check the classifiers. # Returns the first term in the list returned by a classifier. for classifier in self.classifiers: # **kwargs are useful if the classifier requests extra information, # for example the part-of-speech tag. v = classifier.parents(term, **kwargs) if v: return v[0]
[ "def", "classify", "(", "self", ",", "term", ",", "*", "*", "kwargs", ")", ":", "term", "=", "self", ".", "_normalize", "(", "term", ")", "if", "dict", ".", "__contains__", "(", "self", ",", "term", ")", ":", "return", "self", "[", "term", "]", "...
Returns the (most recently added) semantic type for the given term ("many" => "quantity"). If the term is not in the dictionary, try Taxonomy.classifiers.
[ "Returns", "the", "(", "most", "recently", "added", ")", "semantic", "type", "for", "the", "given", "term", "(", "many", "=", ">", "quantity", ")", ".", "If", "the", "term", "is", "not", "in", "the", "dictionary", "try", "Taxonomy", ".", "classifiers", ...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L319-L333
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.parents
def parents(self, term, recursive=False, **kwargs): """ Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root. """ def dfs(term, recursive=False, visited={}, **kwargs): if term in visited: # Break on cyclic relations. return [] visited[term], a = True, [] if dict.__contains__(self, term): a = self[term][0].keys() for classifier in self.classifiers: a.extend(classifier.parents(term, **kwargs) or []) if recursive: for w in a: a += dfs(w, recursive, visited, **kwargs) return a return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
python
def parents(self, term, recursive=False, **kwargs): """ Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root. """ def dfs(term, recursive=False, visited={}, **kwargs): if term in visited: # Break on cyclic relations. return [] visited[term], a = True, [] if dict.__contains__(self, term): a = self[term][0].keys() for classifier in self.classifiers: a.extend(classifier.parents(term, **kwargs) or []) if recursive: for w in a: a += dfs(w, recursive, visited, **kwargs) return a return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
[ "def", "parents", "(", "self", ",", "term", ",", "recursive", "=", "False", ",", "*", "*", "kwargs", ")", ":", "def", "dfs", "(", "term", ",", "recursive", "=", "False", ",", "visited", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "if", "t...
Returns a list of all semantic types for the given term. If recursive=True, traverses parents up to the root.
[ "Returns", "a", "list", "of", "all", "semantic", "types", "for", "the", "given", "term", ".", "If", "recursive", "=", "True", "traverses", "parents", "up", "to", "the", "root", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L335-L350
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Taxonomy.value
def value(self, term, **kwargs): """ Returns the value of the given term ("many" => "50-200") """ term = self._normalize(term) if term in self._values: return self._values[term] for classifier in self.classifiers: v = classifier.value(term, **kwargs) if v is not None: return v
python
def value(self, term, **kwargs): """ Returns the value of the given term ("many" => "50-200") """ term = self._normalize(term) if term in self._values: return self._values[term] for classifier in self.classifiers: v = classifier.value(term, **kwargs) if v is not None: return v
[ "def", "value", "(", "self", ",", "term", ",", "*", "*", "kwargs", ")", ":", "term", "=", "self", ".", "_normalize", "(", "term", ")", "if", "term", "in", "self", ".", "_values", ":", "return", "self", ".", "_values", "[", "term", "]", "for", "cl...
Returns the value of the given term ("many" => "50-200")
[ "Returns", "the", "value", "of", "the", "given", "term", "(", "many", "=", ">", "50", "-", "200", ")" ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L369-L378
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Constraint.fromstring
def fromstring(cls, s, **kwargs): """ Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(". """ C = cls(**kwargs) s = s.strip() s = s.strip("{}") s = s.strip() for i in range(3): # Wrapping order of control characters is ignored: # (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+? if s.startswith("^"): s = s[1: ]; C.first = True if s.endswith("+") and not s.endswith("\+"): s = s[0:-1]; C.multiple = True if s.endswith("?") and not s.endswith("\?"): s = s[0:-1]; C.optional = True if s.startswith("(") and s.endswith(")"): s = s[1:-1]; C.optional = True if s.startswith("[") and s.endswith("]"): s = s[1:-1] s = re.sub(r"^\\\^", "^", s) s = re.sub(r"\\\+$", "+", s) s = s.replace("\_", "&uscore;") s = s.replace("_"," ") s = s.replace("&uscore;", "_") s = s.replace("&lparen;", "(") s = s.replace("&rparen;", ")") s = s.replace("[", "[") s = s.replace("]", "]") s = s.replace("&lcurly;", "{") s = s.replace("&rcurly;", "}") s = s.replace("\(", "(") s = s.replace("\)", ")") s = s.replace("\[", "[") s = s.replace("\]", "]") s = s.replace("\{", "{") s = s.replace("\}", "}") s = s.replace("\*", "*") s = s.replace("\?", "?") s = s.replace("\+", "+") s = s.replace("\^", "^") s = s.replace("\|", "⊢") s = s.split("|") s = [v.replace("⊢", "|").strip() for v in s] for v in s: C._append(v) 
return C
python
def fromstring(cls, s, **kwargs): """ Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(". """ C = cls(**kwargs) s = s.strip() s = s.strip("{}") s = s.strip() for i in range(3): # Wrapping order of control characters is ignored: # (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+? if s.startswith("^"): s = s[1: ]; C.first = True if s.endswith("+") and not s.endswith("\+"): s = s[0:-1]; C.multiple = True if s.endswith("?") and not s.endswith("\?"): s = s[0:-1]; C.optional = True if s.startswith("(") and s.endswith(")"): s = s[1:-1]; C.optional = True if s.startswith("[") and s.endswith("]"): s = s[1:-1] s = re.sub(r"^\\\^", "^", s) s = re.sub(r"\\\+$", "+", s) s = s.replace("\_", "&uscore;") s = s.replace("_"," ") s = s.replace("&uscore;", "_") s = s.replace("&lparen;", "(") s = s.replace("&rparen;", ")") s = s.replace("[", "[") s = s.replace("]", "]") s = s.replace("&lcurly;", "{") s = s.replace("&rcurly;", "}") s = s.replace("\(", "(") s = s.replace("\)", ")") s = s.replace("\[", "[") s = s.replace("\]", "]") s = s.replace("\{", "{") s = s.replace("\}", "}") s = s.replace("\*", "*") s = s.replace("\?", "?") s = s.replace("\+", "+") s = s.replace("\^", "^") s = s.replace("\|", "⊢") s = s.split("|") s = [v.replace("⊢", "|").strip() for v in s] for v in s: C._append(v) 
return C
[ "def", "fromstring", "(", "cls", ",", "s", ",", "*", "*", "kwargs", ")", ":", "C", "=", "cls", "(", "*", "*", "kwargs", ")", "s", "=", "s", ".", "strip", "(", ")", "s", "=", "s", ".", "strip", "(", "\"{}\"", ")", "s", "=", "s", ".", "stri...
Returns a new Constraint from the given string. Uppercase words indicate either a tag ("NN", "JJ", "VP") or a taxonomy term (e.g., "PRODUCT", "PERSON"). Syntax: ( defines an optional constraint, e.g., "(JJ)". [ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]". _ is converted to spaces, e.g., "Windows_Vista". | separates different options, e.g., "ADJP|ADVP". ! can be used as a word prefix to disallow it. * can be used as a wildcard character, e.g., "soft*|JJ*". ? as a suffix defines a constraint that is optional, e.g., "JJ?". + as a suffix defines a constraint that can span multiple words, e.g., "JJ+". ^ as a prefix defines a constraint that can only match the first word. These characters need to be escaped if used as content: "\(".
[ "Returns", "a", "new", "Constraint", "from", "the", "given", "string", ".", "Uppercase", "words", "indicate", "either", "a", "tag", "(", "NN", "JJ", "VP", ")", "or", "a", "taxonomy", "term", "(", "e", ".", "g", ".", "PRODUCT", "PERSON", ")", ".", "Sy...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L487-L546
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Constraint.match
def match(self, word): """ Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. Individual terms in Constraint.words or the taxonomy can contain wildcards (*). Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB* If the given word contains spaces (e.g., proper noun), the entire chunk will also be compared. For example: Constraint(words=["Mac OS X*"]) matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5"). """ # If the constraint has a custom function it must return True. if self.custom is not None and self.custom(word) is False: return False # If the constraint can only match the first word, Word.index must be 0. if self.first and word.index > 0: return False # If the constraint defines excluded options, Word can not match any of these. if self.exclude and self.exclude.match(word): return False # If the constraint defines allowed tags, Word.tag needs to match one of these. if self.tags: if find(lambda w: _match(word.tag, w), self.tags) is None: return False # If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these. if self.chunks: ch = word.chunk and word.chunk.tag or None if find(lambda w: _match(ch, w), self.chunks) is None: return False # If the constraint defines allowed role, Word.chunk.tag needs to match one of these. if self.roles: R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or [] if find(lambda w: w in R, self.roles) is None: return False # If the constraint defines allowed words, # Word.string.lower() OR Word.lemma needs to match one of these. b = True # b==True when word in constraint (or Constraints.words=[]). 
if len(self.words) + len(self.taxa) > 0: s1 = word.string.lower() s2 = word.lemma b = False for w in itertools.chain(self.words, self.taxa): # If the constraint has a word with spaces (e.g., a proper noun), # compare it to the entire chunk. try: if " " in w and (s1 in w or s2 and s2 in w or "*" in w): s1 = word.chunk and word.chunk.string.lower() or s1 s2 = word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or s2 except: s1 = s1 s2 = None # Compare the word to the allowed words (which can contain wildcards). if _match(s1, w): b=True; break # Compare the word lemma to the allowed words, e.g., # if "was" is not in the constraint, perhaps "be" is, which is a good match. if s2 and _match(s2, w): b=True; break # If the constraint defines allowed taxonomy terms, # and the given word did not match an allowed word, traverse the taxonomy. # The search goes up from the given word to its parents in the taxonomy. # This is faster than traversing all the children of terms in Constraint.taxa. # The drawback is that: # 1) Wildcards in the taxonomy are not detected (use classifiers instead), # 2) Classifier.children() has no effect, only Classifier.parent(). if self.taxa and (not self.words or (self.words and not b)): for s in ( word.string, # "ants" word.lemma, # "ant" word.chunk and word.chunk.string or None, # "army ants" word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant" if s is not None: if self.taxonomy.case_sensitive is False: s = s.lower() # Compare ancestors of the word to each term in Constraint.taxa. for p in self.taxonomy.parents(s, recursive=True): if find(lambda s: p==s, self.taxa): # No wildcards. return True return b
python
def match(self, word): """ Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. Individual terms in Constraint.words or the taxonomy can contain wildcards (*). Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB* If the given word contains spaces (e.g., proper noun), the entire chunk will also be compared. For example: Constraint(words=["Mac OS X*"]) matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5"). """ # If the constraint has a custom function it must return True. if self.custom is not None and self.custom(word) is False: return False # If the constraint can only match the first word, Word.index must be 0. if self.first and word.index > 0: return False # If the constraint defines excluded options, Word can not match any of these. if self.exclude and self.exclude.match(word): return False # If the constraint defines allowed tags, Word.tag needs to match one of these. if self.tags: if find(lambda w: _match(word.tag, w), self.tags) is None: return False # If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these. if self.chunks: ch = word.chunk and word.chunk.tag or None if find(lambda w: _match(ch, w), self.chunks) is None: return False # If the constraint defines allowed role, Word.chunk.tag needs to match one of these. if self.roles: R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or [] if find(lambda w: w in R, self.roles) is None: return False # If the constraint defines allowed words, # Word.string.lower() OR Word.lemma needs to match one of these. b = True # b==True when word in constraint (or Constraints.words=[]). 
if len(self.words) + len(self.taxa) > 0: s1 = word.string.lower() s2 = word.lemma b = False for w in itertools.chain(self.words, self.taxa): # If the constraint has a word with spaces (e.g., a proper noun), # compare it to the entire chunk. try: if " " in w and (s1 in w or s2 and s2 in w or "*" in w): s1 = word.chunk and word.chunk.string.lower() or s1 s2 = word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or s2 except: s1 = s1 s2 = None # Compare the word to the allowed words (which can contain wildcards). if _match(s1, w): b=True; break # Compare the word lemma to the allowed words, e.g., # if "was" is not in the constraint, perhaps "be" is, which is a good match. if s2 and _match(s2, w): b=True; break # If the constraint defines allowed taxonomy terms, # and the given word did not match an allowed word, traverse the taxonomy. # The search goes up from the given word to its parents in the taxonomy. # This is faster than traversing all the children of terms in Constraint.taxa. # The drawback is that: # 1) Wildcards in the taxonomy are not detected (use classifiers instead), # 2) Classifier.children() has no effect, only Classifier.parent(). if self.taxa and (not self.words or (self.words and not b)): for s in ( word.string, # "ants" word.lemma, # "ant" word.chunk and word.chunk.string or None, # "army ants" word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant" if s is not None: if self.taxonomy.case_sensitive is False: s = s.lower() # Compare ancestors of the word to each term in Constraint.taxa. for p in self.taxonomy.parents(s, recursive=True): if find(lambda s: p==s, self.taxa): # No wildcards. return True return b
[ "def", "match", "(", "self", ",", "word", ")", ":", "# If the constraint has a custom function it must return True.", "if", "self", ".", "custom", "is", "not", "None", "and", "self", ".", "custom", "(", "word", ")", "is", "False", ":", "return", "False", "# If...
Return True if the given Word is part of the constraint: - the word (or lemma) occurs in Constraint.words, OR - the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND - the word and/or chunk tags match those defined in the constraint. Individual terms in Constraint.words or the taxonomy can contain wildcards (*). Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB* If the given word contains spaces (e.g., proper noun), the entire chunk will also be compared. For example: Constraint(words=["Mac OS X*"]) matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5").
[ "Return", "True", "if", "the", "given", "Word", "is", "part", "of", "the", "constraint", ":", "-", "the", "word", "(", "or", "lemma", ")", "occurs", "in", "Constraint", ".", "words", "OR", "-", "the", "word", "(", "or", "lemma", ")", "occurs", "in", ...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L571-L650
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.fromstring
def fromstring(cls, s, *args, **kwargs): """ Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in []. """ s = s.replace("\(", "&lparen;") s = s.replace("\)", "&rparen;") s = s.replace("\[", "[") s = s.replace("\]", "]") s = s.replace("\{", "&lcurly;") s = s.replace("\}", "&rcurly;") p = [] i = 0 for m in re.finditer(r"\[.*?\]|\(.*?\)", s): # Spaces in a range encapsulated in square brackets are encoded. # "[Windows Vista]" is one range, don't split on space. p.append(s[i:m.start()]) p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end() p.append(s[i:]) s = "".join(p) s = s.replace("][", "] [") s = s.replace(")(", ") (") s = s.replace("\|", "⊢") s = re.sub(r"\s+\|\s+", "|", s) s = re.sub(r"\s+", " ", s) s = re.sub(r"\{\s+", "{", s) s = re.sub(r"\s+\}", "}", s) s = s.split(" ") s = [v.replace("&space;"," ") for v in s] P = cls([], *args, **kwargs) G, O, i = [], [], 0 for s in s: constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY)) constraint.index = len(P.sequence) P.sequence.append(constraint) # Push a new group on the stack if string starts with "{". # Parse constraint from string, add it to all open groups. # Pop latest group from stack if string ends with "}". # Insert groups in opened-first order (i). while s.startswith("{"): s = s[1:] G.append((i, [])); i+=1 O.append([]) for g in G: g[1].append(constraint) while s.endswith("}"): s = s[:-1] if G: O[G[-1][0]] = G[-1][1]; G.pop() P.groups = [g for g in O if g] return P
python
def fromstring(cls, s, *args, **kwargs): """ Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in []. """ s = s.replace("\(", "&lparen;") s = s.replace("\)", "&rparen;") s = s.replace("\[", "[") s = s.replace("\]", "]") s = s.replace("\{", "&lcurly;") s = s.replace("\}", "&rcurly;") p = [] i = 0 for m in re.finditer(r"\[.*?\]|\(.*?\)", s): # Spaces in a range encapsulated in square brackets are encoded. # "[Windows Vista]" is one range, don't split on space. p.append(s[i:m.start()]) p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end() p.append(s[i:]) s = "".join(p) s = s.replace("][", "] [") s = s.replace(")(", ") (") s = s.replace("\|", "⊢") s = re.sub(r"\s+\|\s+", "|", s) s = re.sub(r"\s+", " ", s) s = re.sub(r"\{\s+", "{", s) s = re.sub(r"\s+\}", "}", s) s = s.split(" ") s = [v.replace("&space;"," ") for v in s] P = cls([], *args, **kwargs) G, O, i = [], [], 0 for s in s: constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY)) constraint.index = len(P.sequence) P.sequence.append(constraint) # Push a new group on the stack if string starts with "{". # Parse constraint from string, add it to all open groups. # Pop latest group from stack if string ends with "}". # Insert groups in opened-first order (i). while s.startswith("{"): s = s[1:] G.append((i, [])); i+=1 O.append([]) for g in G: g[1].append(constraint) while s.endswith("}"): s = s[:-1] if G: O[G[-1][0]] = G[-1][1]; G.pop() P.groups = [g for g in O if g] return P
[ "def", "fromstring", "(", "cls", ",", "s", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "s", "=", "s", ".", "replace", "(", "\"\\(\"", ",", "\"&lparen;\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\)\"", ",", "\"&rparen;\"", ")", "s",...
Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in [].
[ "Returns", "a", "new", "Pattern", "from", "the", "given", "string", ".", "Constraints", "are", "separated", "by", "a", "space", ".", "If", "a", "constraint", "contains", "a", "space", "it", "must", "be", "wrapped", "in", "[]", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L718-L767
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.scan
def scan(self, string): """ Returns True if search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it. """ # In the following example, first scan the string for "good" and "bad": # p = Pattern.fromstring("good|bad NN") # for s in open("parsed.txt"): # if p.scan(s): # s = Sentence(s) # m = p.search(s) # if m: # print(m) w = (constraint.words for constraint in self.sequence if not constraint.optional) w = itertools.chain(*w) w = [w.strip(WILDCARD) for w in w if WILDCARD not in w[1:-1]] if w and not any(w in string.lower() for w in w): return False return True
python
def scan(self, string): """ Returns True if search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it. """ # In the following example, first scan the string for "good" and "bad": # p = Pattern.fromstring("good|bad NN") # for s in open("parsed.txt"): # if p.scan(s): # s = Sentence(s) # m = p.search(s) # if m: # print(m) w = (constraint.words for constraint in self.sequence if not constraint.optional) w = itertools.chain(*w) w = [w.strip(WILDCARD) for w in w if WILDCARD not in w[1:-1]] if w and not any(w in string.lower() for w in w): return False return True
[ "def", "scan", "(", "self", ",", "string", ")", ":", "# In the following example, first scan the string for \"good\" and \"bad\":", "# p = Pattern.fromstring(\"good|bad NN\")", "# for s in open(\"parsed.txt\"):", "# if p.scan(s):", "# s = Sentence(s)", "# m = p.search(s...
Returns True if search(Sentence(string)) may yield matches. If is often faster to scan prior to creating a Sentence and searching it.
[ "Returns", "True", "if", "search", "(", "Sentence", "(", "string", "))", "may", "yield", "matches", ".", "If", "is", "often", "faster", "to", "scan", "prior", "to", "creating", "a", "Sentence", "and", "searching", "it", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L769-L786
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.search
def search(self, sentence): """ Returns a list of all matches found in the given sentence. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": a=[]; [a.extend(self.search(s)) for s in sentence]; return a elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) a = [] v = self._variations() u = {} m = self.match(sentence, _v=v) while m: a.append(m) m = self.match(sentence, start=m.words[-1].index+1, _v=v, _u=u) return a
python
def search(self, sentence): """ Returns a list of all matches found in the given sentence. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": a=[]; [a.extend(self.search(s)) for s in sentence]; return a elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) a = [] v = self._variations() u = {} m = self.match(sentence, _v=v) while m: a.append(m) m = self.match(sentence, start=m.words[-1].index+1, _v=v, _u=u) return a
[ "def", "search", "(", "self", ",", "sentence", ")", ":", "if", "sentence", ".", "__class__", ".", "__name__", "==", "\"Sentence\"", ":", "pass", "elif", "isinstance", "(", "sentence", ",", "list", ")", "or", "sentence", ".", "__class__", ".", "__name__", ...
Returns a list of all matches found in the given sentence.
[ "Returns", "a", "list", "of", "all", "matches", "found", "in", "the", "given", "sentence", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L788-L806
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.match
def match(self, sentence, start=0, _v=None, _u=None): """ Returns the first match found in the given sentence, or None. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": return find(lambda m,s: m is not None, ((self.match(s, start, _v), s) for s in sentence))[0] elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) # Variations (_v) further down the list may match words more to the front. # We need to check all of them. Unmatched variations are blacklisted (_u). # Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster). a = [] for sequence in (_v is not None and _v or self._variations()): if _u is not None and id(sequence) in _u: continue m = self._match(sequence, sentence, start) if m is not None: a.append((m.words[0].index, len(m.words), m)) if m is not None and m.words[0].index == start: return m if m is None and _u is not None: _u[id(sequence)] = False # Return the leftmost-longest. if len(a) > 0: return sorted(a)[0][-1]
python
def match(self, sentence, start=0, _v=None, _u=None): """ Returns the first match found in the given sentence, or None. """ if sentence.__class__.__name__ == "Sentence": pass elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text": return find(lambda m,s: m is not None, ((self.match(s, start, _v), s) for s in sentence))[0] elif isinstance(sentence, basestring): sentence = Sentence(sentence) elif isinstance(sentence, Match) and len(sentence) > 0: sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1) # Variations (_v) further down the list may match words more to the front. # We need to check all of them. Unmatched variations are blacklisted (_u). # Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster). a = [] for sequence in (_v is not None and _v or self._variations()): if _u is not None and id(sequence) in _u: continue m = self._match(sequence, sentence, start) if m is not None: a.append((m.words[0].index, len(m.words), m)) if m is not None and m.words[0].index == start: return m if m is None and _u is not None: _u[id(sequence)] = False # Return the leftmost-longest. if len(a) > 0: return sorted(a)[0][-1]
[ "def", "match", "(", "self", ",", "sentence", ",", "start", "=", "0", ",", "_v", "=", "None", ",", "_u", "=", "None", ")", ":", "if", "sentence", ".", "__class__", ".", "__name__", "==", "\"Sentence\"", ":", "pass", "elif", "isinstance", "(", "senten...
Returns the first match found in the given sentence, or None.
[ "Returns", "the", "first", "match", "found", "in", "the", "given", "sentence", "or", "None", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L808-L835
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.constraint
def constraint(self, word): """ Returns the constraint that matches the given Word, or None. """ if word.index in self._map1: return self._map1[word.index]
python
def constraint(self, word): """ Returns the constraint that matches the given Word, or None. """ if word.index in self._map1: return self._map1[word.index]
[ "def", "constraint", "(", "self", ",", "word", ")", ":", "if", "word", ".", "index", "in", "self", ".", "_map1", ":", "return", "self", ".", "_map1", "[", "word", ".", "index", "]" ]
Returns the constraint that matches the given Word, or None.
[ "Returns", "the", "constraint", "that", "matches", "the", "given", "Word", "or", "None", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1005-L1009
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.constraints
def constraints(self, chunk): """ Returns a list of constraints that match the given Chunk. """ a = [self._map1[w.index] for w in chunk.words if w.index in self._map1] b = []; [b.append(constraint) for constraint in a if constraint not in b] return b
python
def constraints(self, chunk): """ Returns a list of constraints that match the given Chunk. """ a = [self._map1[w.index] for w in chunk.words if w.index in self._map1] b = []; [b.append(constraint) for constraint in a if constraint not in b] return b
[ "def", "constraints", "(", "self", ",", "chunk", ")", ":", "a", "=", "[", "self", ".", "_map1", "[", "w", ".", "index", "]", "for", "w", "in", "chunk", ".", "words", "if", "w", ".", "index", "in", "self", ".", "_map1", "]", "b", "=", "[", "]"...
Returns a list of constraints that match the given Chunk.
[ "Returns", "a", "list", "of", "constraints", "that", "match", "the", "given", "Chunk", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1011-L1016
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.constituents
def constituents(self, constraint=None): """ Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index. """ # Select only words that match the given constraint. # Note: this will only work with constraints from Match.pattern.sequence. W = self.words n = len(self.pattern.sequence) if isinstance(constraint, (int, Constraint)): if isinstance(constraint, int): i = constraint i = i<0 and i%n or i else: i = self.pattern.sequence.index(constraint) W = self._map2.get(i,[]) W = [self.words[i-self.words[0].index] for i in W] if isinstance(constraint, (list, tuple)): W = []; [W.extend(self._map2.get(j<0 and j%n or j,[])) for j in constraint] W = [self.words[i-self.words[0].index] for i in W] W = unique(W) a = [] i = 0 while i < len(W): w = W[i] if w.chunk and W[i:i+len(w.chunk)] == w.chunk.words: i += len(w.chunk) - 1 a.append(w.chunk) else: a.append(w) i += 1 return a
python
def constituents(self, constraint=None): """ Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index. """ # Select only words that match the given constraint. # Note: this will only work with constraints from Match.pattern.sequence. W = self.words n = len(self.pattern.sequence) if isinstance(constraint, (int, Constraint)): if isinstance(constraint, int): i = constraint i = i<0 and i%n or i else: i = self.pattern.sequence.index(constraint) W = self._map2.get(i,[]) W = [self.words[i-self.words[0].index] for i in W] if isinstance(constraint, (list, tuple)): W = []; [W.extend(self._map2.get(j<0 and j%n or j,[])) for j in constraint] W = [self.words[i-self.words[0].index] for i in W] W = unique(W) a = [] i = 0 while i < len(W): w = W[i] if w.chunk and W[i:i+len(w.chunk)] == w.chunk.words: i += len(w.chunk) - 1 a.append(w.chunk) else: a.append(w) i += 1 return a
[ "def", "constituents", "(", "self", ",", "constraint", "=", "None", ")", ":", "# Select only words that match the given constraint.", "# Note: this will only work with constraints from Match.pattern.sequence.", "W", "=", "self", ".", "words", "n", "=", "len", "(", "self", ...
Returns a list of Word and Chunk objects, where words have been grouped into their chunks whenever possible. Optionally, returns only chunks/words that match given constraint(s), or constraint index.
[ "Returns", "a", "list", "of", "Word", "and", "Chunk", "objects", "where", "words", "have", "been", "grouped", "into", "their", "chunks", "whenever", "possible", ".", "Optionally", "returns", "only", "chunks", "/", "words", "that", "match", "given", "constraint...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1018-L1049
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Match.group
def group(self, index, chunked=False): """ Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black. """ if index < 0 or index > len(self.pattern.groups): raise IndexError("no such group") if index > 0 and index <= len(self.pattern.groups): g = self.pattern.groups[index-1] if index == 0: g = self.pattern.sequence if chunked is True: return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g])) return Group(self, [w for w in self.words if self.constraint(w) in g])
python
def group(self, index, chunked=False): """ Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black. """ if index < 0 or index > len(self.pattern.groups): raise IndexError("no such group") if index > 0 and index <= len(self.pattern.groups): g = self.pattern.groups[index-1] if index == 0: g = self.pattern.sequence if chunked is True: return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g])) return Group(self, [w for w in self.words if self.constraint(w) in g])
[ "def", "group", "(", "self", ",", "index", ",", "chunked", "=", "False", ")", ":", "if", "index", "<", "0", "or", "index", ">", "len", "(", "self", ".", "pattern", ".", "groups", ")", ":", "raise", "IndexError", "(", "\"no such group\"", ")", "if", ...
Returns a list of Word objects that match the given group. With chunked=True, returns a list of Word + Chunk objects - see Match.constituents(). A group consists of consecutive constraints wrapped in { }, e.g., search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black.
[ "Returns", "a", "list", "of", "Word", "objects", "that", "match", "the", "given", "group", ".", "With", "chunked", "=", "True", "returns", "a", "list", "of", "Word", "+", "Chunk", "objects", "-", "see", "Match", ".", "constituents", "()", ".", "A", "gr...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L1051-L1065
train
markuskiller/textblob-de
textblob_de/sentiments.py
PatternAnalyzer.analyze
def analyze(self, text): """Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated lemmas (e.g. with pos tags) but the parsing/tagging results look rather inaccurate and a wrong pos might prevent the lexicon lookup of an otherwise correctly lemmatized word form (or would it not?) - further checks needed. """ if self.lemmatize: text = self._lemmatize(text) return self.RETURN_TYPE(*pattern_sentiment(text))
python
def analyze(self, text): """Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated lemmas (e.g. with pos tags) but the parsing/tagging results look rather inaccurate and a wrong pos might prevent the lexicon lookup of an otherwise correctly lemmatized word form (or would it not?) - further checks needed. """ if self.lemmatize: text = self._lemmatize(text) return self.RETURN_TYPE(*pattern_sentiment(text))
[ "def", "analyze", "(", "self", ",", "text", ")", ":", "if", "self", ".", "lemmatize", ":", "text", "=", "self", ".", "_lemmatize", "(", "text", ")", "return", "self", ".", "RETURN_TYPE", "(", "*", "pattern_sentiment", "(", "text", ")", ")" ]
Return the sentiment as a tuple of the form: ``(polarity, subjectivity)`` :param str text: A string. .. todo:: Figure out best format to be passed to the analyzer. There might be a better format than a string of space separated lemmas (e.g. with pos tags) but the parsing/tagging results look rather inaccurate and a wrong pos might prevent the lexicon lookup of an otherwise correctly lemmatized word form (or would it not?) - further checks needed.
[ "Return", "the", "sentiment", "as", "a", "tuple", "of", "the", "form", ":", "(", "polarity", "subjectivity", ")" ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/sentiments.py#L124-L142
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
stts2universal
def stts2universal(token, tag): """ Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP """ if tag in ("KON", "KOUI", "KOUS", "KOKOM"): return (token, CONJ) if tag in ("PTKZU", "PTKNEG", "PTKVZ", "PTKANT"): return (token, PRT) if tag in ("PDF", "PDAT", "PIS", "PIAT", "PIDAT", "PPER", "PPOS", "PPOSAT"): return (token, PRON) if tag in ("PRELS", "PRELAT", "PRF", "PWS", "PWAT", "PWAV", "PAV"): return (token, PRON) return penntreebank2universal(*stts2penntreebank(token, tag))
python
def stts2universal(token, tag): """ Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP """ if tag in ("KON", "KOUI", "KOUS", "KOKOM"): return (token, CONJ) if tag in ("PTKZU", "PTKNEG", "PTKVZ", "PTKANT"): return (token, PRT) if tag in ("PDF", "PDAT", "PIS", "PIAT", "PIDAT", "PPER", "PPOS", "PPOSAT"): return (token, PRON) if tag in ("PRELS", "PRELAT", "PRF", "PWS", "PWAT", "PWAV", "PAV"): return (token, PRON) return penntreebank2universal(*stts2penntreebank(token, tag))
[ "def", "stts2universal", "(", "token", ",", "tag", ")", ":", "if", "tag", "in", "(", "\"KON\"", ",", "\"KOUI\"", ",", "\"KOUS\"", ",", "\"KOKOM\"", ")", ":", "return", "(", "token", ",", "CONJ", ")", "if", "tag", "in", "(", "\"PTKZU\"", ",", "\"PTKNE...
Converts an STTS tag to a universal tag. For example: ohne/APPR => ohne/PREP
[ "Converts", "an", "STTS", "tag", "to", "a", "universal", "tag", ".", "For", "example", ":", "ohne", "/", "APPR", "=", ">", "ohne", "/", "PREP" ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L160-L172
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
find_lemmata
def find_lemmata(tokens): """ Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list. """ for token in tokens: word, pos, lemma = token[0], token[1], token[0] if pos.startswith(("DT", "JJ")): lemma = predicative(word) if pos == "NNS": lemma = singularize(word) if pos.startswith(("VB", "MD")): lemma = conjugate(word, INFINITIVE) or word token.append(lemma.lower()) return tokens
python
def find_lemmata(tokens): """ Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list. """ for token in tokens: word, pos, lemma = token[0], token[1], token[0] if pos.startswith(("DT", "JJ")): lemma = predicative(word) if pos == "NNS": lemma = singularize(word) if pos.startswith(("VB", "MD")): lemma = conjugate(word, INFINITIVE) or word token.append(lemma.lower()) return tokens
[ "def", "find_lemmata", "(", "tokens", ")", ":", "for", "token", "in", "tokens", ":", "word", ",", "pos", ",", "lemma", "=", "token", "[", "0", "]", ",", "token", "[", "1", "]", ",", "token", "[", "0", "]", "if", "pos", ".", "startswith", "(", "...
Annotates the tokens with lemmata for plural nouns and conjugated verbs, where each token is a [word, part-of-speech] list.
[ "Annotates", "the", "tokens", "with", "lemmata", "for", "plural", "nouns", "and", "conjugated", "verbs", "where", "each", "token", "is", "a", "[", "word", "part", "-", "of", "-", "speech", "]", "list", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L186-L199
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
tree
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]): """ Returns a parsed Text from the given parsed string. """ return Text(s, token)
python
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]): """ Returns a parsed Text from the given parsed string. """ return Text(s, token)
[ "def", "tree", "(", "s", ",", "token", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "LEMMA", "]", ")", ":", "return", "Text", "(", "s", ",", "token", ")" ]
Returns a parsed Text from the given parsed string.
[ "Returns", "a", "parsed", "Text", "from", "the", "given", "parsed", "string", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L250-L253
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
tag
def tag(s, tokenize=True, encoding="utf-8", **kwargs): """ Returns a list of (token, tag)-tuples from the given string. """ tags = [] for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split(): for token in sentence: tags.append((token[0], token[1])) return tags
python
def tag(s, tokenize=True, encoding="utf-8", **kwargs): """ Returns a list of (token, tag)-tuples from the given string. """ tags = [] for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split(): for token in sentence: tags.append((token[0], token[1])) return tags
[ "def", "tag", "(", "s", ",", "tokenize", "=", "True", ",", "encoding", "=", "\"utf-8\"", ",", "*", "*", "kwargs", ")", ":", "tags", "=", "[", "]", "for", "sentence", "in", "parse", "(", "s", ",", "tokenize", ",", "True", ",", "False", ",", "False...
Returns a list of (token, tag)-tuples from the given string.
[ "Returns", "a", "list", "of", "(", "token", "tag", ")", "-", "tuples", "from", "the", "given", "string", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L255-L262
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/de/__init__.py
keywords
def keywords(s, top=10, **kwargs): """ Returns a sorted list of keywords in the given string. """ return parser.find_keywords(s, top=top, frequency=parser.frequency)
python
def keywords(s, top=10, **kwargs): """ Returns a sorted list of keywords in the given string. """ return parser.find_keywords(s, top=top, frequency=parser.frequency)
[ "def", "keywords", "(", "s", ",", "top", "=", "10", ",", "*", "*", "kwargs", ")", ":", "return", "parser", ".", "find_keywords", "(", "s", ",", "top", "=", "top", ",", "frequency", "=", "parser", ".", "frequency", ")" ]
Returns a sorted list of keywords in the given string.
[ "Returns", "a", "sorted", "list", "of", "keywords", "in", "the", "given", "string", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/de/__init__.py#L264-L267
train
markuskiller/textblob-de
textblob_de/tokenizers.py
sent_tokenize
def sent_tokenize(text, tokenizer=None): """Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. .. _TextBlob: http://textblob.readthedocs.org/ """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() return SentenceTokenizer(tokenizer=_tokenizer).itokenize(text)
python
def sent_tokenize(text, tokenizer=None): """Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. .. _TextBlob: http://textblob.readthedocs.org/ """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() return SentenceTokenizer(tokenizer=_tokenizer).itokenize(text)
[ "def", "sent_tokenize", "(", "text", ",", "tokenizer", "=", "None", ")", ":", "_tokenizer", "=", "tokenizer", "if", "tokenizer", "is", "not", "None", "else", "NLTKPunktTokenizer", "(", ")", "return", "SentenceTokenizer", "(", "tokenizer", "=", "_tokenizer", ")...
Convenience function for tokenizing sentences (not iterable). If tokenizer is not specified, the default tokenizer NLTKPunktTokenizer() is used (same behaviour as in the main `TextBlob`_ library). This function returns the sentences as a generator object. .. _TextBlob: http://textblob.readthedocs.org/
[ "Convenience", "function", "for", "tokenizing", "sentences", "(", "not", "iterable", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L306-L318
train
markuskiller/textblob-de
textblob_de/tokenizers.py
word_tokenize
def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs): """Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator). """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() words = chain.from_iterable( WordTokenizer(tokenizer=_tokenizer).itokenize(sentence, include_punc, *args, **kwargs) for sentence in sent_tokenize(text, tokenizer=_tokenizer)) return words
python
def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs): """Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator). """ _tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer() words = chain.from_iterable( WordTokenizer(tokenizer=_tokenizer).itokenize(sentence, include_punc, *args, **kwargs) for sentence in sent_tokenize(text, tokenizer=_tokenizer)) return words
[ "def", "word_tokenize", "(", "text", ",", "tokenizer", "=", "None", ",", "include_punc", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_tokenizer", "=", "tokenizer", "if", "tokenizer", "is", "not", "None", "else", "NLTKPunktTokenizer",...
Convenience function for tokenizing text into words. NOTE: NLTK's word tokenizer expects sentences as input, so the text will be tokenized to sentences before being tokenized to words. This function returns an itertools chain object (generator).
[ "Convenience", "function", "for", "tokenizing", "text", "into", "words", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L321-L335
train
markuskiller/textblob-de
textblob_de/tokenizers.py
NLTKPunktTokenizer.tokenize
def tokenize(self, text, include_punc=True, nested=False): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as nested lists of sentences. Default to False. """ self.tokens = [ w for w in ( self.word_tokenize( s, include_punc) for s in self.sent_tokenize(text))] if nested: return self.tokens else: return list(chain.from_iterable(self.tokens))
python
def tokenize(self, text, include_punc=True, nested=False): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as nested lists of sentences. Default to False. """ self.tokens = [ w for w in ( self.word_tokenize( s, include_punc) for s in self.sent_tokenize(text))] if nested: return self.tokens else: return list(chain.from_iterable(self.tokens))
[ "def", "tokenize", "(", "self", ",", "text", ",", "include_punc", "=", "True", ",", "nested", "=", "False", ")", ":", "self", ".", "tokens", "=", "[", "w", "for", "w", "in", "(", "self", ".", "word_tokenize", "(", "s", ",", "include_punc", ")", "fo...
Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. :param nested: (optional) whether to return tokens as nested lists of sentences. Default to False.
[ "Return", "a", "list", "of", "word", "tokens", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L54-L72
train
markuskiller/textblob-de
textblob_de/tokenizers.py
NLTKPunktTokenizer.sent_tokenize
def sent_tokenize(self, text, **kwargs): """NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries. """ sentences = self.sent_tok.tokenize( text, realign_boundaries=kwargs.get( "realign_boundaries", True)) return sentences
python
def sent_tokenize(self, text, **kwargs): """NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries. """ sentences = self.sent_tok.tokenize( text, realign_boundaries=kwargs.get( "realign_boundaries", True)) return sentences
[ "def", "sent_tokenize", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "sentences", "=", "self", ".", "sent_tok", ".", "tokenize", "(", "text", ",", "realign_boundaries", "=", "kwargs", ".", "get", "(", "\"realign_boundaries\"", ",", "True", ...
NLTK's sentence tokenizer (currently PunktSentenceTokenizer). Uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences, then uses that to find sentence boundaries.
[ "NLTK", "s", "sentence", "tokenizer", "(", "currently", "PunktSentenceTokenizer", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L75-L88
train
markuskiller/textblob-de
textblob_de/tokenizers.py
NLTKPunktTokenizer.word_tokenize
def word_tokenize(self, text, include_punc=True): """The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` - treat most punctuation characters as separate tokens - split off commas and single quotes, when followed by whitespace - separate periods that appear at the end of line Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014) """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] _tokens = self.word_tok.tokenize(text) #: Handle strings consisting of a single punctuation mark seperately (Issue #4) if len(_tokens) == 1: if _tokens[0] in PUNCTUATION: if include_punc: return _tokens else: return [] if include_punc: return _tokens else: # Return each word token # Strips punctuation unless the word comes from a contraction # e.g. "gibt's" => ["gibt", "'s"] in "Heute gibt's viel zu tun!" # e.g. "hat's" => ["hat", "'s"] # e.g. "home." => ['home'] words = [ word if word.startswith("'") else strip_punc( word, all=False) for word in _tokens if strip_punc( word, all=False)] return list(words)
python
def word_tokenize(self, text, include_punc=True): """The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` - treat most punctuation characters as separate tokens - split off commas and single quotes, when followed by whitespace - separate periods that appear at the end of line Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014) """ #: Do not process empty strings (Issue #3) if text.strip() == "": return [] _tokens = self.word_tok.tokenize(text) #: Handle strings consisting of a single punctuation mark seperately (Issue #4) if len(_tokens) == 1: if _tokens[0] in PUNCTUATION: if include_punc: return _tokens else: return [] if include_punc: return _tokens else: # Return each word token # Strips punctuation unless the word comes from a contraction # e.g. "gibt's" => ["gibt", "'s"] in "Heute gibt's viel zu tun!" # e.g. "hat's" => ["hat", "'s"] # e.g. "home." => ['home'] words = [ word if word.startswith("'") else strip_punc( word, all=False) for word in _tokens if strip_punc( word, all=False)] return list(words)
[ "def", "word_tokenize", "(", "self", ",", "text", ",", "include_punc", "=", "True", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "[", "]", "_tokens", "=", "self", ".", "word_tok", ...
The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. It assumes that the text has already been segmented into sentences, e.g. using ``self.sent_tokenize()``. This tokenizer performs the following steps: - split standard contractions, e.g. ``don't`` -> ``do n't`` and ``they'll`` -> ``they 'll`` - treat most punctuation characters as separate tokens - split off commas and single quotes, when followed by whitespace - separate periods that appear at the end of line Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014)
[ "The", "Treebank", "tokenizer", "uses", "regular", "expressions", "to", "tokenize", "text", "as", "in", "Penn", "Treebank", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L90-L132
train
markuskiller/textblob-de
textblob_de/tokenizers.py
PatternTokenizer.sent_tokenize
def sent_tokenize(self, text, **kwargs): """Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks. """ sentences = find_sentences(text, punctuation=kwargs.get( "punctuation", PUNCTUATION), abbreviations=kwargs.get( "abbreviations", ABBREVIATIONS_DE), replace=kwargs.get("replace", replacements), linebreak=r"\n{2,}") return sentences
python
def sent_tokenize(self, text, **kwargs): """Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks. """ sentences = find_sentences(text, punctuation=kwargs.get( "punctuation", PUNCTUATION), abbreviations=kwargs.get( "abbreviations", ABBREVIATIONS_DE), replace=kwargs.get("replace", replacements), linebreak=r"\n{2,}") return sentences
[ "def", "sent_tokenize", "(", "self", ",", "text", ",", "*", "*", "kwargs", ")", ":", "sentences", "=", "find_sentences", "(", "text", ",", "punctuation", "=", "kwargs", ".", "get", "(", "\"punctuation\"", ",", "PUNCTUATION", ")", ",", "abbreviations", "=",...
Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks.
[ "Returns", "a", "list", "of", "sentences", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L173-L192
train
markuskiller/textblob-de
textblob_de/tokenizers.py
WordTokenizer.tokenize
def tokenize(self, text, include_punc=True, **kwargs): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. """ return self.tokenizer.word_tokenize(text, include_punc, **kwargs)
python
def tokenize(self, text, include_punc=True, **kwargs): """Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True. """ return self.tokenizer.word_tokenize(text, include_punc, **kwargs)
[ "def", "tokenize", "(", "self", ",", "text", ",", "include_punc", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "tokenizer", ".", "word_tokenize", "(", "text", ",", "include_punc", ",", "*", "*", "kwargs", ")" ]
Return a list of word tokens. :param text: string of text. :param include_punc: (optional) whether to include punctuation as separate tokens. Default to True.
[ "Return", "a", "list", "of", "word", "tokens", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L254-L262
train
markuskiller/textblob-de
textblob_de/parsers.py
PatternParser.parse
def parse(self, text): """Parses the text. ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return "" #: Do not process strings consisting of a single punctuation mark (Issue #4) elif text.strip() in PUNCTUATION: _sym = text.strip() if _sym in tuple('.?!'): _tag = "." else: _tag = _sym if self.lemmata: return "{0}/{1}/O/O/{0}".format(_sym, _tag) else: return "{0}/{1}/O/O".format(_sym, _tag) if self.tokenize: _tokenized = " ".join(self.tokenizer.tokenize(text)) else: _tokenized = text _parsed = pattern_parse(_tokenized, # text is tokenized before it is passed on to # pattern.de.parse tokenize=False, tags=self.tags, chunks=self.chunks, relations=self.relations, lemmata=self.lemmata, encoding=self.encoding, tagset=self.tagset) if self.pprint: _parsed = pattern_pprint(_parsed) return _parsed
python
def parse(self, text): """Parses the text. ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string. """ #: Do not process empty strings (Issue #3) if text.strip() == "": return "" #: Do not process strings consisting of a single punctuation mark (Issue #4) elif text.strip() in PUNCTUATION: _sym = text.strip() if _sym in tuple('.?!'): _tag = "." else: _tag = _sym if self.lemmata: return "{0}/{1}/O/O/{0}".format(_sym, _tag) else: return "{0}/{1}/O/O".format(_sym, _tag) if self.tokenize: _tokenized = " ".join(self.tokenizer.tokenize(text)) else: _tokenized = text _parsed = pattern_parse(_tokenized, # text is tokenized before it is passed on to # pattern.de.parse tokenize=False, tags=self.tags, chunks=self.chunks, relations=self.relations, lemmata=self.lemmata, encoding=self.encoding, tagset=self.tagset) if self.pprint: _parsed = pattern_pprint(_parsed) return _parsed
[ "def", "parse", "(", "self", ",", "text", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "\"\"", "#: Do not process strings consisting of a single punctuation mark (Issue #4)", "elif", "text", "...
Parses the text. ``pattern.de.parse(**kwargs)`` can be passed to the parser instance and are documented in the main docstring of :class:`PatternParser() <textblob_de.parsers.PatternParser>`. :param str text: A string.
[ "Parses", "the", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/parsers.py#L77-L116
train
markuskiller/textblob-de
textblob_de/np_extractors.py
PatternParserNPExtractor.extract
def extract(self, text): """Return a list of noun phrases (strings) for a body of text. :param str text: A string. """ _extracted = [] if text.strip() == "": return _extracted parsed_sentences = self._parse_text(text) for s in parsed_sentences: tokens = s.split() new_np = [] for t in tokens: w, tag, phrase, role = t.split('/') # exclude some parser errors (e.g. VB within NP), # extend startswith tuple if necessary if 'NP' in phrase and not self._is_verb(w, tag): if len(new_np) > 0 and w.lower() in START_NEW_NP: _extracted.append(" ".join(new_np)) new_np = [w] else: # normalize capitalisation of sentence starters, except # for nouns new_np.append(w.lower() if tokens[0].startswith(w) and not tag.startswith('N') else w) else: if len(new_np) > 0: _extracted.append(" ".join(new_np)) new_np = [] return self._filter_extracted(_extracted)
python
def extract(self, text): """Return a list of noun phrases (strings) for a body of text. :param str text: A string. """ _extracted = [] if text.strip() == "": return _extracted parsed_sentences = self._parse_text(text) for s in parsed_sentences: tokens = s.split() new_np = [] for t in tokens: w, tag, phrase, role = t.split('/') # exclude some parser errors (e.g. VB within NP), # extend startswith tuple if necessary if 'NP' in phrase and not self._is_verb(w, tag): if len(new_np) > 0 and w.lower() in START_NEW_NP: _extracted.append(" ".join(new_np)) new_np = [w] else: # normalize capitalisation of sentence starters, except # for nouns new_np.append(w.lower() if tokens[0].startswith(w) and not tag.startswith('N') else w) else: if len(new_np) > 0: _extracted.append(" ".join(new_np)) new_np = [] return self._filter_extracted(_extracted)
[ "def", "extract", "(", "self", ",", "text", ")", ":", "_extracted", "=", "[", "]", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "_extracted", "parsed_sentences", "=", "self", ".", "_parse_text", "(", "text", ")", "for", "s", "in...
Return a list of noun phrases (strings) for a body of text. :param str text: A string.
[ "Return", "a", "list", "of", "noun", "phrases", "(", "strings", ")", "for", "a", "body", "of", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/np_extractors.py#L88-L118
train
markuskiller/textblob-de
textblob_de/np_extractors.py
PatternParserNPExtractor._filter_extracted
def _filter_extracted(self, extracted_list): """Filter insignificant words for key noun phrase extraction. determiners, relative pronouns, reflexive pronouns In general, pronouns are not useful, as you need context to know what they refer to. Most of the pronouns, however, are filtered out by blob.noun_phrase method's np length (>1) filter :param list extracted_list: A list of noun phrases extracted from parser output. """ _filtered = [] for np in extracted_list: _np = np.split() if _np[0] in INSIGNIFICANT: _np.pop(0) try: if _np[-1] in INSIGNIFICANT: _np.pop(-1) # e.g. 'welcher die ...' if _np[0] in INSIGNIFICANT: _np.pop(0) except IndexError: _np = [] if len(_np) > 0: _filtered.append(" ".join(_np)) return _filtered
python
def _filter_extracted(self, extracted_list): """Filter insignificant words for key noun phrase extraction. determiners, relative pronouns, reflexive pronouns In general, pronouns are not useful, as you need context to know what they refer to. Most of the pronouns, however, are filtered out by blob.noun_phrase method's np length (>1) filter :param list extracted_list: A list of noun phrases extracted from parser output. """ _filtered = [] for np in extracted_list: _np = np.split() if _np[0] in INSIGNIFICANT: _np.pop(0) try: if _np[-1] in INSIGNIFICANT: _np.pop(-1) # e.g. 'welcher die ...' if _np[0] in INSIGNIFICANT: _np.pop(0) except IndexError: _np = [] if len(_np) > 0: _filtered.append(" ".join(_np)) return _filtered
[ "def", "_filter_extracted", "(", "self", ",", "extracted_list", ")", ":", "_filtered", "=", "[", "]", "for", "np", "in", "extracted_list", ":", "_np", "=", "np", ".", "split", "(", ")", "if", "_np", "[", "0", "]", "in", "INSIGNIFICANT", ":", "_np", "...
Filter insignificant words for key noun phrase extraction. determiners, relative pronouns, reflexive pronouns In general, pronouns are not useful, as you need context to know what they refer to. Most of the pronouns, however, are filtered out by blob.noun_phrase method's np length (>1) filter :param list extracted_list: A list of noun phrases extracted from parser output.
[ "Filter", "insignificant", "words", "for", "key", "noun", "phrase", "extraction", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/np_extractors.py#L120-L146
train
markuskiller/textblob-de
textblob_de/np_extractors.py
PatternParserNPExtractor._parse_text
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/') :param str text: A string. """ if isinstance(self.tokenizer, PatternTokenizer): parsed_text = pattern_parse(text, tokenize=True, lemmata=False) else: _tokenized = [] _sentences = sent_tokenize(text, tokenizer=self.tokenizer) for s in _sentences: _tokenized.append(" ".join(self.tokenizer.tokenize(s))) parsed_text = pattern_parse( _tokenized, tokenize=False, lemmata=False) return parsed_text.split('\n')
python
def _parse_text(self, text): """Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/') :param str text: A string. """ if isinstance(self.tokenizer, PatternTokenizer): parsed_text = pattern_parse(text, tokenize=True, lemmata=False) else: _tokenized = [] _sentences = sent_tokenize(text, tokenizer=self.tokenizer) for s in _sentences: _tokenized.append(" ".join(self.tokenizer.tokenize(s))) parsed_text = pattern_parse( _tokenized, tokenize=False, lemmata=False) return parsed_text.split('\n')
[ "def", "_parse_text", "(", "self", ",", "text", ")", ":", "if", "isinstance", "(", "self", ".", "tokenizer", ",", "PatternTokenizer", ")", ":", "parsed_text", "=", "pattern_parse", "(", "text", ",", "tokenize", "=", "True", ",", "lemmata", "=", "False", ...
Parse text (string) and return list of parsed sentences (strings). Each sentence consists of space separated token elements and the token format returned by the PatternParser is WORD/TAG/PHRASE/ROLE/(LEMMA) (separated by a forward slash '/') :param str text: A string.
[ "Parse", "text", "(", "string", ")", "and", "return", "list", "of", "parsed", "sentences", "(", "strings", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/np_extractors.py#L148-L169
train
markuskiller/textblob-de
textblob_de/taggers.py
PatternTagger.tag
def tag(self, sentence, tokenize=True): """Tag a string `sentence`. :param str or list sentence: A string or a list of sentence strings. :param tokenize: (optional) If ``False`` string has to be tokenized before (space separated string). """ #: Do not process empty strings (Issue #3) if sentence.strip() == "": return [] #: Do not process strings consisting of a single punctuation mark (Issue #4) elif sentence.strip() in PUNCTUATION: if self.include_punc: _sym = sentence.strip() if _sym in tuple('.?!'): _tag = "." else: _tag = _sym return [(_sym, _tag)] else: return [] if tokenize: _tokenized = " ".join(self.tokenizer.tokenize(sentence)) sentence = _tokenized # Sentence is tokenized before it is passed on to pattern.de.tag # (i.e. it is either submitted tokenized or if ) _tagged = pattern_tag(sentence, tokenize=False, encoding=self.encoding, tagset=self.tagset) if self.include_punc: return _tagged else: _tagged = [ (word, t) for word, t in _tagged if not PUNCTUATION_REGEX.match( unicode(t))] return _tagged
python
def tag(self, sentence, tokenize=True): """Tag a string `sentence`. :param str or list sentence: A string or a list of sentence strings. :param tokenize: (optional) If ``False`` string has to be tokenized before (space separated string). """ #: Do not process empty strings (Issue #3) if sentence.strip() == "": return [] #: Do not process strings consisting of a single punctuation mark (Issue #4) elif sentence.strip() in PUNCTUATION: if self.include_punc: _sym = sentence.strip() if _sym in tuple('.?!'): _tag = "." else: _tag = _sym return [(_sym, _tag)] else: return [] if tokenize: _tokenized = " ".join(self.tokenizer.tokenize(sentence)) sentence = _tokenized # Sentence is tokenized before it is passed on to pattern.de.tag # (i.e. it is either submitted tokenized or if ) _tagged = pattern_tag(sentence, tokenize=False, encoding=self.encoding, tagset=self.tagset) if self.include_punc: return _tagged else: _tagged = [ (word, t) for word, t in _tagged if not PUNCTUATION_REGEX.match( unicode(t))] return _tagged
[ "def", "tag", "(", "self", ",", "sentence", ",", "tokenize", "=", "True", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "sentence", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "[", "]", "#: Do not process strings consisting of a single punct...
Tag a string `sentence`. :param str or list sentence: A string or a list of sentence strings. :param tokenize: (optional) If ``False`` string has to be tokenized before (space separated string).
[ "Tag", "a", "string", "sentence", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/taggers.py#L60-L96
train
markuskiller/textblob-de
textblob_de/compat.py
decode_string
def decode_string(v, encoding="utf-8"): """Returns the given value as a Unicode string (if possible).""" if isinstance(encoding, basestring): encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore")) if isinstance(v, binary_type): for e in encoding: try: return v.decode(*e) except: pass return v return unicode(v)
python
def decode_string(v, encoding="utf-8"): """Returns the given value as a Unicode string (if possible).""" if isinstance(encoding, basestring): encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore")) if isinstance(v, binary_type): for e in encoding: try: return v.decode(*e) except: pass return v return unicode(v)
[ "def", "decode_string", "(", "v", ",", "encoding", "=", "\"utf-8\"", ")", ":", "if", "isinstance", "(", "encoding", ",", "basestring", ")", ":", "encoding", "=", "(", "(", "encoding", ",", ")", ",", ")", "+", "(", "(", "\"windows-1252\"", ",", ")", "...
Returns the given value as a Unicode string (if possible).
[ "Returns", "the", "given", "value", "as", "a", "Unicode", "string", "(", "if", "possible", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/compat.py#L112-L123
train
markuskiller/textblob-de
textblob_de/compat.py
encode_string
def encode_string(v, encoding="utf-8"): """Returns the given value as a Python byte string (if possible).""" if isinstance(encoding, basestring): encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore")) if isinstance(v, unicode): for e in encoding: try: return v.encode(*e) except: pass return v return str(v)
python
def encode_string(v, encoding="utf-8"): """Returns the given value as a Python byte string (if possible).""" if isinstance(encoding, basestring): encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore")) if isinstance(v, unicode): for e in encoding: try: return v.encode(*e) except: pass return v return str(v)
[ "def", "encode_string", "(", "v", ",", "encoding", "=", "\"utf-8\"", ")", ":", "if", "isinstance", "(", "encoding", ",", "basestring", ")", ":", "encoding", "=", "(", "(", "encoding", ",", ")", ",", ")", "+", "(", "(", "\"windows-1252\"", ",", ")", "...
Returns the given value as a Python byte string (if possible).
[ "Returns", "the", "given", "value", "as", "a", "Python", "byte", "string", "(", "if", "possible", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/compat.py#L126-L137
train
markuskiller/textblob-de
textblob_de/compat.py
_shutil_which
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if not os.curdir in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path extensions. # This will allow us to short circuit when given "python.exe". # If it does match, only test that one, otherwise we have to try # others. if any([cmd.lower().endswith(ext.lower()) for ext in pathext]): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None
python
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if not os.curdir in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path extensions. # This will allow us to short circuit when given "python.exe". # If it does match, only test that one, otherwise we have to try # others. if any([cmd.lower().endswith(ext.lower()) for ext in pathext]): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None
[ "def", "_shutil_which", "(", "cmd", ",", "mode", "=", "os", ".", "F_OK", "|", "os", ".", "X_OK", ",", "path", "=", "None", ")", ":", "# Check that a given file can be accessed with the correct mode.", "# Additionally check that `file` is not a directory, as on Windows", "...
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path.
[ "Given", "a", "command", "mode", "and", "a", "PATH", "string", "return", "the", "path", "which", "conforms", "to", "the", "given", "mode", "on", "the", "PATH", "or", "None", "if", "there", "is", "no", "such", "file", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/compat.py#L156-L215
train
markuskiller/textblob-de
textblob_de/blob.py
Word.translate
def translate(self, from_lang=None, to="de"): """Translate the word to another language using Google's Translate API. .. versionadded:: 0.5.0 (``textblob``) """ if from_lang is None: from_lang = self.translator.detect(self.string) return self.translator.translate(self.string, from_lang=from_lang, to_lang=to)
python
def translate(self, from_lang=None, to="de"): """Translate the word to another language using Google's Translate API. .. versionadded:: 0.5.0 (``textblob``) """ if from_lang is None: from_lang = self.translator.detect(self.string) return self.translator.translate(self.string, from_lang=from_lang, to_lang=to)
[ "def", "translate", "(", "self", ",", "from_lang", "=", "None", ",", "to", "=", "\"de\"", ")", ":", "if", "from_lang", "is", "None", ":", "from_lang", "=", "self", ".", "translator", ".", "detect", "(", "self", ".", "string", ")", "return", "self", "...
Translate the word to another language using Google's Translate API. .. versionadded:: 0.5.0 (``textblob``)
[ "Translate", "the", "word", "to", "another", "language", "using", "Google", "s", "Translate", "API", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L100-L109
train
markuskiller/textblob-de
textblob_de/blob.py
WordList.lemmatize
def lemmatize(self): """Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property. """ _lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer()) # WordList object --> Sentence.string # add a period (improves parser accuracy) _raw = " ".join(self) + "." _lemmas = _lemmatizer.lemmatize(_raw) return self.__class__([Word(l, t) for l, t in _lemmas])
python
def lemmatize(self): """Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property. """ _lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer()) # WordList object --> Sentence.string # add a period (improves parser accuracy) _raw = " ".join(self) + "." _lemmas = _lemmatizer.lemmatize(_raw) return self.__class__([Word(l, t) for l, t in _lemmas])
[ "def", "lemmatize", "(", "self", ")", ":", "_lemmatizer", "=", "PatternParserLemmatizer", "(", "tokenizer", "=", "NLTKPunktTokenizer", "(", ")", ")", "# WordList object --> Sentence.string", "# add a period (improves parser accuracy)", "_raw", "=", "\" \"", ".", "join", ...
Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property.
[ "Return", "the", "lemma", "of", "each", "word", "in", "this", "WordList", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L305-L318
train
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.tokenize
def tokenize(self, tokenizer=None): """Return a list of tokens, using ``tokenizer``. :param tokenizer: (optional) A tokenizer object. If None, defaults to this blob's default tokenizer. """ t = tokenizer if tokenizer is not None else self.tokenizer return WordList(t.tokenize(self.raw))
python
def tokenize(self, tokenizer=None): """Return a list of tokens, using ``tokenizer``. :param tokenizer: (optional) A tokenizer object. If None, defaults to this blob's default tokenizer. """ t = tokenizer if tokenizer is not None else self.tokenizer return WordList(t.tokenize(self.raw))
[ "def", "tokenize", "(", "self", ",", "tokenizer", "=", "None", ")", ":", "t", "=", "tokenizer", "if", "tokenizer", "is", "not", "None", "else", "self", ".", "tokenizer", "return", "WordList", "(", "t", ".", "tokenize", "(", "self", ".", "raw", ")", "...
Return a list of tokens, using ``tokenizer``. :param tokenizer: (optional) A tokenizer object. If None, defaults to this blob's default tokenizer.
[ "Return", "a", "list", "of", "tokens", "using", "tokenizer", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L405-L413
train
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.noun_phrases
def noun_phrases(self): """Returns a list of noun phrases for this blob.""" return WordList([phrase.strip() for phrase in self.np_extractor.extract(self.raw) if len(phrase.split()) > 1])
python
def noun_phrases(self): """Returns a list of noun phrases for this blob.""" return WordList([phrase.strip() for phrase in self.np_extractor.extract(self.raw) if len(phrase.split()) > 1])
[ "def", "noun_phrases", "(", "self", ")", ":", "return", "WordList", "(", "[", "phrase", ".", "strip", "(", ")", "for", "phrase", "in", "self", ".", "np_extractor", ".", "extract", "(", "self", ".", "raw", ")", "if", "len", "(", "phrase", ".", "split"...
Returns a list of noun phrases for this blob.
[ "Returns", "a", "list", "of", "noun", "phrases", "for", "this", "blob", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L456-L460
train
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.pos_tags
def pos_tags(self): """Returns an list of tuples of the form (word, POS tag). Example: :: [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'), ('Thursday', 'NNP'), ('morning', 'NN')] :rtype: list of tuples """ return [(Word(word, pos_tag=t), unicode(t)) for word, t in self.pos_tagger.tag(self.raw) # new keyword PatternTagger(include_punc=False) # if not PUNCTUATION_REGEX.match(unicode(t)) ]
python
def pos_tags(self): """Returns an list of tuples of the form (word, POS tag). Example: :: [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'), ('Thursday', 'NNP'), ('morning', 'NN')] :rtype: list of tuples """ return [(Word(word, pos_tag=t), unicode(t)) for word, t in self.pos_tagger.tag(self.raw) # new keyword PatternTagger(include_punc=False) # if not PUNCTUATION_REGEX.match(unicode(t)) ]
[ "def", "pos_tags", "(", "self", ")", ":", "return", "[", "(", "Word", "(", "word", ",", "pos_tag", "=", "t", ")", ",", "unicode", "(", "t", ")", ")", "for", "word", ",", "t", "in", "self", ".", "pos_tagger", ".", "tag", "(", "self", ".", "raw",...
Returns an list of tuples of the form (word, POS tag). Example: :: [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'), ('Thursday', 'NNP'), ('morning', 'NN')] :rtype: list of tuples
[ "Returns", "an", "list", "of", "tuples", "of", "the", "form", "(", "word", "POS", "tag", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L463-L479
train
markuskiller/textblob-de
textblob_de/blob.py
BaseBlob.word_counts
def word_counts(self): """Dictionary of word frequencies in this text.""" counts = defaultdict(int) stripped_words = [lowerstrip(word) for word in self.words] for word in stripped_words: counts[word] += 1 return counts
python
def word_counts(self): """Dictionary of word frequencies in this text.""" counts = defaultdict(int) stripped_words = [lowerstrip(word) for word in self.words] for word in stripped_words: counts[word] += 1 return counts
[ "def", "word_counts", "(", "self", ")", ":", "counts", "=", "defaultdict", "(", "int", ")", "stripped_words", "=", "[", "lowerstrip", "(", "word", ")", "for", "word", "in", "self", ".", "words", "]", "for", "word", "in", "stripped_words", ":", "counts", ...
Dictionary of word frequencies in this text.
[ "Dictionary", "of", "word", "frequencies", "in", "this", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L484-L490
train
markuskiller/textblob-de
textblob_de/blob.py
Sentence.dict
def dict(self): """The dict representation of this sentence.""" return { 'raw': self.raw, 'start_index': self.start_index, 'end_index': self.end_index, 'stripped': self.stripped, 'noun_phrases': self.noun_phrases, 'polarity': self.polarity, 'subjectivity': self.subjectivity, }
python
def dict(self): """The dict representation of this sentence.""" return { 'raw': self.raw, 'start_index': self.start_index, 'end_index': self.end_index, 'stripped': self.stripped, 'noun_phrases': self.noun_phrases, 'polarity': self.polarity, 'subjectivity': self.subjectivity, }
[ "def", "dict", "(", "self", ")", ":", "return", "{", "'raw'", ":", "self", ".", "raw", ",", "'start_index'", ":", "self", ".", "start_index", ",", "'end_index'", ":", "self", ".", "end_index", ",", "'stripped'", ":", "self", ".", "stripped", ",", "'nou...
The dict representation of this sentence.
[ "The", "dict", "representation", "of", "this", "sentence", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L614-L624
train
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE.words
def words(self): """Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens. """ return WordList( word_tokenize(self.raw, self.tokenizer, include_punc=False))
python
def words(self): """Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens. """ return WordList( word_tokenize(self.raw, self.tokenizer, include_punc=False))
[ "def", "words", "(", "self", ")", ":", "return", "WordList", "(", "word_tokenize", "(", "self", ".", "raw", ",", "self", ".", "tokenizer", ",", "include_punc", "=", "False", ")", ")" ]
Return a list of word tokens. This excludes punctuation characters. If you want to include punctuation characters, access the ``tokens`` property. :returns: A :class:`WordList <WordList>` of word tokens.
[ "Return", "a", "list", "of", "word", "tokens", ".", "This", "excludes", "punctuation", "characters", ".", "If", "you", "want", "to", "include", "punctuation", "characters", "access", "the", "tokens", "property", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L650-L659
train
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE.sentiment
def sentiment(self): """Return a tuple of form (polarity, subjectivity ) where polarity is a float within the range [-1.0, 1.0] and subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective. :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)`` """ #: Enhancement Issue #2 #: adapted from 'textblob.en.sentiments.py' #: Return type declaration _RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity']) _polarity = 0 _subjectivity = 0 for s in self.sentences: _polarity += s.polarity _subjectivity += s.subjectivity try: polarity = _polarity / len(self.sentences) except ZeroDivisionError: polarity = 0.0 try: subjectivity = _subjectivity / len(self.sentences) except ZeroDivisionError: subjectivity = 0.0 return _RETURN_TYPE(polarity, subjectivity)
python
def sentiment(self): """Return a tuple of form (polarity, subjectivity ) where polarity is a float within the range [-1.0, 1.0] and subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective. :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)`` """ #: Enhancement Issue #2 #: adapted from 'textblob.en.sentiments.py' #: Return type declaration _RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity']) _polarity = 0 _subjectivity = 0 for s in self.sentences: _polarity += s.polarity _subjectivity += s.subjectivity try: polarity = _polarity / len(self.sentences) except ZeroDivisionError: polarity = 0.0 try: subjectivity = _subjectivity / len(self.sentences) except ZeroDivisionError: subjectivity = 0.0 return _RETURN_TYPE(polarity, subjectivity)
[ "def", "sentiment", "(", "self", ")", ":", "#: Enhancement Issue #2", "#: adapted from 'textblob.en.sentiments.py'", "#: Return type declaration", "_RETURN_TYPE", "=", "namedtuple", "(", "'Sentiment'", ",", "[", "'polarity'", ",", "'subjectivity'", "]", ")", "_polarity", ...
Return a tuple of form (polarity, subjectivity ) where polarity is a float within the range [-1.0, 1.0] and subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective. :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
[ "Return", "a", "tuple", "of", "form", "(", "polarity", "subjectivity", ")", "where", "polarity", "is", "a", "float", "within", "the", "range", "[", "-", "1", ".", "0", "1", ".", "0", "]", "and", "subjectivity", "is", "a", "float", "within", "the", "r...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L667-L692
train
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE.to_json
def to_json(self, *args, **kwargs): """Return a json representation (str) of this blob. Takes the same arguments as json.dumps. .. versionadded:: 0.5.1 (``textblob``) """ return json.dumps(self.serialized, *args, **kwargs)
python
def to_json(self, *args, **kwargs): """Return a json representation (str) of this blob. Takes the same arguments as json.dumps. .. versionadded:: 0.5.1 (``textblob``) """ return json.dumps(self.serialized, *args, **kwargs)
[ "def", "to_json", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "json", ".", "dumps", "(", "self", ".", "serialized", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a json representation (str) of this blob. Takes the same arguments as json.dumps. .. versionadded:: 0.5.1 (``textblob``)
[ "Return", "a", "json", "representation", "(", "str", ")", "of", "this", "blob", ".", "Takes", "the", "same", "arguments", "as", "json", ".", "dumps", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L728-L735
train
markuskiller/textblob-de
textblob_de/blob.py
TextBlobDE._create_sentence_objects
def _create_sentence_objects(self): """Returns a list of Sentence objects from the raw text.""" sentence_objects = [] sentences = sent_tokenize(self.raw, tokenizer=self.tokenizer) char_index = 0 # Keeps track of character index within the blob for sent in sentences: # Compute the start and end indices of the sentence # within the blob. This only works if the sentence splitter # does not perform any character replacements or changes to # white space. # Working: NLTKPunktTokenizer # Not working: PatternTokenizer try: start_index = self.raw.index(sent, char_index) char_index += len(sent) end_index = start_index + len(sent) except ValueError: start_index = None end_index = None # Sentences share the same models as their parent blob s = Sentence( sent, start_index=start_index, end_index=end_index, tokenizer=self.tokenizer, np_extractor=self.np_extractor, pos_tagger=self.pos_tagger, analyzer=self.analyzer, parser=self.parser, classifier=self.classifier) sentence_objects.append(s) return sentence_objects
python
def _create_sentence_objects(self): """Returns a list of Sentence objects from the raw text.""" sentence_objects = [] sentences = sent_tokenize(self.raw, tokenizer=self.tokenizer) char_index = 0 # Keeps track of character index within the blob for sent in sentences: # Compute the start and end indices of the sentence # within the blob. This only works if the sentence splitter # does not perform any character replacements or changes to # white space. # Working: NLTKPunktTokenizer # Not working: PatternTokenizer try: start_index = self.raw.index(sent, char_index) char_index += len(sent) end_index = start_index + len(sent) except ValueError: start_index = None end_index = None # Sentences share the same models as their parent blob s = Sentence( sent, start_index=start_index, end_index=end_index, tokenizer=self.tokenizer, np_extractor=self.np_extractor, pos_tagger=self.pos_tagger, analyzer=self.analyzer, parser=self.parser, classifier=self.classifier) sentence_objects.append(s) return sentence_objects
[ "def", "_create_sentence_objects", "(", "self", ")", ":", "sentence_objects", "=", "[", "]", "sentences", "=", "sent_tokenize", "(", "self", ".", "raw", ",", "tokenizer", "=", "self", ".", "tokenizer", ")", "char_index", "=", "0", "# Keeps track of character ind...
Returns a list of Sentence objects from the raw text.
[ "Returns", "a", "list", "of", "Sentence", "objects", "from", "the", "raw", "text", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/blob.py#L748-L779
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
ngrams
def ngrams(string, n=3, punctuation=PUNCTUATION, continuous=False): """ Returns a list of n-grams (tuples of n successive words) from the given string. Alternatively, you can supply a Text or Sentence object. With continuous=False, n-grams will not run over sentence markers (i.e., .!?). Punctuation marks are stripped from words. """ def strip_punctuation(s, punctuation=set(punctuation)): return [w for w in s if (isinstance(w, Word) and w.string or w) not in punctuation] if n <= 0: return [] if isinstance(string, basestring): s = [strip_punctuation(s.split(" ")) for s in tokenize(string)] if isinstance(string, Sentence): s = [strip_punctuation(string)] if isinstance(string, Text): s = [strip_punctuation(s) for s in string] if continuous: s = [sum(s, [])] g = [] for s in s: #s = [None] + s + [None] g.extend([tuple(s[i:i+n]) for i in range(len(s)-n+1)]) return g
python
def ngrams(string, n=3, punctuation=PUNCTUATION, continuous=False): """ Returns a list of n-grams (tuples of n successive words) from the given string. Alternatively, you can supply a Text or Sentence object. With continuous=False, n-grams will not run over sentence markers (i.e., .!?). Punctuation marks are stripped from words. """ def strip_punctuation(s, punctuation=set(punctuation)): return [w for w in s if (isinstance(w, Word) and w.string or w) not in punctuation] if n <= 0: return [] if isinstance(string, basestring): s = [strip_punctuation(s.split(" ")) for s in tokenize(string)] if isinstance(string, Sentence): s = [strip_punctuation(string)] if isinstance(string, Text): s = [strip_punctuation(s) for s in string] if continuous: s = [sum(s, [])] g = [] for s in s: #s = [None] + s + [None] g.extend([tuple(s[i:i+n]) for i in range(len(s)-n+1)]) return g
[ "def", "ngrams", "(", "string", ",", "n", "=", "3", ",", "punctuation", "=", "PUNCTUATION", ",", "continuous", "=", "False", ")", ":", "def", "strip_punctuation", "(", "s", ",", "punctuation", "=", "set", "(", "punctuation", ")", ")", ":", "return", "[...
Returns a list of n-grams (tuples of n successive words) from the given string. Alternatively, you can supply a Text or Sentence object. With continuous=False, n-grams will not run over sentence markers (i.e., .!?). Punctuation marks are stripped from words.
[ "Returns", "a", "list", "of", "n", "-", "grams", "(", "tuples", "of", "n", "successive", "words", ")", "from", "the", "given", "string", ".", "Alternatively", "you", "can", "supply", "a", "Text", "or", "Sentence", "object", ".", "With", "continuous", "="...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L79-L101
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
deflood
def deflood(s, n=3): """ Returns the string with no more than n repeated characters, e.g., deflood("NIIIICE!!", n=1) => "Nice!" deflood("nice.....", n=3) => "nice..." """ if n == 0: return s[0:0] return re.sub(r"((.)\2{%s,})" % (n-1), lambda m: m.group(1)[0] * n, s)
python
def deflood(s, n=3): """ Returns the string with no more than n repeated characters, e.g., deflood("NIIIICE!!", n=1) => "Nice!" deflood("nice.....", n=3) => "nice..." """ if n == 0: return s[0:0] return re.sub(r"((.)\2{%s,})" % (n-1), lambda m: m.group(1)[0] * n, s)
[ "def", "deflood", "(", "s", ",", "n", "=", "3", ")", ":", "if", "n", "==", "0", ":", "return", "s", "[", "0", ":", "0", "]", "return", "re", ".", "sub", "(", "r\"((.)\\2{%s,})\"", "%", "(", "n", "-", "1", ")", ",", "lambda", "m", ":", "m", ...
Returns the string with no more than n repeated characters, e.g., deflood("NIIIICE!!", n=1) => "Nice!" deflood("nice.....", n=3) => "nice..."
[ "Returns", "the", "string", "with", "no", "more", "than", "n", "repeated", "characters", "e", ".", "g", ".", "deflood", "(", "NIIIICE!!", "n", "=", "1", ")", "=", ">", "Nice!", "deflood", "(", "nice", ".....", "n", "=", "3", ")", "=", ">", "nice", ...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L103-L110
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
pprint
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4): """ Pretty-prints the output of Parser.parse() as a table with outlined columns. Alternatively, you can supply a tree.Text or tree.Sentence object. """ if isinstance(string, basestring): print("\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)])) if isinstance(string, Text): print("\n\n".join([table(sentence, fill=column) for sentence in string])) if isinstance(string, Sentence): print(table(string, fill=column))
python
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4): """ Pretty-prints the output of Parser.parse() as a table with outlined columns. Alternatively, you can supply a tree.Text or tree.Sentence object. """ if isinstance(string, basestring): print("\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)])) if isinstance(string, Text): print("\n\n".join([table(sentence, fill=column) for sentence in string])) if isinstance(string, Sentence): print(table(string, fill=column))
[ "def", "pprint", "(", "string", ",", "token", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", "]", ",", "column", "=", "4", ")", ":", "if", "isinstance", "(", "string", ",", "basestring", ")", ":", "print", "(", "\"\\n\\n\"", ".", "join", ...
Pretty-prints the output of Parser.parse() as a table with outlined columns. Alternatively, you can supply a tree.Text or tree.Sentence object.
[ "Pretty", "-", "prints", "the", "output", "of", "Parser", ".", "parse", "()", "as", "a", "table", "with", "outlined", "columns", ".", "Alternatively", "you", "can", "supply", "a", "tree", ".", "Text", "or", "tree", ".", "Sentence", "object", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L112-L121
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
_read
def _read(path, encoding="utf-8", comment=";;;"): """ Returns an iterator over the lines in the file at the given path, strippping comments and decoding each line to Unicode. """ if path: if isinstance(path, basestring) and os.path.exists(path): # From file path. if PY2: f = codecs.open(path, 'r', encoding='utf-8') else: f = open(path, 'r', encoding='utf-8') elif isinstance(path, basestring): # From string. f = path.splitlines() else: # From file or buffer. f = path for i, line in enumerate(f): line = line.strip(codecs.BOM_UTF8) if i == 0 and isinstance(line, binary_type) else line line = line.strip() line = decode_utf8(line, encoding) if not line or (comment and line.startswith(comment)): continue yield line return
python
def _read(path, encoding="utf-8", comment=";;;"): """ Returns an iterator over the lines in the file at the given path, strippping comments and decoding each line to Unicode. """ if path: if isinstance(path, basestring) and os.path.exists(path): # From file path. if PY2: f = codecs.open(path, 'r', encoding='utf-8') else: f = open(path, 'r', encoding='utf-8') elif isinstance(path, basestring): # From string. f = path.splitlines() else: # From file or buffer. f = path for i, line in enumerate(f): line = line.strip(codecs.BOM_UTF8) if i == 0 and isinstance(line, binary_type) else line line = line.strip() line = decode_utf8(line, encoding) if not line or (comment and line.startswith(comment)): continue yield line return
[ "def", "_read", "(", "path", ",", "encoding", "=", "\"utf-8\"", ",", "comment", "=", "\";;;\"", ")", ":", "if", "path", ":", "if", "isinstance", "(", "path", ",", "basestring", ")", "and", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "#...
Returns an iterator over the lines in the file at the given path, strippping comments and decoding each line to Unicode.
[ "Returns", "an", "iterator", "over", "the", "lines", "in", "the", "file", "at", "the", "given", "path", "strippping", "comments", "and", "decoding", "each", "line", "to", "Unicode", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L218-L242
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
penntreebank2universal
def penntreebank2universal(token, tag): """ Returns a (token, tag)-tuple with a simplified universal part-of-speech tag. """ if tag.startswith(("NNP-", "NNPS-")): return (token, "%s-%s" % (NOUN, tag.split("-")[-1])) if tag in ("NN", "NNS", "NNP", "NNPS", "NP"): return (token, NOUN) if tag in ("MD", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ"): return (token, VERB) if tag in ("JJ", "JJR", "JJS"): return (token, ADJ) if tag in ("RB", "RBR", "RBS", "WRB"): return (token, ADV) if tag in ("PRP", "PRP$", "WP", "WP$"): return (token, PRON) if tag in ("DT", "PDT", "WDT", "EX"): return (token, DET) if tag in ("IN",): return (token, PREP) if tag in ("CD",): return (token, NUM) if tag in ("CC",): return (token, CONJ) if tag in ("UH",): return (token, INTJ) if tag in ("POS", "RP", "TO"): return (token, PRT) if tag in ("SYM", "LS", ".", "!", "?", ",", ":", "(", ")", "\"", "#", "$"): return (token, PUNC) return (token, X)
python
def penntreebank2universal(token, tag): """ Returns a (token, tag)-tuple with a simplified universal part-of-speech tag. """ if tag.startswith(("NNP-", "NNPS-")): return (token, "%s-%s" % (NOUN, tag.split("-")[-1])) if tag in ("NN", "NNS", "NNP", "NNPS", "NP"): return (token, NOUN) if tag in ("MD", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ"): return (token, VERB) if tag in ("JJ", "JJR", "JJS"): return (token, ADJ) if tag in ("RB", "RBR", "RBS", "WRB"): return (token, ADV) if tag in ("PRP", "PRP$", "WP", "WP$"): return (token, PRON) if tag in ("DT", "PDT", "WDT", "EX"): return (token, DET) if tag in ("IN",): return (token, PREP) if tag in ("CD",): return (token, NUM) if tag in ("CC",): return (token, CONJ) if tag in ("UH",): return (token, INTJ) if tag in ("POS", "RP", "TO"): return (token, PRT) if tag in ("SYM", "LS", ".", "!", "?", ",", ":", "(", ")", "\"", "#", "$"): return (token, PUNC) return (token, X)
[ "def", "penntreebank2universal", "(", "token", ",", "tag", ")", ":", "if", "tag", ".", "startswith", "(", "(", "\"NNP-\"", ",", "\"NNPS-\"", ")", ")", ":", "return", "(", "token", ",", "\"%s-%s\"", "%", "(", "NOUN", ",", "tag", ".", "split", "(", "\"...
Returns a (token, tag)-tuple with a simplified universal part-of-speech tag.
[ "Returns", "a", "(", "token", "tag", ")", "-", "tuple", "with", "a", "simplified", "universal", "part", "-", "of", "-", "speech", "tag", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L891-L920
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_tokens
def find_tokens(string, punctuation=PUNCTUATION, abbreviations=ABBREVIATIONS, replace=replacements, linebreak=r"\n{2,}"): """ Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks. """ # Handle periods separately. punctuation = tuple(punctuation.replace(".", "")) # Handle replacements (contractions). for a, b in replace.items(): string = re.sub(a, b, string) # Handle Unicode quotes. if isinstance(string, unicode): string = string.replace(u"“", u" “ ") string = string.replace(u"”", u" ” ") string = string.replace(u"‘", u" ‘ ") string = string.replace(u"’", u" ’ ") # Collapse whitespace. string = re.sub("\r\n", "\n", string) string = re.sub(linebreak, " %s " % EOS, string) string = re.sub(r"\s+", " ", string) tokens = [] # Handle punctuation marks. for t in TOKEN.findall(string+" "): if len(t) > 0: tail = [] while t.startswith(punctuation) and \ not t in replace: # Split leading punctuation. if t.startswith(punctuation): tokens.append(t[0]); t=t[1:] while t.endswith(punctuation+(".",)) and \ not t in replace: # Split trailing punctuation. if t.endswith(punctuation): tail.append(t[-1]); t=t[:-1] # Split ellipsis (...) before splitting period. if t.endswith("..."): tail.append("..."); t=t[:-3].rstrip(".") # Split period (if not an abbreviation). if t.endswith("."): if t in abbreviations or \ RE_ABBR1.match(t) is not None or \ RE_ABBR2.match(t) is not None or \ RE_ABBR3.match(t) is not None: break else: tail.append(t[-1]); t=t[:-1] if t != "": tokens.append(t) tokens.extend(reversed(tail)) # Handle sentence breaks (periods, quotes, parenthesis). 
sentences, i, j = [[]], 0, 0 while j < len(tokens): if tokens[j] in ("...", ".", "!", "?", EOS): while j < len(tokens) \ and tokens[j] in ("'", "\"", u"”", u"’", "...", ".", "!", "?", ")", EOS): if tokens[j] in ("'", "\"") and sentences[-1].count(tokens[j]) % 2 == 0: break # Balanced quotes. j += 1 sentences[-1].extend(t for t in tokens[i:j] if t != EOS) sentences.append([]) i = j j += 1 # Handle emoticons. sentences[-1].extend(tokens[i:j]) sentences = (" ".join(s) for s in sentences if len(s) > 0) sentences = (RE_SARCASM.sub("(!)", s) for s in sentences) sentences = [RE_EMOTICONS.sub( lambda m: m.group(1).replace(" ", "") + m.group(2), s) for s in sentences] return sentences
python
def find_tokens(string, punctuation=PUNCTUATION, abbreviations=ABBREVIATIONS, replace=replacements, linebreak=r"\n{2,}"): """ Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks. """ # Handle periods separately. punctuation = tuple(punctuation.replace(".", "")) # Handle replacements (contractions). for a, b in replace.items(): string = re.sub(a, b, string) # Handle Unicode quotes. if isinstance(string, unicode): string = string.replace(u"“", u" “ ") string = string.replace(u"”", u" ” ") string = string.replace(u"‘", u" ‘ ") string = string.replace(u"’", u" ’ ") # Collapse whitespace. string = re.sub("\r\n", "\n", string) string = re.sub(linebreak, " %s " % EOS, string) string = re.sub(r"\s+", " ", string) tokens = [] # Handle punctuation marks. for t in TOKEN.findall(string+" "): if len(t) > 0: tail = [] while t.startswith(punctuation) and \ not t in replace: # Split leading punctuation. if t.startswith(punctuation): tokens.append(t[0]); t=t[1:] while t.endswith(punctuation+(".",)) and \ not t in replace: # Split trailing punctuation. if t.endswith(punctuation): tail.append(t[-1]); t=t[:-1] # Split ellipsis (...) before splitting period. if t.endswith("..."): tail.append("..."); t=t[:-3].rstrip(".") # Split period (if not an abbreviation). if t.endswith("."): if t in abbreviations or \ RE_ABBR1.match(t) is not None or \ RE_ABBR2.match(t) is not None or \ RE_ABBR3.match(t) is not None: break else: tail.append(t[-1]); t=t[:-1] if t != "": tokens.append(t) tokens.extend(reversed(tail)) # Handle sentence breaks (periods, quotes, parenthesis). 
sentences, i, j = [[]], 0, 0 while j < len(tokens): if tokens[j] in ("...", ".", "!", "?", EOS): while j < len(tokens) \ and tokens[j] in ("'", "\"", u"”", u"’", "...", ".", "!", "?", ")", EOS): if tokens[j] in ("'", "\"") and sentences[-1].count(tokens[j]) % 2 == 0: break # Balanced quotes. j += 1 sentences[-1].extend(t for t in tokens[i:j] if t != EOS) sentences.append([]) i = j j += 1 # Handle emoticons. sentences[-1].extend(tokens[i:j]) sentences = (" ".join(s) for s in sentences if len(s) > 0) sentences = (RE_SARCASM.sub("(!)", s) for s in sentences) sentences = [RE_EMOTICONS.sub( lambda m: m.group(1).replace(" ", "") + m.group(2), s) for s in sentences] return sentences
[ "def", "find_tokens", "(", "string", ",", "punctuation", "=", "PUNCTUATION", ",", "abbreviations", "=", "ABBREVIATIONS", ",", "replace", "=", "replacements", ",", "linebreak", "=", "r\"\\n{2,}\"", ")", ":", "# Handle periods separately.", "punctuation", "=", "tuple"...
Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks.
[ "Returns", "a", "list", "of", "sentences", ".", "Each", "sentence", "is", "a", "space", "-", "separated", "string", "of", "tokens", "(", "words", ")", ".", "Handles", "common", "cases", "of", "abbreviations", "(", "e", ".", "g", ".", "etc", ".", "...",...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L976-L1046
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
_suffix_rules
def _suffix_rules(token, tag="NN"): """ Default morphological tagging rules for English, based on word suffixes. """ if isinstance(token, (list, tuple)): token, tag = token if token.endswith("ing"): tag = "VBG" if token.endswith("ly"): tag = "RB" if token.endswith("s") and not token.endswith(("is", "ous", "ss")): tag = "NNS" if token.endswith(("able", "al", "ful", "ible", "ient", "ish", "ive", "less", "tic", "ous")) or "-" in token: tag = "JJ" if token.endswith("ed"): tag = "VBN" if token.endswith(("ate", "ify", "ise", "ize")): tag = "VBP" return [token, tag]
python
def _suffix_rules(token, tag="NN"): """ Default morphological tagging rules for English, based on word suffixes. """ if isinstance(token, (list, tuple)): token, tag = token if token.endswith("ing"): tag = "VBG" if token.endswith("ly"): tag = "RB" if token.endswith("s") and not token.endswith(("is", "ous", "ss")): tag = "NNS" if token.endswith(("able", "al", "ful", "ible", "ient", "ish", "ive", "less", "tic", "ous")) or "-" in token: tag = "JJ" if token.endswith("ed"): tag = "VBN" if token.endswith(("ate", "ify", "ise", "ize")): tag = "VBP" return [token, tag]
[ "def", "_suffix_rules", "(", "token", ",", "tag", "=", "\"NN\"", ")", ":", "if", "isinstance", "(", "token", ",", "(", "list", ",", "tuple", ")", ")", ":", "token", ",", "tag", "=", "token", "if", "token", ".", "endswith", "(", "\"ing\"", ")", ":",...
Default morphological tagging rules for English, based on word suffixes.
[ "Default", "morphological", "tagging", "rules", "for", "English", "based", "on", "word", "suffixes", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1053-L1070
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_tags
def find_tags(tokens, lexicon={}, model=None, morphology=None, context=None, entities=None, default=("NN", "NNP", "CD"), language="en", map=None, **kwargs): """ Returns a list of [token, tag]-items for the given list of tokens: ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] Words are tagged using the given lexicon of (word, tag)-items. Unknown words are tagged NN by default. Unknown words that start with a capital letter are tagged NNP (unless language="de"). Unknown words that consist only of digits and punctuation marks are tagged CD. Unknown words are then improved with morphological rules. All words are improved with contextual rules. If a model is given, uses model for unknown words instead of morphology and context. If map is a function, it is applied to each (token, tag) after applying all rules. """ tagged = [] # Tag known words. for i, token in enumerate(tokens): tagged.append([token, lexicon.get(token, i == 0 and lexicon.get(token.lower()) or None)]) # Tag unknown words. for i, (token, tag) in enumerate(tagged): prev, next = (None, None), (None, None) if i > 0: prev = tagged[i-1] if i < len(tagged) - 1: next = tagged[i+1] if tag is None or token in (model is not None and model.unknown or ()): # Use language model (i.e., SLP). if model is not None: tagged[i] = model.apply([token, None], prev, next) # Use NNP for capitalized words (except in German). elif token.istitle() and language != "de": tagged[i] = [token, default[1]] # Use CD for digits and numbers. elif CD.match(token) is not None: tagged[i] = [token, default[2]] # Use suffix rules (e.g., -ly = RB). elif morphology is not None: tagged[i] = morphology.apply([token, default[0]], prev, next) # Use suffix rules (English default). elif language == "en": tagged[i] = _suffix_rules([token, default[0]]) # Use most frequent tag (NN). else: tagged[i] = [token, default[0]] # Tag words by context. 
if context is not None and model is None: tagged = context.apply(tagged) # Tag named entities. if entities is not None: tagged = entities.apply(tagged) # Map tags with a custom function. if map is not None: tagged = [list(map(token, tag)) or [token, default[0]] for token, tag in tagged] return tagged
python
def find_tags(tokens, lexicon={}, model=None, morphology=None, context=None, entities=None, default=("NN", "NNP", "CD"), language="en", map=None, **kwargs): """ Returns a list of [token, tag]-items for the given list of tokens: ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] Words are tagged using the given lexicon of (word, tag)-items. Unknown words are tagged NN by default. Unknown words that start with a capital letter are tagged NNP (unless language="de"). Unknown words that consist only of digits and punctuation marks are tagged CD. Unknown words are then improved with morphological rules. All words are improved with contextual rules. If a model is given, uses model for unknown words instead of morphology and context. If map is a function, it is applied to each (token, tag) after applying all rules. """ tagged = [] # Tag known words. for i, token in enumerate(tokens): tagged.append([token, lexicon.get(token, i == 0 and lexicon.get(token.lower()) or None)]) # Tag unknown words. for i, (token, tag) in enumerate(tagged): prev, next = (None, None), (None, None) if i > 0: prev = tagged[i-1] if i < len(tagged) - 1: next = tagged[i+1] if tag is None or token in (model is not None and model.unknown or ()): # Use language model (i.e., SLP). if model is not None: tagged[i] = model.apply([token, None], prev, next) # Use NNP for capitalized words (except in German). elif token.istitle() and language != "de": tagged[i] = [token, default[1]] # Use CD for digits and numbers. elif CD.match(token) is not None: tagged[i] = [token, default[2]] # Use suffix rules (e.g., -ly = RB). elif morphology is not None: tagged[i] = morphology.apply([token, default[0]], prev, next) # Use suffix rules (English default). elif language == "en": tagged[i] = _suffix_rules([token, default[0]]) # Use most frequent tag (NN). else: tagged[i] = [token, default[0]] # Tag words by context. 
if context is not None and model is None: tagged = context.apply(tagged) # Tag named entities. if entities is not None: tagged = entities.apply(tagged) # Map tags with a custom function. if map is not None: tagged = [list(map(token, tag)) or [token, default[0]] for token, tag in tagged] return tagged
[ "def", "find_tags", "(", "tokens", ",", "lexicon", "=", "{", "}", ",", "model", "=", "None", ",", "morphology", "=", "None", ",", "context", "=", "None", ",", "entities", "=", "None", ",", "default", "=", "(", "\"NN\"", ",", "\"NNP\"", ",", "\"CD\"",...
Returns a list of [token, tag]-items for the given list of tokens: ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] Words are tagged using the given lexicon of (word, tag)-items. Unknown words are tagged NN by default. Unknown words that start with a capital letter are tagged NNP (unless language="de"). Unknown words that consist only of digits and punctuation marks are tagged CD. Unknown words are then improved with morphological rules. All words are improved with contextual rules. If a model is given, uses model for unknown words instead of morphology and context. If map is a function, it is applied to each (token, tag) after applying all rules.
[ "Returns", "a", "list", "of", "[", "token", "tag", "]", "-", "items", "for", "the", "given", "list", "of", "tokens", ":", "[", "The", "cat", "purs", "]", "=", ">", "[[", "The", "DT", "]", "[", "cat", "NN", "]", "[", "purs", "VB", "]]", "Words",...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1072-L1123
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_chunks
def find_chunks(tagged, language="en"): """ The input is a list of [token, tag]-items. The output is a list of [token, tag, chunk]-items: The/DT nice/JJ fish/NN is/VBZ dead/JJ ./. => The/DT/B-NP nice/JJ/I-NP fish/NN/I-NP is/VBZ/B-VP dead/JJ/B-ADJP ././O """ chunked = [x for x in tagged] tags = "".join("%s%s" % (tag, SEPARATOR) for token, tag in tagged) # Use Germanic or Romance chunking rules according to given language. for tag, rule in CHUNKS[int(language in ("ca", "es", "pt", "fr", "it", "pt", "ro"))]: for m in rule.finditer(tags): # Find the start of chunks inside the tags-string. # Number of preceding separators = number of preceding tokens. i = m.start() j = tags[:i].count(SEPARATOR) n = m.group(0).count(SEPARATOR) for k in range(j, j+n): if len(chunked[k]) == 3: continue if len(chunked[k]) < 3: # A conjunction or comma cannot be start of a chunk. if k == j and chunked[k][1] in ("CC", "CJ", ","): j += 1 # Mark first token in chunk with B-. elif k == j: chunked[k].append("B-" + tag) # Mark other tokens in chunk with I-. else: chunked[k].append("I-" + tag) # Mark chinks (tokens outside of a chunk) with O-. for chink in filter(lambda x: len(x) < 3, chunked): chink.append("O") # Post-processing corrections. for i, (word, tag, chunk) in enumerate(chunked): if tag.startswith("RB") and chunk == "B-NP": # "Perhaps you" => ADVP + NP # "Really nice work" => NP # "Really, nice work" => ADVP + O + NP if i < len(chunked)-1 and not chunked[i+1][1].startswith("JJ"): chunked[i+0][2] = "B-ADVP" chunked[i+1][2] = "B-NP" if i < len(chunked)-1 and chunked[i+1][1] in ("CC", "CJ", ","): chunked[i+1][2] = "O" if i < len(chunked)-2 and chunked[i+1][2] == "O": chunked[i+2][2] = "B-NP" return chunked
python
def find_chunks(tagged, language="en"): """ The input is a list of [token, tag]-items. The output is a list of [token, tag, chunk]-items: The/DT nice/JJ fish/NN is/VBZ dead/JJ ./. => The/DT/B-NP nice/JJ/I-NP fish/NN/I-NP is/VBZ/B-VP dead/JJ/B-ADJP ././O """ chunked = [x for x in tagged] tags = "".join("%s%s" % (tag, SEPARATOR) for token, tag in tagged) # Use Germanic or Romance chunking rules according to given language. for tag, rule in CHUNKS[int(language in ("ca", "es", "pt", "fr", "it", "pt", "ro"))]: for m in rule.finditer(tags): # Find the start of chunks inside the tags-string. # Number of preceding separators = number of preceding tokens. i = m.start() j = tags[:i].count(SEPARATOR) n = m.group(0).count(SEPARATOR) for k in range(j, j+n): if len(chunked[k]) == 3: continue if len(chunked[k]) < 3: # A conjunction or comma cannot be start of a chunk. if k == j and chunked[k][1] in ("CC", "CJ", ","): j += 1 # Mark first token in chunk with B-. elif k == j: chunked[k].append("B-" + tag) # Mark other tokens in chunk with I-. else: chunked[k].append("I-" + tag) # Mark chinks (tokens outside of a chunk) with O-. for chink in filter(lambda x: len(x) < 3, chunked): chink.append("O") # Post-processing corrections. for i, (word, tag, chunk) in enumerate(chunked): if tag.startswith("RB") and chunk == "B-NP": # "Perhaps you" => ADVP + NP # "Really nice work" => NP # "Really, nice work" => ADVP + O + NP if i < len(chunked)-1 and not chunked[i+1][1].startswith("JJ"): chunked[i+0][2] = "B-ADVP" chunked[i+1][2] = "B-NP" if i < len(chunked)-1 and chunked[i+1][1] in ("CC", "CJ", ","): chunked[i+1][2] = "O" if i < len(chunked)-2 and chunked[i+1][2] == "O": chunked[i+2][2] = "B-NP" return chunked
[ "def", "find_chunks", "(", "tagged", ",", "language", "=", "\"en\"", ")", ":", "chunked", "=", "[", "x", "for", "x", "in", "tagged", "]", "tags", "=", "\"\"", ".", "join", "(", "\"%s%s\"", "%", "(", "tag", ",", "SEPARATOR", ")", "for", "token", ","...
The input is a list of [token, tag]-items. The output is a list of [token, tag, chunk]-items: The/DT nice/JJ fish/NN is/VBZ dead/JJ ./. => The/DT/B-NP nice/JJ/I-NP fish/NN/I-NP is/VBZ/B-VP dead/JJ/B-ADJP ././O
[ "The", "input", "is", "a", "list", "of", "[", "token", "tag", "]", "-", "items", ".", "The", "output", "is", "a", "list", "of", "[", "token", "tag", "chunk", "]", "-", "items", ":", "The", "/", "DT", "nice", "/", "JJ", "fish", "/", "NN", "is", ...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1171-L1216
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_prepositions
def find_prepositions(chunked): """ The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, preposition]-items. PP-chunks followed by NP-chunks make up a PNP-chunk. """ # Tokens that are not part of a preposition just get the O-tag. for ch in chunked: ch.append("O") for i, chunk in enumerate(chunked): if chunk[2].endswith("PP") and chunk[-1] == "O": # Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund. if i < len(chunked)-1 and \ (chunked[i+1][2].endswith(("NP", "PP")) or \ chunked[i+1][1] in ("VBG", "VBN")): chunk[-1] = "B-PNP" pp = True for ch in chunked[i+1:]: if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")): break if ch[2].endswith("PP") and pp: ch[-1] = "I-PNP" if not ch[2].endswith("PP"): ch[-1] = "I-PNP" pp = False return chunked
python
def find_prepositions(chunked): """ The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, preposition]-items. PP-chunks followed by NP-chunks make up a PNP-chunk. """ # Tokens that are not part of a preposition just get the O-tag. for ch in chunked: ch.append("O") for i, chunk in enumerate(chunked): if chunk[2].endswith("PP") and chunk[-1] == "O": # Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund. if i < len(chunked)-1 and \ (chunked[i+1][2].endswith(("NP", "PP")) or \ chunked[i+1][1] in ("VBG", "VBN")): chunk[-1] = "B-PNP" pp = True for ch in chunked[i+1:]: if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")): break if ch[2].endswith("PP") and pp: ch[-1] = "I-PNP" if not ch[2].endswith("PP"): ch[-1] = "I-PNP" pp = False return chunked
[ "def", "find_prepositions", "(", "chunked", ")", ":", "# Tokens that are not part of a preposition just get the O-tag.", "for", "ch", "in", "chunked", ":", "ch", ".", "append", "(", "\"O\"", ")", "for", "i", ",", "chunk", "in", "enumerate", "(", "chunked", ")", ...
The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, preposition]-items. PP-chunks followed by NP-chunks make up a PNP-chunk.
[ "The", "input", "is", "a", "list", "of", "[", "token", "tag", "chunk", "]", "-", "items", ".", "The", "output", "is", "a", "list", "of", "[", "token", "tag", "chunk", "preposition", "]", "-", "items", ".", "PP", "-", "chunks", "followed", "by", "NP...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1218-L1242
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_relations
def find_relations(chunked): """ The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, relation]-items. A noun phrase preceding a verb phrase is perceived as sentence subject. A noun phrase following a verb phrase is perceived as sentence object. """ tag = lambda token: token[2].split("-")[-1] # B-NP => NP # Group successive tokens with the same chunk-tag. chunks = [] for token in chunked: if len(chunks) == 0 \ or token[2].startswith("B-") \ or tag(token) != tag(chunks[-1][-1]): chunks.append([]) chunks[-1].append(token+["O"]) # If a VP is preceded by a NP, the NP is tagged as NP-SBJ-(id). # If a VP is followed by a NP, the NP is tagged as NP-OBJ-(id). # Chunks that are not part of a relation get an O-tag. id = 0 for i, chunk in enumerate(chunks): if tag(chunk[-1]) == "VP" and i > 0 and tag(chunks[i-1][-1]) == "NP": if chunk[-1][-1] == "O": id += 1 for token in chunk: token[-1] = "VP-" + str(id) for token in chunks[i-1]: token[-1] += "*NP-SBJ-" + str(id) token[-1] = token[-1].lstrip("O-*") if tag(chunk[-1]) == "VP" and i < len(chunks)-1 and tag(chunks[i+1][-1]) == "NP": if chunk[-1][-1] == "O": id += 1 for token in chunk: token[-1] = "VP-" + str(id) for token in chunks[i+1]: token[-1] = "*NP-OBJ-" + str(id) token[-1] = token[-1].lstrip("O-*") # This is more a proof-of-concept than useful in practice: # PP-LOC = be + in|at + the|my # PP-DIR = go + to|towards + the|my for i, chunk in enumerate(chunks): if 0 < i < len(chunks)-1 and len(chunk) == 1 and chunk[-1][-1] == "O": t0, t1, t2 = chunks[i-1][-1], chunks[i][0], chunks[i+1][0] # previous / current / next if tag(t1) == "PP" and t2[1] in ("DT", "PR", "PRP$"): if t0[0] in BE and t1[0] in ("in", "at") : t1[-1] = "PP-LOC" if t0[0] in GO and t1[0] in ("to", "towards") : t1[-1] = "PP-DIR" related = []; [related.extend(chunk) for chunk in chunks] return related
python
def find_relations(chunked): """ The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, relation]-items. A noun phrase preceding a verb phrase is perceived as sentence subject. A noun phrase following a verb phrase is perceived as sentence object. """ tag = lambda token: token[2].split("-")[-1] # B-NP => NP # Group successive tokens with the same chunk-tag. chunks = [] for token in chunked: if len(chunks) == 0 \ or token[2].startswith("B-") \ or tag(token) != tag(chunks[-1][-1]): chunks.append([]) chunks[-1].append(token+["O"]) # If a VP is preceded by a NP, the NP is tagged as NP-SBJ-(id). # If a VP is followed by a NP, the NP is tagged as NP-OBJ-(id). # Chunks that are not part of a relation get an O-tag. id = 0 for i, chunk in enumerate(chunks): if tag(chunk[-1]) == "VP" and i > 0 and tag(chunks[i-1][-1]) == "NP": if chunk[-1][-1] == "O": id += 1 for token in chunk: token[-1] = "VP-" + str(id) for token in chunks[i-1]: token[-1] += "*NP-SBJ-" + str(id) token[-1] = token[-1].lstrip("O-*") if tag(chunk[-1]) == "VP" and i < len(chunks)-1 and tag(chunks[i+1][-1]) == "NP": if chunk[-1][-1] == "O": id += 1 for token in chunk: token[-1] = "VP-" + str(id) for token in chunks[i+1]: token[-1] = "*NP-OBJ-" + str(id) token[-1] = token[-1].lstrip("O-*") # This is more a proof-of-concept than useful in practice: # PP-LOC = be + in|at + the|my # PP-DIR = go + to|towards + the|my for i, chunk in enumerate(chunks): if 0 < i < len(chunks)-1 and len(chunk) == 1 and chunk[-1][-1] == "O": t0, t1, t2 = chunks[i-1][-1], chunks[i][0], chunks[i+1][0] # previous / current / next if tag(t1) == "PP" and t2[1] in ("DT", "PR", "PRP$"): if t0[0] in BE and t1[0] in ("in", "at") : t1[-1] = "PP-LOC" if t0[0] in GO and t1[0] in ("to", "towards") : t1[-1] = "PP-DIR" related = []; [related.extend(chunk) for chunk in chunks] return related
[ "def", "find_relations", "(", "chunked", ")", ":", "tag", "=", "lambda", "token", ":", "token", "[", "2", "]", ".", "split", "(", "\"-\"", ")", "[", "-", "1", "]", "# B-NP => NP", "# Group successive tokens with the same chunk-tag.", "chunks", "=", "[", "]",...
The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, relation]-items. A noun phrase preceding a verb phrase is perceived as sentence subject. A noun phrase following a verb phrase is perceived as sentence object.
[ "The", "input", "is", "a", "list", "of", "[", "token", "tag", "chunk", "]", "-", "items", ".", "The", "output", "is", "a", "list", "of", "[", "token", "tag", "chunk", "relation", "]", "-", "items", ".", "A", "noun", "phrase", "preceding", "a", "ver...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1250-L1296
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_keywords
def find_keywords(string, parser, top=10, frequency={}, **kwargs): """ Returns a sorted list of keywords in the given string. The given parser (e.g., pattern.en.parser) is used to identify noun phrases. The given frequency dictionary can be a reference corpus, with relative document frequency (df, 0.0-1.0) for each lemma, e.g., {"the": 0.8, "cat": 0.1, ...} """ lemmata = kwargs.pop("lemmata", kwargs.pop("stem", True)) # Parse the string and extract noun phrases (NP). chunks = [] wordcount = 0 for sentence in parser.parse(string, chunks=True, lemmata=lemmata).split(): for w in sentence: # ["cats", "NNS", "I-NP", "O", "cat"] if w[2] == "B-NP": chunks.append([w]) wordcount += 1 elif w[2] == "I-NP" and w[1][:3] == chunks[-1][-1][1][:3] == "NNP": chunks[-1][-1][+0] += " " + w[+0] # Collapse NNPs: "Ms Kitty". chunks[-1][-1][-1] += " " + w[-1] elif w[2] == "I-NP": chunks[-1].append(w) wordcount += 1 # Rate the nouns in noun phrases. m = {} for i, chunk in enumerate(chunks): head = True if parser.language not in ("ca", "es", "pt", "fr", "it", "pt", "ro"): # Head of "cat hair" => "hair". # Head of "poils de chat" => "poils". chunk = list(reversed(chunk)) for w in chunk: if w[1].startswith("NN"): if lemmata: k = w[-1] else: k = w[0].lower() if not k in m: m[k] = [0.0, set(), 1.0, 1.0, 1.0] # Higher score for chunks that appear more frequently. m[k][0] += 1 / float(wordcount) # Higher score for chunks that appear in more contexts (semantic centrality). m[k][1].add(" ".join(map(lambda x: x[0], chunk)).lower()) # Higher score for chunks at the start (25%) of the text. m[k][2] += 1 if float(i) / len(chunks) <= 0.25 else 0 # Higher score for chunks not in a prepositional phrase. m[k][3] += 1 if w[3] == "O" else 0 # Higher score for chunk head. m[k][4] += 1 if head else 0 head = False # Rate tf-idf if a frequency dict is given. 
for k in m: if frequency: df = frequency.get(k, 0.0) df = max(df, 1e-10) df = log(1.0 / df, 2.71828) else: df = 1.0 m[k][0] = max(1e-10, m[k][0] * df) m[k][1] = 1 + float(len(m[k][1])) # Sort candidates alphabetically by total score # The harmonic mean will emphasize tf-idf score. hmean = lambda a: len(a) / sum(1.0 / x for x in a) m = [(hmean(m[k]), k) for k in m] m = sorted(m, key=lambda x: x[1]) m = sorted(m, key=lambda x: x[0], reverse=True) m = [k for score, k in m] return m[:top]
python
def find_keywords(string, parser, top=10, frequency={}, **kwargs): """ Returns a sorted list of keywords in the given string. The given parser (e.g., pattern.en.parser) is used to identify noun phrases. The given frequency dictionary can be a reference corpus, with relative document frequency (df, 0.0-1.0) for each lemma, e.g., {"the": 0.8, "cat": 0.1, ...} """ lemmata = kwargs.pop("lemmata", kwargs.pop("stem", True)) # Parse the string and extract noun phrases (NP). chunks = [] wordcount = 0 for sentence in parser.parse(string, chunks=True, lemmata=lemmata).split(): for w in sentence: # ["cats", "NNS", "I-NP", "O", "cat"] if w[2] == "B-NP": chunks.append([w]) wordcount += 1 elif w[2] == "I-NP" and w[1][:3] == chunks[-1][-1][1][:3] == "NNP": chunks[-1][-1][+0] += " " + w[+0] # Collapse NNPs: "Ms Kitty". chunks[-1][-1][-1] += " " + w[-1] elif w[2] == "I-NP": chunks[-1].append(w) wordcount += 1 # Rate the nouns in noun phrases. m = {} for i, chunk in enumerate(chunks): head = True if parser.language not in ("ca", "es", "pt", "fr", "it", "pt", "ro"): # Head of "cat hair" => "hair". # Head of "poils de chat" => "poils". chunk = list(reversed(chunk)) for w in chunk: if w[1].startswith("NN"): if lemmata: k = w[-1] else: k = w[0].lower() if not k in m: m[k] = [0.0, set(), 1.0, 1.0, 1.0] # Higher score for chunks that appear more frequently. m[k][0] += 1 / float(wordcount) # Higher score for chunks that appear in more contexts (semantic centrality). m[k][1].add(" ".join(map(lambda x: x[0], chunk)).lower()) # Higher score for chunks at the start (25%) of the text. m[k][2] += 1 if float(i) / len(chunks) <= 0.25 else 0 # Higher score for chunks not in a prepositional phrase. m[k][3] += 1 if w[3] == "O" else 0 # Higher score for chunk head. m[k][4] += 1 if head else 0 head = False # Rate tf-idf if a frequency dict is given. 
for k in m: if frequency: df = frequency.get(k, 0.0) df = max(df, 1e-10) df = log(1.0 / df, 2.71828) else: df = 1.0 m[k][0] = max(1e-10, m[k][0] * df) m[k][1] = 1 + float(len(m[k][1])) # Sort candidates alphabetically by total score # The harmonic mean will emphasize tf-idf score. hmean = lambda a: len(a) / sum(1.0 / x for x in a) m = [(hmean(m[k]), k) for k in m] m = sorted(m, key=lambda x: x[1]) m = sorted(m, key=lambda x: x[0], reverse=True) m = [k for score, k in m] return m[:top]
[ "def", "find_keywords", "(", "string", ",", "parser", ",", "top", "=", "10", ",", "frequency", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "lemmata", "=", "kwargs", ".", "pop", "(", "\"lemmata\"", ",", "kwargs", ".", "pop", "(", "\"stem\"", "...
Returns a sorted list of keywords in the given string. The given parser (e.g., pattern.en.parser) is used to identify noun phrases. The given frequency dictionary can be a reference corpus, with relative document frequency (df, 0.0-1.0) for each lemma, e.g., {"the": 0.8, "cat": 0.1, ...}
[ "Returns", "a", "sorted", "list", "of", "keywords", "in", "the", "given", "string", ".", "The", "given", "parser", "(", "e", ".", "g", ".", "pattern", ".", "en", ".", "parser", ")", "is", "used", "to", "identify", "noun", "phrases", ".", "The", "give...
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1300-L1366
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
tense_id
def tense_id(*args, **kwargs): """ Returns the tense id for a given (tense, person, number, mood, aspect, negated). Aliases and compound forms (e.g., IMPERFECT) are disambiguated. """ # Unpack tense given as a tuple, e.g., tense((PRESENT, 1, SG)): if len(args) == 1 and isinstance(args[0], (list, tuple)): if args[0] not in ((PRESENT, PARTICIPLE), (PAST, PARTICIPLE)): args = args[0] # No parameters defaults to tense=INFINITIVE, tense=PRESENT otherwise. if len(args) == 0 and len(kwargs) == 0: t = INFINITIVE else: t = PRESENT # Set default values. tense = kwargs.get("tense" , args[0] if len(args) > 0 else t) person = kwargs.get("person" , args[1] if len(args) > 1 else 3) or None number = kwargs.get("number" , args[2] if len(args) > 2 else SINGULAR) mood = kwargs.get("mood" , args[3] if len(args) > 3 else INDICATIVE) aspect = kwargs.get("aspect" , args[4] if len(args) > 4 else IMPERFECTIVE) negated = kwargs.get("negated", args[5] if len(args) > 5 else False) # Disambiguate wrong order of parameters. if mood in (PERFECTIVE, IMPERFECTIVE): mood, aspect = INDICATIVE, mood # Disambiguate INFINITIVE. # Disambiguate PARTICIPLE, IMPERFECT, PRETERITE. # These are often considered to be tenses but are in fact tense + aspect. if tense == INFINITIVE: person = number = mood = aspect = None; negated=False if tense in ((PRESENT, PARTICIPLE), PRESENT+PARTICIPLE, PARTICIPLE, GERUND): tense, aspect = PRESENT, PROGRESSIVE if tense in ((PAST, PARTICIPLE), PAST+PARTICIPLE): tense, aspect = PAST, PROGRESSIVE if tense == IMPERFECT: tense, aspect = PAST, IMPERFECTIVE if tense == PRETERITE: tense, aspect = PAST, PERFECTIVE if aspect in (CONTINUOUS, PARTICIPLE, GERUND): aspect = PROGRESSIVE if aspect == PROGRESSIVE: person = number = None # Disambiguate CONDITIONAL. # In Spanish, the conditional is regarded as an indicative tense. 
if tense == CONDITIONAL and mood == INDICATIVE: tense, mood = PRESENT, CONDITIONAL # Disambiguate aliases: "pl" => # (PRESENT, None, PLURAL, INDICATIVE, IMPERFECTIVE, False). return TENSES_ID.get(tense.lower(), TENSES_ID.get((tense, person, number, mood, aspect, negated)))
python
def tense_id(*args, **kwargs): """ Returns the tense id for a given (tense, person, number, mood, aspect, negated). Aliases and compound forms (e.g., IMPERFECT) are disambiguated. """ # Unpack tense given as a tuple, e.g., tense((PRESENT, 1, SG)): if len(args) == 1 and isinstance(args[0], (list, tuple)): if args[0] not in ((PRESENT, PARTICIPLE), (PAST, PARTICIPLE)): args = args[0] # No parameters defaults to tense=INFINITIVE, tense=PRESENT otherwise. if len(args) == 0 and len(kwargs) == 0: t = INFINITIVE else: t = PRESENT # Set default values. tense = kwargs.get("tense" , args[0] if len(args) > 0 else t) person = kwargs.get("person" , args[1] if len(args) > 1 else 3) or None number = kwargs.get("number" , args[2] if len(args) > 2 else SINGULAR) mood = kwargs.get("mood" , args[3] if len(args) > 3 else INDICATIVE) aspect = kwargs.get("aspect" , args[4] if len(args) > 4 else IMPERFECTIVE) negated = kwargs.get("negated", args[5] if len(args) > 5 else False) # Disambiguate wrong order of parameters. if mood in (PERFECTIVE, IMPERFECTIVE): mood, aspect = INDICATIVE, mood # Disambiguate INFINITIVE. # Disambiguate PARTICIPLE, IMPERFECT, PRETERITE. # These are often considered to be tenses but are in fact tense + aspect. if tense == INFINITIVE: person = number = mood = aspect = None; negated=False if tense in ((PRESENT, PARTICIPLE), PRESENT+PARTICIPLE, PARTICIPLE, GERUND): tense, aspect = PRESENT, PROGRESSIVE if tense in ((PAST, PARTICIPLE), PAST+PARTICIPLE): tense, aspect = PAST, PROGRESSIVE if tense == IMPERFECT: tense, aspect = PAST, IMPERFECTIVE if tense == PRETERITE: tense, aspect = PAST, PERFECTIVE if aspect in (CONTINUOUS, PARTICIPLE, GERUND): aspect = PROGRESSIVE if aspect == PROGRESSIVE: person = number = None # Disambiguate CONDITIONAL. # In Spanish, the conditional is regarded as an indicative tense. 
if tense == CONDITIONAL and mood == INDICATIVE: tense, mood = PRESENT, CONDITIONAL # Disambiguate aliases: "pl" => # (PRESENT, None, PLURAL, INDICATIVE, IMPERFECTIVE, False). return TENSES_ID.get(tense.lower(), TENSES_ID.get((tense, person, number, mood, aspect, negated)))
[ "def", "tense_id", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Unpack tense given as a tuple, e.g., tense((PRESENT, 1, SG)):", "if", "len", "(", "args", ")", "==", "1", "and", "isinstance", "(", "args", "[", "0", "]", ",", "(", "list", ",", "tu...
Returns the tense id for a given (tense, person, number, mood, aspect, negated). Aliases and compound forms (e.g., IMPERFECT) are disambiguated.
[ "Returns", "the", "tense", "id", "for", "a", "given", "(", "tense", "person", "number", "mood", "aspect", "negated", ")", ".", "Aliases", "and", "compound", "forms", "(", "e", ".", "g", ".", "IMPERFECT", ")", "are", "disambiguated", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1594-L1641
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
_multilingual
def _multilingual(function, *args, **kwargs): """ Returns the value from the function with the given name in the given language module. By default, language="en". """ return getattr(_module(kwargs.pop("language", "en")), function)(*args, **kwargs)
python
def _multilingual(function, *args, **kwargs): """ Returns the value from the function with the given name in the given language module. By default, language="en". """ return getattr(_module(kwargs.pop("language", "en")), function)(*args, **kwargs)
[ "def", "_multilingual", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "getattr", "(", "_module", "(", "kwargs", ".", "pop", "(", "\"language\"", ",", "\"en\"", ")", ")", ",", "function", ")", "(", "*", "args", ",", ...
Returns the value from the function with the given name in the given language module. By default, language="en".
[ "Returns", "the", "value", "from", "the", "function", "with", "the", "given", "name", "in", "the", "given", "language", "module", ".", "By", "default", "language", "=", "en", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2188-L2192
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
language
def language(s): """ Returns a (language, confidence)-tuple for the given string. """ s = decode_utf8(s) s = set(w.strip(PUNCTUATION) for w in s.replace("'", "' ").split()) n = float(len(s) or 1) p = {} for xx in LANGUAGES: lexicon = _module(xx).__dict__["lexicon"] p[xx] = sum(1 for w in s if w in lexicon) / n return max(p.items(), key=lambda kv: (kv[1], int(kv[0] == "en")))
python
def language(s): """ Returns a (language, confidence)-tuple for the given string. """ s = decode_utf8(s) s = set(w.strip(PUNCTUATION) for w in s.replace("'", "' ").split()) n = float(len(s) or 1) p = {} for xx in LANGUAGES: lexicon = _module(xx).__dict__["lexicon"] p[xx] = sum(1 for w in s if w in lexicon) / n return max(p.items(), key=lambda kv: (kv[1], int(kv[0] == "en")))
[ "def", "language", "(", "s", ")", ":", "s", "=", "decode_utf8", "(", "s", ")", "s", "=", "set", "(", "w", ".", "strip", "(", "PUNCTUATION", ")", "for", "w", "in", "s", ".", "replace", "(", "\"'\"", ",", "\"' \"", ")", ".", "split", "(", ")", ...
Returns a (language, confidence)-tuple for the given string.
[ "Returns", "a", "(", "language", "confidence", ")", "-", "tuple", "for", "the", "given", "string", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2194-L2204
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
lazylist._lazy
def _lazy(self, method, *args): """ If the list is empty, calls lazylist.load(). Replaces lazylist.method() with list.method() and calls it. """ if list.__len__(self) == 0: self.load() setattr(self, method, types.MethodType(getattr(list, method), self)) return getattr(list, method)(self, *args)
python
def _lazy(self, method, *args): """ If the list is empty, calls lazylist.load(). Replaces lazylist.method() with list.method() and calls it. """ if list.__len__(self) == 0: self.load() setattr(self, method, types.MethodType(getattr(list, method), self)) return getattr(list, method)(self, *args)
[ "def", "_lazy", "(", "self", ",", "method", ",", "*", "args", ")", ":", "if", "list", ".", "__len__", "(", "self", ")", "==", "0", ":", "self", ".", "load", "(", ")", "setattr", "(", "self", ",", "method", ",", "types", ".", "MethodType", "(", ...
If the list is empty, calls lazylist.load(). Replaces lazylist.method() with list.method() and calls it.
[ "If", "the", "list", "is", "empty", "calls", "lazylist", ".", "load", "()", ".", "Replaces", "lazylist", ".", "method", "()", "with", "list", ".", "method", "()", "and", "calls", "it", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L179-L186
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model.train
def train(self, token, tag, previous=None, next=None): """ Trains the model to predict the given tag for the given token, in context of the given previous and next (token, tag)-tuples. """ self._classifier.train(self._v(token, previous, next), type=tag)
python
def train(self, token, tag, previous=None, next=None): """ Trains the model to predict the given tag for the given token, in context of the given previous and next (token, tag)-tuples. """ self._classifier.train(self._v(token, previous, next), type=tag)
[ "def", "train", "(", "self", ",", "token", ",", "tag", ",", "previous", "=", "None", ",", "next", "=", "None", ")", ":", "self", ".", "_classifier", ".", "train", "(", "self", ".", "_v", "(", "token", ",", "previous", ",", "next", ")", ",", "type...
Trains the model to predict the given tag for the given token, in context of the given previous and next (token, tag)-tuples.
[ "Trains", "the", "model", "to", "predict", "the", "given", "tag", "for", "the", "given", "token", "in", "context", "of", "the", "given", "previous", "and", "next", "(", "token", "tag", ")", "-", "tuples", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L323-L327
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model.classify
def classify(self, token, previous=None, next=None, **kwargs): """ Returns the predicted tag for the given token, in context of the given previous and next (token, tag)-tuples. """ return self._classifier.classify(self._v(token, previous, next), **kwargs)
python
def classify(self, token, previous=None, next=None, **kwargs): """ Returns the predicted tag for the given token, in context of the given previous and next (token, tag)-tuples. """ return self._classifier.classify(self._v(token, previous, next), **kwargs)
[ "def", "classify", "(", "self", ",", "token", ",", "previous", "=", "None", ",", "next", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_classifier", ".", "classify", "(", "self", ".", "_v", "(", "token", ",", "previous", "...
Returns the predicted tag for the given token, in context of the given previous and next (token, tag)-tuples.
[ "Returns", "the", "predicted", "tag", "for", "the", "given", "token", "in", "context", "of", "the", "given", "previous", "and", "next", "(", "token", "tag", ")", "-", "tuples", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L329-L333
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model.apply
def apply(self, token, previous=(None, None), next=(None, None)): """ Returns a (token, tag)-tuple for the given token, in context of the given previous and next (token, tag)-tuples. """ return [token[0], self._classifier.classify(self._v(token[0], previous, next))]
python
def apply(self, token, previous=(None, None), next=(None, None)): """ Returns a (token, tag)-tuple for the given token, in context of the given previous and next (token, tag)-tuples. """ return [token[0], self._classifier.classify(self._v(token[0], previous, next))]
[ "def", "apply", "(", "self", ",", "token", ",", "previous", "=", "(", "None", ",", "None", ")", ",", "next", "=", "(", "None", ",", "None", ")", ")", ":", "return", "[", "token", "[", "0", "]", ",", "self", ".", "_classifier", ".", "classify", ...
Returns a (token, tag)-tuple for the given token, in context of the given previous and next (token, tag)-tuples.
[ "Returns", "a", "(", "token", "tag", ")", "-", "tuple", "for", "the", "given", "token", "in", "context", "of", "the", "given", "previous", "and", "next", "(", "token", "tag", ")", "-", "tuples", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L335-L339
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model._v
def _v(self, token, previous=None, next=None): """ Returns a training vector for the given (word, tag)-tuple and its context. """ def f(v, s1, s2): if s2: v[s1 + " " + s2] = 1 p, n = previous, next p = ("", "") if not p else (p[0] or "", p[1] or "") n = ("", "") if not n else (n[0] or "", n[1] or "") v = {} f(v, "b", "b") # Bias. f(v, "h", token[0]) # Capitalization. f(v, "w", token[-6:] if token not in self.known or token in self.unknown else "") f(v, "x", token[-3:]) # Word suffix. f(v, "-x", p[0][-3:]) # Word suffix left. f(v, "+x", n[0][-3:]) # Word suffix right. f(v, "-t", p[1]) # Tag left. f(v, "-+", p[1] + n[1]) # Tag left + right. f(v, "+t", n[1]) # Tag right. return v
python
def _v(self, token, previous=None, next=None): """ Returns a training vector for the given (word, tag)-tuple and its context. """ def f(v, s1, s2): if s2: v[s1 + " " + s2] = 1 p, n = previous, next p = ("", "") if not p else (p[0] or "", p[1] or "") n = ("", "") if not n else (n[0] or "", n[1] or "") v = {} f(v, "b", "b") # Bias. f(v, "h", token[0]) # Capitalization. f(v, "w", token[-6:] if token not in self.known or token in self.unknown else "") f(v, "x", token[-3:]) # Word suffix. f(v, "-x", p[0][-3:]) # Word suffix left. f(v, "+x", n[0][-3:]) # Word suffix right. f(v, "-t", p[1]) # Tag left. f(v, "-+", p[1] + n[1]) # Tag left + right. f(v, "+t", n[1]) # Tag right. return v
[ "def", "_v", "(", "self", ",", "token", ",", "previous", "=", "None", ",", "next", "=", "None", ")", ":", "def", "f", "(", "v", ",", "s1", ",", "s2", ")", ":", "if", "s2", ":", "v", "[", "s1", "+", "\" \"", "+", "s2", "]", "=", "1", "p", ...
Returns a training vector for the given (word, tag)-tuple and its context.
[ "Returns", "a", "training", "vector", "for", "the", "given", "(", "word", "tag", ")", "-", "tuple", "and", "its", "context", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L341-L360
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Morphology.apply
def apply(self, token, previous=(None, None), next=(None, None)): """ Applies lexical rules to the given token, which is a [word, tag] list. """ w = token[0] for r in self: if r[1] in self._cmd: # Rule = ly hassuf 2 RB x f, x, pos, cmd = bool(0), r[0], r[-2], r[1].lower() if r[2] in self._cmd: # Rule = NN s fhassuf 1 NNS x f, x, pos, cmd = bool(1), r[1], r[-2], r[2].lower().lstrip("f") if f and token[1] != r[0]: continue if (cmd == "word" and x == w) \ or (cmd == "char" and x in w) \ or (cmd == "haspref" and w.startswith(x)) \ or (cmd == "hassuf" and w.endswith(x)) \ or (cmd == "addpref" and x + w in self.known) \ or (cmd == "addsuf" and w + x in self.known) \ or (cmd == "deletepref" and w.startswith(x) and w[len(x):] in self.known) \ or (cmd == "deletesuf" and w.endswith(x) and w[:-len(x)] in self.known) \ or (cmd == "goodleft" and x == next[0]) \ or (cmd == "goodright" and x == previous[0]): token[1] = pos return token
python
def apply(self, token, previous=(None, None), next=(None, None)): """ Applies lexical rules to the given token, which is a [word, tag] list. """ w = token[0] for r in self: if r[1] in self._cmd: # Rule = ly hassuf 2 RB x f, x, pos, cmd = bool(0), r[0], r[-2], r[1].lower() if r[2] in self._cmd: # Rule = NN s fhassuf 1 NNS x f, x, pos, cmd = bool(1), r[1], r[-2], r[2].lower().lstrip("f") if f and token[1] != r[0]: continue if (cmd == "word" and x == w) \ or (cmd == "char" and x in w) \ or (cmd == "haspref" and w.startswith(x)) \ or (cmd == "hassuf" and w.endswith(x)) \ or (cmd == "addpref" and x + w in self.known) \ or (cmd == "addsuf" and w + x in self.known) \ or (cmd == "deletepref" and w.startswith(x) and w[len(x):] in self.known) \ or (cmd == "deletesuf" and w.endswith(x) and w[:-len(x)] in self.known) \ or (cmd == "goodleft" and x == next[0]) \ or (cmd == "goodright" and x == previous[0]): token[1] = pos return token
[ "def", "apply", "(", "self", ",", "token", ",", "previous", "=", "(", "None", ",", "None", ")", ",", "next", "=", "(", "None", ",", "None", ")", ")", ":", "w", "=", "token", "[", "0", "]", "for", "r", "in", "self", ":", "if", "r", "[", "1",...
Applies lexical rules to the given token, which is a [word, tag] list.
[ "Applies", "lexical", "rules", "to", "the", "given", "token", "which", "is", "a", "[", "word", "tag", "]", "list", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L403-L425
train
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Morphology.insert
def insert(self, i, tag, affix, cmd="hassuf", tagged=None): """ Inserts a new rule that assigns the given tag to words with the given affix, e.g., Morphology.append("RB", "-ly"). """ if affix.startswith("-") and affix.endswith("-"): affix, cmd = affix[+1:-1], "char" if affix.startswith("-"): affix, cmd = affix[+1:-0], "hassuf" if affix.endswith("-"): affix, cmd = affix[+0:-1], "haspref" if tagged: r = [tagged, affix, "f"+cmd.lstrip("f"), tag, "x"] else: r = [affix, cmd.lstrip("f"), tag, "x"] lazylist.insert(self, i, r)
python
def insert(self, i, tag, affix, cmd="hassuf", tagged=None): """ Inserts a new rule that assigns the given tag to words with the given affix, e.g., Morphology.append("RB", "-ly"). """ if affix.startswith("-") and affix.endswith("-"): affix, cmd = affix[+1:-1], "char" if affix.startswith("-"): affix, cmd = affix[+1:-0], "hassuf" if affix.endswith("-"): affix, cmd = affix[+0:-1], "haspref" if tagged: r = [tagged, affix, "f"+cmd.lstrip("f"), tag, "x"] else: r = [affix, cmd.lstrip("f"), tag, "x"] lazylist.insert(self, i, r)
[ "def", "insert", "(", "self", ",", "i", ",", "tag", ",", "affix", ",", "cmd", "=", "\"hassuf\"", ",", "tagged", "=", "None", ")", ":", "if", "affix", ".", "startswith", "(", "\"-\"", ")", "and", "affix", ".", "endswith", "(", "\"-\"", ")", ":", "...
Inserts a new rule that assigns the given tag to words with the given affix, e.g., Morphology.append("RB", "-ly").
[ "Inserts", "a", "new", "rule", "that", "assigns", "the", "given", "tag", "to", "words", "with", "the", "given", "affix", "e", ".", "g", ".", "Morphology", ".", "append", "(", "RB", "-", "ly", ")", "." ]
1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L427-L441
train