repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
totalgood/nlpia
src/nlpia/loaders.py
normalize_column_names
def normalize_column_names(df): r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns` >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here']) >>> normalize_column_names(df) ['hello_world', 'not_here'] """ columns = df.columns if hasattr(df, 'columns') else df columns = [c.lower().replace(' ', '_') for c in columns] return columns
python
def normalize_column_names(df): r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns` >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here']) >>> normalize_column_names(df) ['hello_world', 'not_here'] """ columns = df.columns if hasattr(df, 'columns') else df columns = [c.lower().replace(' ', '_') for c in columns] return columns
[ "def", "normalize_column_names", "(", "df", ")", ":", "columns", "=", "df", ".", "columns", "if", "hasattr", "(", "df", ",", "'columns'", ")", "else", "df", "columns", "=", "[", "c", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", "...
r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns` >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here']) >>> normalize_column_names(df) ['hello_world', 'not_here']
[ "r", "Clean", "up", "whitespace", "in", "column", "names", ".", "See", "better", "version", "at", "pugnlp", ".", "clean_columns" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1167-L1176
train
226,300
totalgood/nlpia
src/nlpia/loaders.py
clean_column_values
def clean_column_values(df, inplace=True): r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object """ dollars_percents = re.compile(r'[%$,;\s]+') if not inplace: df = df.copy() for c in df.columns: values = None if df[c].dtype.char in '<U S O'.split(): try: values = df[c].copy() values = values.fillna('') values = values.astype(str).str.replace(dollars_percents, '') # values = values.str.strip().str.replace(dollars_percents, '').str.strip() if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum(): values[values.isnull()] = np.nan values[values == ''] = np.nan values = values.astype(float) except ValueError: values = None except: # noqa logger.error('Error on column {} with dtype {}'.format(c, df[c].dtype)) raise if values is not None: if values.isnull().sum() < .6 * len(values) and values.any(): df[c] = values return df
python
def clean_column_values(df, inplace=True): r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object """ dollars_percents = re.compile(r'[%$,;\s]+') if not inplace: df = df.copy() for c in df.columns: values = None if df[c].dtype.char in '<U S O'.split(): try: values = df[c].copy() values = values.fillna('') values = values.astype(str).str.replace(dollars_percents, '') # values = values.str.strip().str.replace(dollars_percents, '').str.strip() if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum(): values[values.isnull()] = np.nan values[values == ''] = np.nan values = values.astype(float) except ValueError: values = None except: # noqa logger.error('Error on column {} with dtype {}'.format(c, df[c].dtype)) raise if values is not None: if values.isnull().sum() < .6 * len(values) and values.any(): df[c] = values return df
[ "def", "clean_column_values", "(", "df", ",", "inplace", "=", "True", ")", ":", "dollars_percents", "=", "re", ".", "compile", "(", "r'[%$,;\\s]+'", ")", "if", "not", "inplace", ":", "df", "=", "df", ".", "copy", "(", ")", "for", "c", "in", "df", "."...
r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object
[ "r", "Convert", "dollar", "value", "strings", "numbers", "with", "commas", "and", "percents", "into", "floating", "point", "values" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1179-L1222
train
226,301
totalgood/nlpia
src/nlpia/loaders.py
isglove
def isglove(filepath): """ Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False """ with ensure_open(filepath, 'r') as f: header_line = f.readline() vector_line = f.readline() try: num_vectors, num_dim = header_line.split() return int(num_dim) except (ValueError, TypeError): pass vector = vector_line.split()[1:] if len(vector) % 10: print(vector) print(len(vector) % 10) return False try: vector = np.array([float(x) for x in vector]) except (ValueError, TypeError): return False if np.all(np.abs(vector) < 12.): return len(vector) return False
python
def isglove(filepath): """ Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False """ with ensure_open(filepath, 'r') as f: header_line = f.readline() vector_line = f.readline() try: num_vectors, num_dim = header_line.split() return int(num_dim) except (ValueError, TypeError): pass vector = vector_line.split()[1:] if len(vector) % 10: print(vector) print(len(vector) % 10) return False try: vector = np.array([float(x) for x in vector]) except (ValueError, TypeError): return False if np.all(np.abs(vector) < 12.): return len(vector) return False
[ "def", "isglove", "(", "filepath", ")", ":", "with", "ensure_open", "(", "filepath", ",", "'r'", ")", "as", "f", ":", "header_line", "=", "f", ".", "readline", "(", ")", "vector_line", "=", "f", ".", "readline", "(", ")", "try", ":", "num_vectors", "...
Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False
[ "Get", "the", "first", "word", "vector", "in", "a", "GloVE", "file", "and", "return", "its", "dimensionality", "or", "False", "if", "not", "a", "vector" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1320-L1346
train
226,302
totalgood/nlpia
src/nlpia/loaders.py
nlp
def nlp(texts, lang='en', linesep=None, verbose=True): r""" Use the SpaCy parser to parse and tag natural language strings. Load the SpaCy parser language model lazily and share it among all nlpia modules. Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()` >>> _parse is None True >>> doc = nlp("Domo arigatto Mr. Roboto.") >>> doc.text 'Domo arigatto Mr. Roboto.' >>> doc.ents (Roboto,) >>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n') >>> doc = docs[0] >>> [t for t in doc] [Hey, Mr., Tangerine, Man, !] >>> [tok.text for tok in doc] ['Hey', 'Mr.', 'Tangerine', 'Man', '!'] >>> [(tok.text, tok.tag_) for tok in doc] [('Hey', 'UH'), ('Mr.', 'NNP'), ('Tangerine', 'NNP'), ('Man', 'NN'), ('!', '.')] >>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents] [('Tangerine Man', 0, True, array([0.72 , 1.913, 2.675], dtype=float32))] """ # doesn't let you load a different model anywhere else in the module linesep = os.linesep if linesep in ('default', True, 1, 'os') else linesep tqdm_prog = no_tqdm if (not verbose or (hasattr(texts, '__len__') and len(texts) < 3)) else tqdm global _parse if not _parse: try: _parse = spacy.load(lang) except (OSError, IOError): try: spacy.cli.download(lang) except URLError: logger.warning("Unable to download Spacy language model '{}' so nlp(text) just returns text.split()".format(lang)) parse = _parse or str.split # TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts if isinstance(texts, str): if linesep: return nlp(texts.split(linesep)) else: return nlp([texts]) if hasattr(texts, '__len__'): if len(texts) == 1: return parse(texts[0]) elif len(texts) > 1: return [(parse or str.split)(text) for text in tqdm_prog(texts)] else: return None else: # return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself return (parse(text) for text in tqdm_prog(texts))
python
def nlp(texts, lang='en', linesep=None, verbose=True): r""" Use the SpaCy parser to parse and tag natural language strings. Load the SpaCy parser language model lazily and share it among all nlpia modules. Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()` >>> _parse is None True >>> doc = nlp("Domo arigatto Mr. Roboto.") >>> doc.text 'Domo arigatto Mr. Roboto.' >>> doc.ents (Roboto,) >>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n') >>> doc = docs[0] >>> [t for t in doc] [Hey, Mr., Tangerine, Man, !] >>> [tok.text for tok in doc] ['Hey', 'Mr.', 'Tangerine', 'Man', '!'] >>> [(tok.text, tok.tag_) for tok in doc] [('Hey', 'UH'), ('Mr.', 'NNP'), ('Tangerine', 'NNP'), ('Man', 'NN'), ('!', '.')] >>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents] [('Tangerine Man', 0, True, array([0.72 , 1.913, 2.675], dtype=float32))] """ # doesn't let you load a different model anywhere else in the module linesep = os.linesep if linesep in ('default', True, 1, 'os') else linesep tqdm_prog = no_tqdm if (not verbose or (hasattr(texts, '__len__') and len(texts) < 3)) else tqdm global _parse if not _parse: try: _parse = spacy.load(lang) except (OSError, IOError): try: spacy.cli.download(lang) except URLError: logger.warning("Unable to download Spacy language model '{}' so nlp(text) just returns text.split()".format(lang)) parse = _parse or str.split # TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts if isinstance(texts, str): if linesep: return nlp(texts.split(linesep)) else: return nlp([texts]) if hasattr(texts, '__len__'): if len(texts) == 1: return parse(texts[0]) elif len(texts) > 1: return [(parse or str.split)(text) for text in tqdm_prog(texts)] else: return None else: # return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself return (parse(text) for text in tqdm_prog(texts))
[ "def", "nlp", "(", "texts", ",", "lang", "=", "'en'", ",", "linesep", "=", "None", ",", "verbose", "=", "True", ")", ":", "# doesn't let you load a different model anywhere else in the module", "linesep", "=", "os", ".", "linesep", "if", "linesep", "in", "(", ...
r""" Use the SpaCy parser to parse and tag natural language strings. Load the SpaCy parser language model lazily and share it among all nlpia modules. Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()` >>> _parse is None True >>> doc = nlp("Domo arigatto Mr. Roboto.") >>> doc.text 'Domo arigatto Mr. Roboto.' >>> doc.ents (Roboto,) >>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n') >>> doc = docs[0] >>> [t for t in doc] [Hey, Mr., Tangerine, Man, !] >>> [tok.text for tok in doc] ['Hey', 'Mr.', 'Tangerine', 'Man', '!'] >>> [(tok.text, tok.tag_) for tok in doc] [('Hey', 'UH'), ('Mr.', 'NNP'), ('Tangerine', 'NNP'), ('Man', 'NN'), ('!', '.')] >>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents] [('Tangerine Man', 0, True, array([0.72 , 1.913, 2.675], dtype=float32))]
[ "r", "Use", "the", "SpaCy", "parser", "to", "parse", "and", "tag", "natural", "language", "strings", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1349-L1405
train
226,303
totalgood/nlpia
src/nlpia/talk.py
get_decoder
def get_decoder(libdir=None, modeldir=None, lang='en-us'): """ Create a decoder with the requested language model """ modeldir = modeldir or (os.path.join(libdir, 'model') if libdir else MODELDIR) libdir = os.path.dirname(modeldir) config = ps.Decoder.default_config() config.set_string('-hmm', os.path.join(modeldir, lang)) config.set_string('-lm', os.path.join(modeldir, lang + '.lm.bin')) config.set_string('-dict', os.path.join(modeldir, 'cmudict-' + lang + '.dict')) print(config) return ps.Decoder(config)
python
def get_decoder(libdir=None, modeldir=None, lang='en-us'): """ Create a decoder with the requested language model """ modeldir = modeldir or (os.path.join(libdir, 'model') if libdir else MODELDIR) libdir = os.path.dirname(modeldir) config = ps.Decoder.default_config() config.set_string('-hmm', os.path.join(modeldir, lang)) config.set_string('-lm', os.path.join(modeldir, lang + '.lm.bin')) config.set_string('-dict', os.path.join(modeldir, 'cmudict-' + lang + '.dict')) print(config) return ps.Decoder(config)
[ "def", "get_decoder", "(", "libdir", "=", "None", ",", "modeldir", "=", "None", ",", "lang", "=", "'en-us'", ")", ":", "modeldir", "=", "modeldir", "or", "(", "os", ".", "path", ".", "join", "(", "libdir", ",", "'model'", ")", "if", "libdir", "else",...
Create a decoder with the requested language model
[ "Create", "a", "decoder", "with", "the", "requested", "language", "model" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/talk.py#L43-L52
train
226,304
totalgood/nlpia
src/nlpia/talk.py
transcribe
def transcribe(decoder, audio_file, libdir=None): """ Decode streaming audio data from raw binary file on disk. """ decoder = get_decoder() decoder.start_utt() stream = open(audio_file, 'rb') while True: buf = stream.read(1024) if buf: decoder.process_raw(buf, False, False) else: break decoder.end_utt() return evaluate_results(decoder)
python
def transcribe(decoder, audio_file, libdir=None): """ Decode streaming audio data from raw binary file on disk. """ decoder = get_decoder() decoder.start_utt() stream = open(audio_file, 'rb') while True: buf = stream.read(1024) if buf: decoder.process_raw(buf, False, False) else: break decoder.end_utt() return evaluate_results(decoder)
[ "def", "transcribe", "(", "decoder", ",", "audio_file", ",", "libdir", "=", "None", ")", ":", "decoder", "=", "get_decoder", "(", ")", "decoder", ".", "start_utt", "(", ")", "stream", "=", "open", "(", "audio_file", ",", "'rb'", ")", "while", "True", "...
Decode streaming audio data from raw binary file on disk.
[ "Decode", "streaming", "audio", "data", "from", "raw", "binary", "file", "on", "disk", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/talk.py#L67-L80
train
226,305
totalgood/nlpia
src/nlpia/book/examples/ch09.py
pre_process_data
def pre_process_data(filepath): """ This is dependent on your training data source but we will try to generalize it as best as possible. """ positive_path = os.path.join(filepath, 'pos') negative_path = os.path.join(filepath, 'neg') pos_label = 1 neg_label = 0 dataset = [] for filename in glob.glob(os.path.join(positive_path, '*.txt')): with open(filename, 'r') as f: dataset.append((pos_label, f.read())) for filename in glob.glob(os.path.join(negative_path, '*.txt')): with open(filename, 'r') as f: dataset.append((neg_label, f.read())) shuffle(dataset) return dataset
python
def pre_process_data(filepath): """ This is dependent on your training data source but we will try to generalize it as best as possible. """ positive_path = os.path.join(filepath, 'pos') negative_path = os.path.join(filepath, 'neg') pos_label = 1 neg_label = 0 dataset = [] for filename in glob.glob(os.path.join(positive_path, '*.txt')): with open(filename, 'r') as f: dataset.append((pos_label, f.read())) for filename in glob.glob(os.path.join(negative_path, '*.txt')): with open(filename, 'r') as f: dataset.append((neg_label, f.read())) shuffle(dataset) return dataset
[ "def", "pre_process_data", "(", "filepath", ")", ":", "positive_path", "=", "os", ".", "path", ".", "join", "(", "filepath", ",", "'pos'", ")", "negative_path", "=", "os", ".", "path", ".", "join", "(", "filepath", ",", "'neg'", ")", "pos_label", "=", ...
This is dependent on your training data source but we will try to generalize it as best as possible.
[ "This", "is", "dependent", "on", "your", "training", "data", "source", "but", "we", "will", "try", "to", "generalize", "it", "as", "best", "as", "possible", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L141-L163
train
226,306
totalgood/nlpia
src/nlpia/book/examples/ch09.py
pad_trunc
def pad_trunc(data, maxlen): """ For a given dataset pad with zero vectors or truncate to maxlen """ new_data = [] # Create a vector of 0's the length of our word vectors zero_vector = [] for _ in range(len(data[0][0])): zero_vector.append(0.0) for sample in data: if len(sample) > maxlen: temp = sample[:maxlen] elif len(sample) < maxlen: temp = sample additional_elems = maxlen - len(sample) for _ in range(additional_elems): temp.append(zero_vector) else: temp = sample new_data.append(temp) return new_data
python
def pad_trunc(data, maxlen): """ For a given dataset pad with zero vectors or truncate to maxlen """ new_data = [] # Create a vector of 0's the length of our word vectors zero_vector = [] for _ in range(len(data[0][0])): zero_vector.append(0.0) for sample in data: if len(sample) > maxlen: temp = sample[:maxlen] elif len(sample) < maxlen: temp = sample additional_elems = maxlen - len(sample) for _ in range(additional_elems): temp.append(zero_vector) else: temp = sample new_data.append(temp) return new_data
[ "def", "pad_trunc", "(", "data", ",", "maxlen", ")", ":", "new_data", "=", "[", "]", "# Create a vector of 0's the length of our word vectors", "zero_vector", "=", "[", "]", "for", "_", "in", "range", "(", "len", "(", "data", "[", "0", "]", "[", "0", "]", ...
For a given dataset pad with zero vectors or truncate to maxlen
[ "For", "a", "given", "dataset", "pad", "with", "zero", "vectors", "or", "truncate", "to", "maxlen" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L207-L228
train
226,307
totalgood/nlpia
src/nlpia/book/examples/ch09.py
clean_data
def clean_data(data): """ Shift to lower case, replace unknowns with UNK, and listify """ new_data = [] VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; ' for sample in data: new_sample = [] for char in sample[1].lower(): # Just grab the string, not the label if char in VALID: new_sample.append(char) else: new_sample.append('UNK') new_data.append(new_sample) return new_data
python
def clean_data(data): """ Shift to lower case, replace unknowns with UNK, and listify """ new_data = [] VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; ' for sample in data: new_sample = [] for char in sample[1].lower(): # Just grab the string, not the label if char in VALID: new_sample.append(char) else: new_sample.append('UNK') new_data.append(new_sample) return new_data
[ "def", "clean_data", "(", "data", ")", ":", "new_data", "=", "[", "]", "VALID", "=", "'abcdefghijklmnopqrstuvwxyz123456789\"\\'?!.,:; '", "for", "sample", "in", "data", ":", "new_sample", "=", "[", "]", "for", "char", "in", "sample", "[", "1", "]", ".", "l...
Shift to lower case, replace unknowns with UNK, and listify
[ "Shift", "to", "lower", "case", "replace", "unknowns", "with", "UNK", "and", "listify" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L436-L449
train
226,308
totalgood/nlpia
src/nlpia/book/examples/ch09.py
char_pad_trunc
def char_pad_trunc(data, maxlen): """ We truncate to maxlen or add in PAD tokens """ new_dataset = [] for sample in data: if len(sample) > maxlen: new_data = sample[:maxlen] elif len(sample) < maxlen: pads = maxlen - len(sample) new_data = sample + ['PAD'] * pads else: new_data = sample new_dataset.append(new_data) return new_dataset
python
def char_pad_trunc(data, maxlen): """ We truncate to maxlen or add in PAD tokens """ new_dataset = [] for sample in data: if len(sample) > maxlen: new_data = sample[:maxlen] elif len(sample) < maxlen: pads = maxlen - len(sample) new_data = sample + ['PAD'] * pads else: new_data = sample new_dataset.append(new_data) return new_dataset
[ "def", "char_pad_trunc", "(", "data", ",", "maxlen", ")", ":", "new_dataset", "=", "[", "]", "for", "sample", "in", "data", ":", "if", "len", "(", "sample", ")", ">", "maxlen", ":", "new_data", "=", "sample", "[", ":", "maxlen", "]", "elif", "len", ...
We truncate to maxlen or add in PAD tokens
[ "We", "truncate", "to", "maxlen", "or", "add", "in", "PAD", "tokens" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L458-L470
train
226,309
totalgood/nlpia
src/nlpia/book/examples/ch09.py
create_dicts
def create_dicts(data): """ Modified from Keras LSTM example""" chars = set() for sample in data: chars.update(set(sample)) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) return char_indices, indices_char
python
def create_dicts(data): """ Modified from Keras LSTM example""" chars = set() for sample in data: chars.update(set(sample)) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) return char_indices, indices_char
[ "def", "create_dicts", "(", "data", ")", ":", "chars", "=", "set", "(", ")", "for", "sample", "in", "data", ":", "chars", ".", "update", "(", "set", "(", "sample", ")", ")", "char_indices", "=", "dict", "(", "(", "c", ",", "i", ")", "for", "i", ...
Modified from Keras LSTM example
[ "Modified", "from", "Keras", "LSTM", "example" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L479-L486
train
226,310
totalgood/nlpia
src/nlpia/book/examples/ch09.py
onehot_encode
def onehot_encode(dataset, char_indices, maxlen): """ One hot encode the tokens Args: dataset list of lists of tokens char_indices dictionary of {key=character, value=index to use encoding vector} maxlen int Length of each sample Return: np array of shape (samples, tokens, encoding length) """ X = np.zeros((len(dataset), maxlen, len(char_indices.keys()))) for i, sentence in enumerate(dataset): for t, char in enumerate(sentence): X[i, t, char_indices[char]] = 1 return X
python
def onehot_encode(dataset, char_indices, maxlen): """ One hot encode the tokens Args: dataset list of lists of tokens char_indices dictionary of {key=character, value=index to use encoding vector} maxlen int Length of each sample Return: np array of shape (samples, tokens, encoding length) """ X = np.zeros((len(dataset), maxlen, len(char_indices.keys()))) for i, sentence in enumerate(dataset): for t, char in enumerate(sentence): X[i, t, char_indices[char]] = 1 return X
[ "def", "onehot_encode", "(", "dataset", ",", "char_indices", ",", "maxlen", ")", ":", "X", "=", "np", ".", "zeros", "(", "(", "len", "(", "dataset", ")", ",", "maxlen", ",", "len", "(", "char_indices", ".", "keys", "(", ")", ")", ")", ")", "for", ...
One hot encode the tokens Args: dataset list of lists of tokens char_indices dictionary of {key=character, value=index to use encoding vector} maxlen int Length of each sample Return: np array of shape (samples, tokens, encoding length)
[ "One", "hot", "encode", "the", "tokens" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L495-L510
train
226,311
totalgood/nlpia
src/nlpia/book/examples/ch04_sklearn_pca_source.py
_fit_full
def _fit_full(self=self, X=X, n_components=6): """Fit the model by computing full SVD on X""" n_samples, n_features = X.shape # Center data self.mean_ = np.mean(X, axis=0) print(self.mean_) X -= self.mean_ print(X.round(2)) U, S, V = linalg.svd(X, full_matrices=False) print(V.round(2)) # flip eigenvectors' sign to enforce deterministic output U, V = svd_flip(U, V) components_ = V print(components_.round(2)) # Get variance explained by singular values explained_variance_ = (S ** 2) / (n_samples - 1) total_var = explained_variance_.sum() explained_variance_ratio_ = explained_variance_ / total_var singular_values_ = S.copy() # Store the singular values. # Postprocess the number of components required if n_components == 'mle': n_components = \ _infer_dimension_(explained_variance_, n_samples, n_features) elif 0 < n_components < 1.0: # number of components for which the cumulated explained # variance percentage is superior to the desired threshold ratio_cumsum = stable_cumsum(explained_variance_ratio_) n_components = np.searchsorted(ratio_cumsum, n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < min(n_features, n_samples): self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. self.n_samples_, self.n_features_ = n_samples, n_features self.components_ = components_[:n_components] print(self.components_.round(2)) self.n_components_ = n_components self.explained_variance_ = explained_variance_[:n_components] self.explained_variance_ratio_ = \ explained_variance_ratio_[:n_components] self.singular_values_ = singular_values_[:n_components] return U, S, V
python
def _fit_full(self=self, X=X, n_components=6): """Fit the model by computing full SVD on X""" n_samples, n_features = X.shape # Center data self.mean_ = np.mean(X, axis=0) print(self.mean_) X -= self.mean_ print(X.round(2)) U, S, V = linalg.svd(X, full_matrices=False) print(V.round(2)) # flip eigenvectors' sign to enforce deterministic output U, V = svd_flip(U, V) components_ = V print(components_.round(2)) # Get variance explained by singular values explained_variance_ = (S ** 2) / (n_samples - 1) total_var = explained_variance_.sum() explained_variance_ratio_ = explained_variance_ / total_var singular_values_ = S.copy() # Store the singular values. # Postprocess the number of components required if n_components == 'mle': n_components = \ _infer_dimension_(explained_variance_, n_samples, n_features) elif 0 < n_components < 1.0: # number of components for which the cumulated explained # variance percentage is superior to the desired threshold ratio_cumsum = stable_cumsum(explained_variance_ratio_) n_components = np.searchsorted(ratio_cumsum, n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < min(n_features, n_samples): self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. self.n_samples_, self.n_features_ = n_samples, n_features self.components_ = components_[:n_components] print(self.components_.round(2)) self.n_components_ = n_components self.explained_variance_ = explained_variance_[:n_components] self.explained_variance_ratio_ = \ explained_variance_ratio_[:n_components] self.singular_values_ = singular_values_[:n_components] return U, S, V
[ "def", "_fit_full", "(", "self", "=", "self", ",", "X", "=", "X", ",", "n_components", "=", "6", ")", ":", "n_samples", ",", "n_features", "=", "X", ".", "shape", "# Center data", "self", ".", "mean_", "=", "np", ".", "mean", "(", "X", ",", "axis",...
Fit the model by computing full SVD on X
[ "Fit", "the", "model", "by", "computing", "full", "SVD", "on", "X" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch04_sklearn_pca_source.py#L136-L186
train
226,312
totalgood/nlpia
src/nlpia/clean_alice.py
extract_aiml
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'): """ Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths """ path = find_data_path(path) or path if os.path.isdir(path): paths = os.listdir(path) paths = [os.path.join(path, p) for p in paths] else: zf = zipfile.ZipFile(path) paths = [] for name in zf.namelist(): if '.hg/' in name: continue paths.append(zf.extract(name, path=BIGDATA_PATH)) return paths
python
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'): """ Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths """ path = find_data_path(path) or path if os.path.isdir(path): paths = os.listdir(path) paths = [os.path.join(path, p) for p in paths] else: zf = zipfile.ZipFile(path) paths = [] for name in zf.namelist(): if '.hg/' in name: continue paths.append(zf.extract(name, path=BIGDATA_PATH)) return paths
[ "def", "extract_aiml", "(", "path", "=", "'aiml-en-us-foundation-alice.v1-9'", ")", ":", "path", "=", "find_data_path", "(", "path", ")", "or", "path", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "paths", "=", "os", ".", "listdir", "(",...
Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths
[ "Extract", "an", "aiml", ".", "zip", "file", "if", "it", "hasn", "t", "been", "already", "and", "return", "a", "list", "of", "aiml", "file", "paths" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/clean_alice.py#L85-L98
train
226,313
totalgood/nlpia
src/nlpia/clean_alice.py
create_brain
def create_brain(path='aiml-en-us-foundation-alice.v1-9.zip'): """ Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files """ path = find_data_path(path) or path bot = Bot() num_templates = bot._brain.template_count paths = extract_aiml(path=path) for path in paths: if not path.lower().endswith('.aiml'): continue try: bot.learn(path) except AimlParserError: logger.error(format_exc()) logger.warning('AIML Parse Error: {}'.format(path)) num_templates = bot._brain.template_count - num_templates logger.info('Loaded {} trigger-response pairs.\n'.format(num_templates)) print('Loaded {} trigger-response pairs from {} AIML files.'.format(bot._brain.template_count, len(paths))) return bot
python
def create_brain(path='aiml-en-us-foundation-alice.v1-9.zip'): """ Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files """ path = find_data_path(path) or path bot = Bot() num_templates = bot._brain.template_count paths = extract_aiml(path=path) for path in paths: if not path.lower().endswith('.aiml'): continue try: bot.learn(path) except AimlParserError: logger.error(format_exc()) logger.warning('AIML Parse Error: {}'.format(path)) num_templates = bot._brain.template_count - num_templates logger.info('Loaded {} trigger-response pairs.\n'.format(num_templates)) print('Loaded {} trigger-response pairs from {} AIML files.'.format(bot._brain.template_count, len(paths))) return bot
[ "def", "create_brain", "(", "path", "=", "'aiml-en-us-foundation-alice.v1-9.zip'", ")", ":", "path", "=", "find_data_path", "(", "path", ")", "or", "path", "bot", "=", "Bot", "(", ")", "num_templates", "=", "bot", ".", "_brain", ".", "template_count", "paths",...
Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files
[ "Create", "an", "aiml_bot", ".", "Bot", "brain", "from", "an", "AIML", "zip", "file", "or", "directory", "of", "AIML", "files" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/clean_alice.py#L101-L119
train
226,314
totalgood/nlpia
src/nlpia/transcoders.py
minify_urls
def minify_urls(filepath, ext='asc', url_regex=None, output_ext='.urls_minified', access_token=None): """ Use bitly or similar minifier to shrink all URLs in text files within a folder structure. Used for the NLPIA manuscript directory for Manning Publishing bitly API: https://dev.bitly.com/links.html Args: path (str): Directory or file path ext (str): File name extension to filter text files by. default='.asc' output_ext (str): Extension to append to filenames of altered files default='' (in-place replacement of URLs) FIXME: NotImplementedError! Untested! """ access_token = access_token or secrets.bitly.access_token output_ext = output_ext or '' url_regex = regex.compile(url_regex) if isinstance(url_regex, str) else url_regex filemetas = [] for filemeta in find_files(filepath, ext=ext): filemetas += [filemeta] altered_text = '' with open(filemeta['path'], 'rt') as fin: text = fin.read() end = 0 for match in url_regex.finditer(text): url = match.group() start = match.start() altered_text += text[:start] resp = requests.get('https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'.format( access_token, url), allow_redirects=True, timeout=5) js = resp.json() short_url = js['shortUrl'] altered_text += short_url end = start + len(url) altered_text += text[end:] with open(filemeta['path'] + (output_ext or ''), 'wt') as fout: fout.write(altered_text) return altered_text
python
def minify_urls(filepath, ext='asc', url_regex=None, output_ext='.urls_minified', access_token=None): """ Use bitly or similar minifier to shrink all URLs in text files within a folder structure. Used for the NLPIA manuscript directory for Manning Publishing bitly API: https://dev.bitly.com/links.html Args: path (str): Directory or file path ext (str): File name extension to filter text files by. default='.asc' output_ext (str): Extension to append to filenames of altered files default='' (in-place replacement of URLs) FIXME: NotImplementedError! Untested! """ access_token = access_token or secrets.bitly.access_token output_ext = output_ext or '' url_regex = regex.compile(url_regex) if isinstance(url_regex, str) else url_regex filemetas = [] for filemeta in find_files(filepath, ext=ext): filemetas += [filemeta] altered_text = '' with open(filemeta['path'], 'rt') as fin: text = fin.read() end = 0 for match in url_regex.finditer(text): url = match.group() start = match.start() altered_text += text[:start] resp = requests.get('https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'.format( access_token, url), allow_redirects=True, timeout=5) js = resp.json() short_url = js['shortUrl'] altered_text += short_url end = start + len(url) altered_text += text[end:] with open(filemeta['path'] + (output_ext or ''), 'wt') as fout: fout.write(altered_text) return altered_text
[ "def", "minify_urls", "(", "filepath", ",", "ext", "=", "'asc'", ",", "url_regex", "=", "None", ",", "output_ext", "=", "'.urls_minified'", ",", "access_token", "=", "None", ")", ":", "access_token", "=", "access_token", "or", "secrets", ".", "bitly", ".", ...
Use bitly or similar minifier to shrink all URLs in text files within a folder structure. Used for the NLPIA manuscript directory for Manning Publishing bitly API: https://dev.bitly.com/links.html Args: path (str): Directory or file path ext (str): File name extension to filter text files by. default='.asc' output_ext (str): Extension to append to filenames of altered files default='' (in-place replacement of URLs) FIXME: NotImplementedError! Untested!
[ "Use", "bitly", "or", "similar", "minifier", "to", "shrink", "all", "URLs", "in", "text", "files", "within", "a", "folder", "structure", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L22-L59
train
226,315
totalgood/nlpia
src/nlpia/transcoders.py
delimit_slug
def delimit_slug(slug, sep=' '): """ Return a str of separated tokens found within a slugLike_This => 'slug Like This' >>> delimit_slug("slugLike_ThisW/aTLA's") 'slug Like This W a TLA s' >>> delimit_slug('slugLike_ThisW/aTLA', '|') 'slug|Like|This|W|a|TLA' """ hyphenated_slug = re.sub(CRE_SLUG_DELIMITTER, sep, slug) return hyphenated_slug
python
def delimit_slug(slug, sep=' '): """ Return a str of separated tokens found within a slugLike_This => 'slug Like This' >>> delimit_slug("slugLike_ThisW/aTLA's") 'slug Like This W a TLA s' >>> delimit_slug('slugLike_ThisW/aTLA', '|') 'slug|Like|This|W|a|TLA' """ hyphenated_slug = re.sub(CRE_SLUG_DELIMITTER, sep, slug) return hyphenated_slug
[ "def", "delimit_slug", "(", "slug", ",", "sep", "=", "' '", ")", ":", "hyphenated_slug", "=", "re", ".", "sub", "(", "CRE_SLUG_DELIMITTER", ",", "sep", ",", "slug", ")", "return", "hyphenated_slug" ]
Return a str of separated tokens found within a slugLike_This => 'slug Like This' >>> delimit_slug("slugLike_ThisW/aTLA's") 'slug Like This W a TLA s' >>> delimit_slug('slugLike_ThisW/aTLA', '|') 'slug|Like|This|W|a|TLA'
[ "Return", "a", "str", "of", "separated", "tokens", "found", "within", "a", "slugLike_This", "=", ">", "slug", "Like", "This" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L62-L71
train
226,316
totalgood/nlpia
src/nlpia/transcoders.py
clean_asciidoc
def clean_asciidoc(text): r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!' """ text = re.sub(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])', r'"\2', text) text = re.sub(r'([a-zA-Z0-9])[\]_*]{1,2}', r'\1"', text) return text
python
def clean_asciidoc(text): r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!' """ text = re.sub(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])', r'"\2', text) text = re.sub(r'([a-zA-Z0-9])[\]_*]{1,2}', r'\1"', text) return text
[ "def", "clean_asciidoc", "(", "text", ")", ":", "text", "=", "re", ".", "sub", "(", "r'(\\b|^)[\\[_*]{1,2}([a-zA-Z0-9])'", ",", "r'\"\\2'", ",", "text", ")", "text", "=", "re", ".", "sub", "(", "r'([a-zA-Z0-9])[\\]_*]{1,2}'", ",", "r'\\1\"'", ",", "text", ")...
r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!'
[ "r", "Transform", "asciidoc", "text", "into", "ASCII", "text", "that", "NL", "parsers", "can", "handle" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L121-L132
train
226,317
totalgood/nlpia
src/nlpia/transcoders.py
split_sentences_regex
def split_sentences_regex(text): """ Use dead-simple regex to split text into sentences. Very poor accuracy. >>> split_sentences_regex("Hello World. I'm I.B.M.'s Watson. --Watson") ['Hello World.', "I'm I.B.M.'s Watson.", '--Watson'] """ parts = regex.split(r'([a-zA-Z0-9][.?!])[\s$]', text) sentences = [''.join(s) for s in zip(parts[0::2], parts[1::2])] return sentences + [parts[-1]] if len(parts) % 2 else sentences
python
def split_sentences_regex(text): """ Use dead-simple regex to split text into sentences. Very poor accuracy. >>> split_sentences_regex("Hello World. I'm I.B.M.'s Watson. --Watson") ['Hello World.', "I'm I.B.M.'s Watson.", '--Watson'] """ parts = regex.split(r'([a-zA-Z0-9][.?!])[\s$]', text) sentences = [''.join(s) for s in zip(parts[0::2], parts[1::2])] return sentences + [parts[-1]] if len(parts) % 2 else sentences
[ "def", "split_sentences_regex", "(", "text", ")", ":", "parts", "=", "regex", ".", "split", "(", "r'([a-zA-Z0-9][.?!])[\\s$]'", ",", "text", ")", "sentences", "=", "[", "''", ".", "join", "(", "s", ")", "for", "s", "in", "zip", "(", "parts", "[", "0", ...
Use dead-simple regex to split text into sentences. Very poor accuracy. >>> split_sentences_regex("Hello World. I'm I.B.M.'s Watson. --Watson") ['Hello World.', "I'm I.B.M.'s Watson.", '--Watson']
[ "Use", "dead", "-", "simple", "regex", "to", "split", "text", "into", "sentences", ".", "Very", "poor", "accuracy", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L157-L165
train
226,318
totalgood/nlpia
src/nlpia/transcoders.py
split_sentences_spacy
def split_sentences_spacy(text, language_model='en'): r""" You must download a spacy language model with python -m download 'en' The default English language model for spacy tends to be a lot more agressive than NLTK's punkt: >>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"] >>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0'] """ doc = nlp(text) sentences = [] if not hasattr(doc, 'sents'): logger.warning("Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded") return split_sentences_nltk(text) for w, span in enumerate(doc.sents): sent = ''.join(doc[i].string for i in range(span.start, span.end)).strip() if len(sent): sentences.append(sent) return sentences
python
def split_sentences_spacy(text, language_model='en'): r""" You must download a spacy language model with python -m download 'en' The default English language model for spacy tends to be a lot more agressive than NLTK's punkt: >>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"] >>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0'] """ doc = nlp(text) sentences = [] if not hasattr(doc, 'sents'): logger.warning("Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded") return split_sentences_nltk(text) for w, span in enumerate(doc.sents): sent = ''.join(doc[i].string for i in range(span.start, span.end)).strip() if len(sent): sentences.append(sent) return sentences
[ "def", "split_sentences_spacy", "(", "text", ",", "language_model", "=", "'en'", ")", ":", "doc", "=", "nlp", "(", "text", ")", "sentences", "=", "[", "]", "if", "not", "hasattr", "(", "doc", ",", "'sents'", ")", ":", "logger", ".", "warning", "(", "...
r""" You must download a spacy language model with python -m download 'en' The default English language model for spacy tends to be a lot more agressive than NLTK's punkt: >>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"] >>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0']
[ "r", "You", "must", "download", "a", "spacy", "language", "model", "with", "python", "-", "m", "download", "en" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L168-L192
train
226,319
totalgood/nlpia
src/nlpia/transcoders.py
segment_sentences
def segment_sentences(path=os.path.join(DATA_PATH, 'book'), splitter=split_sentences_nltk, **find_files_kwargs): """ Return a list of all sentences and empty lines. TODO: 1. process each line with an aggressive sentence segmenter, like DetectorMorse 2. process our manuscript to create a complete-sentence and heading training set normalized/simplified syntax net tree is the input feature set common words and N-grams inserted with their label as additional feature 3. process a training set with a grammar checker and syntax to bootstrap a "complete sentence" labeler. 4. process each 1-3 line window (breaking on empty lines) with syntax net to label them 5. label each 1-3-line window of lines as "complete sentence, partial sentence/phrase, or multi-sentence" >>> 10000 > len(segment_sentences(path=os.path.join(DATA_PATH, 'book'))) >= 4 True >>> len(segment_sentences(path=os.path.join(DATA_PATH, 'psychology-scripts.txt'), splitter=split_sentences_nltk)) 23 """ sentences = [] if os.path.isdir(path): for filemeta in find_files(path, **find_files_kwargs): with open(filemeta['path']) as fin: i, batch = 0, [] try: for i, line in enumerate(fin): if not line.strip(): sentences.extend(splitter('\n'.join(batch))) batch = [line] # may contain all whitespace else: batch.append(line) except (UnicodeDecodeError, IOError): logger.error('UnicodeDecodeError or IOError on line {} in file {} from stat: {}'.format( i + 1, fin.name, filemeta)) raise if len(batch): # TODO: tag sentences with line + filename where they started sentences.extend(splitter('\n'.join(batch))) else: batch = [] for i, line in enumerate(iter_lines(path)): # TODO: filter out code and meta lines using asciidoc or markdown parser # split into batches based on empty lines if not line.strip(): sentences.extend(splitter('\n'.join(batch))) # first line may contain all whitespace batch = [line] else: batch.append(line) if len(batch): # TODO: tag sentences with line + filename where they started 
sentences.extend(splitter('\n'.join(batch))) return sentences
python
def segment_sentences(path=os.path.join(DATA_PATH, 'book'), splitter=split_sentences_nltk, **find_files_kwargs): """ Return a list of all sentences and empty lines. TODO: 1. process each line with an aggressive sentence segmenter, like DetectorMorse 2. process our manuscript to create a complete-sentence and heading training set normalized/simplified syntax net tree is the input feature set common words and N-grams inserted with their label as additional feature 3. process a training set with a grammar checker and syntax to bootstrap a "complete sentence" labeler. 4. process each 1-3 line window (breaking on empty lines) with syntax net to label them 5. label each 1-3-line window of lines as "complete sentence, partial sentence/phrase, or multi-sentence" >>> 10000 > len(segment_sentences(path=os.path.join(DATA_PATH, 'book'))) >= 4 True >>> len(segment_sentences(path=os.path.join(DATA_PATH, 'psychology-scripts.txt'), splitter=split_sentences_nltk)) 23 """ sentences = [] if os.path.isdir(path): for filemeta in find_files(path, **find_files_kwargs): with open(filemeta['path']) as fin: i, batch = 0, [] try: for i, line in enumerate(fin): if not line.strip(): sentences.extend(splitter('\n'.join(batch))) batch = [line] # may contain all whitespace else: batch.append(line) except (UnicodeDecodeError, IOError): logger.error('UnicodeDecodeError or IOError on line {} in file {} from stat: {}'.format( i + 1, fin.name, filemeta)) raise if len(batch): # TODO: tag sentences with line + filename where they started sentences.extend(splitter('\n'.join(batch))) else: batch = [] for i, line in enumerate(iter_lines(path)): # TODO: filter out code and meta lines using asciidoc or markdown parser # split into batches based on empty lines if not line.strip(): sentences.extend(splitter('\n'.join(batch))) # first line may contain all whitespace batch = [line] else: batch.append(line) if len(batch): # TODO: tag sentences with line + filename where they started 
sentences.extend(splitter('\n'.join(batch))) return sentences
[ "def", "segment_sentences", "(", "path", "=", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "'book'", ")", ",", "splitter", "=", "split_sentences_nltk", ",", "*", "*", "find_files_kwargs", ")", ":", "sentences", "=", "[", "]", "if", "os", ".", ...
Return a list of all sentences and empty lines. TODO: 1. process each line with an aggressive sentence segmenter, like DetectorMorse 2. process our manuscript to create a complete-sentence and heading training set normalized/simplified syntax net tree is the input feature set common words and N-grams inserted with their label as additional feature 3. process a training set with a grammar checker and syntax to bootstrap a "complete sentence" labeler. 4. process each 1-3 line window (breaking on empty lines) with syntax net to label them 5. label each 1-3-line window of lines as "complete sentence, partial sentence/phrase, or multi-sentence" >>> 10000 > len(segment_sentences(path=os.path.join(DATA_PATH, 'book'))) >= 4 True >>> len(segment_sentences(path=os.path.join(DATA_PATH, 'psychology-scripts.txt'), splitter=split_sentences_nltk)) 23
[ "Return", "a", "list", "of", "all", "sentences", "and", "empty", "lines", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L216-L267
train
226,320
totalgood/nlpia
src/nlpia/transcoders.py
fix_hunspell_json
def fix_hunspell_json(badjson_path='en_us.json', goodjson_path='en_us_fixed.json'): """Fix the invalid hunspellToJSON.py json format by inserting double-quotes in list of affix strings Args: badjson_path (str): path to input json file that doesn't properly quote goodjson_path (str): path to output json file with properly quoted strings in list of affixes Returns: list of all words with all possible affixes in *.txt format (simplified .dic format) References: Syed Faisal Ali 's Hunspell dic parser: https://github.com/SyedFaisalAli/HunspellToJSON """ with open(badjson_path, 'r') as fin: with open(goodjson_path, 'w') as fout: for i, line in enumerate(fin): line2 = regex.sub(r'\[(\w)', r'["\1', line) line2 = regex.sub(r'(\w)\]', r'\1"]', line2) line2 = regex.sub(r'(\w),(\w)', r'\1","\2', line2) fout.write(line2) with open(goodjson_path, 'r') as fin: words = [] with open(goodjson_path + '.txt', 'w') as fout: hunspell = json.load(fin) for word, affixes in hunspell['words'].items(): words += [word] fout.write(word + '\n') for affix in affixes: words += [affix] fout.write(affix + '\n') return words
python
def fix_hunspell_json(badjson_path='en_us.json', goodjson_path='en_us_fixed.json'): """Fix the invalid hunspellToJSON.py json format by inserting double-quotes in list of affix strings Args: badjson_path (str): path to input json file that doesn't properly quote goodjson_path (str): path to output json file with properly quoted strings in list of affixes Returns: list of all words with all possible affixes in *.txt format (simplified .dic format) References: Syed Faisal Ali 's Hunspell dic parser: https://github.com/SyedFaisalAli/HunspellToJSON """ with open(badjson_path, 'r') as fin: with open(goodjson_path, 'w') as fout: for i, line in enumerate(fin): line2 = regex.sub(r'\[(\w)', r'["\1', line) line2 = regex.sub(r'(\w)\]', r'\1"]', line2) line2 = regex.sub(r'(\w),(\w)', r'\1","\2', line2) fout.write(line2) with open(goodjson_path, 'r') as fin: words = [] with open(goodjson_path + '.txt', 'w') as fout: hunspell = json.load(fin) for word, affixes in hunspell['words'].items(): words += [word] fout.write(word + '\n') for affix in affixes: words += [affix] fout.write(affix + '\n') return words
[ "def", "fix_hunspell_json", "(", "badjson_path", "=", "'en_us.json'", ",", "goodjson_path", "=", "'en_us_fixed.json'", ")", ":", "with", "open", "(", "badjson_path", ",", "'r'", ")", "as", "fin", ":", "with", "open", "(", "goodjson_path", ",", "'w'", ")", "a...
Fix the invalid hunspellToJSON.py json format by inserting double-quotes in list of affix strings Args: badjson_path (str): path to input json file that doesn't properly quote goodjson_path (str): path to output json file with properly quoted strings in list of affixes Returns: list of all words with all possible affixes in *.txt format (simplified .dic format) References: Syed Faisal Ali 's Hunspell dic parser: https://github.com/SyedFaisalAli/HunspellToJSON
[ "Fix", "the", "invalid", "hunspellToJSON", ".", "py", "json", "format", "by", "inserting", "double", "-", "quotes", "in", "list", "of", "affix", "strings" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L295-L327
train
226,321
totalgood/nlpia
src/nlpia/book/examples/ch12_retrieval.py
format_ubuntu_dialog
def format_ubuntu_dialog(df): """ Print statements paired with replies, formatted for easy review """ s = '' for i, record in df.iterrows(): statement = list(split_turns(record.Context))[-1] # <1> reply = list(split_turns(record.Utterance))[-1] # <2> s += 'Statement: {}\n'.format(statement) s += 'Reply: {}\n\n'.format(reply) return s
python
def format_ubuntu_dialog(df): """ Print statements paired with replies, formatted for easy review """ s = '' for i, record in df.iterrows(): statement = list(split_turns(record.Context))[-1] # <1> reply = list(split_turns(record.Utterance))[-1] # <2> s += 'Statement: {}\n'.format(statement) s += 'Reply: {}\n\n'.format(reply) return s
[ "def", "format_ubuntu_dialog", "(", "df", ")", ":", "s", "=", "''", "for", "i", ",", "record", "in", "df", ".", "iterrows", "(", ")", ":", "statement", "=", "list", "(", "split_turns", "(", "record", ".", "Context", ")", ")", "[", "-", "1", "]", ...
Print statements paired with replies, formatted for easy review
[ "Print", "statements", "paired", "with", "replies", "formatted", "for", "easy", "review" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch12_retrieval.py#L40-L48
train
226,322
totalgood/nlpia
src/nlpia/regexes.py
splitext
def splitext(filepath): """ Like os.path.splitext except splits compound extensions as one long one >>> splitext('~/.bashrc.asciidoc.ext.ps4.42') ('~/.bashrc', '.asciidoc.ext.ps4.42') >>> splitext('~/.bash_profile') ('~/.bash_profile', '') """ exts = getattr(CRE_FILENAME_EXT.search(filepath), 'group', str)() return (filepath[:(-len(exts) or None)], exts)
python
def splitext(filepath): """ Like os.path.splitext except splits compound extensions as one long one >>> splitext('~/.bashrc.asciidoc.ext.ps4.42') ('~/.bashrc', '.asciidoc.ext.ps4.42') >>> splitext('~/.bash_profile') ('~/.bash_profile', '') """ exts = getattr(CRE_FILENAME_EXT.search(filepath), 'group', str)() return (filepath[:(-len(exts) or None)], exts)
[ "def", "splitext", "(", "filepath", ")", ":", "exts", "=", "getattr", "(", "CRE_FILENAME_EXT", ".", "search", "(", "filepath", ")", ",", "'group'", ",", "str", ")", "(", ")", "return", "(", "filepath", "[", ":", "(", "-", "len", "(", "exts", ")", "...
Like os.path.splitext except splits compound extensions as one long one >>> splitext('~/.bashrc.asciidoc.ext.ps4.42') ('~/.bashrc', '.asciidoc.ext.ps4.42') >>> splitext('~/.bash_profile') ('~/.bash_profile', '')
[ "Like", "os", ".", "path", ".", "splitext", "except", "splits", "compound", "extensions", "as", "one", "long", "one" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/regexes.py#L109-L118
train
226,323
totalgood/nlpia
src/nlpia/plots.py
offline_plotly_scatter3d
def offline_plotly_scatter3d(df, x=0, y=1, z=-1): """ Plot an offline scatter plot colored according to the categories in the 'name' column. >> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv') >> offline_plotly(df) """ data = [] # clusters = [] colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)'] # df.columns = clean_columns(df.columns) x = get_array(df, x, default=0) y = get_array(df, y, default=1) z = get_array(df, z, default=-1) for i in range(len(df['name'].unique())): name = df['Name'].unique()[i] color = colors[i] x = x[pd.np.array(df['name'] == name)] y = y[pd.np.array(df['name'] == name)] z = z[pd.np.array(df['name'] == name)] trace = dict( name=name, x=x, y=y, z=z, type="scatter3d", mode='markers', marker=dict(size=3, color=color, line=dict(width=0))) data.append(trace) layout = dict( width=800, height=550, autosize=False, title='Iris dataset', scene=dict( xaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), yaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), zaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), aspectratio=dict(x=1, y=1, z=0.7), aspectmode='manual' ), ) fig = dict(data=data, layout=layout) # IPython notebook # plotly.iplot(fig, filename='pandas-3d-iris', validate=False) url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False) return url
python
def offline_plotly_scatter3d(df, x=0, y=1, z=-1): """ Plot an offline scatter plot colored according to the categories in the 'name' column. >> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv') >> offline_plotly(df) """ data = [] # clusters = [] colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)'] # df.columns = clean_columns(df.columns) x = get_array(df, x, default=0) y = get_array(df, y, default=1) z = get_array(df, z, default=-1) for i in range(len(df['name'].unique())): name = df['Name'].unique()[i] color = colors[i] x = x[pd.np.array(df['name'] == name)] y = y[pd.np.array(df['name'] == name)] z = z[pd.np.array(df['name'] == name)] trace = dict( name=name, x=x, y=y, z=z, type="scatter3d", mode='markers', marker=dict(size=3, color=color, line=dict(width=0))) data.append(trace) layout = dict( width=800, height=550, autosize=False, title='Iris dataset', scene=dict( xaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), yaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), zaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), aspectratio=dict(x=1, y=1, z=0.7), aspectmode='manual' ), ) fig = dict(data=data, layout=layout) # IPython notebook # plotly.iplot(fig, filename='pandas-3d-iris', validate=False) url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False) return url
[ "def", "offline_plotly_scatter3d", "(", "df", ",", "x", "=", "0", ",", "y", "=", "1", ",", "z", "=", "-", "1", ")", ":", "data", "=", "[", "]", "# clusters = []", "colors", "=", "[", "'rgb(228,26,28)'", ",", "'rgb(55,126,184)'", ",", "'rgb(77,175,74)'", ...
Plot an offline scatter plot colored according to the categories in the 'name' column. >> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv') >> offline_plotly(df)
[ "Plot", "an", "offline", "scatter", "plot", "colored", "according", "to", "the", "categories", "in", "the", "name", "column", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L107-L172
train
226,324
totalgood/nlpia
src/nlpia/plots.py
offline_plotly_data
def offline_plotly_data(data, filename=None, config=None, validate=True, default_width='100%', default_height=525, global_requirejs=False): r""" Write a plotly scatter plot to HTML file that doesn't require server >>> from nlpia.loaders import get_data >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv') >>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns] >>> data = {'data': [ ... Scatter(x=df[continent+', x'], ... y=df[continent+', y'], ... text=df[continent+', text'], ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,), ... mode='markers', ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] ... ], ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log')) ... } >>> html = offline_plotly_data(data, filename=None) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) if config is not None: config_default.update(config) with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f: js = f.read() html, divid, width, height = _plot_html( data, config=config_default, validate=validate, default_width=default_width, default_height=default_height, global_requirejs=global_requirejs) html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html) if filename and isinstance(filename, str): with open(filename, 'wt') as f: f.write(html) return html
python
def offline_plotly_data(data, filename=None, config=None, validate=True, default_width='100%', default_height=525, global_requirejs=False): r""" Write a plotly scatter plot to HTML file that doesn't require server >>> from nlpia.loaders import get_data >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv') >>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns] >>> data = {'data': [ ... Scatter(x=df[continent+', x'], ... y=df[continent+', y'], ... text=df[continent+', text'], ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,), ... mode='markers', ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] ... ], ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log')) ... } >>> html = offline_plotly_data(data, filename=None) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) if config is not None: config_default.update(config) with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f: js = f.read() html, divid, width, height = _plot_html( data, config=config_default, validate=validate, default_width=default_width, default_height=default_height, global_requirejs=global_requirejs) html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html) if filename and isinstance(filename, str): with open(filename, 'wt') as f: f.write(html) return html
[ "def", "offline_plotly_data", "(", "data", ",", "filename", "=", "None", ",", "config", "=", "None", ",", "validate", "=", "True", ",", "default_width", "=", "'100%'", ",", "default_height", "=", "525", ",", "global_requirejs", "=", "False", ")", ":", "con...
r""" Write a plotly scatter plot to HTML file that doesn't require server >>> from nlpia.loaders import get_data >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv') >>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns] >>> data = {'data': [ ... Scatter(x=df[continent+', x'], ... y=df[continent+', y'], ... text=df[continent+', text'], ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,), ... mode='markers', ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] ... ], ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log')) ... } >>> html = offline_plotly_data(data, filename=None)
[ "r", "Write", "a", "plotly", "scatter", "plot", "to", "HTML", "file", "that", "doesn", "t", "require", "server" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L189-L223
train
226,325
totalgood/nlpia
src/nlpia/plots.py
normalize_etpinard_df
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(), category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']): """Reformat a dataframe in etpinard's format for use in plot functions and sklearn models""" possible_categories = ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] if possible_categories is None else possible_categories df.columns = clean_columns(df.columns) df = pd.read_csv(df) if isinstance(df, str) else df columns = clean_columns(list(columns)) df2 = pd.DataFrame(columns=columns) df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories]) columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns]) for col, category_cols in columns: df2[col] = np.concatenate([df[label].values for label in category_cols]) return df2
python
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(), category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']): """Reformat a dataframe in etpinard's format for use in plot functions and sklearn models""" possible_categories = ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] if possible_categories is None else possible_categories df.columns = clean_columns(df.columns) df = pd.read_csv(df) if isinstance(df, str) else df columns = clean_columns(list(columns)) df2 = pd.DataFrame(columns=columns) df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories]) columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns]) for col, category_cols in columns: df2[col] = np.concatenate([df[label].values for label in category_cols]) return df2
[ "def", "normalize_etpinard_df", "(", "df", "=", "'https://plot.ly/~etpinard/191.csv'", ",", "columns", "=", "'x y size text'", ".", "split", "(", ")", ",", "category_col", "=", "'category'", ",", "possible_categories", "=", "[", "'Africa'", ",", "'Americas'", ",", ...
Reformat a dataframe in etpinard's format for use in plot functions and sklearn models
[ "Reformat", "a", "dataframe", "in", "etpinard", "s", "format", "for", "use", "in", "plot", "functions", "and", "sklearn", "models" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L226-L239
train
226,326
totalgood/nlpia
src/nlpia/plots.py
offline_plotly_scatter_bubble
def offline_plotly_scatter_bubble(df, x='x', y='y', size_col='size', text_col='text', category_col='category', possible_categories=None, filename=None, config={'displaylogo': False}, xscale=None, yscale='log', layout={'hovermode': 'closest', 'showlegend': False, 'autosize': True}, marker={'sizemode': 'area'}, min_size=10, ): r"""Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns config keys: fillFrame setBackground displaylogo sendData showLink linkText staticPlot scrollZoom plot3dPixelRatio displayModeBar showTips workspace doubleClick autosizable editable layout keys: angularaxis annotations autosize bargap bargroupgap barmode barnorm boxgap boxgroupgap boxmode calendar direction dragmode font geo height hiddenlabels hiddenlabelssrc hidesources hovermode images legend mapbox margin orientation paper_bgcolor plot_bgcolor radialaxis scene separators shapes showlegend sliders smith ternary title titlefont updatemenus width xaxis yaxis marker keys: autocolorscale blend border cauto cmax cmin color colorbar colors colorscale colorsrc colorssrc line maxdisplayed opacity opacitysrc outliercolor reversescale showscale size sizemax sizemin sizemode sizeref sizesrc symbol symbolsrc marker['sizeref'] gives the denominator of the circle scaling factor. Typically it should be about a tenth of the minimum 'size' column value >>> from nlpia.data.loaders import get_data >>> df = get_data('cities_us_wordvectors_pca2_meta').iloc[:100] >>> html = offline_plotly_scatter_bubble( ... df.sort_values('population', ascending=False)[:350].copy().sort_values('population'), ... x='x', y='y', ... size_col='population', text_col='name', category_col='timezone', ... xscale=None, yscale=None, # 'log' or None ... 
layout={}, marker={'sizeref': 3000}) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) marker_default = { 'size': size_col or min_size, 'sizemode': 'area', 'sizeref': int(df[size_col].min() * .8) if size_col else min_size} marker_default.update(marker) size_col = marker_default.pop('size') layout_default = { 'xaxis': XAxis(title=x, type=xscale), 'yaxis': YAxis(title=y, type=yscale), } layout_default.update(**layout) if config is not None: config_default.update(config) df.columns = clean_columns(df.columns) if possible_categories is None and category_col is not None: if category_col in df.columns: category_labels = df[category_col] else: category_labels = np.array(category_col) possible_categories = list(set(category_labels)) possible_categories = [None] if possible_categories is None else possible_categories if category_col and category_col in df: masks = [np.array(df[category_col] == label) for label in possible_categories] else: masks = [np.array([True] * len(df))] * len(possible_categories) data = {'data': [ Scatter(x=df[x][mask].values, y=df[y][mask].values, text=df[text_col][mask].values, marker=Marker(size=df[size_col][mask] if size_col in df.columns else size_col, **marker_default), mode='markers', name=str(category_name)) for (category_name, mask) in zip(possible_categories, masks) ], 'layout': Layout(**layout_default) } return offline_plotly_data(data, filename=filename, config=config_default)
python
def offline_plotly_scatter_bubble(df, x='x', y='y', size_col='size', text_col='text', category_col='category', possible_categories=None, filename=None, config={'displaylogo': False}, xscale=None, yscale='log', layout={'hovermode': 'closest', 'showlegend': False, 'autosize': True}, marker={'sizemode': 'area'}, min_size=10, ): r"""Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns config keys: fillFrame setBackground displaylogo sendData showLink linkText staticPlot scrollZoom plot3dPixelRatio displayModeBar showTips workspace doubleClick autosizable editable layout keys: angularaxis annotations autosize bargap bargroupgap barmode barnorm boxgap boxgroupgap boxmode calendar direction dragmode font geo height hiddenlabels hiddenlabelssrc hidesources hovermode images legend mapbox margin orientation paper_bgcolor plot_bgcolor radialaxis scene separators shapes showlegend sliders smith ternary title titlefont updatemenus width xaxis yaxis marker keys: autocolorscale blend border cauto cmax cmin color colorbar colors colorscale colorsrc colorssrc line maxdisplayed opacity opacitysrc outliercolor reversescale showscale size sizemax sizemin sizemode sizeref sizesrc symbol symbolsrc marker['sizeref'] gives the denominator of the circle scaling factor. Typically it should be about a tenth of the minimum 'size' column value >>> from nlpia.data.loaders import get_data >>> df = get_data('cities_us_wordvectors_pca2_meta').iloc[:100] >>> html = offline_plotly_scatter_bubble( ... df.sort_values('population', ascending=False)[:350].copy().sort_values('population'), ... x='x', y='y', ... size_col='population', text_col='name', category_col='timezone', ... xscale=None, yscale=None, # 'log' or None ... 
layout={}, marker={'sizeref': 3000}) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) marker_default = { 'size': size_col or min_size, 'sizemode': 'area', 'sizeref': int(df[size_col].min() * .8) if size_col else min_size} marker_default.update(marker) size_col = marker_default.pop('size') layout_default = { 'xaxis': XAxis(title=x, type=xscale), 'yaxis': YAxis(title=y, type=yscale), } layout_default.update(**layout) if config is not None: config_default.update(config) df.columns = clean_columns(df.columns) if possible_categories is None and category_col is not None: if category_col in df.columns: category_labels = df[category_col] else: category_labels = np.array(category_col) possible_categories = list(set(category_labels)) possible_categories = [None] if possible_categories is None else possible_categories if category_col and category_col in df: masks = [np.array(df[category_col] == label) for label in possible_categories] else: masks = [np.array([True] * len(df))] * len(possible_categories) data = {'data': [ Scatter(x=df[x][mask].values, y=df[y][mask].values, text=df[text_col][mask].values, marker=Marker(size=df[size_col][mask] if size_col in df.columns else size_col, **marker_default), mode='markers', name=str(category_name)) for (category_name, mask) in zip(possible_categories, masks) ], 'layout': Layout(**layout_default) } return offline_plotly_data(data, filename=filename, config=config_default)
[ "def", "offline_plotly_scatter_bubble", "(", "df", ",", "x", "=", "'x'", ",", "y", "=", "'y'", ",", "size_col", "=", "'size'", ",", "text_col", "=", "'text'", ",", "category_col", "=", "'category'", ",", "possible_categories", "=", "None", ",", "filename", ...
r"""Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns config keys: fillFrame setBackground displaylogo sendData showLink linkText staticPlot scrollZoom plot3dPixelRatio displayModeBar showTips workspace doubleClick autosizable editable layout keys: angularaxis annotations autosize bargap bargroupgap barmode barnorm boxgap boxgroupgap boxmode calendar direction dragmode font geo height hiddenlabels hiddenlabelssrc hidesources hovermode images legend mapbox margin orientation paper_bgcolor plot_bgcolor radialaxis scene separators shapes showlegend sliders smith ternary title titlefont updatemenus width xaxis yaxis marker keys: autocolorscale blend border cauto cmax cmin color colorbar colors colorscale colorsrc colorssrc line maxdisplayed opacity opacitysrc outliercolor reversescale showscale size sizemax sizemin sizemode sizeref sizesrc symbol symbolsrc marker['sizeref'] gives the denominator of the circle scaling factor. Typically it should be about a tenth of the minimum 'size' column value >>> from nlpia.data.loaders import get_data >>> df = get_data('cities_us_wordvectors_pca2_meta').iloc[:100] >>> html = offline_plotly_scatter_bubble( ... df.sort_values('population', ascending=False)[:350].copy().sort_values('population'), ... x='x', y='y', ... size_col='population', text_col='name', category_col='timezone', ... xscale=None, yscale=None, # 'log' or None ... layout={}, marker={'sizeref': 3000})
[ "r", "Interactive", "scatterplot", "of", "a", "DataFrame", "with", "the", "size", "and", "color", "of", "circles", "linke", "to", "two", "columns" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L242-L316
train
226,327
totalgood/nlpia
src/nlpia/data_utils.py
format_hex
def format_hex(i, num_bytes=4, prefix='0x'): """ Format hexidecimal string from decimal integer value >>> format_hex(42, num_bytes=8, prefix=None) '0000002a' >>> format_hex(23) '0x0017' """ prefix = str(prefix or '') i = int(i or 0) return prefix + '{0:0{1}x}'.format(i, num_bytes)
python
def format_hex(i, num_bytes=4, prefix='0x'): """ Format hexidecimal string from decimal integer value >>> format_hex(42, num_bytes=8, prefix=None) '0000002a' >>> format_hex(23) '0x0017' """ prefix = str(prefix or '') i = int(i or 0) return prefix + '{0:0{1}x}'.format(i, num_bytes)
[ "def", "format_hex", "(", "i", ",", "num_bytes", "=", "4", ",", "prefix", "=", "'0x'", ")", ":", "prefix", "=", "str", "(", "prefix", "or", "''", ")", "i", "=", "int", "(", "i", "or", "0", ")", "return", "prefix", "+", "'{0:0{1}x}'", ".", "format...
Format hexidecimal string from decimal integer value >>> format_hex(42, num_bytes=8, prefix=None) '0000002a' >>> format_hex(23) '0x0017'
[ "Format", "hexidecimal", "string", "from", "decimal", "integer", "value" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L38-L48
train
226,328
totalgood/nlpia
src/nlpia/data_utils.py
is_up_url
def is_up_url(url, allow_redirects=False, timeout=5): r""" Check URL to see if it is a valid web page, return the redirected location if it is Returns: None if ConnectionError False if url is invalid (any HTTP error code) cleaned up URL (following redirects and possibly adding HTTP schema "http://") >>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine 'https://duckduckgo.com/' >>> urlisup = is_up_url("totalgood.org") >>> not urlisup or str(urlisup).startswith('http') True >>> urlisup = is_up_url("wikipedia.org") >>> str(urlisup).startswith('http') True >>> 'wikipedia.org' in str(urlisup) True >>> bool(is_up_url('8158989668202919656')) False >>> is_up_url('invalidurlwithoutadomain') False """ if not isinstance(url, basestring) or '.' not in url: return False normalized_url = prepend_http(url) session = requests.Session() session.mount(url, HTTPAdapter(max_retries=2)) try: resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout) except ConnectionError: return None except: return None if resp.status_code in (301, 302, 307) or resp.headers.get('location', None): return resp.headers.get('location', None) # return redirected URL elif 100 <= resp.status_code < 400: return normalized_url # return the original URL that was requested/visited else: return False
python
def is_up_url(url, allow_redirects=False, timeout=5): r""" Check URL to see if it is a valid web page, return the redirected location if it is Returns: None if ConnectionError False if url is invalid (any HTTP error code) cleaned up URL (following redirects and possibly adding HTTP schema "http://") >>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine 'https://duckduckgo.com/' >>> urlisup = is_up_url("totalgood.org") >>> not urlisup or str(urlisup).startswith('http') True >>> urlisup = is_up_url("wikipedia.org") >>> str(urlisup).startswith('http') True >>> 'wikipedia.org' in str(urlisup) True >>> bool(is_up_url('8158989668202919656')) False >>> is_up_url('invalidurlwithoutadomain') False """ if not isinstance(url, basestring) or '.' not in url: return False normalized_url = prepend_http(url) session = requests.Session() session.mount(url, HTTPAdapter(max_retries=2)) try: resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout) except ConnectionError: return None except: return None if resp.status_code in (301, 302, 307) or resp.headers.get('location', None): return resp.headers.get('location', None) # return redirected URL elif 100 <= resp.status_code < 400: return normalized_url # return the original URL that was requested/visited else: return False
[ "def", "is_up_url", "(", "url", ",", "allow_redirects", "=", "False", ",", "timeout", "=", "5", ")", ":", "if", "not", "isinstance", "(", "url", ",", "basestring", ")", "or", "'.'", "not", "in", "url", ":", "return", "False", "normalized_url", "=", "pr...
r""" Check URL to see if it is a valid web page, return the redirected location if it is Returns: None if ConnectionError False if url is invalid (any HTTP error code) cleaned up URL (following redirects and possibly adding HTTP schema "http://") >>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine 'https://duckduckgo.com/' >>> urlisup = is_up_url("totalgood.org") >>> not urlisup or str(urlisup).startswith('http') True >>> urlisup = is_up_url("wikipedia.org") >>> str(urlisup).startswith('http') True >>> 'wikipedia.org' in str(urlisup) True >>> bool(is_up_url('8158989668202919656')) False >>> is_up_url('invalidurlwithoutadomain') False
[ "r", "Check", "URL", "to", "see", "if", "it", "is", "a", "valid", "web", "page", "return", "the", "redirected", "location", "if", "it", "is" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L83-L122
train
226,329
totalgood/nlpia
src/nlpia/data_utils.py
get_markdown_levels
def get_markdown_levels(lines, levels=set((0, 1, 2, 3, 4, 5, 6))): r""" Return a list of 2-tuples with a level integer for the heading levels >>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n') [(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n') [(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2) [(2, 'bad')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1) [] """ if isinstance(levels, (int, float, basestring, str, bytes)): levels = [float(levels)] levels = set([int(i) for i in levels]) if isinstance(lines, basestring): lines = lines.splitlines() level_lines = [] for line in lines: level_line = None if 0 in levels: level_line = (0, line) lstripped = line.lstrip() for i in range(6, 1, -1): if lstripped.startswith('#' * i): level_line = (i, lstripped[i:].lstrip()) break if level_line and level_line[0] in levels: level_lines.append(level_line) return level_lines
python
def get_markdown_levels(lines, levels=set((0, 1, 2, 3, 4, 5, 6))): r""" Return a list of 2-tuples with a level integer for the heading levels >>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n') [(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n') [(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2) [(2, 'bad')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1) [] """ if isinstance(levels, (int, float, basestring, str, bytes)): levels = [float(levels)] levels = set([int(i) for i in levels]) if isinstance(lines, basestring): lines = lines.splitlines() level_lines = [] for line in lines: level_line = None if 0 in levels: level_line = (0, line) lstripped = line.lstrip() for i in range(6, 1, -1): if lstripped.startswith('#' * i): level_line = (i, lstripped[i:].lstrip()) break if level_line and level_line[0] in levels: level_lines.append(level_line) return level_lines
[ "def", "get_markdown_levels", "(", "lines", ",", "levels", "=", "set", "(", "(", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ")", ")", ")", ":", "if", "isinstance", "(", "levels", ",", "(", "int", ",", "float", ",", "base...
r""" Return a list of 2-tuples with a level integer for the heading levels >>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n') [(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n') [(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2) [(2, 'bad')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1) []
[ "r", "Return", "a", "list", "of", "2", "-", "tuples", "with", "a", "level", "integer", "for", "the", "heading", "levels" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L125-L155
train
226,330
totalgood/nlpia
src/nlpia/data_utils.py
iter_lines
def iter_lines(url_or_text, ext=None, mode='rt'): r""" Return an iterator over the lines of a file or URI response. >>> len(list(iter_lines('cats_and_dogs.txt'))) 263 >>> len(list(iter_lines(list('abcdefgh')))) 8 >>> len(list(iter_lines('abc\n def\n gh\n'))) 3 >>> len(list(iter_lines('abc\n def\n gh'))) 3 >>> 20000 > len(list(iter_lines(BOOK_PATH))) > 200 True """ if url_or_text is None or not url_or_text: return [] # url_or_text = 'https://www.fileformat.info/info/charset/UTF-8/list.htm' elif isinstance(url_or_text, (str, bytes, basestring)): if '\n' in url_or_text or '\r' in url_or_text: return StringIO(url_or_text) elif os.path.isfile(os.path.join(DATA_PATH, url_or_text)): return open(os.path.join(DATA_PATH, url_or_text), mode=mode) elif os.path.isfile(url_or_text): return open(os.path.join(url_or_text), mode=mode) if os.path.isdir(url_or_text): filepaths = [filemeta['path'] for filemeta in find_files(url_or_text, ext=ext)] return itertools.chain.from_iterable(map(open, filepaths)) url = looks_like_url(url_or_text) if url: for i in range(3): return requests.get(url, stream=True, allow_redirects=True, timeout=5) else: return StringIO(url_or_text) elif isinstance(url_or_text, (list, tuple)): # FIXME: make this lazy with chain and map so it doesn't gobble up RAM text = '' for s in url_or_text: text += '\n'.join(list(iter_lines(s, ext=ext, mode=mode))) + '\n' return iter_lines(text)
python
def iter_lines(url_or_text, ext=None, mode='rt'): r""" Return an iterator over the lines of a file or URI response. >>> len(list(iter_lines('cats_and_dogs.txt'))) 263 >>> len(list(iter_lines(list('abcdefgh')))) 8 >>> len(list(iter_lines('abc\n def\n gh\n'))) 3 >>> len(list(iter_lines('abc\n def\n gh'))) 3 >>> 20000 > len(list(iter_lines(BOOK_PATH))) > 200 True """ if url_or_text is None or not url_or_text: return [] # url_or_text = 'https://www.fileformat.info/info/charset/UTF-8/list.htm' elif isinstance(url_or_text, (str, bytes, basestring)): if '\n' in url_or_text or '\r' in url_or_text: return StringIO(url_or_text) elif os.path.isfile(os.path.join(DATA_PATH, url_or_text)): return open(os.path.join(DATA_PATH, url_or_text), mode=mode) elif os.path.isfile(url_or_text): return open(os.path.join(url_or_text), mode=mode) if os.path.isdir(url_or_text): filepaths = [filemeta['path'] for filemeta in find_files(url_or_text, ext=ext)] return itertools.chain.from_iterable(map(open, filepaths)) url = looks_like_url(url_or_text) if url: for i in range(3): return requests.get(url, stream=True, allow_redirects=True, timeout=5) else: return StringIO(url_or_text) elif isinstance(url_or_text, (list, tuple)): # FIXME: make this lazy with chain and map so it doesn't gobble up RAM text = '' for s in url_or_text: text += '\n'.join(list(iter_lines(s, ext=ext, mode=mode))) + '\n' return iter_lines(text)
[ "def", "iter_lines", "(", "url_or_text", ",", "ext", "=", "None", ",", "mode", "=", "'rt'", ")", ":", "if", "url_or_text", "is", "None", "or", "not", "url_or_text", ":", "return", "[", "]", "# url_or_text = 'https://www.fileformat.info/info/charset/UTF-8/list.htm'",...
r""" Return an iterator over the lines of a file or URI response. >>> len(list(iter_lines('cats_and_dogs.txt'))) 263 >>> len(list(iter_lines(list('abcdefgh')))) 8 >>> len(list(iter_lines('abc\n def\n gh\n'))) 3 >>> len(list(iter_lines('abc\n def\n gh'))) 3 >>> 20000 > len(list(iter_lines(BOOK_PATH))) > 200 True
[ "r", "Return", "an", "iterator", "over", "the", "lines", "of", "a", "file", "or", "URI", "response", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L186-L224
train
226,331
totalgood/nlpia
src/nlpia/data_utils.py
parse_utf_html
def parse_utf_html(url=os.path.join(DATA_PATH, 'utf8_table.html')): """ Parse HTML table UTF8 char descriptions returning DataFrame with `ascii` and `mutliascii` """ utf = pd.read_html(url) utf = [df for df in utf if len(df) > 1023 and len(df.columns) > 2][0] utf = utf.iloc[:1024] if len(utf) == 1025 else utf utf.columns = 'char name hex'.split() utf.name = utf.name.str.replace('<control>', 'CONTTROL CHARACTER') multiascii = [' '] * len(utf) asc = [' '] * len(utf) rows = [] for i, name in enumerate(utf.name): if i < 128 and str.isprintable(chr(i)): asc[i] = chr(i) else: asc[i] = ' ' big = re.findall(r'CAPITAL\ LETTER\ ([a-z0-9A-Z ]+$)', name) small = re.findall(r'SMALL\ LETTER\ ([a-z0-9A-Z ]+$)', name) pattern = r'(?P<description>' \ r'(?P<lang>LATIN|GREEK|COPTIC|CYRILLIC)?[\s]*' \ r'(?P<case>CAPITAL|SMALL)?[\s]*' \ r'(?P<length>CHARACTER|LETTER)?[\s]*' \ r'(?P<ukrainian>BYELORUSSIAN-UKRAINIAN)?[\s]*' \ r'(?P<name>[-_><a-z0-9A-Z\s ]+)[\s]*' \ r'\(?(?P<code_point>U\+[- a-fA-F0-9]{4,8})?\)?)[\s]*' # noqa match = re.match(pattern, name) gd = match.groupdict() gd['char'] = chr(i) gd['suffix'] = None gd['wordwith'] = None withprefix = re.match(r'(?P<prefix>DOTLESS|TURNED|SMALL)(?P<name>.*)' + r'(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withprefix: gd.update(withprefix.groupdict()) withsuffix = re.match(r'(?P<name>.*)(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+' + r'(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withsuffix: gd.update(withsuffix.groupdict()) gd['code_point'] = gd['code_point'] or format_hex(i, num_bytes=4, prefix='U+').upper() if i < 128: gd['ascii'] = chr(i) else: multiascii = gd['name'] if gd['suffix'] and gd['wordwith']: multiascii = NAME_ACCENT.get(gd['suffix'], "'") else: if big: m = big[0] multiascii[i] = m if len(m) == 1: asc[i] = m elif small: multiascii[i] = small[0].lower() if len(multiascii[i]) == 1: asc[i] = small[0].lower() rows.append(gd) df = pd.DataFrame(rows) df.multiascii 
= df.multiascii.str.strip() df['ascii'] = df['ascii'].str.strip() df.name = df.name.str.strip() return df
python
def parse_utf_html(url=os.path.join(DATA_PATH, 'utf8_table.html')): """ Parse HTML table UTF8 char descriptions returning DataFrame with `ascii` and `mutliascii` """ utf = pd.read_html(url) utf = [df for df in utf if len(df) > 1023 and len(df.columns) > 2][0] utf = utf.iloc[:1024] if len(utf) == 1025 else utf utf.columns = 'char name hex'.split() utf.name = utf.name.str.replace('<control>', 'CONTTROL CHARACTER') multiascii = [' '] * len(utf) asc = [' '] * len(utf) rows = [] for i, name in enumerate(utf.name): if i < 128 and str.isprintable(chr(i)): asc[i] = chr(i) else: asc[i] = ' ' big = re.findall(r'CAPITAL\ LETTER\ ([a-z0-9A-Z ]+$)', name) small = re.findall(r'SMALL\ LETTER\ ([a-z0-9A-Z ]+$)', name) pattern = r'(?P<description>' \ r'(?P<lang>LATIN|GREEK|COPTIC|CYRILLIC)?[\s]*' \ r'(?P<case>CAPITAL|SMALL)?[\s]*' \ r'(?P<length>CHARACTER|LETTER)?[\s]*' \ r'(?P<ukrainian>BYELORUSSIAN-UKRAINIAN)?[\s]*' \ r'(?P<name>[-_><a-z0-9A-Z\s ]+)[\s]*' \ r'\(?(?P<code_point>U\+[- a-fA-F0-9]{4,8})?\)?)[\s]*' # noqa match = re.match(pattern, name) gd = match.groupdict() gd['char'] = chr(i) gd['suffix'] = None gd['wordwith'] = None withprefix = re.match(r'(?P<prefix>DOTLESS|TURNED|SMALL)(?P<name>.*)' + r'(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withprefix: gd.update(withprefix.groupdict()) withsuffix = re.match(r'(?P<name>.*)(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+' + r'(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withsuffix: gd.update(withsuffix.groupdict()) gd['code_point'] = gd['code_point'] or format_hex(i, num_bytes=4, prefix='U+').upper() if i < 128: gd['ascii'] = chr(i) else: multiascii = gd['name'] if gd['suffix'] and gd['wordwith']: multiascii = NAME_ACCENT.get(gd['suffix'], "'") else: if big: m = big[0] multiascii[i] = m if len(m) == 1: asc[i] = m elif small: multiascii[i] = small[0].lower() if len(multiascii[i]) == 1: asc[i] = small[0].lower() rows.append(gd) df = pd.DataFrame(rows) df.multiascii 
= df.multiascii.str.strip() df['ascii'] = df['ascii'].str.strip() df.name = df.name.str.strip() return df
[ "def", "parse_utf_html", "(", "url", "=", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "'utf8_table.html'", ")", ")", ":", "utf", "=", "pd", ".", "read_html", "(", "url", ")", "utf", "=", "[", "df", "for", "df", "in", "utf", "if", "len", ...
Parse HTML table UTF8 char descriptions returning DataFrame with `ascii` and `mutliascii`
[ "Parse", "HTML", "table", "UTF8", "char", "descriptions", "returning", "DataFrame", "with", "ascii", "and", "mutliascii" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L227-L291
train
226,332
totalgood/nlpia
src/nlpia/data_utils.py
clean_csvs
def clean_csvs(dialogpath=None): """ Translate non-ASCII characters to spaces or equivalent ASCII characters """ dialogdir = os.dirname(dialogpath) if os.path.isfile(dialogpath) else dialogpath filenames = [dialogpath.split(os.path.sep)[-1]] if os.path.isfile(dialogpath) else os.listdir(dialogpath) for filename in filenames: filepath = os.path.join(dialogdir, filename) df = clean_df(filepath) df.to_csv(filepath, header=None) return filenames
python
def clean_csvs(dialogpath=None): """ Translate non-ASCII characters to spaces or equivalent ASCII characters """ dialogdir = os.dirname(dialogpath) if os.path.isfile(dialogpath) else dialogpath filenames = [dialogpath.split(os.path.sep)[-1]] if os.path.isfile(dialogpath) else os.listdir(dialogpath) for filename in filenames: filepath = os.path.join(dialogdir, filename) df = clean_df(filepath) df.to_csv(filepath, header=None) return filenames
[ "def", "clean_csvs", "(", "dialogpath", "=", "None", ")", ":", "dialogdir", "=", "os", ".", "dirname", "(", "dialogpath", ")", "if", "os", ".", "path", ".", "isfile", "(", "dialogpath", ")", "else", "dialogpath", "filenames", "=", "[", "dialogpath", ".",...
Translate non-ASCII characters to spaces or equivalent ASCII characters
[ "Translate", "non", "-", "ASCII", "characters", "to", "spaces", "or", "equivalent", "ASCII", "characters" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L294-L302
train
226,333
totalgood/nlpia
src/nlpia/data_utils.py
unicode2ascii
def unicode2ascii(text, expand=True): r""" Translate UTF8 characters to ASCII >> unicode2ascii("żółw") zozw utf8_letters = 'ą ę ć ź ż ó ł ń ś “ ” ’'.split() ascii_letters = 'a e c z z o l n s " " \'' """ translate = UTF8_TO_ASCII if not expand else UTF8_TO_MULTIASCII output = '' for c in text: if not c or ord(c) < 128: output += c else: output += translate[c] if c in translate else ' ' return output.strip()
python
def unicode2ascii(text, expand=True): r""" Translate UTF8 characters to ASCII >> unicode2ascii("żółw") zozw utf8_letters = 'ą ę ć ź ż ó ł ń ś “ ” ’'.split() ascii_letters = 'a e c z z o l n s " " \'' """ translate = UTF8_TO_ASCII if not expand else UTF8_TO_MULTIASCII output = '' for c in text: if not c or ord(c) < 128: output += c else: output += translate[c] if c in translate else ' ' return output.strip()
[ "def", "unicode2ascii", "(", "text", ",", "expand", "=", "True", ")", ":", "translate", "=", "UTF8_TO_ASCII", "if", "not", "expand", "else", "UTF8_TO_MULTIASCII", "output", "=", "''", "for", "c", "in", "text", ":", "if", "not", "c", "or", "ord", "(", "...
r""" Translate UTF8 characters to ASCII >> unicode2ascii("żółw") zozw utf8_letters = 'ą ę ć ź ż ó ł ń ś “ ” ’'.split() ascii_letters = 'a e c z z o l n s " " \''
[ "r", "Translate", "UTF8", "characters", "to", "ASCII" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L305-L321
train
226,334
totalgood/nlpia
src/nlpia/data_utils.py
clean_df
def clean_df(df, header=None, **read_csv_kwargs): """ Convert UTF8 characters in a CSV file or dataframe into ASCII Args: df (DataFrame or str): DataFrame or path or url to CSV """ df = read_csv(df, header=header, **read_csv_kwargs) df = df.fillna(' ') for col in df.columns: df[col] = df[col].apply(unicode2ascii) return df
python
def clean_df(df, header=None, **read_csv_kwargs): """ Convert UTF8 characters in a CSV file or dataframe into ASCII Args: df (DataFrame or str): DataFrame or path or url to CSV """ df = read_csv(df, header=header, **read_csv_kwargs) df = df.fillna(' ') for col in df.columns: df[col] = df[col].apply(unicode2ascii) return df
[ "def", "clean_df", "(", "df", ",", "header", "=", "None", ",", "*", "*", "read_csv_kwargs", ")", ":", "df", "=", "read_csv", "(", "df", ",", "header", "=", "header", ",", "*", "*", "read_csv_kwargs", ")", "df", "=", "df", ".", "fillna", "(", "' '",...
Convert UTF8 characters in a CSV file or dataframe into ASCII Args: df (DataFrame or str): DataFrame or path or url to CSV
[ "Convert", "UTF8", "characters", "in", "a", "CSV", "file", "or", "dataframe", "into", "ASCII" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L324-L334
train
226,335
totalgood/nlpia
src/nlpia/book_parser.py
get_acronyms
def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')): """ Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples """ acronyms = [] for f, lines in get_lines(manuscript): for line in lines: matches = CRE_ACRONYM.finditer(line) if matches: for m in matches: if m.group('a2'): acronyms.append((m.group('a2'), m.group('s2'))) elif m.group('a3'): acronyms.append((m.group('a3'), m.group('s3'))) elif m.group('a4'): acronyms.append((m.group('a4'), m.group('s4'))) elif m.group('a5'): acronyms.append((m.group('a5'), m.group('s5'))) return sorted(dict(acronyms).items())
python
def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')): """ Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples """ acronyms = [] for f, lines in get_lines(manuscript): for line in lines: matches = CRE_ACRONYM.finditer(line) if matches: for m in matches: if m.group('a2'): acronyms.append((m.group('a2'), m.group('s2'))) elif m.group('a3'): acronyms.append((m.group('a3'), m.group('s3'))) elif m.group('a4'): acronyms.append((m.group('a4'), m.group('s4'))) elif m.group('a5'): acronyms.append((m.group('a5'), m.group('s5'))) return sorted(dict(acronyms).items())
[ "def", "get_acronyms", "(", "manuscript", "=", "os", ".", "path", ".", "expanduser", "(", "'~/code/nlpia/lane/manuscript'", ")", ")", ":", "acronyms", "=", "[", "]", "for", "f", ",", "lines", "in", "get_lines", "(", "manuscript", ")", ":", "for", "line", ...
Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples
[ "Find", "all", "the", "2", "and", "3", "-", "letter", "acronyms", "in", "the", "manuscript", "and", "return", "as", "a", "sorted", "list", "of", "tuples" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L90-L107
train
226,336
totalgood/nlpia
src/nlpia/book_parser.py
write_glossary
def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None): """ Compose an asciidoc string with acronyms culled from the manuscript """ linesep = linesep or os.linesep lines = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]'] acronyms = get_acronyms(manuscript) for a in acronyms: lines.append('*{}*:: {} -- '.format(a[0], a[1][0].upper() + a[1][1:])) return linesep.join(lines)
python
def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None): """ Compose an asciidoc string with acronyms culled from the manuscript """ linesep = linesep or os.linesep lines = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]'] acronyms = get_acronyms(manuscript) for a in acronyms: lines.append('*{}*:: {} -- '.format(a[0], a[1][0].upper() + a[1][1:])) return linesep.join(lines)
[ "def", "write_glossary", "(", "manuscript", "=", "os", ".", "path", ".", "expanduser", "(", "'~/code/nlpia/lane/manuscript'", ")", ",", "linesep", "=", "None", ")", ":", "linesep", "=", "linesep", "or", "os", ".", "linesep", "lines", "=", "[", "'[acronyms]'"...
Compose an asciidoc string with acronyms culled from the manuscript
[ "Compose", "an", "asciidoc", "string", "with", "acronyms", "culled", "from", "the", "manuscript" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L110-L117
train
226,337
totalgood/nlpia
src/nlpia/book_parser.py
infer_url_title
def infer_url_title(url): """ Guess what the page title is going to be from the path and FQDN in the URL >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html') 'the what if tool code free probing of' """ meta = get_url_filemeta(url) title = '' if meta: if meta.get('hostname', url) == 'drive.google.com': title = get_url_title(url) else: title = meta.get('filename', meta['hostname']) or meta['hostname'] title, fileext = splitext(title) else: logging.error('Unable to retrieve URL: {}'.format(url)) return None return delimit_slug(title, ' ')
python
def infer_url_title(url): """ Guess what the page title is going to be from the path and FQDN in the URL >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html') 'the what if tool code free probing of' """ meta = get_url_filemeta(url) title = '' if meta: if meta.get('hostname', url) == 'drive.google.com': title = get_url_title(url) else: title = meta.get('filename', meta['hostname']) or meta['hostname'] title, fileext = splitext(title) else: logging.error('Unable to retrieve URL: {}'.format(url)) return None return delimit_slug(title, ' ')
[ "def", "infer_url_title", "(", "url", ")", ":", "meta", "=", "get_url_filemeta", "(", "url", ")", "title", "=", "''", "if", "meta", ":", "if", "meta", ".", "get", "(", "'hostname'", ",", "url", ")", "==", "'drive.google.com'", ":", "title", "=", "get_u...
Guess what the page title is going to be from the path and FQDN in the URL >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html') 'the what if tool code free probing of'
[ "Guess", "what", "the", "page", "title", "is", "going", "to", "be", "from", "the", "path", "and", "FQDN", "in", "the", "URL" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L245-L262
train
226,338
totalgood/nlpia
src/nlpia/book_parser.py
translate_book
def translate_book(translators=(HyperlinkStyleCorrector().translate, translate_line_footnotes), book_dir=BOOK_PATH, dest=None, include_tags=None, ext='.nlpiabak', skip_untitled=True): """ Fix any style corrections listed in `translate` list of translation functions >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 3 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks')) """ if callable(translators) or not hasattr(translators, '__len__'): translators = (translators,) sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags) file_line_maps = [] for fileid, (filepath, tagged_lines) in enumerate(sections): logger.info('filepath={}'.format(filepath)) destpath = filepath if not dest: copyfile(filepath, filepath + '.' + ext.lstrip('.')) elif os.path.sep in dest: destpath = os.path.join(dest, os.path.basename(filepath)) else: destpath = os.path.join(os.path.dirname(filepath), dest, os.path.basename(filepath)) ensure_dir_exists(os.path.dirname(destpath)) with open(destpath, 'w') as fout: logger.info('destpath={}'.format(destpath)) for lineno, (tag, line) in enumerate(tagged_lines): if (include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags))): for translate in translators: new_line = translate(line) # TODO: be smarter about writing to files in-place if line != new_line: file_line_maps.append((fileid, lineno, filepath, destpath, line, new_line)) line = new_line fout.write(line) return file_line_maps
python
def translate_book(translators=(HyperlinkStyleCorrector().translate, translate_line_footnotes), book_dir=BOOK_PATH, dest=None, include_tags=None, ext='.nlpiabak', skip_untitled=True): """ Fix any style corrections listed in `translate` list of translation functions >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 3 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks')) """ if callable(translators) or not hasattr(translators, '__len__'): translators = (translators,) sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags) file_line_maps = [] for fileid, (filepath, tagged_lines) in enumerate(sections): logger.info('filepath={}'.format(filepath)) destpath = filepath if not dest: copyfile(filepath, filepath + '.' + ext.lstrip('.')) elif os.path.sep in dest: destpath = os.path.join(dest, os.path.basename(filepath)) else: destpath = os.path.join(os.path.dirname(filepath), dest, os.path.basename(filepath)) ensure_dir_exists(os.path.dirname(destpath)) with open(destpath, 'w') as fout: logger.info('destpath={}'.format(destpath)) for lineno, (tag, line) in enumerate(tagged_lines): if (include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags))): for translate in translators: new_line = translate(line) # TODO: be smarter about writing to files in-place if line != new_line: file_line_maps.append((fileid, lineno, filepath, destpath, line, new_line)) line = new_line fout.write(line) return file_line_maps
[ "def", "translate_book", "(", "translators", "=", "(", "HyperlinkStyleCorrector", "(", ")", ".", "translate", ",", "translate_line_footnotes", ")", ",", "book_dir", "=", "BOOK_PATH", ",", "dest", "=", "None", ",", "include_tags", "=", "None", ",", "ext", "=", ...
Fix any style corrections listed in `translate` list of translation functions >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 3 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks'))
[ "Fix", "any", "style", "corrections", "listed", "in", "translate", "list", "of", "translation", "functions" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L335-L371
train
226,339
totalgood/nlpia
src/nlpia/book_parser.py
filter_lines
def filter_lines(input_file, output_file, translate=lambda line: line): """ Translate all the lines of a single file """ filepath, lines = get_lines([input_file])[0] return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines]
python
def filter_lines(input_file, output_file, translate=lambda line: line): """ Translate all the lines of a single file """ filepath, lines = get_lines([input_file])[0] return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines]
[ "def", "filter_lines", "(", "input_file", ",", "output_file", ",", "translate", "=", "lambda", "line", ":", "line", ")", ":", "filepath", ",", "lines", "=", "get_lines", "(", "[", "input_file", "]", ")", "[", "0", "]", "return", "filepath", ",", "[", "...
Translate all the lines of a single file
[ "Translate", "all", "the", "lines", "of", "a", "single", "file" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L407-L410
train
226,340
totalgood/nlpia
src/nlpia/book_parser.py
filter_tagged_lines
def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None): r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')]) <generator object filter_tagged_lines at ...> >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')], ... include_tags='natural')) [('natural', 'Hello.')] """ include_tags = (include_tags,) if isinstance(include_tags, str) else include_tags exclude_tags = (exclude_tags,) if isinstance(exclude_tags, str) else exclude_tags for tagged_line in tagged_lines: if (include_tags is None or tagged_line[0] in include_tags or any((tagged_line[0].startswith(t) for t in include_tags))): if exclude_tags is None or not any((tagged_line[0].startswith(t) for t in exclude_tags)): yield tagged_line else: logger.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format( tagged_line[0], exclude_tags)) else: logger.debug('skipping tag {} because not in {}'.format(tagged_line[0], include_tags))
python
def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None): r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')]) <generator object filter_tagged_lines at ...> >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')], ... include_tags='natural')) [('natural', 'Hello.')] """ include_tags = (include_tags,) if isinstance(include_tags, str) else include_tags exclude_tags = (exclude_tags,) if isinstance(exclude_tags, str) else exclude_tags for tagged_line in tagged_lines: if (include_tags is None or tagged_line[0] in include_tags or any((tagged_line[0].startswith(t) for t in include_tags))): if exclude_tags is None or not any((tagged_line[0].startswith(t) for t in exclude_tags)): yield tagged_line else: logger.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format( tagged_line[0], exclude_tags)) else: logger.debug('skipping tag {} because not in {}'.format(tagged_line[0], include_tags))
[ "def", "filter_tagged_lines", "(", "tagged_lines", ",", "include_tags", "=", "None", ",", "exclude_tags", "=", "None", ")", ":", "include_tags", "=", "(", "include_tags", ",", ")", "if", "isinstance", "(", "include_tags", ",", "str", ")", "else", "include_tags...
r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')]) <generator object filter_tagged_lines at ...> >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')], ... include_tags='natural')) [('natural', 'Hello.')]
[ "r", "Return", "iterable", "of", "tagged", "lines", "where", "the", "tags", "all", "start", "with", "one", "of", "the", "include_tags", "prefixes" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L413-L434
train
226,341
totalgood/nlpia
src/nlpia/book/examples/ch04_catdog_lsa_sorted.py
accuracy_study
def accuracy_study(tdm=None, u=None, s=None, vt=None, verbosity=0, **kwargs): """ Reconstruct the term-document matrix and measure error as SVD terms are truncated """ smat = np.zeros((len(u), len(vt))) np.fill_diagonal(smat, s) smat = pd.DataFrame(smat, columns=vt.index, index=u.index) if verbosity: print() print('Sigma:') print(smat.round(2)) print() print('Sigma without zeroing any dim:') print(np.diag(smat.round(2))) tdm_prime = u.values.dot(smat.values).dot(vt.values) if verbosity: print() print('Reconstructed Term-Document Matrix') print(tdm_prime.round(2)) err = [np.sqrt(((tdm_prime - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print() print('Error without reducing dimensions:') print(err[-1]) # 2.3481474529927113e-15 smat2 = smat.copy() for numdim in range(len(s) - 1, 0, -1): smat2.iloc[numdim, numdim] = 0 if verbosity: print('Sigma after zeroing out dim {}'.format(numdim)) print(np.diag(smat2.round(2))) # d0 d1 d2 d3 d4 d5 # ship 2.16 0.00 0.0 0.0 0.0 0.0 # boat 0.00 1.59 0.0 0.0 0.0 0.0 # ocean 0.00 0.00 0.0 0.0 0.0 0.0 # voyage 0.00 0.00 0.0 0.0 0.0 0.0 # trip 0.00 0.00 0.0 0.0 0.0 0.0 tdm_prime2 = u.values.dot(smat2.values).dot(vt.values) err += [np.sqrt(((tdm_prime2 - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print('Error after zeroing out dim {}'.format(numdim)) print(err[-1]) return err
python
def accuracy_study(tdm=None, u=None, s=None, vt=None, verbosity=0, **kwargs): """ Reconstruct the term-document matrix and measure error as SVD terms are truncated """ smat = np.zeros((len(u), len(vt))) np.fill_diagonal(smat, s) smat = pd.DataFrame(smat, columns=vt.index, index=u.index) if verbosity: print() print('Sigma:') print(smat.round(2)) print() print('Sigma without zeroing any dim:') print(np.diag(smat.round(2))) tdm_prime = u.values.dot(smat.values).dot(vt.values) if verbosity: print() print('Reconstructed Term-Document Matrix') print(tdm_prime.round(2)) err = [np.sqrt(((tdm_prime - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print() print('Error without reducing dimensions:') print(err[-1]) # 2.3481474529927113e-15 smat2 = smat.copy() for numdim in range(len(s) - 1, 0, -1): smat2.iloc[numdim, numdim] = 0 if verbosity: print('Sigma after zeroing out dim {}'.format(numdim)) print(np.diag(smat2.round(2))) # d0 d1 d2 d3 d4 d5 # ship 2.16 0.00 0.0 0.0 0.0 0.0 # boat 0.00 1.59 0.0 0.0 0.0 0.0 # ocean 0.00 0.00 0.0 0.0 0.0 0.0 # voyage 0.00 0.00 0.0 0.0 0.0 0.0 # trip 0.00 0.00 0.0 0.0 0.0 0.0 tdm_prime2 = u.values.dot(smat2.values).dot(vt.values) err += [np.sqrt(((tdm_prime2 - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print('Error after zeroing out dim {}'.format(numdim)) print(err[-1]) return err
[ "def", "accuracy_study", "(", "tdm", "=", "None", ",", "u", "=", "None", ",", "s", "=", "None", ",", "vt", "=", "None", ",", "verbosity", "=", "0", ",", "*", "*", "kwargs", ")", ":", "smat", "=", "np", ".", "zeros", "(", "(", "len", "(", "u",...
Reconstruct the term-document matrix and measure error as SVD terms are truncated
[ "Reconstruct", "the", "term", "-", "document", "matrix", "and", "measure", "error", "as", "SVD", "terms", "are", "truncated" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch04_catdog_lsa_sorted.py#L143-L187
train
226,342
totalgood/nlpia
src/nlpia/anki.py
get_anki_phrases
def get_anki_phrases(lang='english', limit=None): """ Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."] """ lang = lang.strip().lower()[:3] lang = LANG2ANKI[lang[:2]] if lang not in ANKI_LANGUAGES else lang if lang[:2] == 'en': return get_anki_phrases_english(limit=limit) return sorted(get_data(lang).iloc[:, -1].str.strip().values)
python
def get_anki_phrases(lang='english', limit=None): """ Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."] """ lang = lang.strip().lower()[:3] lang = LANG2ANKI[lang[:2]] if lang not in ANKI_LANGUAGES else lang if lang[:2] == 'en': return get_anki_phrases_english(limit=limit) return sorted(get_data(lang).iloc[:, -1].str.strip().values)
[ "def", "get_anki_phrases", "(", "lang", "=", "'english'", ",", "limit", "=", "None", ")", ":", "lang", "=", "lang", ".", "strip", "(", ")", ".", "lower", "(", ")", "[", ":", "3", "]", "lang", "=", "LANG2ANKI", "[", "lang", "[", ":", "2", "]", "...
Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."]
[ "Retrieve", "as", "many", "anki", "paired", "-", "statement", "corpora", "as", "you", "can", "for", "the", "requested", "language" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L16-L30
train
226,343
totalgood/nlpia
src/nlpia/anki.py
get_anki_phrases_english
def get_anki_phrases_english(limit=None): """ Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True """ texts = set() for lang in ANKI_LANGUAGES: df = get_data(lang) phrases = df.eng.str.strip().values texts = texts.union(set(phrases)) if limit and len(texts) >= limit: break return sorted(texts)
python
def get_anki_phrases_english(limit=None): """ Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True """ texts = set() for lang in ANKI_LANGUAGES: df = get_data(lang) phrases = df.eng.str.strip().values texts = texts.union(set(phrases)) if limit and len(texts) >= limit: break return sorted(texts)
[ "def", "get_anki_phrases_english", "(", "limit", "=", "None", ")", ":", "texts", "=", "set", "(", ")", "for", "lang", "in", "ANKI_LANGUAGES", ":", "df", "=", "get_data", "(", "lang", ")", "phrases", "=", "df", ".", "eng", ".", "str", ".", "strip", "(...
Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True
[ "Return", "all", "the", "English", "phrases", "in", "the", "Anki", "translation", "flashcards" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L33-L46
train
226,344
totalgood/nlpia
src/nlpia/anki.py
get_vocab
def get_vocab(docs): """ Build a DataFrame containing all the words in the docs provided along with their POS tags etc >>> doc = nlp("Hey Mr. Tangerine Man!") <BLANKLINE> ... >>> get_vocab([doc]) word pos tag dep ent_type ent_iob sentiment 0 ! PUNCT . punct O 0.0 1 Hey INTJ UH intj O 0.0 2 Man NOUN NN ROOT PERSON I 0.0 3 Mr. PROPN NNP compound O 0.0 4 Tangerine PROPN NNP compound PERSON B 0.0 """ if isinstance(docs, spacy.tokens.doc.Doc): return get_vocab([docs]) vocab = set() for doc in tqdm(docs): for tok in doc: vocab.add((tok.text, tok.pos_, tok.tag_, tok.dep_, tok.ent_type_, tok.ent_iob_, tok.sentiment)) # TODO: add ent type info and other flags, e.g. like_url, like_email, etc return pd.DataFrame(sorted(vocab), columns='word pos tag dep ent_type ent_iob sentiment'.split())
python
def get_vocab(docs): """ Build a DataFrame containing all the words in the docs provided along with their POS tags etc >>> doc = nlp("Hey Mr. Tangerine Man!") <BLANKLINE> ... >>> get_vocab([doc]) word pos tag dep ent_type ent_iob sentiment 0 ! PUNCT . punct O 0.0 1 Hey INTJ UH intj O 0.0 2 Man NOUN NN ROOT PERSON I 0.0 3 Mr. PROPN NNP compound O 0.0 4 Tangerine PROPN NNP compound PERSON B 0.0 """ if isinstance(docs, spacy.tokens.doc.Doc): return get_vocab([docs]) vocab = set() for doc in tqdm(docs): for tok in doc: vocab.add((tok.text, tok.pos_, tok.tag_, tok.dep_, tok.ent_type_, tok.ent_iob_, tok.sentiment)) # TODO: add ent type info and other flags, e.g. like_url, like_email, etc return pd.DataFrame(sorted(vocab), columns='word pos tag dep ent_type ent_iob sentiment'.split())
[ "def", "get_vocab", "(", "docs", ")", ":", "if", "isinstance", "(", "docs", ",", "spacy", ".", "tokens", ".", "doc", ".", "Doc", ")", ":", "return", "get_vocab", "(", "[", "docs", "]", ")", "vocab", "=", "set", "(", ")", "for", "doc", "in", "tqdm...
Build a DataFrame containing all the words in the docs provided along with their POS tags etc >>> doc = nlp("Hey Mr. Tangerine Man!") <BLANKLINE> ... >>> get_vocab([doc]) word pos tag dep ent_type ent_iob sentiment 0 ! PUNCT . punct O 0.0 1 Hey INTJ UH intj O 0.0 2 Man NOUN NN ROOT PERSON I 0.0 3 Mr. PROPN NNP compound O 0.0 4 Tangerine PROPN NNP compound PERSON B 0.0
[ "Build", "a", "DataFrame", "containing", "all", "the", "words", "in", "the", "docs", "provided", "along", "with", "their", "POS", "tags", "etc" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L49-L70
train
226,345
totalgood/nlpia
src/nlpia/anki.py
get_word_vectors
def get_word_vectors(vocab): """ Create a word2vec embedding matrix for all the words in the vocab """ wv = get_data('word2vec') vectors = np.array(len(vocab), len(wv['the'])) for i, tok in enumerate(vocab): word = tok[0] variations = (word, word.lower(), word.lower()[:-1]) for w in variations: if w in wv: vectors[i, :] = wv[w] if not np.sum(np.abs(vectors[i])): logger.warning('Unable to find {}, {}, or {} in word2vec.'.format(*variations)) return vectors
python
def get_word_vectors(vocab): """ Create a word2vec embedding matrix for all the words in the vocab """ wv = get_data('word2vec') vectors = np.array(len(vocab), len(wv['the'])) for i, tok in enumerate(vocab): word = tok[0] variations = (word, word.lower(), word.lower()[:-1]) for w in variations: if w in wv: vectors[i, :] = wv[w] if not np.sum(np.abs(vectors[i])): logger.warning('Unable to find {}, {}, or {} in word2vec.'.format(*variations)) return vectors
[ "def", "get_word_vectors", "(", "vocab", ")", ":", "wv", "=", "get_data", "(", "'word2vec'", ")", "vectors", "=", "np", ".", "array", "(", "len", "(", "vocab", ")", ",", "len", "(", "wv", "[", "'the'", "]", ")", ")", "for", "i", ",", "tok", "in",...
Create a word2vec embedding matrix for all the words in the vocab
[ "Create", "a", "word2vec", "embedding", "matrix", "for", "all", "the", "words", "in", "the", "vocab" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L73-L85
train
226,346
totalgood/nlpia
src/nlpia/anki.py
get_anki_vocab
def get_anki_vocab(lang=['eng'], limit=None, filename='anki_en_vocabulary.csv'): """ Get all the vocab words+tags+wordvectors for the tokens in the Anki translation corpus Returns a DataFrame of with columns = word, pos, tag, dep, ent, ent_iob, sentiment, vectors """ texts = get_anki_phrases(lang=lang, limit=limit) docs = nlp(texts, lang=lang) vocab = get_vocab(docs) vocab['vector'] = get_word_vectors(vocab) # TODO: turn this into a KeyedVectors object if filename: vocab.to_csv(os.path.join(BIGDATA_PATH, filename)) return vocab
python
def get_anki_vocab(lang=['eng'], limit=None, filename='anki_en_vocabulary.csv'): """ Get all the vocab words+tags+wordvectors for the tokens in the Anki translation corpus Returns a DataFrame of with columns = word, pos, tag, dep, ent, ent_iob, sentiment, vectors """ texts = get_anki_phrases(lang=lang, limit=limit) docs = nlp(texts, lang=lang) vocab = get_vocab(docs) vocab['vector'] = get_word_vectors(vocab) # TODO: turn this into a KeyedVectors object if filename: vocab.to_csv(os.path.join(BIGDATA_PATH, filename)) return vocab
[ "def", "get_anki_vocab", "(", "lang", "=", "[", "'eng'", "]", ",", "limit", "=", "None", ",", "filename", "=", "'anki_en_vocabulary.csv'", ")", ":", "texts", "=", "get_anki_phrases", "(", "lang", "=", "lang", ",", "limit", "=", "limit", ")", "docs", "=",...
Get all the vocab words+tags+wordvectors for the tokens in the Anki translation corpus Returns a DataFrame of with columns = word, pos, tag, dep, ent, ent_iob, sentiment, vectors
[ "Get", "all", "the", "vocab", "words", "+", "tags", "+", "wordvectors", "for", "the", "tokens", "in", "the", "Anki", "translation", "corpus" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L88-L99
train
226,347
totalgood/nlpia
src/nlpia/scripts/lsa_tweets.py
lsa_twitter
def lsa_twitter(cased_tokens): """ Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens """ # Only 5 of these tokens are saved for a no_below=2 filter: # PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing if cased_tokens is None: cased_tokens = ('PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' + 'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip' ).split() cased_tokens += [s + 's' for s in cased_tokens] cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' \ 'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'.split() allcase_tokens = cased_tokens + [s.lower() for s in cased_tokens] allcase_tokens += [s.title() for s in cased_tokens] allcase_tokens += [s.upper() for s in cased_tokens] KEEP_TOKENS = allcase_tokens + ['#' + s for s in allcase_tokens] # takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names URLs vocab_path = os.path.join(BIGDATA_PATH, 'vocab939370.pkl') if os.path.isfile(vocab_path): print('Loading vocab: {} ...'.format(vocab_path)) vocab = Dictionary.load(vocab_path) print(' len(vocab) loaded: {}'.format(len(vocab.dfs))) else: tweets_path = os.path.join(BIGDATA_PATH, 'tweets.csv.gz') print('Loading tweets: {} ...'.format(tweets_path)) tweets = read_csv(tweets_path) tweets = pd.np.array(tweets.text.str.split()) with gzip.open(os.path.join(BIGDATA_PATH, 'tweets.txt.gz'), 'w') as f: for tokens in tweets: f.write((' '.join(tokens) + '\n').encode('utf-8')) # tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8')) # tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8')) # tweets.to_csv('tweets.csv.gz', compression='gzip') print('Computing vocab from {} tweets...'.format(len(tweets))) vocab = Dictionary(tweets, no_below=NO_BELOW, no_above=NO_ABOVE, keep_tokens=set(KEEP_TOKENS)) 
vocab.filter_extremes(no_below=NO_BELOW, no_above=NO_ABOVE, keep_n=KEEP_N, keep_tokens=set(KEEP_TOKENS)) print(' len(vocab) after filtering: {}'.format(len(vocab.dfs))) # no time at all, just a bookeeping step, doesn't actually compute anything tfidf = TfidfModel(id2word=vocab, dictionary=vocab) tfidf.save(os.path.join(BIGDATA_PATH, 'tfidf{}.pkl'.format(len(vocab.dfs)))) tweets = [vocab.doc2bow(tw) for tw in tweets] json.dump(tweets, gzip.open(os.path.join(BIGDATA_PATH, 'tweet_bows.json.gz'), 'w')) gc.collect() # LSA is more useful name than LSA lsa = LsiModel(tfidf[tweets], num_topics=200, id2word=vocab, extra_samples=100, power_iters=2) return lsa
python
def lsa_twitter(cased_tokens): """ Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens """ # Only 5 of these tokens are saved for a no_below=2 filter: # PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing if cased_tokens is None: cased_tokens = ('PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' + 'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip' ).split() cased_tokens += [s + 's' for s in cased_tokens] cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' \ 'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'.split() allcase_tokens = cased_tokens + [s.lower() for s in cased_tokens] allcase_tokens += [s.title() for s in cased_tokens] allcase_tokens += [s.upper() for s in cased_tokens] KEEP_TOKENS = allcase_tokens + ['#' + s for s in allcase_tokens] # takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names URLs vocab_path = os.path.join(BIGDATA_PATH, 'vocab939370.pkl') if os.path.isfile(vocab_path): print('Loading vocab: {} ...'.format(vocab_path)) vocab = Dictionary.load(vocab_path) print(' len(vocab) loaded: {}'.format(len(vocab.dfs))) else: tweets_path = os.path.join(BIGDATA_PATH, 'tweets.csv.gz') print('Loading tweets: {} ...'.format(tweets_path)) tweets = read_csv(tweets_path) tweets = pd.np.array(tweets.text.str.split()) with gzip.open(os.path.join(BIGDATA_PATH, 'tweets.txt.gz'), 'w') as f: for tokens in tweets: f.write((' '.join(tokens) + '\n').encode('utf-8')) # tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8')) # tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8')) # tweets.to_csv('tweets.csv.gz', compression='gzip') print('Computing vocab from {} tweets...'.format(len(tweets))) vocab = Dictionary(tweets, no_below=NO_BELOW, no_above=NO_ABOVE, keep_tokens=set(KEEP_TOKENS)) 
vocab.filter_extremes(no_below=NO_BELOW, no_above=NO_ABOVE, keep_n=KEEP_N, keep_tokens=set(KEEP_TOKENS)) print(' len(vocab) after filtering: {}'.format(len(vocab.dfs))) # no time at all, just a bookeeping step, doesn't actually compute anything tfidf = TfidfModel(id2word=vocab, dictionary=vocab) tfidf.save(os.path.join(BIGDATA_PATH, 'tfidf{}.pkl'.format(len(vocab.dfs)))) tweets = [vocab.doc2bow(tw) for tw in tweets] json.dump(tweets, gzip.open(os.path.join(BIGDATA_PATH, 'tweet_bows.json.gz'), 'w')) gc.collect() # LSA is more useful name than LSA lsa = LsiModel(tfidf[tweets], num_topics=200, id2word=vocab, extra_samples=100, power_iters=2) return lsa
[ "def", "lsa_twitter", "(", "cased_tokens", ")", ":", "# Only 5 of these tokens are saved for a no_below=2 filter:", "# PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing", "if", "cased_tokens", "is", "None", ":", "cased_tokens", "=", "(", "'PyConOpenSpaces...
Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens
[ "Latent", "Sentiment", "Analyis", "on", "random", "sampling", "of", "twitter", "search", "results", "for", "words", "listed", "in", "cased_tokens" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/lsa_tweets.py#L18-L72
train
226,348
totalgood/nlpia
src/nlpia/futil.py
wc
def wc(f, verbose=False, nrows=None): r""" Count lines in a text file References: https://stackoverflow.com/q/845058/623735 >>> with open(os.path.join(DATA_PATH, 'dictionary_fda_drug_names.txt')) as fin: ... print(wc(fin) == wc(fin) == 7037 == wc(fin.name)) True >>> wc(fin.name) 7037 """ tqdm_prog = tqdm if verbose else no_tqdm with ensure_open(f, mode='r') as fin: for i, line in tqdm_prog(enumerate(fin)): if nrows is not None and i >= nrows - 1: break # fin.seek(0) return i + 1
python
def wc(f, verbose=False, nrows=None): r""" Count lines in a text file References: https://stackoverflow.com/q/845058/623735 >>> with open(os.path.join(DATA_PATH, 'dictionary_fda_drug_names.txt')) as fin: ... print(wc(fin) == wc(fin) == 7037 == wc(fin.name)) True >>> wc(fin.name) 7037 """ tqdm_prog = tqdm if verbose else no_tqdm with ensure_open(f, mode='r') as fin: for i, line in tqdm_prog(enumerate(fin)): if nrows is not None and i >= nrows - 1: break # fin.seek(0) return i + 1
[ "def", "wc", "(", "f", ",", "verbose", "=", "False", ",", "nrows", "=", "None", ")", ":", "tqdm_prog", "=", "tqdm", "if", "verbose", "else", "no_tqdm", "with", "ensure_open", "(", "f", ",", "mode", "=", "'r'", ")", "as", "fin", ":", "for", "i", "...
r""" Count lines in a text file References: https://stackoverflow.com/q/845058/623735 >>> with open(os.path.join(DATA_PATH, 'dictionary_fda_drug_names.txt')) as fin: ... print(wc(fin) == wc(fin) == 7037 == wc(fin.name)) True >>> wc(fin.name) 7037
[ "r", "Count", "lines", "in", "a", "text", "file" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L36-L54
train
226,349
totalgood/nlpia
src/nlpia/futil.py
normalize_filepath
def normalize_filepath(filepath): r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz. >>> normalize_filepath('/Hello_World.txt\n') 'hello_world.txt' >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ') 'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz' """ filename = os.path.basename(filepath) dirpath = filepath[:-len(filename)] cre_controlspace = re.compile(r'[\t\r\n\f]+') new_filename = cre_controlspace.sub('', filename) if not new_filename == filename: logger.warning('Stripping whitespace from filename: {} => {}'.format( repr(filename), repr(new_filename))) filename = new_filename filename = filename.lower() filename = normalize_ext(filename) if dirpath: dirpath = dirpath[:-1] # get rid of the trailing os.path.sep return os.path.join(dirpath, filename) return filename
python
def normalize_filepath(filepath): r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz. >>> normalize_filepath('/Hello_World.txt\n') 'hello_world.txt' >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ') 'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz' """ filename = os.path.basename(filepath) dirpath = filepath[:-len(filename)] cre_controlspace = re.compile(r'[\t\r\n\f]+') new_filename = cre_controlspace.sub('', filename) if not new_filename == filename: logger.warning('Stripping whitespace from filename: {} => {}'.format( repr(filename), repr(new_filename))) filename = new_filename filename = filename.lower() filename = normalize_ext(filename) if dirpath: dirpath = dirpath[:-1] # get rid of the trailing os.path.sep return os.path.join(dirpath, filename) return filename
[ "def", "normalize_filepath", "(", "filepath", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "filepath", ")", "dirpath", "=", "filepath", "[", ":", "-", "len", "(", "filename", ")", "]", "cre_controlspace", "=", "re", ".", "compile",...
r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz. >>> normalize_filepath('/Hello_World.txt\n') 'hello_world.txt' >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ') 'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz'
[ "r", "Lowercase", "the", "filename", "and", "ext", "expanding", "extensions", "like", ".", "tgz", "to", ".", "tar", ".", "gz", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L278-L299
train
226,350
totalgood/nlpia
src/nlpia/futil.py
find_filepath
def find_filepath( filename, basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')): """ Given a filename or path see if it exists in any of the common places datafiles might be >>> p = find_filepath('iq_test.csv') >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv')) True >>> p[-len('iq_test.csv'):] 'iq_test.csv' >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent') False """ if os.path.isfile(filename): return filename for basedir in basepaths: fullpath = expand_filepath(os.path.join(basedir, filename)) if os.path.isfile(fullpath): return fullpath return False
python
def find_filepath( filename, basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')): """ Given a filename or path see if it exists in any of the common places datafiles might be >>> p = find_filepath('iq_test.csv') >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv')) True >>> p[-len('iq_test.csv'):] 'iq_test.csv' >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent') False """ if os.path.isfile(filename): return filename for basedir in basepaths: fullpath = expand_filepath(os.path.join(basedir, filename)) if os.path.isfile(fullpath): return fullpath return False
[ "def", "find_filepath", "(", "filename", ",", "basepaths", "=", "(", "os", ".", "path", ".", "curdir", ",", "DATA_PATH", ",", "BIGDATA_PATH", ",", "BASE_DIR", ",", "'~'", ",", "'~/Downloads'", ",", "os", ".", "path", ".", "join", "(", "'/'", ",", "'tmp...
Given a filename or path see if it exists in any of the common places datafiles might be >>> p = find_filepath('iq_test.csv') >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv')) True >>> p[-len('iq_test.csv'):] 'iq_test.csv' >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent') False
[ "Given", "a", "filename", "or", "path", "see", "if", "it", "exists", "in", "any", "of", "the", "common", "places", "datafiles", "might", "be" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L302-L321
train
226,351
neo4j/neo4j-python-driver
neo4j/__init__.py
Driver.close
def close(self): """ Shut down, closing any open connections in the pool. """ if not self._closed: self._closed = True if self._pool is not None: self._pool.close() self._pool = None
python
def close(self): """ Shut down, closing any open connections in the pool. """ if not self._closed: self._closed = True if self._pool is not None: self._pool.close() self._pool = None
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_closed", ":", "self", ".", "_closed", "=", "True", "if", "self", ".", "_pool", "is", "not", "None", ":", "self", ".", "_pool", ".", "close", "(", ")", "self", ".", "_pool", "=", ...
Shut down, closing any open connections in the pool.
[ "Shut", "down", "closing", "any", "open", "connections", "in", "the", "pool", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/__init__.py#L163-L170
train
226,352
neo4j/neo4j-python-driver
neo4j/types/spatial.py
hydrate_point
def hydrate_point(srid, *coordinates): """ Create a new instance of a Point subclass from a raw set of fields. The subclass chosen is determined by the given SRID; a ValueError will be raised if no such subclass can be found. """ try: point_class, dim = __srid_table[srid] except KeyError: point = Point(coordinates) point.srid = srid return point else: if len(coordinates) != dim: raise ValueError("SRID %d requires %d coordinates (%d provided)" % (srid, dim, len(coordinates))) return point_class(coordinates)
python
def hydrate_point(srid, *coordinates): """ Create a new instance of a Point subclass from a raw set of fields. The subclass chosen is determined by the given SRID; a ValueError will be raised if no such subclass can be found. """ try: point_class, dim = __srid_table[srid] except KeyError: point = Point(coordinates) point.srid = srid return point else: if len(coordinates) != dim: raise ValueError("SRID %d requires %d coordinates (%d provided)" % (srid, dim, len(coordinates))) return point_class(coordinates)
[ "def", "hydrate_point", "(", "srid", ",", "*", "coordinates", ")", ":", "try", ":", "point_class", ",", "dim", "=", "__srid_table", "[", "srid", "]", "except", "KeyError", ":", "point", "=", "Point", "(", "coordinates", ")", "point", ".", "srid", "=", ...
Create a new instance of a Point subclass from a raw set of fields. The subclass chosen is determined by the given SRID; a ValueError will be raised if no such subclass can be found.
[ "Create", "a", "new", "instance", "of", "a", "Point", "subclass", "from", "a", "raw", "set", "of", "fields", ".", "The", "subclass", "chosen", "is", "determined", "by", "the", "given", "SRID", ";", "a", "ValueError", "will", "be", "raised", "if", "no", ...
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/spatial.py#L104-L119
train
226,353
neo4j/neo4j-python-driver
neo4j/types/spatial.py
dehydrate_point
def dehydrate_point(value): """ Dehydrator for Point data. :param value: :type value: Point :return: """ dim = len(value) if dim == 2: return Structure(b"X", value.srid, *value) elif dim == 3: return Structure(b"Y", value.srid, *value) else: raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
python
def dehydrate_point(value): """ Dehydrator for Point data. :param value: :type value: Point :return: """ dim = len(value) if dim == 2: return Structure(b"X", value.srid, *value) elif dim == 3: return Structure(b"Y", value.srid, *value) else: raise ValueError("Cannot dehydrate Point with %d dimensions" % dim)
[ "def", "dehydrate_point", "(", "value", ")", ":", "dim", "=", "len", "(", "value", ")", "if", "dim", "==", "2", ":", "return", "Structure", "(", "b\"X\"", ",", "value", ".", "srid", ",", "*", "value", ")", "elif", "dim", "==", "3", ":", "return", ...
Dehydrator for Point data. :param value: :type value: Point :return:
[ "Dehydrator", "for", "Point", "data", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/spatial.py#L122-L135
train
226,354
neo4j/neo4j-python-driver
neo4j/types/__init__.py
PackStreamDehydrator.dehydrate
def dehydrate(self, values): """ Convert native values into PackStream values. """ def dehydrate_(obj): try: f = self.dehydration_functions[type(obj)] except KeyError: pass else: return f(obj) if obj is None: return None elif isinstance(obj, bool): return obj elif isinstance(obj, int): if INT64_MIN <= obj <= INT64_MAX: return obj raise ValueError("Integer out of bounds (64-bit signed integer values only)") elif isinstance(obj, float): return obj elif isinstance(obj, str): return obj elif isinstance(obj, (bytes, bytearray)): # order is important here - bytes must be checked after string if self.supports_bytes: return obj else: raise TypeError("This PackSteam channel does not support BYTES (consider upgrading to Neo4j 3.2+)") elif isinstance(obj, (list, map_type)): return list(map(dehydrate_, obj)) elif isinstance(obj, dict): if any(not isinstance(key, str) for key in obj.keys()): raise TypeError("Non-string dictionary keys are not supported") return {key: dehydrate_(value) for key, value in obj.items()} else: raise TypeError(obj) return tuple(map(dehydrate_, values))
python
def dehydrate(self, values): """ Convert native values into PackStream values. """ def dehydrate_(obj): try: f = self.dehydration_functions[type(obj)] except KeyError: pass else: return f(obj) if obj is None: return None elif isinstance(obj, bool): return obj elif isinstance(obj, int): if INT64_MIN <= obj <= INT64_MAX: return obj raise ValueError("Integer out of bounds (64-bit signed integer values only)") elif isinstance(obj, float): return obj elif isinstance(obj, str): return obj elif isinstance(obj, (bytes, bytearray)): # order is important here - bytes must be checked after string if self.supports_bytes: return obj else: raise TypeError("This PackSteam channel does not support BYTES (consider upgrading to Neo4j 3.2+)") elif isinstance(obj, (list, map_type)): return list(map(dehydrate_, obj)) elif isinstance(obj, dict): if any(not isinstance(key, str) for key in obj.keys()): raise TypeError("Non-string dictionary keys are not supported") return {key: dehydrate_(value) for key, value in obj.items()} else: raise TypeError(obj) return tuple(map(dehydrate_, values))
[ "def", "dehydrate", "(", "self", ",", "values", ")", ":", "def", "dehydrate_", "(", "obj", ")", ":", "try", ":", "f", "=", "self", ".", "dehydration_functions", "[", "type", "(", "obj", ")", "]", "except", "KeyError", ":", "pass", "else", ":", "retur...
Convert native values into PackStream values.
[ "Convert", "native", "values", "into", "PackStream", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L97-L134
train
226,355
neo4j/neo4j-python-driver
neo4j/types/__init__.py
Record.get
def get(self, key, default=None): """ Obtain a value from the record by key, returning a default value if the key does not exist. :param key: :param default: :return: """ try: index = self.__keys.index(str(key)) except ValueError: return default if 0 <= index < len(self): return super(Record, self).__getitem__(index) else: return default
python
def get(self, key, default=None): """ Obtain a value from the record by key, returning a default value if the key does not exist. :param key: :param default: :return: """ try: index = self.__keys.index(str(key)) except ValueError: return default if 0 <= index < len(self): return super(Record, self).__getitem__(index) else: return default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "try", ":", "index", "=", "self", ".", "__keys", ".", "index", "(", "str", "(", "key", ")", ")", "except", "ValueError", ":", "return", "default", "if", "0", "<=", "ind...
Obtain a value from the record by key, returning a default value if the key does not exist. :param key: :param default: :return:
[ "Obtain", "a", "value", "from", "the", "record", "by", "key", "returning", "a", "default", "value", "if", "the", "key", "does", "not", "exist", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L202-L217
train
226,356
neo4j/neo4j-python-driver
neo4j/types/__init__.py
Record.index
def index(self, key): """ Return the index of the given item. :param key: :return: """ if isinstance(key, int): if 0 <= key < len(self.__keys): return key raise IndexError(key) elif isinstance(key, str): try: return self.__keys.index(key) except ValueError: raise KeyError(key) else: raise TypeError(key)
python
def index(self, key): """ Return the index of the given item. :param key: :return: """ if isinstance(key, int): if 0 <= key < len(self.__keys): return key raise IndexError(key) elif isinstance(key, str): try: return self.__keys.index(key) except ValueError: raise KeyError(key) else: raise TypeError(key)
[ "def", "index", "(", "self", ",", "key", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "if", "0", "<=", "key", "<", "len", "(", "self", ".", "__keys", ")", ":", "return", "key", "raise", "IndexError", "(", "key", ")", "elif", ...
Return the index of the given item. :param key: :return:
[ "Return", "the", "index", "of", "the", "given", "item", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L219-L235
train
226,357
neo4j/neo4j-python-driver
neo4j/types/__init__.py
Record.value
def value(self, key=0, default=None): """ Obtain a single value from the record by index or key. If no index or key is specified, the first value is returned. If the specified item does not exist, the default value is returned. :param key: :param default: :return: """ try: index = self.index(key) except (IndexError, KeyError): return default else: return self[index]
python
def value(self, key=0, default=None): """ Obtain a single value from the record by index or key. If no index or key is specified, the first value is returned. If the specified item does not exist, the default value is returned. :param key: :param default: :return: """ try: index = self.index(key) except (IndexError, KeyError): return default else: return self[index]
[ "def", "value", "(", "self", ",", "key", "=", "0", ",", "default", "=", "None", ")", ":", "try", ":", "index", "=", "self", ".", "index", "(", "key", ")", "except", "(", "IndexError", ",", "KeyError", ")", ":", "return", "default", "else", ":", "...
Obtain a single value from the record by index or key. If no index or key is specified, the first value is returned. If the specified item does not exist, the default value is returned. :param key: :param default: :return:
[ "Obtain", "a", "single", "value", "from", "the", "record", "by", "index", "or", "key", ".", "If", "no", "index", "or", "key", "is", "specified", "the", "first", "value", "is", "returned", ".", "If", "the", "specified", "item", "does", "not", "exist", "...
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L237-L251
train
226,358
neo4j/neo4j-python-driver
neo4j/types/__init__.py
Record.values
def values(self, *keys): """ Return the values of the record, optionally filtering to include only certain values by index or key. :param keys: indexes or keys of the items to include; if none are provided, all values will be included :return: list of values """ if keys: d = [] for key in keys: try: i = self.index(key) except KeyError: d.append(None) else: d.append(self[i]) return d return list(self)
python
def values(self, *keys): """ Return the values of the record, optionally filtering to include only certain values by index or key. :param keys: indexes or keys of the items to include; if none are provided, all values will be included :return: list of values """ if keys: d = [] for key in keys: try: i = self.index(key) except KeyError: d.append(None) else: d.append(self[i]) return d return list(self)
[ "def", "values", "(", "self", ",", "*", "keys", ")", ":", "if", "keys", ":", "d", "=", "[", "]", "for", "key", "in", "keys", ":", "try", ":", "i", "=", "self", ".", "index", "(", "key", ")", "except", "KeyError", ":", "d", ".", "append", "(",...
Return the values of the record, optionally filtering to include only certain values by index or key. :param keys: indexes or keys of the items to include; if none are provided, all values will be included :return: list of values
[ "Return", "the", "values", "of", "the", "record", "optionally", "filtering", "to", "include", "only", "certain", "values", "by", "index", "or", "key", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L260-L278
train
226,359
neo4j/neo4j-python-driver
neo4j/types/__init__.py
Record.items
def items(self, *keys): """ Return the fields of the record as a list of key and value tuples :return: """ if keys: d = [] for key in keys: try: i = self.index(key) except KeyError: d.append((key, None)) else: d.append((self.__keys[i], self[i])) return d return list((self.__keys[i], super(Record, self).__getitem__(i)) for i in range(len(self)))
python
def items(self, *keys): """ Return the fields of the record as a list of key and value tuples :return: """ if keys: d = [] for key in keys: try: i = self.index(key) except KeyError: d.append((key, None)) else: d.append((self.__keys[i], self[i])) return d return list((self.__keys[i], super(Record, self).__getitem__(i)) for i in range(len(self)))
[ "def", "items", "(", "self", ",", "*", "keys", ")", ":", "if", "keys", ":", "d", "=", "[", "]", "for", "key", "in", "keys", ":", "try", ":", "i", "=", "self", ".", "index", "(", "key", ")", "except", "KeyError", ":", "d", ".", "append", "(", ...
Return the fields of the record as a list of key and value tuples :return:
[ "Return", "the", "fields", "of", "the", "record", "as", "a", "list", "of", "key", "and", "value", "tuples" ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L280-L295
train
226,360
neo4j/neo4j-python-driver
neo4j/blocking.py
_make_plan
def _make_plan(plan_dict): """ Construct a Plan or ProfiledPlan from a dictionary of metadata values. :param plan_dict: :return: """ operator_type = plan_dict["operatorType"] identifiers = plan_dict.get("identifiers", []) arguments = plan_dict.get("args", []) children = [_make_plan(child) for child in plan_dict.get("children", [])] if "dbHits" in plan_dict or "rows" in plan_dict: db_hits = plan_dict.get("dbHits", 0) rows = plan_dict.get("rows", 0) return ProfiledPlan(operator_type, identifiers, arguments, children, db_hits, rows) else: return Plan(operator_type, identifiers, arguments, children)
python
def _make_plan(plan_dict): """ Construct a Plan or ProfiledPlan from a dictionary of metadata values. :param plan_dict: :return: """ operator_type = plan_dict["operatorType"] identifiers = plan_dict.get("identifiers", []) arguments = plan_dict.get("args", []) children = [_make_plan(child) for child in plan_dict.get("children", [])] if "dbHits" in plan_dict or "rows" in plan_dict: db_hits = plan_dict.get("dbHits", 0) rows = plan_dict.get("rows", 0) return ProfiledPlan(operator_type, identifiers, arguments, children, db_hits, rows) else: return Plan(operator_type, identifiers, arguments, children)
[ "def", "_make_plan", "(", "plan_dict", ")", ":", "operator_type", "=", "plan_dict", "[", "\"operatorType\"", "]", "identifiers", "=", "plan_dict", ".", "get", "(", "\"identifiers\"", ",", "[", "]", ")", "arguments", "=", "plan_dict", ".", "get", "(", "\"args...
Construct a Plan or ProfiledPlan from a dictionary of metadata values. :param plan_dict: :return:
[ "Construct", "a", "Plan", "or", "ProfiledPlan", "from", "a", "dictionary", "of", "metadata", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L962-L977
train
226,361
neo4j/neo4j-python-driver
neo4j/blocking.py
unit_of_work
def unit_of_work(metadata=None, timeout=None): """ This function is a decorator for transaction functions that allows extra control over how the transaction is carried out. For example, a timeout (in seconds) may be applied:: @unit_of_work(timeout=25.0) def count_people(tx): return tx.run("MATCH (a:Person) RETURN count(a)").single().value() """ def wrapper(f): def wrapped(*args, **kwargs): return f(*args, **kwargs) wrapped.metadata = metadata wrapped.timeout = timeout return wrapped return wrapper
python
def unit_of_work(metadata=None, timeout=None): """ This function is a decorator for transaction functions that allows extra control over how the transaction is carried out. For example, a timeout (in seconds) may be applied:: @unit_of_work(timeout=25.0) def count_people(tx): return tx.run("MATCH (a:Person) RETURN count(a)").single().value() """ def wrapper(f): def wrapped(*args, **kwargs): return f(*args, **kwargs) wrapped.metadata = metadata wrapped.timeout = timeout return wrapped return wrapper
[ "def", "unit_of_work", "(", "metadata", "=", "None", ",", "timeout", "=", "None", ")", ":", "def", "wrapper", "(", "f", ")", ":", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "f", "(", "*", "args", ",", "*", ...
This function is a decorator for transaction functions that allows extra control over how the transaction is carried out. For example, a timeout (in seconds) may be applied:: @unit_of_work(timeout=25.0) def count_people(tx): return tx.run("MATCH (a:Person) RETURN count(a)").single().value()
[ "This", "function", "is", "a", "decorator", "for", "transaction", "functions", "that", "allows", "extra", "control", "over", "how", "the", "transaction", "is", "carried", "out", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L1007-L1028
train
226,362
neo4j/neo4j-python-driver
neo4j/blocking.py
Session.close
def close(self): """ Close the session. This will release any borrowed resources, such as connections, and will roll back any outstanding transactions. """ from neobolt.exceptions import ConnectionExpired, CypherError, ServiceUnavailable try: if self.has_transaction(): try: self.rollback_transaction() except (CypherError, TransactionError, SessionError, ConnectionExpired, ServiceUnavailable): pass finally: self._closed = True self._disconnect(sync=True)
python
def close(self):
    """ Close the session.

    Rolls back any transaction still open on this session (ignoring
    failures that can occur while doing so) and then releases any
    borrowed resources, such as connections.
    """
    from neobolt.exceptions import ConnectionExpired, CypherError, ServiceUnavailable
    # Failures during rollback are swallowed deliberately: close() must
    # always succeed in marking the session closed and disconnecting.
    ignorable = (CypherError, TransactionError, SessionError,
                 ConnectionExpired, ServiceUnavailable)
    try:
        if self.has_transaction():
            try:
                self.rollback_transaction()
            except ignorable:
                pass
    finally:
        self._closed = True
        self._disconnect(sync=True)
[ "def", "close", "(", "self", ")", ":", "from", "neobolt", ".", "exceptions", "import", "ConnectionExpired", ",", "CypherError", ",", "ServiceUnavailable", "try", ":", "if", "self", ".", "has_transaction", "(", ")", ":", "try", ":", "self", ".", "rollback_tra...
Close the session. This will release any borrowed resources, such as connections, and will roll back any outstanding transactions.
[ "Close", "the", "session", ".", "This", "will", "release", "any", "borrowed", "resources", "such", "as", "connections", "and", "will", "roll", "back", "any", "outstanding", "transactions", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L144-L157
train
226,363
neo4j/neo4j-python-driver
neo4j/blocking.py
Session.run
def run(self, statement, parameters=None, **kwparameters): """ Run a Cypher statement within an auto-commit transaction. The statement is sent and the result header received immediately but the :class:`.StatementResult` content is fetched lazily as consumed by the client application. If a statement is executed before a previous :class:`.StatementResult` in the same :class:`.Session` has been fully consumed, the first result will be fully fetched and buffered. Note therefore that the generally recommended pattern of usage is to fully consume one result before executing a subsequent statement. If two results need to be consumed in parallel, multiple :class:`.Session` objects can be used as an alternative to result buffering. For more usage details, see :meth:`.Transaction.run`. :param statement: template Cypher statement :param parameters: dictionary of parameters :param kwparameters: additional keyword parameters :returns: :class:`.StatementResult` object """ from neobolt.exceptions import ConnectionExpired self._assert_open() if not statement: raise ValueError("Cannot run an empty statement") if not isinstance(statement, (str, Statement)): raise TypeError("Statement must be a string or a Statement instance") if not self._connection: self._connect() cx = self._connection protocol_version = cx.protocol_version server = cx.server has_transaction = self.has_transaction() statement_text = str(statement) statement_metadata = getattr(statement, "metadata", None) statement_timeout = getattr(statement, "timeout", None) parameters = fix_parameters(dict(parameters or {}, **kwparameters), protocol_version, supports_bytes=server.supports("bytes")) def fail(_): self._close_transaction() hydrant = PackStreamHydrator(protocol_version) result_metadata = { "statement": statement_text, "parameters": parameters, "server": server, "protocol_version": protocol_version, } run_metadata = { "metadata": statement_metadata, "timeout": statement_timeout, "on_success": result_metadata.update, 
"on_failure": fail, } def done(summary_metadata): result_metadata.update(summary_metadata) bookmark = result_metadata.get("bookmark") if bookmark: self._bookmarks_in = tuple([bookmark]) self._bookmark_out = bookmark self._last_result = result = BoltStatementResult(self, hydrant, result_metadata) if has_transaction: if statement_metadata: raise ValueError("Metadata can only be attached at transaction level") if statement_timeout: raise ValueError("Timeouts only apply at transaction level") else: run_metadata["bookmarks"] = self._bookmarks_in cx.run(statement_text, parameters, **run_metadata) cx.pull_all( on_records=lambda records: result._records.extend( hydrant.hydrate_records(result.keys(), records)), on_success=done, on_failure=fail, on_summary=lambda: result.detach(sync=False), ) if not has_transaction: try: self._connection.send() self._connection.fetch() except ConnectionExpired as error: raise SessionExpired(*error.args) return result
python
def run(self, statement, parameters=None, **kwparameters):
    """ Run a Cypher statement within an auto-commit transaction.

    The statement is sent and the result header received
    immediately but the :class:`.StatementResult` content is
    fetched lazily as consumed by the client application.

    If a statement is executed before a previous
    :class:`.StatementResult` in the same :class:`.Session` has
    been fully consumed, the first result will be fully fetched
    and buffered. Note therefore that the generally recommended
    pattern of usage is to fully consume one result before
    executing a subsequent statement. If two results need to be
    consumed in parallel, multiple :class:`.Session` objects
    can be used as an alternative to result buffering.

    For more usage details, see :meth:`.Transaction.run`.

    :param statement: template Cypher statement
    :param parameters: dictionary of parameters
    :param kwparameters: additional keyword parameters
    :returns: :class:`.StatementResult` object
    :raises ValueError: if the statement is empty, or if transaction-level
        metadata/timeout is attached while an explicit transaction is open
    :raises TypeError: if the statement is neither a string nor a Statement
    :raises SessionExpired: if the underlying connection has expired while
        flushing an auto-commit statement
    """
    from neobolt.exceptions import ConnectionExpired
    self._assert_open()
    # Validate the statement before touching the network.
    if not statement:
        raise ValueError("Cannot run an empty statement")
    if not isinstance(statement, (str, Statement)):
        raise TypeError("Statement must be a string or a Statement instance")
    if not self._connection:
        self._connect()
    cx = self._connection
    protocol_version = cx.protocol_version
    server = cx.server

    has_transaction = self.has_transaction()

    statement_text = str(statement)
    # metadata/timeout may be attached to a Statement via @unit_of_work;
    # plain strings have neither, hence the getattr defaults.
    statement_metadata = getattr(statement, "metadata", None)
    statement_timeout = getattr(statement, "timeout", None)
    # Explicit parameters and keyword parameters are merged; keywords win.
    parameters = fix_parameters(dict(parameters or {}, **kwparameters), protocol_version,
                                supports_bytes=server.supports("bytes"))

    def fail(_):
        # On any server-side failure, tear down the open transaction state.
        self._close_transaction()

    hydrant = PackStreamHydrator(protocol_version)
    result_metadata = {
        "statement": statement_text,
        "parameters": parameters,
        "server": server,
        "protocol_version": protocol_version,
    }
    run_metadata = {
        "metadata": statement_metadata,
        "timeout": statement_timeout,
        "on_success": result_metadata.update,
        "on_failure": fail,
    }

    def done(summary_metadata):
        # Merge the summary into the result metadata and propagate any
        # bookmark returned by the server into this session's bookmark state.
        result_metadata.update(summary_metadata)
        bookmark = result_metadata.get("bookmark")
        if bookmark:
            self._bookmarks_in = tuple([bookmark])
            self._bookmark_out = bookmark

    self._last_result = result = BoltStatementResult(self, hydrant, result_metadata)

    if has_transaction:
        # Inside an explicit transaction, per-statement metadata/timeout is
        # rejected: those settings belong to the transaction itself.
        if statement_metadata:
            raise ValueError("Metadata can only be attached at transaction level")
        if statement_timeout:
            raise ValueError("Timeouts only apply at transaction level")
    else:
        run_metadata["bookmarks"] = self._bookmarks_in

    cx.run(statement_text, parameters, **run_metadata)
    cx.pull_all(
        on_records=lambda records: result._records.extend(
            hydrant.hydrate_records(result.keys(), records)),
        on_success=done,
        on_failure=fail,
        on_summary=lambda: result.detach(sync=False),
    )

    if not has_transaction:
        # Auto-commit: flush immediately so the result header is received
        # before this method returns.
        try:
            self._connection.send()
            self._connection.fetch()
        except ConnectionExpired as error:
            raise SessionExpired(*error.args)

    return result
[ "def", "run", "(", "self", ",", "statement", ",", "parameters", "=", "None", ",", "*", "*", "kwparameters", ")", ":", "from", "neobolt", ".", "exceptions", "import", "ConnectionExpired", "self", ".", "_assert_open", "(", ")", "if", "not", "statement", ":",...
Run a Cypher statement within an auto-commit transaction. The statement is sent and the result header received immediately but the :class:`.StatementResult` content is fetched lazily as consumed by the client application. If a statement is executed before a previous :class:`.StatementResult` in the same :class:`.Session` has been fully consumed, the first result will be fully fetched and buffered. Note therefore that the generally recommended pattern of usage is to fully consume one result before executing a subsequent statement. If two results need to be consumed in parallel, multiple :class:`.Session` objects can be used as an alternative to result buffering. For more usage details, see :meth:`.Transaction.run`. :param statement: template Cypher statement :param parameters: dictionary of parameters :param kwparameters: additional keyword parameters :returns: :class:`.StatementResult` object
[ "Run", "a", "Cypher", "statement", "within", "an", "auto", "-", "commit", "transaction", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L166-L261
train
226,364
neo4j/neo4j-python-driver
neo4j/blocking.py
Session.send
def send(self): """ Send all outstanding requests. """ from neobolt.exceptions import ConnectionExpired if self._connection: try: self._connection.send() except ConnectionExpired as error: raise SessionExpired(*error.args)
python
def send(self):
    """ Flush all outstanding requests to the server, if a
    connection is currently held.

    :raises SessionExpired: if the underlying connection has expired
    """
    from neobolt.exceptions import ConnectionExpired
    connection = self._connection
    if not connection:
        return
    try:
        connection.send()
    except ConnectionExpired as error:
        # Re-raise connection expiry as a session-level error.
        raise SessionExpired(*error.args)
[ "def", "send", "(", "self", ")", ":", "from", "neobolt", ".", "exceptions", "import", "ConnectionExpired", "if", "self", ".", "_connection", ":", "try", ":", "self", ".", "_connection", ".", "send", "(", ")", "except", "ConnectionExpired", "as", "error", "...
Send all outstanding requests.
[ "Send", "all", "outstanding", "requests", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L263-L271
train
226,365
neo4j/neo4j-python-driver
neo4j/blocking.py
Session.fetch
def fetch(self): """ Attempt to fetch at least one more record. :returns: number of records fetched """ from neobolt.exceptions import ConnectionExpired if self._connection: try: detail_count, _ = self._connection.fetch() except ConnectionExpired as error: raise SessionExpired(*error.args) else: return detail_count return 0
python
def fetch(self):
    """ Attempt to fetch at least one more record from the server.

    :returns: number of records fetched (zero when no connection is held)
    :raises SessionExpired: if the underlying connection has expired
    """
    from neobolt.exceptions import ConnectionExpired
    connection = self._connection
    if not connection:
        return 0
    try:
        detail_count, _ = connection.fetch()
    except ConnectionExpired as error:
        raise SessionExpired(*error.args)
    return detail_count
[ "def", "fetch", "(", "self", ")", ":", "from", "neobolt", ".", "exceptions", "import", "ConnectionExpired", "if", "self", ".", "_connection", ":", "try", ":", "detail_count", ",", "_", "=", "self", ".", "_connection", ".", "fetch", "(", ")", "except", "C...
Attempt to fetch at least one more record. :returns: number of records fetched
[ "Attempt", "to", "fetch", "at", "least", "one", "more", "record", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L273-L286
train
226,366
neo4j/neo4j-python-driver
neo4j/blocking.py
Session.detach
def detach(self, result, sync=True): """ Detach a result from this session by fetching and buffering any remaining records. :param result: :param sync: :returns: number of records fetched """ count = 0 if sync and result.attached(): self.send() fetch = self.fetch while result.attached(): count += fetch() if self._last_result is result: self._last_result = None if not self.has_transaction(): self._disconnect(sync=False) result._session = None return count
python
def detach(self, result, sync=True):
    """ Detach a result from this session by fetching and buffering any
    remaining records.

    :param result: the result to detach from this session
    :param sync: if true, drain the remainder of the result over the network
    :returns: number of records fetched
    """
    fetched = 0
    if sync and result.attached():
        # Flush queued requests, then drain everything that is still
        # attached into the result's local buffer.
        self.send()
        while result.attached():
            fetched += self.fetch()
    if self._last_result is result:
        self._last_result = None
        if not self.has_transaction():
            # Nothing else holds the connection open, so release it.
            self._disconnect(sync=False)
    # Sever the result's link back to this session.
    result._session = None
    return fetched
[ "def", "detach", "(", "self", ",", "result", ",", "sync", "=", "True", ")", ":", "count", "=", "0", "if", "sync", "and", "result", ".", "attached", "(", ")", ":", "self", ".", "send", "(", ")", "fetch", "=", "self", ".", "fetch", "while", "result...
Detach a result from this session by fetching and buffering any remaining records. :param result: :param sync: :returns: number of records fetched
[ "Detach", "a", "result", "from", "this", "session", "by", "fetching", "and", "buffering", "any", "remaining", "records", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L303-L325
train
226,367
neo4j/neo4j-python-driver
neo4j/blocking.py
Transaction.run
def run(self, statement, parameters=None, **kwparameters): """ Run a Cypher statement within the context of this transaction. The statement is sent to the server lazily, when its result is consumed. To force the statement to be sent to the server, use the :meth:`.Transaction.sync` method. Cypher is typically expressed as a statement template plus a set of named parameters. In Python, parameters may be expressed through a dictionary of parameters, through individual parameter arguments, or as a mixture of both. For example, the `run` statements below are all equivalent:: >>> statement = "CREATE (a:Person {name:{name}, age:{age}})" >>> tx.run(statement, {"name": "Alice", "age": 33}) >>> tx.run(statement, {"name": "Alice"}, age=33) >>> tx.run(statement, name="Alice", age=33) Parameter values can be of any type supported by the Neo4j type system. In Python, this includes :class:`bool`, :class:`int`, :class:`str`, :class:`list` and :class:`dict`. Note however that :class:`list` properties must be homogenous. :param statement: template Cypher statement :param parameters: dictionary of parameters :param kwparameters: additional keyword parameters :returns: :class:`.StatementResult` object :raise TransactionError: if the transaction is closed """ self._assert_open() return self.session.run(statement, parameters, **kwparameters)
python
def run(self, statement, parameters=None, **kwparameters):
    """ Run a Cypher statement within the context of this transaction.

    The statement is sent to the server lazily, when its result is
    consumed; :meth:`.Transaction.sync` can be used to force it to be
    sent. Parameters may be supplied as a dictionary, as individual
    keyword arguments, or as a mixture of both, and may be of any type
    supported by the Neo4j type system (:class:`bool`, :class:`int`,
    :class:`str`, :class:`list`, :class:`dict`; list properties must be
    homogenous).

    :param statement: template Cypher statement
    :param parameters: dictionary of parameters
    :param kwparameters: additional keyword parameters
    :returns: :class:`.StatementResult` object
    :raise TransactionError: if the transaction is closed
    """
    # Guard first, then delegate the actual execution to the owning session.
    self._assert_open()
    return self.session.run(statement, parameters, **kwparameters)
[ "def", "run", "(", "self", ",", "statement", ",", "parameters", "=", "None", ",", "*", "*", "kwparameters", ")", ":", "self", ".", "_assert_open", "(", ")", "return", "self", ".", "session", ".", "run", "(", "statement", ",", "parameters", ",", "*", ...
Run a Cypher statement within the context of this transaction. The statement is sent to the server lazily, when its result is consumed. To force the statement to be sent to the server, use the :meth:`.Transaction.sync` method. Cypher is typically expressed as a statement template plus a set of named parameters. In Python, parameters may be expressed through a dictionary of parameters, through individual parameter arguments, or as a mixture of both. For example, the `run` statements below are all equivalent:: >>> statement = "CREATE (a:Person {name:{name}, age:{age}})" >>> tx.run(statement, {"name": "Alice", "age": 33}) >>> tx.run(statement, {"name": "Alice"}, age=33) >>> tx.run(statement, name="Alice", age=33) Parameter values can be of any type supported by the Neo4j type system. In Python, this includes :class:`bool`, :class:`int`, :class:`str`, :class:`list` and :class:`dict`. Note however that :class:`list` properties must be homogenous. :param statement: template Cypher statement :param parameters: dictionary of parameters :param kwparameters: additional keyword parameters :returns: :class:`.StatementResult` object :raise TransactionError: if the transaction is closed
[ "Run", "a", "Cypher", "statement", "within", "the", "context", "of", "this", "transaction", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L497-L527
train
226,368
neo4j/neo4j-python-driver
neo4j/blocking.py
StatementResult.detach
def detach(self, sync=True): """ Detach this result from its parent session by fetching the remainder of this result from the network into the buffer. :returns: number of records fetched """ if self.attached(): return self._session.detach(self, sync=sync) else: return 0
python
def detach(self, sync=True):
    """ Detach this result from its parent session by fetching the
    remainder of this result from the network into the buffer.

    :param sync: if true, drain the stream over the network first
    :returns: number of records fetched
    """
    # Nothing to do for a result that is already detached.
    if not self.attached():
        return 0
    return self._session.detach(self, sync=sync)
[ "def", "detach", "(", "self", ",", "sync", "=", "True", ")", ":", "if", "self", ".", "attached", "(", ")", ":", "return", "self", ".", "_session", ".", "detach", "(", "self", ",", "sync", "=", "sync", ")", "else", ":", "return", "0" ]
Detach this result from its parent session by fetching the remainder of this result from the network into the buffer. :returns: number of records fetched
[ "Detach", "this", "result", "from", "its", "parent", "session", "by", "fetching", "the", "remainder", "of", "this", "result", "from", "the", "network", "into", "the", "buffer", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L653-L662
train
226,369
neo4j/neo4j-python-driver
neo4j/blocking.py
StatementResult.keys
def keys(self): """ The keys for the records in this result. :returns: tuple of key names """ try: return self._metadata["fields"] except KeyError: if self.attached(): self._session.send() while self.attached() and "fields" not in self._metadata: self._session.fetch() return self._metadata.get("fields")
python
def keys(self):
    """ The keys for the records in this result.

    If the result header has not yet arrived, flush the connection
    and fetch until the field names become available.

    :returns: tuple of key names
    """
    if "fields" in self._metadata:
        return self._metadata["fields"]
    # Header not received yet: pull from the network until it shows up
    # or the result detaches.
    if self.attached():
        self._session.send()
    while self.attached() and "fields" not in self._metadata:
        self._session.fetch()
    return self._metadata.get("fields")
[ "def", "keys", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_metadata", "[", "\"fields\"", "]", "except", "KeyError", ":", "if", "self", ".", "attached", "(", ")", ":", "self", ".", "_session", ".", "send", "(", ")", "while", "self", ...
The keys for the records in this result. :returns: tuple of key names
[ "The", "keys", "for", "the", "records", "in", "this", "result", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L664-L676
train
226,370
neo4j/neo4j-python-driver
neo4j/blocking.py
StatementResult.records
def records(self): """ Generator for records obtained from this result. :yields: iterable of :class:`.Record` objects """ records = self._records next_record = records.popleft while records: yield next_record() attached = self.attached if attached(): self._session.send() while attached(): self._session.fetch() while records: yield next_record()
python
def records(self):
    """ Generator for records obtained from this result.

    Yields everything already buffered, then keeps fetching from the
    network (flushing first) until the result detaches, yielding each
    newly buffered record as it arrives.

    :yields: iterable of :class:`.Record` objects
    """
    buffered = self._records
    # Drain anything already in the local buffer.
    while buffered:
        yield buffered.popleft()
    if self.attached():
        self._session.send()
    while self.attached():
        self._session.fetch()
        while buffered:
            yield buffered.popleft()
[ "def", "records", "(", "self", ")", ":", "records", "=", "self", ".", "_records", "next_record", "=", "records", ".", "popleft", "while", "records", ":", "yield", "next_record", "(", ")", "attached", "=", "self", ".", "attached", "if", "attached", "(", "...
Generator for records obtained from this result. :yields: iterable of :class:`.Record` objects
[ "Generator", "for", "records", "obtained", "from", "this", "result", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L678-L693
train
226,371
neo4j/neo4j-python-driver
neo4j/blocking.py
StatementResult.summary
def summary(self): """ Obtain the summary of this result, buffering any remaining records. :returns: The :class:`.ResultSummary` for this result """ self.detach() if self._summary is None: self._summary = BoltStatementResultSummary(**self._metadata) return self._summary
python
def summary(self):
    """ Obtain the summary of this result, buffering any remaining records.

    The summary object is built once and cached for subsequent calls.

    :returns: The :class:`.ResultSummary` for this result
    """
    self.detach()
    cached = self._summary
    if cached is None:
        cached = self._summary = BoltStatementResultSummary(**self._metadata)
    return cached
[ "def", "summary", "(", "self", ")", ":", "self", ".", "detach", "(", ")", "if", "self", ".", "_summary", "is", "None", ":", "self", ".", "_summary", "=", "BoltStatementResultSummary", "(", "*", "*", "self", ".", "_metadata", ")", "return", "self", ".",...
Obtain the summary of this result, buffering any remaining records. :returns: The :class:`.ResultSummary` for this result
[ "Obtain", "the", "summary", "of", "this", "result", "buffering", "any", "remaining", "records", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L695-L703
train
226,372
neo4j/neo4j-python-driver
neo4j/blocking.py
StatementResult.single
def single(self): """ Obtain the next and only remaining record from this result. A warning is generated if more than one record is available but the first of these is still returned. :returns: the next :class:`.Record` or :const:`None` if none remain :warns: if more than one record is available """ records = list(self) size = len(records) if size == 0: return None if size != 1: warn("Expected a result with a single record, but this result contains %d" % size) return records[0]
python
def single(self):
    """ Obtain the next and only remaining record from this result.

    A warning is generated if more than one record is available but
    the first of these is still returned.

    :returns: the next :class:`.Record` or :const:`None` if none remain
    :warns: if more than one record is available
    """
    buffered = list(self)
    if not buffered:
        return None
    if len(buffered) > 1:
        warn("Expected a result with a single record, but this result contains %d" % len(buffered))
    return buffered[0]
[ "def", "single", "(", "self", ")", ":", "records", "=", "list", "(", "self", ")", "size", "=", "len", "(", "records", ")", "if", "size", "==", "0", ":", "return", "None", "if", "size", "!=", "1", ":", "warn", "(", "\"Expected a result with a single rec...
Obtain the next and only remaining record from this result. A warning is generated if more than one record is available but the first of these is still returned. :returns: the next :class:`.Record` or :const:`None` if none remain :warns: if more than one record is available
[ "Obtain", "the", "next", "and", "only", "remaining", "record", "from", "this", "result", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L715-L730
train
226,373
neo4j/neo4j-python-driver
neo4j/blocking.py
StatementResult.peek
def peek(self): """ Obtain the next record from this result without consuming it. This leaves the record in the buffer for further processing. :returns: the next :class:`.Record` or :const:`None` if none remain """ records = self._records if records: return records[0] if not self.attached(): return None if self.attached(): self._session.send() while self.attached() and not records: self._session.fetch() if records: return records[0] return None
python
def peek(self):
    """ Obtain the next record from this result without consuming it.

    The record is left in the buffer for further processing; if the
    buffer is empty, records are fetched from the network until one
    arrives or the result detaches.

    :returns: the next :class:`.Record` or :const:`None` if none remain
    """
    buffered = self._records
    if buffered:
        return buffered[0]
    if not self.attached():
        return None
    # Still attached but buffer empty: flush and fetch until a record
    # arrives or the stream ends.
    self._session.send()
    while self.attached() and not buffered:
        self._session.fetch()
    return buffered[0] if buffered else None
[ "def", "peek", "(", "self", ")", ":", "records", "=", "self", ".", "_records", "if", "records", ":", "return", "records", "[", "0", "]", "if", "not", "self", ".", "attached", "(", ")", ":", "return", "None", "if", "self", ".", "attached", "(", ")",...
Obtain the next record from this result without consuming it. This leaves the record in the buffer for further processing. :returns: the next :class:`.Record` or :const:`None` if none remain
[ "Obtain", "the", "next", "record", "from", "this", "result", "without", "consuming", "it", ".", "This", "leaves", "the", "record", "in", "the", "buffer", "for", "further", "processing", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L732-L749
train
226,374
neo4j/neo4j-python-driver
neo4j/blocking.py
BoltStatementResult.value
def value(self, item=0, default=None): """ Return the remainder of the result as a list of values. :param item: field to return for each remaining record :param default: default value, used if the index of key is unavailable :returns: list of individual values """ return [record.value(item, default) for record in self.records()]
python
def value(self, item=0, default=None):
    """ Return the remainder of the result as a list of values.

    :param item: field to return for each remaining record
    :param default: default value, used if the index of key is unavailable
    :returns: list of individual values
    """
    values = []
    for record in self.records():
        values.append(record.value(item, default))
    return values
[ "def", "value", "(", "self", ",", "item", "=", "0", ",", "default", "=", "None", ")", ":", "return", "[", "record", ".", "value", "(", "item", ",", "default", ")", "for", "record", "in", "self", ".", "records", "(", ")", "]" ]
Return the remainder of the result as a list of values. :param item: field to return for each remaining record :param default: default value, used if the index of key is unavailable :returns: list of individual values
[ "Return", "the", "remainder", "of", "the", "result", "as", "a", "list", "of", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/blocking.py#L769-L776
train
226,375
neo4j/neo4j-python-driver
neo4j/pipelining.py
Pipeline.pull
def pull(self): """Returns a generator containing the results of the next query in the pipeline""" # n.b. pull is now somewhat misleadingly named because it doesn't do anything # the connection isn't touched until you try and iterate the generator we return lock_acquired = self._pull_lock.acquire(blocking=False) if not lock_acquired: raise PullOrderException() return self._results_generator()
python
def pull(self):
    """Returns a generator containing the results of the next query in the pipeline"""
    # Despite its name, pull does no network work itself: the connection
    # is only touched when the returned generator is iterated. The lock
    # enforces that results are pulled in submission order.
    if not self._pull_lock.acquire(blocking=False):
        raise PullOrderException()
    return self._results_generator()
[ "def", "pull", "(", "self", ")", ":", "# n.b. pull is now somewhat misleadingly named because it doesn't do anything", "# the connection isn't touched until you try and iterate the generator we return", "lock_acquired", "=", "self", ".", "_pull_lock", ".", "acquire", "(", "blocking",...
Returns a generator containing the results of the next query in the pipeline
[ "Returns", "a", "generator", "containing", "the", "results", "of", "the", "next", "query", "in", "the", "pipeline" ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/pipelining.py#L61-L68
train
226,376
neo4j/neo4j-python-driver
neo4j/meta.py
deprecated
def deprecated(message): """ Decorator for deprecating functions and methods. :: @deprecated("'foo' has been deprecated in favour of 'bar'") def foo(x): pass """ def f__(f): def f_(*args, **kwargs): from warnings import warn warn(message, category=DeprecationWarning, stacklevel=2) return f(*args, **kwargs) f_.__name__ = f.__name__ f_.__doc__ = f.__doc__ f_.__dict__.update(f.__dict__) return f_ return f__
python
def deprecated(message):
    """ Decorator for deprecating functions and methods.

    ::

        @deprecated("'foo' has been deprecated in favour of 'bar'")
        def foo(x):
            pass

    :param message: text of the :class:`DeprecationWarning` emitted on
        each call to the decorated function
    :returns: decorator that wraps a function with a deprecation warning
    """
    from functools import wraps

    def f__(f):
        # functools.wraps copies __name__, __doc__, __qualname__,
        # __module__ and __dict__, and records __wrapped__ — strictly more
        # than the manual three-attribute copy it replaces.
        @wraps(f)
        def f_(*args, **kwargs):
            from warnings import warn
            # stacklevel=2 points the warning at the caller, not at f_.
            warn(message, category=DeprecationWarning, stacklevel=2)
            return f(*args, **kwargs)
        return f_
    return f__
[ "def", "deprecated", "(", "message", ")", ":", "def", "f__", "(", "f", ")", ":", "def", "f_", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "warnings", "import", "warn", "warn", "(", "message", ",", "category", "=", "DeprecationWarning...
Decorator for deprecating functions and methods. :: @deprecated("'foo' has been deprecated in favour of 'bar'") def foo(x): pass
[ "Decorator", "for", "deprecating", "functions", "and", "methods", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/meta.py#L27-L46
train
226,377
neo4j/neo4j-python-driver
neo4j/meta.py
experimental
def experimental(message): """ Decorator for tagging experimental functions and methods. :: @experimental("'foo' is an experimental function and may be " "removed in a future release") def foo(x): pass """ def f__(f): def f_(*args, **kwargs): from warnings import warn warn(message, category=ExperimentalWarning, stacklevel=2) return f(*args, **kwargs) f_.__name__ = f.__name__ f_.__doc__ = f.__doc__ f_.__dict__.update(f.__dict__) return f_ return f__
python
def experimental(message): """ Decorator for tagging experimental functions and methods. :: @experimental("'foo' is an experimental function and may be " "removed in a future release") def foo(x): pass """ def f__(f): def f_(*args, **kwargs): from warnings import warn warn(message, category=ExperimentalWarning, stacklevel=2) return f(*args, **kwargs) f_.__name__ = f.__name__ f_.__doc__ = f.__doc__ f_.__dict__.update(f.__dict__) return f_ return f__
[ "def", "experimental", "(", "message", ")", ":", "def", "f__", "(", "f", ")", ":", "def", "f_", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "warnings", "import", "warn", "warn", "(", "message", ",", "category", "=", "ExperimentalWarn...
Decorator for tagging experimental functions and methods. :: @experimental("'foo' is an experimental function and may be " "removed in a future release") def foo(x): pass
[ "Decorator", "for", "tagging", "experimental", "functions", "and", "methods", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/meta.py#L54-L74
train
226,378
neo4j/neo4j-python-driver
neo4j/types/temporal.py
hydrate_time
def hydrate_time(nanoseconds, tz=None): """ Hydrator for `Time` and `LocalTime` values. :param nanoseconds: :param tz: :return: Time """ seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000)) minutes, seconds = map(int, divmod(seconds, 60)) hours, minutes = map(int, divmod(minutes, 60)) seconds = (1000000000 * seconds + nanoseconds) / 1000000000 t = Time(hours, minutes, seconds) if tz is None: return t tz_offset_minutes, tz_offset_seconds = divmod(tz, 60) zone = FixedOffset(tz_offset_minutes) return zone.localize(t)
python
def hydrate_time(nanoseconds, tz=None): """ Hydrator for `Time` and `LocalTime` values. :param nanoseconds: :param tz: :return: Time """ seconds, nanoseconds = map(int, divmod(nanoseconds, 1000000000)) minutes, seconds = map(int, divmod(seconds, 60)) hours, minutes = map(int, divmod(minutes, 60)) seconds = (1000000000 * seconds + nanoseconds) / 1000000000 t = Time(hours, minutes, seconds) if tz is None: return t tz_offset_minutes, tz_offset_seconds = divmod(tz, 60) zone = FixedOffset(tz_offset_minutes) return zone.localize(t)
[ "def", "hydrate_time", "(", "nanoseconds", ",", "tz", "=", "None", ")", ":", "seconds", ",", "nanoseconds", "=", "map", "(", "int", ",", "divmod", "(", "nanoseconds", ",", "1000000000", ")", ")", "minutes", ",", "seconds", "=", "map", "(", "int", ",", ...
Hydrator for `Time` and `LocalTime` values. :param nanoseconds: :param tz: :return: Time
[ "Hydrator", "for", "Time", "and", "LocalTime", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L61-L77
train
226,379
neo4j/neo4j-python-driver
neo4j/types/temporal.py
dehydrate_time
def dehydrate_time(value): """ Dehydrator for `time` values. :param value: :type value: Time :return: """ if isinstance(value, Time): nanoseconds = int(value.ticks * 1000000000) elif isinstance(value, time): nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute + 1000000000 * value.second + 1000 * value.microsecond) else: raise TypeError("Value must be a neotime.Time or a datetime.time") if value.tzinfo: return Structure(b"T", nanoseconds, value.tzinfo.utcoffset(value).seconds) else: return Structure(b"t", nanoseconds)
python
def dehydrate_time(value): """ Dehydrator for `time` values. :param value: :type value: Time :return: """ if isinstance(value, Time): nanoseconds = int(value.ticks * 1000000000) elif isinstance(value, time): nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute + 1000000000 * value.second + 1000 * value.microsecond) else: raise TypeError("Value must be a neotime.Time or a datetime.time") if value.tzinfo: return Structure(b"T", nanoseconds, value.tzinfo.utcoffset(value).seconds) else: return Structure(b"t", nanoseconds)
[ "def", "dehydrate_time", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Time", ")", ":", "nanoseconds", "=", "int", "(", "value", ".", "ticks", "*", "1000000000", ")", "elif", "isinstance", "(", "value", ",", "time", ")", ":", "nanosec...
Dehydrator for `time` values. :param value: :type value: Time :return:
[ "Dehydrator", "for", "time", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L80-L97
train
226,380
neo4j/neo4j-python-driver
neo4j/types/temporal.py
hydrate_datetime
def hydrate_datetime(seconds, nanoseconds, tz=None): """ Hydrator for `DateTime` and `LocalDateTime` values. :param seconds: :param nanoseconds: :param tz: :return: datetime """ minutes, seconds = map(int, divmod(seconds, 60)) hours, minutes = map(int, divmod(minutes, 60)) days, hours = map(int, divmod(hours, 24)) seconds = (1000000000 * seconds + nanoseconds) / 1000000000 t = DateTime.combine(Date.from_ordinal(UNIX_EPOCH_DATE_ORDINAL + days), Time(hours, minutes, seconds)) if tz is None: return t if isinstance(tz, int): tz_offset_minutes, tz_offset_seconds = divmod(tz, 60) zone = FixedOffset(tz_offset_minutes) else: zone = timezone(tz) return zone.localize(t)
python
def hydrate_datetime(seconds, nanoseconds, tz=None): """ Hydrator for `DateTime` and `LocalDateTime` values. :param seconds: :param nanoseconds: :param tz: :return: datetime """ minutes, seconds = map(int, divmod(seconds, 60)) hours, minutes = map(int, divmod(minutes, 60)) days, hours = map(int, divmod(hours, 24)) seconds = (1000000000 * seconds + nanoseconds) / 1000000000 t = DateTime.combine(Date.from_ordinal(UNIX_EPOCH_DATE_ORDINAL + days), Time(hours, minutes, seconds)) if tz is None: return t if isinstance(tz, int): tz_offset_minutes, tz_offset_seconds = divmod(tz, 60) zone = FixedOffset(tz_offset_minutes) else: zone = timezone(tz) return zone.localize(t)
[ "def", "hydrate_datetime", "(", "seconds", ",", "nanoseconds", ",", "tz", "=", "None", ")", ":", "minutes", ",", "seconds", "=", "map", "(", "int", ",", "divmod", "(", "seconds", ",", "60", ")", ")", "hours", ",", "minutes", "=", "map", "(", "int", ...
Hydrator for `DateTime` and `LocalDateTime` values. :param seconds: :param nanoseconds: :param tz: :return: datetime
[ "Hydrator", "for", "DateTime", "and", "LocalDateTime", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L100-L120
train
226,381
neo4j/neo4j-python-driver
neo4j/types/temporal.py
dehydrate_datetime
def dehydrate_datetime(value): """ Dehydrator for `datetime` values. :param value: :type value: datetime :return: """ def seconds_and_nanoseconds(dt): if isinstance(dt, datetime): dt = DateTime.from_native(dt) zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo) t = dt.to_clock_time() - zone_epoch.to_clock_time() return t.seconds, t.nanoseconds tz = value.tzinfo if tz is None: # without time zone value = utc.localize(value) seconds, nanoseconds = seconds_and_nanoseconds(value) return Structure(b"d", seconds, nanoseconds) elif hasattr(tz, "zone") and tz.zone: # with named time zone seconds, nanoseconds = seconds_and_nanoseconds(value) return Structure(b"f", seconds, nanoseconds, tz.zone) else: # with time offset seconds, nanoseconds = seconds_and_nanoseconds(value) return Structure(b"F", seconds, nanoseconds, tz.utcoffset(value).seconds)
python
def dehydrate_datetime(value): """ Dehydrator for `datetime` values. :param value: :type value: datetime :return: """ def seconds_and_nanoseconds(dt): if isinstance(dt, datetime): dt = DateTime.from_native(dt) zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo) t = dt.to_clock_time() - zone_epoch.to_clock_time() return t.seconds, t.nanoseconds tz = value.tzinfo if tz is None: # without time zone value = utc.localize(value) seconds, nanoseconds = seconds_and_nanoseconds(value) return Structure(b"d", seconds, nanoseconds) elif hasattr(tz, "zone") and tz.zone: # with named time zone seconds, nanoseconds = seconds_and_nanoseconds(value) return Structure(b"f", seconds, nanoseconds, tz.zone) else: # with time offset seconds, nanoseconds = seconds_and_nanoseconds(value) return Structure(b"F", seconds, nanoseconds, tz.utcoffset(value).seconds)
[ "def", "dehydrate_datetime", "(", "value", ")", ":", "def", "seconds_and_nanoseconds", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "datetime", ")", ":", "dt", "=", "DateTime", ".", "from_native", "(", "dt", ")", "zone_epoch", "=", "DateTime", ...
Dehydrator for `datetime` values. :param value: :type value: datetime :return:
[ "Dehydrator", "for", "datetime", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L123-L151
train
226,382
neo4j/neo4j-python-driver
neo4j/types/temporal.py
hydrate_duration
def hydrate_duration(months, days, seconds, nanoseconds): """ Hydrator for `Duration` values. :param months: :param days: :param seconds: :param nanoseconds: :return: `duration` namedtuple """ return Duration(months=months, days=days, seconds=seconds, nanoseconds=nanoseconds)
python
def hydrate_duration(months, days, seconds, nanoseconds): """ Hydrator for `Duration` values. :param months: :param days: :param seconds: :param nanoseconds: :return: `duration` namedtuple """ return Duration(months=months, days=days, seconds=seconds, nanoseconds=nanoseconds)
[ "def", "hydrate_duration", "(", "months", ",", "days", ",", "seconds", ",", "nanoseconds", ")", ":", "return", "Duration", "(", "months", "=", "months", ",", "days", "=", "days", ",", "seconds", "=", "seconds", ",", "nanoseconds", "=", "nanoseconds", ")" ]
Hydrator for `Duration` values. :param months: :param days: :param seconds: :param nanoseconds: :return: `duration` namedtuple
[ "Hydrator", "for", "Duration", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L154-L163
train
226,383
neo4j/neo4j-python-driver
neo4j/types/temporal.py
dehydrate_duration
def dehydrate_duration(value): """ Dehydrator for `duration` values. :param value: :type value: Duration :return: """ return Structure(b"E", value.months, value.days, value.seconds, int(1000000000 * value.subseconds))
python
def dehydrate_duration(value): """ Dehydrator for `duration` values. :param value: :type value: Duration :return: """ return Structure(b"E", value.months, value.days, value.seconds, int(1000000000 * value.subseconds))
[ "def", "dehydrate_duration", "(", "value", ")", ":", "return", "Structure", "(", "b\"E\"", ",", "value", ".", "months", ",", "value", ".", "days", ",", "value", ".", "seconds", ",", "int", "(", "1000000000", "*", "value", ".", "subseconds", ")", ")" ]
Dehydrator for `duration` values. :param value: :type value: Duration :return:
[ "Dehydrator", "for", "duration", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L166-L173
train
226,384
neo4j/neo4j-python-driver
neo4j/types/temporal.py
dehydrate_timedelta
def dehydrate_timedelta(value): """ Dehydrator for `timedelta` values. :param value: :type value: timedelta :return: """ months = 0 days = value.days seconds = value.seconds nanoseconds = 1000 * value.microseconds return Structure(b"E", months, days, seconds, nanoseconds)
python
def dehydrate_timedelta(value): """ Dehydrator for `timedelta` values. :param value: :type value: timedelta :return: """ months = 0 days = value.days seconds = value.seconds nanoseconds = 1000 * value.microseconds return Structure(b"E", months, days, seconds, nanoseconds)
[ "def", "dehydrate_timedelta", "(", "value", ")", ":", "months", "=", "0", "days", "=", "value", ".", "days", "seconds", "=", "value", ".", "seconds", "nanoseconds", "=", "1000", "*", "value", ".", "microseconds", "return", "Structure", "(", "b\"E\"", ",", ...
Dehydrator for `timedelta` values. :param value: :type value: timedelta :return:
[ "Dehydrator", "for", "timedelta", "values", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/temporal.py#L176-L187
train
226,385
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_touch.py
_TouchKeywords.zoom
def zoom(self, locator, percent="200%", steps=1): """ Zooms in on an element a certain amount. """ driver = self._current_application() element = self._element_find(locator, True, True) driver.zoom(element=element, percent=percent, steps=steps)
python
def zoom(self, locator, percent="200%", steps=1): """ Zooms in on an element a certain amount. """ driver = self._current_application() element = self._element_find(locator, True, True) driver.zoom(element=element, percent=percent, steps=steps)
[ "def", "zoom", "(", "self", ",", "locator", ",", "percent", "=", "\"200%\"", ",", "steps", "=", "1", ")", ":", "driver", "=", "self", ".", "_current_application", "(", ")", "element", "=", "self", ".", "_element_find", "(", "locator", ",", "True", ",",...
Zooms in on an element a certain amount.
[ "Zooms", "in", "on", "an", "element", "a", "certain", "amount", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_touch.py#L15-L21
train
226,386
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_touch.py
_TouchKeywords.scroll
def scroll(self, start_locator, end_locator): """ Scrolls from one element to another Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements. """ el1 = self._element_find(start_locator, True, True) el2 = self._element_find(end_locator, True, True) driver = self._current_application() driver.scroll(el1, el2)
python
def scroll(self, start_locator, end_locator): """ Scrolls from one element to another Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements. """ el1 = self._element_find(start_locator, True, True) el2 = self._element_find(end_locator, True, True) driver = self._current_application() driver.scroll(el1, el2)
[ "def", "scroll", "(", "self", ",", "start_locator", ",", "end_locator", ")", ":", "el1", "=", "self", ".", "_element_find", "(", "start_locator", ",", "True", ",", "True", ")", "el2", "=", "self", ".", "_element_find", "(", "end_locator", ",", "True", ",...
Scrolls from one element to another Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements.
[ "Scrolls", "from", "one", "element", "to", "another", "Key", "attributes", "for", "arbitrary", "elements", "are", "id", "and", "name", ".", "See", "introduction", "for", "details", "about", "locating", "elements", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_touch.py#L85-L94
train
226,387
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_touch.py
_TouchKeywords.scroll_up
def scroll_up(self, locator): """Scrolls up to element""" driver = self._current_application() element = self._element_find(locator, True, True) driver.execute_script("mobile: scroll", {"direction": 'up', 'element': element.id})
python
def scroll_up(self, locator): """Scrolls up to element""" driver = self._current_application() element = self._element_find(locator, True, True) driver.execute_script("mobile: scroll", {"direction": 'up', 'element': element.id})
[ "def", "scroll_up", "(", "self", ",", "locator", ")", ":", "driver", "=", "self", ".", "_current_application", "(", ")", "element", "=", "self", ".", "_element_find", "(", "locator", ",", "True", ",", "True", ")", "driver", ".", "execute_script", "(", "\...
Scrolls up to element
[ "Scrolls", "up", "to", "element" ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_touch.py#L102-L106
train
226,388
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_touch.py
_TouchKeywords.long_press
def long_press(self, locator, duration=1000): """ Long press the element with optional duration """ driver = self._current_application() element = self._element_find(locator, True, True) action = TouchAction(driver) action.press(element).wait(duration).release().perform()
python
def long_press(self, locator, duration=1000): """ Long press the element with optional duration """ driver = self._current_application() element = self._element_find(locator, True, True) action = TouchAction(driver) action.press(element).wait(duration).release().perform()
[ "def", "long_press", "(", "self", ",", "locator", ",", "duration", "=", "1000", ")", ":", "driver", "=", "self", ".", "_current_application", "(", ")", "element", "=", "self", ".", "_element_find", "(", "locator", ",", "True", ",", "True", ")", "action",...
Long press the element with optional duration
[ "Long", "press", "the", "element", "with", "optional", "duration" ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_touch.py#L108-L113
train
226,389
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_touch.py
_TouchKeywords.click_a_point
def click_a_point(self, x=0, y=0, duration=100): """ Click on a point""" self._info("Clicking on a point (%s,%s)." % (x,y)) driver = self._current_application() action = TouchAction(driver) try: action.press(x=float(x), y=float(y)).wait(float(duration)).release().perform() except: assert False, "Can't click on a point at (%s,%s)" % (x,y)
python
def click_a_point(self, x=0, y=0, duration=100): """ Click on a point""" self._info("Clicking on a point (%s,%s)." % (x,y)) driver = self._current_application() action = TouchAction(driver) try: action.press(x=float(x), y=float(y)).wait(float(duration)).release().perform() except: assert False, "Can't click on a point at (%s,%s)" % (x,y)
[ "def", "click_a_point", "(", "self", ",", "x", "=", "0", ",", "y", "=", "0", ",", "duration", "=", "100", ")", ":", "self", ".", "_info", "(", "\"Clicking on a point (%s,%s).\"", "%", "(", "x", ",", "y", ")", ")", "driver", "=", "self", ".", "_curr...
Click on a point
[ "Click", "on", "a", "point" ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_touch.py#L128-L136
train
226,390
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_touch.py
_TouchKeywords.click_element_at_coordinates
def click_element_at_coordinates(self, coordinate_X, coordinate_Y): """ click element at a certain coordinate """ self._info("Pressing at (%s, %s)." % (coordinate_X, coordinate_Y)) driver = self._current_application() action = TouchAction(driver) action.press(x=coordinate_X, y=coordinate_Y).release().perform()
python
def click_element_at_coordinates(self, coordinate_X, coordinate_Y): """ click element at a certain coordinate """ self._info("Pressing at (%s, %s)." % (coordinate_X, coordinate_Y)) driver = self._current_application() action = TouchAction(driver) action.press(x=coordinate_X, y=coordinate_Y).release().perform()
[ "def", "click_element_at_coordinates", "(", "self", ",", "coordinate_X", ",", "coordinate_Y", ")", ":", "self", ".", "_info", "(", "\"Pressing at (%s, %s).\"", "%", "(", "coordinate_X", ",", "coordinate_Y", ")", ")", "driver", "=", "self", ".", "_current_applicati...
click element at a certain coordinate
[ "click", "element", "at", "a", "certain", "coordinate" ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_touch.py#L138-L143
train
226,391
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_waiting.py
_WaitingKeywords.wait_until_element_is_visible
def wait_until_element_is_visible(self, locator, timeout=None, error=None): """Waits until element specified with `locator` is visible. Fails if `timeout` expires before the element is visible. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Contains Element`, `Wait For Condition` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ def check_visibility(): visible = self._is_visible(locator) if visible: return elif visible is None: return error or "Element locator '%s' did not match any elements after %s" % (locator, self._format_timeout(timeout)) else: return error or "Element '%s' was not visible in %s" % (locator, self._format_timeout(timeout)) self._wait_until_no_error(timeout, check_visibility)
python
def wait_until_element_is_visible(self, locator, timeout=None, error=None): """Waits until element specified with `locator` is visible. Fails if `timeout` expires before the element is visible. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Contains Element`, `Wait For Condition` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ def check_visibility(): visible = self._is_visible(locator) if visible: return elif visible is None: return error or "Element locator '%s' did not match any elements after %s" % (locator, self._format_timeout(timeout)) else: return error or "Element '%s' was not visible in %s" % (locator, self._format_timeout(timeout)) self._wait_until_no_error(timeout, check_visibility)
[ "def", "wait_until_element_is_visible", "(", "self", ",", "locator", ",", "timeout", "=", "None", ",", "error", "=", "None", ")", ":", "def", "check_visibility", "(", ")", ":", "visible", "=", "self", ".", "_is_visible", "(", "locator", ")", "if", "visible...
Waits until element specified with `locator` is visible. Fails if `timeout` expires before the element is visible. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Contains Element`, `Wait For Condition` and BuiltIn keyword `Wait Until Keyword Succeeds`.
[ "Waits", "until", "element", "specified", "with", "locator", "is", "visible", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_waiting.py#L7-L28
train
226,392
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_waiting.py
_WaitingKeywords.wait_until_page_contains
def wait_until_page_contains(self, text, timeout=None, error=None): """Waits until `text` appears on current page. Fails if `timeout` expires before the text appears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Does Not Contain`, `Wait Until Page Contains Element`, `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ if not error: error = "Text '%s' did not appear in <TIMEOUT>" % text self._wait_until(timeout, error, self._is_text_present, text)
python
def wait_until_page_contains(self, text, timeout=None, error=None): """Waits until `text` appears on current page. Fails if `timeout` expires before the text appears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Does Not Contain`, `Wait Until Page Contains Element`, `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ if not error: error = "Text '%s' did not appear in <TIMEOUT>" % text self._wait_until(timeout, error, self._is_text_present, text)
[ "def", "wait_until_page_contains", "(", "self", ",", "text", ",", "timeout", "=", "None", ",", "error", "=", "None", ")", ":", "if", "not", "error", ":", "error", "=", "\"Text '%s' did not appear in <TIMEOUT>\"", "%", "text", "self", ".", "_wait_until", "(", ...
Waits until `text` appears on current page. Fails if `timeout` expires before the text appears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Does Not Contain`, `Wait Until Page Contains Element`, `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`.
[ "Waits", "until", "text", "appears", "on", "current", "page", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_waiting.py#L30-L46
train
226,393
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_waiting.py
_WaitingKeywords.wait_until_page_does_not_contain
def wait_until_page_does_not_contain(self, text, timeout=None, error=None): """Waits until `text` disappears from current page. Fails if `timeout` expires before the `text` disappears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Contains Element`, `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ def check_present(): present = self._is_text_present(text) if not present: return else: return error or "Text '%s' did not disappear in %s" % (text, self._format_timeout(timeout)) self._wait_until_no_error(timeout, check_present)
python
def wait_until_page_does_not_contain(self, text, timeout=None, error=None): """Waits until `text` disappears from current page. Fails if `timeout` expires before the `text` disappears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Contains Element`, `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ def check_present(): present = self._is_text_present(text) if not present: return else: return error or "Text '%s' did not disappear in %s" % (text, self._format_timeout(timeout)) self._wait_until_no_error(timeout, check_present)
[ "def", "wait_until_page_does_not_contain", "(", "self", ",", "text", ",", "timeout", "=", "None", ",", "error", "=", "None", ")", ":", "def", "check_present", "(", ")", ":", "present", "=", "self", ".", "_is_text_present", "(", "text", ")", "if", "not", ...
Waits until `text` disappears from current page. Fails if `timeout` expires before the `text` disappears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Contains Element`, `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`.
[ "Waits", "until", "text", "disappears", "from", "current", "page", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_waiting.py#L48-L70
train
226,394
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_waiting.py
_WaitingKeywords.wait_until_page_contains_element
def wait_until_page_contains_element(self, locator, timeout=None, error=None): """Waits until element specified with `locator` appears on current page. Fails if `timeout` expires before the element appears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Does Not Contain` `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ if not error: error = "Element '%s' did not appear in <TIMEOUT>" % locator self._wait_until(timeout, error, self._is_element_present, locator)
python
def wait_until_page_contains_element(self, locator, timeout=None, error=None): """Waits until element specified with `locator` appears on current page. Fails if `timeout` expires before the element appears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Does Not Contain` `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ if not error: error = "Element '%s' did not appear in <TIMEOUT>" % locator self._wait_until(timeout, error, self._is_element_present, locator)
[ "def", "wait_until_page_contains_element", "(", "self", ",", "locator", ",", "timeout", "=", "None", ",", "error", "=", "None", ")", ":", "if", "not", "error", ":", "error", "=", "\"Element '%s' did not appear in <TIMEOUT>\"", "%", "locator", "self", ".", "_wait...
Waits until element specified with `locator` appears on current page. Fails if `timeout` expires before the element appears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Does Not Contain` `Wait Until Page Does Not Contain Element` and BuiltIn keyword `Wait Until Keyword Succeeds`.
[ "Waits", "until", "element", "specified", "with", "locator", "appears", "on", "current", "page", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_waiting.py#L72-L88
train
226,395
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_waiting.py
_WaitingKeywords.wait_until_page_does_not_contain_element
def wait_until_page_does_not_contain_element(self, locator, timeout=None, error=None): """Waits until element specified with `locator` disappears from current page. Fails if `timeout` expires before the element disappears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Does Not Contain`, `Wait Until Page Contains Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ def check_present(): present = self._is_element_present(locator) if not present: return else: return error or "Element '%s' did not disappear in %s" % (locator, self._format_timeout(timeout)) self._wait_until_no_error(timeout, check_present)
python
def wait_until_page_does_not_contain_element(self, locator, timeout=None, error=None): """Waits until element specified with `locator` disappears from current page. Fails if `timeout` expires before the element disappears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Does Not Contain`, `Wait Until Page Contains Element` and BuiltIn keyword `Wait Until Keyword Succeeds`. """ def check_present(): present = self._is_element_present(locator) if not present: return else: return error or "Element '%s' did not disappear in %s" % (locator, self._format_timeout(timeout)) self._wait_until_no_error(timeout, check_present)
[ "def", "wait_until_page_does_not_contain_element", "(", "self", ",", "locator", ",", "timeout", "=", "None", ",", "error", "=", "None", ")", ":", "def", "check_present", "(", ")", ":", "present", "=", "self", ".", "_is_element_present", "(", "locator", ")", ...
Waits until element specified with `locator` disappears from current page. Fails if `timeout` expires before the element disappears. See `introduction` for more information about `timeout` and its default value. `error` can be used to override the default error message. See also `Wait Until Page Contains`, `Wait Until Page Does Not Contain`, `Wait Until Page Contains Element` and BuiltIn keyword `Wait Until Keyword Succeeds`.
[ "Waits", "until", "element", "specified", "with", "locator", "disappears", "from", "current", "page", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_waiting.py#L90-L112
train
226,396
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_android_utils.py
_AndroidUtilsKeywords.set_network_connection_status
def set_network_connection_status(self, connectionStatus): """Sets the network connection Status. Android only. Possible values: | =Value= | =Alias= | =Data= | =Wifi= | =Airplane Mode= | | 0 | (None) | 0 | 0 | 0 | | 1 | (Airplane Mode) | 0 | 0 | 1 | | 2 | (Wifi only) | 0 | 1 | 0 | | 4 | (Data only) | 1 | 0 | 0 | | 6 | (All network on) | 1 | 1 | 0 | """ driver = self._current_application() return driver.set_network_connection(int(connectionStatus))
python
def set_network_connection_status(self, connectionStatus): """Sets the network connection Status. Android only. Possible values: | =Value= | =Alias= | =Data= | =Wifi= | =Airplane Mode= | | 0 | (None) | 0 | 0 | 0 | | 1 | (Airplane Mode) | 0 | 0 | 1 | | 2 | (Wifi only) | 0 | 1 | 0 | | 4 | (Data only) | 1 | 0 | 0 | | 6 | (All network on) | 1 | 1 | 0 | """ driver = self._current_application() return driver.set_network_connection(int(connectionStatus))
[ "def", "set_network_connection_status", "(", "self", ",", "connectionStatus", ")", ":", "driver", "=", "self", ".", "_current_application", "(", ")", "return", "driver", ".", "set_network_connection", "(", "int", "(", "connectionStatus", ")", ")" ]
Sets the network connection Status. Android only. Possible values: | =Value= | =Alias= | =Data= | =Wifi= | =Airplane Mode= | | 0 | (None) | 0 | 0 | 0 | | 1 | (Airplane Mode) | 0 | 0 | 1 | | 2 | (Wifi only) | 0 | 1 | 0 | | 4 | (Data only) | 1 | 0 | 0 | | 6 | (All network on) | 1 | 1 | 0 |
[ "Sets", "the", "network", "connection", "Status", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_android_utils.py#L21-L35
train
226,397
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_android_utils.py
_AndroidUtilsKeywords.pull_file
def pull_file(self, path, decode=False): """Retrieves the file at `path` and return it's content. Android only. - _path_ - the path to the file on the device - _decode_ - True/False decode the data (base64) before returning it (default=False) """ driver = self._current_application() theFile = driver.pull_file(path) if decode: theFile = base64.b64decode(theFile) return str(theFile)
python
def pull_file(self, path, decode=False): """Retrieves the file at `path` and return it's content. Android only. - _path_ - the path to the file on the device - _decode_ - True/False decode the data (base64) before returning it (default=False) """ driver = self._current_application() theFile = driver.pull_file(path) if decode: theFile = base64.b64decode(theFile) return str(theFile)
[ "def", "pull_file", "(", "self", ",", "path", ",", "decode", "=", "False", ")", ":", "driver", "=", "self", ".", "_current_application", "(", ")", "theFile", "=", "driver", ".", "pull_file", "(", "path", ")", "if", "decode", ":", "theFile", "=", "base6...
Retrieves the file at `path` and return it's content. Android only. - _path_ - the path to the file on the device - _decode_ - True/False decode the data (base64) before returning it (default=False)
[ "Retrieves", "the", "file", "at", "path", "and", "return", "it", "s", "content", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_android_utils.py#L37-L49
train
226,398
serhatbolsu/robotframework-appiumlibrary
AppiumLibrary/keywords/_android_utils.py
_AndroidUtilsKeywords.pull_folder
def pull_folder(self, path, decode=False): """Retrieves a folder at `path`. Returns the folder's contents zipped. Android only. - _path_ - the path to the folder on the device - _decode_ - True/False decode the data (base64) before returning it (default=False) """ driver = self._current_application() theFolder = driver.pull_folder(path) if decode: theFolder = base64.b64decode(theFolder) return theFolder
python
def pull_folder(self, path, decode=False): """Retrieves a folder at `path`. Returns the folder's contents zipped. Android only. - _path_ - the path to the folder on the device - _decode_ - True/False decode the data (base64) before returning it (default=False) """ driver = self._current_application() theFolder = driver.pull_folder(path) if decode: theFolder = base64.b64decode(theFolder) return theFolder
[ "def", "pull_folder", "(", "self", ",", "path", ",", "decode", "=", "False", ")", ":", "driver", "=", "self", ".", "_current_application", "(", ")", "theFolder", "=", "driver", ".", "pull_folder", "(", "path", ")", "if", "decode", ":", "theFolder", "=", ...
Retrieves a folder at `path`. Returns the folder's contents zipped. Android only. - _path_ - the path to the folder on the device - _decode_ - True/False decode the data (base64) before returning it (default=False)
[ "Retrieves", "a", "folder", "at", "path", ".", "Returns", "the", "folder", "s", "contents", "zipped", "." ]
91c808cf0602af6be8135ac529fa488fded04a85
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_android_utils.py#L51-L63
train
226,399