repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF.interval
def interval(self, T=(0, 1000)):
    """
    Get all spikes in a time interval T.

    Parameters
    ----------
    T : list or tuple
        Time interval ``[t_min, t_max]`` (same units as the stored spike
        times; both endpoints inclusive, per SQL ``BETWEEN``).

    Returns
    -------
    s : list
        Nested list with spike times (one row tuple per spike).

    See also
    --------
    sqlite3.connect.cursor
    """
    # Parameterized query instead of '%f' string interpolation: avoids
    # SQL-injection-style pitfalls and does not round the bounds to six
    # decimal places the way '%f' formatting does.
    self.cursor.execute('SELECT * FROM spikes WHERE time BETWEEN ? AND ?',
                        tuple(T))
    sel = self.cursor.fetchall()
    return sel
python
def interval(self, T=[0, 1000]): """ Get all spikes in a time interval T. Parameters ---------- T : list Time interval. Returns ------- s : list Nested list with spike times. See also -------- sqlite3.connect.cursor """ self.cursor.execute('SELECT * FROM spikes WHERE time BETWEEN %f AND %f' % tuple(T)) sel = self.cursor.fetchall() return sel
[ "def", "interval", "(", "self", ",", "T", "=", "[", "0", ",", "1000", "]", ")", ":", "self", ".", "cursor", ".", "execute", "(", "'SELECT * FROM spikes WHERE time BETWEEN %f AND %f'", "%", "tuple", "(", "T", ")", ")", "sel", "=", "self", ".", "cursor", ...
Get all spikes in a time interval T. Parameters ---------- T : list Time interval. Returns ------- s : list Nested list with spike times. See also -------- sqlite3.connect.cursor
[ "Get", "all", "spikes", "in", "a", "time", "interval", "T", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L255-L280
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF.neurons
def neurons(self):
    """
    Return the distinct neuron indices present in the database.

    Parameters
    ----------
    None

    Returns
    -------
    numpy.ndarray
        Flattened array of neuron indices, sorted ascending (the query
        orders by neuron).

    See also
    --------
    sqlite3.connect.cursor
    """
    query = 'SELECT DISTINCT neuron FROM spikes ORDER BY neuron'
    self.cursor.execute(query)
    rows = self.cursor.fetchall()
    # Each row is a one-element tuple; flatten to a plain 1-D array.
    return np.array(rows).flatten()
python
def neurons(self): """ Return list of neuron indices. Parameters ---------- None Returns ------- list list of neuron indices See also -------- sqlite3.connect.cursor """ self.cursor.execute('SELECT DISTINCT neuron FROM spikes ORDER BY neuron') sel = self.cursor.fetchall() return np.array(sel).flatten()
[ "def", "neurons", "(", "self", ")", ":", "self", ".", "cursor", ".", "execute", "(", "'SELECT DISTINCT neuron FROM spikes ORDER BY neuron'", ")", "sel", "=", "self", ".", "cursor", ".", "fetchall", "(", ")", "return", "np", ".", "array", "(", "sel", ")", "...
Return list of neuron indices. Parameters ---------- None Returns ------- list list of neuron indices See also -------- sqlite3.connect.cursor
[ "Return", "list", "of", "neuron", "indices", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L318-L341
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF.num_spikes
def num_spikes(self):
    """
    Return the total number of spikes in the database.

    Parameters
    ----------
    None

    Returns
    -------
    tuple
        One-element row ``(count,)`` as returned by sqlite3's fetchall.
    """
    self.cursor.execute('SELECT Count(*) from spikes')
    count_row = self.cursor.fetchall()[0]
    # Check against 'wc -l *ex*.gdf'
    # ('%d' % count_row works because count_row is a one-element tuple.)
    if self.debug:
        print('DB has %d spikes' % count_row)
    return count_row
python
def num_spikes(self): """ Return total number of spikes. Parameters ---------- None Returns ------- list """ self.cursor.execute('SELECT Count(*) from spikes') rows = self.cursor.fetchall()[0] # Check against 'wc -l *ex*.gdf' if self.debug: print('DB has %d spikes' % rows) return rows
[ "def", "num_spikes", "(", "self", ")", ":", "self", ".", "cursor", ".", "execute", "(", "'SELECT Count(*) from spikes'", ")", "rows", "=", "self", ".", "cursor", ".", "fetchall", "(", ")", "[", "0", "]", "# Check against 'wc -l *ex*.gdf'", "if", "self", ".",...
Return total number of spikes. Parameters ---------- None Returns ------- list
[ "Return", "total", "number", "of", "spikes", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L344-L363
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF.plotstuff
def plotstuff(self, T=[0, 1000]):
    """
    Create a scatter plot of the contents of the database, with entries
    on the interval T.

    Parameters
    ----------
    T : list
        Time interval.

    Returns
    -------
    None

    See also
    --------
    GDF.select_neurons_interval
    """
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    neurons = self.neurons()
    # One row of dots per neuron: x = spike times, y = neuron ID.
    for row, spikes in enumerate(self.select_neurons_interval(neurons, T)):
        ax.plot(spikes, np.zeros(spikes.size) + neurons[row], 'o',
                markersize=1,
                markerfacecolor='k',
                markeredgecolor='k',
                alpha=0.25)
    ax.set_xlabel('time (ms)')
    ax.set_ylabel('neuron ID')
    ax.set_xlim(T[0], T[1])
    ax.set_ylim(neurons.min(), neurons.max())
    ax.set_title('database content on T = [%.0f, %.0f]' % (T[0], T[1]))
python
def plotstuff(self, T=[0, 1000]): """ Create a scatter plot of the contents of the database, with entries on the interval T. Parameters ---------- T : list Time interval. Returns ------- None See also -------- GDF.select_neurons_interval """ fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(111) neurons = self.neurons() i = 0 for x in self.select_neurons_interval(neurons, T): ax.plot(x, np.zeros(x.size) + neurons[i], 'o', markersize=1, markerfacecolor='k', markeredgecolor='k', alpha=0.25) i += 1 ax.set_xlabel('time (ms)') ax.set_ylabel('neuron ID') ax.set_xlim(T[0], T[1]) ax.set_ylim(neurons.min(), neurons.max()) ax.set_title('database content on T = [%.0f, %.0f]' % (T[0], T[1]))
[ "def", "plotstuff", "(", "self", ",", "T", "=", "[", "0", ",", "1000", "]", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "10", ",", "10", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "neurons", "="...
Create a scatter plot of the contents of the database, with entries on the interval T. Parameters ---------- T : list Time interval. Returns ------- None See also -------- GDF.select_neurons_interval
[ "Create", "a", "scatter", "plot", "of", "the", "contents", "of", "the", "database", "with", "entries", "on", "the", "interval", "T", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L390-L427
squdle/baseconvert
baseconvert/baseconvert.py
represent_as_tuple
def represent_as_tuple(string):
    """
    Represent a number-string in the form of a tuple of digit values.

    "868.0F" -> (8, 6, 8, '.', 0, 15)

    Args:
        string - Number represented as a string of digits.
    Returns:
        Number represented as an iterable container of digits

    >>> represent_as_tuple('868.0F')
    (8, 6, 8, '.', 0, 15)
    """
    # The radix point and recurring-pattern brackets pass through unchanged;
    # every other character is a digit to convert.
    passthrough = {".", "[", "]"}
    return tuple(c if c in passthrough else str_digit_to_int(c)
                 for c in string)
python
def represent_as_tuple(string): """ Represent a number-string in the form of a tuple of digits. "868.0F" -> (8, 6, 8, '.', 0, 15) Args: string - Number represented as a string of digits. Returns: Number represented as an iterable container of digits >>> represent_as_tuple('868.0F') (8, 6, 8, '.', 0, 15) """ keep = (".", "[", "]") return tuple(str_digit_to_int(c) if c not in keep else c for c in string)
[ "def", "represent_as_tuple", "(", "string", ")", ":", "keep", "=", "(", "\".\"", ",", "\"[\"", ",", "\"]\"", ")", "return", "tuple", "(", "str_digit_to_int", "(", "c", ")", "if", "c", "not", "in", "keep", "else", "c", "for", "c", "in", "string", ")" ...
Represent a number-string in the form of a tuple of digits. "868.0F" -> (8, 6, 8, '.', 0, 15) Args: string - Number represented as a string of digits. Returns: Number represented as an iterable container of digits >>> represent_as_tuple('868.0F') (8, 6, 8, '.', 0, 15)
[ "Represent", "a", "number", "-", "string", "in", "the", "form", "of", "a", "tuple", "of", "digits", ".", "868", ".", "0F", "-", ">", "(", "8", "6", "8", ".", "0", "15", ")", "Args", ":", "string", "-", "Number", "represented", "as", "a", "string"...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L181-L195
squdle/baseconvert
baseconvert/baseconvert.py
represent_as_string
def represent_as_string(iterable):
    """
    Represent a number in the form of a string.

    (8, 6, 8, '.', 0, 15) -> "868.0F"

    Args:
        iterable - Number represented as an iterable container of digits.
    Returns:
        Number represented as a string of digits.

    >>> represent_as_string((8, 6, 8, '.', 0, 15))
    '868.0F'
    """
    # '.', '[' and ']' are already characters; integer digits are mapped
    # to their single-character representation.
    passthrough = {".", "[", "]"}
    chars = [i if i in passthrough else int_to_str_digit(i)
             for i in iterable]
    return "".join(chars)
python
def represent_as_string(iterable): """ Represent a number in the form of a string. (8, 6, 8, '.', 0, 15) -> "868.0F" Args: iterable - Number represented as an iterable container of digits. Returns: Number represented as a string of digits. >>> represent_as_string((8, 6, 8, '.', 0, 15)) '868.0F' """ keep = (".", "[", "]") return "".join(tuple(int_to_str_digit(i) if i not in keep else i for i in iterable))
[ "def", "represent_as_string", "(", "iterable", ")", ":", "keep", "=", "(", "\".\"", ",", "\"[\"", ",", "\"]\"", ")", "return", "\"\"", ".", "join", "(", "tuple", "(", "int_to_str_digit", "(", "i", ")", "if", "i", "not", "in", "keep", "else", "i", "fo...
Represent a number in the form of a string. (8, 6, 8, '.', 0, 15) -> "868.0F" Args: iterable - Number represented as an iterable container of digits. Returns: Number represented as a string of digits. >>> represent_as_string((8, 6, 8, '.', 0, 15)) '868.0F'
[ "Represent", "a", "number", "in", "the", "form", "of", "a", "string", ".", "(", "8", "6", "8", ".", "0", "15", ")", "-", ">", "868", ".", "0F", "Args", ":", "iterable", "-", "Number", "represented", "as", "an", "iterable", "container", "of", "digit...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L198-L213
squdle/baseconvert
baseconvert/baseconvert.py
digit
def digit(decimal, digit, input_base=10):
    """
    Find the value of an integer at a specific digit when represented in a
    particular base.

    Args:
        decimal(int): A number represented in base 10 (positive integer).
        digit(int): The digit to find where zero is the first, lowest, digit.
        input_base(int): The base to use (default 10).
    Returns:
        The value at specified digit in the input decimal.
        This output value is represented as a base 10 integer.

    Examples:
        >>> digit(201, 0)
        1
        >>> digit(201, 1)
        0
        >>> digit(201, 2)
        2
        >>> tuple(digit(253, i, 2) for i in range(8))
        (1, 0, 1, 1, 1, 1, 1, 1)
        >>> digit(123456789123456789, 0, 16)
        5
    """
    if decimal == 0:
        return 0
    # Shift the requested digit down to the ones place, then mask with a
    # modulo.  (For digit == 0 the shift is a no-op.)
    shifted = decimal // input_base ** digit if digit else decimal
    return shifted % input_base
python
def digit(decimal, digit, input_base=10): """ Find the value of an integer at a specific digit when represented in a particular base. Args: decimal(int): A number represented in base 10 (positive integer). digit(int): The digit to find where zero is the first, lowest, digit. base(int): The base to use (default 10). Returns: The value at specified digit in the input decimal. This output value is represented as a base 10 integer. Examples: >>> digit(201, 0) 1 >>> digit(201, 1) 0 >>> digit(201, 2) 2 >>> tuple(digit(253, i, 2) for i in range(8)) (1, 0, 1, 1, 1, 1, 1, 1) # Find the lowest digit of a large hexidecimal number >>> digit(123456789123456789, 0, 16) 5 """ if decimal == 0: return 0 if digit != 0: return (decimal // (input_base ** digit)) % input_base else: return decimal % input_base
[ "def", "digit", "(", "decimal", ",", "digit", ",", "input_base", "=", "10", ")", ":", "if", "decimal", "==", "0", ":", "return", "0", "if", "digit", "!=", "0", ":", "return", "(", "decimal", "//", "(", "input_base", "**", "digit", ")", ")", "%", ...
Find the value of an integer at a specific digit when represented in a particular base. Args: decimal(int): A number represented in base 10 (positive integer). digit(int): The digit to find where zero is the first, lowest, digit. base(int): The base to use (default 10). Returns: The value at specified digit in the input decimal. This output value is represented as a base 10 integer. Examples: >>> digit(201, 0) 1 >>> digit(201, 1) 0 >>> digit(201, 2) 2 >>> tuple(digit(253, i, 2) for i in range(8)) (1, 0, 1, 1, 1, 1, 1, 1) # Find the lowest digit of a large hexidecimal number >>> digit(123456789123456789, 0, 16) 5
[ "Find", "the", "value", "of", "an", "integer", "at", "a", "specific", "digit", "when", "represented", "in", "a", "particular", "base", ".", "Args", ":", "decimal", "(", "int", ")", ":", "A", "number", "represented", "in", "base", "10", "(", "positive", ...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L216-L249
squdle/baseconvert
baseconvert/baseconvert.py
digits
def digits(number, base=10):
    """
    Determine the number of digits of a number in a specific base.

    Args:
        number(int): An integer number represented in base 10.
        base(int): The base to find the number of digits.
    Returns:
        Number of digits when represented in a particular base (integer).
        Zero for any input below 1 (including all negative numbers).

    Examples:
        >>> digits(255)
        3
        >>> digits(255, 16)
        2
        >>> digits(256, 16)
        3
        >>> digits(256, 2)
        9
        >>> digits(0, 678363)
        0
        >>> digits(-1, 678363)
        0
        >>> digits(12345, 10)
        5
    """
    if number < 1:
        return 0
    # Repeated floor division strips one base-`base` digit per iteration.
    # (The original also assigned an unused local `n = 1` and shadowed the
    # function name with its counter; both cleaned up here.)
    count = 0
    while number >= 1:
        number //= base
        count += 1
    return count
python
def digits(number, base=10): """ Determines the number of digits of a number in a specific base. Args: number(int): An integer number represented in base 10. base(int): The base to find the number of digits. Returns: Number of digits when represented in a particular base (integer). Examples: >>> digits(255) 3 >>> digits(255, 16) 2 >>> digits(256, 16) 3 >>> digits(256, 2) 9 >>> digits(0, 678363) 0 >>> digits(-1, 678363) 0 >>> digits(12345, 10) 5 """ if number < 1: return 0 digits = 0 n = 1 while(number >= 1): number //= base digits += 1 return digits
[ "def", "digits", "(", "number", ",", "base", "=", "10", ")", ":", "if", "number", "<", "1", ":", "return", "0", "digits", "=", "0", "n", "=", "1", "while", "(", "number", ">=", "1", ")", ":", "number", "//=", "base", "digits", "+=", "1", "retur...
Determines the number of digits of a number in a specific base. Args: number(int): An integer number represented in base 10. base(int): The base to find the number of digits. Returns: Number of digits when represented in a particular base (integer). Examples: >>> digits(255) 3 >>> digits(255, 16) 2 >>> digits(256, 16) 3 >>> digits(256, 2) 9 >>> digits(0, 678363) 0 >>> digits(-1, 678363) 0 >>> digits(12345, 10) 5
[ "Determines", "the", "number", "of", "digits", "of", "a", "number", "in", "a", "specific", "base", ".", "Args", ":", "number", "(", "int", ")", ":", "An", "integer", "number", "represented", "in", "base", "10", ".", "base", "(", "int", ")", ":", "The...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L252-L286
squdle/baseconvert
baseconvert/baseconvert.py
integer_fractional_parts
def integer_fractional_parts(number):
    """
    Split a number tuple into its integer and fractional parts.

    Args:
        number(iterable container): A number in the following form:
            (..., ".", int, int, int, ...)
    Returns:
        (integer_part, fractional_part): tuple.

    Example:
        >>> integer_fractional_parts((1,2,3,".",4,5,6))
        ((1, 2, 3), ('.', 4, 5, 6))
    """
    split_at = number.index(".")
    # The radix point itself stays with the fractional part.
    return number[:split_at], number[split_at:]
python
def integer_fractional_parts(number): """ Returns a tuple of the integer and fractional parts of a number. Args: number(iterable container): A number in the following form: (..., ".", int, int, int, ...) Returns: (integer_part, fractional_part): tuple. Example: >>> integer_fractional_parts((1,2,3,".",4,5,6)) ((1, 2, 3), ('.', 4, 5, 6)) """ radix_point = number.index(".") integer_part = number[:radix_point] fractional_part = number[radix_point:] return(integer_part, fractional_part)
[ "def", "integer_fractional_parts", "(", "number", ")", ":", "radix_point", "=", "number", ".", "index", "(", "\".\"", ")", "integer_part", "=", "number", "[", ":", "radix_point", "]", "fractional_part", "=", "number", "[", "radix_point", ":", "]", "return", ...
Returns a tuple of the integer and fractional parts of a number. Args: number(iterable container): A number in the following form: (..., ".", int, int, int, ...) Returns: (integer_part, fractional_part): tuple. Example: >>> integer_fractional_parts((1,2,3,".",4,5,6)) ((1, 2, 3), ('.', 4, 5, 6))
[ "Returns", "a", "tuple", "of", "the", "integer", "and", "fractional", "parts", "of", "a", "number", ".", "Args", ":", "number", "(", "iterable", "container", ")", ":", "A", "number", "in", "the", "following", "form", ":", "(", "...", ".", "int", "int",...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L289-L307
squdle/baseconvert
baseconvert/baseconvert.py
from_base_10_int
def from_base_10_int(decimal, output_base=10):
    """
    Convert a decimal integer to a specific base.

    Args:
        decimal(int) A base 10 number.
        output_base(int) base to convert to.
    Returns:
        A tuple of digits in the specified base.

    Examples:
        >>> from_base_10_int(255)
        (2, 5, 5)
        >>> from_base_10_int(255, 16)
        (15, 15)
        >>> from_base_10_int(9988664439, 8)
        (1, 1, 2, 3, 2, 7, 5, 6, 6, 1, 6, 7)
        >>> from_base_10_int(0, 17)
        (0,)
    """
    if decimal <= 0:
        return (0,)
    if output_base == 1:
        # Unary: a run of `decimal` ones.
        return (1,) * decimal
    # Repeated divmod produces digits least-significant first.
    result = []
    while decimal:
        decimal, remainder = divmod(decimal, output_base)
        result.append(remainder)
    return tuple(reversed(result))
python
def from_base_10_int(decimal, output_base=10): """ Converts a decimal integer to a specific base. Args: decimal(int) A base 10 number. output_base(int) base to convert to. Returns: A tuple of digits in the specified base. Examples: >>> from_base_10_int(255) (2, 5, 5) >>> from_base_10_int(255, 16) (15, 15) >>> from_base_10_int(9988664439, 8) (1, 1, 2, 3, 2, 7, 5, 6, 6, 1, 6, 7) >>> from_base_10_int(0, 17) (0,) """ if decimal <= 0: return (0,) if output_base == 1: return (1,) * decimal length = digits(decimal, output_base) converted = tuple(digit(decimal, i, output_base) for i in range(length)) return converted[::-1]
[ "def", "from_base_10_int", "(", "decimal", ",", "output_base", "=", "10", ")", ":", "if", "decimal", "<=", "0", ":", "return", "(", "0", ",", ")", "if", "output_base", "==", "1", ":", "return", "(", "1", ",", ")", "*", "decimal", "length", "=", "di...
Converts a decimal integer to a specific base. Args: decimal(int) A base 10 number. output_base(int) base to convert to. Returns: A tuple of digits in the specified base. Examples: >>> from_base_10_int(255) (2, 5, 5) >>> from_base_10_int(255, 16) (15, 15) >>> from_base_10_int(9988664439, 8) (1, 1, 2, 3, 2, 7, 5, 6, 6, 1, 6, 7) >>> from_base_10_int(0, 17) (0,)
[ "Converts", "a", "decimal", "integer", "to", "a", "specific", "base", ".", "Args", ":", "decimal", "(", "int", ")", "A", "base", "10", "number", ".", "output_base", "(", "int", ")", "base", "to", "convert", "to", ".", "Returns", ":", "A", "tuple", "o...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L310-L337
squdle/baseconvert
baseconvert/baseconvert.py
to_base_10_int
def to_base_10_int(n, input_base):
    """
    Convert an integer in any base into its decimal representation.

    Args:
        n - An integer represented as a tuple of digits in the specified base.
        input_base - the base of the input number.
    Returns:
        integer converted into base 10.

    Example:
        >>> to_base_10_int((8,1), 16)
        129
    """
    # Horner's method: fold digits most-significant first.
    total = 0
    for digit_value in n:
        total = total * input_base + digit_value
    return total
python
def to_base_10_int(n, input_base): """ Converts an integer in any base into it's decimal representation. Args: n - An integer represented as a tuple of digits in the specified base. input_base - the base of the input number. Returns: integer converted into base 10. Example: >>> to_base_10_int((8,1), 16) 129 """ return sum(c * input_base ** i for i, c in enumerate(n[::-1]))
[ "def", "to_base_10_int", "(", "n", ",", "input_base", ")", ":", "return", "sum", "(", "c", "*", "input_base", "**", "i", "for", "i", ",", "c", "in", "enumerate", "(", "n", "[", ":", ":", "-", "1", "]", ")", ")" ]
Converts an integer in any base into it's decimal representation. Args: n - An integer represented as a tuple of digits in the specified base. input_base - the base of the input number. Returns: integer converted into base 10. Example: >>> to_base_10_int((8,1), 16) 129
[ "Converts", "an", "integer", "in", "any", "base", "into", "it", "s", "decimal", "representation", ".", "Args", ":", "n", "-", "An", "integer", "represented", "as", "a", "tuple", "of", "digits", "in", "the", "specified", "base", ".", "input_base", "-", "t...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L340-L355
squdle/baseconvert
baseconvert/baseconvert.py
fractional_base
def fractional_base(fractional_part, input_base=10, output_base=10,
                    max_depth=100):
    """
    Convert the fractional part of a number from any base to any base.

    Args:
        fractional_part(iterable container): The fractional part of a number in
            the following form: ( ".", int, int, int, ...)
        input_base(int): The base to convert from (default 10).
        output_base(int): The base to convert to (default 10).
        max_depth(int): The maximum number of output digits.
    Returns:
        The converted number as a tuple of digits, radix point first.

    Example:
        >>> fractional_base((".", 6,),10,16,10)
        ('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
    """
    digits_in = fractional_part[1:]  # drop the leading "."
    n_digits = len(digits_in)
    # Express the fraction as an exact rational numerator / denominator.
    numerator = 0
    for i, value in enumerate(digits_in, 1):
        numerator += value * input_base ** (n_digits - i)
    denominator = input_base ** n_digits
    # Long division: multiplying the remainder by output_base shifts out one
    # output digit per step.  (The original scaled both numerator and
    # denominator by output_base**i each iteration — same digits, but the
    # integers grew exponentially; it also computed a gcd whose result was
    # never used.)
    out_digits = []
    for _ in range(max_depth):
        numerator *= output_base
        out_digit, numerator = divmod(numerator, denominator)
        out_digits.append(out_digit)
    return (".",) + tuple(out_digits)
python
def fractional_base(fractional_part, input_base=10, output_base=10, max_depth=100): """ Convert the fractional part of a number from any base to any base. Args: fractional_part(iterable container): The fractional part of a number in the following form: ( ".", int, int, int, ...) input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of decimal digits to output. Returns: The converted number as a tuple of digits. Example: >>> fractional_base((".", 6,),10,16,10) ('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9) """ fractional_part = fractional_part[1:] fractional_digits = len(fractional_part) numerator = 0 for i, value in enumerate(fractional_part, 1): numerator += value * input_base ** (fractional_digits - i) denominator = input_base ** fractional_digits i = 1 digits = [] while(i < max_depth + 1): numerator *= output_base ** i digit = numerator // denominator numerator -= digit * denominator denominator *= output_base ** i digits.append(digit) i += 1 greatest_common_divisor = gcd(numerator, denominator) numerator //= greatest_common_divisor denominator //= greatest_common_divisor return (".",) + tuple(digits)
[ "def", "fractional_base", "(", "fractional_part", ",", "input_base", "=", "10", ",", "output_base", "=", "10", ",", "max_depth", "=", "100", ")", ":", "fractional_part", "=", "fractional_part", "[", "1", ":", "]", "fractional_digits", "=", "len", "(", "fract...
Convert the fractional part of a number from any base to any base. Args: fractional_part(iterable container): The fractional part of a number in the following form: ( ".", int, int, int, ...) input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of decimal digits to output. Returns: The converted number as a tuple of digits. Example: >>> fractional_base((".", 6,),10,16,10) ('.', 9, 9, 9, 9, 9, 9, 9, 9, 9, 9)
[ "Convert", "the", "fractional", "part", "of", "a", "number", "from", "any", "base", "to", "any", "base", ".", "Args", ":", "fractional_part", "(", "iterable", "container", ")", ":", "The", "fractional", "part", "of", "a", "number", "in", "the", "following"...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L388-L425
squdle/baseconvert
baseconvert/baseconvert.py
truncate
def truncate(n):
    """
    Remove trailing zeros from a number tuple.

    Args:
        n: The number to truncate. This number should be in the following
           form: (..., '.', int, int, int, ..., 0)
    Returns:
        n with all trailing zeros removed

    >>> truncate((9, 9, 9, '.', 9, 9, 9, 9, 0, 0, 0, 0))
    (9, 9, 9, '.', 9, 9, 9, 9)
    >>> truncate(('.',))
    ('.',)
    """
    # Walk backwards past the run of trailing integer zeros.  The radix
    # point compares unequal to 0, so it always survives.
    end = len(n)
    while end > 0 and n[end - 1] == 0:
        end -= 1
    return n[:end]
python
def truncate(n): """ Removes trailing zeros. Args: n: The number to truncate. This number should be in the following form: (..., '.', int, int, int, ..., 0) Returns: n with all trailing zeros removed >>> truncate((9, 9, 9, '.', 9, 9, 9, 9, 0, 0, 0, 0)) (9, 9, 9, '.', 9, 9, 9, 9) >>> truncate(('.',)) ('.',) """ count = 0 for digit in n[-1::-1]: if digit != 0: break count += 1 return n[:-count] if count > 0 else n
[ "def", "truncate", "(", "n", ")", ":", "count", "=", "0", "for", "digit", "in", "n", "[", "-", "1", ":", ":", "-", "1", "]", ":", "if", "digit", "!=", "0", ":", "break", "count", "+=", "1", "return", "n", "[", ":", "-", "count", "]", "if", ...
Removes trailing zeros. Args: n: The number to truncate. This number should be in the following form: (..., '.', int, int, int, ..., 0) Returns: n with all trailing zeros removed >>> truncate((9, 9, 9, '.', 9, 9, 9, 9, 0, 0, 0, 0)) (9, 9, 9, '.', 9, 9, 9, 9) >>> truncate(('.',)) ('.',)
[ "Removes", "trailing", "zeros", ".", "Args", ":", "n", ":", "The", "number", "to", "truncate", ".", "This", "number", "should", "be", "in", "the", "following", "form", ":", "(", "...", ".", "int", "int", "int", "...", "0", ")", "Returns", ":", "n", ...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L428-L449
squdle/baseconvert
baseconvert/baseconvert.py
str_digit_to_int
def str_digit_to_int(chr):
    """
    Convert a string character to a decimal number.
    Where "A"->10, "B"->11, "C"->12, ...etc

    Args:
        chr(str): A single character in the form of a string.
    Returns:
        The integer value of the input string digit.
    """
    # Plain ASCII digits map directly.
    if chr in {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}:
        return int(chr)
    code = ord(chr)
    # 'A'-'Z' (ord < 91) map to 10-35; 'a'-'z' and beyond map to 36+.
    return code - 55 if code < 91 else code - 61
python
def str_digit_to_int(chr): """ Converts a string character to a decimal number. Where "A"->10, "B"->11, "C"->12, ...etc Args: chr(str): A single character in the form of a string. Returns: The integer value of the input string digit. """ # 0 - 9 if chr in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"): n = int(chr) else: n = ord(chr) # A - Z if n < 91: n -= 55 # a - z or higher else: n -= 61 return n
[ "def", "str_digit_to_int", "(", "chr", ")", ":", "# 0 - 9\r", "if", "chr", "in", "(", "\"0\"", ",", "\"1\"", ",", "\"2\"", ",", "\"3\"", ",", "\"4\"", ",", "\"5\"", ",", "\"6\"", ",", "\"7\"", ",", "\"8\"", ",", "\"9\"", ")", ":", "n", "=", "int", ...
Converts a string character to a decimal number. Where "A"->10, "B"->11, "C"->12, ...etc Args: chr(str): A single character in the form of a string. Returns: The integer value of the input string digit.
[ "Converts", "a", "string", "character", "to", "a", "decimal", "number", ".", "Where", "A", "-", ">", "10", "B", "-", ">", "11", "C", "-", ">", "12", "...", "etc", "Args", ":", "chr", "(", "str", ")", ":", "A", "single", "character", "in", "the", ...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L452-L474
squdle/baseconvert
baseconvert/baseconvert.py
int_to_str_digit
def int_to_str_digit(n):
    """
    Convert a positive integer to a single string character.
    Where: 9 -> "9", 10 -> "A", 11 -> "B", 12 -> "C", ...etc

    Args:
        n(int): A positve integer number.
    Returns:
        The character representation of the input digit of value n (str).
    """
    if n < 10:
        return str(n)       # 0 - 9
    if n < 36:
        return chr(n + 55)  # A - Z (10 -> 'A')
    return chr(n + 61)      # a - z or higher (36 -> 'a')
python
def int_to_str_digit(n): """ Converts a positive integer, to a single string character. Where: 9 -> "9", 10 -> "A", 11 -> "B", 12 -> "C", ...etc Args: n(int): A positve integer number. Returns: The character representation of the input digit of value n (str). """ # 0 - 9 if n < 10: return str(n) # A - Z elif n < 36: return chr(n + 55) # a - z or higher else: return chr(n + 61)
[ "def", "int_to_str_digit", "(", "n", ")", ":", "# 0 - 9\r", "if", "n", "<", "10", ":", "return", "str", "(", "n", ")", "# A - Z\r", "elif", "n", "<", "36", ":", "return", "chr", "(", "n", "+", "55", ")", "# a - z or higher\r", "else", ":", "return", ...
Converts a positive integer, to a single string character. Where: 9 -> "9", 10 -> "A", 11 -> "B", 12 -> "C", ...etc Args: n(int): A positve integer number. Returns: The character representation of the input digit of value n (str).
[ "Converts", "a", "positive", "integer", "to", "a", "single", "string", "character", ".", "Where", ":", "9", "-", ">", "9", "10", "-", ">", "A", "11", "-", ">", "B", "12", "-", ">", "C", "...", "etc", "Args", ":", "n", "(", "int", ")", ":", "A...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L477-L496
squdle/baseconvert
baseconvert/baseconvert.py
find_recurring
def find_recurring(number, min_repeat=5):
    """
    Attempts to find repeating digits in the fractional component of a number.

    Args:
        number(tuple): the number to process in the form:
            (int, int, int, ... ".", ... , int int int)
        min_repeat(int): the minimum number of times a pattern must occur to be
            defined as recurring. A min_repeat of n would mean a pattern must
            occur at least n + 1 times, so as to be repeated n times.
    Returns:
        The original number with repeating digits (if found) enclosed by "[" and
        "]" (tuple).

    Examples:
        >>> find_recurring((3, 2, 1, '.', 1, 2, 3, 1, 2, 3), min_repeat=1)
        (3, 2, 1, '.', '[', 1, 2, 3, ']')
    """
    # Return number if it has no fractional part, or min_repeat value invalid.
    if "." not in number or min_repeat < 1:
        return number
    # Separate the number into integer and fractional parts.
    integer_part, fractional_part = integer_fractional_parts(number)
    # Reverse fractional part to get a sequence: the search runs from the
    # least-significant digit backwards toward the radix point.
    sequence = fractional_part[::-1]
    # Initialize counters
    # The 'period' is the number of digits in a pattern.
    period = 0
    # The best pattern found will be stored.
    best = 0
    best_period = 0
    best_repeat = 0
    # Find recurring pattern: for every candidate period, count how many
    # contiguous copies of the leading `period` digits follow the first one.
    while (period < len(sequence)):
        period += 1
        pattern = sequence[:period]
        repeat = 0
        digit = period
        pattern_match = True
        while(pattern_match and digit < len(sequence)):
            # for/else: `repeat` is only incremented when the whole segment
            # matched (i.e. the for loop finished without break).  The '.'
            # element never matches a digit, so comparisons stop before
            # indexing past the end of `sequence`.
            for i, pattern_digit in enumerate(pattern):
                if sequence[digit + i] != pattern_digit:
                    pattern_match = False
                    break
            else:
                repeat += 1
            digit += period
        # Give each pattern found a rank and use the best.
        # rank == total number of repeated digits (period * repeat).
        rank = period * repeat
        if rank > best:
            best_period = period
            best_repeat = repeat
            best = rank
    # If the pattern does not repeat often enough, return the original number.
    if best_repeat < min_repeat:
        return number
    # Use the best pattern found.
    pattern = sequence[:best_period]
    # Remove the pattern from our original number (keep everything before
    # the repeated region: best + best_period digits are dropped).
    number = integer_part + fractional_part[:-(best + best_period)]
    # Ensure we are at the start of the pattern: while the retained tail
    # still ends with the next expected pattern digit, absorb that digit
    # and rotate the pattern one position.
    pattern_temp = pattern
    for i, digit in enumerate(pattern):
        if number[-1] == digit:
            number = number[:-1]
            pattern_temp = pattern_temp[1:] + (pattern_temp[0],)
    pattern = pattern_temp
    # Return the number with the recurring pattern enclosed with '[' and ']'.
    return number + ("[",) + pattern[::-1] + ("]",)
python
def find_recurring(number, min_repeat=5): """ Attempts to find repeating digits in the fractional component of a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) min_repeat(int): the minimum number of times a pattern must occur to be defined as recurring. A min_repeat of n would mean a pattern must occur at least n + 1 times, so as to be repeated n times. Returns: The original number with repeating digits (if found) enclosed by "[" and "]" (tuple). Examples: >>> find_recurring((3, 2, 1, '.', 1, 2, 3, 1, 2, 3), min_repeat=1) (3, 2, 1, '.', '[', 1, 2, 3, ']') """ # Return number if it has no fractional part, or min_repeat value invalid. if "." not in number or min_repeat < 1: return number # Seperate the number into integer and fractional parts. integer_part, fractional_part = integer_fractional_parts(number) # Reverse fractional part to get a sequence. sequence = fractional_part[::-1] # Initialize counters # The 'period' is the number of digits in a pattern. period = 0 # The best pattern found will be stored. best = 0 best_period = 0 best_repeat = 0 # Find recurring pattern. while (period < len(sequence)): period += 1 pattern = sequence[:period] repeat = 0 digit = period pattern_match = True while(pattern_match and digit < len(sequence)): for i, pattern_digit in enumerate(pattern): if sequence[digit + i] != pattern_digit: pattern_match = False break else: repeat += 1 digit += period # Give each pattern found a rank and use the best. rank = period * repeat if rank > best: best_period = period best_repeat = repeat best = rank # If the pattern does not repeat often enough, return the original number. if best_repeat < min_repeat: return number # Use the best pattern found. pattern = sequence[:best_period] # Remove the pattern from our original number. number = integer_part + fractional_part[:-(best + best_period)] # Ensure we are at the start of the pattern. 
pattern_temp = pattern for i, digit in enumerate(pattern): if number[-1] == digit: number = number[:-1] pattern_temp = pattern_temp[1:] + (pattern_temp[0],) pattern = pattern_temp # Return the number with the recurring pattern enclosed with '[' and ']'. return number + ("[",) + pattern[::-1] + ("]",)
[ "def", "find_recurring", "(", "number", ",", "min_repeat", "=", "5", ")", ":", "# Return number if it has no fractional part, or min_repeat value invalid.\r", "if", "\".\"", "not", "in", "number", "or", "min_repeat", "<", "1", ":", "return", "number", "# Seperate the nu...
Attempts to find repeating digits in the fractional component of a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) min_repeat(int): the minimum number of times a pattern must occur to be defined as recurring. A min_repeat of n would mean a pattern must occur at least n + 1 times, so as to be repeated n times. Returns: The original number with repeating digits (if found) enclosed by "[" and "]" (tuple). Examples: >>> find_recurring((3, 2, 1, '.', 1, 2, 3, 1, 2, 3), min_repeat=1) (3, 2, 1, '.', '[', 1, 2, 3, ']')
[ "Attempts", "to", "find", "repeating", "digits", "in", "the", "fractional", "component", "of", "a", "number", ".", "Args", ":", "number", "(", "tuple", ")", ":", "the", "number", "to", "process", "in", "the", "form", ":", "(", "int", "int", "int", "......
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L499-L568
squdle/baseconvert
baseconvert/baseconvert.py
expand_recurring
def expand_recurring(number, repeat=5): """ Expands a recurring pattern within a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) repeat: the number of times to expand the pattern. Returns: The original number with recurring pattern expanded. Example: >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3) (1, '.', 0, 9, 9, 9, 9) """ if "[" in number: pattern_index = number.index("[") pattern = number[pattern_index + 1:-1] number = number[:pattern_index] number = number + pattern * (repeat + 1) return number
python
def expand_recurring(number, repeat=5): """ Expands a recurring pattern within a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) repeat: the number of times to expand the pattern. Returns: The original number with recurring pattern expanded. Example: >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3) (1, '.', 0, 9, 9, 9, 9) """ if "[" in number: pattern_index = number.index("[") pattern = number[pattern_index + 1:-1] number = number[:pattern_index] number = number + pattern * (repeat + 1) return number
[ "def", "expand_recurring", "(", "number", ",", "repeat", "=", "5", ")", ":", "if", "\"[\"", "in", "number", ":", "pattern_index", "=", "number", ".", "index", "(", "\"[\"", ")", "pattern", "=", "number", "[", "pattern_index", "+", "1", ":", "-", "1", ...
Expands a recurring pattern within a number. Args: number(tuple): the number to process in the form: (int, int, int, ... ".", ... , int int int) repeat: the number of times to expand the pattern. Returns: The original number with recurring pattern expanded. Example: >>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3) (1, '.', 0, 9, 9, 9, 9)
[ "Expands", "a", "recurring", "pattern", "within", "a", "number", ".", "Args", ":", "number", "(", "tuple", ")", ":", "the", "number", "to", "process", "in", "the", "form", ":", "(", "int", "int", "int", "...", ".", "...", "int", "int", "int", ")", ...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L571-L592
squdle/baseconvert
baseconvert/baseconvert.py
check_valid
def check_valid(number, input_base=10): """ Checks if there is an invalid digit in the input number. Args: number: An number in the following form: (int, int, int, ... , '.' , int, int, int) (iterable container) containing positive integers of the input base input_base(int): The base of the input number. Returns: bool, True if all digits valid, else False. Examples: >>> check_valid((1,9,6,'.',5,1,6), 12) True >>> check_valid((8,1,15,9), 15) False """ for n in number: if n in (".", "[", "]"): continue elif n >= input_base: if n == 1 and input_base == 1: continue else: return False return True
python
def check_valid(number, input_base=10): """ Checks if there is an invalid digit in the input number. Args: number: An number in the following form: (int, int, int, ... , '.' , int, int, int) (iterable container) containing positive integers of the input base input_base(int): The base of the input number. Returns: bool, True if all digits valid, else False. Examples: >>> check_valid((1,9,6,'.',5,1,6), 12) True >>> check_valid((8,1,15,9), 15) False """ for n in number: if n in (".", "[", "]"): continue elif n >= input_base: if n == 1 and input_base == 1: continue else: return False return True
[ "def", "check_valid", "(", "number", ",", "input_base", "=", "10", ")", ":", "for", "n", "in", "number", ":", "if", "n", "in", "(", "\".\"", ",", "\"[\"", ",", "\"]\"", ")", ":", "continue", "elif", "n", ">=", "input_base", ":", "if", "n", "==", ...
Checks if there is an invalid digit in the input number. Args: number: An number in the following form: (int, int, int, ... , '.' , int, int, int) (iterable container) containing positive integers of the input base input_base(int): The base of the input number. Returns: bool, True if all digits valid, else False. Examples: >>> check_valid((1,9,6,'.',5,1,6), 12) True >>> check_valid((8,1,15,9), 15) False
[ "Checks", "if", "there", "is", "an", "invalid", "digit", "in", "the", "input", "number", ".", "Args", ":", "number", ":", "An", "number", "in", "the", "following", "form", ":", "(", "int", "int", "int", "...", ".", "int", "int", "int", ")", "(", "i...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L595-L622
squdle/baseconvert
baseconvert/baseconvert.py
base
def base(number, input_base=10, output_base=10, max_depth=10, string=False, recurring=True): """ Converts a number from any base to any another. Args: number(tuple|str|int): The number to convert. input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of fractional digits (defult 10). string(bool): If True output will be in string representation, if False output will be in tuple representation (defult False). recurring(bool): Attempt to find repeating digits in the fractional part of a number. Repeated digits will be enclosed with "[" and "]" (default True). Returns: A tuple of digits in the specified base: (int, int, int, ... , '.' , int, int, int) If the string flag is set to True, a string representation will be used instead. Raises: ValueError if a digit value is too high for the input_base. Example: >>> base((1,9,6,'.',5,1,6), 17, 20) (1, 2, 8, '.', 5, 19, 10, 7, 17, 2, 13, 13, 1, 8) """ # Convert number to tuple representation. if type(number) == int or type(number) == float: number = str(number) if type(number) == str: number = represent_as_tuple(number) # Check that the number is valid for the input base. if not check_valid(number, input_base): raise ValueError # Deal with base-1 special case if input_base == 1: number = (1,) * number.count(1) # Expand any recurring digits. number = expand_recurring(number, repeat=5) # Convert a fractional number. if "." in number: radix_point = number.index(".") integer_part = number[:radix_point] fractional_part = number[radix_point:] integer_part = integer_base(integer_part, input_base, output_base) fractional_part = fractional_base(fractional_part, input_base, output_base, max_depth) number = integer_part + fractional_part number = truncate(number) # Convert an integer number. 
else: number = integer_base(number, input_base, output_base) if recurring: number = find_recurring(number, min_repeat=2) # Return the converted number as a srring or tuple. return represent_as_string(number) if string else number
python
def base(number, input_base=10, output_base=10, max_depth=10, string=False, recurring=True): """ Converts a number from any base to any another. Args: number(tuple|str|int): The number to convert. input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of fractional digits (defult 10). string(bool): If True output will be in string representation, if False output will be in tuple representation (defult False). recurring(bool): Attempt to find repeating digits in the fractional part of a number. Repeated digits will be enclosed with "[" and "]" (default True). Returns: A tuple of digits in the specified base: (int, int, int, ... , '.' , int, int, int) If the string flag is set to True, a string representation will be used instead. Raises: ValueError if a digit value is too high for the input_base. Example: >>> base((1,9,6,'.',5,1,6), 17, 20) (1, 2, 8, '.', 5, 19, 10, 7, 17, 2, 13, 13, 1, 8) """ # Convert number to tuple representation. if type(number) == int or type(number) == float: number = str(number) if type(number) == str: number = represent_as_tuple(number) # Check that the number is valid for the input base. if not check_valid(number, input_base): raise ValueError # Deal with base-1 special case if input_base == 1: number = (1,) * number.count(1) # Expand any recurring digits. number = expand_recurring(number, repeat=5) # Convert a fractional number. if "." in number: radix_point = number.index(".") integer_part = number[:radix_point] fractional_part = number[radix_point:] integer_part = integer_base(integer_part, input_base, output_base) fractional_part = fractional_base(fractional_part, input_base, output_base, max_depth) number = integer_part + fractional_part number = truncate(number) # Convert an integer number. 
else: number = integer_base(number, input_base, output_base) if recurring: number = find_recurring(number, min_repeat=2) # Return the converted number as a srring or tuple. return represent_as_string(number) if string else number
[ "def", "base", "(", "number", ",", "input_base", "=", "10", ",", "output_base", "=", "10", ",", "max_depth", "=", "10", ",", "string", "=", "False", ",", "recurring", "=", "True", ")", ":", "# Convert number to tuple representation.\r", "if", "type", "(", ...
Converts a number from any base to any another. Args: number(tuple|str|int): The number to convert. input_base(int): The base to convert from (defualt 10). output_base(int): The base to convert to (default 10). max_depth(int): The maximum number of fractional digits (defult 10). string(bool): If True output will be in string representation, if False output will be in tuple representation (defult False). recurring(bool): Attempt to find repeating digits in the fractional part of a number. Repeated digits will be enclosed with "[" and "]" (default True). Returns: A tuple of digits in the specified base: (int, int, int, ... , '.' , int, int, int) If the string flag is set to True, a string representation will be used instead. Raises: ValueError if a digit value is too high for the input_base. Example: >>> base((1,9,6,'.',5,1,6), 17, 20) (1, 2, 8, '.', 5, 19, 10, 7, 17, 2, 13, 13, 1, 8)
[ "Converts", "a", "number", "from", "any", "base", "to", "any", "another", ".", "Args", ":", "number", "(", "tuple|str|int", ")", ":", "The", "number", "to", "convert", ".", "input_base", "(", "int", ")", ":", "The", "base", "to", "convert", "from", "("...
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/baseconvert.py#L625-L682
tkf/rash
rash/utils/confutils.py
get_config_directory
def get_config_directory(appname): """ Get OS-specific configuration directory. :type appname: str :arg appname: capitalized name of the application """ if platform.system().lower() == 'windows': path = os.path.join(os.getenv('APPDATA') or '~', appname, appname) elif platform.system().lower() == 'darwin': path = os.path.join('~', 'Library', 'Application Support', appname) else: path = os.path.join(os.getenv('XDG_CONFIG_HOME') or '~/.config', appname.lower()) return os.path.expanduser(path)
python
def get_config_directory(appname): """ Get OS-specific configuration directory. :type appname: str :arg appname: capitalized name of the application """ if platform.system().lower() == 'windows': path = os.path.join(os.getenv('APPDATA') or '~', appname, appname) elif platform.system().lower() == 'darwin': path = os.path.join('~', 'Library', 'Application Support', appname) else: path = os.path.join(os.getenv('XDG_CONFIG_HOME') or '~/.config', appname.lower()) return os.path.expanduser(path)
[ "def", "get_config_directory", "(", "appname", ")", ":", "if", "platform", ".", "system", "(", ")", ".", "lower", "(", ")", "==", "'windows'", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getenv", "(", "'APPDATA'", ")", "or", ...
Get OS-specific configuration directory. :type appname: str :arg appname: capitalized name of the application
[ "Get", "OS", "-", "specific", "configuration", "directory", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/confutils.py#L21-L36
stefanfoulis/django-database-email-backend
database_email_backend/admin.py
SendEmailAdmin.save_model
def save_model(self, request, obj, form, change): """ sends the email and does not save it """ email = message.EmailMessage( subject=obj.subject, body=obj.body, from_email=obj.from_email, to=[t.strip() for t in obj.to_emails.split(',')], bcc=[t.strip() for t in obj.bcc_emails.split(',')], cc=[t.strip() for t in obj.cc_emails.split(',')] ) email.send()
python
def save_model(self, request, obj, form, change): """ sends the email and does not save it """ email = message.EmailMessage( subject=obj.subject, body=obj.body, from_email=obj.from_email, to=[t.strip() for t in obj.to_emails.split(',')], bcc=[t.strip() for t in obj.bcc_emails.split(',')], cc=[t.strip() for t in obj.cc_emails.split(',')] ) email.send()
[ "def", "save_model", "(", "self", ",", "request", ",", "obj", ",", "form", ",", "change", ")", ":", "email", "=", "message", ".", "EmailMessage", "(", "subject", "=", "obj", ".", "subject", ",", "body", "=", "obj", ".", "body", ",", "from_email", "="...
sends the email and does not save it
[ "sends", "the", "email", "and", "does", "not", "save", "it" ]
train
https://github.com/stefanfoulis/django-database-email-backend/blob/6dbd3f1d3e82fa9d33e442e6a62375e8536e44f0/database_email_backend/admin.py#L133-L145
INM-6/hybridLFPy
hybridLFPy/helpers.py
read_gdf
def read_gdf(fname): """ Fast line-by-line gdf-file reader. Parameters ---------- fname : str Path to gdf-file. Returns ------- numpy.ndarray ([gid, val0, val1, **]), dtype=object) mixed datatype array """ gdf_file = open(fname, 'r') gdf = [] for l in gdf_file: data = l.split() gdf += [data] gdf = np.array(gdf, dtype=object) if gdf.size > 0: gdf[:, 0] = gdf[:, 0].astype(int) gdf[:, 1:] = gdf[:, 1:].astype(float) return np.array(gdf)
python
def read_gdf(fname): """ Fast line-by-line gdf-file reader. Parameters ---------- fname : str Path to gdf-file. Returns ------- numpy.ndarray ([gid, val0, val1, **]), dtype=object) mixed datatype array """ gdf_file = open(fname, 'r') gdf = [] for l in gdf_file: data = l.split() gdf += [data] gdf = np.array(gdf, dtype=object) if gdf.size > 0: gdf[:, 0] = gdf[:, 0].astype(int) gdf[:, 1:] = gdf[:, 1:].astype(float) return np.array(gdf)
[ "def", "read_gdf", "(", "fname", ")", ":", "gdf_file", "=", "open", "(", "fname", ",", "'r'", ")", "gdf", "=", "[", "]", "for", "l", "in", "gdf_file", ":", "data", "=", "l", ".", "split", "(", ")", "gdf", "+=", "[", "data", "]", "gdf", "=", "...
Fast line-by-line gdf-file reader. Parameters ---------- fname : str Path to gdf-file. Returns ------- numpy.ndarray ([gid, val0, val1, **]), dtype=object) mixed datatype array
[ "Fast", "line", "-", "by", "-", "line", "gdf", "-", "file", "reader", ".", "Parameters", "----------", "fname", ":", "str", "Path", "to", "gdf", "-", "file", ".", "Returns", "-------", "numpy", ".", "ndarray", "(", "[", "gid", "val0", "val1", "**", "...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L34-L64
INM-6/hybridLFPy
hybridLFPy/helpers.py
write_gdf
def write_gdf(gdf, fname): """ Fast line-by-line gdf-file write function Parameters ---------- gdf : numpy.ndarray Column 0 is gids, columns 1: are values. fname : str Path to gdf-file. Returns ------- None """ gdf_file = open(fname, 'w') for line in gdf: for i in np.arange(len(line)): gdf_file.write(str(line[i]) + '\t') gdf_file.write('\n') return None
python
def write_gdf(gdf, fname): """ Fast line-by-line gdf-file write function Parameters ---------- gdf : numpy.ndarray Column 0 is gids, columns 1: are values. fname : str Path to gdf-file. Returns ------- None """ gdf_file = open(fname, 'w') for line in gdf: for i in np.arange(len(line)): gdf_file.write(str(line[i]) + '\t') gdf_file.write('\n') return None
[ "def", "write_gdf", "(", "gdf", ",", "fname", ")", ":", "gdf_file", "=", "open", "(", "fname", ",", "'w'", ")", "for", "line", "in", "gdf", ":", "for", "i", "in", "np", ".", "arange", "(", "len", "(", "line", ")", ")", ":", "gdf_file", ".", "wr...
Fast line-by-line gdf-file write function Parameters ---------- gdf : numpy.ndarray Column 0 is gids, columns 1: are values. fname : str Path to gdf-file. Returns ------- None
[ "Fast", "line", "-", "by", "-", "line", "gdf", "-", "file", "write", "function", "Parameters", "----------", "gdf", ":", "numpy", ".", "ndarray", "Column", "0", "is", "gids", "columns", "1", ":", "are", "values", ".", "fname", ":", "str", "Path", "to",...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L67-L91
INM-6/hybridLFPy
hybridLFPy/helpers.py
load_h5_data
def load_h5_data(path='', data_type='LFP', y=None, electrode=None, warmup=0., scaling=1.): """ Function loading results from hdf5 file Parameters ---------- path : str Path to hdf5-file data_type : str Signal types in ['CSD' , 'LFP', 'CSDsum', 'LFPsum']. y : None or str Name of population. electrode : None or int TODO: update, electrode is NOT USED warmup : float Lower cutoff of time series to remove possible transients scaling : float, Scaling factor for population size that determines the amount of loaded single-cell signals Returns ---------- numpy.ndarray [electrode id, compound signal] if `y` is None numpy.ndarray [cell id, electrode, single-cell signal] otherwise """ assert y is not None or electrode is not None if y is not None: f = h5py.File(os.path.join(path, '%s_%ss.h5' %(y,data_type))) data = f['data'].value[:,:, warmup:] if scaling != 1.: np.random.shuffle(data) num_cells = int(len(data)*scaling) data = data[:num_cells,:, warmup:] else: f = h5py.File(os.path.join(path, '%ssum.h5' %data_type)) data = f['data'].value[:, warmup:] return data
python
def load_h5_data(path='', data_type='LFP', y=None, electrode=None, warmup=0., scaling=1.): """ Function loading results from hdf5 file Parameters ---------- path : str Path to hdf5-file data_type : str Signal types in ['CSD' , 'LFP', 'CSDsum', 'LFPsum']. y : None or str Name of population. electrode : None or int TODO: update, electrode is NOT USED warmup : float Lower cutoff of time series to remove possible transients scaling : float, Scaling factor for population size that determines the amount of loaded single-cell signals Returns ---------- numpy.ndarray [electrode id, compound signal] if `y` is None numpy.ndarray [cell id, electrode, single-cell signal] otherwise """ assert y is not None or electrode is not None if y is not None: f = h5py.File(os.path.join(path, '%s_%ss.h5' %(y,data_type))) data = f['data'].value[:,:, warmup:] if scaling != 1.: np.random.shuffle(data) num_cells = int(len(data)*scaling) data = data[:num_cells,:, warmup:] else: f = h5py.File(os.path.join(path, '%ssum.h5' %data_type)) data = f['data'].value[:, warmup:] return data
[ "def", "load_h5_data", "(", "path", "=", "''", ",", "data_type", "=", "'LFP'", ",", "y", "=", "None", ",", "electrode", "=", "None", ",", "warmup", "=", "0.", ",", "scaling", "=", "1.", ")", ":", "assert", "y", "is", "not", "None", "or", "electrode...
Function loading results from hdf5 file Parameters ---------- path : str Path to hdf5-file data_type : str Signal types in ['CSD' , 'LFP', 'CSDsum', 'LFPsum']. y : None or str Name of population. electrode : None or int TODO: update, electrode is NOT USED warmup : float Lower cutoff of time series to remove possible transients scaling : float, Scaling factor for population size that determines the amount of loaded single-cell signals Returns ---------- numpy.ndarray [electrode id, compound signal] if `y` is None numpy.ndarray [cell id, electrode, single-cell signal] otherwise
[ "Function", "loading", "results", "from", "hdf5", "file", "Parameters", "----------", "path", ":", "str", "Path", "to", "hdf5", "-", "file", "data_type", ":", "str", "Signal", "types", "in", "[", "CSD", "LFP", "CSDsum", "LFPsum", "]", ".", "y", ":", "Non...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L94-L138
INM-6/hybridLFPy
hybridLFPy/helpers.py
dump_dict_of_nested_lists_to_h5
def dump_dict_of_nested_lists_to_h5(fname, data): """ Take nested list structure and dump it in hdf5 file. Parameters ---------- fname : str Filename data : dict(list(numpy.ndarray)) Dict of nested lists with variable len arrays. Returns ------- None """ # Open file print('writing to file: %s' % fname) f = h5py.File(fname) # Iterate over values for i, ivalue in list(data.items()): igrp = f.create_group(str(i)) for j, jvalue in enumerate(ivalue): jgrp = igrp.create_group(str(j)) for k, kvalue in enumerate(jvalue): if kvalue.size > 0: dset = jgrp.create_dataset(str(k), data=kvalue, compression='gzip') else: dset = jgrp.create_dataset(str(k), data=kvalue, maxshape=(None, ), compression='gzip') # Close file f.close()
python
def dump_dict_of_nested_lists_to_h5(fname, data): """ Take nested list structure and dump it in hdf5 file. Parameters ---------- fname : str Filename data : dict(list(numpy.ndarray)) Dict of nested lists with variable len arrays. Returns ------- None """ # Open file print('writing to file: %s' % fname) f = h5py.File(fname) # Iterate over values for i, ivalue in list(data.items()): igrp = f.create_group(str(i)) for j, jvalue in enumerate(ivalue): jgrp = igrp.create_group(str(j)) for k, kvalue in enumerate(jvalue): if kvalue.size > 0: dset = jgrp.create_dataset(str(k), data=kvalue, compression='gzip') else: dset = jgrp.create_dataset(str(k), data=kvalue, maxshape=(None, ), compression='gzip') # Close file f.close()
[ "def", "dump_dict_of_nested_lists_to_h5", "(", "fname", ",", "data", ")", ":", "# Open file", "print", "(", "'writing to file: %s'", "%", "fname", ")", "f", "=", "h5py", ".", "File", "(", "fname", ")", "# Iterate over values", "for", "i", ",", "ivalue", "in", ...
Take nested list structure and dump it in hdf5 file. Parameters ---------- fname : str Filename data : dict(list(numpy.ndarray)) Dict of nested lists with variable len arrays. Returns ------- None
[ "Take", "nested", "list", "structure", "and", "dump", "it", "in", "hdf5", "file", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L141-L176
INM-6/hybridLFPy
hybridLFPy/helpers.py
load_dict_of_nested_lists_from_h5
def load_dict_of_nested_lists_from_h5(fname, toplevelkeys=None): """ Load nested list structure from hdf5 file Parameters ---------- fname : str Filename toplevelkeys : None or iterable, Load a two(default) or three-layered structure. Returns ------- dict(list(numpy.ndarray)) dictionary of nested lists with variable length array data. """ # Container: data = {} # Open file object f = h5py.File(fname, 'r') # Iterate over partial dataset if toplevelkeys is not None: for i in toplevelkeys: ivalue = f[str(i)] data[i] = [] for j, jvalue in enumerate(ivalue.values()): data[int(i)].append([]) for k, kvalue in enumerate(jvalue.values()): data[i][j].append(kvalue.value) else: for i, ivalue in list(f.items()): i = int(i) data[i] = [] for j, jvalue in enumerate(ivalue.values()): data[i].append([]) for k, kvalue in enumerate(jvalue.values()): data[i][j].append(kvalue.value) # Close dataset f.close() return data
python
def load_dict_of_nested_lists_from_h5(fname, toplevelkeys=None): """ Load nested list structure from hdf5 file Parameters ---------- fname : str Filename toplevelkeys : None or iterable, Load a two(default) or three-layered structure. Returns ------- dict(list(numpy.ndarray)) dictionary of nested lists with variable length array data. """ # Container: data = {} # Open file object f = h5py.File(fname, 'r') # Iterate over partial dataset if toplevelkeys is not None: for i in toplevelkeys: ivalue = f[str(i)] data[i] = [] for j, jvalue in enumerate(ivalue.values()): data[int(i)].append([]) for k, kvalue in enumerate(jvalue.values()): data[i][j].append(kvalue.value) else: for i, ivalue in list(f.items()): i = int(i) data[i] = [] for j, jvalue in enumerate(ivalue.values()): data[i].append([]) for k, kvalue in enumerate(jvalue.values()): data[i][j].append(kvalue.value) # Close dataset f.close() return data
[ "def", "load_dict_of_nested_lists_from_h5", "(", "fname", ",", "toplevelkeys", "=", "None", ")", ":", "# Container:", "data", "=", "{", "}", "# Open file object", "f", "=", "h5py", ".", "File", "(", "fname", ",", "'r'", ")", "# Iterate over partial dataset", "if...
Load nested list structure from hdf5 file Parameters ---------- fname : str Filename toplevelkeys : None or iterable, Load a two(default) or three-layered structure. Returns ------- dict(list(numpy.ndarray)) dictionary of nested lists with variable length array data.
[ "Load", "nested", "list", "structure", "from", "hdf5", "file" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L179-L226
INM-6/hybridLFPy
hybridLFPy/helpers.py
setup_file_dest
def setup_file_dest(params, clearDestination=True): """ Function to set up the file catalog structure for simulation output Parameters ---------- params : object e.g., `cellsim16popsParams.multicompartment_params()` clear_dest : bool Savefolder will be cleared if already existing. Returns ------- None """ if RANK == 0: if not os.path.isdir(params.savefolder): os.mkdir(params.savefolder) assert(os.path.isdir(params.savefolder)) else: if clearDestination: print('removing folder tree %s' % params.savefolder) while os.path.isdir(params.savefolder): try: os.system('find %s -delete' % params.savefolder) except: shutil.rmtree(params.savefolder) os.mkdir(params.savefolder) assert(os.path.isdir(params.savefolder)) if not os.path.isdir(params.sim_scripts_path): print('creating %s' % params.sim_scripts_path) os.mkdir(params.sim_scripts_path) if not os.path.isdir(params.cells_path): print('creating %s' % params.cells_path) os.mkdir(params.cells_path) if not os.path.isdir(params.figures_path): print('creating %s' % params.figures_path) os.mkdir(params.figures_path) if not os.path.isdir(params.populations_path): print('creating %s' % params.populations_path) os.mkdir(params.populations_path) try: if not os.path.isdir(params.raw_nest_output_path): print('creating %s' % params.raw_nest_output_path) os.mkdir(params.raw_nest_output_path) except: pass if not os.path.isdir(params.spike_output_path): print('creating %s' % params.spike_output_path) os.mkdir(params.spike_output_path) for f in ['cellsim16popsParams.py', 'cellsim16pops.py', 'example_brunel.py', 'brunel_alpha_nest.py', 'mesocircuit.sli', 'mesocircuit_LFP_model.py', 'binzegger_connectivity_table.json', 'nest_simulation.py', 'microcircuit.sli']: if os.path.isfile(f): if not os.path.exists(os.path.join(params.sim_scripts_path, f)): shutil.copy(f, os.path.join(params.sim_scripts_path, f)) os.chmod(os.path.join(params.sim_scripts_path, f), stat.S_IREAD) COMM.Barrier()
python
def setup_file_dest(params, clearDestination=True): """ Function to set up the file catalog structure for simulation output Parameters ---------- params : object e.g., `cellsim16popsParams.multicompartment_params()` clear_dest : bool Savefolder will be cleared if already existing. Returns ------- None """ if RANK == 0: if not os.path.isdir(params.savefolder): os.mkdir(params.savefolder) assert(os.path.isdir(params.savefolder)) else: if clearDestination: print('removing folder tree %s' % params.savefolder) while os.path.isdir(params.savefolder): try: os.system('find %s -delete' % params.savefolder) except: shutil.rmtree(params.savefolder) os.mkdir(params.savefolder) assert(os.path.isdir(params.savefolder)) if not os.path.isdir(params.sim_scripts_path): print('creating %s' % params.sim_scripts_path) os.mkdir(params.sim_scripts_path) if not os.path.isdir(params.cells_path): print('creating %s' % params.cells_path) os.mkdir(params.cells_path) if not os.path.isdir(params.figures_path): print('creating %s' % params.figures_path) os.mkdir(params.figures_path) if not os.path.isdir(params.populations_path): print('creating %s' % params.populations_path) os.mkdir(params.populations_path) try: if not os.path.isdir(params.raw_nest_output_path): print('creating %s' % params.raw_nest_output_path) os.mkdir(params.raw_nest_output_path) except: pass if not os.path.isdir(params.spike_output_path): print('creating %s' % params.spike_output_path) os.mkdir(params.spike_output_path) for f in ['cellsim16popsParams.py', 'cellsim16pops.py', 'example_brunel.py', 'brunel_alpha_nest.py', 'mesocircuit.sli', 'mesocircuit_LFP_model.py', 'binzegger_connectivity_table.json', 'nest_simulation.py', 'microcircuit.sli']: if os.path.isfile(f): if not os.path.exists(os.path.join(params.sim_scripts_path, f)): shutil.copy(f, os.path.join(params.sim_scripts_path, f)) os.chmod(os.path.join(params.sim_scripts_path, f), stat.S_IREAD) COMM.Barrier()
[ "def", "setup_file_dest", "(", "params", ",", "clearDestination", "=", "True", ")", ":", "if", "RANK", "==", "0", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "params", ".", "savefolder", ")", ":", "os", ".", "mkdir", "(", "params", ".", ...
Function to set up the file catalog structure for simulation output Parameters ---------- params : object e.g., `cellsim16popsParams.multicompartment_params()` clear_dest : bool Savefolder will be cleared if already existing. Returns ------- None
[ "Function", "to", "set", "up", "the", "file", "catalog", "structure", "for", "simulation", "output", "Parameters", "----------", "params", ":", "object", "e", ".", "g", ".", "cellsim16popsParams", ".", "multicompartment_params", "()", "clear_dest", ":", "bool", ...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L229-L304
INM-6/hybridLFPy
hybridLFPy/helpers.py
calculate_fft
def calculate_fft(data, tbin): """ Function to calculate the Fourier transform of data. Parameters ---------- data : numpy.ndarray 1D or 2D array containing time series. tbin : float Bin size of time series (in ms). Returns ------- freqs : numpy.ndarray Frequency axis of signal in Fourier space. fft : numpy.ndarray Signal in Fourier space. """ if len(np.shape(data)) > 1: n = len(data[0]) return np.fft.fftfreq(n, tbin * 1e-3), np.fft.fft(data, axis=1) else: n = len(data) return np.fft.fftfreq(n, tbin * 1e-3), np.fft.fft(data)
python
def calculate_fft(data, tbin): """ Function to calculate the Fourier transform of data. Parameters ---------- data : numpy.ndarray 1D or 2D array containing time series. tbin : float Bin size of time series (in ms). Returns ------- freqs : numpy.ndarray Frequency axis of signal in Fourier space. fft : numpy.ndarray Signal in Fourier space. """ if len(np.shape(data)) > 1: n = len(data[0]) return np.fft.fftfreq(n, tbin * 1e-3), np.fft.fft(data, axis=1) else: n = len(data) return np.fft.fftfreq(n, tbin * 1e-3), np.fft.fft(data)
[ "def", "calculate_fft", "(", "data", ",", "tbin", ")", ":", "if", "len", "(", "np", ".", "shape", "(", "data", ")", ")", ">", "1", ":", "n", "=", "len", "(", "data", "[", "0", "]", ")", "return", "np", ".", "fft", ".", "fftfreq", "(", "n", ...
Function to calculate the Fourier transform of data. Parameters ---------- data : numpy.ndarray 1D or 2D array containing time series. tbin : float Bin size of time series (in ms). Returns ------- freqs : numpy.ndarray Frequency axis of signal in Fourier space. fft : numpy.ndarray Signal in Fourier space.
[ "Function", "to", "calculate", "the", "Fourier", "transform", "of", "data", ".", "Parameters", "----------", "data", ":", "numpy", ".", "ndarray", "1D", "or", "2D", "array", "containing", "time", "series", ".", "tbin", ":", "float", "Bin", "size", "of", "t...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L312-L338
INM-6/hybridLFPy
hybridLFPy/helpers.py
centralize
def centralize(data, time=False, units=False): """ Function to subtract the mean across time and/or across units from data Parameters ---------- data : numpy.ndarray 1D or 2D array containing time series, 1st index: unit, 2nd index: time time : bool True: subtract mean across time. units : bool True: subtract mean across units. Returns ------- numpy.ndarray 1D or 0D array of centralized signal. """ assert(time is not False or units is not False) res = copy.copy(data) if time is True: res = np.array([x - np.mean(x) for x in res]) if units is True: res = np.array(res - np.mean(res, axis=0)) return res
python
def centralize(data, time=False, units=False): """ Function to subtract the mean across time and/or across units from data Parameters ---------- data : numpy.ndarray 1D or 2D array containing time series, 1st index: unit, 2nd index: time time : bool True: subtract mean across time. units : bool True: subtract mean across units. Returns ------- numpy.ndarray 1D or 0D array of centralized signal. """ assert(time is not False or units is not False) res = copy.copy(data) if time is True: res = np.array([x - np.mean(x) for x in res]) if units is True: res = np.array(res - np.mean(res, axis=0)) return res
[ "def", "centralize", "(", "data", ",", "time", "=", "False", ",", "units", "=", "False", ")", ":", "assert", "(", "time", "is", "not", "False", "or", "units", "is", "not", "False", ")", "res", "=", "copy", ".", "copy", "(", "data", ")", "if", "ti...
Function to subtract the mean across time and/or across units from data Parameters ---------- data : numpy.ndarray 1D or 2D array containing time series, 1st index: unit, 2nd index: time time : bool True: subtract mean across time. units : bool True: subtract mean across units. Returns ------- numpy.ndarray 1D or 0D array of centralized signal.
[ "Function", "to", "subtract", "the", "mean", "across", "time", "and", "/", "or", "across", "units", "from", "data", "Parameters", "----------", "data", ":", "numpy", ".", "ndarray", "1D", "or", "2D", "array", "containing", "time", "series", "1st", "index", ...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L345-L375
INM-6/hybridLFPy
hybridLFPy/helpers.py
normalize
def normalize(data): """ Function to normalize data to have mean 0 and unity standard deviation (also called z-transform) Parameters ---------- data : numpy.ndarray Returns ------- numpy.ndarray z-transform of input array """ data = data.astype(float) data -= data.mean() return data / data.std()
python
def normalize(data): """ Function to normalize data to have mean 0 and unity standard deviation (also called z-transform) Parameters ---------- data : numpy.ndarray Returns ------- numpy.ndarray z-transform of input array """ data = data.astype(float) data -= data.mean() return data / data.std()
[ "def", "normalize", "(", "data", ")", ":", "data", "=", "data", ".", "astype", "(", "float", ")", "data", "-=", "data", ".", "mean", "(", ")", "return", "data", "/", "data", ".", "std", "(", ")" ]
Function to normalize data to have mean 0 and unity standard deviation (also called z-transform) Parameters ---------- data : numpy.ndarray Returns ------- numpy.ndarray z-transform of input array
[ "Function", "to", "normalize", "data", "to", "have", "mean", "0", "and", "unity", "standard", "deviation", "(", "also", "called", "z", "-", "transform", ")", "Parameters", "----------", "data", ":", "numpy", ".", "ndarray", "Returns", "-------", "numpy", "."...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L378-L398
INM-6/hybridLFPy
hybridLFPy/helpers.py
movav
def movav(y, Dx, dx): """ Moving average rectangular window filter: calculate average of signal y by using sliding rectangular window of size Dx using binsize dx Parameters ---------- y : numpy.ndarray Signal Dx : float Window length of filter. dx : float Bin size of signal sampling. Returns ------- numpy.ndarray Filtered signal. """ if Dx <= dx: return y else: ly = len(y) r = np.zeros(ly) n = np.int(np.round((Dx / dx))) r[0:np.int(n / 2.)] = 1.0 / n r[-np.int(n / 2.)::] = 1.0 / n R = np.fft.fft(r) Y = np.fft.fft(y) yf = np.fft.ifft(Y * R) return yf
python
def movav(y, Dx, dx): """ Moving average rectangular window filter: calculate average of signal y by using sliding rectangular window of size Dx using binsize dx Parameters ---------- y : numpy.ndarray Signal Dx : float Window length of filter. dx : float Bin size of signal sampling. Returns ------- numpy.ndarray Filtered signal. """ if Dx <= dx: return y else: ly = len(y) r = np.zeros(ly) n = np.int(np.round((Dx / dx))) r[0:np.int(n / 2.)] = 1.0 / n r[-np.int(n / 2.)::] = 1.0 / n R = np.fft.fft(r) Y = np.fft.fft(y) yf = np.fft.ifft(Y * R) return yf
[ "def", "movav", "(", "y", ",", "Dx", ",", "dx", ")", ":", "if", "Dx", "<=", "dx", ":", "return", "y", "else", ":", "ly", "=", "len", "(", "y", ")", "r", "=", "np", ".", "zeros", "(", "ly", ")", "n", "=", "np", ".", "int", "(", "np", "."...
Moving average rectangular window filter: calculate average of signal y by using sliding rectangular window of size Dx using binsize dx Parameters ---------- y : numpy.ndarray Signal Dx : float Window length of filter. dx : float Bin size of signal sampling. Returns ------- numpy.ndarray Filtered signal.
[ "Moving", "average", "rectangular", "window", "filter", ":", "calculate", "average", "of", "signal", "y", "by", "using", "sliding", "rectangular", "window", "of", "size", "Dx", "using", "binsize", "dx", "Parameters", "----------", "y", ":", "numpy", ".", "ndar...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L405-L439
INM-6/hybridLFPy
hybridLFPy/helpers.py
decimate
def decimate(x, q=10, n=4, k=0.8, filterfun=ss.cheby1): """ scipy.signal.decimate like downsampling using filtfilt instead of lfilter, and filter coeffs from butterworth or chebyshev type 1. Parameters ---------- x : numpy.ndarray Array to be downsampled along last axis. q : int Downsampling factor. n : int Filter order. k : float Aliasing filter critical frequency Wn will be set as Wn=k/q. filterfun : function `scipy.signal.filter_design.cheby1` or `scipy.signal.filter_design.butter` function Returns ------- numpy.ndarray Array of downsampled signal. """ if not isinstance(q, int): raise TypeError("q must be an integer") if n is None: n = 1 if filterfun == ss.butter: b, a = filterfun(n, k / q) elif filterfun == ss.cheby1: b, a = filterfun(n, 0.05, k / q) else: raise Exception('only ss.butter or ss.cheby1 supported') try: y = ss.filtfilt(b, a, x) except: # Multidim array can only be processed at once for scipy >= 0.9.0 y = [] for data in x: y.append(ss.filtfilt(b, a, data)) y = np.array(y) try: return y[:, ::q] except: return y[::q]
python
def decimate(x, q=10, n=4, k=0.8, filterfun=ss.cheby1): """ scipy.signal.decimate like downsampling using filtfilt instead of lfilter, and filter coeffs from butterworth or chebyshev type 1. Parameters ---------- x : numpy.ndarray Array to be downsampled along last axis. q : int Downsampling factor. n : int Filter order. k : float Aliasing filter critical frequency Wn will be set as Wn=k/q. filterfun : function `scipy.signal.filter_design.cheby1` or `scipy.signal.filter_design.butter` function Returns ------- numpy.ndarray Array of downsampled signal. """ if not isinstance(q, int): raise TypeError("q must be an integer") if n is None: n = 1 if filterfun == ss.butter: b, a = filterfun(n, k / q) elif filterfun == ss.cheby1: b, a = filterfun(n, 0.05, k / q) else: raise Exception('only ss.butter or ss.cheby1 supported') try: y = ss.filtfilt(b, a, x) except: # Multidim array can only be processed at once for scipy >= 0.9.0 y = [] for data in x: y.append(ss.filtfilt(b, a, data)) y = np.array(y) try: return y[:, ::q] except: return y[::q]
[ "def", "decimate", "(", "x", ",", "q", "=", "10", ",", "n", "=", "4", ",", "k", "=", "0.8", ",", "filterfun", "=", "ss", ".", "cheby1", ")", ":", "if", "not", "isinstance", "(", "q", ",", "int", ")", ":", "raise", "TypeError", "(", "\"q must be...
scipy.signal.decimate like downsampling using filtfilt instead of lfilter, and filter coeffs from butterworth or chebyshev type 1. Parameters ---------- x : numpy.ndarray Array to be downsampled along last axis. q : int Downsampling factor. n : int Filter order. k : float Aliasing filter critical frequency Wn will be set as Wn=k/q. filterfun : function `scipy.signal.filter_design.cheby1` or `scipy.signal.filter_design.butter` function Returns ------- numpy.ndarray Array of downsampled signal.
[ "scipy", ".", "signal", ".", "decimate", "like", "downsampling", "using", "filtfilt", "instead", "of", "lfilter", "and", "filter", "coeffs", "from", "butterworth", "or", "chebyshev", "type", "1", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L442-L492
INM-6/hybridLFPy
hybridLFPy/helpers.py
mean
def mean(data, units=False, time=False): """ Function to compute mean of data Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time units : bool Average over units time : bool Average over time Returns ------- if units=False and time=False: error if units=True: 1 dim numpy.ndarray; time series if time=True: 1 dim numpy.ndarray; series of unit means across time if units=True and time=True: float; unit and time mean Examples -------- >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True) array([ 2.5, 3.5, 4.5]) >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True) array([ 2., 5.]) >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True) 3.5 """ assert(units is not False or time is not False) if units is True and time is False: return np.mean(data, axis=0) elif units is False and time is True: return np.mean(data, axis=1) elif units is True and time is True: return np.mean(data)
python
def mean(data, units=False, time=False): """ Function to compute mean of data Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time units : bool Average over units time : bool Average over time Returns ------- if units=False and time=False: error if units=True: 1 dim numpy.ndarray; time series if time=True: 1 dim numpy.ndarray; series of unit means across time if units=True and time=True: float; unit and time mean Examples -------- >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True) array([ 2.5, 3.5, 4.5]) >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True) array([ 2., 5.]) >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True) 3.5 """ assert(units is not False or time is not False) if units is True and time is False: return np.mean(data, axis=0) elif units is False and time is True: return np.mean(data, axis=1) elif units is True and time is True: return np.mean(data)
[ "def", "mean", "(", "data", ",", "units", "=", "False", ",", "time", "=", "False", ")", ":", "assert", "(", "units", "is", "not", "False", "or", "time", "is", "not", "False", ")", "if", "units", "is", "True", "and", "time", "is", "False", ":", "r...
Function to compute mean of data Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time units : bool Average over units time : bool Average over time Returns ------- if units=False and time=False: error if units=True: 1 dim numpy.ndarray; time series if time=True: 1 dim numpy.ndarray; series of unit means across time if units=True and time=True: float; unit and time mean Examples -------- >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True) array([ 2.5, 3.5, 4.5]) >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True) array([ 2., 5.]) >>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True) 3.5
[ "Function", "to", "compute", "mean", "of", "data" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L500-L546
INM-6/hybridLFPy
hybridLFPy/helpers.py
powerspec
def powerspec(data, tbin, Df=None, units=False, pointProcess=False): """ Calculate (smoothed) power spectra of all timeseries in data. If units=True, power spectra are averaged across units. Note that averaging is done on power spectra rather than data. If pointProcess is True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. tbin : float Binsize in ms. Df : float/None, Window width of sliding rectangular filter (smoothing), None is no smoothing. units : bool Average power spectrum. pointProcess : bool If set to True, powerspectrum is normalized to signal length T. Returns ------- freq : tuple numpy.ndarray of frequencies. POW : tuple if units=False: 2 dim numpy.ndarray; 1st axis unit, 2nd axis frequency if units=True: 1 dim numpy.ndarray; frequency series Examples -------- >>> powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,POW) >>> POW.shape Out[2]: (2,len(analog_sig1)) >>> powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df, units=True) Out[1]: (freq,POW) >>> POW.shape Out[2]: (len(analog_sig1),) """ freq, DATA = calculate_fft(data, tbin) df = freq[1] - freq[0] T = tbin * len(freq) POW = np.abs(DATA) ** 2 if Df is not None: POW = [movav(x, Df, df) for x in POW] cut = int(Df / df) freq = freq[cut:] POW = np.array([x[cut:] for x in POW]) POW = np.abs(POW) assert(len(freq) == len(POW[0])) if units is True: POW = mean(POW, units=units) assert(len(freq) == len(POW)) if pointProcess: POW *= 1. / T * 1e3 # Normalization, power independent of T return freq, POW
python
def powerspec(data, tbin, Df=None, units=False, pointProcess=False): """ Calculate (smoothed) power spectra of all timeseries in data. If units=True, power spectra are averaged across units. Note that averaging is done on power spectra rather than data. If pointProcess is True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. tbin : float Binsize in ms. Df : float/None, Window width of sliding rectangular filter (smoothing), None is no smoothing. units : bool Average power spectrum. pointProcess : bool If set to True, powerspectrum is normalized to signal length T. Returns ------- freq : tuple numpy.ndarray of frequencies. POW : tuple if units=False: 2 dim numpy.ndarray; 1st axis unit, 2nd axis frequency if units=True: 1 dim numpy.ndarray; frequency series Examples -------- >>> powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,POW) >>> POW.shape Out[2]: (2,len(analog_sig1)) >>> powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df, units=True) Out[1]: (freq,POW) >>> POW.shape Out[2]: (len(analog_sig1),) """ freq, DATA = calculate_fft(data, tbin) df = freq[1] - freq[0] T = tbin * len(freq) POW = np.abs(DATA) ** 2 if Df is not None: POW = [movav(x, Df, df) for x in POW] cut = int(Df / df) freq = freq[cut:] POW = np.array([x[cut:] for x in POW]) POW = np.abs(POW) assert(len(freq) == len(POW[0])) if units is True: POW = mean(POW, units=units) assert(len(freq) == len(POW)) if pointProcess: POW *= 1. / T * 1e3 # Normalization, power independent of T return freq, POW
[ "def", "powerspec", "(", "data", ",", "tbin", ",", "Df", "=", "None", ",", "units", "=", "False", ",", "pointProcess", "=", "False", ")", ":", "freq", ",", "DATA", "=", "calculate_fft", "(", "data", ",", "tbin", ")", "df", "=", "freq", "[", "1", ...
Calculate (smoothed) power spectra of all timeseries in data. If units=True, power spectra are averaged across units. Note that averaging is done on power spectra rather than data. If pointProcess is True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. tbin : float Binsize in ms. Df : float/None, Window width of sliding rectangular filter (smoothing), None is no smoothing. units : bool Average power spectrum. pointProcess : bool If set to True, powerspectrum is normalized to signal length T. Returns ------- freq : tuple numpy.ndarray of frequencies. POW : tuple if units=False: 2 dim numpy.ndarray; 1st axis unit, 2nd axis frequency if units=True: 1 dim numpy.ndarray; frequency series Examples -------- >>> powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,POW) >>> POW.shape Out[2]: (2,len(analog_sig1)) >>> powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df, units=True) Out[1]: (freq,POW) >>> POW.shape Out[2]: (len(analog_sig1),)
[ "Calculate", "(", "smoothed", ")", "power", "spectra", "of", "all", "timeseries", "in", "data", ".", "If", "units", "=", "True", "power", "spectra", "are", "averaged", "across", "units", ".", "Note", "that", "averaging", "is", "done", "on", "power", "spect...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L656-L722
INM-6/hybridLFPy
hybridLFPy/helpers.py
compound_powerspec
def compound_powerspec(data, tbin, Df=None, pointProcess=False): """ Calculate the power spectrum of the compound/sum signal. data is first summed across units, then the power spectrum is calculated. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing pointProcess : bool, if set to True, powerspectrum is normalized to signal length T Returns ------- freq : tuple numpy.ndarray of frequencies POW : tuple 1 dim numpy.ndarray, frequency series Examples -------- >>> compound_powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,POW) >>> POW.shape Out[2]: (len(analog_sig1),) """ return powerspec([np.sum(data, axis=0)], tbin, Df=Df, units=True, pointProcess=pointProcess)
python
def compound_powerspec(data, tbin, Df=None, pointProcess=False): """ Calculate the power spectrum of the compound/sum signal. data is first summed across units, then the power spectrum is calculated. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing pointProcess : bool, if set to True, powerspectrum is normalized to signal length T Returns ------- freq : tuple numpy.ndarray of frequencies POW : tuple 1 dim numpy.ndarray, frequency series Examples -------- >>> compound_powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,POW) >>> POW.shape Out[2]: (len(analog_sig1),) """ return powerspec([np.sum(data, axis=0)], tbin, Df=Df, units=True, pointProcess=pointProcess)
[ "def", "compound_powerspec", "(", "data", ",", "tbin", ",", "Df", "=", "None", ",", "pointProcess", "=", "False", ")", ":", "return", "powerspec", "(", "[", "np", ".", "sum", "(", "data", ",", "axis", "=", "0", ")", "]", ",", "tbin", ",", "Df", "...
Calculate the power spectrum of the compound/sum signal. data is first summed across units, then the power spectrum is calculated. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing pointProcess : bool, if set to True, powerspectrum is normalized to signal length T Returns ------- freq : tuple numpy.ndarray of frequencies POW : tuple 1 dim numpy.ndarray, frequency series Examples -------- >>> compound_powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,POW) >>> POW.shape Out[2]: (len(analog_sig1),)
[ "Calculate", "the", "power", "spectrum", "of", "the", "compound", "/", "sum", "signal", ".", "data", "is", "first", "summed", "across", "units", "then", "the", "power", "spectrum", "is", "calculated", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L725-L766
INM-6/hybridLFPy
hybridLFPy/helpers.py
crossspec
def crossspec(data, tbin, Df=None, units=False, pointProcess=False): """ Calculate (smoothed) cross spectra of data. If `units`=True, cross spectra are averaged across units. Note that averaging is done on cross spectra rather than data. Cross spectra are normalized by the length T of the time series -> no scaling with T. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing units : bool, average cross spectrum pointProcess : bool, if set to True, cross spectrum is normalized to signal length T Returns ------- freq : tuple numpy.ndarray of frequencies CRO : tuple if `units`=True: 1 dim numpy.ndarray; frequency series if `units`=False:3 dim numpy.ndarray; 1st axis first unit, 2nd axis second unit, 3rd axis frequency Examples -------- >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (2,2,len(analog_sig1)) >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df, units=True) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (len(analog_sig1),) """ N = len(data) if units is True: # smoothing and normalization take place in powerspec # and compound_powerspec freq, POW = powerspec(data, tbin, Df=Df, units=True) freq_com, CPOW = compound_powerspec(data, tbin, Df=Df) assert(len(freq) == len(freq_com)) assert(np.min(freq) == np.min(freq_com)) assert(np.max(freq) == np.max(freq_com)) CRO = 1. / (1. * N * (N - 1.)) * (CPOW - 1. 
* N * POW) assert(len(freq) == len(CRO)) else: freq, DATA = calculate_fft(data, tbin) T = tbin * len(freq) df = freq[1] - freq[0] if Df is not None: cut = int(Df / df) freq = freq[cut:] CRO = np.zeros((N, N, len(freq)), dtype=complex) for i in range(N): for j in range(i + 1): tempij = DATA[i] * DATA[j].conj() if Df is not None: tempij = movav(tempij, Df, df)[cut:] CRO[i, j] = tempij CRO[j, i] = CRO[i, j].conj() assert(len(freq) == len(CRO[0, 0])) if pointProcess: CRO *= 1. / T * 1e3 # normalization return freq, CRO
python
def crossspec(data, tbin, Df=None, units=False, pointProcess=False): """ Calculate (smoothed) cross spectra of data. If `units`=True, cross spectra are averaged across units. Note that averaging is done on cross spectra rather than data. Cross spectra are normalized by the length T of the time series -> no scaling with T. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing units : bool, average cross spectrum pointProcess : bool, if set to True, cross spectrum is normalized to signal length T Returns ------- freq : tuple numpy.ndarray of frequencies CRO : tuple if `units`=True: 1 dim numpy.ndarray; frequency series if `units`=False:3 dim numpy.ndarray; 1st axis first unit, 2nd axis second unit, 3rd axis frequency Examples -------- >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (2,2,len(analog_sig1)) >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df, units=True) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (len(analog_sig1),) """ N = len(data) if units is True: # smoothing and normalization take place in powerspec # and compound_powerspec freq, POW = powerspec(data, tbin, Df=Df, units=True) freq_com, CPOW = compound_powerspec(data, tbin, Df=Df) assert(len(freq) == len(freq_com)) assert(np.min(freq) == np.min(freq_com)) assert(np.max(freq) == np.max(freq_com)) CRO = 1. / (1. * N * (N - 1.)) * (CPOW - 1. 
* N * POW) assert(len(freq) == len(CRO)) else: freq, DATA = calculate_fft(data, tbin) T = tbin * len(freq) df = freq[1] - freq[0] if Df is not None: cut = int(Df / df) freq = freq[cut:] CRO = np.zeros((N, N, len(freq)), dtype=complex) for i in range(N): for j in range(i + 1): tempij = DATA[i] * DATA[j].conj() if Df is not None: tempij = movav(tempij, Df, df)[cut:] CRO[i, j] = tempij CRO[j, i] = CRO[i, j].conj() assert(len(freq) == len(CRO[0, 0])) if pointProcess: CRO *= 1. / T * 1e3 # normalization return freq, CRO
[ "def", "crossspec", "(", "data", ",", "tbin", ",", "Df", "=", "None", ",", "units", "=", "False", ",", "pointProcess", "=", "False", ")", ":", "N", "=", "len", "(", "data", ")", "if", "units", "is", "True", ":", "# smoothing and normalization take place ...
Calculate (smoothed) cross spectra of data. If `units`=True, cross spectra are averaged across units. Note that averaging is done on cross spectra rather than data. Cross spectra are normalized by the length T of the time series -> no scaling with T. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing units : bool, average cross spectrum pointProcess : bool, if set to True, cross spectrum is normalized to signal length T Returns ------- freq : tuple numpy.ndarray of frequencies CRO : tuple if `units`=True: 1 dim numpy.ndarray; frequency series if `units`=False:3 dim numpy.ndarray; 1st axis first unit, 2nd axis second unit, 3rd axis frequency Examples -------- >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (2,2,len(analog_sig1)) >>> crossspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df, units=True) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (len(analog_sig1),)
[ "Calculate", "(", "smoothed", ")", "cross", "spectra", "of", "data", ".", "If", "units", "=", "True", "cross", "spectra", "are", "averaged", "across", "units", ".", "Note", "that", "averaging", "is", "done", "on", "cross", "spectra", "rather", "than", "dat...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L769-L850
INM-6/hybridLFPy
hybridLFPy/helpers.py
compound_crossspec
def compound_crossspec(a_data, tbin, Df=None, pointProcess=False): """ Calculate cross spectra of compound signals. a_data is a list of datasets (a_data = [data1,data2,...]). For each dataset in a_data, the compound signal is calculated and the crossspectra between these compound signals is computed. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- a_data : list of numpy.ndarrays Array: 1st axis unit, 2nd axis time. tbin : float Binsize in ms. Df : float/None, Window width of sliding rectangular filter (smoothing), None -> no smoothing. pointProcess : bool If set to True, crossspectrum is normalized to signal length `T` Returns ------- freq : tuple numpy.ndarray of frequencies. CRO : tuple 3 dim numpy.ndarray; 1st axis first compound signal, 2nd axis second compound signal, 3rd axis frequency. Examples -------- >>> compound_crossspec([np.array([analog_sig1, analog_sig2]), np.array([analog_sig3,analog_sig4])], tbin, Df=Df) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (2,2,len(analog_sig1)) """ a_mdata = [] for data in a_data: a_mdata.append(np.sum(data, axis=0)) # calculate compound signals return crossspec(np.array(a_mdata), tbin, Df, units=False, pointProcess=pointProcess)
python
def compound_crossspec(a_data, tbin, Df=None, pointProcess=False): """ Calculate cross spectra of compound signals. a_data is a list of datasets (a_data = [data1,data2,...]). For each dataset in a_data, the compound signal is calculated and the crossspectra between these compound signals is computed. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- a_data : list of numpy.ndarrays Array: 1st axis unit, 2nd axis time. tbin : float Binsize in ms. Df : float/None, Window width of sliding rectangular filter (smoothing), None -> no smoothing. pointProcess : bool If set to True, crossspectrum is normalized to signal length `T` Returns ------- freq : tuple numpy.ndarray of frequencies. CRO : tuple 3 dim numpy.ndarray; 1st axis first compound signal, 2nd axis second compound signal, 3rd axis frequency. Examples -------- >>> compound_crossspec([np.array([analog_sig1, analog_sig2]), np.array([analog_sig3,analog_sig4])], tbin, Df=Df) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (2,2,len(analog_sig1)) """ a_mdata = [] for data in a_data: a_mdata.append(np.sum(data, axis=0)) # calculate compound signals return crossspec(np.array(a_mdata), tbin, Df, units=False, pointProcess=pointProcess)
[ "def", "compound_crossspec", "(", "a_data", ",", "tbin", ",", "Df", "=", "None", ",", "pointProcess", "=", "False", ")", ":", "a_mdata", "=", "[", "]", "for", "data", "in", "a_data", ":", "a_mdata", ".", "append", "(", "np", ".", "sum", "(", "data", ...
Calculate cross spectra of compound signals. a_data is a list of datasets (a_data = [data1,data2,...]). For each dataset in a_data, the compound signal is calculated and the crossspectra between these compound signals is computed. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- a_data : list of numpy.ndarrays Array: 1st axis unit, 2nd axis time. tbin : float Binsize in ms. Df : float/None, Window width of sliding rectangular filter (smoothing), None -> no smoothing. pointProcess : bool If set to True, crossspectrum is normalized to signal length `T` Returns ------- freq : tuple numpy.ndarray of frequencies. CRO : tuple 3 dim numpy.ndarray; 1st axis first compound signal, 2nd axis second compound signal, 3rd axis frequency. Examples -------- >>> compound_crossspec([np.array([analog_sig1, analog_sig2]), np.array([analog_sig3,analog_sig4])], tbin, Df=Df) Out[1]: (freq,CRO) >>> CRO.shape Out[2]: (2,2,len(analog_sig1))
[ "Calculate", "cross", "spectra", "of", "compound", "signals", ".", "a_data", "is", "a", "list", "of", "datasets", "(", "a_data", "=", "[", "data1", "data2", "...", "]", ")", ".", "For", "each", "dataset", "in", "a_data", "the", "compound", "signal", "is"...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L853-L899
INM-6/hybridLFPy
hybridLFPy/helpers.py
autocorrfunc
def autocorrfunc(freq, power): """ Calculate autocorrelation function(s) for given power spectrum/spectra. Parameters ---------- freq : numpy.ndarray 1 dimensional array of frequencies. power : numpy.ndarray 2 dimensional power spectra, 1st axis units, 2nd axis frequencies. Returns ------- time : tuple 1 dim numpy.ndarray of times. autof : tuple 2 dim numpy.ndarray; autocorrelation functions, 1st axis units, 2nd axis times. """ tbin = 1. / (2. * np.max(freq)) * 1e3 # tbin in ms time = np.arange(-len(freq) / 2. + 1, len(freq) / 2. + 1) * tbin # T = max(time) multidata = False if len(np.shape(power)) > 1: multidata = True if multidata: N = len(power) autof = np.zeros((N, len(freq))) for i in range(N): raw_autof = np.real(np.fft.ifft(power[i])) mid = int(len(raw_autof) / 2.) autof[i] = np.hstack([raw_autof[mid + 1:], raw_autof[:mid + 1]]) assert(len(time) == len(autof[0])) else: raw_autof = np.real(np.fft.ifft(power)) mid = int(len(raw_autof) / 2.) autof = np.hstack([raw_autof[mid + 1:], raw_autof[:mid + 1]]) assert(len(time) == len(autof)) # autof *= T*1e-3 # normalization is done in powerspec() return time, autof
python
def autocorrfunc(freq, power): """ Calculate autocorrelation function(s) for given power spectrum/spectra. Parameters ---------- freq : numpy.ndarray 1 dimensional array of frequencies. power : numpy.ndarray 2 dimensional power spectra, 1st axis units, 2nd axis frequencies. Returns ------- time : tuple 1 dim numpy.ndarray of times. autof : tuple 2 dim numpy.ndarray; autocorrelation functions, 1st axis units, 2nd axis times. """ tbin = 1. / (2. * np.max(freq)) * 1e3 # tbin in ms time = np.arange(-len(freq) / 2. + 1, len(freq) / 2. + 1) * tbin # T = max(time) multidata = False if len(np.shape(power)) > 1: multidata = True if multidata: N = len(power) autof = np.zeros((N, len(freq))) for i in range(N): raw_autof = np.real(np.fft.ifft(power[i])) mid = int(len(raw_autof) / 2.) autof[i] = np.hstack([raw_autof[mid + 1:], raw_autof[:mid + 1]]) assert(len(time) == len(autof[0])) else: raw_autof = np.real(np.fft.ifft(power)) mid = int(len(raw_autof) / 2.) autof = np.hstack([raw_autof[mid + 1:], raw_autof[:mid + 1]]) assert(len(time) == len(autof)) # autof *= T*1e-3 # normalization is done in powerspec() return time, autof
[ "def", "autocorrfunc", "(", "freq", ",", "power", ")", ":", "tbin", "=", "1.", "/", "(", "2.", "*", "np", ".", "max", "(", "freq", ")", ")", "*", "1e3", "# tbin in ms", "time", "=", "np", ".", "arange", "(", "-", "len", "(", "freq", ")", "/", ...
Calculate autocorrelation function(s) for given power spectrum/spectra. Parameters ---------- freq : numpy.ndarray 1 dimensional array of frequencies. power : numpy.ndarray 2 dimensional power spectra, 1st axis units, 2nd axis frequencies. Returns ------- time : tuple 1 dim numpy.ndarray of times. autof : tuple 2 dim numpy.ndarray; autocorrelation functions, 1st axis units, 2nd axis times.
[ "Calculate", "autocorrelation", "function", "(", "s", ")", "for", "given", "power", "spectrum", "/", "spectra", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L902-L944
INM-6/hybridLFPy
hybridLFPy/helpers.py
crosscorrfunc
def crosscorrfunc(freq, cross): """ Calculate crosscorrelation function(s) for given cross spectra. Parameters ---------- freq : numpy.ndarray 1 dimensional array of frequencies. cross : numpy.ndarray 2 dimensional array of cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies. Returns ------- time : tuple 1 dim numpy.ndarray of times. crossf : tuple 3 dim numpy.ndarray, crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times. """ tbin = 1. / (2. * np.max(freq)) * 1e3 # tbin in ms time = np.arange(-len(freq) / 2. + 1, len(freq) / 2. + 1) * tbin # T = max(time) multidata = False # check whether cross contains many cross spectra if len(np.shape(cross)) > 1: multidata = True if multidata: N = len(cross) crossf = np.zeros((N, N, len(freq))) for i in range(N): for j in range(N): raw_crossf = np.real(np.fft.ifft(cross[i, j])) mid = int(len(raw_crossf) / 2.) crossf[i, j] = np.hstack( [raw_crossf[mid + 1:], raw_crossf[:mid + 1]]) assert(len(time) == len(crossf[0, 0])) else: raw_crossf = np.real(np.fft.ifft(cross)) mid = int(len(raw_crossf) / 2.) crossf = np.hstack([raw_crossf[mid + 1:], raw_crossf[:mid + 1]]) assert(len(time) == len(crossf)) # crossf *= T*1e-3 # normalization happens in cross spectrum return time, crossf
python
def crosscorrfunc(freq, cross): """ Calculate crosscorrelation function(s) for given cross spectra. Parameters ---------- freq : numpy.ndarray 1 dimensional array of frequencies. cross : numpy.ndarray 2 dimensional array of cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies. Returns ------- time : tuple 1 dim numpy.ndarray of times. crossf : tuple 3 dim numpy.ndarray, crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times. """ tbin = 1. / (2. * np.max(freq)) * 1e3 # tbin in ms time = np.arange(-len(freq) / 2. + 1, len(freq) / 2. + 1) * tbin # T = max(time) multidata = False # check whether cross contains many cross spectra if len(np.shape(cross)) > 1: multidata = True if multidata: N = len(cross) crossf = np.zeros((N, N, len(freq))) for i in range(N): for j in range(N): raw_crossf = np.real(np.fft.ifft(cross[i, j])) mid = int(len(raw_crossf) / 2.) crossf[i, j] = np.hstack( [raw_crossf[mid + 1:], raw_crossf[:mid + 1]]) assert(len(time) == len(crossf[0, 0])) else: raw_crossf = np.real(np.fft.ifft(cross)) mid = int(len(raw_crossf) / 2.) crossf = np.hstack([raw_crossf[mid + 1:], raw_crossf[:mid + 1]]) assert(len(time) == len(crossf)) # crossf *= T*1e-3 # normalization happens in cross spectrum return time, crossf
[ "def", "crosscorrfunc", "(", "freq", ",", "cross", ")", ":", "tbin", "=", "1.", "/", "(", "2.", "*", "np", ".", "max", "(", "freq", ")", ")", "*", "1e3", "# tbin in ms", "time", "=", "np", ".", "arange", "(", "-", "len", "(", "freq", ")", "/", ...
Calculate crosscorrelation function(s) for given cross spectra. Parameters ---------- freq : numpy.ndarray 1 dimensional array of frequencies. cross : numpy.ndarray 2 dimensional array of cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies. Returns ------- time : tuple 1 dim numpy.ndarray of times. crossf : tuple 3 dim numpy.ndarray, crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times.
[ "Calculate", "crosscorrelation", "function", "(", "s", ")", "for", "given", "cross", "spectra", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L947-L994
INM-6/hybridLFPy
hybridLFPy/helpers.py
corrcoef
def corrcoef(time, crossf, integration_window=0.): """ Calculate the correlation coefficient for given auto- and crosscorrelation functions. Standard settings yield the zero lag correlation coefficient. Setting integration_window > 0 yields the correlation coefficient of integrated auto- and crosscorrelation functions. The correlation coefficient between a zero signal with any other signal is defined as 0. Parameters ---------- time : numpy.ndarray 1 dim array of times corresponding to signal. crossf : numpy.ndarray Crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times. integration_window: float Size of the integration window. Returns ------- cc : numpy.ndarray 2 dim array of correlation coefficient between two units. """ N = len(crossf) cc = np.zeros(np.shape(crossf)[:-1]) tbin = abs(time[1] - time[0]) lim = int(integration_window / tbin) if len(time)%2 == 0: mid = len(time)/2-1 else: mid = np.floor(len(time)/2.) for i in range(N): ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1]) offset_autoi = np.mean(crossf[i,i][:mid-1]) for j in range(N): cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1]) offset_cross = np.mean(crossf[i,j][:mid-1]) aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1]) offset_autoj = np.mean(crossf[j,j][:mid-1]) if ai > 0. and aj > 0.: cc[i, j] = (cij-offset_cross) / np.sqrt((ai-offset_autoi) * \ (aj-offset_autoj)) else: cc[i, j] = 0. return cc
python
def corrcoef(time, crossf, integration_window=0.): """ Calculate the correlation coefficient for given auto- and crosscorrelation functions. Standard settings yield the zero lag correlation coefficient. Setting integration_window > 0 yields the correlation coefficient of integrated auto- and crosscorrelation functions. The correlation coefficient between a zero signal with any other signal is defined as 0. Parameters ---------- time : numpy.ndarray 1 dim array of times corresponding to signal. crossf : numpy.ndarray Crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times. integration_window: float Size of the integration window. Returns ------- cc : numpy.ndarray 2 dim array of correlation coefficient between two units. """ N = len(crossf) cc = np.zeros(np.shape(crossf)[:-1]) tbin = abs(time[1] - time[0]) lim = int(integration_window / tbin) if len(time)%2 == 0: mid = len(time)/2-1 else: mid = np.floor(len(time)/2.) for i in range(N): ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1]) offset_autoi = np.mean(crossf[i,i][:mid-1]) for j in range(N): cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1]) offset_cross = np.mean(crossf[i,j][:mid-1]) aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1]) offset_autoj = np.mean(crossf[j,j][:mid-1]) if ai > 0. and aj > 0.: cc[i, j] = (cij-offset_cross) / np.sqrt((ai-offset_autoi) * \ (aj-offset_autoj)) else: cc[i, j] = 0. return cc
[ "def", "corrcoef", "(", "time", ",", "crossf", ",", "integration_window", "=", "0.", ")", ":", "N", "=", "len", "(", "crossf", ")", "cc", "=", "np", ".", "zeros", "(", "np", ".", "shape", "(", "crossf", ")", "[", ":", "-", "1", "]", ")", "tbin"...
Calculate the correlation coefficient for given auto- and crosscorrelation functions. Standard settings yield the zero lag correlation coefficient. Setting integration_window > 0 yields the correlation coefficient of integrated auto- and crosscorrelation functions. The correlation coefficient between a zero signal with any other signal is defined as 0. Parameters ---------- time : numpy.ndarray 1 dim array of times corresponding to signal. crossf : numpy.ndarray Crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times. integration_window: float Size of the integration window. Returns ------- cc : numpy.ndarray 2 dim array of correlation coefficient between two units.
[ "Calculate", "the", "correlation", "coefficient", "for", "given", "auto", "-", "and", "crosscorrelation", "functions", ".", "Standard", "settings", "yield", "the", "zero", "lag", "correlation", "coefficient", ".", "Setting", "integration_window", ">", "0", "yields",...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L997-L1048
INM-6/hybridLFPy
hybridLFPy/helpers.py
coherence
def coherence(freq, power, cross): """ Calculate frequency resolved coherence for given power- and crossspectra. Parameters ---------- freq : numpy.ndarray Frequencies, 1 dim array. power : numpy.ndarray Power spectra, 1st axis units, 2nd axis frequencies. cross : numpy.ndarray, Cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies. Returns ------- freq: tuple 1 dim numpy.ndarray of frequencies. coh: tuple ndim 3 numpy.ndarray of coherences, 1st axis units, 2nd axis units, 3rd axis frequencies. """ N = len(power) coh = np.zeros(np.shape(cross)) for i in range(N): for j in range(N): coh[i, j] = cross[i, j] / np.sqrt(power[i] * power[j]) assert(len(freq) == len(coh[0, 0])) return freq, coh
python
def coherence(freq, power, cross): """ Calculate frequency resolved coherence for given power- and crossspectra. Parameters ---------- freq : numpy.ndarray Frequencies, 1 dim array. power : numpy.ndarray Power spectra, 1st axis units, 2nd axis frequencies. cross : numpy.ndarray, Cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies. Returns ------- freq: tuple 1 dim numpy.ndarray of frequencies. coh: tuple ndim 3 numpy.ndarray of coherences, 1st axis units, 2nd axis units, 3rd axis frequencies. """ N = len(power) coh = np.zeros(np.shape(cross)) for i in range(N): for j in range(N): coh[i, j] = cross[i, j] / np.sqrt(power[i] * power[j]) assert(len(freq) == len(coh[0, 0])) return freq, coh
[ "def", "coherence", "(", "freq", ",", "power", ",", "cross", ")", ":", "N", "=", "len", "(", "power", ")", "coh", "=", "np", ".", "zeros", "(", "np", ".", "shape", "(", "cross", ")", ")", "for", "i", "in", "range", "(", "N", ")", ":", "for", ...
Calculate frequency resolved coherence for given power- and crossspectra. Parameters ---------- freq : numpy.ndarray Frequencies, 1 dim array. power : numpy.ndarray Power spectra, 1st axis units, 2nd axis frequencies. cross : numpy.ndarray, Cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies. Returns ------- freq: tuple 1 dim numpy.ndarray of frequencies. coh: tuple ndim 3 numpy.ndarray of coherences, 1st axis units, 2nd axis units, 3rd axis frequencies.
[ "Calculate", "frequency", "resolved", "coherence", "for", "given", "power", "-", "and", "crossspectra", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L1051-L1085
INM-6/hybridLFPy
hybridLFPy/helpers.py
cv
def cv(data, units=False): """ Calculate coefficient of variation (cv) of data. Mean and standard deviation are computed across time. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. units : bool Average `cv`. Returns ------- numpy.ndarray If units=False, series of unit `cv`s. float If units=True, mean `cv` across units. Examples -------- >>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]])) array([ 0.48795004, 0.63887656]) >>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True) 0.56341330073710316 """ mu = mean(data, time=True) var = variance(data, time=True) cv = np.sqrt(var) / mu if units is True: return np.mean(cv) else: return cv
python
def cv(data, units=False): """ Calculate coefficient of variation (cv) of data. Mean and standard deviation are computed across time. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. units : bool Average `cv`. Returns ------- numpy.ndarray If units=False, series of unit `cv`s. float If units=True, mean `cv` across units. Examples -------- >>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]])) array([ 0.48795004, 0.63887656]) >>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True) 0.56341330073710316 """ mu = mean(data, time=True) var = variance(data, time=True) cv = np.sqrt(var) / mu if units is True: return np.mean(cv) else: return cv
[ "def", "cv", "(", "data", ",", "units", "=", "False", ")", ":", "mu", "=", "mean", "(", "data", ",", "time", "=", "True", ")", "var", "=", "variance", "(", "data", ",", "time", "=", "True", ")", "cv", "=", "np", ".", "sqrt", "(", "var", ")", ...
Calculate coefficient of variation (cv) of data. Mean and standard deviation are computed across time. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. units : bool Average `cv`. Returns ------- numpy.ndarray If units=False, series of unit `cv`s. float If units=True, mean `cv` across units. Examples -------- >>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]])) array([ 0.48795004, 0.63887656]) >>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True) 0.56341330073710316
[ "Calculate", "coefficient", "of", "variation", "(", "cv", ")", "of", "data", ".", "Mean", "and", "standard", "deviation", "are", "computed", "across", "time", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L1088-L1127
INM-6/hybridLFPy
hybridLFPy/helpers.py
fano
def fano(data, units=False): """ Calculate fano factor (FF) of data. Mean and variance are computed across time. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. units : bool Average `FF`. Returns ------- numpy.ndarray If units=False, series of unit FFs. float If units=True, mean FF across units. Examples -------- >>> fano(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]])) array([ 0.83333333, 1.9047619 ]) >>> fano(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True) 1.3690476190476191 """ mu = mean(data, time=True) var = variance(data, time=True) ff = var / mu if units is True: return np.mean(ff) else: return ff
python
def fano(data, units=False): """ Calculate fano factor (FF) of data. Mean and variance are computed across time. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. units : bool Average `FF`. Returns ------- numpy.ndarray If units=False, series of unit FFs. float If units=True, mean FF across units. Examples -------- >>> fano(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]])) array([ 0.83333333, 1.9047619 ]) >>> fano(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True) 1.3690476190476191 """ mu = mean(data, time=True) var = variance(data, time=True) ff = var / mu if units is True: return np.mean(ff) else: return ff
[ "def", "fano", "(", "data", ",", "units", "=", "False", ")", ":", "mu", "=", "mean", "(", "data", ",", "time", "=", "True", ")", "var", "=", "variance", "(", "data", ",", "time", "=", "True", ")", "ff", "=", "var", "/", "mu", "if", "units", "...
Calculate fano factor (FF) of data. Mean and variance are computed across time. Parameters ---------- data : numpy.ndarray 1st axis unit, 2nd axis time. units : bool Average `FF`. Returns ------- numpy.ndarray If units=False, series of unit FFs. float If units=True, mean FF across units. Examples -------- >>> fano(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]])) array([ 0.83333333, 1.9047619 ]) >>> fano(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True) 1.3690476190476191
[ "Calculate", "fano", "factor", "(", "FF", ")", "of", "data", ".", "Mean", "and", "variance", "are", "computed", "across", "time", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/helpers.py#L1130-L1168
INM-6/hybridLFPy
hybridLFPy/cachednetworks.py
CachedNetwork.collect_gdf
def collect_gdf(self): """ Collect the gdf-files from network sim in folder `spike_output_path` into sqlite database, using the GDF-class. Parameters ---------- None Returns ------- None """ # Resync COMM.Barrier() # Raise Exception if there are no gdf files to be read if len(glob.glob(os.path.join(self.spike_output_path, self.label + '*.'+ self.ext))) == 0: raise Exception('path to files contain no gdf-files!') #create in-memory databases of spikes if not hasattr(self, 'dbs'): self.dbs = {} for X in self.X: db = GDF(os.path.join(self.dbname), debug=True, new_db=True) db.create(re=os.path.join(self.spike_output_path, '{0}*{1}*{2}'.format(self.label, X, self.ext)), index=True) self.dbs.update({ X : db }) COMM.Barrier()
python
def collect_gdf(self): """ Collect the gdf-files from network sim in folder `spike_output_path` into sqlite database, using the GDF-class. Parameters ---------- None Returns ------- None """ # Resync COMM.Barrier() # Raise Exception if there are no gdf files to be read if len(glob.glob(os.path.join(self.spike_output_path, self.label + '*.'+ self.ext))) == 0: raise Exception('path to files contain no gdf-files!') #create in-memory databases of spikes if not hasattr(self, 'dbs'): self.dbs = {} for X in self.X: db = GDF(os.path.join(self.dbname), debug=True, new_db=True) db.create(re=os.path.join(self.spike_output_path, '{0}*{1}*{2}'.format(self.label, X, self.ext)), index=True) self.dbs.update({ X : db }) COMM.Barrier()
[ "def", "collect_gdf", "(", "self", ")", ":", "# Resync", "COMM", ".", "Barrier", "(", ")", "# Raise Exception if there are no gdf files to be read", "if", "len", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "spike_output_pat...
Collect the gdf-files from network sim in folder `spike_output_path` into sqlite database, using the GDF-class. Parameters ---------- None Returns ------- None
[ "Collect", "the", "gdf", "-", "files", "from", "network", "sim", "in", "folder", "spike_output_path", "into", "sqlite", "database", "using", "the", "GDF", "-", "class", ".", "Parameters", "----------", "None", "Returns", "-------", "None" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/cachednetworks.py#L174-L213
INM-6/hybridLFPy
hybridLFPy/cachednetworks.py
CachedNetwork.get_xy
def get_xy(self, xlim, fraction=1.): """ Get pairs of node units and spike trains on specific time interval. Parameters ---------- xlim : list of floats Spike time interval, e.g., [0., 1000.]. fraction : float in [0, 1.] If less than one, sample a fraction of nodes in random order. Returns ------- x : dict In `x` key-value entries are population name and neuron spike times. y : dict Where in `y` key-value entries are population name and neuron gid number. """ x = {} y = {} for X, nodes in self.nodes.items(): x[X] = np.array([]) y[X] = np.array([]) if fraction != 1: nodes = np.random.permutation(nodes)[:int(nodes.size*fraction)] nodes.sort() spiketimes = self.dbs[X].select_neurons_interval(nodes, T=xlim) i = 0 for times in spiketimes: x[X] = np.r_[x[X], times] y[X] = np.r_[y[X], np.zeros(times.size) + nodes[i]] i += 1 return x, y
python
def get_xy(self, xlim, fraction=1.): """ Get pairs of node units and spike trains on specific time interval. Parameters ---------- xlim : list of floats Spike time interval, e.g., [0., 1000.]. fraction : float in [0, 1.] If less than one, sample a fraction of nodes in random order. Returns ------- x : dict In `x` key-value entries are population name and neuron spike times. y : dict Where in `y` key-value entries are population name and neuron gid number. """ x = {} y = {} for X, nodes in self.nodes.items(): x[X] = np.array([]) y[X] = np.array([]) if fraction != 1: nodes = np.random.permutation(nodes)[:int(nodes.size*fraction)] nodes.sort() spiketimes = self.dbs[X].select_neurons_interval(nodes, T=xlim) i = 0 for times in spiketimes: x[X] = np.r_[x[X], times] y[X] = np.r_[y[X], np.zeros(times.size) + nodes[i]] i += 1 return x, y
[ "def", "get_xy", "(", "self", ",", "xlim", ",", "fraction", "=", "1.", ")", ":", "x", "=", "{", "}", "y", "=", "{", "}", "for", "X", ",", "nodes", "in", "self", ".", "nodes", ".", "items", "(", ")", ":", "x", "[", "X", "]", "=", "np", "."...
Get pairs of node units and spike trains on specific time interval. Parameters ---------- xlim : list of floats Spike time interval, e.g., [0., 1000.]. fraction : float in [0, 1.] If less than one, sample a fraction of nodes in random order. Returns ------- x : dict In `x` key-value entries are population name and neuron spike times. y : dict Where in `y` key-value entries are population name and neuron gid number.
[ "Get", "pairs", "of", "node", "units", "and", "spike", "trains", "on", "specific", "time", "interval", ".", "Parameters", "----------", "xlim", ":", "list", "of", "floats", "Spike", "time", "interval", "e", ".", "g", ".", "[", "0", ".", "1000", ".", "]...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/cachednetworks.py#L216-L255
INM-6/hybridLFPy
hybridLFPy/cachednetworks.py
CachedNetwork.plot_raster
def plot_raster(self, ax, xlim, x, y, pop_names=False, markersize=20., alpha=1., legend=True, marker='o', rasterized=True): """ Plot network raster plot in subplot object. Parameters ---------- ax : `matplotlib.axes.AxesSubplot` object plot axes xlim : list List of floats. Spike time interval, e.g., [0., 1000.]. x : dict Key-value entries are population name and neuron spike times. y : dict Key-value entries are population name and neuron gid number. pop_names: bool If True, show population names on yaxis instead of gid number. markersize : float raster plot marker size alpha : float in [0, 1] transparency of marker legend : bool Switch on axes legends. marker : str marker symbol for matplotlib.pyplot.plot rasterized : bool if True, the scatter plot will be treated as a bitmap embedded in pdf file output Returns ------- None """ yoffset = [sum(self.N_X) if X=='TC' else 0 for X in self.X] for i, X in enumerate(self.X): if y[X].size > 0: ax.plot(x[X], y[X]+yoffset[i], marker, markersize=markersize, mfc=self.colors[i], mec='none' if marker in '.ov><v^1234sp*hHDd' else self.colors[i], alpha=alpha, label=X, rasterized=rasterized, clip_on=True) #don't draw anything for the may-be-quiet TC population N_X_sum = 0 for i, X in enumerate(self.X): if y[X].size > 0: N_X_sum += self.N_X[i] ax.axis([xlim[0], xlim[1], self.GIDs[self.X[0]][0], self.GIDs[self.X[0]][0]+N_X_sum]) ax.set_ylim(ax.get_ylim()[::-1]) ax.set_ylabel('cell id', labelpad=0) ax.set_xlabel('$t$ (ms)', labelpad=0) if legend: ax.legend() if pop_names: yticks = [] yticklabels = [] for i, X in enumerate(self.X): if y[X] != []: yticks.append(y[X].mean()+yoffset[i]) yticklabels.append(self.X[i]) ax.set_yticks(yticks) ax.set_yticklabels(yticklabels) # Add some horizontal lines separating the populations for i, X in enumerate(self.X): if y[X].size > 0: ax.plot([xlim[0], xlim[1]], [y[X].max()+yoffset[i], y[X].max()+yoffset[i]], 'k', lw=0.25)
python
def plot_raster(self, ax, xlim, x, y, pop_names=False, markersize=20., alpha=1., legend=True, marker='o', rasterized=True): """ Plot network raster plot in subplot object. Parameters ---------- ax : `matplotlib.axes.AxesSubplot` object plot axes xlim : list List of floats. Spike time interval, e.g., [0., 1000.]. x : dict Key-value entries are population name and neuron spike times. y : dict Key-value entries are population name and neuron gid number. pop_names: bool If True, show population names on yaxis instead of gid number. markersize : float raster plot marker size alpha : float in [0, 1] transparency of marker legend : bool Switch on axes legends. marker : str marker symbol for matplotlib.pyplot.plot rasterized : bool if True, the scatter plot will be treated as a bitmap embedded in pdf file output Returns ------- None """ yoffset = [sum(self.N_X) if X=='TC' else 0 for X in self.X] for i, X in enumerate(self.X): if y[X].size > 0: ax.plot(x[X], y[X]+yoffset[i], marker, markersize=markersize, mfc=self.colors[i], mec='none' if marker in '.ov><v^1234sp*hHDd' else self.colors[i], alpha=alpha, label=X, rasterized=rasterized, clip_on=True) #don't draw anything for the may-be-quiet TC population N_X_sum = 0 for i, X in enumerate(self.X): if y[X].size > 0: N_X_sum += self.N_X[i] ax.axis([xlim[0], xlim[1], self.GIDs[self.X[0]][0], self.GIDs[self.X[0]][0]+N_X_sum]) ax.set_ylim(ax.get_ylim()[::-1]) ax.set_ylabel('cell id', labelpad=0) ax.set_xlabel('$t$ (ms)', labelpad=0) if legend: ax.legend() if pop_names: yticks = [] yticklabels = [] for i, X in enumerate(self.X): if y[X] != []: yticks.append(y[X].mean()+yoffset[i]) yticklabels.append(self.X[i]) ax.set_yticks(yticks) ax.set_yticklabels(yticklabels) # Add some horizontal lines separating the populations for i, X in enumerate(self.X): if y[X].size > 0: ax.plot([xlim[0], xlim[1]], [y[X].max()+yoffset[i], y[X].max()+yoffset[i]], 'k', lw=0.25)
[ "def", "plot_raster", "(", "self", ",", "ax", ",", "xlim", ",", "x", ",", "y", ",", "pop_names", "=", "False", ",", "markersize", "=", "20.", ",", "alpha", "=", "1.", ",", "legend", "=", "True", ",", "marker", "=", "'o'", ",", "rasterized", "=", ...
Plot network raster plot in subplot object. Parameters ---------- ax : `matplotlib.axes.AxesSubplot` object plot axes xlim : list List of floats. Spike time interval, e.g., [0., 1000.]. x : dict Key-value entries are population name and neuron spike times. y : dict Key-value entries are population name and neuron gid number. pop_names: bool If True, show population names on yaxis instead of gid number. markersize : float raster plot marker size alpha : float in [0, 1] transparency of marker legend : bool Switch on axes legends. marker : str marker symbol for matplotlib.pyplot.plot rasterized : bool if True, the scatter plot will be treated as a bitmap embedded in pdf file output Returns ------- None
[ "Plot", "network", "raster", "plot", "in", "subplot", "object", ".", "Parameters", "----------", "ax", ":", "matplotlib", ".", "axes", ".", "AxesSubplot", "object", "plot", "axes", "xlim", ":", "list", "List", "of", "floats", ".", "Spike", "time", "interval"...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/cachednetworks.py#L258-L334
INM-6/hybridLFPy
hybridLFPy/cachednetworks.py
CachedNetwork.plot_f_rate
def plot_f_rate(self, ax, X, i, xlim, x, y, binsize=1, yscale='linear', plottype='fill_between', show_label=False, rasterized=False): """ Plot network firing rate plot in subplot object. Parameters ---------- ax : `matplotlib.axes.AxesSubplot` object. X : str Population name. i : int Population index in class attribute `X`. xlim : list of floats Spike time interval, e.g., [0., 1000.]. x : dict Key-value entries are population name and neuron spike times. y : dict Key-value entries are population name and neuron gid number. yscale : 'str' Linear, log, or symlog y-axes in rate plot. plottype : str plot type string in `['fill_between', 'bar']` show_label : bool whether or not to show labels Returns ------- None """ bins = np.arange(xlim[0], xlim[1]+binsize, binsize) (hist, bins) = np.histogram(x[X], bins=bins) if plottype == 'fill_between': ax.fill_between(bins[:-1], hist * 1000. / self.N_X[i], color=self.colors[i], lw=0.5, label=X, rasterized=rasterized, clip_on=False) ax.plot(bins[:-1], hist * 1000. / self.N_X[i], color='k', lw=0.5, label=X, rasterized=rasterized, clip_on=False) elif plottype == 'bar': ax.bar(bins[:-1], hist * 1000. / self.N_X[i], color=self.colors[i], label=X, rasterized=rasterized , linewidth=0.25, width=0.9, clip_on=False) else: mssg = "plottype={} not in ['fill_between', 'bar']".format(plottype) raise Exception(mssg) remove_axis_junk(ax) ax.axis(ax.axis('tight')) ax.set_yscale(yscale) ax.set_xlim(xlim[0], xlim[1]) if show_label: ax.text(xlim[0] + .05*(xlim[1]-xlim[0]), ax.axis()[3]*1.5, X, va='center', ha='left')
python
def plot_f_rate(self, ax, X, i, xlim, x, y, binsize=1, yscale='linear', plottype='fill_between', show_label=False, rasterized=False): """ Plot network firing rate plot in subplot object. Parameters ---------- ax : `matplotlib.axes.AxesSubplot` object. X : str Population name. i : int Population index in class attribute `X`. xlim : list of floats Spike time interval, e.g., [0., 1000.]. x : dict Key-value entries are population name and neuron spike times. y : dict Key-value entries are population name and neuron gid number. yscale : 'str' Linear, log, or symlog y-axes in rate plot. plottype : str plot type string in `['fill_between', 'bar']` show_label : bool whether or not to show labels Returns ------- None """ bins = np.arange(xlim[0], xlim[1]+binsize, binsize) (hist, bins) = np.histogram(x[X], bins=bins) if plottype == 'fill_between': ax.fill_between(bins[:-1], hist * 1000. / self.N_X[i], color=self.colors[i], lw=0.5, label=X, rasterized=rasterized, clip_on=False) ax.plot(bins[:-1], hist * 1000. / self.N_X[i], color='k', lw=0.5, label=X, rasterized=rasterized, clip_on=False) elif plottype == 'bar': ax.bar(bins[:-1], hist * 1000. / self.N_X[i], color=self.colors[i], label=X, rasterized=rasterized , linewidth=0.25, width=0.9, clip_on=False) else: mssg = "plottype={} not in ['fill_between', 'bar']".format(plottype) raise Exception(mssg) remove_axis_junk(ax) ax.axis(ax.axis('tight')) ax.set_yscale(yscale) ax.set_xlim(xlim[0], xlim[1]) if show_label: ax.text(xlim[0] + .05*(xlim[1]-xlim[0]), ax.axis()[3]*1.5, X, va='center', ha='left')
[ "def", "plot_f_rate", "(", "self", ",", "ax", ",", "X", ",", "i", ",", "xlim", ",", "x", ",", "y", ",", "binsize", "=", "1", ",", "yscale", "=", "'linear'", ",", "plottype", "=", "'fill_between'", ",", "show_label", "=", "False", ",", "rasterized", ...
Plot network firing rate plot in subplot object. Parameters ---------- ax : `matplotlib.axes.AxesSubplot` object. X : str Population name. i : int Population index in class attribute `X`. xlim : list of floats Spike time interval, e.g., [0., 1000.]. x : dict Key-value entries are population name and neuron spike times. y : dict Key-value entries are population name and neuron gid number. yscale : 'str' Linear, log, or symlog y-axes in rate plot. plottype : str plot type string in `['fill_between', 'bar']` show_label : bool whether or not to show labels Returns ------- None
[ "Plot", "network", "firing", "rate", "plot", "in", "subplot", "object", ".", "Parameters", "----------", "ax", ":", "matplotlib", ".", "axes", ".", "AxesSubplot", "object", ".", "X", ":", "str", "Population", "name", ".", "i", ":", "int", "Population", "in...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/cachednetworks.py#L337-L397
INM-6/hybridLFPy
hybridLFPy/cachednetworks.py
CachedNetwork.raster_plots
def raster_plots(self, xlim=[0, 1000], markersize=1, alpha=1., marker='o'): """ Pretty plot of the spiking output of each population as raster and rate. Parameters ---------- xlim : list List of floats. Spike time interval, e.g., `[0., 1000.]`. markersize : float marker size for plot, see `matplotlib.pyplot.plot` alpha : float transparency for markers, see `matplotlib.pyplot.plot` marker : :mod:`A valid marker style <matplotlib.markers>` Returns ------- fig : `matplotlib.figure.Figure` object """ x, y = self.get_xy(xlim) fig = plt.figure() fig.subplots_adjust(left=0.12, hspace=0.15) ax0 = fig.add_subplot(211) self.plot_raster(ax0, xlim, x, y, markersize=markersize, alpha=alpha, marker=marker) remove_axis_junk(ax0) ax0.set_title('spike raster') ax0.set_xlabel("") nrows = len(self.X) bottom = np.linspace(0.1, 0.45, nrows+1)[::-1][1:] thickn = np.abs(np.diff(bottom))[0]*0.9 for i, layer in enumerate(self.X): ax1 = fig.add_axes([0.12, bottom[i], 0.78, thickn]) self.plot_f_rate(ax1, layer, i, xlim, x, y, ) if i == nrows-1: ax1.set_xlabel('time (ms)') else: ax1.set_xticklabels([]) if i == 4: ax1.set_ylabel(r'population rates ($s^{-1}$)') if i == 0: ax1.set_title(r'population firing rates ($s^{-1}$)') return fig
python
def raster_plots(self, xlim=[0, 1000], markersize=1, alpha=1., marker='o'): """ Pretty plot of the spiking output of each population as raster and rate. Parameters ---------- xlim : list List of floats. Spike time interval, e.g., `[0., 1000.]`. markersize : float marker size for plot, see `matplotlib.pyplot.plot` alpha : float transparency for markers, see `matplotlib.pyplot.plot` marker : :mod:`A valid marker style <matplotlib.markers>` Returns ------- fig : `matplotlib.figure.Figure` object """ x, y = self.get_xy(xlim) fig = plt.figure() fig.subplots_adjust(left=0.12, hspace=0.15) ax0 = fig.add_subplot(211) self.plot_raster(ax0, xlim, x, y, markersize=markersize, alpha=alpha, marker=marker) remove_axis_junk(ax0) ax0.set_title('spike raster') ax0.set_xlabel("") nrows = len(self.X) bottom = np.linspace(0.1, 0.45, nrows+1)[::-1][1:] thickn = np.abs(np.diff(bottom))[0]*0.9 for i, layer in enumerate(self.X): ax1 = fig.add_axes([0.12, bottom[i], 0.78, thickn]) self.plot_f_rate(ax1, layer, i, xlim, x, y, ) if i == nrows-1: ax1.set_xlabel('time (ms)') else: ax1.set_xticklabels([]) if i == 4: ax1.set_ylabel(r'population rates ($s^{-1}$)') if i == 0: ax1.set_title(r'population firing rates ($s^{-1}$)') return fig
[ "def", "raster_plots", "(", "self", ",", "xlim", "=", "[", "0", ",", "1000", "]", ",", "markersize", "=", "1", ",", "alpha", "=", "1.", ",", "marker", "=", "'o'", ")", ":", "x", ",", "y", "=", "self", ".", "get_xy", "(", "xlim", ")", "fig", "...
Pretty plot of the spiking output of each population as raster and rate. Parameters ---------- xlim : list List of floats. Spike time interval, e.g., `[0., 1000.]`. markersize : float marker size for plot, see `matplotlib.pyplot.plot` alpha : float transparency for markers, see `matplotlib.pyplot.plot` marker : :mod:`A valid marker style <matplotlib.markers>` Returns ------- fig : `matplotlib.figure.Figure` object
[ "Pretty", "plot", "of", "the", "spiking", "output", "of", "each", "population", "as", "raster", "and", "rate", ".", "Parameters", "----------", "xlim", ":", "list", "List", "of", "floats", ".", "Spike", "time", "interval", "e", ".", "g", ".", "[", "0", ...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/cachednetworks.py#L400-L455
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
add_sst_to_dot_display
def add_sst_to_dot_display(ax, sst, color= '0.',alpha= 1.): ''' suitable for plotting fraction of neurons ''' plt.sca(ax) N = len(sst) current_ymax = 0 counter = 0 while True: if len(ax.get_lines()) !=0: data = ax.get_lines()[-1-counter].get_data()[1] if np.sum(data) != 0: # if not empty array current_ymax = np.max(data) break counter +=1 else: break for i in np.arange(N): plt.plot(sst[i],np.ones_like(sst[i])+i+current_ymax -1, 'k o',ms=0.5, mfc=color,mec=color, alpha=alpha) plt.xlabel(r'time (ms)') plt.ylabel(r'neuron id') return ax
python
def add_sst_to_dot_display(ax, sst, color= '0.',alpha= 1.): ''' suitable for plotting fraction of neurons ''' plt.sca(ax) N = len(sst) current_ymax = 0 counter = 0 while True: if len(ax.get_lines()) !=0: data = ax.get_lines()[-1-counter].get_data()[1] if np.sum(data) != 0: # if not empty array current_ymax = np.max(data) break counter +=1 else: break for i in np.arange(N): plt.plot(sst[i],np.ones_like(sst[i])+i+current_ymax -1, 'k o',ms=0.5, mfc=color,mec=color, alpha=alpha) plt.xlabel(r'time (ms)') plt.ylabel(r'neuron id') return ax
[ "def", "add_sst_to_dot_display", "(", "ax", ",", "sst", ",", "color", "=", "'0.'", ",", "alpha", "=", "1.", ")", ":", "plt", ".", "sca", "(", "ax", ")", "N", "=", "len", "(", "sst", ")", "current_ymax", "=", "0", "counter", "=", "0", "while", "Tr...
suitable for plotting fraction of neurons
[ "suitable", "for", "plotting", "fraction", "of", "neurons" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L18-L39
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
empty_bar_plot
def empty_bar_plot(ax): ''' Delete all axis ticks and labels ''' plt.sca(ax) plt.setp(plt.gca(),xticks=[],xticklabels=[]) return ax
python
def empty_bar_plot(ax): ''' Delete all axis ticks and labels ''' plt.sca(ax) plt.setp(plt.gca(),xticks=[],xticklabels=[]) return ax
[ "def", "empty_bar_plot", "(", "ax", ")", ":", "plt", ".", "sca", "(", "ax", ")", "plt", ".", "setp", "(", "plt", ".", "gca", "(", ")", ",", "xticks", "=", "[", "]", ",", "xticklabels", "=", "[", "]", ")", "return", "ax" ]
Delete all axis ticks and labels
[ "Delete", "all", "axis", "ticks", "and", "labels" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L42-L46
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
add_to_bar_plot
def add_to_bar_plot(ax, x, number, name = '', color = '0.'): ''' This function takes an axes and adds one bar to it ''' plt.sca(ax) plt.setp(ax,xticks=np.append(ax.get_xticks(),np.array([x]))\ ,xticklabels=[item.get_text() for item in ax.get_xticklabels()] +[name]) plt.bar([x],number , color = color, width = 1.) return ax
python
def add_to_bar_plot(ax, x, number, name = '', color = '0.'): ''' This function takes an axes and adds one bar to it ''' plt.sca(ax) plt.setp(ax,xticks=np.append(ax.get_xticks(),np.array([x]))\ ,xticklabels=[item.get_text() for item in ax.get_xticklabels()] +[name]) plt.bar([x],number , color = color, width = 1.) return ax
[ "def", "add_to_bar_plot", "(", "ax", ",", "x", ",", "number", ",", "name", "=", "''", ",", "color", "=", "'0.'", ")", ":", "plt", ".", "sca", "(", "ax", ")", "plt", ".", "setp", "(", "ax", ",", "xticks", "=", "np", ".", "append", "(", "ax", "...
This function takes an axes and adds one bar to it
[ "This", "function", "takes", "an", "axes", "and", "adds", "one", "bar", "to", "it" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L49-L55
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
add_to_line_plot
def add_to_line_plot(ax, x, y, color = '0.' , label = ''): ''' This function takes an axes and adds one line to it ''' plt.sca(ax) plt.plot(x,y, color = color, label = label) return ax
python
def add_to_line_plot(ax, x, y, color = '0.' , label = ''): ''' This function takes an axes and adds one line to it ''' plt.sca(ax) plt.plot(x,y, color = color, label = label) return ax
[ "def", "add_to_line_plot", "(", "ax", ",", "x", ",", "y", ",", "color", "=", "'0.'", ",", "label", "=", "''", ")", ":", "plt", ".", "sca", "(", "ax", ")", "plt", ".", "plot", "(", "x", ",", "y", ",", "color", "=", "color", ",", "label", "=", ...
This function takes an axes and adds one line to it
[ "This", "function", "takes", "an", "axes", "and", "adds", "one", "line", "to", "it" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L58-L62
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
colorbar
def colorbar(fig, ax, im, width=0.05, height=1.0, hoffset=0.01, voffset=0.0, orientation='vertical'): ''' draw colorbar without resizing the axes object to make room kwargs: :: fig : matplotlib.figure.Figure ax : matplotlib.axes.AxesSubplot im : matplotlib.image.AxesImage width : float, colorbar width in fraction of ax width height : float, colorbar height in fraction of ax height hoffset : float, horizontal spacing to main axes in fraction of width voffset : float, vertical spacing to main axis in fraction of height orientation : str, 'horizontal' or 'vertical' return: :: object : colorbar handle ''' rect = np.array(ax.get_position().bounds) rect = np.array(ax.get_position().bounds) caxrect = [0]*4 caxrect[0] = rect[0] + rect[2] + hoffset*rect[2] caxrect[1] = rect[1] + voffset*rect[3] caxrect[2] = rect[2]*width caxrect[3] = rect[3]*height cax = fig.add_axes(caxrect) cb = fig.colorbar(im, cax=cax, orientation=orientation) return cb
python
def colorbar(fig, ax, im, width=0.05, height=1.0, hoffset=0.01, voffset=0.0, orientation='vertical'): ''' draw colorbar without resizing the axes object to make room kwargs: :: fig : matplotlib.figure.Figure ax : matplotlib.axes.AxesSubplot im : matplotlib.image.AxesImage width : float, colorbar width in fraction of ax width height : float, colorbar height in fraction of ax height hoffset : float, horizontal spacing to main axes in fraction of width voffset : float, vertical spacing to main axis in fraction of height orientation : str, 'horizontal' or 'vertical' return: :: object : colorbar handle ''' rect = np.array(ax.get_position().bounds) rect = np.array(ax.get_position().bounds) caxrect = [0]*4 caxrect[0] = rect[0] + rect[2] + hoffset*rect[2] caxrect[1] = rect[1] + voffset*rect[3] caxrect[2] = rect[2]*width caxrect[3] = rect[3]*height cax = fig.add_axes(caxrect) cb = fig.colorbar(im, cax=cax, orientation=orientation) return cb
[ "def", "colorbar", "(", "fig", ",", "ax", ",", "im", ",", "width", "=", "0.05", ",", "height", "=", "1.0", ",", "hoffset", "=", "0.01", ",", "voffset", "=", "0.0", ",", "orientation", "=", "'vertical'", ")", ":", "rect", "=", "np", ".", "array", ...
draw colorbar without resizing the axes object to make room kwargs: :: fig : matplotlib.figure.Figure ax : matplotlib.axes.AxesSubplot im : matplotlib.image.AxesImage width : float, colorbar width in fraction of ax width height : float, colorbar height in fraction of ax height hoffset : float, horizontal spacing to main axes in fraction of width voffset : float, vertical spacing to main axis in fraction of height orientation : str, 'horizontal' or 'vertical' return: :: object : colorbar handle
[ "draw", "colorbar", "without", "resizing", "the", "axes", "object", "to", "make", "room" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L65-L102
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
frontiers_style
def frontiers_style(): ''' Figure styles for frontiers ''' inchpercm = 2.54 frontierswidth=8.5 textsize = 5 titlesize = 7 plt.rcdefaults() plt.rcParams.update({ 'figure.figsize' : [frontierswidth/inchpercm, frontierswidth/inchpercm], 'figure.dpi' : 160, 'xtick.labelsize' : textsize, 'ytick.labelsize' : textsize, 'font.size' : textsize, 'axes.labelsize' : textsize, 'axes.titlesize' : titlesize, 'axes.linewidth': 0.75, 'lines.linewidth': 0.75, 'legend.fontsize' : textsize, }) return None
python
def frontiers_style(): ''' Figure styles for frontiers ''' inchpercm = 2.54 frontierswidth=8.5 textsize = 5 titlesize = 7 plt.rcdefaults() plt.rcParams.update({ 'figure.figsize' : [frontierswidth/inchpercm, frontierswidth/inchpercm], 'figure.dpi' : 160, 'xtick.labelsize' : textsize, 'ytick.labelsize' : textsize, 'font.size' : textsize, 'axes.labelsize' : textsize, 'axes.titlesize' : titlesize, 'axes.linewidth': 0.75, 'lines.linewidth': 0.75, 'legend.fontsize' : textsize, }) return None
[ "def", "frontiers_style", "(", ")", ":", "inchpercm", "=", "2.54", "frontierswidth", "=", "8.5", "textsize", "=", "5", "titlesize", "=", "7", "plt", ".", "rcdefaults", "(", ")", "plt", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "[...
Figure styles for frontiers
[ "Figure", "styles", "for", "frontiers" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L110-L132
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
annotate_subplot
def annotate_subplot(ax, ncols=1, nrows=1, letter='a', linear_offset=0.075, fontsize=8): '''add a subplot annotation number''' ax.text(-ncols*linear_offset, 1+nrows*linear_offset, letter, horizontalalignment='center', verticalalignment='center', fontsize=fontsize, fontweight='demibold', transform=ax.transAxes)
python
def annotate_subplot(ax, ncols=1, nrows=1, letter='a', linear_offset=0.075, fontsize=8): '''add a subplot annotation number''' ax.text(-ncols*linear_offset, 1+nrows*linear_offset, letter, horizontalalignment='center', verticalalignment='center', fontsize=fontsize, fontweight='demibold', transform=ax.transAxes)
[ "def", "annotate_subplot", "(", "ax", ",", "ncols", "=", "1", ",", "nrows", "=", "1", ",", "letter", "=", "'a'", ",", "linear_offset", "=", "0.075", ",", "fontsize", "=", "8", ")", ":", "ax", ".", "text", "(", "-", "ncols", "*", "linear_offset", ",...
add a subplot annotation number
[ "add", "a", "subplot", "annotation", "number" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L176-L183
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/plotting_helpers.py
get_colors
def get_colors(num=16, cmap=plt.cm.Set1): '''return a list of color tuples to use in plots''' colors = [] for i in xrange(num): if analysis_params.bw: colors.append('k' if i % 2 == 0 else 'gray') else: i *= 256. if num > 1: i /= num - 1. else: i /= num colors.append(cmap(int(i))) return colors
python
def get_colors(num=16, cmap=plt.cm.Set1): '''return a list of color tuples to use in plots''' colors = [] for i in xrange(num): if analysis_params.bw: colors.append('k' if i % 2 == 0 else 'gray') else: i *= 256. if num > 1: i /= num - 1. else: i /= num colors.append(cmap(int(i))) return colors
[ "def", "get_colors", "(", "num", "=", "16", ",", "cmap", "=", "plt", ".", "cm", ".", "Set1", ")", ":", "colors", "=", "[", "]", "for", "i", "in", "xrange", "(", "num", ")", ":", "if", "analysis_params", ".", "bw", ":", "colors", ".", "append", ...
return a list of color tuples to use in plots
[ "return", "a", "list", "of", "color", "tuples", "to", "use", "in", "plots" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/plotting_helpers.py#L186-L199
ianclegg/winrmlib
winrmlib/api/compression.py
Compressor.compress
def compress(self, data, windowLength = None): """Compresses text data using the LZ77 algorithm.""" if windowLength == None: windowLength = self.defaultWindowLength compressed = "" pos = 0 lastPos = len(data) - self.minStringLength while pos < lastPos: searchStart = max(pos - windowLength, 0); matchLength = self.minStringLength foundMatch = False bestMatchDistance = self.maxStringDistance bestMatchLength = 0 newCompressed = None while (searchStart + matchLength) < pos: m1 = data[searchStart : searchStart + matchLength] m2 = data[pos : pos + matchLength] isValidMatch = (m1 == m2 and matchLength < self.maxStringLength) if isValidMatch: matchLength += 1 foundMatch = True else: realMatchLength = matchLength - 1 if foundMatch and realMatchLength > bestMatchLength: bestMatchDistance = pos - searchStart - realMatchLength bestMatchLength = realMatchLength matchLength = self.minStringLength searchStart += 1 foundMatch = False if bestMatchLength: newCompressed = (self.referencePrefix + self.__encodeReferenceInt(bestMatchDistance, 2) + self.__encodeReferenceLength(bestMatchLength)) pos += bestMatchLength else: if data[pos] != self.referencePrefix: newCompressed = data[pos] else: newCompressed = self.referencePrefix + self.referencePrefix pos += 1 compressed += newCompressed return compressed + data[pos:].replace("`", "``")
python
def compress(self, data, windowLength = None): """Compresses text data using the LZ77 algorithm.""" if windowLength == None: windowLength = self.defaultWindowLength compressed = "" pos = 0 lastPos = len(data) - self.minStringLength while pos < lastPos: searchStart = max(pos - windowLength, 0); matchLength = self.minStringLength foundMatch = False bestMatchDistance = self.maxStringDistance bestMatchLength = 0 newCompressed = None while (searchStart + matchLength) < pos: m1 = data[searchStart : searchStart + matchLength] m2 = data[pos : pos + matchLength] isValidMatch = (m1 == m2 and matchLength < self.maxStringLength) if isValidMatch: matchLength += 1 foundMatch = True else: realMatchLength = matchLength - 1 if foundMatch and realMatchLength > bestMatchLength: bestMatchDistance = pos - searchStart - realMatchLength bestMatchLength = realMatchLength matchLength = self.minStringLength searchStart += 1 foundMatch = False if bestMatchLength: newCompressed = (self.referencePrefix + self.__encodeReferenceInt(bestMatchDistance, 2) + self.__encodeReferenceLength(bestMatchLength)) pos += bestMatchLength else: if data[pos] != self.referencePrefix: newCompressed = data[pos] else: newCompressed = self.referencePrefix + self.referencePrefix pos += 1 compressed += newCompressed return compressed + data[pos:].replace("`", "``")
[ "def", "compress", "(", "self", ",", "data", ",", "windowLength", "=", "None", ")", ":", "if", "windowLength", "==", "None", ":", "windowLength", "=", "self", ".", "defaultWindowLength", "compressed", "=", "\"\"", "pos", "=", "0", "lastPos", "=", "len", ...
Compresses text data using the LZ77 algorithm.
[ "Compresses", "text", "data", "using", "the", "LZ77", "algorithm", "." ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/compression.py#L131-L182
ianclegg/winrmlib
winrmlib/api/compression.py
Compressor.decompress
def decompress(self, data): """Decompresses LZ77 compressed text data""" decompressed = "" pos = 0 while pos < len(data): currentChar = data[pos] if currentChar != self.referencePrefix: decompressed += currentChar pos += 1 else: nextChar = data[pos + 1] if nextChar != self.referencePrefix: distance = self.__decodeReferenceInt(data[pos + 1 : pos + 3], 2) length = self.__decodeReferenceLength(data[pos + 3]) start = len(decompressed) - distance - length end = start + length decompressed += decompressed[start : end] pos += self.minStringLength - 1 else: decompressed += self.referencePrefix pos += 2 return decompressed
python
def decompress(self, data): """Decompresses LZ77 compressed text data""" decompressed = "" pos = 0 while pos < len(data): currentChar = data[pos] if currentChar != self.referencePrefix: decompressed += currentChar pos += 1 else: nextChar = data[pos + 1] if nextChar != self.referencePrefix: distance = self.__decodeReferenceInt(data[pos + 1 : pos + 3], 2) length = self.__decodeReferenceLength(data[pos + 3]) start = len(decompressed) - distance - length end = start + length decompressed += decompressed[start : end] pos += self.minStringLength - 1 else: decompressed += self.referencePrefix pos += 2 return decompressed
[ "def", "decompress", "(", "self", ",", "data", ")", ":", "decompressed", "=", "\"\"", "pos", "=", "0", "while", "pos", "<", "len", "(", "data", ")", ":", "currentChar", "=", "data", "[", "pos", "]", "if", "currentChar", "!=", "self", ".", "referenceP...
Decompresses LZ77 compressed text data
[ "Decompresses", "LZ77", "compressed", "text", "data" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/compression.py#L184-L207
squdle/baseconvert
baseconvert/__main__.py
main
def main(): """ Main entry point for running baseconvert as a command. Examples: $ python -m baseconvert -n 0.5 -i 10 -o 20 -s True 0.A $ echo 3.1415926 | python -m baseconvert -i 10 -o 16 -d 3 -s True 3.243 """ # Parse arguments parser = argparse.ArgumentParser(description="Convert rational numbers between bases.") parser.add_argument("-n", "--number", default=None, help="The number to convert as a string, else stdin used.") parser.add_argument("-i", "--input-base", default=10, help="The input base (default 10).") parser.add_argument("-o", "--output-base", default=10, help="The output base (default 10).") parser.add_argument("-d", "--max_depth", default=10, type=int, help="The maximum fractional digits (default 10).") parser.add_argument("-r", "--recurring", default=True, type=bool, help="Boolean, if True will attempt to find recurring decimals (default True).") parser.add_argument("-s", "--string", type=bool, help="Boolean, if True will output number as String, else as tuple (default False).") args = parser.parse_args() args.input_base = float(args.input_base) args.output_base = float(args.output_base) if args.input_base == int(args.input_base): args.input_base = int(args.input_base) if args.output_base == int(args.output_base): args.output_base = int(args.output_base) if (args.number): return base(args.number, args.input_base, args.output_base, string=args.string, max_depth=args.max_depth, recurring=args.recurring) elif not sys.stdin.isatty(): return base(sys.stdin.read().strip(), args.input_base, args.output_base, string=args.string, max_depth=args.max_depth, recurring=args.recurring) else: raise ValueError("Please input a number!")
python
def main(): """ Main entry point for running baseconvert as a command. Examples: $ python -m baseconvert -n 0.5 -i 10 -o 20 -s True 0.A $ echo 3.1415926 | python -m baseconvert -i 10 -o 16 -d 3 -s True 3.243 """ # Parse arguments parser = argparse.ArgumentParser(description="Convert rational numbers between bases.") parser.add_argument("-n", "--number", default=None, help="The number to convert as a string, else stdin used.") parser.add_argument("-i", "--input-base", default=10, help="The input base (default 10).") parser.add_argument("-o", "--output-base", default=10, help="The output base (default 10).") parser.add_argument("-d", "--max_depth", default=10, type=int, help="The maximum fractional digits (default 10).") parser.add_argument("-r", "--recurring", default=True, type=bool, help="Boolean, if True will attempt to find recurring decimals (default True).") parser.add_argument("-s", "--string", type=bool, help="Boolean, if True will output number as String, else as tuple (default False).") args = parser.parse_args() args.input_base = float(args.input_base) args.output_base = float(args.output_base) if args.input_base == int(args.input_base): args.input_base = int(args.input_base) if args.output_base == int(args.output_base): args.output_base = int(args.output_base) if (args.number): return base(args.number, args.input_base, args.output_base, string=args.string, max_depth=args.max_depth, recurring=args.recurring) elif not sys.stdin.isatty(): return base(sys.stdin.read().strip(), args.input_base, args.output_base, string=args.string, max_depth=args.max_depth, recurring=args.recurring) else: raise ValueError("Please input a number!")
[ "def", "main", "(", ")", ":", "# Parse arguments", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Convert rational numbers between bases.\"", ")", "parser", ".", "add_argument", "(", "\"-n\"", ",", "\"--number\"", ",", "default", "=", ...
Main entry point for running baseconvert as a command. Examples: $ python -m baseconvert -n 0.5 -i 10 -o 20 -s True 0.A $ echo 3.1415926 | python -m baseconvert -i 10 -o 16 -d 3 -s True 3.243
[ "Main", "entry", "point", "for", "running", "baseconvert", "as", "a", "command", "." ]
train
https://github.com/squdle/baseconvert/blob/26c9a2c07c2ffcde7d078fb812419ca6d388900b/baseconvert/__main__.py#L8-L47
mathiasertl/xmpp-backends
xmpp_backends/ejabberdctl.py
EjabberdctlBackend.message_user
def message_user(self, username, domain, subject, message): """Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.""" jid = '%s@%s' % (username, domain) if self.api_version <= (14, 7): # TODO: it's unclear when send_message was introduced command = 'send_message_chat' args = domain, '%s@%s' % (username, domain), message else: command = 'send_message' args = 'chat', domain, jid, subject, message code, out, err = self.ctl(command, *args) if code != 0: raise BackendError(code)
python
def message_user(self, username, domain, subject, message): """Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.""" jid = '%s@%s' % (username, domain) if self.api_version <= (14, 7): # TODO: it's unclear when send_message was introduced command = 'send_message_chat' args = domain, '%s@%s' % (username, domain), message else: command = 'send_message' args = 'chat', domain, jid, subject, message code, out, err = self.ctl(command, *args) if code != 0: raise BackendError(code)
[ "def", "message_user", "(", "self", ",", "username", ",", "domain", ",", "subject", ",", "message", ")", ":", "jid", "=", "'%s@%s'", "%", "(", "username", ",", "domain", ")", "if", "self", ".", "api_version", "<=", "(", "14", ",", "7", ")", ":", "#...
Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.
[ "Currently", "use", "send_message_chat", "and", "discard", "subject", "because", "headline", "messages", "are", "not", "stored", "by", "mod_offline", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/ejabberdctl.py#L236-L250
avalente/appmetrics
appmetrics/simple_metrics.py
Counter.notify
def notify(self, value): """ Increment or decrement the value, according to the given value's sign The value should be an integer, an attempt to cast it to integer will be made """ value = int(value) with self.lock: self.value += value
python
def notify(self, value): """ Increment or decrement the value, according to the given value's sign The value should be an integer, an attempt to cast it to integer will be made """ value = int(value) with self.lock: self.value += value
[ "def", "notify", "(", "self", ",", "value", ")", ":", "value", "=", "int", "(", "value", ")", "with", "self", ".", "lock", ":", "self", ".", "value", "+=", "value" ]
Increment or decrement the value, according to the given value's sign The value should be an integer, an attempt to cast it to integer will be made
[ "Increment", "or", "decrement", "the", "value", "according", "to", "the", "given", "value", "s", "sign" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/simple_metrics.py#L34-L43
tkf/rash
rash/config.py
ConfigStore.get_config
def get_config(self): """ Load user configuration or return default when not found. :rtype: :class:`Configuration` """ if not self._config: namespace = {} if os.path.exists(self.config_path): execfile(self.config_path, namespace) self._config = namespace.get('config') or Configuration() return self._config
python
def get_config(self): """ Load user configuration or return default when not found. :rtype: :class:`Configuration` """ if not self._config: namespace = {} if os.path.exists(self.config_path): execfile(self.config_path, namespace) self._config = namespace.get('config') or Configuration() return self._config
[ "def", "get_config", "(", "self", ")", ":", "if", "not", "self", ".", "_config", ":", "namespace", "=", "{", "}", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "config_path", ")", ":", "execfile", "(", "self", ".", "config_path", ",", "...
Load user configuration or return default when not found. :rtype: :class:`Configuration`
[ "Load", "user", "configuration", "or", "return", "default", "when", "not", "found", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/config.py#L108-L120
pebble/libpebble2
libpebble2/services/putbytes.py
PutBytes.send
def send(self): """ Sends the object to the watch. Block until completion, or raises :exc:`.PutBytesError` on failure. During transmission, a "progress" event will be periodically emitted with the following signature: :: (sent_this_interval, sent_so_far, total_object_size) """ # Prepare the watch to receive something. cookie = self._prepare() # Send it. self._send_object(cookie) # Commit it. self._commit(cookie) # Install it. self._install(cookie)
python
def send(self): """ Sends the object to the watch. Block until completion, or raises :exc:`.PutBytesError` on failure. During transmission, a "progress" event will be periodically emitted with the following signature: :: (sent_this_interval, sent_so_far, total_object_size) """ # Prepare the watch to receive something. cookie = self._prepare() # Send it. self._send_object(cookie) # Commit it. self._commit(cookie) # Install it. self._install(cookie)
[ "def", "send", "(", "self", ")", ":", "# Prepare the watch to receive something.", "cookie", "=", "self", ".", "_prepare", "(", ")", "# Send it.", "self", ".", "_send_object", "(", "cookie", ")", "# Commit it.", "self", ".", "_commit", "(", "cookie", ")", "# I...
Sends the object to the watch. Block until completion, or raises :exc:`.PutBytesError` on failure. During transmission, a "progress" event will be periodically emitted with the following signature: :: (sent_this_interval, sent_so_far, total_object_size)
[ "Sends", "the", "object", "to", "the", "watch", ".", "Block", "until", "completion", "or", "raises", ":", "exc", ":", ".", "PutBytesError", "on", "failure", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/putbytes.py#L53-L71
ianclegg/winrmlib
winrmlib/shell.py
CommandShell.open
def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']): """ Opens the remote shell """ shell = dict() shell['rsp:InputStreams'] = " ".join(input_streams) shell['rsp:OutputStreams'] = " ".join(output_streams) shell['rsp:IdleTimeout'] = str(self.idle_timeout) if self.working_directory is not None: shell['rsp:WorkingDirectory'] = str(self.working_directory) if self.environment is not None: variables = [] for key, value in self.environment.items(): variables.append({'#text': str(value), '@Name': key}) shell['rsp:Environment'] = {'Variable': variables} response = self.session.create(self.resource, {'rsp:Shell': shell}) self.__shell_id = response['rsp:Shell']['rsp:ShellId']
python
def open(self, input_streams=['stdin'], output_streams=['stderr', 'stdout']): """ Opens the remote shell """ shell = dict() shell['rsp:InputStreams'] = " ".join(input_streams) shell['rsp:OutputStreams'] = " ".join(output_streams) shell['rsp:IdleTimeout'] = str(self.idle_timeout) if self.working_directory is not None: shell['rsp:WorkingDirectory'] = str(self.working_directory) if self.environment is not None: variables = [] for key, value in self.environment.items(): variables.append({'#text': str(value), '@Name': key}) shell['rsp:Environment'] = {'Variable': variables} response = self.session.create(self.resource, {'rsp:Shell': shell}) self.__shell_id = response['rsp:Shell']['rsp:ShellId']
[ "def", "open", "(", "self", ",", "input_streams", "=", "[", "'stdin'", "]", ",", "output_streams", "=", "[", "'stderr'", ",", "'stdout'", "]", ")", ":", "shell", "=", "dict", "(", ")", "shell", "[", "'rsp:InputStreams'", "]", "=", "\" \"", ".", "join",...
Opens the remote shell
[ "Opens", "the", "remote", "shell" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L56-L75
ianclegg/winrmlib
winrmlib/shell.py
CommandShell.run
def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False): """This function does something. :param command: The command to be executed :type name: str. :param arguments: A list of arguments to be passed to the command :type state: str. :returns: int -- the return code. :raises: AttributeError, KeyError iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism rather than replying on 80's style callbacks? """ logging.info('running command: ' + command) resource = ResourceLocator(CommandShell.ShellResource) resource.add_selector('ShellId', self.__shell_id) resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True) resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True) command = OrderedDict([('rsp:Command', command)]) command['rsp:Arguments'] = list(arguments) response = self.session.command(resource, {'rsp:CommandLine': command}) command_id = response['rsp:CommandResponse']['rsp:CommandId'] logging.info('receive command: ' + command_id) return command_id
python
def run(self, command, arguments=(), console_mode_stdin=True, skip_cmd_shell=False): """This function does something. :param command: The command to be executed :type name: str. :param arguments: A list of arguments to be passed to the command :type state: str. :returns: int -- the return code. :raises: AttributeError, KeyError iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism rather than replying on 80's style callbacks? """ logging.info('running command: ' + command) resource = ResourceLocator(CommandShell.ShellResource) resource.add_selector('ShellId', self.__shell_id) resource.add_option('WINRS_SKIP_CMD_SHELL', ['FALSE', 'TRUE'][bool(skip_cmd_shell)], True) resource.add_option('WINRS_CONSOLEMODE_STDIN', ['FALSE', 'TRUE'][bool(console_mode_stdin)], True) command = OrderedDict([('rsp:Command', command)]) command['rsp:Arguments'] = list(arguments) response = self.session.command(resource, {'rsp:CommandLine': command}) command_id = response['rsp:CommandResponse']['rsp:CommandId'] logging.info('receive command: ' + command_id) return command_id
[ "def", "run", "(", "self", ",", "command", ",", "arguments", "=", "(", ")", ",", "console_mode_stdin", "=", "True", ",", "skip_cmd_shell", "=", "False", ")", ":", "logging", ".", "info", "(", "'running command: '", "+", "command", ")", "resource", "=", "...
This function does something. :param command: The command to be executed :type name: str. :param arguments: A list of arguments to be passed to the command :type state: str. :returns: int -- the return code. :raises: AttributeError, KeyError iclegg: blocking i/o operations are slow, doesnt Python have a moden 'async' mechanism rather than replying on 80's style callbacks?
[ "This", "function", "does", "something", ".", ":", "param", "command", ":", "The", "command", "to", "be", "executed", ":", "type", "name", ":", "str", ".", ":", "param", "arguments", ":", "A", "list", "of", "arguments", "to", "be", "passed", "to", "the...
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L77-L101
ianclegg/winrmlib
winrmlib/shell.py
CommandShell.receive
def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60): """ Recieves data :param command_id: :param streams: :param command_timeout: :return: """ logging.info('receive command: ' + command_id) response_streams = dict.fromkeys(streams, '') (complete, exit_code) = self._receive_poll(command_id, response_streams) while not complete: (complete, exit_code) = self._receive_poll(command_id, response_streams) # This retains some compatibility with pywinrm if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']): return response_streams['stdout'], response_streams['stderr'], exit_code else: return response_streams, exit_code
python
def receive(self, command_id, streams=('stdout', 'stderr'), command_timeout=60): """ Recieves data :param command_id: :param streams: :param command_timeout: :return: """ logging.info('receive command: ' + command_id) response_streams = dict.fromkeys(streams, '') (complete, exit_code) = self._receive_poll(command_id, response_streams) while not complete: (complete, exit_code) = self._receive_poll(command_id, response_streams) # This retains some compatibility with pywinrm if sorted(response_streams.keys()) == sorted(['stderr', 'stdout']): return response_streams['stdout'], response_streams['stderr'], exit_code else: return response_streams, exit_code
[ "def", "receive", "(", "self", ",", "command_id", ",", "streams", "=", "(", "'stdout'", ",", "'stderr'", ")", ",", "command_timeout", "=", "60", ")", ":", "logging", ".", "info", "(", "'receive command: '", "+", "command_id", ")", "response_streams", "=", ...
Recieves data :param command_id: :param streams: :param command_timeout: :return:
[ "Recieves", "data", ":", "param", "command_id", ":", ":", "param", "streams", ":", ":", "param", "command_timeout", ":", ":", "return", ":" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L103-L121
ianclegg/winrmlib
winrmlib/shell.py
CommandShell._receive_poll
def _receive_poll(self, command_id, response_streams): """ Recieves data :param command_id: :param streams: :return: """ logging.info('receive command: ' + command_id) resource = ResourceLocator(CommandShell.ShellResource) resource.add_selector('ShellId', self.__shell_id) stream_attributes = {'#text': " ".join(response_streams.keys()), '@CommandId': command_id} receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}} try: response = self.session.recieve(resource, receive)['rsp:ReceiveResponse'] except Exception as e: return False, None # some responses will not include any output session_streams = response.get('rsp:Stream', ()) if not isinstance(session_streams, list): session_streams = [session_streams] for stream in session_streams: if stream['@CommandId'] == command_id and '#text' in stream: response_streams[stream['@Name']] += base64.b64decode(stream['#text']) # XPRESS Compression Testing # print "\\x".join("{:02x}".format(ord(c)) for c in base64.b64decode(stream['#text'])) # data = base64.b64decode(stream['#text']) # f = open('c:\\users\\developer\\temp\\data.bin', 'wb') # f.write(data) # f.close() # decode = api.compression.xpress_decode(data[4:]) done = response['rsp:CommandState']['@State'] == CommandShell.StateDone if done: exit_code = int(response['rsp:CommandState']['rsp:ExitCode']) else: exit_code = None return done, exit_code
python
def _receive_poll(self, command_id, response_streams): """ Recieves data :param command_id: :param streams: :return: """ logging.info('receive command: ' + command_id) resource = ResourceLocator(CommandShell.ShellResource) resource.add_selector('ShellId', self.__shell_id) stream_attributes = {'#text': " ".join(response_streams.keys()), '@CommandId': command_id} receive = {'rsp:Receive': {'rsp:DesiredStream': stream_attributes}} try: response = self.session.recieve(resource, receive)['rsp:ReceiveResponse'] except Exception as e: return False, None # some responses will not include any output session_streams = response.get('rsp:Stream', ()) if not isinstance(session_streams, list): session_streams = [session_streams] for stream in session_streams: if stream['@CommandId'] == command_id and '#text' in stream: response_streams[stream['@Name']] += base64.b64decode(stream['#text']) # XPRESS Compression Testing # print "\\x".join("{:02x}".format(ord(c)) for c in base64.b64decode(stream['#text'])) # data = base64.b64decode(stream['#text']) # f = open('c:\\users\\developer\\temp\\data.bin', 'wb') # f.write(data) # f.close() # decode = api.compression.xpress_decode(data[4:]) done = response['rsp:CommandState']['@State'] == CommandShell.StateDone if done: exit_code = int(response['rsp:CommandState']['rsp:ExitCode']) else: exit_code = None return done, exit_code
[ "def", "_receive_poll", "(", "self", ",", "command_id", ",", "response_streams", ")", ":", "logging", ".", "info", "(", "'receive command: '", "+", "command_id", ")", "resource", "=", "ResourceLocator", "(", "CommandShell", ".", "ShellResource", ")", "resource", ...
Recieves data :param command_id: :param streams: :return:
[ "Recieves", "data", ":", "param", "command_id", ":", ":", "param", "streams", ":", ":", "return", ":" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L123-L161
ianclegg/winrmlib
winrmlib/shell.py
CommandShell.close
def close(self): """ Closes pipe :return: """ resource = ResourceLocator(CommandShell.ShellResource) resource.add_selector('ShellId', self.__shell_id) self.session.delete(resource)
python
def close(self): """ Closes pipe :return: """ resource = ResourceLocator(CommandShell.ShellResource) resource.add_selector('ShellId', self.__shell_id) self.session.delete(resource)
[ "def", "close", "(", "self", ")", ":", "resource", "=", "ResourceLocator", "(", "CommandShell", ".", "ShellResource", ")", "resource", ".", "add_selector", "(", "'ShellId'", ",", "self", ".", "__shell_id", ")", "self", ".", "session", ".", "delete", "(", "...
Closes pipe :return:
[ "Closes", "pipe", ":", "return", ":" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/shell.py#L163-L170
nimeshkverma/mongo_joins
mongojoin/mongojoin.py
MongoJoin.change_dict_keys
def change_dict_keys(self, data_dict, prefix): """ Prefixes 'L_'/'R_' to the collection keys :param data_dict: dictionary which is to be altered :type data_dict: dict :param prefix: prefix to be attached before every key :type prefix: string :return dict_: dict """ keys = data_dict.keys() dummy_dict = copy.deepcopy(data_dict) changed_dict = {} for key in keys: changed_dict[prefix + str(key)] = dummy_dict.pop(key) return changed_dict
python
def change_dict_keys(self, data_dict, prefix): """ Prefixes 'L_'/'R_' to the collection keys :param data_dict: dictionary which is to be altered :type data_dict: dict :param prefix: prefix to be attached before every key :type prefix: string :return dict_: dict """ keys = data_dict.keys() dummy_dict = copy.deepcopy(data_dict) changed_dict = {} for key in keys: changed_dict[prefix + str(key)] = dummy_dict.pop(key) return changed_dict
[ "def", "change_dict_keys", "(", "self", ",", "data_dict", ",", "prefix", ")", ":", "keys", "=", "data_dict", ".", "keys", "(", ")", "dummy_dict", "=", "copy", ".", "deepcopy", "(", "data_dict", ")", "changed_dict", "=", "{", "}", "for", "key", "in", "k...
Prefixes 'L_'/'R_' to the collection keys :param data_dict: dictionary which is to be altered :type data_dict: dict :param prefix: prefix to be attached before every key :type prefix: string :return dict_: dict
[ "Prefixes", "L_", "/", "R_", "to", "the", "collection", "keys", ":", "param", "data_dict", ":", "dictionary", "which", "is", "to", "be", "altered", ":", "type", "data_dict", ":", "dict" ]
train
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongojoin.py#L18-L35
nimeshkverma/mongo_joins
mongojoin/mongojoin.py
MongoJoin.generate_join_docs_list
def generate_join_docs_list(self, left_collection_list, right_collection_list): """ Helper function for merge_join_docs :param left_collection_list: Left Collection to be joined :type left_collection_list: MongoCollection :param right_collection_list: Right Collection to be joined :type right_collection_list: MongoCollection :return joined_docs: List of docs post join """ joined_docs = [] if (len(left_collection_list) != 0) and (len(right_collection_list) != 0): for left_doc in left_collection_list: for right_doc in right_collection_list: l_dict = self.change_dict_keys(left_doc, 'L_') r_dict = self.change_dict_keys(right_doc, 'R_') joined_docs.append(dict(l_dict, **r_dict)) elif left_collection_list: for left_doc in left_collection_list: joined_docs.append(self.change_dict_keys(left_doc, 'L_')) else: for right_doc in right_collection_list: joined_docs.append(self.change_dict_keys(right_doc, 'R_')) return joined_docs
python
def generate_join_docs_list(self, left_collection_list, right_collection_list): """ Helper function for merge_join_docs :param left_collection_list: Left Collection to be joined :type left_collection_list: MongoCollection :param right_collection_list: Right Collection to be joined :type right_collection_list: MongoCollection :return joined_docs: List of docs post join """ joined_docs = [] if (len(left_collection_list) != 0) and (len(right_collection_list) != 0): for left_doc in left_collection_list: for right_doc in right_collection_list: l_dict = self.change_dict_keys(left_doc, 'L_') r_dict = self.change_dict_keys(right_doc, 'R_') joined_docs.append(dict(l_dict, **r_dict)) elif left_collection_list: for left_doc in left_collection_list: joined_docs.append(self.change_dict_keys(left_doc, 'L_')) else: for right_doc in right_collection_list: joined_docs.append(self.change_dict_keys(right_doc, 'R_')) return joined_docs
[ "def", "generate_join_docs_list", "(", "self", ",", "left_collection_list", ",", "right_collection_list", ")", ":", "joined_docs", "=", "[", "]", "if", "(", "len", "(", "left_collection_list", ")", "!=", "0", ")", "and", "(", "len", "(", "right_collection_list",...
Helper function for merge_join_docs :param left_collection_list: Left Collection to be joined :type left_collection_list: MongoCollection :param right_collection_list: Right Collection to be joined :type right_collection_list: MongoCollection :return joined_docs: List of docs post join
[ "Helper", "function", "for", "merge_join_docs", ":", "param", "left_collection_list", ":", "Left", "Collection", "to", "be", "joined", ":", "type", "left_collection_list", ":", "MongoCollection" ]
train
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongojoin.py#L37-L63
nimeshkverma/mongo_joins
mongojoin/mongojoin.py
MongoJoin.merge_join_docs
def merge_join_docs(self, keys): """ Merges the final list of docs :param left_collection_list: :type left_collection_list: MongoCollection :return join: dict """ join = defaultdict(list) for key in keys: join[key] = self.generate_join_docs_list( self.collections_data['left'].get(key, []), self.collections_data['right'].get(key, [])) return join
python
def merge_join_docs(self, keys): """ Merges the final list of docs :param left_collection_list: :type left_collection_list: MongoCollection :return join: dict """ join = defaultdict(list) for key in keys: join[key] = self.generate_join_docs_list( self.collections_data['left'].get(key, []), self.collections_data['right'].get(key, [])) return join
[ "def", "merge_join_docs", "(", "self", ",", "keys", ")", ":", "join", "=", "defaultdict", "(", "list", ")", "for", "key", "in", "keys", ":", "join", "[", "key", "]", "=", "self", ".", "generate_join_docs_list", "(", "self", ".", "collections_data", "[", ...
Merges the final list of docs :param left_collection_list: :type left_collection_list: MongoCollection :return join: dict
[ "Merges", "the", "final", "list", "of", "docs", ":", "param", "left_collection_list", ":", ":", "type", "left_collection_list", ":", "MongoCollection" ]
train
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongojoin.py#L65-L79
nimeshkverma/mongo_joins
mongojoin/mongojoin.py
MongoJoin.inner
def inner(self): """ Performs Inner Join :return inner_join: dict """ self.get_collections_data() inner_join = self.merge_join_docs(set(self.collections_data['left'].keys()) & set( self.collections_data['right'].keys())) return inner_join
python
def inner(self): """ Performs Inner Join :return inner_join: dict """ self.get_collections_data() inner_join = self.merge_join_docs(set(self.collections_data['left'].keys()) & set( self.collections_data['right'].keys())) return inner_join
[ "def", "inner", "(", "self", ")", ":", "self", ".", "get_collections_data", "(", ")", "inner_join", "=", "self", ".", "merge_join_docs", "(", "set", "(", "self", ".", "collections_data", "[", "'left'", "]", ".", "keys", "(", ")", ")", "&", "set", "(", ...
Performs Inner Join :return inner_join: dict
[ "Performs", "Inner", "Join", ":", "return", "inner_join", ":", "dict" ]
train
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongojoin.py#L81-L91
nimeshkverma/mongo_joins
mongojoin/mongojoin.py
MongoJoin.left_outer
def left_outer(self): """ Performs Left Outer Join :return left_outer: dict """ self.get_collections_data() left_outer_join = self.merge_join_docs( set(self.collections_data['left'].keys())) return left_outer_join
python
def left_outer(self): """ Performs Left Outer Join :return left_outer: dict """ self.get_collections_data() left_outer_join = self.merge_join_docs( set(self.collections_data['left'].keys())) return left_outer_join
[ "def", "left_outer", "(", "self", ")", ":", "self", ".", "get_collections_data", "(", ")", "left_outer_join", "=", "self", ".", "merge_join_docs", "(", "set", "(", "self", ".", "collections_data", "[", "'left'", "]", ".", "keys", "(", ")", ")", ")", "ret...
Performs Left Outer Join :return left_outer: dict
[ "Performs", "Left", "Outer", "Join", ":", "return", "left_outer", ":", "dict" ]
train
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongojoin.py#L93-L101
nimeshkverma/mongo_joins
mongojoin/mongojoin.py
MongoJoin.right_outer
def right_outer(self): """ Performs Right Outer Join :return right_outer: dict """ self.get_collections_data() right_outer_join = self.merge_join_docs( set(self.collections_data['right'].keys())) return right_outer_join
python
def right_outer(self): """ Performs Right Outer Join :return right_outer: dict """ self.get_collections_data() right_outer_join = self.merge_join_docs( set(self.collections_data['right'].keys())) return right_outer_join
[ "def", "right_outer", "(", "self", ")", ":", "self", ".", "get_collections_data", "(", ")", "right_outer_join", "=", "self", ".", "merge_join_docs", "(", "set", "(", "self", ".", "collections_data", "[", "'right'", "]", ".", "keys", "(", ")", ")", ")", "...
Performs Right Outer Join :return right_outer: dict
[ "Performs", "Right", "Outer", "Join", ":", "return", "right_outer", ":", "dict" ]
train
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongojoin.py#L103-L111
nimeshkverma/mongo_joins
mongojoin/mongojoin.py
MongoJoin.full_outer
def full_outer(self): """ Performs Full Outer Join :return full_outer: dict """ self.get_collections_data() full_outer_join = self.merge_join_docs( set(self.collections_data['left'].keys()) | set(self.collections_data['right'].keys())) return full_outer_join
python
def full_outer(self): """ Performs Full Outer Join :return full_outer: dict """ self.get_collections_data() full_outer_join = self.merge_join_docs( set(self.collections_data['left'].keys()) | set(self.collections_data['right'].keys())) return full_outer_join
[ "def", "full_outer", "(", "self", ")", ":", "self", ".", "get_collections_data", "(", ")", "full_outer_join", "=", "self", ".", "merge_join_docs", "(", "set", "(", "self", ".", "collections_data", "[", "'left'", "]", ".", "keys", "(", ")", ")", "|", "set...
Performs Full Outer Join :return full_outer: dict
[ "Performs", "Full", "Outer", "Join", ":", "return", "full_outer", ":", "dict" ]
train
https://github.com/nimeshkverma/mongo_joins/blob/64c416c3402d5906f707b73867fbc55e28d5ec37/mongojoin/mongojoin.py#L113-L121
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
needs_auth
def needs_auth(meth): """Wraps a method of :class:`TwitchSession` and raises an :class:`exceptions.NotAuthorizedError` if before calling the method, the session isn't authorized. :param meth: :type meth: :returns: the wrapped method :rtype: Method :raises: None """ @functools.wraps(meth) def wrapped(*args, **kwargs): if not args[0].authorized: raise exceptions.NotAuthorizedError('Please login first!') return meth(*args, **kwargs) return wrapped
python
def needs_auth(meth): """Wraps a method of :class:`TwitchSession` and raises an :class:`exceptions.NotAuthorizedError` if before calling the method, the session isn't authorized. :param meth: :type meth: :returns: the wrapped method :rtype: Method :raises: None """ @functools.wraps(meth) def wrapped(*args, **kwargs): if not args[0].authorized: raise exceptions.NotAuthorizedError('Please login first!') return meth(*args, **kwargs) return wrapped
[ "def", "needs_auth", "(", "meth", ")", ":", "@", "functools", ".", "wraps", "(", "meth", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "args", "[", "0", "]", ".", "authorized", ":", "raise", "exceptions", ...
Wraps a method of :class:`TwitchSession` and raises an :class:`exceptions.NotAuthorizedError` if before calling the method, the session isn't authorized. :param meth: :type meth: :returns: the wrapped method :rtype: Method :raises: None
[ "Wraps", "a", "method", "of", ":", "class", ":", "TwitchSession", "and", "raises", "an", ":", "class", ":", "exceptions", ".", "NotAuthorizedError", "if", "before", "calling", "the", "method", "the", "session", "isn", "t", "authorized", "." ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L49-L65
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
OAuthSession.request
def request(self, method, url, **kwargs): """Constructs a :class:`requests.Request`, prepares it and sends it. Raises HTTPErrors by default. :param method: method for the new :class:`Request` object. :type method: :class:`str` :param url: URL for the new :class:`Request` object. :type url: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ if oauthlib.oauth2.is_secure_transport(url): m = super(OAuthSession, self).request else: m = super(requests_oauthlib.OAuth2Session, self).request log.debug("%s \"%s\" with %s", method, url, kwargs) response = m(method, url, **kwargs) response.raise_for_status() return response
python
def request(self, method, url, **kwargs): """Constructs a :class:`requests.Request`, prepares it and sends it. Raises HTTPErrors by default. :param method: method for the new :class:`Request` object. :type method: :class:`str` :param url: URL for the new :class:`Request` object. :type url: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ if oauthlib.oauth2.is_secure_transport(url): m = super(OAuthSession, self).request else: m = super(requests_oauthlib.OAuth2Session, self).request log.debug("%s \"%s\" with %s", method, url, kwargs) response = m(method, url, **kwargs) response.raise_for_status() return response
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "oauthlib", ".", "oauth2", ".", "is_secure_transport", "(", "url", ")", ":", "m", "=", "super", "(", "OAuthSession", ",", "self", ")", ".", "request", ...
Constructs a :class:`requests.Request`, prepares it and sends it. Raises HTTPErrors by default. :param method: method for the new :class:`Request` object. :type method: :class:`str` :param url: URL for the new :class:`Request` object. :type url: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
[ "Constructs", "a", ":", "class", ":", "requests", ".", "Request", "prepares", "it", "and", "sends", "it", ".", "Raises", "HTTPErrors", "by", "default", "." ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L89-L109
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
OAuthSession.start_login_server
def start_login_server(self, ): """Start a server that will get a request from a user logging in. This uses the Implicit Grant Flow of OAuth2. The user is asked to login to twitch and grant PyTwitcher authorization. Once the user agrees, he is redirected to an url. This server will respond to that url and get the oauth token. The server serves in another thread. To shut him down, call :meth:`TwitchSession.shutdown_login_server`. This sets the :data:`TwitchSession.login_server`, :data:`TwitchSession.login_thread` variables. :returns: The created server :rtype: :class:`BaseHTTPServer.HTTPServer` :raises: None """ self.login_server = oauth.LoginServer(session=self) target = self.login_server.serve_forever self.login_thread = threading.Thread(target=target) self.login_thread.setDaemon(True) log.debug('Starting login server thread.') self.login_thread.start()
python
def start_login_server(self, ): """Start a server that will get a request from a user logging in. This uses the Implicit Grant Flow of OAuth2. The user is asked to login to twitch and grant PyTwitcher authorization. Once the user agrees, he is redirected to an url. This server will respond to that url and get the oauth token. The server serves in another thread. To shut him down, call :meth:`TwitchSession.shutdown_login_server`. This sets the :data:`TwitchSession.login_server`, :data:`TwitchSession.login_thread` variables. :returns: The created server :rtype: :class:`BaseHTTPServer.HTTPServer` :raises: None """ self.login_server = oauth.LoginServer(session=self) target = self.login_server.serve_forever self.login_thread = threading.Thread(target=target) self.login_thread.setDaemon(True) log.debug('Starting login server thread.') self.login_thread.start()
[ "def", "start_login_server", "(", "self", ",", ")", ":", "self", ".", "login_server", "=", "oauth", ".", "LoginServer", "(", "session", "=", "self", ")", "target", "=", "self", ".", "login_server", ".", "serve_forever", "self", ".", "login_thread", "=", "t...
Start a server that will get a request from a user logging in. This uses the Implicit Grant Flow of OAuth2. The user is asked to login to twitch and grant PyTwitcher authorization. Once the user agrees, he is redirected to an url. This server will respond to that url and get the oauth token. The server serves in another thread. To shut him down, call :meth:`TwitchSession.shutdown_login_server`. This sets the :data:`TwitchSession.login_server`, :data:`TwitchSession.login_thread` variables. :returns: The created server :rtype: :class:`BaseHTTPServer.HTTPServer` :raises: None
[ "Start", "a", "server", "that", "will", "get", "a", "request", "from", "a", "user", "logging", "in", "." ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L111-L134
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
OAuthSession.shutdown_login_server
def shutdown_login_server(self, ): """Shutdown the login server and thread :returns: None :rtype: None :raises: None """ log.debug('Shutting down the login server thread.') self.login_server.shutdown() self.login_server.server_close() self.login_thread.join()
python
def shutdown_login_server(self, ): """Shutdown the login server and thread :returns: None :rtype: None :raises: None """ log.debug('Shutting down the login server thread.') self.login_server.shutdown() self.login_server.server_close() self.login_thread.join()
[ "def", "shutdown_login_server", "(", "self", ",", ")", ":", "log", ".", "debug", "(", "'Shutting down the login server thread.'", ")", "self", ".", "login_server", ".", "shutdown", "(", ")", "self", ".", "login_server", ".", "server_close", "(", ")", "self", "...
Shutdown the login server and thread :returns: None :rtype: None :raises: None
[ "Shutdown", "the", "login", "server", "and", "thread" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L136-L146
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.token
def token(self, token): """Set the oauth token and the current_user :param token: the oauth token :type token: :class:`dict` :returns: None :rtype: None :raises: None """ self._token = token if token: self.current_user = self.query_login_user()
python
def token(self, token): """Set the oauth token and the current_user :param token: the oauth token :type token: :class:`dict` :returns: None :rtype: None :raises: None """ self._token = token if token: self.current_user = self.query_login_user()
[ "def", "token", "(", "self", ",", "token", ")", ":", "self", ".", "_token", "=", "token", "if", "token", ":", "self", ".", "current_user", "=", "self", ".", "query_login_user", "(", ")" ]
Set the oauth token and the current_user :param token: the oauth token :type token: :class:`dict` :returns: None :rtype: None :raises: None
[ "Set", "the", "oauth", "token", "and", "the", "current_user" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L202-L213
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.kraken_request
def kraken_request(self, method, endpoint, **kwargs): """Make a request to one of the kraken api endpoints. Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`. Also the client id from :data:`CLIENT_ID` will be set. The url will be constructed of :data:`TWITCH_KRAKENURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the kraken api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ url = TWITCH_KRAKENURL + endpoint headers = kwargs.setdefault('headers', {}) headers['Accept'] = TWITCH_HEADER_ACCEPT headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits return self.request(method, url, **kwargs)
python
def kraken_request(self, method, endpoint, **kwargs): """Make a request to one of the kraken api endpoints. Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`. Also the client id from :data:`CLIENT_ID` will be set. The url will be constructed of :data:`TWITCH_KRAKENURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the kraken api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ url = TWITCH_KRAKENURL + endpoint headers = kwargs.setdefault('headers', {}) headers['Accept'] = TWITCH_HEADER_ACCEPT headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits return self.request(method, url, **kwargs)
[ "def", "kraken_request", "(", "self", ",", "method", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "url", "=", "TWITCH_KRAKENURL", "+", "endpoint", "headers", "=", "kwargs", ".", "setdefault", "(", "'headers'", ",", "{", "}", ")", "headers", "[", ...
Make a request to one of the kraken api endpoints. Headers are automatically set to accept :data:`TWITCH_HEADER_ACCEPT`. Also the client id from :data:`CLIENT_ID` will be set. The url will be constructed of :data:`TWITCH_KRAKENURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the kraken api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
[ "Make", "a", "request", "to", "one", "of", "the", "kraken", "api", "endpoints", "." ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L215-L237
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.usher_request
def usher_request(self, method, endpoint, **kwargs): """Make a request to one of the usher api endpoints. The url will be constructed of :data:`TWITCH_USHERURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the usher api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ url = TWITCH_USHERURL + endpoint return self.request(method, url, **kwargs)
python
def usher_request(self, method, endpoint, **kwargs): """Make a request to one of the usher api endpoints. The url will be constructed of :data:`TWITCH_USHERURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the usher api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ url = TWITCH_USHERURL + endpoint return self.request(method, url, **kwargs)
[ "def", "usher_request", "(", "self", ",", "method", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "url", "=", "TWITCH_USHERURL", "+", "endpoint", "return", "self", ".", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")" ]
Make a request to one of the usher api endpoints. The url will be constructed of :data:`TWITCH_USHERURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the usher api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
[ "Make", "a", "request", "to", "one", "of", "the", "usher", "api", "endpoints", "." ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L239-L256
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.oldapi_request
def oldapi_request(self, method, endpoint, **kwargs): """Make a request to one of the old api endpoints. The url will be constructed of :data:`TWITCH_APIURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the old api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ headers = kwargs.setdefault('headers', {}) headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits url = TWITCH_APIURL + endpoint return self.request(method, url, **kwargs)
python
def oldapi_request(self, method, endpoint, **kwargs): """Make a request to one of the old api endpoints. The url will be constructed of :data:`TWITCH_APIURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the old api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ headers = kwargs.setdefault('headers', {}) headers['Client-ID'] = CLIENT_ID # https://github.com/justintv/Twitch-API#rate-limits url = TWITCH_APIURL + endpoint return self.request(method, url, **kwargs)
[ "def", "oldapi_request", "(", "self", ",", "method", ",", "endpoint", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "kwargs", ".", "setdefault", "(", "'headers'", ",", "{", "}", ")", "headers", "[", "'Client-ID'", "]", "=", "CLIENT_ID", "# https://g...
Make a request to one of the old api endpoints. The url will be constructed of :data:`TWITCH_APIURL` and the given endpoint. :param method: the request method :type method: :class:`str` :param endpoint: the endpoint of the old api. The base url is automatically provided. :type endpoint: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
[ "Make", "a", "request", "to", "one", "of", "the", "old", "api", "endpoints", "." ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L258-L277
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.fetch_viewers
def fetch_viewers(self, game): """Query the viewers and channels of the given game and set them on the object :returns: the given game :rtype: :class:`models.Game` :raises: None """ r = self.kraken_request('GET', 'streams/summary', params={'game': game.name}).json() game.viewers = r['viewers'] game.channels = r['channels'] return game
python
def fetch_viewers(self, game): """Query the viewers and channels of the given game and set them on the object :returns: the given game :rtype: :class:`models.Game` :raises: None """ r = self.kraken_request('GET', 'streams/summary', params={'game': game.name}).json() game.viewers = r['viewers'] game.channels = r['channels'] return game
[ "def", "fetch_viewers", "(", "self", ",", "game", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'streams/summary'", ",", "params", "=", "{", "'game'", ":", "game", ".", "name", "}", ")", ".", "json", "(", ")", "game", ".", ...
Query the viewers and channels of the given game and set them on the object :returns: the given game :rtype: :class:`models.Game` :raises: None
[ "Query", "the", "viewers", "and", "channels", "of", "the", "given", "game", "and", "set", "them", "on", "the", "object" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L279-L291
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.search_games
def search_games(self, query, live=True): """Search for games that are similar to the query :param query: the query string :type query: :class:`str` :param live: If true, only returns games that are live on at least one channel :type live: :class:`bool` :returns: A list of games :rtype: :class:`list` of :class:`models.Game` instances :raises: None """ r = self.kraken_request('GET', 'search/games', params={'query': query, 'type': 'suggest', 'live': live}) games = models.Game.wrap_search(r) for g in games: self.fetch_viewers(g) return games
python
def search_games(self, query, live=True): """Search for games that are similar to the query :param query: the query string :type query: :class:`str` :param live: If true, only returns games that are live on at least one channel :type live: :class:`bool` :returns: A list of games :rtype: :class:`list` of :class:`models.Game` instances :raises: None """ r = self.kraken_request('GET', 'search/games', params={'query': query, 'type': 'suggest', 'live': live}) games = models.Game.wrap_search(r) for g in games: self.fetch_viewers(g) return games
[ "def", "search_games", "(", "self", ",", "query", ",", "live", "=", "True", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'search/games'", ",", "params", "=", "{", "'query'", ":", "query", ",", "'type'", ":", "'suggest'", ",", ...
Search for games that are similar to the query :param query: the query string :type query: :class:`str` :param live: If true, only returns games that are live on at least one channel :type live: :class:`bool` :returns: A list of games :rtype: :class:`list` of :class:`models.Game` instances :raises: None
[ "Search", "for", "games", "that", "are", "similar", "to", "the", "query" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L293-L312
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.top_games
def top_games(self, limit=10, offset=0): """Return the current top games :param limit: the maximum amount of top games to query :type limit: :class:`int` :param offset: the offset in the top games :type offset: :class:`int` :returns: a list of top games :rtype: :class:`list` of :class:`models.Game` :raises: None """ r = self.kraken_request('GET', 'games/top', params={'limit': limit, 'offset': offset}) return models.Game.wrap_topgames(r)
python
def top_games(self, limit=10, offset=0): """Return the current top games :param limit: the maximum amount of top games to query :type limit: :class:`int` :param offset: the offset in the top games :type offset: :class:`int` :returns: a list of top games :rtype: :class:`list` of :class:`models.Game` :raises: None """ r = self.kraken_request('GET', 'games/top', params={'limit': limit, 'offset': offset}) return models.Game.wrap_topgames(r)
[ "def", "top_games", "(", "self", ",", "limit", "=", "10", ",", "offset", "=", "0", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'games/top'", ",", "params", "=", "{", "'limit'", ":", "limit", ",", "'offset'", ":", "offset", ...
Return the current top games :param limit: the maximum amount of top games to query :type limit: :class:`int` :param offset: the offset in the top games :type offset: :class:`int` :returns: a list of top games :rtype: :class:`list` of :class:`models.Game` :raises: None
[ "Return", "the", "current", "top", "games" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L314-L328
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_game
def get_game(self, name): """Get the game instance for a game name :param name: the name of the game :type name: :class:`str` :returns: the game instance :rtype: :class:`models.Game` | None :raises: None """ games = self.search_games(query=name, live=False) for g in games: if g.name == name: return g
python
def get_game(self, name): """Get the game instance for a game name :param name: the name of the game :type name: :class:`str` :returns: the game instance :rtype: :class:`models.Game` | None :raises: None """ games = self.search_games(query=name, live=False) for g in games: if g.name == name: return g
[ "def", "get_game", "(", "self", ",", "name", ")", ":", "games", "=", "self", ".", "search_games", "(", "query", "=", "name", ",", "live", "=", "False", ")", "for", "g", "in", "games", ":", "if", "g", ".", "name", "==", "name", ":", "return", "g" ...
Get the game instance for a game name :param name: the name of the game :type name: :class:`str` :returns: the game instance :rtype: :class:`models.Game` | None :raises: None
[ "Get", "the", "game", "instance", "for", "a", "game", "name" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L330-L342
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_channel
def get_channel(self, name): """Return the channel for the given name :param name: the channel name :type name: :class:`str` :returns: the model instance :rtype: :class:`models.Channel` :raises: None """ r = self.kraken_request('GET', 'channels/' + name) return models.Channel.wrap_get_channel(r)
python
def get_channel(self, name): """Return the channel for the given name :param name: the channel name :type name: :class:`str` :returns: the model instance :rtype: :class:`models.Channel` :raises: None """ r = self.kraken_request('GET', 'channels/' + name) return models.Channel.wrap_get_channel(r)
[ "def", "get_channel", "(", "self", ",", "name", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'channels/'", "+", "name", ")", "return", "models", ".", "Channel", ".", "wrap_get_channel", "(", "r", ")" ]
Return the channel for the given name :param name: the channel name :type name: :class:`str` :returns: the model instance :rtype: :class:`models.Channel` :raises: None
[ "Return", "the", "channel", "for", "the", "given", "name" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L344-L354
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.search_channels
def search_channels(self, query, limit=25, offset=0): """Search for channels and return them :param query: the query string :type query: :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of channels :rtype: :class:`list` of :class:`models.Channel` instances :raises: None """ r = self.kraken_request('GET', 'search/channels', params={'query': query, 'limit': limit, 'offset': offset}) return models.Channel.wrap_search(r)
python
def search_channels(self, query, limit=25, offset=0): """Search for channels and return them :param query: the query string :type query: :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of channels :rtype: :class:`list` of :class:`models.Channel` instances :raises: None """ r = self.kraken_request('GET', 'search/channels', params={'query': query, 'limit': limit, 'offset': offset}) return models.Channel.wrap_search(r)
[ "def", "search_channels", "(", "self", ",", "query", ",", "limit", "=", "25", ",", "offset", "=", "0", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'search/channels'", ",", "params", "=", "{", "'query'", ":", "query", ",", "...
Search for channels and return them :param query: the query string :type query: :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of channels :rtype: :class:`list` of :class:`models.Channel` instances :raises: None
[ "Search", "for", "channels", "and", "return", "them" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L356-L373
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_stream
def get_stream(self, channel): """Return the stream of the given channel :param channel: the channel that is broadcasting. Either name or models.Channel instance :type channel: :class:`str` | :class:`models.Channel` :returns: the stream or None, if the channel is offline :rtype: :class:`models.Stream` | None :raises: None """ if isinstance(channel, models.Channel): channel = channel.name r = self.kraken_request('GET', 'streams/' + channel) return models.Stream.wrap_get_stream(r)
python
def get_stream(self, channel): """Return the stream of the given channel :param channel: the channel that is broadcasting. Either name or models.Channel instance :type channel: :class:`str` | :class:`models.Channel` :returns: the stream or None, if the channel is offline :rtype: :class:`models.Stream` | None :raises: None """ if isinstance(channel, models.Channel): channel = channel.name r = self.kraken_request('GET', 'streams/' + channel) return models.Stream.wrap_get_stream(r)
[ "def", "get_stream", "(", "self", ",", "channel", ")", ":", "if", "isinstance", "(", "channel", ",", "models", ".", "Channel", ")", ":", "channel", "=", "channel", ".", "name", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'streams/'", "...
Return the stream of the given channel :param channel: the channel that is broadcasting. Either name or models.Channel instance :type channel: :class:`str` | :class:`models.Channel` :returns: the stream or None, if the channel is offline :rtype: :class:`models.Stream` | None :raises: None
[ "Return", "the", "stream", "of", "the", "given", "channel" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L375-L389
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_streams
def get_streams(self, game=None, channels=None, limit=25, offset=0): """Return a list of streams queried by a number of parameters sorted by number of viewers descending :param game: the game or name of the game :type game: :class:`str` | :class:`models.Game` :param channels: list of models.Channels or channel names (can be mixed) :type channels: :class:`list` of :class:`models.Channel` or :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` :raises: None """ if isinstance(game, models.Game): game = game.name channelnames = [] cparam = None if channels: for c in channels: if isinstance(c, models.Channel): c = c.name channelnames.append(c) cparam = ','.join(channelnames) params = {'limit': limit, 'offset': offset, 'game': game, 'channel': cparam} r = self.kraken_request('GET', 'streams', params=params) return models.Stream.wrap_search(r)
python
def get_streams(self, game=None, channels=None, limit=25, offset=0): """Return a list of streams queried by a number of parameters sorted by number of viewers descending :param game: the game or name of the game :type game: :class:`str` | :class:`models.Game` :param channels: list of models.Channels or channel names (can be mixed) :type channels: :class:`list` of :class:`models.Channel` or :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` :raises: None """ if isinstance(game, models.Game): game = game.name channelnames = [] cparam = None if channels: for c in channels: if isinstance(c, models.Channel): c = c.name channelnames.append(c) cparam = ','.join(channelnames) params = {'limit': limit, 'offset': offset, 'game': game, 'channel': cparam} r = self.kraken_request('GET', 'streams', params=params) return models.Stream.wrap_search(r)
[ "def", "get_streams", "(", "self", ",", "game", "=", "None", ",", "channels", "=", "None", ",", "limit", "=", "25", ",", "offset", "=", "0", ")", ":", "if", "isinstance", "(", "game", ",", "models", ".", "Game", ")", ":", "game", "=", "game", "."...
Return a list of streams queried by a number of parameters sorted by number of viewers descending :param game: the game or name of the game :type game: :class:`str` | :class:`models.Game` :param channels: list of models.Channels or channel names (can be mixed) :type channels: :class:`list` of :class:`models.Channel` or :class:`str` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` :raises: None
[ "Return", "a", "list", "of", "streams", "queried", "by", "a", "number", "of", "parameters", "sorted", "by", "number", "of", "viewers", "descending" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L391-L425
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.search_streams
def search_streams(self, query, hls=False, limit=25, offset=0): """Search for streams and return them :param query: the query string :type query: :class:`str` :param hls: If true, only return streams that have hls stream :type hls: :class:`bool` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` instances :raises: None """ r = self.kraken_request('GET', 'search/streams', params={'query': query, 'hls': hls, 'limit': limit, 'offset': offset}) return models.Stream.wrap_search(r)
python
def search_streams(self, query, hls=False, limit=25, offset=0): """Search for streams and return them :param query: the query string :type query: :class:`str` :param hls: If true, only return streams that have hls stream :type hls: :class:`bool` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` instances :raises: None """ r = self.kraken_request('GET', 'search/streams', params={'query': query, 'hls': hls, 'limit': limit, 'offset': offset}) return models.Stream.wrap_search(r)
[ "def", "search_streams", "(", "self", ",", "query", ",", "hls", "=", "False", ",", "limit", "=", "25", ",", "offset", "=", "0", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'search/streams'", ",", "params", "=", "{", "'query...
Search for streams and return them :param query: the query string :type query: :class:`str` :param hls: If true, only return streams that have hls stream :type hls: :class:`bool` :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list` of :class:`models.Stream` instances :raises: None
[ "Search", "for", "streams", "and", "return", "them" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L427-L447
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.followed_streams
def followed_streams(self, limit=25, offset=0): """Return the streams the current user follows. Needs authorization ``user_read``. :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list`of :class:`models.Stream` instances :raises: :class:`exceptions.NotAuthorizedError` """ r = self.kraken_request('GET', 'streams/followed', params={'limit': limit, 'offset': offset}) return models.Stream.wrap_search(r)
python
def followed_streams(self, limit=25, offset=0): """Return the streams the current user follows. Needs authorization ``user_read``. :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list`of :class:`models.Stream` instances :raises: :class:`exceptions.NotAuthorizedError` """ r = self.kraken_request('GET', 'streams/followed', params={'limit': limit, 'offset': offset}) return models.Stream.wrap_search(r)
[ "def", "followed_streams", "(", "self", ",", "limit", "=", "25", ",", "offset", "=", "0", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'streams/followed'", ",", "params", "=", "{", "'limit'", ":", "limit", ",", "'offset'", ":"...
Return the streams the current user follows. Needs authorization ``user_read``. :param limit: maximum number of results :type limit: :class:`int` :param offset: offset for pagination :type offset: :class:`int` :returns: A list of streams :rtype: :class:`list`of :class:`models.Stream` instances :raises: :class:`exceptions.NotAuthorizedError`
[ "Return", "the", "streams", "the", "current", "user", "follows", "." ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L450-L466
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_user
def get_user(self, name): """Get the user for the given name :param name: The username :type name: :class:`str` :returns: the user instance :rtype: :class:`models.User` :raises: None """ r = self.kraken_request('GET', 'user/' + name) return models.User.wrap_get_user(r)
python
def get_user(self, name): """Get the user for the given name :param name: The username :type name: :class:`str` :returns: the user instance :rtype: :class:`models.User` :raises: None """ r = self.kraken_request('GET', 'user/' + name) return models.User.wrap_get_user(r)
[ "def", "get_user", "(", "self", ",", "name", ")", ":", "r", "=", "self", ".", "kraken_request", "(", "'GET'", ",", "'user/'", "+", "name", ")", "return", "models", ".", "User", ".", "wrap_get_user", "(", "r", ")" ]
Get the user for the given name :param name: The username :type name: :class:`str` :returns: the user instance :rtype: :class:`models.User` :raises: None
[ "Get", "the", "user", "for", "the", "given", "name" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L468-L478
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_playlist
def get_playlist(self, channel): """Return the playlist for the given channel :param channel: the channel :type channel: :class:`models.Channel` | :class:`str` :returns: the playlist :rtype: :class:`m3u8.M3U8` :raises: :class:`requests.HTTPError` if channel is offline. """ if isinstance(channel, models.Channel): channel = channel.name token, sig = self.get_channel_access_token(channel) params = {'token': token, 'sig': sig, 'allow_audio_only': True, 'allow_source': True} r = self.usher_request( 'GET', 'channel/hls/%s.m3u8' % channel, params=params) playlist = m3u8.loads(r.text) return playlist
python
def get_playlist(self, channel): """Return the playlist for the given channel :param channel: the channel :type channel: :class:`models.Channel` | :class:`str` :returns: the playlist :rtype: :class:`m3u8.M3U8` :raises: :class:`requests.HTTPError` if channel is offline. """ if isinstance(channel, models.Channel): channel = channel.name token, sig = self.get_channel_access_token(channel) params = {'token': token, 'sig': sig, 'allow_audio_only': True, 'allow_source': True} r = self.usher_request( 'GET', 'channel/hls/%s.m3u8' % channel, params=params) playlist = m3u8.loads(r.text) return playlist
[ "def", "get_playlist", "(", "self", ",", "channel", ")", ":", "if", "isinstance", "(", "channel", ",", "models", ".", "Channel", ")", ":", "channel", "=", "channel", ".", "name", "token", ",", "sig", "=", "self", ".", "get_channel_access_token", "(", "ch...
Return the playlist for the given channel :param channel: the channel :type channel: :class:`models.Channel` | :class:`str` :returns: the playlist :rtype: :class:`m3u8.M3U8` :raises: :class:`requests.HTTPError` if channel is offline.
[ "Return", "the", "playlist", "for", "the", "given", "channel" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L491-L510
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_quality_options
def get_quality_options(self, channel): """Get the available quality options for streams of the given channel Possible values in the list: * source * high * medium * low * mobile * audio :param channel: the channel or channel name :type channel: :class:`models.Channel` | :class:`str` :returns: list of quality options :rtype: :class:`list` of :class:`str` :raises: :class:`requests.HTTPError` if channel is offline. """ optionmap = {'chunked': 'source', 'high': 'high', 'medium': 'medium', 'low': 'low', 'mobile': 'mobile', 'audio_only': 'audio'} p = self.get_playlist(channel) options = [] for pl in p.playlists: q = pl.media[0].group_id options.append(optionmap[q]) return options
python
def get_quality_options(self, channel): """Get the available quality options for streams of the given channel Possible values in the list: * source * high * medium * low * mobile * audio :param channel: the channel or channel name :type channel: :class:`models.Channel` | :class:`str` :returns: list of quality options :rtype: :class:`list` of :class:`str` :raises: :class:`requests.HTTPError` if channel is offline. """ optionmap = {'chunked': 'source', 'high': 'high', 'medium': 'medium', 'low': 'low', 'mobile': 'mobile', 'audio_only': 'audio'} p = self.get_playlist(channel) options = [] for pl in p.playlists: q = pl.media[0].group_id options.append(optionmap[q]) return options
[ "def", "get_quality_options", "(", "self", ",", "channel", ")", ":", "optionmap", "=", "{", "'chunked'", ":", "'source'", ",", "'high'", ":", "'high'", ",", "'medium'", ":", "'medium'", ",", "'low'", ":", "'low'", ",", "'mobile'", ":", "'mobile'", ",", "...
Get the available quality options for streams of the given channel Possible values in the list: * source * high * medium * low * mobile * audio :param channel: the channel or channel name :type channel: :class:`models.Channel` | :class:`str` :returns: list of quality options :rtype: :class:`list` of :class:`str` :raises: :class:`requests.HTTPError` if channel is offline.
[ "Get", "the", "available", "quality", "options", "for", "streams", "of", "the", "given", "channel" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L512-L541
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_channel_access_token
def get_channel_access_token(self, channel): """Return the token and sig for the given channel :param channel: the channel or channel name to get the access token for :type channel: :class:`channel` | :class:`str` :returns: The token and sig for the given channel :rtype: (:class:`unicode`, :class:`unicode`) :raises: None """ if isinstance(channel, models.Channel): channel = channel.name r = self.oldapi_request( 'GET', 'channels/%s/access_token' % channel).json() return r['token'], r['sig']
python
def get_channel_access_token(self, channel): """Return the token and sig for the given channel :param channel: the channel or channel name to get the access token for :type channel: :class:`channel` | :class:`str` :returns: The token and sig for the given channel :rtype: (:class:`unicode`, :class:`unicode`) :raises: None """ if isinstance(channel, models.Channel): channel = channel.name r = self.oldapi_request( 'GET', 'channels/%s/access_token' % channel).json() return r['token'], r['sig']
[ "def", "get_channel_access_token", "(", "self", ",", "channel", ")", ":", "if", "isinstance", "(", "channel", ",", "models", ".", "Channel", ")", ":", "channel", "=", "channel", ".", "name", "r", "=", "self", ".", "oldapi_request", "(", "'GET'", ",", "'c...
Return the token and sig for the given channel :param channel: the channel or channel name to get the access token for :type channel: :class:`channel` | :class:`str` :returns: The token and sig for the given channel :rtype: (:class:`unicode`, :class:`unicode`) :raises: None
[ "Return", "the", "token", "and", "sig", "for", "the", "given", "channel" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L543-L556
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_chat_server
def get_chat_server(self, channel): """Get an appropriate chat server for the given channel Usually the server is irc.twitch.tv. But because of the delicate twitch chat, they use a lot of servers. Big events are on special event servers. This method tries to find a good one. :param channel: the channel with the chat :type channel: :class:`models.Channel` :returns: the server address and port :rtype: (:class:`str`, :class:`int`) :raises: None """ r = self.oldapi_request( 'GET', 'channels/%s/chat_properties' % channel.name) json = r.json() servers = json['chat_servers'] try: r = self.get(TWITCH_STATUSURL) except requests.HTTPError: log.debug('Error getting chat server status. Using random one.') address = servers[0] else: stats = [client.ChatServerStatus(**d) for d in r.json()] address = self._find_best_chat_server(servers, stats) server, port = address.split(':') return server, int(port)
python
def get_chat_server(self, channel): """Get an appropriate chat server for the given channel Usually the server is irc.twitch.tv. But because of the delicate twitch chat, they use a lot of servers. Big events are on special event servers. This method tries to find a good one. :param channel: the channel with the chat :type channel: :class:`models.Channel` :returns: the server address and port :rtype: (:class:`str`, :class:`int`) :raises: None """ r = self.oldapi_request( 'GET', 'channels/%s/chat_properties' % channel.name) json = r.json() servers = json['chat_servers'] try: r = self.get(TWITCH_STATUSURL) except requests.HTTPError: log.debug('Error getting chat server status. Using random one.') address = servers[0] else: stats = [client.ChatServerStatus(**d) for d in r.json()] address = self._find_best_chat_server(servers, stats) server, port = address.split(':') return server, int(port)
[ "def", "get_chat_server", "(", "self", ",", "channel", ")", ":", "r", "=", "self", ".", "oldapi_request", "(", "'GET'", ",", "'channels/%s/chat_properties'", "%", "channel", ".", "name", ")", "json", "=", "r", ".", "json", "(", ")", "servers", "=", "json...
Get an appropriate chat server for the given channel Usually the server is irc.twitch.tv. But because of the delicate twitch chat, they use a lot of servers. Big events are on special event servers. This method tries to find a good one. :param channel: the channel with the chat :type channel: :class:`models.Channel` :returns: the server address and port :rtype: (:class:`str`, :class:`int`) :raises: None
[ "Get", "an", "appropriate", "chat", "server", "for", "the", "given", "channel" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L558-L586
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession._find_best_chat_server
def _find_best_chat_server(servers, stats): """Find the best from servers by comparing with the stats :param servers: a list if server adresses, e.g. ['0.0.0.0:80'] :type servers: :class:`list` of :class:`str` :param stats: list of server statuses :type stats: :class:`list` of :class:`chat.ChatServerStatus` :returns: the best server adress :rtype: :class:`str` :raises: None """ best = servers[0] # In case we sind no match with any status stats.sort() # gets sorted for performance for stat in stats: for server in servers: if server == stat: # found a chatserver that has the same address # than one of the chatserverstats. # since the stats are sorted for performance # the first hit is the best, thus break best = server break if best: # already found one, so no need to check the other # statuses, which are worse break return best
python
def _find_best_chat_server(servers, stats): """Find the best from servers by comparing with the stats :param servers: a list if server adresses, e.g. ['0.0.0.0:80'] :type servers: :class:`list` of :class:`str` :param stats: list of server statuses :type stats: :class:`list` of :class:`chat.ChatServerStatus` :returns: the best server adress :rtype: :class:`str` :raises: None """ best = servers[0] # In case we sind no match with any status stats.sort() # gets sorted for performance for stat in stats: for server in servers: if server == stat: # found a chatserver that has the same address # than one of the chatserverstats. # since the stats are sorted for performance # the first hit is the best, thus break best = server break if best: # already found one, so no need to check the other # statuses, which are worse break return best
[ "def", "_find_best_chat_server", "(", "servers", ",", "stats", ")", ":", "best", "=", "servers", "[", "0", "]", "# In case we sind no match with any status", "stats", ".", "sort", "(", ")", "# gets sorted for performance", "for", "stat", "in", "stats", ":", "for",...
Find the best from servers by comparing with the stats :param servers: a list if server adresses, e.g. ['0.0.0.0:80'] :type servers: :class:`list` of :class:`str` :param stats: list of server statuses :type stats: :class:`list` of :class:`chat.ChatServerStatus` :returns: the best server adress :rtype: :class:`str` :raises: None
[ "Find", "the", "best", "from", "servers", "by", "comparing", "with", "the", "stats" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L589-L615
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
TwitchSession.get_emote_picture
def get_emote_picture(self, emote, size=1.0): """Return the picture for the given emote :param emote: the emote object :type emote: :class:`pytwitcherapi.chat.message.Emote` :param size: the size of the picture. Choices are: 1.0, 2.0, 3.0 :type size: :class:`float` :returns: A string resembling the picturedata of the emote :rtype: :class:`str` :raises: None """ r = self.get('http://static-cdn.jtvnw.net/emoticons/v1/%s/%s' % (emote.emoteid, size)) return r.content
python
def get_emote_picture(self, emote, size=1.0): """Return the picture for the given emote :param emote: the emote object :type emote: :class:`pytwitcherapi.chat.message.Emote` :param size: the size of the picture. Choices are: 1.0, 2.0, 3.0 :type size: :class:`float` :returns: A string resembling the picturedata of the emote :rtype: :class:`str` :raises: None """ r = self.get('http://static-cdn.jtvnw.net/emoticons/v1/%s/%s' % (emote.emoteid, size)) return r.content
[ "def", "get_emote_picture", "(", "self", ",", "emote", ",", "size", "=", "1.0", ")", ":", "r", "=", "self", ".", "get", "(", "'http://static-cdn.jtvnw.net/emoticons/v1/%s/%s'", "%", "(", "emote", ".", "emoteid", ",", "size", ")", ")", "return", "r", ".", ...
Return the picture for the given emote :param emote: the emote object :type emote: :class:`pytwitcherapi.chat.message.Emote` :param size: the size of the picture. Choices are: 1.0, 2.0, 3.0 :type size: :class:`float` :returns: A string resembling the picturedata of the emote :rtype: :class:`str` :raises: None
[ "Return", "the", "picture", "for", "the", "given", "emote" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L617-L631