Dataset columns (field, type, and value-length range):

repo               string   length 7–55
path               string   length 4–127
func_name          string   length 1–88
original_string    string   length 75–19.8k
language           string   1 distinct value
code               string   length 75–19.8k
code_tokens        list     length 20–707
docstring          string   length 3–17.3k
docstring_tokens   list     length 3–222
sha                string   length 40
url                string   length 87–242
partition          string   1 distinct value
idx                int64    0–252k
mishbahr/djangocms-forms
djangocms_forms/forms.py
FormDefinitionAdminForm.clean_form_template
def clean_form_template(self): """ Check if template exists """ form_template = self.cleaned_data.get('form_template', '') if form_template: try: get_template(form_template) except TemplateDoesNotExist: msg = _('Selected Form Template does not exist.') raise forms.ValidationError(msg) return form_template
python
def clean_form_template(self): """ Check if template exists """ form_template = self.cleaned_data.get('form_template', '') if form_template: try: get_template(form_template) except TemplateDoesNotExist: msg = _('Selected Form Template does not exist.') raise forms.ValidationError(msg) return form_template
[ "def", "clean_form_template", "(", "self", ")", ":", "form_template", "=", "self", ".", "cleaned_data", ".", "get", "(", "'form_template'", ",", "''", ")", "if", "form_template", ":", "try", ":", "get_template", "(", "form_template", ")", "except", "TemplateDoesNotExist", ":", "msg", "=", "_", "(", "'Selected Form Template does not exist.'", ")", "raise", "forms", ".", "ValidationError", "(", "msg", ")", "return", "form_template" ]
Check if template exists
[ "Check", "if", "template", "exists" ]
9d7a4ef9769fd5e1526921c084d6da7b8070a2c1
https://github.com/mishbahr/djangocms-forms/blob/9d7a4ef9769fd5e1526921c084d6da7b8070a2c1/djangocms_forms/forms.py#L75-L84
train
238,500
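A minimal, hypothetical sketch of the same validation pattern outside the djangocms-forms codebase: it only illustrates how get_template / TemplateDoesNotExist drive the check. The form and field names below are made up, and gettext_lazy is assumed for the translated message.

from django import forms
from django.template import TemplateDoesNotExist
from django.template.loader import get_template
from django.utils.translation import gettext_lazy as _

class TemplatePickerForm(forms.Form):
    # Hypothetical stand-in; the record above uses a ModelForm for FormDefinition.
    form_template = forms.CharField(required=False)

    def clean_form_template(self):
        template_name = self.cleaned_data.get('form_template', '')
        if template_name:
            try:
                get_template(template_name)  # asks every configured template loader
            except TemplateDoesNotExist:
                raise forms.ValidationError(_('Selected Form Template does not exist.'))
        return template_name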
nikdon/pyEntropy
pyentrp/entropy.py
_embed
def _embed(x, order=3, delay=1): """Time-delay embedding. Parameters ---------- x : 1d-array, shape (n_times) Time series order : int Embedding dimension (order) delay : int Delay. Returns ------- embedded : ndarray, shape (n_times - (order - 1) * delay, order) Embedded time-series. """ N = len(x) Y = np.empty((order, N - (order - 1) * delay)) for i in range(order): Y[i] = x[i * delay:i * delay + Y.shape[1]] return Y.T
python
def _embed(x, order=3, delay=1): """Time-delay embedding. Parameters ---------- x : 1d-array, shape (n_times) Time series order : int Embedding dimension (order) delay : int Delay. Returns ------- embedded : ndarray, shape (n_times - (order - 1) * delay, order) Embedded time-series. """ N = len(x) Y = np.empty((order, N - (order - 1) * delay)) for i in range(order): Y[i] = x[i * delay:i * delay + Y.shape[1]] return Y.T
[ "def", "_embed", "(", "x", ",", "order", "=", "3", ",", "delay", "=", "1", ")", ":", "N", "=", "len", "(", "x", ")", "Y", "=", "np", ".", "empty", "(", "(", "order", ",", "N", "-", "(", "order", "-", "1", ")", "*", "delay", ")", ")", "for", "i", "in", "range", "(", "order", ")", ":", "Y", "[", "i", "]", "=", "x", "[", "i", "*", "delay", ":", "i", "*", "delay", "+", "Y", ".", "shape", "[", "1", "]", "]", "return", "Y", ".", "T" ]
Time-delay embedding. Parameters ---------- x : 1d-array, shape (n_times) Time series order : int Embedding dimension (order) delay : int Delay. Returns ------- embedded : ndarray, shape (n_times - (order - 1) * delay, order) Embedded time-series.
[ "Time", "-", "delay", "embedding", "." ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L10-L31
train
238,501
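A worked example of the time-delay embedding above, restating the function verbatim so the snippet is self-contained; the printed matrix was checked by hand.

import numpy as np

def _embed(x, order=3, delay=1):
    # Same body as the record above, repeated here for a runnable check.
    N = len(x)
    Y = np.empty((order, N - (order - 1) * delay))
    for i in range(order):
        Y[i] = x[i * delay:i * delay + Y.shape[1]]
    return Y.T

x = np.arange(8)                      # [0 1 2 3 4 5 6 7]
print(_embed(x, order=3, delay=2))
# 8 - (3 - 1) * 2 = 4 rows, each row is (x[t], x[t+2], x[t+4]):
# [[0. 2. 4.]
#  [1. 3. 5.]
#  [2. 4. 6.]
#  [3. 5. 7.]]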
nikdon/pyEntropy
pyentrp/entropy.py
util_pattern_space
def util_pattern_space(time_series, lag, dim): """Create a set of sequences with given lag and dimension Args: time_series: Vector or string of the sample data lag: Lag between beginning of sequences dim: Dimension (number of patterns) Returns: 2D array of vectors """ n = len(time_series) if lag * dim > n: raise Exception('Result matrix exceeded size limit, try to change lag or dim.') elif lag < 1: raise Exception('Lag should be greater or equal to 1.') pattern_space = np.empty((n - lag * (dim - 1), dim)) for i in range(n - lag * (dim - 1)): for j in range(dim): pattern_space[i][j] = time_series[i + j * lag] return pattern_space
python
def util_pattern_space(time_series, lag, dim): """Create a set of sequences with given lag and dimension Args: time_series: Vector or string of the sample data lag: Lag between beginning of sequences dim: Dimension (number of patterns) Returns: 2D array of vectors """ n = len(time_series) if lag * dim > n: raise Exception('Result matrix exceeded size limit, try to change lag or dim.') elif lag < 1: raise Exception('Lag should be greater or equal to 1.') pattern_space = np.empty((n - lag * (dim - 1), dim)) for i in range(n - lag * (dim - 1)): for j in range(dim): pattern_space[i][j] = time_series[i + j * lag] return pattern_space
[ "def", "util_pattern_space", "(", "time_series", ",", "lag", ",", "dim", ")", ":", "n", "=", "len", "(", "time_series", ")", "if", "lag", "*", "dim", ">", "n", ":", "raise", "Exception", "(", "'Result matrix exceeded size limit, try to change lag or dim.'", ")", "elif", "lag", "<", "1", ":", "raise", "Exception", "(", "'Lag should be greater or equal to 1.'", ")", "pattern_space", "=", "np", ".", "empty", "(", "(", "n", "-", "lag", "*", "(", "dim", "-", "1", ")", ",", "dim", ")", ")", "for", "i", "in", "range", "(", "n", "-", "lag", "*", "(", "dim", "-", "1", ")", ")", ":", "for", "j", "in", "range", "(", "dim", ")", ":", "pattern_space", "[", "i", "]", "[", "j", "]", "=", "time_series", "[", "i", "+", "j", "*", "lag", "]", "return", "pattern_space" ]
Create a set of sequences with given lag and dimension Args: time_series: Vector or string of the sample data lag: Lag between beginning of sequences dim: Dimension (number of patterns) Returns: 2D array of vectors
[ "Create", "a", "set", "of", "sequences", "with", "given", "lag", "and", "dimension" ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L34-L57
train
238,502
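A quick usage sketch, assuming the package is installed as pyentrp; the expected output follows directly from the indexing rule time_series[i + j * lag].

from pyentrp import entropy as ent  # assumption: pip-installed pyentrp

ts = [1, 2, 3, 4, 5, 6, 7]
print(ent.util_pattern_space(ts, lag=2, dim=3))
# n - lag * (dim - 1) = 7 - 4 = 3 rows, each (x[i], x[i+2], x[i+4]):
# [[1. 3. 5.]
#  [2. 4. 6.]
#  [3. 5. 7.]]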
nikdon/pyEntropy
pyentrp/entropy.py
util_granulate_time_series
def util_granulate_time_series(time_series, scale): """Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor """ n = len(time_series) b = int(np.fix(n / scale)) temp = np.reshape(time_series[0:b*scale], (b, scale)) cts = np.mean(temp, axis = 1) return cts
python
def util_granulate_time_series(time_series, scale): """Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor """ n = len(time_series) b = int(np.fix(n / scale)) temp = np.reshape(time_series[0:b*scale], (b, scale)) cts = np.mean(temp, axis = 1) return cts
[ "def", "util_granulate_time_series", "(", "time_series", ",", "scale", ")", ":", "n", "=", "len", "(", "time_series", ")", "b", "=", "int", "(", "np", ".", "fix", "(", "n", "/", "scale", ")", ")", "temp", "=", "np", ".", "reshape", "(", "time_series", "[", "0", ":", "b", "*", "scale", "]", ",", "(", "b", ",", "scale", ")", ")", "cts", "=", "np", ".", "mean", "(", "temp", ",", "axis", "=", "1", ")", "return", "cts" ]
Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor
[ "Extract", "coarse", "-", "grained", "time", "series" ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L64-L78
train
238,503
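A usage sketch of the coarse-graining helper, again assuming pyentrp is installed; note that trailing samples that do not fill a complete window are dropped by the int(np.fix(n / scale)) truncation.

from pyentrp import entropy as ent  # assumption: pip-installed pyentrp

ts = [2, 4, 6, 8, 10, 12, 13]
# scale=2 averages non-overlapping pairs; the odd trailing sample 13 is discarded.
print(ent.util_granulate_time_series(ts, 2))   # [ 3.  7. 11.]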
nikdon/pyEntropy
pyentrp/entropy.py
shannon_entropy
def shannon_entropy(time_series): """Return the Shannon Entropy of the sample data. Args: time_series: Vector or string of the sample data Returns: The Shannon Entropy as float value """ # Check if string if not isinstance(time_series, str): time_series = list(time_series) # Create a frequency data data_set = list(set(time_series)) freq_list = [] for entry in data_set: counter = 0. for i in time_series: if i == entry: counter += 1 freq_list.append(float(counter) / len(time_series)) # Shannon entropy ent = 0.0 for freq in freq_list: ent += freq * np.log2(freq) ent = -ent return ent
python
def shannon_entropy(time_series): """Return the Shannon Entropy of the sample data. Args: time_series: Vector or string of the sample data Returns: The Shannon Entropy as float value """ # Check if string if not isinstance(time_series, str): time_series = list(time_series) # Create a frequency data data_set = list(set(time_series)) freq_list = [] for entry in data_set: counter = 0. for i in time_series: if i == entry: counter += 1 freq_list.append(float(counter) / len(time_series)) # Shannon entropy ent = 0.0 for freq in freq_list: ent += freq * np.log2(freq) ent = -ent return ent
[ "def", "shannon_entropy", "(", "time_series", ")", ":", "# Check if string", "if", "not", "isinstance", "(", "time_series", ",", "str", ")", ":", "time_series", "=", "list", "(", "time_series", ")", "# Create a frequency data", "data_set", "=", "list", "(", "set", "(", "time_series", ")", ")", "freq_list", "=", "[", "]", "for", "entry", "in", "data_set", ":", "counter", "=", "0.", "for", "i", "in", "time_series", ":", "if", "i", "==", "entry", ":", "counter", "+=", "1", "freq_list", ".", "append", "(", "float", "(", "counter", ")", "/", "len", "(", "time_series", ")", ")", "# Shannon entropy", "ent", "=", "0.0", "for", "freq", "in", "freq_list", ":", "ent", "+=", "freq", "*", "np", ".", "log2", "(", "freq", ")", "ent", "=", "-", "ent", "return", "ent" ]
Return the Shannon Entropy of the sample data. Args: time_series: Vector or string of the sample data Returns: The Shannon Entropy as float value
[ "Return", "the", "Shannon", "Entropy", "of", "the", "sample", "data", "." ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L81-L110
train
238,504
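Two hand-checkable calls, assuming pyentrp is installed: equiprobable symbols give exactly log2(alphabet size) bits.

from pyentrp import entropy as ent  # assumption: pip-installed pyentrp

print(ent.shannon_entropy('0101010101'))   # two symbols, 0.5 each -> 1.0 bit
print(ent.shannon_entropy('ABCDABCD'))     # four symbols, 0.25 each -> 2.0 bits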
nikdon/pyEntropy
pyentrp/entropy.py
sample_entropy
def sample_entropy(time_series, sample_length, tolerance = None): """Calculates the sample entropy of degree m of a time_series. This method uses chebychev norm. It is quite fast for random data, but can be slower is there is structure in the input time series. Args: time_series: numpy array of time series sample_length: length of longest template vector tolerance: tolerance (defaults to 0.1 * std(time_series))) Returns: Array of sample entropies: SE[k] is ratio "#templates of length k+1" / "#templates of length k" where #templates of length 0" = n*(n - 1) / 2, by definition Note: The parameter 'sample_length' is equal to m + 1 in Ref[1]. References: [1] http://en.wikipedia.org/wiki/Sample_Entropy [2] http://physionet.incor.usp.br/physiotools/sampen/ [3] Madalena Costa, Ary Goldberger, CK Peng. Multiscale entropy analysis of biological signals """ #The code below follows the sample length convention of Ref [1] so: M = sample_length - 1; time_series = np.array(time_series) if tolerance is None: tolerance = 0.1*np.std(time_series) n = len(time_series) #Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k Ntemp = np.zeros(M + 2) #Templates of length 0 matches by definition: Ntemp[0] = n*(n - 1) / 2 for i in range(n - M - 1): template = time_series[i:(i+M+1)];#We have 'M+1' elements in the template rem_time_series = time_series[i+1:] searchlist = np.nonzero(np.abs(rem_time_series - template[0]) < tolerance)[0] go = len(searchlist) > 0; length = 1; Ntemp[length] += len(searchlist) while go: length += 1 nextindxlist = searchlist + 1; nextindxlist = nextindxlist[nextindxlist < n - 1 - i]#Remove candidates too close to the end nextcandidates = rem_time_series[nextindxlist] hitlist = np.abs(nextcandidates - template[length-1]) < tolerance searchlist = nextindxlist[hitlist] Ntemp[length] += np.sum(hitlist) go = any(hitlist) and length < M + 1 sampen = - np.log(Ntemp[1:] / Ntemp[:-1]) return sampen
python
def sample_entropy(time_series, sample_length, tolerance = None): """Calculates the sample entropy of degree m of a time_series. This method uses chebychev norm. It is quite fast for random data, but can be slower is there is structure in the input time series. Args: time_series: numpy array of time series sample_length: length of longest template vector tolerance: tolerance (defaults to 0.1 * std(time_series))) Returns: Array of sample entropies: SE[k] is ratio "#templates of length k+1" / "#templates of length k" where #templates of length 0" = n*(n - 1) / 2, by definition Note: The parameter 'sample_length' is equal to m + 1 in Ref[1]. References: [1] http://en.wikipedia.org/wiki/Sample_Entropy [2] http://physionet.incor.usp.br/physiotools/sampen/ [3] Madalena Costa, Ary Goldberger, CK Peng. Multiscale entropy analysis of biological signals """ #The code below follows the sample length convention of Ref [1] so: M = sample_length - 1; time_series = np.array(time_series) if tolerance is None: tolerance = 0.1*np.std(time_series) n = len(time_series) #Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k Ntemp = np.zeros(M + 2) #Templates of length 0 matches by definition: Ntemp[0] = n*(n - 1) / 2 for i in range(n - M - 1): template = time_series[i:(i+M+1)];#We have 'M+1' elements in the template rem_time_series = time_series[i+1:] searchlist = np.nonzero(np.abs(rem_time_series - template[0]) < tolerance)[0] go = len(searchlist) > 0; length = 1; Ntemp[length] += len(searchlist) while go: length += 1 nextindxlist = searchlist + 1; nextindxlist = nextindxlist[nextindxlist < n - 1 - i]#Remove candidates too close to the end nextcandidates = rem_time_series[nextindxlist] hitlist = np.abs(nextcandidates - template[length-1]) < tolerance searchlist = nextindxlist[hitlist] Ntemp[length] += np.sum(hitlist) go = any(hitlist) and length < M + 1 sampen = - np.log(Ntemp[1:] / Ntemp[:-1]) return sampen
[ "def", "sample_entropy", "(", "time_series", ",", "sample_length", ",", "tolerance", "=", "None", ")", ":", "#The code below follows the sample length convention of Ref [1] so:", "M", "=", "sample_length", "-", "1", "time_series", "=", "np", ".", "array", "(", "time_series", ")", "if", "tolerance", "is", "None", ":", "tolerance", "=", "0.1", "*", "np", ".", "std", "(", "time_series", ")", "n", "=", "len", "(", "time_series", ")", "#Ntemp is a vector that holds the number of matches. N[k] holds matches templates of length k", "Ntemp", "=", "np", ".", "zeros", "(", "M", "+", "2", ")", "#Templates of length 0 matches by definition:", "Ntemp", "[", "0", "]", "=", "n", "*", "(", "n", "-", "1", ")", "/", "2", "for", "i", "in", "range", "(", "n", "-", "M", "-", "1", ")", ":", "template", "=", "time_series", "[", "i", ":", "(", "i", "+", "M", "+", "1", ")", "]", "#We have 'M+1' elements in the template", "rem_time_series", "=", "time_series", "[", "i", "+", "1", ":", "]", "searchlist", "=", "np", ".", "nonzero", "(", "np", ".", "abs", "(", "rem_time_series", "-", "template", "[", "0", "]", ")", "<", "tolerance", ")", "[", "0", "]", "go", "=", "len", "(", "searchlist", ")", ">", "0", "length", "=", "1", "Ntemp", "[", "length", "]", "+=", "len", "(", "searchlist", ")", "while", "go", ":", "length", "+=", "1", "nextindxlist", "=", "searchlist", "+", "1", "nextindxlist", "=", "nextindxlist", "[", "nextindxlist", "<", "n", "-", "1", "-", "i", "]", "#Remove candidates too close to the end", "nextcandidates", "=", "rem_time_series", "[", "nextindxlist", "]", "hitlist", "=", "np", ".", "abs", "(", "nextcandidates", "-", "template", "[", "length", "-", "1", "]", ")", "<", "tolerance", "searchlist", "=", "nextindxlist", "[", "hitlist", "]", "Ntemp", "[", "length", "]", "+=", "np", ".", "sum", "(", "hitlist", ")", "go", "=", "any", "(", "hitlist", ")", "and", "length", "<", "M", "+", "1", "sampen", "=", "-", "np", ".", "log", "(", "Ntemp", "[", "1", ":", "]", "/", "Ntemp", "[", ":", "-", "1", "]", ")", "return", "sampen" ]
Calculates the sample entropy of degree m of a time_series. This method uses chebychev norm. It is quite fast for random data, but can be slower is there is structure in the input time series. Args: time_series: numpy array of time series sample_length: length of longest template vector tolerance: tolerance (defaults to 0.1 * std(time_series))) Returns: Array of sample entropies: SE[k] is ratio "#templates of length k+1" / "#templates of length k" where #templates of length 0" = n*(n - 1) / 2, by definition Note: The parameter 'sample_length' is equal to m + 1 in Ref[1]. References: [1] http://en.wikipedia.org/wiki/Sample_Entropy [2] http://physionet.incor.usp.br/physiotools/sampen/ [3] Madalena Costa, Ary Goldberger, CK Peng. Multiscale entropy analysis of biological signals
[ "Calculates", "the", "sample", "entropy", "of", "degree", "m", "of", "a", "time_series", "." ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L113-L179
train
238,505
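A usage sketch on synthetic data, assuming pyentrp is installed. Per the docstring, sample_length equals m + 1, so sample_length=3 returns SE[0..2] and the last entry is the usual SampEn with m = 2; exact values depend on the random draw, so only the shape is asserted.

import numpy as np
from pyentrp import entropy as ent  # assumption: pip-installed pyentrp

np.random.seed(42)
white_noise = np.random.rand(1000)

se = ent.sample_entropy(white_noise, sample_length=3,
                        tolerance=0.2 * np.std(white_noise))
print(se.shape)   # (3,) -- SE[k] = -log(#templates of length k+1 / #templates of length k)
print(se[-1])     # SampEn(m=2); high for white noise, lower for regular signals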
nikdon/pyEntropy
pyentrp/entropy.py
multiscale_entropy
def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = None): """Calculate the Multiscale Entropy of the given time series considering different time-scales of the time series. Args: time_series: Time series for analysis sample_length: Bandwidth or group of points tolerance: Tolerance (default = 0.1*std(time_series)) Returns: Vector containing Multiscale Entropy Reference: [1] http://en.pudn.com/downloads149/sourcecode/math/detail646216_en.html """ if tolerance is None: #we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy() tolerance = 0.1*np.std(time_series) if maxscale is None: maxscale = len(time_series) mse = np.zeros(maxscale) for i in range(maxscale): temp = util_granulate_time_series(time_series, i+1) mse[i] = sample_entropy(temp, sample_length, tolerance)[-1] return mse
python
def multiscale_entropy(time_series, sample_length, tolerance = None, maxscale = None): """Calculate the Multiscale Entropy of the given time series considering different time-scales of the time series. Args: time_series: Time series for analysis sample_length: Bandwidth or group of points tolerance: Tolerance (default = 0.1*std(time_series)) Returns: Vector containing Multiscale Entropy Reference: [1] http://en.pudn.com/downloads149/sourcecode/math/detail646216_en.html """ if tolerance is None: #we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy() tolerance = 0.1*np.std(time_series) if maxscale is None: maxscale = len(time_series) mse = np.zeros(maxscale) for i in range(maxscale): temp = util_granulate_time_series(time_series, i+1) mse[i] = sample_entropy(temp, sample_length, tolerance)[-1] return mse
[ "def", "multiscale_entropy", "(", "time_series", ",", "sample_length", ",", "tolerance", "=", "None", ",", "maxscale", "=", "None", ")", ":", "if", "tolerance", "is", "None", ":", "#we need to fix the tolerance at this level. If it remains 'None' it will be changed in call to sample_entropy()", "tolerance", "=", "0.1", "*", "np", ".", "std", "(", "time_series", ")", "if", "maxscale", "is", "None", ":", "maxscale", "=", "len", "(", "time_series", ")", "mse", "=", "np", ".", "zeros", "(", "maxscale", ")", "for", "i", "in", "range", "(", "maxscale", ")", ":", "temp", "=", "util_granulate_time_series", "(", "time_series", ",", "i", "+", "1", ")", "mse", "[", "i", "]", "=", "sample_entropy", "(", "temp", ",", "sample_length", ",", "tolerance", ")", "[", "-", "1", "]", "return", "mse" ]
Calculate the Multiscale Entropy of the given time series considering different time-scales of the time series. Args: time_series: Time series for analysis sample_length: Bandwidth or group of points tolerance: Tolerance (default = 0.1*std(time_series)) Returns: Vector containing Multiscale Entropy Reference: [1] http://en.pudn.com/downloads149/sourcecode/math/detail646216_en.html
[ "Calculate", "the", "Multiscale", "Entropy", "of", "the", "given", "time", "series", "considering", "different", "time", "-", "scales", "of", "the", "time", "series", "." ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L182-L209
train
238,506
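A usage sketch, assuming pyentrp is installed. Without maxscale the function coarse-grains all the way down to a single point, so capping the scale explicitly is the practical way to call it.

import numpy as np
from pyentrp import entropy as ent  # assumption: pip-installed pyentrp

np.random.seed(0)
signal = np.random.rand(2000)

mse = ent.multiscale_entropy(signal, sample_length=2, maxscale=5)
print(mse.shape)   # (5,) -- one sample-entropy value per scale factor 1..5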
nikdon/pyEntropy
pyentrp/entropy.py
permutation_entropy
def permutation_entropy(time_series, order=3, delay=1, normalize=False): """Permutation Entropy. Parameters ---------- time_series : list or np.array Time series order : int Order of permutation entropy delay : int Time delay normalize : bool If True, divide by log2(factorial(m)) to normalize the entropy between 0 and 1. Otherwise, return the permutation entropy in bit. Returns ------- pe : float Permutation Entropy References ---------- .. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main Biomedical and Econophysics Applications: A Review. http://www.mdpi.com/1099-4300/14/8/1553/pdf .. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural complexity measure for time series. http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf Notes ----- Last updated (Oct 2018) by Raphael Vallat (raphaelvallat9@gmail.com): - Major speed improvements - Use of base 2 instead of base e - Added normalization Examples -------- 1. Permutation entropy with order 2 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value between 0 and log2(factorial(order)) >>> print(permutation_entropy(x, order=2)) 0.918 2. Normalized permutation entropy with order 3 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value comprised between 0 and 1. >>> print(permutation_entropy(x, order=3, normalize=True)) 0.589 """ x = np.array(time_series) hashmult = np.power(order, np.arange(order)) # Embed x and sort the order of permutations sorted_idx = _embed(x, order=order, delay=delay).argsort(kind='quicksort') # Associate unique integer to each permutations hashval = (np.multiply(sorted_idx, hashmult)).sum(1) # Return the counts _, c = np.unique(hashval, return_counts=True) # Use np.true_divide for Python 2 compatibility p = np.true_divide(c, c.sum()) pe = -np.multiply(p, np.log2(p)).sum() if normalize: pe /= np.log2(factorial(order)) return pe
python
def permutation_entropy(time_series, order=3, delay=1, normalize=False): """Permutation Entropy. Parameters ---------- time_series : list or np.array Time series order : int Order of permutation entropy delay : int Time delay normalize : bool If True, divide by log2(factorial(m)) to normalize the entropy between 0 and 1. Otherwise, return the permutation entropy in bit. Returns ------- pe : float Permutation Entropy References ---------- .. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main Biomedical and Econophysics Applications: A Review. http://www.mdpi.com/1099-4300/14/8/1553/pdf .. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural complexity measure for time series. http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf Notes ----- Last updated (Oct 2018) by Raphael Vallat (raphaelvallat9@gmail.com): - Major speed improvements - Use of base 2 instead of base e - Added normalization Examples -------- 1. Permutation entropy with order 2 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value between 0 and log2(factorial(order)) >>> print(permutation_entropy(x, order=2)) 0.918 2. Normalized permutation entropy with order 3 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value comprised between 0 and 1. >>> print(permutation_entropy(x, order=3, normalize=True)) 0.589 """ x = np.array(time_series) hashmult = np.power(order, np.arange(order)) # Embed x and sort the order of permutations sorted_idx = _embed(x, order=order, delay=delay).argsort(kind='quicksort') # Associate unique integer to each permutations hashval = (np.multiply(sorted_idx, hashmult)).sum(1) # Return the counts _, c = np.unique(hashval, return_counts=True) # Use np.true_divide for Python 2 compatibility p = np.true_divide(c, c.sum()) pe = -np.multiply(p, np.log2(p)).sum() if normalize: pe /= np.log2(factorial(order)) return pe
[ "def", "permutation_entropy", "(", "time_series", ",", "order", "=", "3", ",", "delay", "=", "1", ",", "normalize", "=", "False", ")", ":", "x", "=", "np", ".", "array", "(", "time_series", ")", "hashmult", "=", "np", ".", "power", "(", "order", ",", "np", ".", "arange", "(", "order", ")", ")", "# Embed x and sort the order of permutations", "sorted_idx", "=", "_embed", "(", "x", ",", "order", "=", "order", ",", "delay", "=", "delay", ")", ".", "argsort", "(", "kind", "=", "'quicksort'", ")", "# Associate unique integer to each permutations", "hashval", "=", "(", "np", ".", "multiply", "(", "sorted_idx", ",", "hashmult", ")", ")", ".", "sum", "(", "1", ")", "# Return the counts", "_", ",", "c", "=", "np", ".", "unique", "(", "hashval", ",", "return_counts", "=", "True", ")", "# Use np.true_divide for Python 2 compatibility", "p", "=", "np", ".", "true_divide", "(", "c", ",", "c", ".", "sum", "(", ")", ")", "pe", "=", "-", "np", ".", "multiply", "(", "p", ",", "np", ".", "log2", "(", "p", ")", ")", ".", "sum", "(", ")", "if", "normalize", ":", "pe", "/=", "np", ".", "log2", "(", "factorial", "(", "order", ")", ")", "return", "pe" ]
Permutation Entropy. Parameters ---------- time_series : list or np.array Time series order : int Order of permutation entropy delay : int Time delay normalize : bool If True, divide by log2(factorial(m)) to normalize the entropy between 0 and 1. Otherwise, return the permutation entropy in bit. Returns ------- pe : float Permutation Entropy References ---------- .. [1] Massimiliano Zanin et al. Permutation Entropy and Its Main Biomedical and Econophysics Applications: A Review. http://www.mdpi.com/1099-4300/14/8/1553/pdf .. [2] Christoph Bandt and Bernd Pompe. Permutation entropy — a natural complexity measure for time series. http://stubber.math-inf.uni-greifswald.de/pub/full/prep/2001/11.pdf Notes ----- Last updated (Oct 2018) by Raphael Vallat (raphaelvallat9@gmail.com): - Major speed improvements - Use of base 2 instead of base e - Added normalization Examples -------- 1. Permutation entropy with order 2 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value between 0 and log2(factorial(order)) >>> print(permutation_entropy(x, order=2)) 0.918 2. Normalized permutation entropy with order 3 >>> x = [4, 7, 9, 10, 6, 11, 3] >>> # Return a value comprised between 0 and 1. >>> print(permutation_entropy(x, order=3, normalize=True)) 0.589
[ "Permutation", "Entropy", "." ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L212-L278
train
238,507
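The docstring already shows end-to-end calls; the sketch below instead unrolls the hashing trick used in the body: the argsort of every embedded window, dotted with powers of order, maps each ordinal pattern to a unique integer. The numbers were checked against the docstring's normalized example (0.589).

import numpy as np
from math import factorial

order, delay = 3, 1
x = np.array([4, 7, 9, 10, 6, 11, 3])

# Time-delay embedding, same construction as _embed(x, order=3, delay=1).
N = len(x)
emb = np.empty((order, N - (order - 1) * delay))
for i in range(order):
    emb[i] = x[i * delay:i * delay + emb.shape[1]]
emb = emb.T

hashmult = np.power(order, np.arange(order))        # [1 3 9]
hashval = emb.argsort(kind='quicksort').dot(hashmult)
print(hashval)                                      # [21 21 11 19 11] -- one code per window

_, counts = np.unique(hashval, return_counts=True)
p = counts / counts.sum()
pe = -(p * np.log2(p)).sum()
print(pe, pe / np.log2(factorial(order)))           # ~1.522 bits, ~0.589 normalized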
nikdon/pyEntropy
pyentrp/entropy.py
multiscale_permutation_entropy
def multiscale_permutation_entropy(time_series, m, delay, scale): """Calculate the Multiscale Permutation Entropy Args: time_series: Time series for analysis m: Order of permutation entropy delay: Time delay scale: Scale factor Returns: Vector containing Multiscale Permutation Entropy Reference: [1] Francesco Carlo Morabito et al. Multivariate Multi-Scale Permutation Entropy for Complexity Analysis of Alzheimer’s Disease EEG. www.mdpi.com/1099-4300/14/7/1186 [2] http://www.mathworks.com/matlabcentral/fileexchange/37288-multiscale-permutation-entropy-mpe/content/MPerm.m """ mspe = [] for i in range(scale): coarse_time_series = util_granulate_time_series(time_series, i + 1) pe = permutation_entropy(coarse_time_series, order=m, delay=delay) mspe.append(pe) return mspe
python
def multiscale_permutation_entropy(time_series, m, delay, scale): """Calculate the Multiscale Permutation Entropy Args: time_series: Time series for analysis m: Order of permutation entropy delay: Time delay scale: Scale factor Returns: Vector containing Multiscale Permutation Entropy Reference: [1] Francesco Carlo Morabito et al. Multivariate Multi-Scale Permutation Entropy for Complexity Analysis of Alzheimer’s Disease EEG. www.mdpi.com/1099-4300/14/7/1186 [2] http://www.mathworks.com/matlabcentral/fileexchange/37288-multiscale-permutation-entropy-mpe/content/MPerm.m """ mspe = [] for i in range(scale): coarse_time_series = util_granulate_time_series(time_series, i + 1) pe = permutation_entropy(coarse_time_series, order=m, delay=delay) mspe.append(pe) return mspe
[ "def", "multiscale_permutation_entropy", "(", "time_series", ",", "m", ",", "delay", ",", "scale", ")", ":", "mspe", "=", "[", "]", "for", "i", "in", "range", "(", "scale", ")", ":", "coarse_time_series", "=", "util_granulate_time_series", "(", "time_series", ",", "i", "+", "1", ")", "pe", "=", "permutation_entropy", "(", "coarse_time_series", ",", "order", "=", "m", ",", "delay", "=", "delay", ")", "mspe", ".", "append", "(", "pe", ")", "return", "mspe" ]
Calculate the Multiscale Permutation Entropy Args: time_series: Time series for analysis m: Order of permutation entropy delay: Time delay scale: Scale factor Returns: Vector containing Multiscale Permutation Entropy Reference: [1] Francesco Carlo Morabito et al. Multivariate Multi-Scale Permutation Entropy for Complexity Analysis of Alzheimer’s Disease EEG. www.mdpi.com/1099-4300/14/7/1186 [2] http://www.mathworks.com/matlabcentral/fileexchange/37288-multiscale-permutation-entropy-mpe/content/MPerm.m
[ "Calculate", "the", "Multiscale", "Permutation", "Entropy" ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L281-L303
train
238,508
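A usage sketch, assuming pyentrp is installed; the result is a plain Python list with one permutation-entropy value per coarse-graining scale.

import numpy as np
from pyentrp import entropy as ent  # assumption: pip-installed pyentrp

np.random.seed(1)
signal = np.random.rand(1000)

mspe = ent.multiscale_permutation_entropy(signal, m=3, delay=1, scale=3)
print(len(mspe))   # 3 -- entropies for scale factors 1, 2 and 3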
nikdon/pyEntropy
pyentrp/entropy.py
composite_multiscale_entropy
def composite_multiscale_entropy(time_series, sample_length, scale, tolerance=None): """Calculate the Composite Multiscale Entropy of the given time series. Args: time_series: Time series for analysis sample_length: Number of sequential points of the time series scale: Scale factor tolerance: Tolerance (default = 0.1...0.2 * std(time_series)) Returns: Vector containing Composite Multiscale Entropy Reference: [1] Wu, Shuen-De, et al. "Time series analysis using composite multiscale entropy." Entropy 15.3 (2013): 1069-1084. """ cmse = np.zeros((1, scale)) for i in range(scale): for j in range(i): tmp = util_granulate_time_series(time_series[j:], i + 1) cmse[i] += sample_entropy(tmp, sample_length, tolerance) / (i + 1) return cmse
python
def composite_multiscale_entropy(time_series, sample_length, scale, tolerance=None): """Calculate the Composite Multiscale Entropy of the given time series. Args: time_series: Time series for analysis sample_length: Number of sequential points of the time series scale: Scale factor tolerance: Tolerance (default = 0.1...0.2 * std(time_series)) Returns: Vector containing Composite Multiscale Entropy Reference: [1] Wu, Shuen-De, et al. "Time series analysis using composite multiscale entropy." Entropy 15.3 (2013): 1069-1084. """ cmse = np.zeros((1, scale)) for i in range(scale): for j in range(i): tmp = util_granulate_time_series(time_series[j:], i + 1) cmse[i] += sample_entropy(tmp, sample_length, tolerance) / (i + 1) return cmse
[ "def", "composite_multiscale_entropy", "(", "time_series", ",", "sample_length", ",", "scale", ",", "tolerance", "=", "None", ")", ":", "cmse", "=", "np", ".", "zeros", "(", "(", "1", ",", "scale", ")", ")", "for", "i", "in", "range", "(", "scale", ")", ":", "for", "j", "in", "range", "(", "i", ")", ":", "tmp", "=", "util_granulate_time_series", "(", "time_series", "[", "j", ":", "]", ",", "i", "+", "1", ")", "cmse", "[", "i", "]", "+=", "sample_entropy", "(", "tmp", ",", "sample_length", ",", "tolerance", ")", "/", "(", "i", "+", "1", ")", "return", "cmse" ]
Calculate the Composite Multiscale Entropy of the given time series. Args: time_series: Time series for analysis sample_length: Number of sequential points of the time series scale: Scale factor tolerance: Tolerance (default = 0.1...0.2 * std(time_series)) Returns: Vector containing Composite Multiscale Entropy Reference: [1] Wu, Shuen-De, et al. "Time series analysis using composite multiscale entropy." Entropy 15.3 (2013): 1069-1084.
[ "Calculate", "the", "Composite", "Multiscale", "Entropy", "of", "the", "given", "time", "series", "." ]
ae2bf71c2e5b6edb2e468ff52183b30acf7073e6
https://github.com/nikdon/pyEntropy/blob/ae2bf71c2e5b6edb2e468ff52183b30acf7073e6/pyentrp/entropy.py#L307-L329
train
238,509
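A self-contained sketch of the composite multiscale entropy definition from Wu et al. (2013), written against the reference formula rather than the function above: for each scale tau it averages sample entropy over the tau possible coarse-graining offsets. It reuses the two library helpers and assumes pyentrp is installed.

import numpy as np
from pyentrp import entropy as ent  # assumption: pip-installed pyentrp

def cmse_sketch(x, sample_length, scale, tolerance=None):
    # CMSE(tau) = (1/tau) * sum over offsets k=0..tau-1 of SampEn of the
    # coarse-grained series starting at offset k (Wu et al., Entropy 2013).
    x = np.asarray(x, dtype=float)
    if tolerance is None:
        tolerance = 0.1 * np.std(x)
    out = np.zeros(scale)
    for tau in range(1, scale + 1):
        for offset in range(tau):
            grain = ent.util_granulate_time_series(x[offset:], tau)
            out[tau - 1] += ent.sample_entropy(grain, sample_length, tolerance)[-1] / tau
    return out

np.random.seed(3)
print(cmse_sketch(np.random.rand(1000), sample_length=2, scale=3))   # three CMSE values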
kyan001/ping3
ping3.py
ones_comp_sum16
def ones_comp_sum16(num1: int, num2: int) -> int: """Calculates the 1's complement sum for 16-bit numbers. Args: num1: 16-bit number. num2: 16-bit number. Returns: The calculated result. """ carry = 1 << 16 result = num1 + num2 return result if result < carry else result + 1 - carry
python
def ones_comp_sum16(num1: int, num2: int) -> int: """Calculates the 1's complement sum for 16-bit numbers. Args: num1: 16-bit number. num2: 16-bit number. Returns: The calculated result. """ carry = 1 << 16 result = num1 + num2 return result if result < carry else result + 1 - carry
[ "def", "ones_comp_sum16", "(", "num1", ":", "int", ",", "num2", ":", "int", ")", "->", "int", ":", "carry", "=", "1", "<<", "16", "result", "=", "num1", "+", "num2", "return", "result", "if", "result", "<", "carry", "else", "result", "+", "1", "-", "carry" ]
Calculates the 1's complement sum for 16-bit numbers. Args: num1: 16-bit number. num2: 16-bit number. Returns: The calculated result.
[ "Calculates", "the", "1", "s", "complement", "sum", "for", "16", "-", "bit", "numbers", "." ]
fc9e8a4b828965a800036dfbd019e97114ad80b3
https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L34-L47
train
238,510
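Two hand-checked calls that show the end-around carry; the function is repeated verbatim so the snippet runs on its own.

def ones_comp_sum16(num1: int, num2: int) -> int:
    carry = 1 << 16
    result = num1 + num2
    return result if result < carry else result + 1 - carry

print(hex(ones_comp_sum16(0x1234, 0x0001)))   # 0x1235 -- fits in 16 bits, unchanged
print(hex(ones_comp_sum16(0xffff, 0x0002)))   # 0x2    -- 0x10001 wraps the carry into bit 0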
kyan001/ping3
ping3.py
checksum
def checksum(source: bytes) -> int: """Calculates the checksum of the input bytes. RFC1071: https://tools.ietf.org/html/rfc1071 RFC792: https://tools.ietf.org/html/rfc792 Args: source: The input to be calculated. Returns: Calculated checksum. """ if len(source) % 2: # if the total length is odd, padding with one octet of zeros for computing the checksum source += b'\x00' sum = 0 for i in range(0, len(source), 2): sum = ones_comp_sum16(sum, (source[i + 1] << 8) + source[i]) return ~sum & 0xffff
python
def checksum(source: bytes) -> int: """Calculates the checksum of the input bytes. RFC1071: https://tools.ietf.org/html/rfc1071 RFC792: https://tools.ietf.org/html/rfc792 Args: source: The input to be calculated. Returns: Calculated checksum. """ if len(source) % 2: # if the total length is odd, padding with one octet of zeros for computing the checksum source += b'\x00' sum = 0 for i in range(0, len(source), 2): sum = ones_comp_sum16(sum, (source[i + 1] << 8) + source[i]) return ~sum & 0xffff
[ "def", "checksum", "(", "source", ":", "bytes", ")", "->", "int", ":", "if", "len", "(", "source", ")", "%", "2", ":", "# if the total length is odd, padding with one octet of zeros for computing the checksum", "source", "+=", "b'\\x00'", "sum", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "source", ")", ",", "2", ")", ":", "sum", "=", "ones_comp_sum16", "(", "sum", ",", "(", "source", "[", "i", "+", "1", "]", "<<", "8", ")", "+", "source", "[", "i", "]", ")", "return", "~", "sum", "&", "0xffff" ]
Calculates the checksum of the input bytes. RFC1071: https://tools.ietf.org/html/rfc1071 RFC792: https://tools.ietf.org/html/rfc792 Args: source: The input to be calculated. Returns: Calculated checksum.
[ "Calculates", "the", "checksum", "of", "the", "input", "bytes", "." ]
fc9e8a4b828965a800036dfbd019e97114ad80b3
https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L50-L67
train
238,511
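A worked RFC 1071 example, restating both helpers so it runs standalone. The header format "!BBHHH" (type, code, checksum, id, sequence) is an assumption about the module's ICMP_HEADER_FORMAT; the final print demonstrates the defining property that re-inserting the checksum makes the total come out to zero.

import struct

def ones_comp_sum16(num1: int, num2: int) -> int:
    carry = 1 << 16
    result = num1 + num2
    return result if result < carry else result + 1 - carry

def checksum(source: bytes) -> int:
    if len(source) % 2:
        source += b'\x00'
    acc = 0
    for i in range(0, len(source), 2):
        acc = ones_comp_sum16(acc, (source[i + 1] << 8) + source[i])
    return ~acc & 0xffff

# ICMP echo-request header with a zeroed checksum field (type=8, code=0, id=1, seq=1).
header = struct.pack("!BBHHH", 8, 0, 0, 1, 1)
cs = checksum(header)
print(hex(cs))                                          # 0xfdf7 for this header

# Verification: write the checksum back low-byte-first (matching the word order
# used above) and the checksum of the whole buffer becomes 0.
verified = header[:2] + bytes([cs & 0xff, cs >> 8]) + header[4:]
print(checksum(verified))                               # 0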
kyan001/ping3
ping3.py
send_one_ping
def send_one_ping(sock: socket, dest_addr: str, icmp_id: int, seq: int, size: int): """Sends one ping to the given destination. ICMP Header (bits): type (8), code (8), checksum (16), id (16), sequence (16) ICMP Payload: time (double), data ICMP Wikipedia: https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol Args: sock: Socket. dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com" icmp_id: ICMP packet id, usually is same as pid. seq: ICMP packet sequence, usually increases from 0 in the same process. size: The ICMP packet payload size in bytes. Note this is only for the payload part. Raises: HostUnkown: If destination address is a domain name and cannot resolved. """ try: dest_addr = socket.gethostbyname(dest_addr) # Domain name will translated into IP address, and IP address leaves unchanged. except socket.gaierror as e: print("Cannot resolve {}: Unknown host".format(dest_addr)) raise errors.HostUnknown(dest_addr) from e pseudo_checksum = 0 # Pseudo checksum is used to calculate the real checksum. icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, pseudo_checksum, icmp_id, seq) padding = (size - struct.calcsize(ICMP_TIME_FORMAT) - struct.calcsize(ICMP_HEADER_FORMAT)) * "Q" # Using double to store current time. icmp_payload = struct.pack(ICMP_TIME_FORMAT, default_timer()) + padding.encode() real_checksum = checksum(icmp_header + icmp_payload) # Calculates the checksum on the dummy header and the icmp_payload. # Don't know why I need socket.htons() on real_checksum since ICMP_HEADER_FORMAT already in Network Bytes Order (big-endian) icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, socket.htons(real_checksum), icmp_id, seq) # Put real checksum into ICMP header. packet = icmp_header + icmp_payload sock.sendto(packet, (dest_addr, 0))
python
def send_one_ping(sock: socket, dest_addr: str, icmp_id: int, seq: int, size: int): """Sends one ping to the given destination. ICMP Header (bits): type (8), code (8), checksum (16), id (16), sequence (16) ICMP Payload: time (double), data ICMP Wikipedia: https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol Args: sock: Socket. dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com" icmp_id: ICMP packet id, usually is same as pid. seq: ICMP packet sequence, usually increases from 0 in the same process. size: The ICMP packet payload size in bytes. Note this is only for the payload part. Raises: HostUnkown: If destination address is a domain name and cannot resolved. """ try: dest_addr = socket.gethostbyname(dest_addr) # Domain name will translated into IP address, and IP address leaves unchanged. except socket.gaierror as e: print("Cannot resolve {}: Unknown host".format(dest_addr)) raise errors.HostUnknown(dest_addr) from e pseudo_checksum = 0 # Pseudo checksum is used to calculate the real checksum. icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, pseudo_checksum, icmp_id, seq) padding = (size - struct.calcsize(ICMP_TIME_FORMAT) - struct.calcsize(ICMP_HEADER_FORMAT)) * "Q" # Using double to store current time. icmp_payload = struct.pack(ICMP_TIME_FORMAT, default_timer()) + padding.encode() real_checksum = checksum(icmp_header + icmp_payload) # Calculates the checksum on the dummy header and the icmp_payload. # Don't know why I need socket.htons() on real_checksum since ICMP_HEADER_FORMAT already in Network Bytes Order (big-endian) icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, socket.htons(real_checksum), icmp_id, seq) # Put real checksum into ICMP header. packet = icmp_header + icmp_payload sock.sendto(packet, (dest_addr, 0))
[ "def", "send_one_ping", "(", "sock", ":", "socket", ",", "dest_addr", ":", "str", ",", "icmp_id", ":", "int", ",", "seq", ":", "int", ",", "size", ":", "int", ")", ":", "try", ":", "dest_addr", "=", "socket", ".", "gethostbyname", "(", "dest_addr", ")", "# Domain name will translated into IP address, and IP address leaves unchanged.", "except", "socket", ".", "gaierror", "as", "e", ":", "print", "(", "\"Cannot resolve {}: Unknown host\"", ".", "format", "(", "dest_addr", ")", ")", "raise", "errors", ".", "HostUnknown", "(", "dest_addr", ")", "from", "e", "pseudo_checksum", "=", "0", "# Pseudo checksum is used to calculate the real checksum.", "icmp_header", "=", "struct", ".", "pack", "(", "ICMP_HEADER_FORMAT", ",", "IcmpType", ".", "ECHO_REQUEST", ",", "ICMP_DEFAULT_CODE", ",", "pseudo_checksum", ",", "icmp_id", ",", "seq", ")", "padding", "=", "(", "size", "-", "struct", ".", "calcsize", "(", "ICMP_TIME_FORMAT", ")", "-", "struct", ".", "calcsize", "(", "ICMP_HEADER_FORMAT", ")", ")", "*", "\"Q\"", "# Using double to store current time.", "icmp_payload", "=", "struct", ".", "pack", "(", "ICMP_TIME_FORMAT", ",", "default_timer", "(", ")", ")", "+", "padding", ".", "encode", "(", ")", "real_checksum", "=", "checksum", "(", "icmp_header", "+", "icmp_payload", ")", "# Calculates the checksum on the dummy header and the icmp_payload.", "# Don't know why I need socket.htons() on real_checksum since ICMP_HEADER_FORMAT already in Network Bytes Order (big-endian)", "icmp_header", "=", "struct", ".", "pack", "(", "ICMP_HEADER_FORMAT", ",", "IcmpType", ".", "ECHO_REQUEST", ",", "ICMP_DEFAULT_CODE", ",", "socket", ".", "htons", "(", "real_checksum", ")", ",", "icmp_id", ",", "seq", ")", "# Put real checksum into ICMP header.", "packet", "=", "icmp_header", "+", "icmp_payload", "sock", ".", "sendto", "(", "packet", ",", "(", "dest_addr", ",", "0", ")", ")" ]
Sends one ping to the given destination. ICMP Header (bits): type (8), code (8), checksum (16), id (16), sequence (16) ICMP Payload: time (double), data ICMP Wikipedia: https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol Args: sock: Socket. dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com" icmp_id: ICMP packet id, usually is same as pid. seq: ICMP packet sequence, usually increases from 0 in the same process. size: The ICMP packet payload size in bytes. Note this is only for the payload part. Raises: HostUnkown: If destination address is a domain name and cannot resolved.
[ "Sends", "one", "ping", "to", "the", "given", "destination", "." ]
fc9e8a4b828965a800036dfbd019e97114ad80b3
https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L70-L100
train
238,512
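A small sketch of the payload layout built in send_one_ping. The two struct formats are assumptions about the module-level constants (an 8-byte big-endian double for the send timestamp and "!BBHHH" for the header); with the default size=56 from ping() the padding works out to 40 filler bytes, i.e. header plus payload totals 56 bytes.

import struct
from timeit import default_timer

ICMP_TIME_FORMAT = "!d"        # assumption: 8-byte send timestamp
ICMP_HEADER_FORMAT = "!BBHHH"  # assumption: type, code, checksum, id, sequence
size = 56

padding = (size - struct.calcsize(ICMP_TIME_FORMAT) - struct.calcsize(ICMP_HEADER_FORMAT)) * "Q"
icmp_payload = struct.pack(ICMP_TIME_FORMAT, default_timer()) + padding.encode()
print(len(padding), len(icmp_payload))   # 40 48 -- 8-byte timestamp + 40 bytes of 'Q'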
kyan001/ping3
ping3.py
receive_one_ping
def receive_one_ping(sock: socket, icmp_id: int, seq: int, timeout: int) -> float or None: """Receives the ping from the socket. IP Header (bits): version (8), type of service (8), length (16), id (16), flags (16), time to live (8), protocol (8), checksum (16), source ip (32), destination ip (32). ICMP Packet (bytes): IP Header (20), ICMP Header (8), ICMP Payload (*). Ping Wikipedia: https://en.wikipedia.org/wiki/Ping_(networking_utility) ToS (Type of Service) in IP header for ICMP is 0. Protocol in IP header for ICMP is 1. Args: sock: The same socket used for send the ping. icmp_id: ICMP packet id. Sent packet id should be identical with received packet id. seq: ICMP packet sequence. Sent packet sequence should be identical with received packet sequence. timeout: Timeout in seconds. Returns: The delay in seconds or None on timeout. Raises: TimeToLiveExpired: If the Time-To-Live in IP Header is not large enough for destination. TimeExceeded: If time exceeded but Time-To-Live does not expired. """ ip_header_slice = slice(0, struct.calcsize(IP_HEADER_FORMAT)) # [0:20] icmp_header_slice = slice(ip_header_slice.stop, ip_header_slice.stop + struct.calcsize(ICMP_HEADER_FORMAT)) # [20:28] ip_header_keys = ('version', 'tos', 'len', 'id', 'flags', 'ttl', 'protocol', 'checksum', 'src_addr', 'dest_addr') icmp_header_keys = ('type', 'code', 'checksum', 'id', 'seq') while True: selected = select.select([sock], [], [], timeout) if selected[0] == []: # Timeout raise errors.Timeout(timeout) time_recv = default_timer() recv_data, addr = sock.recvfrom(1024) ip_header_raw, icmp_header_raw, icmp_payload_raw = recv_data[ip_header_slice], recv_data[icmp_header_slice], recv_data[icmp_header_slice.stop:] ip_header = dict(zip(ip_header_keys, struct.unpack(IP_HEADER_FORMAT, ip_header_raw))) _debug("IP HEADER:", ip_header) icmp_header = dict(zip(icmp_header_keys, struct.unpack(ICMP_HEADER_FORMAT, icmp_header_raw))) _debug("ICMP HEADER:", icmp_header) if icmp_header['type'] == IcmpType.TIME_EXCEEDED: # TIME_EXCEEDED has no icmp_id and icmp_seq. Usually they are 0. if icmp_header['code'] == IcmpTimeExceededCode.TTL_EXPIRED: raise errors.TimeToLiveExpired() # Some router does not report TTL expired and then timeout shows. raise errors.TimeExceeded() if icmp_header['id'] == icmp_id and icmp_header['seq'] == seq: # ECHO_REPLY should match the if icmp_header['type'] == IcmpType.ECHO_REQUEST: # filters out the ECHO_REQUEST itself. _debug("ECHO_REQUEST filtered out.") continue if icmp_header['type'] == IcmpType.ECHO_REPLY: time_sent = struct.unpack(ICMP_TIME_FORMAT, icmp_payload_raw[0:struct.calcsize(ICMP_TIME_FORMAT)])[0] return time_recv - time_sent
python
def receive_one_ping(sock: socket, icmp_id: int, seq: int, timeout: int) -> float or None: """Receives the ping from the socket. IP Header (bits): version (8), type of service (8), length (16), id (16), flags (16), time to live (8), protocol (8), checksum (16), source ip (32), destination ip (32). ICMP Packet (bytes): IP Header (20), ICMP Header (8), ICMP Payload (*). Ping Wikipedia: https://en.wikipedia.org/wiki/Ping_(networking_utility) ToS (Type of Service) in IP header for ICMP is 0. Protocol in IP header for ICMP is 1. Args: sock: The same socket used for send the ping. icmp_id: ICMP packet id. Sent packet id should be identical with received packet id. seq: ICMP packet sequence. Sent packet sequence should be identical with received packet sequence. timeout: Timeout in seconds. Returns: The delay in seconds or None on timeout. Raises: TimeToLiveExpired: If the Time-To-Live in IP Header is not large enough for destination. TimeExceeded: If time exceeded but Time-To-Live does not expired. """ ip_header_slice = slice(0, struct.calcsize(IP_HEADER_FORMAT)) # [0:20] icmp_header_slice = slice(ip_header_slice.stop, ip_header_slice.stop + struct.calcsize(ICMP_HEADER_FORMAT)) # [20:28] ip_header_keys = ('version', 'tos', 'len', 'id', 'flags', 'ttl', 'protocol', 'checksum', 'src_addr', 'dest_addr') icmp_header_keys = ('type', 'code', 'checksum', 'id', 'seq') while True: selected = select.select([sock], [], [], timeout) if selected[0] == []: # Timeout raise errors.Timeout(timeout) time_recv = default_timer() recv_data, addr = sock.recvfrom(1024) ip_header_raw, icmp_header_raw, icmp_payload_raw = recv_data[ip_header_slice], recv_data[icmp_header_slice], recv_data[icmp_header_slice.stop:] ip_header = dict(zip(ip_header_keys, struct.unpack(IP_HEADER_FORMAT, ip_header_raw))) _debug("IP HEADER:", ip_header) icmp_header = dict(zip(icmp_header_keys, struct.unpack(ICMP_HEADER_FORMAT, icmp_header_raw))) _debug("ICMP HEADER:", icmp_header) if icmp_header['type'] == IcmpType.TIME_EXCEEDED: # TIME_EXCEEDED has no icmp_id and icmp_seq. Usually they are 0. if icmp_header['code'] == IcmpTimeExceededCode.TTL_EXPIRED: raise errors.TimeToLiveExpired() # Some router does not report TTL expired and then timeout shows. raise errors.TimeExceeded() if icmp_header['id'] == icmp_id and icmp_header['seq'] == seq: # ECHO_REPLY should match the if icmp_header['type'] == IcmpType.ECHO_REQUEST: # filters out the ECHO_REQUEST itself. _debug("ECHO_REQUEST filtered out.") continue if icmp_header['type'] == IcmpType.ECHO_REPLY: time_sent = struct.unpack(ICMP_TIME_FORMAT, icmp_payload_raw[0:struct.calcsize(ICMP_TIME_FORMAT)])[0] return time_recv - time_sent
[ "def", "receive_one_ping", "(", "sock", ":", "socket", ",", "icmp_id", ":", "int", ",", "seq", ":", "int", ",", "timeout", ":", "int", ")", "->", "float", "or", "None", ":", "ip_header_slice", "=", "slice", "(", "0", ",", "struct", ".", "calcsize", "(", "IP_HEADER_FORMAT", ")", ")", "# [0:20]", "icmp_header_slice", "=", "slice", "(", "ip_header_slice", ".", "stop", ",", "ip_header_slice", ".", "stop", "+", "struct", ".", "calcsize", "(", "ICMP_HEADER_FORMAT", ")", ")", "# [20:28]", "ip_header_keys", "=", "(", "'version'", ",", "'tos'", ",", "'len'", ",", "'id'", ",", "'flags'", ",", "'ttl'", ",", "'protocol'", ",", "'checksum'", ",", "'src_addr'", ",", "'dest_addr'", ")", "icmp_header_keys", "=", "(", "'type'", ",", "'code'", ",", "'checksum'", ",", "'id'", ",", "'seq'", ")", "while", "True", ":", "selected", "=", "select", ".", "select", "(", "[", "sock", "]", ",", "[", "]", ",", "[", "]", ",", "timeout", ")", "if", "selected", "[", "0", "]", "==", "[", "]", ":", "# Timeout", "raise", "errors", ".", "Timeout", "(", "timeout", ")", "time_recv", "=", "default_timer", "(", ")", "recv_data", ",", "addr", "=", "sock", ".", "recvfrom", "(", "1024", ")", "ip_header_raw", ",", "icmp_header_raw", ",", "icmp_payload_raw", "=", "recv_data", "[", "ip_header_slice", "]", ",", "recv_data", "[", "icmp_header_slice", "]", ",", "recv_data", "[", "icmp_header_slice", ".", "stop", ":", "]", "ip_header", "=", "dict", "(", "zip", "(", "ip_header_keys", ",", "struct", ".", "unpack", "(", "IP_HEADER_FORMAT", ",", "ip_header_raw", ")", ")", ")", "_debug", "(", "\"IP HEADER:\"", ",", "ip_header", ")", "icmp_header", "=", "dict", "(", "zip", "(", "icmp_header_keys", ",", "struct", ".", "unpack", "(", "ICMP_HEADER_FORMAT", ",", "icmp_header_raw", ")", ")", ")", "_debug", "(", "\"ICMP HEADER:\"", ",", "icmp_header", ")", "if", "icmp_header", "[", "'type'", "]", "==", "IcmpType", ".", "TIME_EXCEEDED", ":", "# TIME_EXCEEDED has no icmp_id and icmp_seq. Usually they are 0.", "if", "icmp_header", "[", "'code'", "]", "==", "IcmpTimeExceededCode", ".", "TTL_EXPIRED", ":", "raise", "errors", ".", "TimeToLiveExpired", "(", ")", "# Some router does not report TTL expired and then timeout shows.", "raise", "errors", ".", "TimeExceeded", "(", ")", "if", "icmp_header", "[", "'id'", "]", "==", "icmp_id", "and", "icmp_header", "[", "'seq'", "]", "==", "seq", ":", "# ECHO_REPLY should match the", "if", "icmp_header", "[", "'type'", "]", "==", "IcmpType", ".", "ECHO_REQUEST", ":", "# filters out the ECHO_REQUEST itself.", "_debug", "(", "\"ECHO_REQUEST filtered out.\"", ")", "continue", "if", "icmp_header", "[", "'type'", "]", "==", "IcmpType", ".", "ECHO_REPLY", ":", "time_sent", "=", "struct", ".", "unpack", "(", "ICMP_TIME_FORMAT", ",", "icmp_payload_raw", "[", "0", ":", "struct", ".", "calcsize", "(", "ICMP_TIME_FORMAT", ")", "]", ")", "[", "0", "]", "return", "time_recv", "-", "time_sent" ]
Receives the ping from the socket. IP Header (bits): version (8), type of service (8), length (16), id (16), flags (16), time to live (8), protocol (8), checksum (16), source ip (32), destination ip (32). ICMP Packet (bytes): IP Header (20), ICMP Header (8), ICMP Payload (*). Ping Wikipedia: https://en.wikipedia.org/wiki/Ping_(networking_utility) ToS (Type of Service) in IP header for ICMP is 0. Protocol in IP header for ICMP is 1. Args: sock: The same socket used for send the ping. icmp_id: ICMP packet id. Sent packet id should be identical with received packet id. seq: ICMP packet sequence. Sent packet sequence should be identical with received packet sequence. timeout: Timeout in seconds. Returns: The delay in seconds or None on timeout. Raises: TimeToLiveExpired: If the Time-To-Live in IP Header is not large enough for destination. TimeExceeded: If time exceeded but Time-To-Live does not expired.
[ "Receives", "the", "ping", "from", "the", "socket", "." ]
fc9e8a4b828965a800036dfbd019e97114ad80b3
https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L103-L149
train
238,513
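A sketch of the dict(zip(...)) header parsing used above, applied to a made-up 28-byte buffer. The IP header format "!BBHHHBBHII" (20 bytes) is an assumption about the module's IP_HEADER_FORMAT, and the field values are fabricated purely to show the unpacking.

import struct

IP_HEADER_FORMAT = "!BBHHHBBHII"   # assumption: 20-byte IPv4 header layout
ICMP_HEADER_FORMAT = "!BBHHH"      # assumption: 8-byte ICMP header

ip_header_keys = ('version', 'tos', 'len', 'id', 'flags', 'ttl', 'protocol', 'checksum', 'src_addr', 'dest_addr')
icmp_header_keys = ('type', 'code', 'checksum', 'id', 'seq')

# Fabricated echo-reply datagram: IPv4 header followed by an ICMP header (type 0).
recv_data = struct.pack(IP_HEADER_FORMAT, 0x45, 0, 28, 1, 0, 64, 1, 0, 0x7f000001, 0x7f000001)
recv_data += struct.pack(ICMP_HEADER_FORMAT, 0, 0, 0, 4242, 7)

ip_header = dict(zip(ip_header_keys, struct.unpack(IP_HEADER_FORMAT, recv_data[:20])))
icmp_header = dict(zip(icmp_header_keys, struct.unpack(ICMP_HEADER_FORMAT, recv_data[20:28])))
print(ip_header['ttl'], ip_header['protocol'])                      # 64 1
print(icmp_header['type'], icmp_header['id'], icmp_header['seq'])   # 0 4242 7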
kyan001/ping3
ping3.py
ping
def ping(dest_addr: str, timeout: int = 4, unit: str = "s", src_addr: str = None, ttl: int = 64, seq: int = 0, size: int = 56) -> float or None: """ Send one ping to destination address with the given timeout. Args: dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com" timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4) unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s") src_addr: The IP address to ping from. This is for multi-interface clients. Ex. "192.168.1.20". (default None) ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64) seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0) size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56) Returns: The delay in seconds/milliseconds or None on timeout. Raises: PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True. """ with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock: sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl) if src_addr: sock.bind((src_addr, 0)) icmp_id = threading.current_thread().ident % 0xFFFF try: send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size) delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout) # in seconds except errors.PingError as e: _debug(e) if EXCEPTIONS: raise e return None if delay is None: return None if unit == "ms": delay *= 1000 # in milliseconds return delay
python
def ping(dest_addr: str, timeout: int = 4, unit: str = "s", src_addr: str = None, ttl: int = 64, seq: int = 0, size: int = 56) -> float or None: """ Send one ping to destination address with the given timeout. Args: dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com" timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4) unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s") src_addr: The IP address to ping from. This is for multi-interface clients. Ex. "192.168.1.20". (default None) ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64) seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0) size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56) Returns: The delay in seconds/milliseconds or None on timeout. Raises: PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True. """ with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock: sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl) if src_addr: sock.bind((src_addr, 0)) icmp_id = threading.current_thread().ident % 0xFFFF try: send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size) delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout) # in seconds except errors.PingError as e: _debug(e) if EXCEPTIONS: raise e return None if delay is None: return None if unit == "ms": delay *= 1000 # in milliseconds return delay
[ "def", "ping", "(", "dest_addr", ":", "str", ",", "timeout", ":", "int", "=", "4", ",", "unit", ":", "str", "=", "\"s\"", ",", "src_addr", ":", "str", "=", "None", ",", "ttl", ":", "int", "=", "64", ",", "seq", ":", "int", "=", "0", ",", "size", ":", "int", "=", "56", ")", "->", "float", "or", "None", ":", "with", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_RAW", ",", "socket", ".", "IPPROTO_ICMP", ")", "as", "sock", ":", "sock", ".", "setsockopt", "(", "socket", ".", "SOL_IP", ",", "socket", ".", "IP_TTL", ",", "ttl", ")", "if", "src_addr", ":", "sock", ".", "bind", "(", "(", "src_addr", ",", "0", ")", ")", "icmp_id", "=", "threading", ".", "current_thread", "(", ")", ".", "ident", "%", "0xFFFF", "try", ":", "send_one_ping", "(", "sock", "=", "sock", ",", "dest_addr", "=", "dest_addr", ",", "icmp_id", "=", "icmp_id", ",", "seq", "=", "seq", ",", "size", "=", "size", ")", "delay", "=", "receive_one_ping", "(", "sock", "=", "sock", ",", "icmp_id", "=", "icmp_id", ",", "seq", "=", "seq", ",", "timeout", "=", "timeout", ")", "# in seconds", "except", "errors", ".", "PingError", "as", "e", ":", "_debug", "(", "e", ")", "if", "EXCEPTIONS", ":", "raise", "e", "return", "None", "if", "delay", "is", "None", ":", "return", "None", "if", "unit", "==", "\"ms\"", ":", "delay", "*=", "1000", "# in milliseconds", "return", "delay" ]
Send one ping to destination address with the given timeout. Args: dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com" timeout: Timeout in seconds. Default is 4s, same as Windows CMD. (default 4) unit: The unit of returned value. "s" for seconds, "ms" for milliseconds. (default "s") src_addr: The IP address to ping from. This is for multi-interface clients. Ex. "192.168.1.20". (default None) ttl: The Time-To-Live of the outgoing packet. Default is 64, same as in Linux and macOS. (default 64) seq: ICMP packet sequence, usually increases from 0 in the same process. (default 0) size: The ICMP packet payload size in bytes. Default is 56, same as in macOS. (default 56) Returns: The delay in seconds/milliseconds or None on timeout. Raises: PingError: Any PingError will raise again if `ping3.EXCEPTIONS` is True.
[ "Send", "one", "ping", "to", "destination", "address", "with", "the", "given", "timeout", "." ]
fc9e8a4b828965a800036dfbd019e97114ad80b3
https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L152-L188
train
238,514
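A minimal usage sketch for the ping() helper in the record above, assuming ping3 is installed (pip install ping3) and the process is allowed to open raw ICMP sockets (typically root/administrator):

from ping3 import ping

# round-trip time in milliseconds, or None if no reply arrived within 2 seconds
delay = ping("example.com", timeout=2, unit="ms")
if delay is None:
    print("timed out")
else:
    print("reply in {:.1f} ms".format(delay))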
kyan001/ping3
ping3.py
verbose_ping
def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs): """ Send pings to destination address with the given timeout and display the result. Args: dest_addr: The destination address. Ex. "192.168.1.1"/"example.com" count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4) *args and **kwargs: And all the other arguments available in ping() except `seq`. Returns: Formatted ping results printed. """ timeout = kwargs.get("timeout") src = kwargs.get("src") unit = kwargs.setdefault("unit", "ms") for i in range(count): output_text = "ping '{}'".format(dest_addr) output_text += " from '{}'".format(src) if src else "" output_text += " ... " print(output_text, end="") delay = ping(dest_addr, seq=i, *args, **kwargs) if delay is None: print("Timeout > {}s".format(timeout) if timeout else "Timeout") else: print("{value}{unit}".format(value=int(delay), unit=unit))
python
def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs): """ Send pings to destination address with the given timeout and display the result. Args: dest_addr: The destination address. Ex. "192.168.1.1"/"example.com" count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4) *args and **kwargs: And all the other arguments available in ping() except `seq`. Returns: Formatted ping results printed. """ timeout = kwargs.get("timeout") src = kwargs.get("src") unit = kwargs.setdefault("unit", "ms") for i in range(count): output_text = "ping '{}'".format(dest_addr) output_text += " from '{}'".format(src) if src else "" output_text += " ... " print(output_text, end="") delay = ping(dest_addr, seq=i, *args, **kwargs) if delay is None: print("Timeout > {}s".format(timeout) if timeout else "Timeout") else: print("{value}{unit}".format(value=int(delay), unit=unit))
[ "def", "verbose_ping", "(", "dest_addr", ":", "str", ",", "count", ":", "int", "=", "4", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "timeout", "=", "kwargs", ".", "get", "(", "\"timeout\"", ")", "src", "=", "kwargs", ".", "get", "(", "\"src\"", ")", "unit", "=", "kwargs", ".", "setdefault", "(", "\"unit\"", ",", "\"ms\"", ")", "for", "i", "in", "range", "(", "count", ")", ":", "output_text", "=", "\"ping '{}'\"", ".", "format", "(", "dest_addr", ")", "output_text", "+=", "\" from '{}'\"", ".", "format", "(", "src", ")", "if", "src", "else", "\"\"", "output_text", "+=", "\" ... \"", "print", "(", "output_text", ",", "end", "=", "\"\"", ")", "delay", "=", "ping", "(", "dest_addr", ",", "seq", "=", "i", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "delay", "is", "None", ":", "print", "(", "\"Timeout > {}s\"", ".", "format", "(", "timeout", ")", "if", "timeout", "else", "\"Timeout\"", ")", "else", ":", "print", "(", "\"{value}{unit}\"", ".", "format", "(", "value", "=", "int", "(", "delay", ")", ",", "unit", "=", "unit", ")", ")" ]
Send pings to destination address with the given timeout and display the result. Args: dest_addr: The destination address. Ex. "192.168.1.1"/"example.com" count: How many pings should be sent. Default is 4, same as Windows CMD. (default 4) *args and **kwargs: And all the other arguments available in ping() except `seq`. Returns: Formatted ping results printed.
[ "Send", "pings", "to", "destination", "address", "with", "the", "given", "timeout", "and", "display", "the", "result", "." ]
fc9e8a4b828965a800036dfbd019e97114ad80b3
https://github.com/kyan001/ping3/blob/fc9e8a4b828965a800036dfbd019e97114ad80b3/ping3.py#L191-L215
train
238,515
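The same caveats (installation and raw-socket privileges) apply to verbose_ping(); a one-line sketch:

from ping3 import verbose_ping

verbose_ping("example.com", count=2, timeout=1)  # prints e.g. "ping 'example.com' ... 35ms" once per attempt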
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.distance
def distance(self, val): """ set the distance parameter """ tmp = 2 try: int(val) if val > 0 and val <= 2: tmp = val except (ValueError, TypeError): pass self._distance = tmp
python
def distance(self, val): """ set the distance parameter """ tmp = 2 try: int(val) if val > 0 and val <= 2: tmp = val except (ValueError, TypeError): pass self._distance = tmp
[ "def", "distance", "(", "self", ",", "val", ")", ":", "tmp", "=", "2", "try", ":", "int", "(", "val", ")", "if", "val", ">", "0", "and", "val", "<=", "2", ":", "tmp", "=", "val", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "self", ".", "_distance", "=", "tmp" ]
set the distance parameter
[ "set", "the", "distance", "parameter" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L79-L88
train
238,516
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.export
def export(self, filepath, encoding="utf-8", gzipped=True): """ Export the word frequency list for import in the future Args: filepath (str): The filepath to the exported dictionary encoding (str): The encoding of the resulting output gzipped (bool): Whether to gzip the dictionary or not """ data = json.dumps(self.word_frequency.dictionary, sort_keys=True) write_file(filepath, encoding, gzipped, data)
python
def export(self, filepath, encoding="utf-8", gzipped=True): """ Export the word frequency list for import in the future Args: filepath (str): The filepath to the exported dictionary encoding (str): The encoding of the resulting output gzipped (bool): Whether to gzip the dictionary or not """ data = json.dumps(self.word_frequency.dictionary, sort_keys=True) write_file(filepath, encoding, gzipped, data)
[ "def", "export", "(", "self", ",", "filepath", ",", "encoding", "=", "\"utf-8\"", ",", "gzipped", "=", "True", ")", ":", "data", "=", "json", ".", "dumps", "(", "self", ".", "word_frequency", ".", "dictionary", ",", "sort_keys", "=", "True", ")", "write_file", "(", "filepath", ",", "encoding", ",", "gzipped", ",", "data", ")" ]
Export the word frequency list for import in the future Args: filepath (str): The filepath to the exported dictionary encoding (str): The encoding of the resulting output gzipped (bool): Whether to gzip the dictionary or not
[ "Export", "the", "word", "frequency", "list", "for", "import", "in", "the", "future" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L100-L108
train
238,517
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.word_probability
def word_probability(self, word, total_words=None): """ Calculate the probability of the `word` being the desired, correct word Args: word (str): The word for which the word probability is \ calculated total_words (int): The total number of words to use in the \ calculation; use the default for using the whole word \ frequency Returns: float: The probability that the word is the correct word """ if total_words is None: total_words = self._word_frequency.total_words return self._word_frequency.dictionary[word] / total_words
python
def word_probability(self, word, total_words=None): """ Calculate the probability of the `word` being the desired, correct word Args: word (str): The word for which the word probability is \ calculated total_words (int): The total number of words to use in the \ calculation; use the default for using the whole word \ frequency Returns: float: The probability that the word is the correct word """ if total_words is None: total_words = self._word_frequency.total_words return self._word_frequency.dictionary[word] / total_words
[ "def", "word_probability", "(", "self", ",", "word", ",", "total_words", "=", "None", ")", ":", "if", "total_words", "is", "None", ":", "total_words", "=", "self", ".", "_word_frequency", ".", "total_words", "return", "self", ".", "_word_frequency", ".", "dictionary", "[", "word", "]", "/", "total_words" ]
Calculate the probability of the `word` being the desired, correct word Args: word (str): The word for which the word probability is \ calculated total_words (int): The total number of words to use in the \ calculation; use the default for using the whole word \ frequency Returns: float: The probability that the word is the correct word
[ "Calculate", "the", "probability", "of", "the", "word", "being", "the", "desired", "correct", "word" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L110-L124
train
238,518
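A quick illustration of word_probability(), assuming pyspellchecker is installed and the default constructor loads the bundled English frequency list:

from spellchecker import SpellChecker

spell = SpellChecker()
# count of "the" in the bundled corpus divided by the total word count
print(spell.word_probability("the"))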
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.correction
def correction(self, word): """ The most probable correct spelling for the word Args: word (str): The word to correct Returns: str: The most likely candidate """ return max(self.candidates(word), key=self.word_probability)
python
def correction(self, word): """ The most probable correct spelling for the word Args: word (str): The word to correct Returns: str: The most likely candidate """ return max(self.candidates(word), key=self.word_probability)
[ "def", "correction", "(", "self", ",", "word", ")", ":", "return", "max", "(", "self", ".", "candidates", "(", "word", ")", ",", "key", "=", "self", ".", "word_probability", ")" ]
The most probable correct spelling for the word Args: word (str): The word to correct Returns: str: The most likely candidate
[ "The", "most", "probable", "correct", "spelling", "for", "the", "word" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L126-L133
train
238,519
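correction() and candidates() together form the public spell-checking entry point; a small sketch (the suggestions depend on the bundled frequency list, so treat the expected outputs as illustrative):

from spellchecker import SpellChecker

spell = SpellChecker()
print(spell.correction("speling"))   # single most probable fix, e.g. "spelling"
print(spell.candidates("speling"))   # every known word within the configured edit distance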
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.candidates
def candidates(self, word): """ Generate possible spelling corrections for the provided word up to an edit distance of two, if and only when needed Args: word (str): The word for which to calculate candidate spellings Returns: set: The set of words that are possible candidates """ if self.known([word]): # short-cut if word is correct already return {word} # get edit distance 1... res = [x for x in self.edit_distance_1(word)] tmp = self.known(res) if tmp: return tmp # if still not found, use the edit distance 1 to calc edit distance 2 if self._distance == 2: tmp = self.known([x for x in self.__edit_distance_alt(res)]) if tmp: return tmp return {word}
python
def candidates(self, word): """ Generate possible spelling corrections for the provided word up to an edit distance of two, if and only when needed Args: word (str): The word for which to calculate candidate spellings Returns: set: The set of words that are possible candidates """ if self.known([word]): # short-cut if word is correct already return {word} # get edit distance 1... res = [x for x in self.edit_distance_1(word)] tmp = self.known(res) if tmp: return tmp # if still not found, use the edit distance 1 to calc edit distance 2 if self._distance == 2: tmp = self.known([x for x in self.__edit_distance_alt(res)]) if tmp: return tmp return {word}
[ "def", "candidates", "(", "self", ",", "word", ")", ":", "if", "self", ".", "known", "(", "[", "word", "]", ")", ":", "# short-cut if word is correct already", "return", "{", "word", "}", "# get edit distance 1...", "res", "=", "[", "x", "for", "x", "in", "self", ".", "edit_distance_1", "(", "word", ")", "]", "tmp", "=", "self", ".", "known", "(", "res", ")", "if", "tmp", ":", "return", "tmp", "# if still not found, use the edit distance 1 to calc edit distance 2", "if", "self", ".", "_distance", "==", "2", ":", "tmp", "=", "self", ".", "known", "(", "[", "x", "for", "x", "in", "self", ".", "__edit_distance_alt", "(", "res", ")", "]", ")", "if", "tmp", ":", "return", "tmp", "return", "{", "word", "}" ]
Generate possible spelling corrections for the provided word up to an edit distance of two, if and only when needed Args: word (str): The word for which to calculate candidate spellings Returns: set: The set of words that are possible candidates
[ "Generate", "possible", "spelling", "corrections", "for", "the", "provided", "word", "up", "to", "an", "edit", "distance", "of", "two", "if", "and", "only", "when", "needed" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L135-L155
train
238,520
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.known
def known(self, words): """ The subset of `words` that appear in the dictionary of words Args: words (list): List of words to determine which are in the \ corpus Returns: set: The set of those words from the input that are in the \ corpus """ tmp = [w.lower() for w in words] return set( w for w in tmp if w in self._word_frequency.dictionary or not self._check_if_should_check(w) )
python
def known(self, words): """ The subset of `words` that appear in the dictionary of words Args: words (list): List of words to determine which are in the \ corpus Returns: set: The set of those words from the input that are in the \ corpus """ tmp = [w.lower() for w in words] return set( w for w in tmp if w in self._word_frequency.dictionary or not self._check_if_should_check(w) )
[ "def", "known", "(", "self", ",", "words", ")", ":", "tmp", "=", "[", "w", ".", "lower", "(", ")", "for", "w", "in", "words", "]", "return", "set", "(", "w", "for", "w", "in", "tmp", "if", "w", "in", "self", ".", "_word_frequency", ".", "dictionary", "or", "not", "self", ".", "_check_if_should_check", "(", "w", ")", ")" ]
The subset of `words` that appear in the dictionary of words Args: words (list): List of words to determine which are in the \ corpus Returns: set: The set of those words from the input that are in the \ corpus
[ "The", "subset", "of", "words", "that", "appear", "in", "the", "dictionary", "of", "words" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L157-L172
train
238,521
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.edit_distance_1
def edit_distance_1(self, word): """ Compute all strings that are one edit away from `word` using only the letters in the corpus Args: word (str): The word for which to calculate the edit distance Returns: set: The set of strings that are edit distance one from the \ provided word """ word = word.lower() if self._check_if_should_check(word) is False: return {word} letters = self._word_frequency.letters splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [L + R[1:] for L, R in splits if R] transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1] replaces = [L + c + R[1:] for L, R in splits if R for c in letters] inserts = [L + c + R for L, R in splits for c in letters] return set(deletes + transposes + replaces + inserts)
python
def edit_distance_1(self, word): """ Compute all strings that are one edit away from `word` using only the letters in the corpus Args: word (str): The word for which to calculate the edit distance Returns: set: The set of strings that are edit distance one from the \ provided word """ word = word.lower() if self._check_if_should_check(word) is False: return {word} letters = self._word_frequency.letters splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [L + R[1:] for L, R in splits if R] transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1] replaces = [L + c + R[1:] for L, R in splits if R for c in letters] inserts = [L + c + R for L, R in splits for c in letters] return set(deletes + transposes + replaces + inserts)
[ "def", "edit_distance_1", "(", "self", ",", "word", ")", ":", "word", "=", "word", ".", "lower", "(", ")", "if", "self", ".", "_check_if_should_check", "(", "word", ")", "is", "False", ":", "return", "{", "word", "}", "letters", "=", "self", ".", "_word_frequency", ".", "letters", "splits", "=", "[", "(", "word", "[", ":", "i", "]", ",", "word", "[", "i", ":", "]", ")", "for", "i", "in", "range", "(", "len", "(", "word", ")", "+", "1", ")", "]", "deletes", "=", "[", "L", "+", "R", "[", "1", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "R", "]", "transposes", "=", "[", "L", "+", "R", "[", "1", "]", "+", "R", "[", "0", "]", "+", "R", "[", "2", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "len", "(", "R", ")", ">", "1", "]", "replaces", "=", "[", "L", "+", "c", "+", "R", "[", "1", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "R", "for", "c", "in", "letters", "]", "inserts", "=", "[", "L", "+", "c", "+", "R", "for", "L", ",", "R", "in", "splits", "for", "c", "in", "letters", "]", "return", "set", "(", "deletes", "+", "transposes", "+", "replaces", "+", "inserts", ")" ]
Compute all strings that are one edit away from `word` using only the letters in the corpus Args: word (str): The word for which to calculate the edit distance Returns: set: The set of strings that are edit distance one from the \ provided word
[ "Compute", "all", "strings", "that", "are", "one", "edit", "away", "from", "word", "using", "only", "the", "letters", "in", "the", "corpus" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L186-L204
train
238,522
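The splits/deletes/transposes/replaces/inserts pipeline in edit_distance_1() is the classic Norvig construction; a standalone trace with a fixed ASCII alphabet (the class itself uses whatever letters occur in the loaded corpus):

word = "cat"
letters = "abcdefghijklmnopqrstuvwxyz"
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]              # ("", "cat"), ("c", "at"), ...
deletes = [L + R[1:] for L, R in splits if R]                              # "at", "ct", "ca"
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]    # "act", "cta"
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
print(len(set(deletes + transposes + replaces + inserts)))                 # a couple of hundred one-edit variants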
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.edit_distance_2
def edit_distance_2(self, word): """ Compute all strings that are two edits away from `word` using only the letters in the corpus Args: word (str): The word for which to calculate the edit distance Returns: set: The set of strings that are edit distance two from the \ provided word """ word = word.lower() return [ e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1) ]
python
def edit_distance_2(self, word): """ Compute all strings that are two edits away from `word` using only the letters in the corpus Args: word (str): The word for which to calculate the edit distance Returns: set: The set of strings that are edit distance two from the \ provided word """ word = word.lower() return [ e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1) ]
[ "def", "edit_distance_2", "(", "self", ",", "word", ")", ":", "word", "=", "word", ".", "lower", "(", ")", "return", "[", "e2", "for", "e1", "in", "self", ".", "edit_distance_1", "(", "word", ")", "for", "e2", "in", "self", ".", "edit_distance_1", "(", "e1", ")", "]" ]
Compute all strings that are two edits away from `word` using only the letters in the corpus Args: word (str): The word for which to calculate the edit distance Returns: set: The set of strings that are edit distance two from the \ provided word
[ "Compute", "all", "strings", "that", "are", "two", "edits", "away", "from", "word", "using", "only", "the", "letters", "in", "the", "corpus" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L206-L218
train
238,523
barrust/pyspellchecker
spellchecker/spellchecker.py
SpellChecker.__edit_distance_alt
def __edit_distance_alt(self, words): """ Compute all strings that are 1 edits away from all the words using only the letters in the corpus Args: words (list): The words for which to calculate the edit distance Returns: set: The set of strings that are edit distance two from the \ provided words """ words = [x.lower() for x in words] return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
python
def __edit_distance_alt(self, words): """ Compute all strings that are 1 edits away from all the words using only the letters in the corpus Args: words (list): The words for which to calculate the edit distance Returns: set: The set of strings that are edit distance two from the \ provided words """ words = [x.lower() for x in words] return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
[ "def", "__edit_distance_alt", "(", "self", ",", "words", ")", ":", "words", "=", "[", "x", ".", "lower", "(", ")", "for", "x", "in", "words", "]", "return", "[", "e2", "for", "e1", "in", "words", "for", "e2", "in", "self", ".", "edit_distance_1", "(", "e1", ")", "]" ]
Compute all strings that are 1 edits away from all the words using only the letters in the corpus Args: words (list): The words for which to calculate the edit distance Returns: set: The set of strings that are edit distance two from the \ provided words
[ "Compute", "all", "strings", "that", "are", "1", "edits", "away", "from", "all", "the", "words", "using", "only", "the", "letters", "in", "the", "corpus" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L220-L230
train
238,524
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.pop
def pop(self, key, default=None): """ Remove the key and return the associated value or default if not found Args: key (str): The key to remove default (obj): The value to return if key is not present """ return self._dictionary.pop(key.lower(), default)
python
def pop(self, key, default=None): """ Remove the key and return the associated value or default if not found Args: key (str): The key to remove default (obj): The value to return if key is not present """ return self._dictionary.pop(key.lower(), default)
[ "def", "pop", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "return", "self", ".", "_dictionary", ".", "pop", "(", "key", ".", "lower", "(", ")", ",", "default", ")" ]
Remove the key and return the associated value or default if not found Args: key (str): The key to remove default (obj): The value to return if key is not present
[ "Remove", "the", "key", "and", "return", "the", "associated", "value", "or", "default", "if", "not", "found" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L275-L282
train
238,525
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.items
def items(self): """ Iterator over the words in the dictionary Yields: str: The next word in the dictionary int: The number of instances in the dictionary Note: This is the same as `dict.items()` """ for word in self._dictionary.keys(): yield word, self._dictionary[word]
python
def items(self): """ Iterator over the words in the dictionary Yields: str: The next word in the dictionary int: The number of instances in the dictionary Note: This is the same as `dict.items()` """ for word in self._dictionary.keys(): yield word, self._dictionary[word]
[ "def", "items", "(", "self", ")", ":", "for", "word", "in", "self", ".", "_dictionary", ".", "keys", "(", ")", ":", "yield", "word", ",", "self", ".", "_dictionary", "[", "word", "]" ]
Iterator over the words in the dictionary Yields: str: The next word in the dictionary int: The number of instances in the dictionary Note: This is the same as `dict.items()`
[ "Iterator", "over", "the", "words", "in", "the", "dictionary" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L350-L359
train
238,526
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.load_dictionary
def load_dictionary(self, filename, encoding="utf-8"): """ Load in a pre-built word frequency list Args: filename (str): The filepath to the json (optionally gzipped) \ file to be loaded encoding (str): The encoding of the dictionary """ with load_file(filename, encoding) as data: self._dictionary.update(json.loads(data.lower(), encoding=encoding)) self._update_dictionary()
python
def load_dictionary(self, filename, encoding="utf-8"): """ Load in a pre-built word frequency list Args: filename (str): The filepath to the json (optionally gzipped) \ file to be loaded encoding (str): The encoding of the dictionary """ with load_file(filename, encoding) as data: self._dictionary.update(json.loads(data.lower(), encoding=encoding)) self._update_dictionary()
[ "def", "load_dictionary", "(", "self", ",", "filename", ",", "encoding", "=", "\"utf-8\"", ")", ":", "with", "load_file", "(", "filename", ",", "encoding", ")", "as", "data", ":", "self", ".", "_dictionary", ".", "update", "(", "json", ".", "loads", "(", "data", ".", "lower", "(", ")", ",", "encoding", "=", "encoding", ")", ")", "self", ".", "_update_dictionary", "(", ")" ]
Load in a pre-built word frequency list Args: filename (str): The filepath to the json (optionally gzipped) \ file to be loaded encoding (str): The encoding of the dictionary
[ "Load", "in", "a", "pre", "-", "built", "word", "frequency", "list" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L361-L370
train
238,527
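export() (shown earlier) and load_dictionary() round-trip through the same gzipped-JSON format; a sketch with a made-up file name:

from spellchecker import SpellChecker

spell = SpellChecker()
spell.export("my_dictionary.gz", gzipped=True)             # word -> count written as gzipped JSON

fresh = SpellChecker(language=None)                        # start without the bundled dictionary
fresh.word_frequency.load_dictionary("my_dictionary.gz")   # counts restored from the export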
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.load_text_file
def load_text_file(self, filename, encoding="utf-8", tokenizer=None): """ Load in a text file from which to generate a word frequency list Args: filename (str): The filepath to the text file to be loaded encoding (str): The encoding of the text file tokenizer (function): The function to use to tokenize a string """ with load_file(filename, encoding=encoding) as data: self.load_text(data, tokenizer)
python
def load_text_file(self, filename, encoding="utf-8", tokenizer=None): """ Load in a text file from which to generate a word frequency list Args: filename (str): The filepath to the text file to be loaded encoding (str): The encoding of the text file tokenizer (function): The function to use to tokenize a string """ with load_file(filename, encoding=encoding) as data: self.load_text(data, tokenizer)
[ "def", "load_text_file", "(", "self", ",", "filename", ",", "encoding", "=", "\"utf-8\"", ",", "tokenizer", "=", "None", ")", ":", "with", "load_file", "(", "filename", ",", "encoding", "=", "encoding", ")", "as", "data", ":", "self", ".", "load_text", "(", "data", ",", "tokenizer", ")" ]
Load in a text file from which to generate a word frequency list Args: filename (str): The filepath to the text file to be loaded encoding (str): The encoding of the text file tokenizer (function): The function to use to tokenize a string
[ "Load", "in", "a", "text", "file", "from", "which", "to", "generate", "a", "word", "frequency", "list" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L372-L381
train
238,528
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.load_text
def load_text(self, text, tokenizer=None): """ Load text from which to generate a word frequency list Args: text (str): The text to be loaded tokenizer (function): The function to use to tokenize a string """ if tokenizer: words = [x.lower() for x in tokenizer(text)] else: words = self.tokenize(text) self._dictionary.update(words) self._update_dictionary()
python
def load_text(self, text, tokenizer=None): """ Load text from which to generate a word frequency list Args: text (str): The text to be loaded tokenizer (function): The function to use to tokenize a string """ if tokenizer: words = [x.lower() for x in tokenizer(text)] else: words = self.tokenize(text) self._dictionary.update(words) self._update_dictionary()
[ "def", "load_text", "(", "self", ",", "text", ",", "tokenizer", "=", "None", ")", ":", "if", "tokenizer", ":", "words", "=", "[", "x", ".", "lower", "(", ")", "for", "x", "in", "tokenizer", "(", "text", ")", "]", "else", ":", "words", "=", "self", ".", "tokenize", "(", "text", ")", "self", ".", "_dictionary", ".", "update", "(", "words", ")", "self", ".", "_update_dictionary", "(", ")" ]
Load text from which to generate a word frequency list Args: text (str): The text to be loaded tokenizer (function): The function to use to tokenize a string
[ "Load", "text", "from", "which", "to", "generate", "a", "word", "frequency", "list" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L383-L396
train
238,529
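load_text() is the easiest way to build a domain-specific frequency list; a tiny sketch with a made-up sentence:

from spellchecker import SpellChecker

spell = SpellChecker(language=None)        # empty word-frequency table
spell.word_frequency.load_text("the quick brown fox jumps over the lazy dog and the cat")
print(spell.word_frequency.dictionary["the"])   # 3 -- counts come straight from the text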
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.load_words
def load_words(self, words): """ Load a list of words from which to generate a word frequency list Args: words (list): The list of words to be loaded """ self._dictionary.update([word.lower() for word in words]) self._update_dictionary()
python
def load_words(self, words): """ Load a list of words from which to generate a word frequency list Args: words (list): The list of words to be loaded """ self._dictionary.update([word.lower() for word in words]) self._update_dictionary()
[ "def", "load_words", "(", "self", ",", "words", ")", ":", "self", ".", "_dictionary", ".", "update", "(", "[", "word", ".", "lower", "(", ")", "for", "word", "in", "words", "]", ")", "self", ".", "_update_dictionary", "(", ")" ]
Load a list of words from which to generate a word frequency list Args: words (list): The list of words to be loaded
[ "Load", "a", "list", "of", "words", "from", "which", "to", "generate", "a", "word", "frequency", "list" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L398-L404
train
238,530
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.remove_words
def remove_words(self, words): """ Remove a list of words from the word frequency list Args: words (list): The list of words to remove """ for word in words: self._dictionary.pop(word.lower()) self._update_dictionary()
python
def remove_words(self, words): """ Remove a list of words from the word frequency list Args: words (list): The list of words to remove """ for word in words: self._dictionary.pop(word.lower()) self._update_dictionary()
[ "def", "remove_words", "(", "self", ",", "words", ")", ":", "for", "word", "in", "words", ":", "self", ".", "_dictionary", ".", "pop", "(", "word", ".", "lower", "(", ")", ")", "self", ".", "_update_dictionary", "(", ")" ]
Remove a list of words from the word frequency list Args: words (list): The list of words to remove
[ "Remove", "a", "list", "of", "words", "from", "the", "word", "frequency", "list" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L413-L420
train
238,531
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.remove
def remove(self, word): """ Remove a word from the word frequency list Args: word (str): The word to remove """ self._dictionary.pop(word.lower()) self._update_dictionary()
python
def remove(self, word): """ Remove a word from the word frequency list Args: word (str): The word to remove """ self._dictionary.pop(word.lower()) self._update_dictionary()
[ "def", "remove", "(", "self", ",", "word", ")", ":", "self", ".", "_dictionary", ".", "pop", "(", "word", ".", "lower", "(", ")", ")", "self", ".", "_update_dictionary", "(", ")" ]
Remove a word from the word frequency list Args: word (str): The word to remove
[ "Remove", "a", "word", "from", "the", "word", "frequency", "list" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L422-L428
train
238,532
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency.remove_by_threshold
def remove_by_threshold(self, threshold=5): """ Remove all words at, or below, the provided threshold Args: threshold (int): The threshold at which a word is to be \ removed """ keys = [x for x in self._dictionary.keys()] for key in keys: if self._dictionary[key] <= threshold: self._dictionary.pop(key) self._update_dictionary()
python
def remove_by_threshold(self, threshold=5): """ Remove all words at, or below, the provided threshold Args: threshold (int): The threshold at which a word is to be \ removed """ keys = [x for x in self._dictionary.keys()] for key in keys: if self._dictionary[key] <= threshold: self._dictionary.pop(key) self._update_dictionary()
[ "def", "remove_by_threshold", "(", "self", ",", "threshold", "=", "5", ")", ":", "keys", "=", "[", "x", "for", "x", "in", "self", ".", "_dictionary", ".", "keys", "(", ")", "]", "for", "key", "in", "keys", ":", "if", "self", ".", "_dictionary", "[", "key", "]", "<=", "threshold", ":", "self", ".", "_dictionary", ".", "pop", "(", "key", ")", "self", ".", "_update_dictionary", "(", ")" ]
Remove all words at, or below, the provided threshold Args: threshold (int): The threshold at which a word is to be \ removed
[ "Remove", "all", "words", "at", "or", "below", "the", "provided", "threshold" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L430-L440
train
238,533
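The removal helpers above all mutate the same underlying table; for example, pruning rare entries with remove_by_threshold():

from spellchecker import SpellChecker

spell = SpellChecker()
spell.word_frequency.remove_by_threshold(threshold=5)   # drop anything seen 5 times or fewer
print("the" in spell.word_frequency.dictionary)         # True -- common words survive the pruning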
barrust/pyspellchecker
spellchecker/spellchecker.py
WordFrequency._update_dictionary
def _update_dictionary(self): """ Update the word frequency object """ self._total_words = sum(self._dictionary.values()) self._unique_words = len(self._dictionary.keys()) self._letters = set() for key in self._dictionary: self._letters.update(key)
python
def _update_dictionary(self): """ Update the word frequency object """ self._total_words = sum(self._dictionary.values()) self._unique_words = len(self._dictionary.keys()) self._letters = set() for key in self._dictionary: self._letters.update(key)
[ "def", "_update_dictionary", "(", "self", ")", ":", "self", ".", "_total_words", "=", "sum", "(", "self", ".", "_dictionary", ".", "values", "(", ")", ")", "self", ".", "_unique_words", "=", "len", "(", "self", ".", "_dictionary", ".", "keys", "(", ")", ")", "self", ".", "_letters", "=", "set", "(", ")", "for", "key", "in", "self", ".", "_dictionary", ":", "self", ".", "_letters", ".", "update", "(", "key", ")" ]
Update the word frequency object
[ "Update", "the", "word", "frequency", "object" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L442-L448
train
238,534
barrust/pyspellchecker
spellchecker/utils.py
load_file
def load_file(filename, encoding): """ Context manager to handle opening a gzip or text file correctly and reading all the data Args: filename (str): The filename to open encoding (str): The file encoding to use Yields: str: The string data from the file read """ try: with gzip.open(filename, mode="rt") as fobj: yield fobj.read() except (OSError, IOError): with OPEN(filename, mode="r", encoding=encoding) as fobj: yield fobj.read()
python
def load_file(filename, encoding): """ Context manager to handle opening a gzip or text file correctly and reading all the data Args: filename (str): The filename to open encoding (str): The file encoding to use Yields: str: The string data from the file read """ try: with gzip.open(filename, mode="rt") as fobj: yield fobj.read() except (OSError, IOError): with OPEN(filename, mode="r", encoding=encoding) as fobj: yield fobj.read()
[ "def", "load_file", "(", "filename", ",", "encoding", ")", ":", "try", ":", "with", "gzip", ".", "open", "(", "filename", ",", "mode", "=", "\"rt\"", ")", "as", "fobj", ":", "yield", "fobj", ".", "read", "(", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "with", "OPEN", "(", "filename", ",", "mode", "=", "\"r\"", ",", "encoding", "=", "encoding", ")", "as", "fobj", ":", "yield", "fobj", ".", "read", "(", ")" ]
Context manager to handle opening a gzip or text file correctly and reading all the data Args: filename (str): The filename to open encoding (str): The file encoding to use Yields: str: The string data from the file read
[ "Context", "manager", "to", "handle", "opening", "a", "gzip", "or", "text", "file", "correctly", "and", "reading", "all", "the", "data" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/utils.py#L16-L31
train
238,535
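load_file() is used by the loaders above as a context manager (in the source module it carries a @contextlib.contextmanager decorator, which the snippet omits); it first tries gzip and falls back to plain text. A usage sketch with a made-up path:

from spellchecker.utils import load_file

with load_file("corpus.txt.gz", encoding="utf-8") as data:   # works for .gz or plain text
    print(len(data.split()), "whitespace-separated tokens")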
barrust/pyspellchecker
spellchecker/utils.py
write_file
def write_file(filepath, encoding, gzipped, data): """ Write the data to file either as a gzip file or text based on the gzipped parameter Args: filepath (str): The filename to open encoding (str): The file encoding to use gzipped (bool): Whether the file should be gzipped or not data (str): The data to be written out """ if gzipped: with gzip.open(filepath, "wt") as fobj: fobj.write(data) else: with OPEN(filepath, "w", encoding=encoding) as fobj: if sys.version_info < (3, 0): data = data.decode(encoding) fobj.write(data)
python
def write_file(filepath, encoding, gzipped, data): """ Write the data to file either as a gzip file or text based on the gzipped parameter Args: filepath (str): The filename to open encoding (str): The file encoding to use gzipped (bool): Whether the file should be gzipped or not data (str): The data to be written out """ if gzipped: with gzip.open(filepath, "wt") as fobj: fobj.write(data) else: with OPEN(filepath, "w", encoding=encoding) as fobj: if sys.version_info < (3, 0): data = data.decode(encoding) fobj.write(data)
[ "def", "write_file", "(", "filepath", ",", "encoding", ",", "gzipped", ",", "data", ")", ":", "if", "gzipped", ":", "with", "gzip", ".", "open", "(", "filepath", ",", "\"wt\"", ")", "as", "fobj", ":", "fobj", ".", "write", "(", "data", ")", "else", ":", "with", "OPEN", "(", "filepath", ",", "\"w\"", ",", "encoding", "=", "encoding", ")", "as", "fobj", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "data", "=", "data", ".", "decode", "(", "encoding", ")", "fobj", ".", "write", "(", "data", ")" ]
Write the data to file either as a gzip file or text based on the gzipped parameter Args: filepath (str): The filename to open encoding (str): The file encoding to use gzipped (bool): Whether the file should be gzipped or not data (str): The data to be written out
[ "Write", "the", "data", "to", "file", "either", "as", "a", "gzip", "file", "or", "text", "based", "on", "the", "gzipped", "parameter" ]
fa96024c0cdeba99e10e11060d5fd7aba796b271
https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/utils.py#L34-L51
train
238,536
merantix/picasso
picasso/examples/keras/model.py
KerasMNISTModel.preprocess
def preprocess(self, raw_inputs): """Convert images into the format required by our model. Our model requires that inputs be grayscale (mode 'L'), be resized to `MNIST_DIM`, and be represented as float32 numpy arrays in range [0, 1]. Args: raw_inputs (list of Images): a list of PIL Image objects Returns: array (float32): num images * height * width * num channels """ image_arrays = [] for raw_im in raw_inputs: im = raw_im.convert('L') im = im.resize(MNIST_DIM, Image.ANTIALIAS) arr = np.array(im) image_arrays.append(arr) inputs = np.array(image_arrays) return inputs.reshape(len(inputs), MNIST_DIM[0], MNIST_DIM[1], 1).astype('float32') / 255
python
def preprocess(self, raw_inputs): """Convert images into the format required by our model. Our model requires that inputs be grayscale (mode 'L'), be resized to `MNIST_DIM`, and be represented as float32 numpy arrays in range [0, 1]. Args: raw_inputs (list of Images): a list of PIL Image objects Returns: array (float32): num images * height * width * num channels """ image_arrays = [] for raw_im in raw_inputs: im = raw_im.convert('L') im = im.resize(MNIST_DIM, Image.ANTIALIAS) arr = np.array(im) image_arrays.append(arr) inputs = np.array(image_arrays) return inputs.reshape(len(inputs), MNIST_DIM[0], MNIST_DIM[1], 1).astype('float32') / 255
[ "def", "preprocess", "(", "self", ",", "raw_inputs", ")", ":", "image_arrays", "=", "[", "]", "for", "raw_im", "in", "raw_inputs", ":", "im", "=", "raw_im", ".", "convert", "(", "'L'", ")", "im", "=", "im", ".", "resize", "(", "MNIST_DIM", ",", "Image", ".", "ANTIALIAS", ")", "arr", "=", "np", ".", "array", "(", "im", ")", "image_arrays", ".", "append", "(", "arr", ")", "inputs", "=", "np", ".", "array", "(", "image_arrays", ")", "return", "inputs", ".", "reshape", "(", "len", "(", "inputs", ")", ",", "MNIST_DIM", "[", "0", "]", ",", "MNIST_DIM", "[", "1", "]", ",", "1", ")", ".", "astype", "(", "'float32'", ")", "/", "255" ]
Convert images into the format required by our model. Our model requires that inputs be grayscale (mode 'L'), be resized to `MNIST_DIM`, and be represented as float32 numpy arrays in range [0, 1]. Args: raw_inputs (list of Images): a list of PIL Image objects Returns: array (float32): num images * height * width * num channels
[ "Convert", "images", "into", "the", "format", "required", "by", "our", "model", "." ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/examples/keras/model.py#L23-L47
train
238,537
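The preprocessing above can be traced without the model class; a standalone sketch of the same transform, where MNIST_DIM is assumed to be (28, 28) and Image.LANCZOS stands in for the older ANTIALIAS name:

import numpy as np
from PIL import Image

MNIST_DIM = (28, 28)                                   # assumed value of the module constant
raw_inputs = [Image.new("RGB", (100, 120), (128, 128, 128))]
arrays = [np.array(im.convert("L").resize(MNIST_DIM, Image.LANCZOS)) for im in raw_inputs]
batch = np.array(arrays).reshape(len(arrays), 28, 28, 1).astype("float32") / 255
print(batch.shape, batch.dtype)                        # (1, 28, 28, 1) float32, values in [0, 1]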
merantix/picasso
picasso/interfaces/rest.py
initialize_new_session
def initialize_new_session(): """Check session and initialize if necessary Before every request, check the user session. If no session exists, add one and provide temporary locations for images """ if 'image_uid_counter' in session and 'image_list' in session: logger.debug('images are already being tracked') else: # reset image list counter for the session session['image_uid_counter'] = 0 session['image_list'] = [] if 'img_input_dir' in session and 'img_output_dir' in session: logger.debug('temporary image directories already exist') else: # make image upload directory session['img_input_dir'] = mkdtemp() session['img_output_dir'] = mkdtemp()
python
def initialize_new_session(): """Check session and initialize if necessary Before every request, check the user session. If no session exists, add one and provide temporary locations for images """ if 'image_uid_counter' in session and 'image_list' in session: logger.debug('images are already being tracked') else: # reset image list counter for the session session['image_uid_counter'] = 0 session['image_list'] = [] if 'img_input_dir' in session and 'img_output_dir' in session: logger.debug('temporary image directories already exist') else: # make image upload directory session['img_input_dir'] = mkdtemp() session['img_output_dir'] = mkdtemp()
[ "def", "initialize_new_session", "(", ")", ":", "if", "'image_uid_counter'", "in", "session", "and", "'image_list'", "in", "session", ":", "logger", ".", "debug", "(", "'images are already being tracked'", ")", "else", ":", "# reset image list counter for the session", "session", "[", "'image_uid_counter'", "]", "=", "0", "session", "[", "'image_list'", "]", "=", "[", "]", "if", "'img_input_dir'", "in", "session", "and", "'img_output_dir'", "in", "session", ":", "logger", ".", "debug", "(", "'temporary image directories already exist'", ")", "else", ":", "# make image upload directory", "session", "[", "'img_input_dir'", "]", "=", "mkdtemp", "(", ")", "session", "[", "'img_output_dir'", "]", "=", "mkdtemp", "(", ")" ]
Check session and initialize if necessary Before every request, check the user session. If no session exists, add one and provide temporary locations for images
[ "Check", "session", "and", "initialize", "if", "necessary" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L42-L60
train
238,538
merantix/picasso
picasso/interfaces/rest.py
images
def images(): """Upload images via REST interface Check if file upload was successful and sanatize user input. TODO: return file URL instead of filename """ if request.method == 'POST': file_upload = request.files['file'] if file_upload: image = dict() image['filename'] = secure_filename(file_upload.filename) full_path = os.path.join(session['img_input_dir'], image['filename']) file_upload.save(full_path) image['uid'] = session['image_uid_counter'] session['image_uid_counter'] += 1 current_app.logger.debug('File %d is saved as %s', image['uid'], image['filename']) session['image_list'].append(image) return jsonify(ok="true", file=image['filename'], uid=image['uid']) return jsonify(ok="false") if request.method == 'GET': return jsonify(images=session['image_list'])
python
def images(): """Upload images via REST interface Check if file upload was successful and sanatize user input. TODO: return file URL instead of filename """ if request.method == 'POST': file_upload = request.files['file'] if file_upload: image = dict() image['filename'] = secure_filename(file_upload.filename) full_path = os.path.join(session['img_input_dir'], image['filename']) file_upload.save(full_path) image['uid'] = session['image_uid_counter'] session['image_uid_counter'] += 1 current_app.logger.debug('File %d is saved as %s', image['uid'], image['filename']) session['image_list'].append(image) return jsonify(ok="true", file=image['filename'], uid=image['uid']) return jsonify(ok="false") if request.method == 'GET': return jsonify(images=session['image_list'])
[ "def", "images", "(", ")", ":", "if", "request", ".", "method", "==", "'POST'", ":", "file_upload", "=", "request", ".", "files", "[", "'file'", "]", "if", "file_upload", ":", "image", "=", "dict", "(", ")", "image", "[", "'filename'", "]", "=", "secure_filename", "(", "file_upload", ".", "filename", ")", "full_path", "=", "os", ".", "path", ".", "join", "(", "session", "[", "'img_input_dir'", "]", ",", "image", "[", "'filename'", "]", ")", "file_upload", ".", "save", "(", "full_path", ")", "image", "[", "'uid'", "]", "=", "session", "[", "'image_uid_counter'", "]", "session", "[", "'image_uid_counter'", "]", "+=", "1", "current_app", ".", "logger", ".", "debug", "(", "'File %d is saved as %s'", ",", "image", "[", "'uid'", "]", ",", "image", "[", "'filename'", "]", ")", "session", "[", "'image_list'", "]", ".", "append", "(", "image", ")", "return", "jsonify", "(", "ok", "=", "\"true\"", ",", "file", "=", "image", "[", "'filename'", "]", ",", "uid", "=", "image", "[", "'uid'", "]", ")", "return", "jsonify", "(", "ok", "=", "\"false\"", ")", "if", "request", ".", "method", "==", "'GET'", ":", "return", "jsonify", "(", "images", "=", "session", "[", "'image_list'", "]", ")" ]
Upload images via REST interface Check if file upload was successful and sanatize user input. TODO: return file URL instead of filename
[ "Upload", "images", "via", "REST", "interface" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L84-L109
train
238,539
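A hypothetical client-side call against the upload view above; the host, port, and URL prefix depend on how the Flask blueprint is registered, so treat the URL and file name as placeholders:

import requests

with open("digit.png", "rb") as fh:                               # placeholder image file
    resp = requests.post("http://127.0.0.1:5000/api/images", files={"file": fh})
print(resp.json())   # e.g. {"ok": "true", "file": "digit.png", "uid": 0}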
merantix/picasso
picasso/interfaces/rest.py
visualizers
def visualizers(): """Get a list of available visualizers Responses with a JSON list of available visualizers """ list_of_visualizers = [] for visualizer in get_visualizations(): list_of_visualizers.append({'name': visualizer}) return jsonify(visualizers=list_of_visualizers)
python
def visualizers(): """Get a list of available visualizers Responses with a JSON list of available visualizers """ list_of_visualizers = [] for visualizer in get_visualizations(): list_of_visualizers.append({'name': visualizer}) return jsonify(visualizers=list_of_visualizers)
[ "def", "visualizers", "(", ")", ":", "list_of_visualizers", "=", "[", "]", "for", "visualizer", "in", "get_visualizations", "(", ")", ":", "list_of_visualizers", ".", "append", "(", "{", "'name'", ":", "visualizer", "}", ")", "return", "jsonify", "(", "visualizers", "=", "list_of_visualizers", ")" ]
Get a list of available visualizers Responses with a JSON list of available visualizers
[ "Get", "a", "list", "of", "available", "visualizers" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L113-L122
train
238,540
merantix/picasso
picasso/interfaces/rest.py
visualize
def visualize(): """Trigger a visualization via the REST API Takes a single image and generates the visualization data, returning the output exactly as given by the target visualization. """ session['settings'] = {} image_uid = request.args.get('image') vis_name = request.args.get('visualizer') vis = get_visualizations()[vis_name] if vis.ALLOWED_SETTINGS: for key in vis.ALLOWED_SETTINGS.keys(): if request.args.get(key) is not None: session['settings'][key] = request.args.get(key) else: session['settings'][key] = vis.ALLOWED_SETTINGS[key][0] else: logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name)) inputs = [] for image in session['image_list']: if image['uid'] == int(image_uid): full_path = os.path.join(session['img_input_dir'], image['filename']) entry = dict() entry['filename'] = image['filename'] entry['data'] = Image.open(full_path) inputs.append(entry) vis.update_settings(session['settings']) output = vis.make_visualization( inputs, output_dir=session['img_output_dir']) return jsonify(output[0])
python
def visualize(): """Trigger a visualization via the REST API Takes a single image and generates the visualization data, returning the output exactly as given by the target visualization. """ session['settings'] = {} image_uid = request.args.get('image') vis_name = request.args.get('visualizer') vis = get_visualizations()[vis_name] if vis.ALLOWED_SETTINGS: for key in vis.ALLOWED_SETTINGS.keys(): if request.args.get(key) is not None: session['settings'][key] = request.args.get(key) else: session['settings'][key] = vis.ALLOWED_SETTINGS[key][0] else: logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name)) inputs = [] for image in session['image_list']: if image['uid'] == int(image_uid): full_path = os.path.join(session['img_input_dir'], image['filename']) entry = dict() entry['filename'] = image['filename'] entry['data'] = Image.open(full_path) inputs.append(entry) vis.update_settings(session['settings']) output = vis.make_visualization( inputs, output_dir=session['img_output_dir']) return jsonify(output[0])
[ "def", "visualize", "(", ")", ":", "session", "[", "'settings'", "]", "=", "{", "}", "image_uid", "=", "request", ".", "args", ".", "get", "(", "'image'", ")", "vis_name", "=", "request", ".", "args", ".", "get", "(", "'visualizer'", ")", "vis", "=", "get_visualizations", "(", ")", "[", "vis_name", "]", "if", "vis", ".", "ALLOWED_SETTINGS", ":", "for", "key", "in", "vis", ".", "ALLOWED_SETTINGS", ".", "keys", "(", ")", ":", "if", "request", ".", "args", ".", "get", "(", "key", ")", "is", "not", "None", ":", "session", "[", "'settings'", "]", "[", "key", "]", "=", "request", ".", "args", ".", "get", "(", "key", ")", "else", ":", "session", "[", "'settings'", "]", "[", "key", "]", "=", "vis", ".", "ALLOWED_SETTINGS", "[", "key", "]", "[", "0", "]", "else", ":", "logger", ".", "debug", "(", "'Selected Visualizer {0} has no settings.'", ".", "format", "(", "vis_name", ")", ")", "inputs", "=", "[", "]", "for", "image", "in", "session", "[", "'image_list'", "]", ":", "if", "image", "[", "'uid'", "]", "==", "int", "(", "image_uid", ")", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "session", "[", "'img_input_dir'", "]", ",", "image", "[", "'filename'", "]", ")", "entry", "=", "dict", "(", ")", "entry", "[", "'filename'", "]", "=", "image", "[", "'filename'", "]", "entry", "[", "'data'", "]", "=", "Image", ".", "open", "(", "full_path", ")", "inputs", ".", "append", "(", "entry", ")", "vis", ".", "update_settings", "(", "session", "[", "'settings'", "]", ")", "output", "=", "vis", ".", "make_visualization", "(", "inputs", ",", "output_dir", "=", "session", "[", "'img_output_dir'", "]", ")", "return", "jsonify", "(", "output", "[", "0", "]", ")" ]
Trigger a visualization via the REST API Takes a single image and generates the visualization data, returning the output exactly as given by the target visualization.
[ "Trigger", "a", "visualization", "via", "the", "REST", "API" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L133-L166
train
238,541
merantix/picasso
picasso/interfaces/rest.py
reset
def reset(): """Delete the session and clear temporary directories """ shutil.rmtree(session['img_input_dir']) shutil.rmtree(session['img_output_dir']) session.clear() return jsonify(ok='true')
python
def reset(): """Delete the session and clear temporary directories """ shutil.rmtree(session['img_input_dir']) shutil.rmtree(session['img_output_dir']) session.clear() return jsonify(ok='true')
[ "def", "reset", "(", ")", ":", "shutil", ".", "rmtree", "(", "session", "[", "'img_input_dir'", "]", ")", "shutil", ".", "rmtree", "(", "session", "[", "'img_output_dir'", "]", ")", "session", ".", "clear", "(", ")", "return", "jsonify", "(", "ok", "=", "'true'", ")" ]
Delete the session and clear temporary directories
[ "Delete", "the", "session", "and", "clear", "temporary", "directories" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/interfaces/rest.py#L170-L177
train
238,542
merantix/picasso
picasso/visualizations/base.py
BaseVisualization.update_settings
def update_settings(self, settings): """Update the settings If a derived class has an ALLOWED_SETTINGS dict, we check here that incoming settings from the web app are allowed, and set the child properties as appropriate. """ def error_string(setting, setting_val): return ('{val} is not an acceptable value for ' 'parameter {param} for visualization' '{vis}.').format(val=setting_val, param=setting, vis=self.__class__.__name__) for setting in settings: if settings[setting] in self.ALLOWED_SETTINGS[setting]: # if the setting is allowed, set the attribute but remove # invalid variable characters # # see: # # https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python setattr(self, '_' + re.sub('\W|^(?=\d)', '_', setting).lower(), settings[setting]) else: raise ValueError(error_string(settings[setting], setting))
python
def update_settings(self, settings): """Update the settings If a derived class has an ALLOWED_SETTINGS dict, we check here that incoming settings from the web app are allowed, and set the child properties as appropriate. """ def error_string(setting, setting_val): return ('{val} is not an acceptable value for ' 'parameter {param} for visualization' '{vis}.').format(val=setting_val, param=setting, vis=self.__class__.__name__) for setting in settings: if settings[setting] in self.ALLOWED_SETTINGS[setting]: # if the setting is allowed, set the attribute but remove # invalid variable characters # # see: # # https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python setattr(self, '_' + re.sub('\W|^(?=\d)', '_', setting).lower(), settings[setting]) else: raise ValueError(error_string(settings[setting], setting))
[ "def", "update_settings", "(", "self", ",", "settings", ")", ":", "def", "error_string", "(", "setting", ",", "setting_val", ")", ":", "return", "(", "'{val} is not an acceptable value for '", "'parameter {param} for visualization'", "'{vis}.'", ")", ".", "format", "(", "val", "=", "setting_val", ",", "param", "=", "setting", ",", "vis", "=", "self", ".", "__class__", ".", "__name__", ")", "for", "setting", "in", "settings", ":", "if", "settings", "[", "setting", "]", "in", "self", ".", "ALLOWED_SETTINGS", "[", "setting", "]", ":", "# if the setting is allowed, set the attribute but remove", "# invalid variable characters", "#", "# see:", "#", "# https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python", "setattr", "(", "self", ",", "'_'", "+", "re", ".", "sub", "(", "'\\W|^(?=\\d)'", ",", "'_'", ",", "setting", ")", ".", "lower", "(", ")", ",", "settings", "[", "setting", "]", ")", "else", ":", "raise", "ValueError", "(", "error_string", "(", "settings", "[", "setting", "]", ",", "setting", ")", ")" ]
Update the settings If a derived class has an ALLOWED_SETTINGS dict, we check here that incoming settings from the web app are allowed, and set the child properties as appropriate.
[ "Update", "the", "settings" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/visualizations/base.py#L60-L87
train
238,543
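The attribute-name munging inside update_settings() is easy to miss; a standalone trace of that one step, with a made-up setting name:

import re

setting = "Color Map"
attr = "_" + re.sub(r"\W|^(?=\d)", "_", setting).lower()
print(attr)   # "_color_map" -- the private attribute the setting value is stored under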
merantix/picasso
picasso/models/base.py
load_model
def load_model(model_cls_path, model_cls_name, model_load_args): """Get an instance of the described model. Args: model_cls_path: Path to the module in which the model class is defined. model_cls_name: Name of the model class. model_load_args: Dictionary of args to pass to the `load` method of the model instance. Returns: An instance of :class:`.models.model.BaseModel` or subclass """ spec = importlib.util.spec_from_file_location('active_model', model_cls_path) model_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(model_module) model_cls = getattr(model_module, model_cls_name) model = model_cls() if not isinstance(model, BaseModel): warnings.warn("Loaded model '%s' at '%s' is not an instance of %r" % (model_cls_name, model_cls_path, BaseModel)) model.load(**model_load_args) return model
python
def load_model(model_cls_path, model_cls_name, model_load_args): """Get an instance of the described model. Args: model_cls_path: Path to the module in which the model class is defined. model_cls_name: Name of the model class. model_load_args: Dictionary of args to pass to the `load` method of the model instance. Returns: An instance of :class:`.models.model.BaseModel` or subclass """ spec = importlib.util.spec_from_file_location('active_model', model_cls_path) model_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(model_module) model_cls = getattr(model_module, model_cls_name) model = model_cls() if not isinstance(model, BaseModel): warnings.warn("Loaded model '%s' at '%s' is not an instance of %r" % (model_cls_name, model_cls_path, BaseModel)) model.load(**model_load_args) return model
[ "def", "load_model", "(", "model_cls_path", ",", "model_cls_name", ",", "model_load_args", ")", ":", "spec", "=", "importlib", ".", "util", ".", "spec_from_file_location", "(", "'active_model'", ",", "model_cls_path", ")", "model_module", "=", "importlib", ".", "util", ".", "module_from_spec", "(", "spec", ")", "spec", ".", "loader", ".", "exec_module", "(", "model_module", ")", "model_cls", "=", "getattr", "(", "model_module", ",", "model_cls_name", ")", "model", "=", "model_cls", "(", ")", "if", "not", "isinstance", "(", "model", ",", "BaseModel", ")", ":", "warnings", ".", "warn", "(", "\"Loaded model '%s' at '%s' is not an instance of %r\"", "%", "(", "model_cls_name", ",", "model_cls_path", ",", "BaseModel", ")", ")", "model", ".", "load", "(", "*", "*", "model_load_args", ")", "return", "model" ]
Get an instance of the described model. Args: model_cls_path: Path to the module in which the model class is defined. model_cls_name: Name of the model class. model_load_args: Dictionary of args to pass to the `load` method of the model instance. Returns: An instance of :class:`.models.model.BaseModel` or subclass
[ "Get", "an", "instance", "of", "the", "described", "model", "." ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/models/base.py#L18-L42
train
238,544
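load_model's core mechanism is importlib.util.spec_from_file_location: the model class is loaded from a plain file path rather than from an installed package. The following is a self-contained sketch of that mechanism assuming nothing from picasso; the module source, class name and load argument are all made up.

```python
import importlib.util
import os
import tempfile
import textwrap

# Write a throw-away module to disk so we have a file path to load from.
source = textwrap.dedent("""
    class DummyModel:
        def load(self, data_dir=None):
            self.data_dir = data_dir
""")
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as fh:
    fh.write(source)
    module_path = fh.name

# Same importlib dance as load_model(): path -> spec -> module -> class.
spec = importlib.util.spec_from_file_location('active_model', module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

model_cls = getattr(module, 'DummyModel')
model = model_cls()
model.load(data_dir='/tmp/checkpoints')   # mirrors model.load(**model_load_args)
print(type(model).__name__, model.data_dir)

os.unlink(module_path)
```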
merantix/picasso
picasso/models/base.py
BaseModel.decode_prob
def decode_prob(self, class_probabilities): """Given predicted class probabilites for a set of examples, annotate each logit with a class name. By default, we name each class using its index in the logits array. Args: class_probabilities (array): Class probabilities as output by `self.predict`, i.e., a numpy array of shape (num_examples, num_classes). Returns: Annotated class probabilities for each input example, as a list of dicts where each dict is formatted as: { 'index': class_index, 'name': class_name, 'prob': class_probability } """ results = [] for row in class_probabilities: entries = [] for i, prob in enumerate(row): entries.append({'index': i, 'name': str(i), 'prob': prob}) entries = sorted(entries, key=itemgetter('prob'), reverse=True)[:self.top_probs] for entry in entries: entry['prob'] = '{:.3f}'.format(entry['prob']) results.append(entries) return results
python
def decode_prob(self, class_probabilities): """Given predicted class probabilites for a set of examples, annotate each logit with a class name. By default, we name each class using its index in the logits array. Args: class_probabilities (array): Class probabilities as output by `self.predict`, i.e., a numpy array of shape (num_examples, num_classes). Returns: Annotated class probabilities for each input example, as a list of dicts where each dict is formatted as: { 'index': class_index, 'name': class_name, 'prob': class_probability } """ results = [] for row in class_probabilities: entries = [] for i, prob in enumerate(row): entries.append({'index': i, 'name': str(i), 'prob': prob}) entries = sorted(entries, key=itemgetter('prob'), reverse=True)[:self.top_probs] for entry in entries: entry['prob'] = '{:.3f}'.format(entry['prob']) results.append(entries) return results
[ "def", "decode_prob", "(", "self", ",", "class_probabilities", ")", ":", "results", "=", "[", "]", "for", "row", "in", "class_probabilities", ":", "entries", "=", "[", "]", "for", "i", ",", "prob", "in", "enumerate", "(", "row", ")", ":", "entries", ".", "append", "(", "{", "'index'", ":", "i", ",", "'name'", ":", "str", "(", "i", ")", ",", "'prob'", ":", "prob", "}", ")", "entries", "=", "sorted", "(", "entries", ",", "key", "=", "itemgetter", "(", "'prob'", ")", ",", "reverse", "=", "True", ")", "[", ":", "self", ".", "top_probs", "]", "for", "entry", "in", "entries", ":", "entry", "[", "'prob'", "]", "=", "'{:.3f}'", ".", "format", "(", "entry", "[", "'prob'", "]", ")", "results", ".", "append", "(", "entries", ")", "return", "results" ]
Given predicted class probabilites for a set of examples, annotate each logit with a class name. By default, we name each class using its index in the logits array. Args: class_probabilities (array): Class probabilities as output by `self.predict`, i.e., a numpy array of shape (num_examples, num_classes). Returns: Annotated class probabilities for each input example, as a list of dicts where each dict is formatted as: { 'index': class_index, 'name': class_name, 'prob': class_probability }
[ "Given", "predicted", "class", "probabilites", "for", "a", "set", "of", "examples", "annotate", "each", "logit", "with", "a", "class", "name", "." ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/models/base.py#L174-L210
train
238,545
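decode_prob's output format is easier to see on a concrete input. A self-contained sketch of the same annotate, sort, truncate and format steps on made-up probabilities (plain lists instead of a numpy array, with top_probs assumed to be 2):

```python
from operator import itemgetter

# Made-up class probabilities for two examples over three classes.
class_probabilities = [
    [0.10, 0.70, 0.20],
    [0.05, 0.15, 0.80],
]
top_probs = 2

results = []
for row in class_probabilities:
    # Annotate each logit with its index and a default name.
    entries = [{'index': i, 'name': str(i), 'prob': prob}
               for i, prob in enumerate(row)]
    # Keep only the top_probs most likely classes, highest first.
    entries = sorted(entries, key=itemgetter('prob'), reverse=True)[:top_probs]
    for entry in entries:
        entry['prob'] = '{:.3f}'.format(entry['prob'])
    results.append(entries)

print(results)
# first row -> [{'index': 1, 'name': '1', 'prob': '0.700'},
#               {'index': 2, 'name': '2', 'prob': '0.200'}], ...
```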
merantix/picasso
picasso/utils.py
_get_visualization_classes
def _get_visualization_classes(): """Import visualizations classes dynamically """ visualization_attr = vars(import_module('picasso.visualizations')) visualization_submodules = [ visualization_attr[x] for x in visualization_attr if isinstance(visualization_attr[x], ModuleType)] visualization_classes = [] for submodule in visualization_submodules: attrs = vars(submodule) for attr_name in attrs: attr = attrs[attr_name] if (inspect.isclass(attr) and issubclass(attr, BaseVisualization) and attr is not BaseVisualization): visualization_classes.append(attr) return visualization_classes
python
def _get_visualization_classes(): """Import visualizations classes dynamically """ visualization_attr = vars(import_module('picasso.visualizations')) visualization_submodules = [ visualization_attr[x] for x in visualization_attr if isinstance(visualization_attr[x], ModuleType)] visualization_classes = [] for submodule in visualization_submodules: attrs = vars(submodule) for attr_name in attrs: attr = attrs[attr_name] if (inspect.isclass(attr) and issubclass(attr, BaseVisualization) and attr is not BaseVisualization): visualization_classes.append(attr) return visualization_classes
[ "def", "_get_visualization_classes", "(", ")", ":", "visualization_attr", "=", "vars", "(", "import_module", "(", "'picasso.visualizations'", ")", ")", "visualization_submodules", "=", "[", "visualization_attr", "[", "x", "]", "for", "x", "in", "visualization_attr", "if", "isinstance", "(", "visualization_attr", "[", "x", "]", ",", "ModuleType", ")", "]", "visualization_classes", "=", "[", "]", "for", "submodule", "in", "visualization_submodules", ":", "attrs", "=", "vars", "(", "submodule", ")", "for", "attr_name", "in", "attrs", ":", "attr", "=", "attrs", "[", "attr_name", "]", "if", "(", "inspect", ".", "isclass", "(", "attr", ")", "and", "issubclass", "(", "attr", ",", "BaseVisualization", ")", "and", "attr", "is", "not", "BaseVisualization", ")", ":", "visualization_classes", ".", "append", "(", "attr", ")", "return", "visualization_classes" ]
Import visualizations classes dynamically
[ "Import", "visualizations", "classes", "dynamically" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L32-L49
train
238,546
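_get_visualization_classes discovers visualizations by scanning module attributes with inspect.isclass and issubclass. A self-contained sketch of that filter, using made-up subclass names and the current module instead of the picasso.visualizations package:

```python
import inspect
import sys

class BaseVisualization:                     # stand-in for picasso's BaseVisualization
    pass

class SaliencyMap(BaseVisualization):        # made-up example subclass
    pass

class PartialOcclusion(BaseVisualization):   # made-up example subclass
    pass

def find_visualization_classes(module):
    """Collect BaseVisualization subclasses defined in *module*,
    mirroring the inspect/issubclass filter used above."""
    found = []
    for attr in vars(module).values():
        if (inspect.isclass(attr)
                and issubclass(attr, BaseVisualization)
                and attr is not BaseVisualization):
            found.append(attr)
    return found

print([cls.__name__ for cls in find_visualization_classes(sys.modules[__name__])])
```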
merantix/picasso
picasso/utils.py
get_model
def get_model(): """Get the NN model that's being analyzed from the request context. Put the model in the request context if it is not yet there. Returns: instance of :class:`.models.model.Model` or derived class """ if not hasattr(g, 'model'): g.model = load_model(current_app.config['MODEL_CLS_PATH'], current_app.config['MODEL_CLS_NAME'], current_app.config['MODEL_LOAD_ARGS']) return g.model
python
def get_model(): """Get the NN model that's being analyzed from the request context. Put the model in the request context if it is not yet there. Returns: instance of :class:`.models.model.Model` or derived class """ if not hasattr(g, 'model'): g.model = load_model(current_app.config['MODEL_CLS_PATH'], current_app.config['MODEL_CLS_NAME'], current_app.config['MODEL_LOAD_ARGS']) return g.model
[ "def", "get_model", "(", ")", ":", "if", "not", "hasattr", "(", "g", ",", "'model'", ")", ":", "g", ".", "model", "=", "load_model", "(", "current_app", ".", "config", "[", "'MODEL_CLS_PATH'", "]", ",", "current_app", ".", "config", "[", "'MODEL_CLS_NAME'", "]", ",", "current_app", ".", "config", "[", "'MODEL_LOAD_ARGS'", "]", ")", "return", "g", ".", "model" ]
Get the NN model that's being analyzed from the request context. Put the model in the request context if it is not yet there. Returns: instance of :class:`.models.model.Model` or derived class
[ "Get", "the", "NN", "model", "that", "s", "being", "analyzed", "from", "the", "request", "context", ".", "Put", "the", "model", "in", "the", "request", "context", "if", "it", "is", "not", "yet", "there", "." ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L52-L64
train
238,547
merantix/picasso
picasso/utils.py
get_visualizations
def get_visualizations(): """Get the available visualizations from the request context. Put the visualizations in the request context if they are not yet there. Returns: :obj:`list` of instances of :class:`.BaseVisualization` or derived class """ if not hasattr(g, 'visualizations'): g.visualizations = {} for VisClass in _get_visualization_classes(): vis = VisClass(get_model()) g.visualizations[vis.__class__.__name__] = vis return g.visualizations
python
def get_visualizations(): """Get the available visualizations from the request context. Put the visualizations in the request context if they are not yet there. Returns: :obj:`list` of instances of :class:`.BaseVisualization` or derived class """ if not hasattr(g, 'visualizations'): g.visualizations = {} for VisClass in _get_visualization_classes(): vis = VisClass(get_model()) g.visualizations[vis.__class__.__name__] = vis return g.visualizations
[ "def", "get_visualizations", "(", ")", ":", "if", "not", "hasattr", "(", "g", ",", "'visualizations'", ")", ":", "g", ".", "visualizations", "=", "{", "}", "for", "VisClass", "in", "_get_visualization_classes", "(", ")", ":", "vis", "=", "VisClass", "(", "get_model", "(", ")", ")", "g", ".", "visualizations", "[", "vis", ".", "__class__", ".", "__name__", "]", "=", "vis", "return", "g", ".", "visualizations" ]
Get the available visualizations from the request context. Put the visualizations in the request context if they are not yet there. Returns: :obj:`list` of instances of :class:`.BaseVisualization` or derived class
[ "Get", "the", "available", "visualizations", "from", "the", "request", "context", ".", "Put", "the", "visualizations", "in", "the", "request", "context", "if", "they", "are", "not", "yet", "there", "." ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L67-L81
train
238,548
merantix/picasso
picasso/utils.py
get_app_state
def get_app_state(): """Get current status of application in context Returns: :obj:`dict` of application status """ if not hasattr(g, 'app_state'): model = get_model() g.app_state = { 'app_title': APP_TITLE, 'model_name': type(model).__name__, 'latest_ckpt_name': model.latest_ckpt_name, 'latest_ckpt_time': model.latest_ckpt_time } return g.app_state
python
def get_app_state(): """Get current status of application in context Returns: :obj:`dict` of application status """ if not hasattr(g, 'app_state'): model = get_model() g.app_state = { 'app_title': APP_TITLE, 'model_name': type(model).__name__, 'latest_ckpt_name': model.latest_ckpt_name, 'latest_ckpt_time': model.latest_ckpt_time } return g.app_state
[ "def", "get_app_state", "(", ")", ":", "if", "not", "hasattr", "(", "g", ",", "'app_state'", ")", ":", "model", "=", "get_model", "(", ")", "g", ".", "app_state", "=", "{", "'app_title'", ":", "APP_TITLE", ",", "'model_name'", ":", "type", "(", "model", ")", ".", "__name__", ",", "'latest_ckpt_name'", ":", "model", ".", "latest_ckpt_name", ",", "'latest_ckpt_time'", ":", "model", ".", "latest_ckpt_time", "}", "return", "g", ".", "app_state" ]
Get current status of application in context Returns: :obj:`dict` of application status
[ "Get", "current", "status", "of", "application", "in", "context" ]
d276b9b7408dd1032fe0ccb84ea9b6604a32915e
https://github.com/merantix/picasso/blob/d276b9b7408dd1032fe0ccb84ea9b6604a32915e/picasso/utils.py#L84-L99
train
238,549
arraylabs/pymyq
pymyq/api.py
login
async def login( username: str, password: str, brand: str, websession: ClientSession = None) -> API: """Log in to the API.""" api = API(brand, websession) await api.authenticate(username, password) return api
python
async def login( username: str, password: str, brand: str, websession: ClientSession = None) -> API: """Log in to the API.""" api = API(brand, websession) await api.authenticate(username, password) return api
[ "async", "def", "login", "(", "username", ":", "str", ",", "password", ":", "str", ",", "brand", ":", "str", ",", "websession", ":", "ClientSession", "=", "None", ")", "->", "API", ":", "api", "=", "API", "(", "brand", ",", "websession", ")", "await", "api", ".", "authenticate", "(", "username", ",", "password", ")", "return", "api" ]
Log in to the API.
[ "Log", "in", "to", "the", "API", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L286-L292
train
238,550
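A hedged usage sketch for the login() coroutine above, combined with get_devices() from the records further down. The e-mail address, password and brand string are placeholders, and actually running this requires the pymyq package plus a real MyQ account.

```python
import asyncio

from aiohttp import ClientSession
from pymyq.api import login   # module shown in this record


async def main():
    async with ClientSession() as websession:
        # Placeholder credentials and brand -- substitute real values.
        api = await login('user@example.com', 'correct-horse', 'chamberlain',
                          websession)
        for device in await api.get_devices():
            print(device.name, device.state)


asyncio.run(main())
```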
arraylabs/pymyq
pymyq/api.py
API._create_websession
def _create_websession(self): """Create a web session.""" from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False
python
def _create_websession(self): """Create a web session.""" from socket import AF_INET from aiohttp import ClientTimeout, TCPConnector _LOGGER.debug('Creating web session') conn = TCPConnector( family=AF_INET, limit_per_host=5, enable_cleanup_closed=True, ) # Create session object. session_timeout = ClientTimeout(connect=10) self._websession = ClientSession(connector=conn, timeout=session_timeout) self._supplied_websession = False
[ "def", "_create_websession", "(", "self", ")", ":", "from", "socket", "import", "AF_INET", "from", "aiohttp", "import", "ClientTimeout", ",", "TCPConnector", "_LOGGER", ".", "debug", "(", "'Creating web session'", ")", "conn", "=", "TCPConnector", "(", "family", "=", "AF_INET", ",", "limit_per_host", "=", "5", ",", "enable_cleanup_closed", "=", "True", ",", ")", "# Create session object.", "session_timeout", "=", "ClientTimeout", "(", "connect", "=", "10", ")", "self", ".", "_websession", "=", "ClientSession", "(", "connector", "=", "conn", ",", "timeout", "=", "session_timeout", ")", "self", ".", "_supplied_websession", "=", "False" ]
Create a web session.
[ "Create", "a", "web", "session", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L73-L89
train
238,551
arraylabs/pymyq
pymyq/api.py
API.close_websession
async def close_websession(self): """Close web session if not already closed and created by us.""" # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed')
python
async def close_websession(self): """Close web session if not already closed and created by us.""" # We do not close the web session if it was provided. if self._supplied_websession or self._websession is None: return _LOGGER.debug('Closing connections') # Need to set _websession to none first to prevent any other task # from closing it as well. temp_websession = self._websession self._websession = None await temp_websession.close() await asyncio.sleep(0) _LOGGER.debug('Connections closed')
[ "async", "def", "close_websession", "(", "self", ")", ":", "# We do not close the web session if it was provided.", "if", "self", ".", "_supplied_websession", "or", "self", ".", "_websession", "is", "None", ":", "return", "_LOGGER", ".", "debug", "(", "'Closing connections'", ")", "# Need to set _websession to none first to prevent any other task", "# from closing it as well.", "temp_websession", "=", "self", ".", "_websession", "self", ".", "_websession", "=", "None", "await", "temp_websession", ".", "close", "(", ")", "await", "asyncio", ".", "sleep", "(", "0", ")", "_LOGGER", ".", "debug", "(", "'Connections closed'", ")" ]
Close web session if not already closed and created by us.
[ "Close", "web", "session", "if", "not", "already", "closed", "and", "created", "by", "us", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L91-L104
train
238,552
arraylabs/pymyq
pymyq/api.py
API.authenticate
async def authenticate(self, username: str, password: str) -> None: """Authenticate against the API.""" self._credentials = { 'username': username, 'password': password, } await self._get_security_token()
python
async def authenticate(self, username: str, password: str) -> None: """Authenticate against the API.""" self._credentials = { 'username': username, 'password': password, } await self._get_security_token()
[ "async", "def", "authenticate", "(", "self", ",", "username", ":", "str", ",", "password", ":", "str", ")", "->", "None", ":", "self", ".", "_credentials", "=", "{", "'username'", ":", "username", ",", "'password'", ":", "password", ",", "}", "await", "self", ".", "_get_security_token", "(", ")" ]
Authenticate against the API.
[ "Authenticate", "against", "the", "API", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L219-L226
train
238,553
arraylabs/pymyq
pymyq/api.py
API._get_security_token
async def _get_security_token(self) -> None: """Request a security token.""" _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. _LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken']
python
async def _get_security_token(self) -> None: """Request a security token.""" _LOGGER.debug('Requesting security token.') if self._credentials is None: return # Make sure only 1 request can be sent at a time. async with self._security_token_lock: # Confirm there is still no security token. if self._security_token is None: login_resp = await self._request( 'post', LOGIN_ENDPOINT, json=self._credentials, login_request=True, ) return_code = int(login_resp.get('ReturnCode', 1)) if return_code != 0: if return_code == 203: # Invalid username or password. _LOGGER.debug('Invalid username or password') self._credentials = None raise MyQError(login_resp['ErrorMessage']) self._security_token = login_resp['SecurityToken']
[ "async", "def", "_get_security_token", "(", "self", ")", "->", "None", ":", "_LOGGER", ".", "debug", "(", "'Requesting security token.'", ")", "if", "self", ".", "_credentials", "is", "None", ":", "return", "# Make sure only 1 request can be sent at a time.", "async", "with", "self", ".", "_security_token_lock", ":", "# Confirm there is still no security token.", "if", "self", ".", "_security_token", "is", "None", ":", "login_resp", "=", "await", "self", ".", "_request", "(", "'post'", ",", "LOGIN_ENDPOINT", ",", "json", "=", "self", ".", "_credentials", ",", "login_request", "=", "True", ",", ")", "return_code", "=", "int", "(", "login_resp", ".", "get", "(", "'ReturnCode'", ",", "1", ")", ")", "if", "return_code", "!=", "0", ":", "if", "return_code", "==", "203", ":", "# Invalid username or password.", "_LOGGER", ".", "debug", "(", "'Invalid username or password'", ")", "self", ".", "_credentials", "=", "None", "raise", "MyQError", "(", "login_resp", "[", "'ErrorMessage'", "]", ")", "self", ".", "_security_token", "=", "login_resp", "[", "'SecurityToken'", "]" ]
Request a security token.
[ "Request", "a", "security", "token", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L228-L253
train
238,554
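_get_security_token combines an asyncio.Lock with a re-check of the token inside the lock, so that many concurrent callers trigger only one login request. A self-contained sketch of that pattern, with a fake fetch step standing in for the HTTP call:

```python
import asyncio

class TokenClient:
    """Minimal sketch of the lock-and-recheck pattern: many concurrent
    callers, but only one actual token request is issued."""

    def __init__(self):
        self._token = None
        self._lock = asyncio.Lock()
        self.requests_sent = 0

    async def _fetch_token(self):
        await asyncio.sleep(0.1)      # stand-in for the real login request
        self.requests_sent += 1
        return 'security-token'

    async def get_token(self):
        if self._token is None:
            async with self._lock:
                # Re-check inside the lock: another task may have already
                # fetched the token while we were waiting.
                if self._token is None:
                    self._token = await self._fetch_token()
        return self._token

async def main():
    client = TokenClient()
    await asyncio.gather(*(client.get_token() for _ in range(5)))
    print(client.requests_sent)   # 1, despite five concurrent callers

asyncio.run(main())
```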
arraylabs/pymyq
pymyq/api.py
API.get_devices
async def get_devices(self, covers_only: bool = True) -> list: """Get a list of all devices associated with the account.""" from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
python
async def get_devices(self, covers_only: bool = True) -> list: """Get a list of all devices associated with the account.""" from .device import MyQDevice _LOGGER.debug('Retrieving list of devices') devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT) # print(json.dumps(devices_resp, indent=4)) device_list = [] if devices_resp is None: return device_list for device in devices_resp['Devices']: if not covers_only or \ device['MyQDeviceTypeName'] in SUPPORTED_DEVICE_TYPE_NAMES: self._devices.append({ 'device_id': device['MyQDeviceId'], 'device_info': device }) myq_device = MyQDevice( self._devices[-1], self._brand, self) device_list.append(myq_device) # Store current device states. self._store_device_states(devices_resp.get('Devices', [])) _LOGGER.debug('List of devices retrieved') return device_list
[ "async", "def", "get_devices", "(", "self", ",", "covers_only", ":", "bool", "=", "True", ")", "->", "list", ":", "from", ".", "device", "import", "MyQDevice", "_LOGGER", ".", "debug", "(", "'Retrieving list of devices'", ")", "devices_resp", "=", "await", "self", ".", "_request", "(", "'get'", ",", "DEVICE_LIST_ENDPOINT", ")", "# print(json.dumps(devices_resp, indent=4))", "device_list", "=", "[", "]", "if", "devices_resp", "is", "None", ":", "return", "device_list", "for", "device", "in", "devices_resp", "[", "'Devices'", "]", ":", "if", "not", "covers_only", "or", "device", "[", "'MyQDeviceTypeName'", "]", "in", "SUPPORTED_DEVICE_TYPE_NAMES", ":", "self", ".", "_devices", ".", "append", "(", "{", "'device_id'", ":", "device", "[", "'MyQDeviceId'", "]", ",", "'device_info'", ":", "device", "}", ")", "myq_device", "=", "MyQDevice", "(", "self", ".", "_devices", "[", "-", "1", "]", ",", "self", ".", "_brand", ",", "self", ")", "device_list", ".", "append", "(", "myq_device", ")", "# Store current device states.", "self", ".", "_store_device_states", "(", "devices_resp", ".", "get", "(", "'Devices'", ",", "[", "]", ")", ")", "_LOGGER", ".", "debug", "(", "'List of devices retrieved'", ")", "return", "device_list" ]
Get a list of all devices associated with the account.
[ "Get", "a", "list", "of", "all", "devices", "associated", "with", "the", "account", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/api.py#L255-L283
train
238,555
arraylabs/pymyq
pymyq/device.py
MyQDevice.name
def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc')
python
def name(self) -> str: """Return the device name.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'desc')
[ "def", "name", "(", "self", ")", "->", "str", ":", "return", "next", "(", "attr", "[", "'Value'", "]", "for", "attr", "in", "self", ".", "_device_json", ".", "get", "(", "'Attributes'", ",", "[", "]", ")", "if", "attr", ".", "get", "(", "'AttributeDisplayName'", ")", "==", "'desc'", ")" ]
Return the device name.
[ "Return", "the", "device", "name", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L64-L68
train
238,556
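The MyQDevice properties in this and the following records all share one lookup idiom: the MyQ payload is a list of attribute dicts, and next() over a generator expression pulls out the first matching 'Value'. A runnable sketch of that idiom with a made-up payload:

```python
# Made-up device payload shaped like the MyQ API's attribute list.
device_json = {
    'Attributes': [
        {'AttributeDisplayName': 'desc', 'Value': 'Garage Door Opener'},
        {'AttributeDisplayName': 'online', 'Value': 'True'},
        {'AttributeDisplayName': 'doorstate', 'Value': '2'},
    ]
}

# Same pattern as MyQDevice.name: take the first attribute whose display
# name matches and return its value.
name = next(
    attr['Value'] for attr in device_json.get('Attributes', [])
    if attr.get('AttributeDisplayName') == 'desc')
print(name)   # Garage Door Opener
```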
arraylabs/pymyq
pymyq/device.py
MyQDevice.available
def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available
python
def available(self) -> bool: """Return if device is online or not.""" # Both ability to retrieve state from MyQ cloud AND device itself has # to be online. is_available = self.api.online and \ next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'online') == "True" return is_available
[ "def", "available", "(", "self", ")", "->", "bool", ":", "# Both ability to retrieve state from MyQ cloud AND device itself has", "# to be online.", "is_available", "=", "self", ".", "api", ".", "online", "and", "next", "(", "attr", "[", "'Value'", "]", "for", "attr", "in", "self", ".", "_device_json", ".", "get", "(", "'Attributes'", ",", "[", "]", ")", "if", "attr", ".", "get", "(", "'AttributeDisplayName'", ")", "==", "'online'", ")", "==", "\"True\"", "return", "is_available" ]
Return if device is online or not.
[ "Return", "if", "device", "is", "online", "or", "not", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L71-L81
train
238,557
arraylabs/pymyq
pymyq/device.py
MyQDevice.open_allowed
def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1"
python
def open_allowed(self) -> bool: """Door can be opened unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')\ == "1"
[ "def", "open_allowed", "(", "self", ")", "->", "bool", ":", "return", "next", "(", "attr", "[", "'Value'", "]", "for", "attr", "in", "self", ".", "_device_json", ".", "get", "(", "'Attributes'", ",", "[", "]", ")", "if", "attr", ".", "get", "(", "'AttributeDisplayName'", ")", "==", "'isunattendedopenallowed'", ")", "==", "\"1\"" ]
Door can be opened unattended.
[ "Door", "can", "be", "opened", "unattended", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L89-L94
train
238,558
arraylabs/pymyq
pymyq/device.py
MyQDevice.close_allowed
def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1"
python
def close_allowed(self) -> bool: """Door can be closed unattended.""" return next( attr['Value'] for attr in self._device_json.get('Attributes', []) if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')\ == "1"
[ "def", "close_allowed", "(", "self", ")", "->", "bool", ":", "return", "next", "(", "attr", "[", "'Value'", "]", "for", "attr", "in", "self", ".", "_device_json", ".", "get", "(", "'Attributes'", ",", "[", "]", ")", "if", "attr", ".", "get", "(", "'AttributeDisplayName'", ")", "==", "'isunattendedcloseallowed'", ")", "==", "\"1\"" ]
Door can be closed unattended.
[ "Door", "can", "be", "closed", "unattended", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L97-L102
train
238,559
arraylabs/pymyq
pymyq/device.py
MyQDevice._update_state
def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value
python
def _update_state(self, value: str) -> None: """Update state temporary during open or close.""" attribute = next(attr for attr in self._device['device_info'].get( 'Attributes', []) if attr.get( 'AttributeDisplayName') == 'doorstate') if attribute is not None: attribute['Value'] = value
[ "def", "_update_state", "(", "self", ",", "value", ":", "str", ")", "->", "None", ":", "attribute", "=", "next", "(", "attr", "for", "attr", "in", "self", ".", "_device", "[", "'device_info'", "]", ".", "get", "(", "'Attributes'", ",", "[", "]", ")", "if", "attr", ".", "get", "(", "'AttributeDisplayName'", ")", "==", "'doorstate'", ")", "if", "attribute", "is", "not", "None", ":", "attribute", "[", "'Value'", "]", "=", "value" ]
Update state temporary during open or close.
[ "Update", "state", "temporary", "during", "open", "or", "close", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L113-L119
train
238,560
arraylabs/pymyq
pymyq/device.py
MyQDevice._coerce_state_from_string
def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN
python
def _coerce_state_from_string(value: Union[int, str]) -> str: """Return a proper state from a string input.""" try: return STATE_MAP[int(value)] except KeyError: _LOGGER.error('Unknown state: %s', value) return STATE_UNKNOWN
[ "def", "_coerce_state_from_string", "(", "value", ":", "Union", "[", "int", ",", "str", "]", ")", "->", "str", ":", "try", ":", "return", "STATE_MAP", "[", "int", "(", "value", ")", "]", "except", "KeyError", ":", "_LOGGER", ".", "error", "(", "'Unknown state: %s'", ",", "value", ")", "return", "STATE_UNKNOWN" ]
Return a proper state from a string input.
[ "Return", "a", "proper", "state", "from", "a", "string", "input", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L127-L133
train
238,561
arraylabs/pymyq
pymyq/device.py
MyQDevice._set_state
async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True
python
async def _set_state(self, state: int) -> bool: """Set the state of the device.""" try: set_state_resp = await self.api._request( 'put', DEVICE_SET_ENDPOINT, json={ 'attributeName': 'desireddoorstate', 'myQDeviceId': self.device_id, 'AttributeValue': state, }) except RequestError as err: _LOGGER.error('%s: Setting state failed (and halting): %s', self.name, err) return False if set_state_resp is None: return False if int(set_state_resp.get('ReturnCode', 1)) != 0: _LOGGER.error( '%s: Error setting the device state: %s', self.name, set_state_resp.get('ErrorMessage', 'Unknown Error')) return False return True
[ "async", "def", "_set_state", "(", "self", ",", "state", ":", "int", ")", "->", "bool", ":", "try", ":", "set_state_resp", "=", "await", "self", ".", "api", ".", "_request", "(", "'put'", ",", "DEVICE_SET_ENDPOINT", ",", "json", "=", "{", "'attributeName'", ":", "'desireddoorstate'", ",", "'myQDeviceId'", ":", "self", ".", "device_id", ",", "'AttributeValue'", ":", "state", ",", "}", ")", "except", "RequestError", "as", "err", ":", "_LOGGER", ".", "error", "(", "'%s: Setting state failed (and halting): %s'", ",", "self", ".", "name", ",", "err", ")", "return", "False", "if", "set_state_resp", "is", "None", ":", "return", "False", "if", "int", "(", "set_state_resp", ".", "get", "(", "'ReturnCode'", ",", "1", ")", ")", "!=", "0", ":", "_LOGGER", ".", "error", "(", "'%s: Error setting the device state: %s'", ",", "self", ".", "name", ",", "set_state_resp", ".", "get", "(", "'ErrorMessage'", ",", "'Unknown Error'", ")", ")", "return", "False", "return", "True" ]
Set the state of the device.
[ "Set", "the", "state", "of", "the", "device", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L136-L161
train
238,562
arraylabs/pymyq
pymyq/device.py
MyQDevice.close
async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True
python
async def close(self) -> bool: """Close the device.""" _LOGGER.debug('%s: Sending close command', self.name) if not await self._set_state(0): return False # Do not allow update of this device's state for 10 seconds. self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10) # Ensure state is closed or closing. if self.state not in (STATE_CLOSED, STATE_CLOSING): # Set state to closing. self._update_state('5') self._device_json = self._device['device_info'] _LOGGER.debug('%s: Close command send', self.name) return True
[ "async", "def", "close", "(", "self", ")", "->", "bool", ":", "_LOGGER", ".", "debug", "(", "'%s: Sending close command'", ",", "self", ".", "name", ")", "if", "not", "await", "self", ".", "_set_state", "(", "0", ")", ":", "return", "False", "# Do not allow update of this device's state for 10 seconds.", "self", ".", "next_allowed_update", "=", "datetime", ".", "utcnow", "(", ")", "+", "timedelta", "(", "seconds", "=", "10", ")", "# Ensure state is closed or closing.", "if", "self", ".", "state", "not", "in", "(", "STATE_CLOSED", ",", "STATE_CLOSING", ")", ":", "# Set state to closing.", "self", ".", "_update_state", "(", "'5'", ")", "self", ".", "_device_json", "=", "self", ".", "_device", "[", "'device_info'", "]", "_LOGGER", ".", "debug", "(", "'%s: Close command send'", ",", "self", ".", "name", ")", "return", "True" ]
Close the device.
[ "Close", "the", "device", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L163-L179
train
238,563
arraylabs/pymyq
pymyq/device.py
MyQDevice.update
async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info']
python
async def update(self) -> None: """Retrieve updated device state.""" if self.next_allowed_update is not None and \ datetime.utcnow() < self.next_allowed_update: return self.next_allowed_update = None await self.api._update_device_state() self._device_json = self._device['device_info']
[ "async", "def", "update", "(", "self", ")", "->", "None", ":", "if", "self", ".", "next_allowed_update", "is", "not", "None", "and", "datetime", ".", "utcnow", "(", ")", "<", "self", ".", "next_allowed_update", ":", "return", "self", ".", "next_allowed_update", "=", "None", "await", "self", ".", "api", ".", "_update_device_state", "(", ")", "self", ".", "_device_json", "=", "self", ".", "_device", "[", "'device_info'", "]" ]
Retrieve updated device state.
[ "Retrieve", "updated", "device", "state", "." ]
413ae01ca23568f7b5f698a87e872f456072356b
https://github.com/arraylabs/pymyq/blob/413ae01ca23568f7b5f698a87e872f456072356b/pymyq/device.py#L200-L208
train
238,564
sporestack/bitcash
bitcash/network/services.py
NetworkAPI.get_transaction
def get_transaction(cls, txid): """Gets the full transaction details. :param txid: The transaction id in question. :type txid: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``Transaction`` """ for api_call in cls.GET_TX_MAIN: try: return api_call(txid) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
python
def get_transaction(cls, txid): """Gets the full transaction details. :param txid: The transaction id in question. :type txid: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``Transaction`` """ for api_call in cls.GET_TX_MAIN: try: return api_call(txid) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
[ "def", "get_transaction", "(", "cls", ",", "txid", ")", ":", "for", "api_call", "in", "cls", ".", "GET_TX_MAIN", ":", "try", ":", "return", "api_call", "(", "txid", ")", "except", "cls", ".", "IGNORED_ERRORS", ":", "pass", "raise", "ConnectionError", "(", "'All APIs are unreachable.'", ")" ]
Gets the full transaction details. :param txid: The transaction id in question. :type txid: ``str`` :raises ConnectionError: If all API services fail. :rtype: ``Transaction``
[ "Gets", "the", "full", "transaction", "details", "." ]
c7a18b9d82af98f1000c456dd06131524c260b7f
https://github.com/sporestack/bitcash/blob/c7a18b9d82af98f1000c456dd06131524c260b7f/bitcash/network/services.py#L346-L361
train
238,565
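NetworkAPI.get_transaction (and get_tx_amount in the next record) implement a simple failover loop: try each backend in order, swallow the ignorable errors, and give up only when every backend has failed. A self-contained sketch of that loop, with fake backends and a fake IGNORED_ERRORS tuple standing in for the real per-service calls:

```python
# Stand-ins for the error types bitcash actually ignores.
IGNORED_ERRORS = (ConnectionError, TimeoutError)

def flaky_backend(txid):
    raise ConnectionError('service unreachable')

def working_backend(txid):
    return {'txid': txid, 'confirmations': 3}

GET_TX_MAIN = [flaky_backend, working_backend]

def get_transaction(txid):
    # Try each backend in order; the first successful response wins.
    for api_call in GET_TX_MAIN:
        try:
            return api_call(txid)
        except IGNORED_ERRORS:
            pass
    raise ConnectionError('All APIs are unreachable.')

print(get_transaction('abc123'))
```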
sporestack/bitcash
bitcash/network/services.py
NetworkAPI.get_tx_amount
def get_tx_amount(cls, txid, txindex): """Gets the amount of a given transaction output. :param txid: The transaction id in question. :type txid: ``str`` :param txindex: The transaction index in question. :type txindex: ``int`` :raises ConnectionError: If all API services fail. :rtype: ``Decimal`` """ for api_call in cls.GET_TX_AMOUNT_MAIN: try: return api_call(txid, txindex) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
python
def get_tx_amount(cls, txid, txindex): """Gets the amount of a given transaction output. :param txid: The transaction id in question. :type txid: ``str`` :param txindex: The transaction index in question. :type txindex: ``int`` :raises ConnectionError: If all API services fail. :rtype: ``Decimal`` """ for api_call in cls.GET_TX_AMOUNT_MAIN: try: return api_call(txid, txindex) except cls.IGNORED_ERRORS: pass raise ConnectionError('All APIs are unreachable.')
[ "def", "get_tx_amount", "(", "cls", ",", "txid", ",", "txindex", ")", ":", "for", "api_call", "in", "cls", ".", "GET_TX_AMOUNT_MAIN", ":", "try", ":", "return", "api_call", "(", "txid", ",", "txindex", ")", "except", "cls", ".", "IGNORED_ERRORS", ":", "pass", "raise", "ConnectionError", "(", "'All APIs are unreachable.'", ")" ]
Gets the amount of a given transaction output. :param txid: The transaction id in question. :type txid: ``str`` :param txindex: The transaction index in question. :type txindex: ``int`` :raises ConnectionError: If all API services fail. :rtype: ``Decimal``
[ "Gets", "the", "amount", "of", "a", "given", "transaction", "output", "." ]
c7a18b9d82af98f1000c456dd06131524c260b7f
https://github.com/sporestack/bitcash/blob/c7a18b9d82af98f1000c456dd06131524c260b7f/bitcash/network/services.py#L383-L400
train
238,566
sporestack/bitcash
bitcash/network/fees.py
get_fee
def get_fee(speed=FEE_SPEED_MEDIUM): """Gets the recommended satoshi per byte fee. :param speed: One of: 'fast', 'medium', 'slow'. :type speed: ``string`` :rtype: ``int`` """ if speed == FEE_SPEED_FAST: return DEFAULT_FEE_FAST elif speed == FEE_SPEED_MEDIUM: return DEFAULT_FEE_MEDIUM elif speed == FEE_SPEED_SLOW: return DEFAULT_FEE_SLOW else: raise ValueError('Invalid speed argument.')
python
def get_fee(speed=FEE_SPEED_MEDIUM): """Gets the recommended satoshi per byte fee. :param speed: One of: 'fast', 'medium', 'slow'. :type speed: ``string`` :rtype: ``int`` """ if speed == FEE_SPEED_FAST: return DEFAULT_FEE_FAST elif speed == FEE_SPEED_MEDIUM: return DEFAULT_FEE_MEDIUM elif speed == FEE_SPEED_SLOW: return DEFAULT_FEE_SLOW else: raise ValueError('Invalid speed argument.')
[ "def", "get_fee", "(", "speed", "=", "FEE_SPEED_MEDIUM", ")", ":", "if", "speed", "==", "FEE_SPEED_FAST", ":", "return", "DEFAULT_FEE_FAST", "elif", "speed", "==", "FEE_SPEED_MEDIUM", ":", "return", "DEFAULT_FEE_MEDIUM", "elif", "speed", "==", "FEE_SPEED_SLOW", ":", "return", "DEFAULT_FEE_SLOW", "else", ":", "raise", "ValueError", "(", "'Invalid speed argument.'", ")" ]
Gets the recommended satoshi per byte fee. :param speed: One of: 'fast', 'medium', 'slow'. :type speed: ``string`` :rtype: ``int``
[ "Gets", "the", "recommended", "satoshi", "per", "byte", "fee", "." ]
c7a18b9d82af98f1000c456dd06131524c260b7f
https://github.com/sporestack/bitcash/blob/c7a18b9d82af98f1000c456dd06131524c260b7f/bitcash/network/fees.py#L15-L29
train
238,567
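get_fee is a fixed lookup from a speed label to a default satoshi-per-byte value. An equivalent sketch written as a dict lookup; the fee numbers here are placeholders, not bitcash's actual defaults:

```python
FEE_SPEED_FAST, FEE_SPEED_MEDIUM, FEE_SPEED_SLOW = 'fast', 'medium', 'slow'

# Placeholder values only -- the real defaults live in bitcash.
DEFAULT_FEES = {FEE_SPEED_FAST: 2, FEE_SPEED_MEDIUM: 1, FEE_SPEED_SLOW: 1}

def get_fee(speed=FEE_SPEED_MEDIUM):
    try:
        return DEFAULT_FEES[speed]
    except KeyError:
        raise ValueError('Invalid speed argument.')

print(get_fee('fast'), get_fee(), get_fee('slow'))
```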
Groundworkstech/pybfd
setup.py
CustomBuildExtension.find_binutils_libs
def find_binutils_libs(self, libdir, lib_ext): """Find Binutils libraries.""" bfd_expr = re.compile("(lib(?:bfd)|(?:opcodes))(.*?)\%s" % lib_ext ) libs = {} for root, dirs, files in os.walk(libdir): for f in files: m = bfd_expr.search(f) if m: lib, version = m.groups() fp = os.path.join(root, f) if version in libs: libs[ version ].append( fp ) else: libs[ version ] = [fp,] # first, search for multiarch files. # check if we found more than one version of the multiarch libs. multiarch_libs = dict( [(v,_l) for v, _l in libs.items() \ if v.find("multiarch") != -1 ] ) if len(multiarch_libs) > 1: print "[W] Multiple binutils versions detected. Trying to build with default..." return multiarch_libs.values()[0] if len(multiarch_libs) == 1: return multiarch_libs.values()[0] # or use the default libs, or .. none return libs.get("",[])
python
def find_binutils_libs(self, libdir, lib_ext): """Find Binutils libraries.""" bfd_expr = re.compile("(lib(?:bfd)|(?:opcodes))(.*?)\%s" % lib_ext ) libs = {} for root, dirs, files in os.walk(libdir): for f in files: m = bfd_expr.search(f) if m: lib, version = m.groups() fp = os.path.join(root, f) if version in libs: libs[ version ].append( fp ) else: libs[ version ] = [fp,] # first, search for multiarch files. # check if we found more than one version of the multiarch libs. multiarch_libs = dict( [(v,_l) for v, _l in libs.items() \ if v.find("multiarch") != -1 ] ) if len(multiarch_libs) > 1: print "[W] Multiple binutils versions detected. Trying to build with default..." return multiarch_libs.values()[0] if len(multiarch_libs) == 1: return multiarch_libs.values()[0] # or use the default libs, or .. none return libs.get("",[])
[ "def", "find_binutils_libs", "(", "self", ",", "libdir", ",", "lib_ext", ")", ":", "bfd_expr", "=", "re", ".", "compile", "(", "\"(lib(?:bfd)|(?:opcodes))(.*?)\\%s\"", "%", "lib_ext", ")", "libs", "=", "{", "}", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "libdir", ")", ":", "for", "f", "in", "files", ":", "m", "=", "bfd_expr", ".", "search", "(", "f", ")", "if", "m", ":", "lib", ",", "version", "=", "m", ".", "groups", "(", ")", "fp", "=", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", "if", "version", "in", "libs", ":", "libs", "[", "version", "]", ".", "append", "(", "fp", ")", "else", ":", "libs", "[", "version", "]", "=", "[", "fp", ",", "]", "# first, search for multiarch files.", "# check if we found more than one version of the multiarch libs.", "multiarch_libs", "=", "dict", "(", "[", "(", "v", ",", "_l", ")", "for", "v", ",", "_l", "in", "libs", ".", "items", "(", ")", "if", "v", ".", "find", "(", "\"multiarch\"", ")", "!=", "-", "1", "]", ")", "if", "len", "(", "multiarch_libs", ")", ">", "1", ":", "print", "\"[W] Multiple binutils versions detected. Trying to build with default...\"", "return", "multiarch_libs", ".", "values", "(", ")", "[", "0", "]", "if", "len", "(", "multiarch_libs", ")", "==", "1", ":", "return", "multiarch_libs", ".", "values", "(", ")", "[", "0", "]", "# or use the default libs, or .. none", "return", "libs", ".", "get", "(", "\"\"", ",", "[", "]", ")" ]
Find Binutils libraries.
[ "Find", "Binutils", "libraries", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/setup.py#L117-L142
train
238,568
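find_binutils_libs groups candidate libraries by the version/multiarch suffix captured by its regular expression, then prefers the multiarch group if one exists. A standalone sketch of just the matching-and-grouping step, run over made-up filenames:

```python
import re

# Same pattern construction as find_binutils_libs(); '.so' is an example
# library extension and the filenames below are made up.
lib_ext = '.so'
bfd_expr = re.compile(r"(lib(?:bfd)|(?:opcodes))(.*?)\%s" % lib_ext)

filenames = [
    'libbfd-2.34-system.so',
    'libopcodes-2.34-system.so',
    'libbfd-2.34-multiarch.so',
    'libopcodes-2.34-multiarch.so',
]

libs = {}
for name in filenames:
    m = bfd_expr.search(name)
    if m:
        lib, version = m.groups()
        libs.setdefault(version, []).append(name)

# Prefer the multiarch builds when present, as the original method does.
multiarch = {v: names for v, names in libs.items() if 'multiarch' in v}
print(multiarch or libs.get('', []))
```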
Groundworkstech/pybfd
setup.py
CustomBuildExtension.generate_source_files
def generate_source_files( self ): """ Genertate source files to be used during the compile process of the extension module. This is better than just hardcoding the values on python files because header definitions might change along differente Binutils versions and we'll be able to catch the changes and keep the correct values. """ from pybfd.gen_supported_disasm import get_supported_architectures, \ get_supported_machines, \ generate_supported_architectures_source, \ generate_supported_disassembler_header, \ gen_supported_archs # # Step 1 . Get the patch to libopcodes and nm utility for further # usage. # libs_dirs = [os.path.dirname(lib) for lib in self.libs] libopcodes = [lib for lib in self.libs if os.path.basename(lib).startswith("libopcodes")][0] print "[+] Detecting libbfd/libopcodes compiled architectures" if self.with_static_binutils: # use the nm from the binutils distro nms = [ os.path.join( libs_dir, "..", "bin", "nm" ), # default name of nm os.path.join( libs_dir, "..", "bin", "gnm" ) # in OSX brew install binutils's nm as gnm. ] path_to_nm = None for nm_fullpath in nms: if os.path.isfile( nm_fullpath ): path_to_nm = nm_fullpath break if path_to_nm == None: raise Exception("no suitable 'nm' found.") else: path_to_nm = "nm" # Use the nm in the $PATH (TODO: its assume that nm exists) # # Step 2 . # # Prepare the libs to be used as option of the compiler. path_to_bfd_header = os.path.join( self.includes, "bfd.h") supported_machines = get_supported_machines(path_to_bfd_header) supported_archs = get_supported_architectures( path_to_nm, libopcodes, supported_machines, self.with_static_binutils == None) source_bfd_archs_c = generate_supported_architectures_source(supported_archs, supported_machines) print "[+] Generating .C files..." gen_file = os.path.join(PACKAGE_DIR, "gen_bfd_archs.c") with open(gen_file, "w+") as fd: fd.write(source_bfd_archs_c) print "[+] %s" % gen_file if self.with_static_binutils: link_to_libs = [] # ... else: link_to_libs = [self.prepare_libs_for_cc(os.path.basename(lib)) for lib in self.libs] c_compiler = new_compiler() objects = c_compiler.compile( [os.path.join(PACKAGE_DIR, "gen_bfd_archs.c"), ], include_dirs = [self.includes,] ) program = c_compiler.link_executable( objects, libraries = link_to_libs, library_dirs = libs_dirs, output_progname = "gen_bfd_archs", output_dir = PACKAGE_DIR ) gen_tool = os.path.join(PACKAGE_DIR, "gen_bfd_archs") gen_file = os.path.join(self.build_lib, PACKAGE_DIR, "bfd_archs.py") cmd = "%s > %s" % ( gen_tool, gen_file ) print "[+] Generating .py files..." # generate C dependent definitions os.system( cmd ) # generate python specific data with open(gen_file, "a") as f: f.write( gen_supported_archs(supported_archs) ) # Remove unused files. for obj in objects: os.unlink(obj) os.unlink(gen_tool) print "[+] %s" % gen_file # # Step 3 . Generate header file to be used by the PyBFD extension # modules bfd.c and opcodes.c. # gen_source = generate_supported_disassembler_header(supported_archs) if len(supported_archs) == 0: raise Exception("Unable to determine libopcodes' supported " \ "platforms from '%s'" % libopcodes) print "[+] Generating .h files..." gen_file = os.path.join(PACKAGE_DIR, "supported_disasm.h") with open(gen_file, "w+") as fd: fd.write(gen_source) print "[+] %s" % gen_file return supported_archs
python
def generate_source_files( self ): """ Genertate source files to be used during the compile process of the extension module. This is better than just hardcoding the values on python files because header definitions might change along differente Binutils versions and we'll be able to catch the changes and keep the correct values. """ from pybfd.gen_supported_disasm import get_supported_architectures, \ get_supported_machines, \ generate_supported_architectures_source, \ generate_supported_disassembler_header, \ gen_supported_archs # # Step 1 . Get the patch to libopcodes and nm utility for further # usage. # libs_dirs = [os.path.dirname(lib) for lib in self.libs] libopcodes = [lib for lib in self.libs if os.path.basename(lib).startswith("libopcodes")][0] print "[+] Detecting libbfd/libopcodes compiled architectures" if self.with_static_binutils: # use the nm from the binutils distro nms = [ os.path.join( libs_dir, "..", "bin", "nm" ), # default name of nm os.path.join( libs_dir, "..", "bin", "gnm" ) # in OSX brew install binutils's nm as gnm. ] path_to_nm = None for nm_fullpath in nms: if os.path.isfile( nm_fullpath ): path_to_nm = nm_fullpath break if path_to_nm == None: raise Exception("no suitable 'nm' found.") else: path_to_nm = "nm" # Use the nm in the $PATH (TODO: its assume that nm exists) # # Step 2 . # # Prepare the libs to be used as option of the compiler. path_to_bfd_header = os.path.join( self.includes, "bfd.h") supported_machines = get_supported_machines(path_to_bfd_header) supported_archs = get_supported_architectures( path_to_nm, libopcodes, supported_machines, self.with_static_binutils == None) source_bfd_archs_c = generate_supported_architectures_source(supported_archs, supported_machines) print "[+] Generating .C files..." gen_file = os.path.join(PACKAGE_DIR, "gen_bfd_archs.c") with open(gen_file, "w+") as fd: fd.write(source_bfd_archs_c) print "[+] %s" % gen_file if self.with_static_binutils: link_to_libs = [] # ... else: link_to_libs = [self.prepare_libs_for_cc(os.path.basename(lib)) for lib in self.libs] c_compiler = new_compiler() objects = c_compiler.compile( [os.path.join(PACKAGE_DIR, "gen_bfd_archs.c"), ], include_dirs = [self.includes,] ) program = c_compiler.link_executable( objects, libraries = link_to_libs, library_dirs = libs_dirs, output_progname = "gen_bfd_archs", output_dir = PACKAGE_DIR ) gen_tool = os.path.join(PACKAGE_DIR, "gen_bfd_archs") gen_file = os.path.join(self.build_lib, PACKAGE_DIR, "bfd_archs.py") cmd = "%s > %s" % ( gen_tool, gen_file ) print "[+] Generating .py files..." # generate C dependent definitions os.system( cmd ) # generate python specific data with open(gen_file, "a") as f: f.write( gen_supported_archs(supported_archs) ) # Remove unused files. for obj in objects: os.unlink(obj) os.unlink(gen_tool) print "[+] %s" % gen_file # # Step 3 . Generate header file to be used by the PyBFD extension # modules bfd.c and opcodes.c. # gen_source = generate_supported_disassembler_header(supported_archs) if len(supported_archs) == 0: raise Exception("Unable to determine libopcodes' supported " \ "platforms from '%s'" % libopcodes) print "[+] Generating .h files..." gen_file = os.path.join(PACKAGE_DIR, "supported_disasm.h") with open(gen_file, "w+") as fd: fd.write(gen_source) print "[+] %s" % gen_file return supported_archs
[ "def", "generate_source_files", "(", "self", ")", ":", "from", "pybfd", ".", "gen_supported_disasm", "import", "get_supported_architectures", ",", "get_supported_machines", ",", "generate_supported_architectures_source", ",", "generate_supported_disassembler_header", ",", "gen_supported_archs", "#", "# Step 1 . Get the patch to libopcodes and nm utility for further", "# usage.", "#", "libs_dirs", "=", "[", "os", ".", "path", ".", "dirname", "(", "lib", ")", "for", "lib", "in", "self", ".", "libs", "]", "libopcodes", "=", "[", "lib", "for", "lib", "in", "self", ".", "libs", "if", "os", ".", "path", ".", "basename", "(", "lib", ")", ".", "startswith", "(", "\"libopcodes\"", ")", "]", "[", "0", "]", "print", "\"[+] Detecting libbfd/libopcodes compiled architectures\"", "if", "self", ".", "with_static_binutils", ":", "# use the nm from the binutils distro", "nms", "=", "[", "os", ".", "path", ".", "join", "(", "libs_dir", ",", "\"..\"", ",", "\"bin\"", ",", "\"nm\"", ")", ",", "# default name of nm", "os", ".", "path", ".", "join", "(", "libs_dir", ",", "\"..\"", ",", "\"bin\"", ",", "\"gnm\"", ")", "# in OSX brew install binutils's nm as gnm.", "]", "path_to_nm", "=", "None", "for", "nm_fullpath", "in", "nms", ":", "if", "os", ".", "path", ".", "isfile", "(", "nm_fullpath", ")", ":", "path_to_nm", "=", "nm_fullpath", "break", "if", "path_to_nm", "==", "None", ":", "raise", "Exception", "(", "\"no suitable 'nm' found.\"", ")", "else", ":", "path_to_nm", "=", "\"nm\"", "# Use the nm in the $PATH (TODO: its assume that nm exists)", "#", "# Step 2 .", "#", "# Prepare the libs to be used as option of the compiler.", "path_to_bfd_header", "=", "os", ".", "path", ".", "join", "(", "self", ".", "includes", ",", "\"bfd.h\"", ")", "supported_machines", "=", "get_supported_machines", "(", "path_to_bfd_header", ")", "supported_archs", "=", "get_supported_architectures", "(", "path_to_nm", ",", "libopcodes", ",", "supported_machines", ",", "self", ".", "with_static_binutils", "==", "None", ")", "source_bfd_archs_c", "=", "generate_supported_architectures_source", "(", "supported_archs", ",", "supported_machines", ")", "print", "\"[+] Generating .C files...\"", "gen_file", "=", "os", ".", "path", ".", "join", "(", "PACKAGE_DIR", ",", "\"gen_bfd_archs.c\"", ")", "with", "open", "(", "gen_file", ",", "\"w+\"", ")", "as", "fd", ":", "fd", ".", "write", "(", "source_bfd_archs_c", ")", "print", "\"[+] %s\"", "%", "gen_file", "if", "self", ".", "with_static_binutils", ":", "link_to_libs", "=", "[", "]", "# ...", "else", ":", "link_to_libs", "=", "[", "self", ".", "prepare_libs_for_cc", "(", "os", ".", "path", ".", "basename", "(", "lib", ")", ")", "for", "lib", "in", "self", ".", "libs", "]", "c_compiler", "=", "new_compiler", "(", ")", "objects", "=", "c_compiler", ".", "compile", "(", "[", "os", ".", "path", ".", "join", "(", "PACKAGE_DIR", ",", "\"gen_bfd_archs.c\"", ")", ",", "]", ",", "include_dirs", "=", "[", "self", ".", "includes", ",", "]", ")", "program", "=", "c_compiler", ".", "link_executable", "(", "objects", ",", "libraries", "=", "link_to_libs", ",", "library_dirs", "=", "libs_dirs", ",", "output_progname", "=", "\"gen_bfd_archs\"", ",", "output_dir", "=", "PACKAGE_DIR", ")", "gen_tool", "=", "os", ".", "path", ".", "join", "(", "PACKAGE_DIR", ",", "\"gen_bfd_archs\"", ")", "gen_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "build_lib", ",", "PACKAGE_DIR", ",", "\"bfd_archs.py\"", ")", "cmd", "=", "\"%s > %s\"", "%", "(", "gen_tool", ",", "gen_file", ")", "print", "\"[+] 
Generating .py files...\"", "# generate C dependent definitions", "os", ".", "system", "(", "cmd", ")", "# generate python specific data", "with", "open", "(", "gen_file", ",", "\"a\"", ")", "as", "f", ":", "f", ".", "write", "(", "gen_supported_archs", "(", "supported_archs", ")", ")", "# Remove unused files.", "for", "obj", "in", "objects", ":", "os", ".", "unlink", "(", "obj", ")", "os", ".", "unlink", "(", "gen_tool", ")", "print", "\"[+] %s\"", "%", "gen_file", "#", "# Step 3 . Generate header file to be used by the PyBFD extension", "# modules bfd.c and opcodes.c.", "#", "gen_source", "=", "generate_supported_disassembler_header", "(", "supported_archs", ")", "if", "len", "(", "supported_archs", ")", "==", "0", ":", "raise", "Exception", "(", "\"Unable to determine libopcodes' supported \"", "\"platforms from '%s'\"", "%", "libopcodes", ")", "print", "\"[+] Generating .h files...\"", "gen_file", "=", "os", ".", "path", ".", "join", "(", "PACKAGE_DIR", ",", "\"supported_disasm.h\"", ")", "with", "open", "(", "gen_file", ",", "\"w+\"", ")", "as", "fd", ":", "fd", ".", "write", "(", "gen_source", ")", "print", "\"[+] %s\"", "%", "gen_file", "return", "supported_archs" ]
Generate source files to be used during the compile process of the extension module. This is better than just hardcoding the values in Python files because header definitions might change across different Binutils versions and we'll be able to catch the changes and keep the correct values.
[ "Genertate", "source", "files", "to", "be", "used", "during", "the", "compile", "process", "of", "the", "extension", "module", ".", "This", "is", "better", "than", "just", "hardcoding", "the", "values", "on", "python", "files", "because", "header", "definitions", "might", "change", "along", "differente", "Binutils", "versions", "and", "we", "ll", "be", "able", "to", "catch", "the", "changes", "and", "keep", "the", "correct", "values", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/setup.py#L152-L265
train
238,569
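The build step above generates a C helper, compiles it, and runs it to emit Python definitions. A minimal sketch of the compile-and-link half using distutils follows; the paths and library names are placeholders, not the package's real values:

```python
# Minimal sketch of the codegen build step above, assuming gen_bfd_archs.c
# and the include/lib directories already exist; all names are placeholders.
from distutils.ccompiler import new_compiler

cc = new_compiler()
objects = cc.compile(["gen_bfd_archs.c"], include_dirs=["include"])
cc.link_executable(objects, "gen_bfd_archs",
                   libraries=["opcodes", "bfd"],
                   library_dirs=["lib"])
```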
Groundworkstech/pybfd
setup.py
CustomBuildExtension._darwin_current_arch
def _darwin_current_arch(self): """Add Mac OS X support.""" if sys.platform == "darwin": if sys.maxsize > 2 ** 32: # 64bits. return platform.mac_ver()[2] # Both Darwin and Python are 64bits. else: # Python 32 bits return platform.processor()
python
def _darwin_current_arch(self): """Add Mac OS X support.""" if sys.platform == "darwin": if sys.maxsize > 2 ** 32: # 64bits. return platform.mac_ver()[2] # Both Darwin and Python are 64bits. else: # Python 32 bits return platform.processor()
[ "def", "_darwin_current_arch", "(", "self", ")", ":", "if", "sys", ".", "platform", "==", "\"darwin\"", ":", "if", "sys", ".", "maxsize", ">", "2", "**", "32", ":", "# 64bits.", "return", "platform", ".", "mac_ver", "(", ")", "[", "2", "]", "# Both Darwin and Python are 64bits.", "else", ":", "# Python 32 bits", "return", "platform", ".", "processor", "(", ")" ]
Add Mac OS X support.
[ "Add", "Mac", "OS", "X", "support", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/setup.py#L267-L273
train
238,570
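The probe above leans on sys.maxsize to tell a 64-bit interpreter from a 32-bit one before asking platform for the machine string. A standalone sketch of the same check:

```python
# Standalone sketch of the Darwin architecture probe above: sys.maxsize
# distinguishes a 64-bit interpreter, platform.mac_ver()[2] is the machine.
import platform
import sys

if sys.platform == "darwin":
    if sys.maxsize > 2 ** 32:
        print(platform.mac_ver()[2])   # e.g. x86_64
    else:
        print(platform.processor())
```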
Groundworkstech/pybfd
pybfd/objdump.py
init_parser
def init_parser(): """Initialize option parser.""" usage = "Usage: %(prog)s <option(s)> <file(s)>" description = " Display information from object <file(s)>.\n" description += " At least one of the following switches must be given:" # # Create an argument parser and an exclusive group. # parser = ArgumentParser( usage=usage, description=description, add_help=False) group = parser.add_mutually_exclusive_group() # # Add objdump parameters. # group.add_argument("-a", "--archive-headers", action=DumpArchieveHeadersAction, type=FileType("r"), nargs="+", help="Display archive header information") group.add_argument("-f", "--file-headers", action=DumpFileHeadersAction, type=FileType("r"), nargs="+", help="Display the contents of the overall file header") #group.add_argument("-p", "--private-headers", action="store", type=FileType("r"), nargs="+", help="Display object format specific file header contents") #group.add_argument("-P", "--private=OPT,OPT...", action="store", type=FileType("r"), nargs="+", help="Display object format specific contents") group.add_argument("-h", "--section-headers", action=DumpSectionHeadersAction, type=FileType("r"), nargs="+", help="Display the contents of the section headers") #group.add_argument("-x", "--all-headers", action="store", type=FileType("r"), nargs="+", help="Display the contents of all headers") group.add_argument("-d", "--disassemble", action=DisassembleSectionAction, type=FileType("r"), nargs="+", help="Display assembler contents of executable sections") group.add_argument("-D", "--disassemble-all", action=DisassembleSectionsAction, type=FileType("r"), nargs="+", help="Display assembler contents of executable sections") #group.add_argument("-S", "--source", action="store", type=FileType("r"), nargs="+", help="Intermix source code with disassembly") group.add_argument("-s", "--full-contents", action=DumpSectionContentAction, type=FileType("r"), nargs="+", help="Display the full contents of all sections requested") #group.add_argument("-g", "--debugging", action="store", type=FileType("r"), nargs="+", help="Display debug information in object file") #group.add_argument("-e", "--debugging-tags", action="store", type=FileType("r"), nargs="+", help="Display debug information using ctags style") #group.add_argument("-G", "--stabs", action="store", type=FileType("r"), nargs="+", help="Display (in raw form) any STABS info in the file") #-W[lLiaprmfFsoRt] or") #--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,") # =frames-interp,=str,=loc,=Ranges,=pubtypes,") # =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]") # Display DWARF info in the file") group.add_argument("-t", "--syms", action=DumpFileSymbols, type=FileType("r"), nargs="+", help="Display the contents of the symbol table(s)") #-T, --dynamic-syms Display the contents of the dynamic symbol table") #-r, --reloc Display the relocation entries in the file") #-R, --dynamic-reloc Display the dynamic relocation entries in the file") group.add_argument("-v", "--version", action="version", version="%%(prog)s %s (%s)" % (__version__, __description__), help="Display this program's version number") group.add_argument("-i", "--info", action=ListFormatAndArchitecturesInformationAction, nargs=REMAINDER, help="List object formats and architectures supported") group.add_argument("-H", "--help", action="store_true", default=False, help="Display this information") return parser
python
def init_parser(): """Initialize option parser.""" usage = "Usage: %(prog)s <option(s)> <file(s)>" description = " Display information from object <file(s)>.\n" description += " At least one of the following switches must be given:" # # Create an argument parser and an exclusive group. # parser = ArgumentParser( usage=usage, description=description, add_help=False) group = parser.add_mutually_exclusive_group() # # Add objdump parameters. # group.add_argument("-a", "--archive-headers", action=DumpArchieveHeadersAction, type=FileType("r"), nargs="+", help="Display archive header information") group.add_argument("-f", "--file-headers", action=DumpFileHeadersAction, type=FileType("r"), nargs="+", help="Display the contents of the overall file header") #group.add_argument("-p", "--private-headers", action="store", type=FileType("r"), nargs="+", help="Display object format specific file header contents") #group.add_argument("-P", "--private=OPT,OPT...", action="store", type=FileType("r"), nargs="+", help="Display object format specific contents") group.add_argument("-h", "--section-headers", action=DumpSectionHeadersAction, type=FileType("r"), nargs="+", help="Display the contents of the section headers") #group.add_argument("-x", "--all-headers", action="store", type=FileType("r"), nargs="+", help="Display the contents of all headers") group.add_argument("-d", "--disassemble", action=DisassembleSectionAction, type=FileType("r"), nargs="+", help="Display assembler contents of executable sections") group.add_argument("-D", "--disassemble-all", action=DisassembleSectionsAction, type=FileType("r"), nargs="+", help="Display assembler contents of executable sections") #group.add_argument("-S", "--source", action="store", type=FileType("r"), nargs="+", help="Intermix source code with disassembly") group.add_argument("-s", "--full-contents", action=DumpSectionContentAction, type=FileType("r"), nargs="+", help="Display the full contents of all sections requested") #group.add_argument("-g", "--debugging", action="store", type=FileType("r"), nargs="+", help="Display debug information in object file") #group.add_argument("-e", "--debugging-tags", action="store", type=FileType("r"), nargs="+", help="Display debug information using ctags style") #group.add_argument("-G", "--stabs", action="store", type=FileType("r"), nargs="+", help="Display (in raw form) any STABS info in the file") #-W[lLiaprmfFsoRt] or") #--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,") # =frames-interp,=str,=loc,=Ranges,=pubtypes,") # =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]") # Display DWARF info in the file") group.add_argument("-t", "--syms", action=DumpFileSymbols, type=FileType("r"), nargs="+", help="Display the contents of the symbol table(s)") #-T, --dynamic-syms Display the contents of the dynamic symbol table") #-r, --reloc Display the relocation entries in the file") #-R, --dynamic-reloc Display the dynamic relocation entries in the file") group.add_argument("-v", "--version", action="version", version="%%(prog)s %s (%s)" % (__version__, __description__), help="Display this program's version number") group.add_argument("-i", "--info", action=ListFormatAndArchitecturesInformationAction, nargs=REMAINDER, help="List object formats and architectures supported") group.add_argument("-H", "--help", action="store_true", default=False, help="Display this information") return parser
[ "def", "init_parser", "(", ")", ":", "usage", "=", "\"Usage: %(prog)s <option(s)> <file(s)>\"", "description", "=", "\" Display information from object <file(s)>.\\n\"", "description", "+=", "\" At least one of the following switches must be given:\"", "#", "# Create an argument parser and an exclusive group.", "#", "parser", "=", "ArgumentParser", "(", "usage", "=", "usage", ",", "description", "=", "description", ",", "add_help", "=", "False", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "#", "# Add objdump parameters.", "#", "group", ".", "add_argument", "(", "\"-a\"", ",", "\"--archive-headers\"", ",", "action", "=", "DumpArchieveHeadersAction", ",", "type", "=", "FileType", "(", "\"r\"", ")", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Display archive header information\"", ")", "group", ".", "add_argument", "(", "\"-f\"", ",", "\"--file-headers\"", ",", "action", "=", "DumpFileHeadersAction", ",", "type", "=", "FileType", "(", "\"r\"", ")", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Display the contents of the overall file header\"", ")", "#group.add_argument(\"-p\", \"--private-headers\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display object format specific file header contents\")", "#group.add_argument(\"-P\", \"--private=OPT,OPT...\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display object format specific contents\")", "group", ".", "add_argument", "(", "\"-h\"", ",", "\"--section-headers\"", ",", "action", "=", "DumpSectionHeadersAction", ",", "type", "=", "FileType", "(", "\"r\"", ")", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Display the contents of the section headers\"", ")", "#group.add_argument(\"-x\", \"--all-headers\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display the contents of all headers\")", "group", ".", "add_argument", "(", "\"-d\"", ",", "\"--disassemble\"", ",", "action", "=", "DisassembleSectionAction", ",", "type", "=", "FileType", "(", "\"r\"", ")", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Display assembler contents of executable sections\"", ")", "group", ".", "add_argument", "(", "\"-D\"", ",", "\"--disassemble-all\"", ",", "action", "=", "DisassembleSectionsAction", ",", "type", "=", "FileType", "(", "\"r\"", ")", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Display assembler contents of executable sections\"", ")", "#group.add_argument(\"-S\", \"--source\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Intermix source code with disassembly\")", "group", ".", "add_argument", "(", "\"-s\"", ",", "\"--full-contents\"", ",", "action", "=", "DumpSectionContentAction", ",", "type", "=", "FileType", "(", "\"r\"", ")", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Display the full contents of all sections requested\"", ")", "#group.add_argument(\"-g\", \"--debugging\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display debug information in object file\")", "#group.add_argument(\"-e\", \"--debugging-tags\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display debug information using ctags style\")", "#group.add_argument(\"-G\", \"--stabs\", action=\"store\", type=FileType(\"r\"), nargs=\"+\", help=\"Display (in raw form) any STABS info in the file\")", "#-W[lLiaprmfFsoRt] or\")", "#--dwarf[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,\")", "# =frames-interp,=str,=loc,=Ranges,=pubtypes,\")", "# =gdb_index,=trace_info,=trace_abbrev,=trace_aranges]\")", "# Display DWARF 
info in the file\")", "group", ".", "add_argument", "(", "\"-t\"", ",", "\"--syms\"", ",", "action", "=", "DumpFileSymbols", ",", "type", "=", "FileType", "(", "\"r\"", ")", ",", "nargs", "=", "\"+\"", ",", "help", "=", "\"Display the contents of the symbol table(s)\"", ")", "#-T, --dynamic-syms Display the contents of the dynamic symbol table\")", "#-r, --reloc Display the relocation entries in the file\")", "#-R, --dynamic-reloc Display the dynamic relocation entries in the file\")", "group", ".", "add_argument", "(", "\"-v\"", ",", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "\"%%(prog)s %s (%s)\"", "%", "(", "__version__", ",", "__description__", ")", ",", "help", "=", "\"Display this program's version number\"", ")", "group", ".", "add_argument", "(", "\"-i\"", ",", "\"--info\"", ",", "action", "=", "ListFormatAndArchitecturesInformationAction", ",", "nargs", "=", "REMAINDER", ",", "help", "=", "\"List object formats and architectures supported\"", ")", "group", ".", "add_argument", "(", "\"-H\"", ",", "\"--help\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ",", "help", "=", "\"Display this information\"", ")", "return", "parser" ]
Initialize option parser.
[ "Initialize", "option", "parser", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/objdump.py#L254-L341
train
238,571
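init_parser above reclaims -h (via add_help=False) and keeps every objdump-style switch in one mutually exclusive group, each carrying its own file arguments. A cut-down sketch reproducing just two of the switches:

```python
# Cut-down sketch of the option layout in init_parser: add_help=False frees
# -h for --section-headers, and the exclusive group forbids mixing switches.
from argparse import ArgumentParser, FileType

parser = ArgumentParser(usage="%(prog)s <option(s)> <file(s)>", add_help=False)
group = parser.add_mutually_exclusive_group()
group.add_argument("-f", "--file-headers", type=FileType("r"), nargs="+",
                   help="Display the contents of the overall file header")
group.add_argument("-h", "--section-headers", type=FileType("r"), nargs="+",
                   help="Display the contents of the section headers")
args = parser.parse_args()
```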
Groundworkstech/pybfd
pybfd/objdump.py
DumpSectionContentAction.dump
def dump(self, src, length=16, start=0, preffix=""): """Dump the specified buffer in hex + ASCII format.""" FILTER = \ "".join([(len(repr(chr(x)))==3) and chr(x) or '.' \ for x in xrange(256)]) result = list() for i in xrange(0, len(src), length): s = src[i : i + length] hexa = " ".join(["%02X" % ord(x) for x in s]) printable = s.translate(FILTER) result.append("%s%08X %-*s %s\n" % \ (preffix, start + i, length * 3, hexa, printable)) return ''.join(result)
python
def dump(self, src, length=16, start=0, preffix=""): """Dump the specified buffer in hex + ASCII format.""" FILTER = \ "".join([(len(repr(chr(x)))==3) and chr(x) or '.' \ for x in xrange(256)]) result = list() for i in xrange(0, len(src), length): s = src[i : i + length] hexa = " ".join(["%02X" % ord(x) for x in s]) printable = s.translate(FILTER) result.append("%s%08X %-*s %s\n" % \ (preffix, start + i, length * 3, hexa, printable)) return ''.join(result)
[ "def", "dump", "(", "self", ",", "src", ",", "length", "=", "16", ",", "start", "=", "0", ",", "preffix", "=", "\"\"", ")", ":", "FILTER", "=", "\"\"", ".", "join", "(", "[", "(", "len", "(", "repr", "(", "chr", "(", "x", ")", ")", ")", "==", "3", ")", "and", "chr", "(", "x", ")", "or", "'.'", "for", "x", "in", "xrange", "(", "256", ")", "]", ")", "result", "=", "list", "(", ")", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "src", ")", ",", "length", ")", ":", "s", "=", "src", "[", "i", ":", "i", "+", "length", "]", "hexa", "=", "\" \"", ".", "join", "(", "[", "\"%02X\"", "%", "ord", "(", "x", ")", "for", "x", "in", "s", "]", ")", "printable", "=", "s", ".", "translate", "(", "FILTER", ")", "result", ".", "append", "(", "\"%s%08X %-*s %s\\n\"", "%", "(", "preffix", ",", "start", "+", "i", ",", "length", "*", "3", ",", "hexa", ",", "printable", ")", ")", "return", "''", ".", "join", "(", "result", ")" ]
Dump the specified buffer in hex + ASCII format.
[ "Dump", "the", "specified", "buffer", "in", "hex", "+", "ASCII", "format", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/objdump.py#L208-L224
train
238,572
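The dump method above builds a printable-character table once, then emits 16-byte rows of offset, hex, and ASCII. The same idea as a standalone helper (Python 2 semantics, matching the library):

```python
# Standalone sketch of the hex + ASCII dump above (Python 2 semantics:
# str bytes, ord() per char, 256-entry translate table).
def hexdump(src, length=16, start=0):
    flt = "".join(len(repr(chr(x))) == 3 and chr(x) or "." for x in range(256))
    rows = []
    for i in range(0, len(src), length):
        chunk = src[i:i + length]
        hexa = " ".join("%02X" % ord(c) for c in chunk)
        rows.append("%08X  %-*s %s" % (start + i, length * 3, hexa,
                                       chunk.translate(flt)))
    return "\n".join(rows)

print(hexdump("pybfd\x00\x01\x02"))
```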
Groundworkstech/pybfd
pybfd/section.py
BfdSection.content
def content(self): """Return the entire section content.""" return _bfd.section_get_content(self.bfd, self._ptr, 0, self.size)
python
def content(self): """Return the entire section content.""" return _bfd.section_get_content(self.bfd, self._ptr, 0, self.size)
[ "def", "content", "(", "self", ")", ":", "return", "_bfd", ".", "section_get_content", "(", "self", ".", "bfd", ",", "self", ".", "_ptr", ",", "0", ",", "self", ".", "size", ")" ]
Return the entire section content.
[ "Return", "the", "entire", "section", "content", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/section.py#L473-L475
train
238,573
Groundworkstech/pybfd
pybfd/section.py
BfdSection.get_content
def get_content(self, offset, size): """Return the specified number of bytes from the current section.""" return _bfd.section_get_content(self.bfd, self._ptr, offset, size)
python
def get_content(self, offset, size): """Return the specified number of bytes from the current section.""" return _bfd.section_get_content(self.bfd, self._ptr, offset, size)
[ "def", "get_content", "(", "self", ",", "offset", ",", "size", ")", ":", "return", "_bfd", ".", "section_get_content", "(", "self", ".", "bfd", ",", "self", ".", "_ptr", ",", "offset", ",", "size", ")" ]
Return the specified number of bytes from the current section.
[ "Return", "the", "specified", "number", "of", "bytes", "from", "the", "current", "section", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/section.py#L477-L479
train
238,574
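content and get_content above are the two ways to pull bytes out of a section: the whole section versus an (offset, size) window. A hedged usage sketch; the target file, the .text section name, and a filename-taking Bfd constructor are assumptions for illustration:

```python
# Hedged usage sketch for the section accessors above; /bin/ls, the .text
# section, and Bfd(path) opening the file are assumptions.
from pybfd.bfd import Bfd

bfd = Bfd("/bin/ls")
text = bfd.sections[".text"]
head = text.get_content(0, 16)        # same bytes as text.content[:16]
print("%d bytes at vma 0x%X" % (len(head), text.vma))
```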
Groundworkstech/pybfd
pybfd/opcodes.py
main
def main(): """Test case for simple opcode disassembly.""" test_targets = ( [ARCH_I386, MACH_I386_I386_INTEL_SYNTAX, ENDIAN_MONO, "\x55\x89\xe5\xE8\xB8\xFF\xFF\xFF", 0x1000], [ARCH_I386, MACH_X86_64_INTEL_SYNTAX, ENDIAN_MONO, "\x55\x48\x89\xe5\xE8\xA3\xFF\xFF\xFF", 0x1000], [ARCH_ARM, MACH_ARM_2, ENDIAN_LITTLE, "\x04\xe0\x2d\xe5\xED\xFF\xFF\xEB", 0x1000], [ARCH_MIPS, MACH_MIPSISA32, ENDIAN_BIG, "\x0C\x10\x00\x97\x00\x00\x00\x00", 0x1000], [ARCH_POWERPC, MACH_PPC, ENDIAN_BIG, "\x94\x21\xFF\xE8\x7C\x08\x02\xA6", 0x1000], #[ARCH_XTENSA, MACH_XTENSA, ENDIAN_BIG, "\x6C\x10\x06\xD7\x10", 0x1000], ) for target_arch, target_mach, target_endian, binary, address in test_targets: # # Initialize libopcodes with the current architecture. # opcodes = Opcodes(target_arch, target_mach, target_endian) # Print some architecture-specific information. print "\n[+] Architecture %s - Machine %d" % \ (opcodes.architecture_name, opcodes.machine) print "[+] Disassembly:" # Print all the disassembled instructions. for vma, size, disasm in opcodes.disassemble(binary, address): print "0x%X (size=%d)\t %s" % (vma, size, disasm)
python
def main(): """Test case for simple opcode disassembly.""" test_targets = ( [ARCH_I386, MACH_I386_I386_INTEL_SYNTAX, ENDIAN_MONO, "\x55\x89\xe5\xE8\xB8\xFF\xFF\xFF", 0x1000], [ARCH_I386, MACH_X86_64_INTEL_SYNTAX, ENDIAN_MONO, "\x55\x48\x89\xe5\xE8\xA3\xFF\xFF\xFF", 0x1000], [ARCH_ARM, MACH_ARM_2, ENDIAN_LITTLE, "\x04\xe0\x2d\xe5\xED\xFF\xFF\xEB", 0x1000], [ARCH_MIPS, MACH_MIPSISA32, ENDIAN_BIG, "\x0C\x10\x00\x97\x00\x00\x00\x00", 0x1000], [ARCH_POWERPC, MACH_PPC, ENDIAN_BIG, "\x94\x21\xFF\xE8\x7C\x08\x02\xA6", 0x1000], #[ARCH_XTENSA, MACH_XTENSA, ENDIAN_BIG, "\x6C\x10\x06\xD7\x10", 0x1000], ) for target_arch, target_mach, target_endian, binary, address in test_targets: # # Initialize libopcodes with the current architecture. # opcodes = Opcodes(target_arch, target_mach, target_endian) # Print some architecture-specific information. print "\n[+] Architecture %s - Machine %d" % \ (opcodes.architecture_name, opcodes.machine) print "[+] Disassembly:" # Print all the disassembled instructions. for vma, size, disasm in opcodes.disassemble(binary, address): print "0x%X (size=%d)\t %s" % (vma, size, disasm)
[ "def", "main", "(", ")", ":", "test_targets", "=", "(", "[", "ARCH_I386", ",", "MACH_I386_I386_INTEL_SYNTAX", ",", "ENDIAN_MONO", ",", "\"\\x55\\x89\\xe5\\xE8\\xB8\\xFF\\xFF\\xFF\"", ",", "0x1000", "]", ",", "[", "ARCH_I386", ",", "MACH_X86_64_INTEL_SYNTAX", ",", "ENDIAN_MONO", ",", "\"\\x55\\x48\\x89\\xe5\\xE8\\xA3\\xFF\\xFF\\xFF\"", ",", "0x1000", "]", ",", "[", "ARCH_ARM", ",", "MACH_ARM_2", ",", "ENDIAN_LITTLE", ",", "\"\\x04\\xe0\\x2d\\xe5\\xED\\xFF\\xFF\\xEB\"", ",", "0x1000", "]", ",", "[", "ARCH_MIPS", ",", "MACH_MIPSISA32", ",", "ENDIAN_BIG", ",", "\"\\x0C\\x10\\x00\\x97\\x00\\x00\\x00\\x00\"", ",", "0x1000", "]", ",", "[", "ARCH_POWERPC", ",", "MACH_PPC", ",", "ENDIAN_BIG", ",", "\"\\x94\\x21\\xFF\\xE8\\x7C\\x08\\x02\\xA6\"", ",", "0x1000", "]", ",", "#[ARCH_XTENSA, MACH_XTENSA, ENDIAN_BIG, \"\\x6C\\x10\\x06\\xD7\\x10\", 0x1000],", ")", "for", "target_arch", ",", "target_mach", ",", "target_endian", ",", "binary", ",", "address", "in", "test_targets", ":", "#", "# Initialize libopcodes with the current architecture.", "#", "opcodes", "=", "Opcodes", "(", "target_arch", ",", "target_mach", ",", "target_endian", ")", "# Print some architecture-specific information.", "print", "\"\\n[+] Architecture %s - Machine %d\"", "%", "(", "opcodes", ".", "architecture_name", ",", "opcodes", ".", "machine", ")", "print", "\"[+] Disassembly:\"", "# Print all the disassembled instructions.", "for", "vma", ",", "size", ",", "disasm", "in", "opcodes", ".", "disassemble", "(", "binary", ",", "address", ")", ":", "print", "\"0x%X (size=%d)\\t %s\"", "%", "(", "vma", ",", "size", ",", "disasm", ")" ]
Test case for simple opcode disassembly.
[ "Test", "case", "for", "simple", "opcode", "disassembly", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L186-L211
train
238,575
Groundworkstech/pybfd
pybfd/opcodes.py
Opcodes.initialize_bfd
def initialize_bfd(self, abfd): """Initialize underlying libOpcodes library using BFD.""" self._ptr = _opcodes.initialize_bfd(abfd._ptr) # Already done inside opcodes.c #self.architecture = abfd.architecture #self.machine = abfd.machine #self.endian = abfd.endian # force intel syntax if self.architecture == ARCH_I386: if abfd.arch_size == 32: self.machine = MACH_I386_I386_INTEL_SYNTAX #abfd.machine = MACH_I386_I386_INTEL_SYNTAX elif abfd.arch_size == 64: self.machine = MACH_X86_64_INTEL_SYNTAX
python
def initialize_bfd(self, abfd): """Initialize underlying libOpcodes library using BFD.""" self._ptr = _opcodes.initialize_bfd(abfd._ptr) # Already done inside opcodes.c #self.architecture = abfd.architecture #self.machine = abfd.machine #self.endian = abfd.endian # force intel syntax if self.architecture == ARCH_I386: if abfd.arch_size == 32: self.machine = MACH_I386_I386_INTEL_SYNTAX #abfd.machine = MACH_I386_I386_INTEL_SYNTAX elif abfd.arch_size == 64: self.machine = MACH_X86_64_INTEL_SYNTAX
[ "def", "initialize_bfd", "(", "self", ",", "abfd", ")", ":", "self", ".", "_ptr", "=", "_opcodes", ".", "initialize_bfd", "(", "abfd", ".", "_ptr", ")", "# Already done inside opcodes.c", "#self.architecture = abfd.architecture", "#self.machine = abfd.machine", "#self.endian = abfd.endian", "# force intel syntax", "if", "self", ".", "architecture", "==", "ARCH_I386", ":", "if", "abfd", ".", "arch_size", "==", "32", ":", "self", ".", "machine", "=", "MACH_I386_I386_INTEL_SYNTAX", "#abfd.machine = MACH_I386_I386_INTEL_SYNTAX", "elif", "abfd", ".", "arch_size", "==", "64", ":", "self", ".", "machine", "=", "MACH_X86_64_INTEL_SYNTAX" ]
Initialize underlying libOpcodes library using BFD.
[ "Initialize", "underlying", "libOpcodes", "library", "using", "BFD", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L84-L99
train
238,576
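initialize_bfd above inherits architecture, machine, and endianness from the open BFD, then forces the Intel-syntax machine constant on 32- and 64-bit x86. A hedged sketch of driving it, assuming the Opcodes constructor dispatches here when handed a Bfd instance:

```python
# Hedged sketch: build a disassembler from an open Bfd, assuming
# Opcodes(bfd) routes to initialize_bfd(). On x86 this also forces
# Intel syntax, per the branch in the code above.
from pybfd.bfd import Bfd
from pybfd.opcodes import Opcodes

bfd = Bfd("/bin/ls")
opcodes = Opcodes(bfd)
print("%s / machine %d" % (opcodes.architecture_name, opcodes.machine))
```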
Groundworkstech/pybfd
pybfd/opcodes.py
Opcodes.initialize_non_bfd
def initialize_non_bfd(self, architecture=None, machine=None, endian=ENDIAN_UNKNOWN): """Initialize underlying libOpcodes library not using BFD.""" if None in [architecture, machine, endian]: return self.architecture = architecture self.machine = machine self.endian = endian
python
def initialize_non_bfd(self, architecture=None, machine=None, endian=ENDIAN_UNKNOWN): """Initialize underlying libOpcodes library not using BFD.""" if None in [architecture, machine, endian]: return self.architecture = architecture self.machine = machine self.endian = endian
[ "def", "initialize_non_bfd", "(", "self", ",", "architecture", "=", "None", ",", "machine", "=", "None", ",", "endian", "=", "ENDIAN_UNKNOWN", ")", ":", "if", "None", "in", "[", "architecture", ",", "machine", ",", "endian", "]", ":", "return", "self", ".", "architecture", "=", "architecture", "self", ".", "machine", "=", "machine", "self", ".", "endian", "=", "endian" ]
Initialize underlying libOpcodes library without using BFD.
[ "Initialize", "underlying", "libOpcodes", "library", "not", "using", "BFD", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L102-L111
train
238,577
Groundworkstech/pybfd
pybfd/opcodes.py
Opcodes.initialize_smart_disassemble
def initialize_smart_disassemble(self, data, start_address=0): """ Set the binary buffer to disassemble with other related information ready for an instruction by instruction disassembly session. """ _opcodes.initialize_smart_disassemble( self._ptr, data, start_address)
python
def initialize_smart_disassemble(self, data, start_address=0): """ Set the binary buffer to disassemble with other related information ready for an instruction by instruction disassembly session. """ _opcodes.initialize_smart_disassemble( self._ptr, data, start_address)
[ "def", "initialize_smart_disassemble", "(", "self", ",", "data", ",", "start_address", "=", "0", ")", ":", "_opcodes", ".", "initialize_smart_disassemble", "(", "self", ".", "_ptr", ",", "data", ",", "start_address", ")" ]
Set the binary buffer to disassemble with other related information ready for an instruction by instruction disassembly session.
[ "Set", "the", "binary", "buffer", "to", "disassemble", "with", "other", "related", "information", "ready", "for", "an", "instruction", "by", "instruction", "disassembly", "session", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L113-L120
train
238,578
Groundworkstech/pybfd
pybfd/opcodes.py
Opcodes.print_single_instruction_callback
def print_single_instruction_callback(self, address, size, branch_delay_insn, insn_type, target, target2, disassembly): """ Callback on each disassembled instruction to print its information. """ print "0x%X SZ=%d BD=%d IT=%d\t%s" % \ (address, size, branch_delay_insn, insn_type, disassembly) return PYBFD_DISASM_CONTINUE
python
def print_single_instruction_callback(self, address, size, branch_delay_insn, insn_type, target, target2, disassembly): """ Callback on each disassembled instruction to print its information. """ print "0x%X SZ=%d BD=%d IT=%d\t%s" % \ (address, size, branch_delay_insn, insn_type, disassembly) return PYBFD_DISASM_CONTINUE
[ "def", "print_single_instruction_callback", "(", "self", ",", "address", ",", "size", ",", "branch_delay_insn", ",", "insn_type", ",", "target", ",", "target2", ",", "disassembly", ")", ":", "print", "\"0x%X SZ=%d BD=%d IT=%d\\t%s\"", "%", "(", "address", ",", "size", ",", "branch_delay_insn", ",", "insn_type", ",", "disassembly", ")", "return", "PYBFD_DISASM_CONTINUE" ]
Callback on each disassembled instruction to print its information.
[ "Callack", "on", "each", "disassembled", "instruction", "to", "print", "its", "information", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L131-L140
train
238,579
Groundworkstech/pybfd
pybfd/opcodes.py
Opcodes.disassemble
def disassemble(self, data, start_address=0): """ Return a list containing the virtual memory address, instruction length and disassembly code for the given binary buffer. """ return _opcodes.disassemble(self._ptr, data, start_address)
python
def disassemble(self, data, start_address=0): """ Return a list containing the virtual memory address, instruction length and disassembly code for the given binary buffer. """ return _opcodes.disassemble(self._ptr, data, start_address)
[ "def", "disassemble", "(", "self", ",", "data", ",", "start_address", "=", "0", ")", ":", "return", "_opcodes", ".", "disassemble", "(", "self", ".", "_ptr", ",", "data", ",", "start_address", ")" ]
Return a list containing the virtual memory address, instruction length and disassembly code for the given binary buffer.
[ "Return", "a", "list", "containing", "the", "virtual", "memory", "address", "instruction", "length", "and", "disassembly", "code", "for", "the", "given", "binary", "buffer", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/opcodes.py#L142-L148
train
238,580
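disassemble above returns (vma, size, disassembly) triples for a raw buffer, as the main() test case earlier also shows. A sketch tying it to a section pulled from a Bfd; the file and section name are assumptions:

```python
# End-to-end sketch: feed a section's bytes to disassemble() and walk the
# (vma, size, text) triples. /bin/ls and .text are assumptions.
from pybfd.bfd import Bfd
from pybfd.opcodes import Opcodes

bfd = Bfd("/bin/ls")
opcodes = Opcodes(bfd)
section = bfd.sections[".text"]
for vma, size, text in opcodes.disassemble(section.content, section.vma):
    print("0x%X (size=%d)\t%s" % (vma, size, text))
```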
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.open
def open(self, _file, target=DEFAULT_TARGET): """ Open the existing file for reading. @param _file : A filename of file descriptor. @param target: A user-specific BFD target name. @return : None """ # Close any existing BFD structure instance. self.close() # # STEP 1. Open the BFD pointer. # # Determine if the user passed a file-descriptor or a _file and # proceed accordingly. if type(_file) is FileType: # The user specified a file descriptor. filename = _file.name if islink(filename): raise BfdException("Symlinks file-descriptors are not valid") try: self._ptr = _bfd.fdopenr(filename, target, dup(_file.fileno())) except Exception, err: raise BfdException( "Unable to open file-descriptor %s : %s" % (filename, err)) elif type(_file) is StringType: # The user spcified a filaname so first check if file exists. filename = _file try: with open(_file): pass except IOError: raise BfdException("File %s does not exist." % filename) # # Proceed to open the specified file and create a new BFD. # try: self._ptr = _bfd.openr(filename, target) except (TypeError, IOError), err: raise BfdException( "Unable to open file %s : %s" % (filename, err)) elif type(_file) is IntType: # The user specified an already-open BFD pointer so we avoid any # further open operation and move on to file format recognition. self._ptr = _file else: raise BfdException( "Invalid file type specified for open operation (%r)" % _file) # # STEP 2. Determine file format of the BFD. # # Now that the BFD is open we'll proceed to determine its file format. # We'll use the objdump logic to determine it and raise an error in # case we were unable to get it right. # try: # Type opening it as an archieve and if it success then check # subfiles. if _bfd.check_format(self._ptr, BfdFormat.ARCHIVE): # Set current format and store the inner file list. self.file_format = BfdFormat.ARCHIVE self.__populate_archive_files() else: # DO NOT USE bfd_check_format_matches() becuase its not tested. # An implementation example if on objdump.c at function # display_bfd(). if _bfd.check_format(self._ptr, BfdFormat.OBJECT): self.file_format = BfdFormat.OBJECT elif _bfd.check_format(self._ptr, BfdFormat.CORE): self.file_format = BfdFormat.CORE else: pass raise BfdException(_bfd.get_last_error_message()) except TypeError, err: raise BfdException( "Unable to initialize file format : %s" % err) # # STEP 3. Extract inner sections and symbolic information. # if self._ptr is not None: # If the file is a valid BFD file format but not an archive then # get its sections and symbolic information (if any). if self.file_format in [BfdFormat.OBJECT, BfdFormat.CORE]: self.__populate_sections() self.__populate_symbols()
python
def open(self, _file, target=DEFAULT_TARGET): """ Open the existing file for reading. @param _file : A filename of file descriptor. @param target: A user-specific BFD target name. @return : None """ # Close any existing BFD structure instance. self.close() # # STEP 1. Open the BFD pointer. # # Determine if the user passed a file-descriptor or a _file and # proceed accordingly. if type(_file) is FileType: # The user specified a file descriptor. filename = _file.name if islink(filename): raise BfdException("Symlinks file-descriptors are not valid") try: self._ptr = _bfd.fdopenr(filename, target, dup(_file.fileno())) except Exception, err: raise BfdException( "Unable to open file-descriptor %s : %s" % (filename, err)) elif type(_file) is StringType: # The user spcified a filaname so first check if file exists. filename = _file try: with open(_file): pass except IOError: raise BfdException("File %s does not exist." % filename) # # Proceed to open the specified file and create a new BFD. # try: self._ptr = _bfd.openr(filename, target) except (TypeError, IOError), err: raise BfdException( "Unable to open file %s : %s" % (filename, err)) elif type(_file) is IntType: # The user specified an already-open BFD pointer so we avoid any # further open operation and move on to file format recognition. self._ptr = _file else: raise BfdException( "Invalid file type specified for open operation (%r)" % _file) # # STEP 2. Determine file format of the BFD. # # Now that the BFD is open we'll proceed to determine its file format. # We'll use the objdump logic to determine it and raise an error in # case we were unable to get it right. # try: # Type opening it as an archieve and if it success then check # subfiles. if _bfd.check_format(self._ptr, BfdFormat.ARCHIVE): # Set current format and store the inner file list. self.file_format = BfdFormat.ARCHIVE self.__populate_archive_files() else: # DO NOT USE bfd_check_format_matches() becuase its not tested. # An implementation example if on objdump.c at function # display_bfd(). if _bfd.check_format(self._ptr, BfdFormat.OBJECT): self.file_format = BfdFormat.OBJECT elif _bfd.check_format(self._ptr, BfdFormat.CORE): self.file_format = BfdFormat.CORE else: pass raise BfdException(_bfd.get_last_error_message()) except TypeError, err: raise BfdException( "Unable to initialize file format : %s" % err) # # STEP 3. Extract inner sections and symbolic information. # if self._ptr is not None: # If the file is a valid BFD file format but not an archive then # get its sections and symbolic information (if any). if self.file_format in [BfdFormat.OBJECT, BfdFormat.CORE]: self.__populate_sections() self.__populate_symbols()
[ "def", "open", "(", "self", ",", "_file", ",", "target", "=", "DEFAULT_TARGET", ")", ":", "# Close any existing BFD structure instance. ", "self", ".", "close", "(", ")", "#", "# STEP 1. Open the BFD pointer.", "#", "# Determine if the user passed a file-descriptor or a _file and", "# proceed accordingly.", "if", "type", "(", "_file", ")", "is", "FileType", ":", "# The user specified a file descriptor.", "filename", "=", "_file", ".", "name", "if", "islink", "(", "filename", ")", ":", "raise", "BfdException", "(", "\"Symlinks file-descriptors are not valid\"", ")", "try", ":", "self", ".", "_ptr", "=", "_bfd", ".", "fdopenr", "(", "filename", ",", "target", ",", "dup", "(", "_file", ".", "fileno", "(", ")", ")", ")", "except", "Exception", ",", "err", ":", "raise", "BfdException", "(", "\"Unable to open file-descriptor %s : %s\"", "%", "(", "filename", ",", "err", ")", ")", "elif", "type", "(", "_file", ")", "is", "StringType", ":", "# The user spcified a filaname so first check if file exists.", "filename", "=", "_file", "try", ":", "with", "open", "(", "_file", ")", ":", "pass", "except", "IOError", ":", "raise", "BfdException", "(", "\"File %s does not exist.\"", "%", "filename", ")", "#", "# Proceed to open the specified file and create a new BFD.", "#", "try", ":", "self", ".", "_ptr", "=", "_bfd", ".", "openr", "(", "filename", ",", "target", ")", "except", "(", "TypeError", ",", "IOError", ")", ",", "err", ":", "raise", "BfdException", "(", "\"Unable to open file %s : %s\"", "%", "(", "filename", ",", "err", ")", ")", "elif", "type", "(", "_file", ")", "is", "IntType", ":", "# The user specified an already-open BFD pointer so we avoid any", "# further open operation and move on to file format recognition.", "self", ".", "_ptr", "=", "_file", "else", ":", "raise", "BfdException", "(", "\"Invalid file type specified for open operation (%r)\"", "%", "_file", ")", "#", "# STEP 2. Determine file format of the BFD.", "#", "# Now that the BFD is open we'll proceed to determine its file format.", "# We'll use the objdump logic to determine it and raise an error in", "# case we were unable to get it right.", "#", "try", ":", "# Type opening it as an archieve and if it success then check", "# subfiles.", "if", "_bfd", ".", "check_format", "(", "self", ".", "_ptr", ",", "BfdFormat", ".", "ARCHIVE", ")", ":", "# Set current format and store the inner file list.", "self", ".", "file_format", "=", "BfdFormat", ".", "ARCHIVE", "self", ".", "__populate_archive_files", "(", ")", "else", ":", "# DO NOT USE bfd_check_format_matches() becuase its not tested.", "# An implementation example if on objdump.c at function", "# display_bfd().", "if", "_bfd", ".", "check_format", "(", "self", ".", "_ptr", ",", "BfdFormat", ".", "OBJECT", ")", ":", "self", ".", "file_format", "=", "BfdFormat", ".", "OBJECT", "elif", "_bfd", ".", "check_format", "(", "self", ".", "_ptr", ",", "BfdFormat", ".", "CORE", ")", ":", "self", ".", "file_format", "=", "BfdFormat", ".", "CORE", "else", ":", "pass", "raise", "BfdException", "(", "_bfd", ".", "get_last_error_message", "(", ")", ")", "except", "TypeError", ",", "err", ":", "raise", "BfdException", "(", "\"Unable to initialize file format : %s\"", "%", "err", ")", "#", "# STEP 3. 
Extract inner sections and symbolic information.", "#", "if", "self", ".", "_ptr", "is", "not", "None", ":", "# If the file is a valid BFD file format but not an archive then", "# get its sections and symbolic information (if any).", "if", "self", ".", "file_format", "in", "[", "BfdFormat", ".", "OBJECT", ",", "BfdFormat", ".", "CORE", "]", ":", "self", ".", "__populate_sections", "(", ")", "self", ".", "__populate_symbols", "(", ")" ]
Open the existing file for reading. @param _file : A filename or file descriptor. @param target: A user-specific BFD target name. @return : None
[ "Open", "the", "existing", "file", "for", "reading", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L118-L215
train
238,581
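open() above accepts a filename, an open file object, or a raw BFD pointer, classifies the format (archive, object, core), and populates sections and symbols as a side effect. A hedged usage sketch with the error path; the no-argument constructor is an assumption:

```python
# Usage sketch for Bfd.open(); anything libbfd cannot classify surfaces
# as BfdException. Python 2 except syntax, matching the library.
from pybfd.bfd import Bfd, BfdException

bfd = Bfd()
try:
    bfd.open("/bin/ls")
    print("format: %s" % bfd.file_format_name)
except BfdException, err:
    print("not a BFD-recognized file: %s" % err)
```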
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.__populate_archive_files
def __populate_archive_files(self): """Store the list of files inside an archive file.""" self.archive_files = [] for _ptr in _bfd.archive_list_files(self._ptr): try: self.archive_files.append(Bfd(_ptr)) except BfdException, err: #print "Error populating archive file list : %s" % err #print_exc() pass
python
def __populate_archive_files(self): """Store the list of files inside an archive file.""" self.archive_files = [] for _ptr in _bfd.archive_list_files(self._ptr): try: self.archive_files.append(Bfd(_ptr)) except BfdException, err: #print "Error populating archive file list : %s" % err #print_exc() pass
[ "def", "__populate_archive_files", "(", "self", ")", ":", "self", ".", "archive_files", "=", "[", "]", "for", "_ptr", "in", "_bfd", ".", "archive_list_files", "(", "self", ".", "_ptr", ")", ":", "try", ":", "self", ".", "archive_files", ".", "append", "(", "Bfd", "(", "_ptr", ")", ")", "except", "BfdException", ",", "err", ":", "#print \"Error populating archive file list : %s\" % err", "#print_exc()", "pass" ]
Store the list of files inside an archive file.
[ "Store", "the", "list", "of", "files", "inside", "an", "archive", "file", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L217-L226
train
238,582
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.archive_filenames
def archive_filenames(self): """Return the list of files inside an archive file.""" try: return _bfd.archive_list_filenames(self._ptr) except TypeError, err: raise BfdException(err)
python
def archive_filenames(self): """Return the list of files inside an archive file.""" try: return _bfd.archive_list_filenames(self._ptr) except TypeError, err: raise BfdException(err)
[ "def", "archive_filenames", "(", "self", ")", ":", "try", ":", "return", "_bfd", ".", "archive_list_filenames", "(", "self", ".", "_ptr", ")", "except", "TypeError", ",", "err", ":", "raise", "BfdException", "(", "err", ")" ]
Return the list of files inside an archive file.
[ "Return", "the", "list", "of", "files", "inside", "an", "archive", "file", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L239-L244
train
238,583
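For archives, the filenames come straight from libbfd, while archive_files (populated during open, as shown earlier) holds nested Bfd instances. A sketch; the .a path and the filename-taking constructor are assumptions:

```python
# Sketch of walking an archive BFD; the path is an assumption.
from pybfd.bfd import Bfd

ar = Bfd("/usr/lib/libfoo.a")
print(ar.archive_filenames)              # plain member names
for member in ar.archive_files:          # nested Bfd objects
    print("%s: %s" % (member.filename, member.file_format_name))
```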
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.file_format_name
def file_format_name(self): """Return the current format name of the open bfd.""" try: return BfdFormatNamesLong[self.file_format] except IndexError, err: raise BfdException("Invalid format specified (%d)" % self.file_format)
python
def file_format_name(self): """Return the current format name of the open bfd.""" try: return BfdFormatNamesLong[self.file_format] except IndexError, err: raise BfdException("Invalid format specified (%d)" % self.file_format)
[ "def", "file_format_name", "(", "self", ")", ":", "try", ":", "return", "BfdFormatNamesLong", "[", "self", ".", "file_format", "]", "except", "IndexError", ",", "err", ":", "raise", "BfdException", "(", "\"Invalid format specified (%d)\"", "%", "self", ".", "file_format", ")" ]
Return the current format name of the open bfd.
[ "Return", "the", "current", "format", "name", "of", "the", "open", "bdf", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L272-L277
train
238,584
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.__populate_sections
def __populate_sections(self): """Get a list of the section present in the bfd to populate our internal list. """ if not self._ptr: raise BfdException("BFD not initialized") for section in _bfd.get_sections_list(self._ptr): try: bfd_section = BfdSection(self._ptr, section) self._sections[bfd_section.name] = bfd_section except BfdSectionException, err: #print "Exception during section pasing : %s" % err pass
python
def __populate_sections(self): """Get a list of the section present in the bfd to populate our internal list. """ if not self._ptr: raise BfdException("BFD not initialized") for section in _bfd.get_sections_list(self._ptr): try: bfd_section = BfdSection(self._ptr, section) self._sections[bfd_section.name] = bfd_section except BfdSectionException, err: #print "Exception during section pasing : %s" % err pass
[ "def", "__populate_sections", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "for", "section", "in", "_bfd", ".", "get_sections_list", "(", "self", ".", "_ptr", ")", ":", "try", ":", "bfd_section", "=", "BfdSection", "(", "self", ".", "_ptr", ",", "section", ")", "self", ".", "_sections", "[", "bfd_section", ".", "name", "]", "=", "bfd_section", "except", "BfdSectionException", ",", "err", ":", "#print \"Exception during section pasing : %s\" % err", "pass" ]
Get a list of the sections present in the bfd to populate our internal list.
[ "Get", "a", "list", "of", "the", "section", "present", "in", "the", "bfd", "to", "populate", "our", "internal", "list", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L295-L309
train
238,585
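__populate_sections above fills a name-keyed dict of BfdSection objects, which the rest of the class reads through the sections attribute. A quick listing sketch under the same assumptions as the earlier examples:

```python
# Listing sketch over the name-keyed section table populated above.
from pybfd.bfd import Bfd

bfd = Bfd("/bin/ls")
for name, sec in bfd.sections.items():
    print("%-20s vma=0x%X size=%d" % (name, sec.vma, sec.size))
```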
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.__populate_symbols
def __populate_symbols(self): """Get a list of the symbols present in the bfd to populate our internal list. """ if not self._ptr: raise BfdException("BFD not initialized") try: symbols = _bfd.get_symbols(self._ptr) # Temporary dictionary ordered by section index. This is necessary # because the symbolic information return the section index it belongs # to. sections = {} for section in self.sections: sections[self.sections[section].index] = self.sections[section] for symbol in symbols: # Extract each field for further processing. symbol_section_index = symbol[0] symbol_name = symbol[1] symbol_value = symbol[2] symbol_flags = symbol[3] # Get the effective address of the current symbol. symbol_flags = tuple( [f for f in SYMBOL_FLAGS_LIST if symbol_flags & f == f] ) # Create a new symbol instance to hold symbolic information. new_symbol = Symbol( sections.get(symbol_section_index, None), symbol_name, symbol_value, symbol_flags) if new_symbol.section is None: continue symbol_address = new_symbol.section.vma + new_symbol.value #if new_symbol.flags in \ # [SymbolFlags.LOCAL , SymbolFlags.GLOBAL , SymbolFlags.EXPORT]: # symbol_address = new_symbol.section.vma + new_symbol.value #else: # # TODO: Enhance this! # # Discard any other symbol information. # continue self._symbols[symbol_address] = new_symbol del sections except BfdSectionException, err: raise BfdException("Exception on symbolic ifnormation parsing.")
python
def __populate_symbols(self): """Get a list of the symbols present in the bfd to populate our internal list. """ if not self._ptr: raise BfdException("BFD not initialized") try: symbols = _bfd.get_symbols(self._ptr) # Temporary dictionary ordered by section index. This is necessary # because the symbolic information return the section index it belongs # to. sections = {} for section in self.sections: sections[self.sections[section].index] = self.sections[section] for symbol in symbols: # Extract each field for further processing. symbol_section_index = symbol[0] symbol_name = symbol[1] symbol_value = symbol[2] symbol_flags = symbol[3] # Get the effective address of the current symbol. symbol_flags = tuple( [f for f in SYMBOL_FLAGS_LIST if symbol_flags & f == f] ) # Create a new symbol instance to hold symbolic information. new_symbol = Symbol( sections.get(symbol_section_index, None), symbol_name, symbol_value, symbol_flags) if new_symbol.section is None: continue symbol_address = new_symbol.section.vma + new_symbol.value #if new_symbol.flags in \ # [SymbolFlags.LOCAL , SymbolFlags.GLOBAL , SymbolFlags.EXPORT]: # symbol_address = new_symbol.section.vma + new_symbol.value #else: # # TODO: Enhance this! # # Discard any other symbol information. # continue self._symbols[symbol_address] = new_symbol del sections except BfdSectionException, err: raise BfdException("Exception on symbolic ifnormation parsing.")
[ "def", "__populate_symbols", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "try", ":", "symbols", "=", "_bfd", ".", "get_symbols", "(", "self", ".", "_ptr", ")", "# Temporary dictionary ordered by section index. This is necessary", "# because the symbolic information return the section index it belongs", "# to.", "sections", "=", "{", "}", "for", "section", "in", "self", ".", "sections", ":", "sections", "[", "self", ".", "sections", "[", "section", "]", ".", "index", "]", "=", "self", ".", "sections", "[", "section", "]", "for", "symbol", "in", "symbols", ":", "# Extract each field for further processing.", "symbol_section_index", "=", "symbol", "[", "0", "]", "symbol_name", "=", "symbol", "[", "1", "]", "symbol_value", "=", "symbol", "[", "2", "]", "symbol_flags", "=", "symbol", "[", "3", "]", "# Get the effective address of the current symbol.", "symbol_flags", "=", "tuple", "(", "[", "f", "for", "f", "in", "SYMBOL_FLAGS_LIST", "if", "symbol_flags", "&", "f", "==", "f", "]", ")", "# Create a new symbol instance to hold symbolic information.", "new_symbol", "=", "Symbol", "(", "sections", ".", "get", "(", "symbol_section_index", ",", "None", ")", ",", "symbol_name", ",", "symbol_value", ",", "symbol_flags", ")", "if", "new_symbol", ".", "section", "is", "None", ":", "continue", "symbol_address", "=", "new_symbol", ".", "section", ".", "vma", "+", "new_symbol", ".", "value", "#if new_symbol.flags in \\", "# [SymbolFlags.LOCAL , SymbolFlags.GLOBAL , SymbolFlags.EXPORT]:", "# symbol_address = new_symbol.section.vma + new_symbol.value", "#else:", "# # TODO: Enhance this!", "# # Discard any other symbol information.", "# continue", "self", ".", "_symbols", "[", "symbol_address", "]", "=", "new_symbol", "del", "sections", "except", "BfdSectionException", ",", "err", ":", "raise", "BfdException", "(", "\"Exception on symbolic ifnormation parsing.\"", ")" ]
Get a list of the symbols present in the bfd to populate our internal list.
[ "Get", "a", "list", "of", "the", "symbols", "present", "in", "the", "bfd", "to", "populate", "our", "internal", "list", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L311-L362
train
238,586
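__populate_symbols above keys each Symbol by its effective address (section vma + value) and expands the raw flag word into a tuple of matched constants. A lookup sketch, assuming the table is exposed as a symbols property and that Symbol carries name/flags attributes:

```python
# Lookup sketch over the address-keyed symbol table populated above;
# the `symbols` property and the Symbol attribute names are assumptions.
from pybfd.bfd import Bfd

bfd = Bfd("/bin/ls")
for address in sorted(bfd.symbols):
    sym = bfd.symbols[address]
    print("0x%08X %s %r" % (address, sym.name, sym.flags))
```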
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.close
def close(self): """Close any existing BFD structure before open a new one.""" if self._ptr: #try: # # Release inner BFD files in case we're an archive BFD. # if self.is_archive: # [inner_bfd.close() for inner_bfd in self.archive_files] #except TypeError, err: # pass try: _bfd.close(self._ptr) except TypeError, err: raise BfdException("Unable to close bfd (%s)" % err) finally: self._ptr = None
python
def close(self): """Close any existing BFD structure before open a new one.""" if self._ptr: #try: # # Release inner BFD files in case we're an archive BFD. # if self.is_archive: # [inner_bfd.close() for inner_bfd in self.archive_files] #except TypeError, err: # pass try: _bfd.close(self._ptr) except TypeError, err: raise BfdException("Unable to close bfd (%s)" % err) finally: self._ptr = None
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_ptr", ":", "#try:", "# # Release inner BFD files in case we're an archive BFD.", "# if self.is_archive:", "# [inner_bfd.close() for inner_bfd in self.archive_files]", "#except TypeError, err:", "# pass", "try", ":", "_bfd", ".", "close", "(", "self", ".", "_ptr", ")", "except", "TypeError", ",", "err", ":", "raise", "BfdException", "(", "\"Unable to close bfd (%s)\"", "%", "err", ")", "finally", ":", "self", ".", "_ptr", "=", "None" ]
Close any existing BFD structure before open a new one.
[ "Close", "any", "existing", "BFD", "structure", "before", "open", "a", "new", "one", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L364-L379
train
238,587
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.filename
def filename(self): """Return the filename of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILENAME)
python
def filename(self): """Return the filename of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILENAME)
[ "def", "filename", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "FILENAME", ")" ]
Return the filename of the BFD file being processed.
[ "Return", "the", "filename", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L390-L395
train
238,588
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.cacheable
def cacheable(self): """Return the cacheable attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.CACHEABLE)
python
def cacheable(self): """Return the cacheable attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.CACHEABLE)
[ "def", "cacheable", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "CACHEABLE", ")" ]
Return the cacheable attribute of the BFD file being processed.
[ "Return", "the", "cacheable", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L398-L403
train
238,589
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.format
def format(self): """Return the format attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FORMAT)
python
def format(self): """Return the format attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FORMAT)
[ "def", "format", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "FORMAT", ")" ]
Return the format attribute of the BFD file being processed.
[ "Return", "the", "format", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L406-L411
train
238,590
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.target
def target(self): """Return the target of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.TARGET)
python
def target(self): """Return the target of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.TARGET)
[ "def", "target", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "TARGET", ")" ]
Return the target of the BFD file being processed.
[ "Return", "the", "target", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L414-L419
train
238,591
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.machine
def machine(self): """Return the flavour attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FLAVOUR)
python
def machine(self): """Return the flavour attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FLAVOUR)
[ "def", "machine", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "FLAVOUR", ")" ]
Return the flavour attribute of the BFD file being processed.
[ "Return", "the", "flavour", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L435-L440
train
238,592
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.family_coff
def family_coff(self): """Return the family_coff attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FAMILY_COFF)
python
def family_coff(self): """Return the family_coff attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FAMILY_COFF)
[ "def", "family_coff", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "FAMILY_COFF", ")" ]
Return the family_coff attribute of the BFD file being processed.
[ "Return", "the", "family_coff", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L450-L455
train
238,593
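
The four records above (format, target, machine, and family_coff) all expose read-only attributes on the Bfd wrapper and share the same guard: attribute access raises BfdException when the underlying BFD pointer was never initialized. A minimal usage sketch follows; the Bfd(path) constructor call, the /bin/ls path, property-style (no-parentheses) access, and the printed example values are illustrative assumptions, not taken from these records:

from pybfd.bfd import Bfd, BfdException

try:
    # Assumption: Bfd() accepts a path and opens/initializes the BFD handle.
    bfd = Bfd("/bin/ls")

    # Assumption: these accessors are exposed as properties, so they are
    # read without parentheses, matching the docstrings' word "attribute".
    print(bfd.format)       # object-file format constant
    print(bfd.target)       # target name string, e.g. "elf64-x86-64" (assumed value)
    print(bfd.machine)      # despite its name, the record's code returns the FLAVOUR attribute
    print(bfd.family_coff)  # truthy when the target belongs to the COFF family
except BfdException as exc:
    # Raised by every accessor above when the BFD handle is not initialized.
    print("BFD attribute access failed:", exc)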
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.big_endian
def big_endian(self): """Return the big endian attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_BIG_ENDIAN)
python
def big_endian(self): """Return the big endian attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_BIG_ENDIAN)
[ "def", "big_endian", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "IS_BIG_ENDIAN", ")" ]
Return the big endian attribute of the BFD file being processed.
[ "Return", "the", "big", "endian", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L466-L471
train
238,594
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.little_endian
def little_endian(self): """ Return the little_endian attribute of the BFD file being processed. """ if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_LITTLE_ENDIAN)
python
def little_endian(self): """ Return the little_endian attribute of the BFD file being processed. """ if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_LITTLE_ENDIAN)
[ "def", "little_endian", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "IS_LITTLE_ENDIAN", ")" ]
Return the little_endian attribute of the BFD file being processed.
[ "Return", "the", "little_endian", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L474-L481
train
238,595
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.header_big_endian
def header_big_endian(self): """ Return the header_big_endian attribute of the BFD file being processed. """ if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute( self._ptr, BfdAttributes.HEADER_BIG_ENDIAN)
python
def header_big_endian(self): """ Return the header_big_endian attribute of the BFD file being processed. """ if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute( self._ptr, BfdAttributes.HEADER_BIG_ENDIAN)
[ "def", "header_big_endian", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "HEADER_BIG_ENDIAN", ")" ]
Return the header_big_endian attribute of the BFD file being processed.
[ "Return", "the", "header_big_endian", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L484-L493
train
238,596
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.header_little_endian
def header_little_endian(self): """Return the header_little_endian attribute of the BFD file being processed. """ if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute( self._ptr, BfdAttributes.HEADER_LITTLE_ENDIAN)
python
def header_little_endian(self): """Return the header_little_endian attribute of the BFD file being processed. """ if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute( self._ptr, BfdAttributes.HEADER_LITTLE_ENDIAN)
[ "def", "header_little_endian", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "HEADER_LITTLE_ENDIAN", ")" ]
Return the header_little_endian attribute of the BFD file being processed.
[ "Return", "the", "header_little_endian", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L496-L505
train
238,597
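
The four endianness records above (big_endian, little_endian, header_big_endian, header_little_endian) follow exactly the same guarded pattern. A short sketch of combining them, again assuming the path-based constructor and property-style access from the earlier sketch:

from pybfd.bfd import Bfd

bfd = Bfd("/bin/ls")  # assumption: path-based constructor, as above

# Data byte order of the file being processed.
if bfd.big_endian:
    data_order = "big"
elif bfd.little_endian:
    data_order = "little"
else:
    data_order = "unknown"

# Header byte order, which some targets report separately from the data order.
if bfd.header_big_endian:
    header_order = "big"
elif bfd.header_little_endian:
    header_order = "little"
else:
    header_order = "unknown"

print("data endianness:", data_order)
print("header endianness:", header_order)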
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.file_flags
def file_flags(self): """Return the file flags attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILE_FLAGS)
python
def file_flags(self): """Return the file flags attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILE_FLAGS)
[ "def", "file_flags", "(", "self", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "get_bfd_attribute", "(", "self", ".", "_ptr", ",", "BfdAttributes", ".", "FILE_FLAGS", ")" ]
Return the file flags attribute of the BFD file being processed.
[ "Return", "the", "file", "flags", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L508-L513
train
238,598
Groundworkstech/pybfd
pybfd/bfd.py
Bfd.file_flags
def file_flags(self, _file_flags): """Set the new file flags attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.set_file_flags(self._ptr, _file_flags)
python
def file_flags(self, _file_flags): """Set the new file flags attribute of the BFD file being processed.""" if not self._ptr: raise BfdException("BFD not initialized") return _bfd.set_file_flags(self._ptr, _file_flags)
[ "def", "file_flags", "(", "self", ",", "_file_flags", ")", ":", "if", "not", "self", ".", "_ptr", ":", "raise", "BfdException", "(", "\"BFD not initialized\"", ")", "return", "_bfd", ".", "set_file_flags", "(", "self", ".", "_ptr", ",", "_file_flags", ")" ]
Set the new file flags attribute of the BFD file being processed.
[ "Set", "the", "new", "file", "flags", "attribute", "of", "the", "BFD", "file", "being", "processed", "." ]
9e722435929b4ad52212043a6f1e9e9ce60b5d72
https://github.com/Groundworkstech/pybfd/blob/9e722435929b4ad52212043a6f1e9e9ce60b5d72/pybfd/bfd.py#L516-L521
train
238,599
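
The last two records form a getter/setter pair for file_flags: the getter reads the flag bitmask through get_bfd_attribute and the setter forwards a new bitmask to set_file_flags. A hedged round-trip sketch, reusing the assumed path-based constructor; no named flag constants are used because the records do not show which ones the bindings expose:

from pybfd.bfd import Bfd

bfd = Bfd("/bin/ls")  # assumption: path-based constructor, as above

flags = bfd.file_flags             # read the current flag bitmask
print("file flags: 0x%x" % flags)

# Assumption: the setter record is exposed as a property setter, so plain
# assignment forwards the bitmask to set_file_flags. Writing back the
# unmodified value should be a no-op; a real caller would OR in or mask
# out specific BFD flag bits here.
bfd.file_flags = flags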