text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def tokenise_word(string, strict=False, replace=False, tones=False, unknown=False):
    """
    Tokenise the string into a list of tokens, raising ValueError if this
    cannot be done (relatively) unambiguously.  The string is assumed to be
    a single word, i.e. it should not contain whitespace.

    With strict=False, non-standard letters and diacritics are allowed, as
    are word-initial diacritic-only tokens (e.g. pre-aspiration).  With
    replace=True, some common non-IPA symbols are first replaced with their
    IPA counterparts.  With tones=False, tone symbols are ignored; with
    unknown=False, symbols that cannot be classified into a relevant
    category are ignored.

    Helper for tokenise(string, ..).
    """
    string = normalise(string)
    if replace:
        string = ipa.replace_substitutes(string)

    result = []

    for pos, symbol in enumerate(string):
        if ipa.is_letter(symbol, strict):
            # A letter joins the previous token when linked by a tie bar.
            if result and ipa.is_tie_bar(string[pos-1]):
                result[-1] += symbol
            else:
                result.append(symbol)

        elif ipa.is_tie_bar(symbol):
            if not result:
                raise ValueError('The string starts with a tie bar: {}'.format(string))
            result[-1] += symbol

        elif ipa.is_diacritic(symbol, strict) or ipa.is_length(symbol):
            if result:
                result[-1] += symbol
            elif strict:
                raise ValueError('The string starts with a diacritic: {}'.format(string))
            else:
                # Lenient mode: allow a word-initial diacritic-only token.
                result.append(symbol)

        elif tones and ipa.is_tone(symbol, strict):
            if unicodedata.combining(symbol):
                if not result:
                    raise ValueError('The string starts with an accent mark: {}'.format(string))
                result[-1] += symbol
            elif result and ipa.is_tone(result[-1][-1], strict):
                # Consecutive spacing tone letters form one contour token.
                result[-1] += symbol
            else:
                result.append(symbol)

        elif ipa.is_suprasegmental(symbol, strict):
            pass

        elif strict:
            raise ValueError('Unrecognised char: {} ({})'.format(symbol, unicodedata.name(symbol)))

        elif unknown:
            result.append(symbol)

    return result
[ "def", "tokenise_word", "(", "string", ",", "strict", "=", "False", ",", "replace", "=", "False", ",", "tones", "=", "False", ",", "unknown", "=", "False", ")", ":", "string", "=", "normalise", "(", "string", ")", "if", "replace", ":", "string", "=", "ipa", ".", "replace_substitutes", "(", "string", ")", "tokens", "=", "[", "]", "for", "index", ",", "char", "in", "enumerate", "(", "string", ")", ":", "if", "ipa", ".", "is_letter", "(", "char", ",", "strict", ")", ":", "if", "tokens", "and", "ipa", ".", "is_tie_bar", "(", "string", "[", "index", "-", "1", "]", ")", ":", "tokens", "[", "-", "1", "]", "+=", "char", "else", ":", "tokens", ".", "append", "(", "char", ")", "elif", "ipa", ".", "is_tie_bar", "(", "char", ")", ":", "if", "not", "tokens", ":", "raise", "ValueError", "(", "'The string starts with a tie bar: {}'", ".", "format", "(", "string", ")", ")", "tokens", "[", "-", "1", "]", "+=", "char", "elif", "ipa", ".", "is_diacritic", "(", "char", ",", "strict", ")", "or", "ipa", ".", "is_length", "(", "char", ")", ":", "if", "tokens", ":", "tokens", "[", "-", "1", "]", "+=", "char", "else", ":", "if", "strict", ":", "raise", "ValueError", "(", "'The string starts with a diacritic: {}'", ".", "format", "(", "string", ")", ")", "else", ":", "tokens", ".", "append", "(", "char", ")", "elif", "tones", "and", "ipa", ".", "is_tone", "(", "char", ",", "strict", ")", ":", "if", "unicodedata", ".", "combining", "(", "char", ")", ":", "if", "not", "tokens", ":", "raise", "ValueError", "(", "'The string starts with an accent mark: {}'", ".", "format", "(", "string", ")", ")", "tokens", "[", "-", "1", "]", "+=", "char", "elif", "tokens", "and", "ipa", ".", "is_tone", "(", "tokens", "[", "-", "1", "]", "[", "-", "1", "]", ",", "strict", ")", ":", "tokens", "[", "-", "1", "]", "+=", "char", "else", ":", "tokens", ".", "append", "(", "char", ")", "elif", "ipa", ".", "is_suprasegmental", "(", "char", ",", "strict", ")", ":", "pass", "else", ":", "if", "strict", ":", "raise", 
"ValueError", "(", "'Unrecognised char: {} ({})'", ".", "format", "(", "char", ",", "unicodedata", ".", "name", "(", "char", ")", ")", ")", "elif", "unknown", ":", "tokens", ".", "append", "(", "char", ")", "else", ":", "pass", "return", "tokens" ]
28.030769
24.461538
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion):
    """Operation: Change Adapter Type (requires DPM mode)."""
    # The HMC implements this operation synchronously.
    assert wait_for_completion is True

    adapter_uri = uri.split('/operations/')[0]
    try:
        adapter = hmc.lookup_by_uri(adapter_uri)
    except KeyError:
        raise InvalidResourceError(method, uri)

    cpc = adapter.manager.parent
    assert cpc.dpm_enabled

    check_required_fields(method, uri, body, ['type'])
    requested_type = body['type']

    # Only FICON adapters support changing the adapter type.
    family = adapter.properties.get('adapter-family', None)
    if family != 'ficon':
        raise BadRequestError(
            method, uri, reason=18,
            message="The adapter type cannot be changed for adapter "
                    "family: %s" % family)

    # Adapters in 'exceptions' status cannot be reconfigured.
    status = adapter.properties.get('status', None)
    if status == 'exceptions':
        raise BadRequestError(
            method, uri, reason=18,
            message="The adapter type cannot be changed for adapter "
                    "status: %s" % status)

    # Validate the requested new type.
    if requested_type not in ('fc', 'fcp', 'not-configured'):
        raise BadRequestError(
            method, uri, reason=8,
            message="Invalid new value for 'type' field: %s" %
                    requested_type)

    # Changing to the type that is already set is rejected.
    current_type = adapter.properties.get('type', None)
    if requested_type == current_type:
        raise BadRequestError(
            method, uri, reason=8,
            message="New value for 'type' field is already set: %s" %
                    requested_type)

    # TODO: Reject if adapter is attached to a partition.

    # Reflect the result of changing the adapter type
    adapter.properties['type'] = requested_type
[ "def", "post", "(", "method", ",", "hmc", ",", "uri", ",", "uri_parms", ",", "body", ",", "logon_required", ",", "wait_for_completion", ")", ":", "assert", "wait_for_completion", "is", "True", "# HMC operation is synchronous", "adapter_uri", "=", "uri", ".", "split", "(", "'/operations/'", ")", "[", "0", "]", "try", ":", "adapter", "=", "hmc", ".", "lookup_by_uri", "(", "adapter_uri", ")", "except", "KeyError", ":", "raise", "InvalidResourceError", "(", "method", ",", "uri", ")", "cpc", "=", "adapter", ".", "manager", ".", "parent", "assert", "cpc", ".", "dpm_enabled", "check_required_fields", "(", "method", ",", "uri", ",", "body", ",", "[", "'type'", "]", ")", "new_adapter_type", "=", "body", "[", "'type'", "]", "# Check the validity of the adapter family", "adapter_family", "=", "adapter", ".", "properties", ".", "get", "(", "'adapter-family'", ",", "None", ")", "if", "adapter_family", "!=", "'ficon'", ":", "raise", "BadRequestError", "(", "method", ",", "uri", ",", "reason", "=", "18", ",", "message", "=", "\"The adapter type cannot be changed for adapter \"", "\"family: %s\"", "%", "adapter_family", ")", "# Check the adapter status", "adapter_status", "=", "adapter", ".", "properties", ".", "get", "(", "'status'", ",", "None", ")", "if", "adapter_status", "==", "'exceptions'", ":", "raise", "BadRequestError", "(", "method", ",", "uri", ",", "reason", "=", "18", ",", "message", "=", "\"The adapter type cannot be changed for adapter \"", "\"status: %s\"", "%", "adapter_status", ")", "# Check the validity of the new adapter type", "if", "new_adapter_type", "not", "in", "[", "'fc'", ",", "'fcp'", ",", "'not-configured'", "]", ":", "raise", "BadRequestError", "(", "method", ",", "uri", ",", "reason", "=", "8", ",", "message", "=", "\"Invalid new value for 'type' field: %s\"", "%", "new_adapter_type", ")", "# Check that the new adapter type is not already set", "adapter_type", "=", "adapter", ".", "properties", ".", "get", "(", "'type'", ",", "None", ")", "if", 
"new_adapter_type", "==", "adapter_type", ":", "raise", "BadRequestError", "(", "method", ",", "uri", ",", "reason", "=", "8", ",", "message", "=", "\"New value for 'type' field is already set: %s\"", "%", "new_adapter_type", ")", "# TODO: Reject if adapter is attached to a partition.", "# Reflect the result of changing the adapter type", "adapter", ".", "properties", "[", "'type'", "]", "=", "new_adapter_type" ]
41.98
16.84
def round(self, value_array):
    """
    Round a one-hot-encoded categorical variable: the entry holding the
    maximum of *value_array* becomes 1 and every other entry becomes 0.

    Assumes an 1x[number of categories] array (one-hot encoding) as input.
    """
    winner = np.argmax(value_array)
    one_hot = np.zeros(value_array.shape)
    one_hot[winner] = 1
    return one_hot
[ "def", "round", "(", "self", ",", "value_array", ")", ":", "rounded_values", "=", "np", ".", "zeros", "(", "value_array", ".", "shape", ")", "rounded_values", "[", "np", ".", "argmax", "(", "value_array", ")", "]", "=", "1", "return", "rounded_values" ]
43.222222
23.444444
def roc_auc(model, X, y=None, ax=None, **kwargs):
    """ROCAUC quick method.

    Receiver Operating Characteristic (ROC) curves measure a classifier's
    predictive quality by visualizing the tradeoff between sensitivity and
    specificity: the true positive rate is plotted on the Y axis against
    the false positive rate on the X axis, per class and on average.  The
    ideal point is the top-left corner (no false positives, all true
    positives).  The area under the curve (AUC) summarizes this
    relationship; higher AUC generally means a better model, though the
    steepness of the curve also matters.

    Parameters
    ----------
    model : the Scikit-Learn estimator
        Should be an instance of a classifier, else the __init__ will
        return an error.

    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values

    ax : the axis to plot the figure on.

    classes : list
        A list of class names for the legend, selected from the target
        vector if not given.  Used for labeling only; must be in the
        correct order to prevent confusion.

    micro : bool, default = True
        Plot the micro-averages ROC curve (sum of all true/false positives
        across classes).  Not defined for binary classifiers exposing only
        a decision_function.

    macro : bool, default = True
        Plot the macro-averages ROC curve (average of per-class curves).
        Not defined for binary classifiers exposing only a
        decision_function.

    per_class : bool, default = True
        Plot a ROC curve per class; set to False if only the averaged
        curves are wanted.  Not defined for binary classifiers exposing
        only a decision_function.

    Notes
    -----
    Scikit-Learn's ``roc_curve`` only handles binary classification, so
    multiclass output is binarized (one-vs-rest / one-vs-all).  Class
    labels given at initialization do not constrain or filter the curves;
    the computation uses the unique values of the target passed to
    ``score``.  Do not use a LabelEncoder and do not pass class labels.

    .. seealso::
        http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html

    Examples
    --------
    >>> from yellowbrick.classifier import ROCAUC
    >>> from sklearn.linear_model import LogisticRegression
    >>> data = load_data("occupancy")
    >>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
    >>> X = data[features].values
    >>> y = data.occupancy.values
    >>> roc_auc(LogisticRegression(), X, y)

    Returns
    -------
    ax : matplotlib axes
        Returns the axes that the roc-auc curve was drawn on.
    """
    # Build the visualizer around the supplied estimator.
    viz = ROCAUC(model, ax, **kwargs)

    # Hold out 20% of the data for scoring.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    # Fit on the training split (draws the curves), score on the test split.
    viz.fit(X_train, y_train, **kwargs)
    viz.score(X_test, y_test)
    viz.finalize()

    # Hand back the matplotlib axes the curves were drawn on.
    return viz.ax
[ "def", "roc_auc", "(", "model", ",", "X", ",", "y", "=", "None", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Instantiate the visualizer", "visualizer", "=", "ROCAUC", "(", "model", ",", "ax", ",", "*", "*", "kwargs", ")", "# Create the train and test splits", "X_train", ",", "X_test", ",", "y_train", ",", "y_test", "=", "train_test_split", "(", "X", ",", "y", ",", "test_size", "=", "0.2", ")", "# Fit and transform the visualizer (calls draw)", "visualizer", ".", "fit", "(", "X_train", ",", "y_train", ",", "*", "*", "kwargs", ")", "visualizer", ".", "score", "(", "X_test", ",", "y_test", ")", "visualizer", ".", "finalize", "(", ")", "# Return the axes object on the visualizer", "return", "visualizer", ".", "ax" ]
43.809524
25.92381
def configure(project=LOGGING_PROJECT):
    """Configures cloud logging.

    This is called for all main calls.  If a $LOGGING_PROJECT environment
    variable is configured, STDERR and STDOUT are redirected to cloud
    logging; otherwise (or on any setup failure) plain stdlib logging is
    used and a notice is written to stderr.
    """
    if not project:
        # Fixed typo in the user-facing message: "enviroment" -> "environment".
        sys.stderr.write('!! Error: The $LOGGING_PROJECT environment '
                         'variable is required in order to set up cloud logging. '
                         'Cloud logging is disabled.\n')
        return

    try:
        # Swallow glog's startup chatter by redirecting stderr to an
        # in-memory buffer while the client connects.
        with contextlib.redirect_stderr(io.StringIO()):
            client = glog.Client(project)
            client.setup_logging(logging.INFO)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort fallback is kept, but only for
        # ordinary exceptions.
        logging.basicConfig(level=logging.INFO)
        sys.stderr.write('!! Cloud logging disabled\n')
[ "def", "configure", "(", "project", "=", "LOGGING_PROJECT", ")", ":", "if", "not", "project", ":", "sys", ".", "stderr", ".", "write", "(", "'!! Error: The $LOGGING_PROJECT enviroment '", "'variable is required in order to set up cloud logging. '", "'Cloud logging is disabled.\\n'", ")", "return", "try", ":", "# if this fails, redirect stderr to /dev/null so no startup spam.", "with", "contextlib", ".", "redirect_stderr", "(", "io", ".", "StringIO", "(", ")", ")", ":", "client", "=", "glog", ".", "Client", "(", "project", ")", "client", ".", "setup_logging", "(", "logging", ".", "INFO", ")", "except", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "sys", ".", "stderr", ".", "write", "(", "'!! Cloud logging disabled\\n'", ")" ]
38.619048
21.285714
def reverse_translate(protein_seq, template_dna=None, leading_seq=None,
        trailing_seq=None, forbidden_seqs=(), include_stop=True,
        manufacturer=None):
    """
    Generate a well-behaved DNA sequence from the given protein sequence.

    When a template DNA sequence is given, the returned DNA is kept as
    similar to it as possible.  None of the given restriction sites will
    be present in the sequence, and the leading and trailing sequences are
    appropriately concatenated onto it.
    """
    if manufacturer == 'gen9':
        forbidden_seqs += gen9.reserved_restriction_sites

    # The leading/trailing sequences may be given as restriction-site
    # names; otherwise use them literally (empty string when omitted).
    prefix = restriction_sites.get(leading_seq, leading_seq or '')
    suffix = restriction_sites.get(trailing_seq, trailing_seq or '')

    codons = make_codon_list(protein_seq, template_dna, include_stop)
    sanitize_codon_list(codons, forbidden_seqs)
    dna_seq = prefix + ''.join(codons) + suffix

    if manufacturer == 'gen9':
        gen9.apply_quality_control_checks(dna_seq)

    return dna_seq
[ "def", "reverse_translate", "(", "protein_seq", ",", "template_dna", "=", "None", ",", "leading_seq", "=", "None", ",", "trailing_seq", "=", "None", ",", "forbidden_seqs", "=", "(", ")", ",", "include_stop", "=", "True", ",", "manufacturer", "=", "None", ")", ":", "if", "manufacturer", "==", "'gen9'", ":", "forbidden_seqs", "+=", "gen9", ".", "reserved_restriction_sites", "leading_seq", "=", "restriction_sites", ".", "get", "(", "leading_seq", ",", "leading_seq", "or", "''", ")", "trailing_seq", "=", "restriction_sites", ".", "get", "(", "trailing_seq", ",", "trailing_seq", "or", "''", ")", "codon_list", "=", "make_codon_list", "(", "protein_seq", ",", "template_dna", ",", "include_stop", ")", "sanitize_codon_list", "(", "codon_list", ",", "forbidden_seqs", ")", "dna_seq", "=", "leading_seq", "+", "''", ".", "join", "(", "codon_list", ")", "+", "trailing_seq", "if", "manufacturer", "==", "'gen9'", ":", "gen9", ".", "apply_quality_control_checks", "(", "dna_seq", ")", "return", "dna_seq" ]
40.5
24.576923
def total_size(self):
    """
    Determine the size (in bytes) of this node.
    For an array, returns the size of the entire array.
    """
    if not self.inst.is_array:
        return self.size
    # The spec-accurate total would be:
    #   self.inst.array_stride * (self.inst.n_elements - 1) + self.size
    # but that opens up ugly corner cases the spec designers may not have
    # anticipated, so use the simplified stride * count until someone
    # actually cares.
    return self.inst.array_stride * self.inst.n_elements
[ "def", "total_size", "(", "self", ")", ":", "if", "self", ".", "inst", ".", "is_array", ":", "# Total size of arrays is technically supposed to be:", "# self.inst.array_stride * (self.inst.n_elements-1) + self.size", "# However this opens up a whole slew of ugly corner cases that the", "# spec designers may not have anticipated.", "# Using a simplified calculation for now until someone actually cares", "return", "self", ".", "inst", ".", "array_stride", "*", "self", ".", "inst", ".", "n_elements", "else", ":", "return", "self", ".", "size" ]
42.466667
20.733333
def _getStickersTemplatesDirectory(self, resource_name):
    """Return the path of the directory holding the css and pt files for
    the stickers, narrowed by filter_by_type when one is set.

    :param resource_name: The name of the resource folder.
    :type resource_name: string
    :returns: a string as a path
    """
    base_dir = queryResourceDirectory("stickers", resource_name).directory
    if not self.filter_by_type:
        return base_dir
    return base_dir + "/" + self.filter_by_type
[ "def", "_getStickersTemplatesDirectory", "(", "self", ",", "resource_name", ")", ":", "templates_dir", "=", "queryResourceDirectory", "(", "\"stickers\"", ",", "resource_name", ")", ".", "directory", "if", "self", ".", "filter_by_type", ":", "templates_dir", "=", "templates_dir", "+", "\"/\"", "+", "self", ".", "filter_by_type", "return", "templates_dir" ]
43
15.538462
def _match_with_pandas(filtered, matcher):
    """Find matches in a set using Pandas' library."""
    import pandas

    records = [item.to_dict() for item in filtered]
    if not records:
        return []

    frame = pandas.DataFrame(records).sort_values(['uuid'])

    # For each matching criterion, self-join on that column to pair up
    # every uuid with the uuids sharing the same value.
    pair_frames = []
    for criterion in matcher.matching_criteria():
        pairs = frame[['id', 'uuid', criterion]]
        pairs = pairs.dropna(subset=[criterion])
        pairs = pandas.merge(pairs, pairs, on=criterion, how='left')
        pair_frames.append(pairs[['uuid_x', 'uuid_y']])

    combined = pandas.concat(pair_frames).drop_duplicates()
    groups = combined.groupby(by=['uuid_x'], as_index=True, sort=True)
    return _calculate_matches_closures(groups)
[ "def", "_match_with_pandas", "(", "filtered", ",", "matcher", ")", ":", "import", "pandas", "data", "=", "[", "fl", ".", "to_dict", "(", ")", "for", "fl", "in", "filtered", "]", "if", "not", "data", ":", "return", "[", "]", "df", "=", "pandas", ".", "DataFrame", "(", "data", ")", "df", "=", "df", ".", "sort_values", "(", "[", "'uuid'", "]", ")", "cdfs", "=", "[", "]", "criteria", "=", "matcher", ".", "matching_criteria", "(", ")", "for", "c", "in", "criteria", ":", "cdf", "=", "df", "[", "[", "'id'", ",", "'uuid'", ",", "c", "]", "]", "cdf", "=", "cdf", ".", "dropna", "(", "subset", "=", "[", "c", "]", ")", "cdf", "=", "pandas", ".", "merge", "(", "cdf", ",", "cdf", ",", "on", "=", "c", ",", "how", "=", "'left'", ")", "cdf", "=", "cdf", "[", "[", "'uuid_x'", ",", "'uuid_y'", "]", "]", "cdfs", ".", "append", "(", "cdf", ")", "result", "=", "pandas", ".", "concat", "(", "cdfs", ")", "result", "=", "result", ".", "drop_duplicates", "(", ")", "groups", "=", "result", ".", "groupby", "(", "by", "=", "[", "'uuid_x'", "]", ",", "as_index", "=", "True", ",", "sort", "=", "True", ")", "matched", "=", "_calculate_matches_closures", "(", "groups", ")", "return", "matched" ]
24.193548
19.225806
def path_types(obj, path):
    """
    Given a list of path name elements, return a new list of [name, type]
    path components, resolved against the reference object *obj*.
    """
    typed_path = []
    node = obj

    # Resolve every element except the last, tracking the current node.
    for step in path[:-1]:
        if issubclass(node.__class__, MutableMapping) and step in node:
            typed_path.append([step, node[step].__class__])
            node = node[step]
        elif (issubclass(node.__class__, MutableSequence)
                and int(step) < len(node)):
            idx = int(step)
            typed_path.append([idx, node[idx].__class__])
            node = node[idx]
        else:
            # Unresolvable element: assume a dict would be created here.
            typed_path.append([step, dict])

    # The last element: try mapping access, fall back to integer indexing,
    # and finally to the element's own type when lookup fails entirely.
    last = path[-1]
    try:
        try:
            typed_path.append([last, node[last].__class__])
        except TypeError:
            typed_path.append([last, node[int(last)].__class__])
    except (KeyError, IndexError):
        typed_path.append([last, last.__class__])

    return typed_path
[ "def", "path_types", "(", "obj", ",", "path", ")", ":", "result", "=", "[", "]", "#for elem in path[:-1]:", "cur", "=", "obj", "for", "elem", "in", "path", "[", ":", "-", "1", "]", ":", "if", "(", "(", "issubclass", "(", "cur", ".", "__class__", ",", "MutableMapping", ")", "and", "elem", "in", "cur", ")", ")", ":", "result", ".", "append", "(", "[", "elem", ",", "cur", "[", "elem", "]", ".", "__class__", "]", ")", "cur", "=", "cur", "[", "elem", "]", "elif", "(", "issubclass", "(", "cur", ".", "__class__", ",", "MutableSequence", ")", "and", "int", "(", "elem", ")", "<", "len", "(", "cur", ")", ")", ":", "elem", "=", "int", "(", "elem", ")", "result", ".", "append", "(", "[", "elem", ",", "cur", "[", "elem", "]", ".", "__class__", "]", ")", "cur", "=", "cur", "[", "elem", "]", "else", ":", "result", ".", "append", "(", "[", "elem", ",", "dict", "]", ")", "try", ":", "try", ":", "result", ".", "append", "(", "[", "path", "[", "-", "1", "]", ",", "cur", "[", "path", "[", "-", "1", "]", "]", ".", "__class__", "]", ")", "except", "TypeError", ":", "result", ".", "append", "(", "[", "path", "[", "-", "1", "]", ",", "cur", "[", "int", "(", "path", "[", "-", "1", "]", ")", "]", ".", "__class__", "]", ")", "except", "(", "KeyError", ",", "IndexError", ")", ":", "result", ".", "append", "(", "[", "path", "[", "-", "1", "]", ",", "path", "[", "-", "1", "]", ".", "__class__", "]", ")", "return", "result" ]
36.56
20.24
def _convert_1bit_array_to_byte_array(arr): """ Convert bit array to byte array. :param arr: list Bits as a list where each element is an integer of 0 or 1 Returns ------- numpy.array 1D numpy array of type uint8 """ # Padding if necessary while len(arr) < 8 or len(arr) % 8: arr.append(0) arr = _np.array(arr, dtype='uint8') bit_arr = [] idx = 0 # Iterate and combine 8-bits into a uint8 for arr_idx in range(int(len(arr) / 8)): bit_arr.append(((arr[idx] << 7) & (1 << 7)) | ((arr[idx+1] << 6) & (1 << 6)) | ((arr[idx+2] << 5) & (1 << 5)) | ((arr[idx+3] << 4) & (1 << 4)) | ((arr[idx+4] << 3) & (1 << 3)) | ((arr[idx+5] << 2) & (1 << 2)) | ((arr[idx+6] << 1) & (1 << 1)) | ((arr[idx+7] << 0) & (1 << 0)) ) idx += 8 return _np.array(bit_arr, dtype='uint8')
[ "def", "_convert_1bit_array_to_byte_array", "(", "arr", ")", ":", "# Padding if necessary", "while", "len", "(", "arr", ")", "<", "8", "or", "len", "(", "arr", ")", "%", "8", ":", "arr", ".", "append", "(", "0", ")", "arr", "=", "_np", ".", "array", "(", "arr", ",", "dtype", "=", "'uint8'", ")", "bit_arr", "=", "[", "]", "idx", "=", "0", "# Iterate and combine 8-bits into a uint8", "for", "arr_idx", "in", "range", "(", "int", "(", "len", "(", "arr", ")", "/", "8", ")", ")", ":", "bit_arr", ".", "append", "(", "(", "(", "arr", "[", "idx", "]", "<<", "7", ")", "&", "(", "1", "<<", "7", ")", ")", "|", "(", "(", "arr", "[", "idx", "+", "1", "]", "<<", "6", ")", "&", "(", "1", "<<", "6", ")", ")", "|", "(", "(", "arr", "[", "idx", "+", "2", "]", "<<", "5", ")", "&", "(", "1", "<<", "5", ")", ")", "|", "(", "(", "arr", "[", "idx", "+", "3", "]", "<<", "4", ")", "&", "(", "1", "<<", "4", ")", ")", "|", "(", "(", "arr", "[", "idx", "+", "4", "]", "<<", "3", ")", "&", "(", "1", "<<", "3", ")", ")", "|", "(", "(", "arr", "[", "idx", "+", "5", "]", "<<", "2", ")", "&", "(", "1", "<<", "2", ")", ")", "|", "(", "(", "arr", "[", "idx", "+", "6", "]", "<<", "1", ")", "&", "(", "1", "<<", "1", ")", ")", "|", "(", "(", "arr", "[", "idx", "+", "7", "]", "<<", "0", ")", "&", "(", "1", "<<", "0", ")", ")", ")", "idx", "+=", "8", "return", "_np", ".", "array", "(", "bit_arr", ",", "dtype", "=", "'uint8'", ")" ]
31.875
16.3125
def is_valid_port(instance: int):
    """Validates data is a valid port number (0-65535).

    Non-int/str instances are reported as valid (vacuously true) so that
    other validators can reject them on type instead.
    """
    if not isinstance(instance, (int, str)):
        return True
    try:
        port = int(instance)
    except ValueError:
        # A non-numeric string can never be a port; the original raised here.
        return False
    # The original used `range(65535)`, which wrongly rejected the valid
    # top port 65535; use an inclusive upper bound.
    return 0 <= port <= 65535
[ "def", "is_valid_port", "(", "instance", ":", "int", ")", ":", "if", "not", "isinstance", "(", "instance", ",", "(", "int", ",", "str", ")", ")", ":", "return", "True", "return", "int", "(", "instance", ")", "in", "range", "(", "65535", ")" ]
35.2
6.4
def generate_annotations_json_string(source_path, only_simple=False):
    # type: (str, bool) -> List[FunctionData]
    """Produce annotation data from a JSON file with runtime-collected types.

    Data formats:

    * The source JSON is a list of pyannotate_tools.annotations.parse.RawEntry
      items.
    * The output is a list of FunctionData items.
    """
    results = []
    for item in parse_json(source_path):
        signature = unify_type_comments(item.type_comments)
        # Keep every entry unless the caller asked for simple signatures only.
        if is_signature_simple(signature) or not only_simple:
            entry = {
                'path': item.path,
                'line': item.line,
                'func_name': item.func_name,
                'signature': signature,
                'samples': item.samples,
            }  # type: FunctionData
            results.append(entry)
    return results
[ "def", "generate_annotations_json_string", "(", "source_path", ",", "only_simple", "=", "False", ")", ":", "# type: (str, bool) -> List[FunctionData]", "items", "=", "parse_json", "(", "source_path", ")", "results", "=", "[", "]", "for", "item", "in", "items", ":", "signature", "=", "unify_type_comments", "(", "item", ".", "type_comments", ")", "if", "is_signature_simple", "(", "signature", ")", "or", "not", "only_simple", ":", "data", "=", "{", "'path'", ":", "item", ".", "path", ",", "'line'", ":", "item", ".", "line", ",", "'func_name'", ":", "item", ".", "func_name", ",", "'signature'", ":", "signature", ",", "'samples'", ":", "item", ".", "samples", "}", "# type: FunctionData", "results", ".", "append", "(", "data", ")", "return", "results" ]
37.043478
15.478261
def naive_grouped_rowwise_apply(data, group_labels, func, func_args=(), out=None):
    """
    Simple reference implementation of grouped row-wise function application.

    For each row of ``data``, the entries are partitioned according to the
    matching row of ``group_labels`` and ``func`` is applied to each
    partition, with results written into the corresponding slots of the
    output row.

    Parameters
    ----------
    data : ndarray[ndim=2]
        Input array over which to apply a grouped function.
    group_labels : ndarray[ndim=2, dtype=int64]
        Labels to use to bucket inputs from array.  Should be the same
        shape as array.
    func : function[ndarray[ndim=1]] -> function[ndarray[ndim=1]]
        Function to apply to pieces of each row in array.
    func_args : tuple
        Additional positional arguments to provide to each row in array.
    out : ndarray, optional
        Array into which to write output.  If not supplied, a new array of
        the same shape as ``data`` is allocated and returned.

    Examples
    --------
    >>> data = np.array([[1., 2., 3.],
    ...                  [2., 3., 4.],
    ...                  [5., 6., 7.]])
    >>> labels = np.array([[0, 0, 1],
    ...                    [0, 1, 0],
    ...                    [1, 0, 2]])
    >>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min())
    array([[ 0.,  1.,  0.],
           [ 0.,  0.,  2.],
           [ 0.,  0.,  0.]])
    >>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum())
    array([[ 0.33333333,  0.66666667,  1.        ],
           [ 0.33333333,  1.        ,  0.66666667],
           [ 1.        ,  1.        ,  1.        ]])
    """
    if out is None:
        out = np.empty_like(data)

    for source_row, labels_row, dest_row in zip(data, group_labels, out):
        for group in np.unique(labels_row):
            mask = labels_row == group
            dest_row[mask] = func(source_row[mask], *func_args)

    return out
[ "def", "naive_grouped_rowwise_apply", "(", "data", ",", "group_labels", ",", "func", ",", "func_args", "=", "(", ")", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "np", ".", "empty_like", "(", "data", ")", "for", "(", "row", ",", "label_row", ",", "out_row", ")", "in", "zip", "(", "data", ",", "group_labels", ",", "out", ")", ":", "for", "label", "in", "np", ".", "unique", "(", "label_row", ")", ":", "locs", "=", "(", "label_row", "==", "label", ")", "out_row", "[", "locs", "]", "=", "func", "(", "row", "[", "locs", "]", ",", "*", "func_args", ")", "return", "out" ]
38.0625
15.520833
def _conditions(self, full_path, environ):
    """Return Etag and Last-Modified values; both default to the mtime."""
    magic = self._match_magic(full_path)
    if magic is None:
        # No magic handler: derive both values from the file's mtime.
        mtime = stat(full_path).st_mtime
        return str(mtime), rfc822.formatdate(mtime)
    return magic.conditions(full_path, environ)
[ "def", "_conditions", "(", "self", ",", "full_path", ",", "environ", ")", ":", "magic", "=", "self", ".", "_match_magic", "(", "full_path", ")", "if", "magic", "is", "not", "None", ":", "return", "magic", ".", "conditions", "(", "full_path", ",", "environ", ")", "else", ":", "mtime", "=", "stat", "(", "full_path", ")", ".", "st_mtime", "return", "str", "(", "mtime", ")", ",", "rfc822", ".", "formatdate", "(", "mtime", ")" ]
44.75
9.75
def counter(self, name, description, labels=None, **kwargs):
    """
    Use a Counter to track the total number of invocations of the method.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Counter
    """
    # On each completed invocation, simply increment the counter; the
    # elapsed time is ignored for this metric type.
    def _increment(metric, time):
        metric.inc()

    return self._track(
        Counter,
        _increment,
        kwargs,
        name,
        description,
        labels,
        registry=self.registry,
    )
[ "def", "counter", "(", "self", ",", "name", ",", "description", ",", "labels", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_track", "(", "Counter", ",", "lambda", "metric", ",", "time", ":", "metric", ".", "inc", "(", ")", ",", "kwargs", ",", "name", ",", "description", ",", "labels", ",", "registry", "=", "self", ".", "registry", ")" ]
37.4375
19.8125
def cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, wDesc, convDesc, destDesc, algo): """" This function returns the amount of GPU memory workspace the user needs to allocate to be able to call cudnnConvolutionForward with the specified algorithm. Parameters ---------- handle : cudnnHandle Handle to a previously created cuDNN context. srcDesc : cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. wDesc : cudnnFilterDescriptor Handle to a previously initialized filter descriptor. convDesc : cudnnConvolutionDescriptor Previously initialized convolution descriptor. destDesc : cudnnTensorDescriptor Handle to a previously initialized tensor descriptor. algo : cudnnConvolutionFwdAlgo Enumerant that specifies the chosen convolution algorithm. Returns ------- sizeInBytes: c_size_t Amount of GPU memory needed as workspace to be able to execute a forward convolution with the sepcified algo. """ sizeInBytes = ctypes.c_size_t() status = _libcudnn.cudnnGetConvolutionForwardWorkspaceSize(handle, srcDesc, wDesc, convDesc, destDesc, algo, ctypes.byref(sizeInBytes)) cudnnCheckStatus(status) return sizeInBytes
[ "def", "cudnnGetConvolutionForwardWorkspaceSize", "(", "handle", ",", "srcDesc", ",", "wDesc", ",", "convDesc", ",", "destDesc", ",", "algo", ")", ":", "sizeInBytes", "=", "ctypes", ".", "c_size_t", "(", ")", "status", "=", "_libcudnn", ".", "cudnnGetConvolutionForwardWorkspaceSize", "(", "handle", ",", "srcDesc", ",", "wDesc", ",", "convDesc", ",", "destDesc", ",", "algo", ",", "ctypes", ".", "byref", "(", "sizeInBytes", ")", ")", "cudnnCheckStatus", "(", "status", ")", "return", "sizeInBytes" ]
39.685714
22
def get_source(self, index, doc_type, id, **query_params): """ Get the _source of the document `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_ :param index: the index name :param doc_type: the document type :param id: the id of the doc type :arg _source: True or false to return the _source field or not, or a list of fields to return :arg _source_exclude: A list of fields to exclude from the returned _source field :arg _source_include: A list of fields to extract and return from the _source field :arg parent: The ID of the parent document :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg realtime: Specify whether to perform the operation in realtime or search mode :arg refresh: Refresh the shard containing the document before performing the operation :arg routing: Specific routing value :arg version: Explicit version number for concurrency control :arg version_type: Specific version type, valid choices are: 'internal', 'external', 'external_gte', 'force' """ self._es_parser.is_not_empty_params(index, doc_type, id) path = self._es_parser.make_path(index, doc_type, id, EsMethods.SOURCE) result = yield self._perform_request(HttpMethod.GET, path, params=query_params) returnValue(result)
[ "def", "get_source", "(", "self", ",", "index", ",", "doc_type", ",", "id", ",", "*", "*", "query_params", ")", ":", "self", ".", "_es_parser", ".", "is_not_empty_params", "(", "index", ",", "doc_type", ",", "id", ")", "path", "=", "self", ".", "_es_parser", ".", "make_path", "(", "index", ",", "doc_type", ",", "id", ",", "EsMethods", ".", "SOURCE", ")", "result", "=", "yield", "self", ".", "_perform_request", "(", "HttpMethod", ".", "GET", ",", "path", ",", "params", "=", "query_params", ")", "returnValue", "(", "result", ")" ]
51.580645
18.16129
def readQword(self): """ Reads a qword value from the L{ReadData} stream object. @rtype: int @return: The qword value read from the L{ReadData} stream. """ qword = unpack(self.endianness + ('Q' if not self.signed else 'b'), self.readAt(self.offset, 8))[0] self.offset += 8 return qword
[ "def", "readQword", "(", "self", ")", ":", "qword", "=", "unpack", "(", "self", ".", "endianness", "+", "(", "'Q'", "if", "not", "self", ".", "signed", "else", "'b'", ")", ",", "self", ".", "readAt", "(", "self", ".", "offset", ",", "8", ")", ")", "[", "0", "]", "self", ".", "offset", "+=", "8", "return", "qword" ]
35
22.6
def is_running(self) -> bool: """Return True if ffmpeg is running.""" if self._proc is None or self._proc.returncode is not None: return False return True
[ "def", "is_running", "(", "self", ")", "->", "bool", ":", "if", "self", ".", "_proc", "is", "None", "or", "self", ".", "_proc", ".", "returncode", "is", "not", "None", ":", "return", "False", "return", "True" ]
37.2
15
def local_replace(self, dt, use_dst=True, _recurse=False, **kwds): """Return pywws timestamp (utc, no tzinfo) for the most recent local time before the pywws timestamp dt, with datetime replace applied. """ local_time = dt + self.standard_offset if use_dst: dst_offset = self.dst(local_time) if dst_offset: local_time += dst_offset adjusted_time = local_time.replace(**kwds) if adjusted_time > local_time and not _recurse: return self.local_replace( dt - DAY, use_dst=use_dst, _recurse=True, **kwds) adjusted_time -= dst_offset if self.dst(adjusted_time): return adjusted_time - self.standard_offset adjusted_time = local_time.replace(**kwds) if use_dst: dst_offset = self.dst(adjusted_time) adjusted_time -= dst_offset if adjusted_time > local_time and not _recurse: return self.local_replace( dt - DAY, use_dst=use_dst, _recurse=True, **kwds) return adjusted_time - self.standard_offset
[ "def", "local_replace", "(", "self", ",", "dt", ",", "use_dst", "=", "True", ",", "_recurse", "=", "False", ",", "*", "*", "kwds", ")", ":", "local_time", "=", "dt", "+", "self", ".", "standard_offset", "if", "use_dst", ":", "dst_offset", "=", "self", ".", "dst", "(", "local_time", ")", "if", "dst_offset", ":", "local_time", "+=", "dst_offset", "adjusted_time", "=", "local_time", ".", "replace", "(", "*", "*", "kwds", ")", "if", "adjusted_time", ">", "local_time", "and", "not", "_recurse", ":", "return", "self", ".", "local_replace", "(", "dt", "-", "DAY", ",", "use_dst", "=", "use_dst", ",", "_recurse", "=", "True", ",", "*", "*", "kwds", ")", "adjusted_time", "-=", "dst_offset", "if", "self", ".", "dst", "(", "adjusted_time", ")", ":", "return", "adjusted_time", "-", "self", ".", "standard_offset", "adjusted_time", "=", "local_time", ".", "replace", "(", "*", "*", "kwds", ")", "if", "use_dst", ":", "dst_offset", "=", "self", ".", "dst", "(", "adjusted_time", ")", "adjusted_time", "-=", "dst_offset", "if", "adjusted_time", ">", "local_time", "and", "not", "_recurse", ":", "return", "self", ".", "local_replace", "(", "dt", "-", "DAY", ",", "use_dst", "=", "use_dst", ",", "_recurse", "=", "True", ",", "*", "*", "kwds", ")", "return", "adjusted_time", "-", "self", ".", "standard_offset" ]
44.769231
14.192308
def load(trajfiles, features=None, top=None, stride=1, chunksize=None, **kw): r""" Loads coordinate features into memory. If your memory is not big enough consider the use of **pipeline**, or use the stride option to subsample the data. Parameters ---------- trajfiles : str, list of str or nested list (one level) of str A filename or a list of filenames to trajectory files that can be processed by pyemma. Both molecular dynamics trajectory files and raw data files (tabulated ASCII or binary) can be loaded. If a nested list of filenames is given, eg.: [['traj1_0.xtc', 'traj1_1.xtc'], 'traj2_full.xtc'], ['traj3_0.xtc, ...]] the grouped fragments will be treated as a joint trajectory. When molecular dynamics trajectory files are loaded either a featurizer must be specified (for reading specific quantities such as distances or dihedrals), or a topology file (in that case only Cartesian coordinates will be read). In the latter case, the resulting feature vectors will have length 3N for each trajectory frame, with N being the number of atoms and (x1, y1, z1, x2, y2, z2, ...) being the sequence of coordinates in the vector. Molecular dynamics trajectory files are loaded through mdtraj (http://mdtraj.org/latest/), and can possess any of the mdtraj-compatible trajectory formats including: * CHARMM/NAMD (.dcd) * Gromacs (.xtc) * Gromacs (.trr) * AMBER (.binpos) * AMBER (.netcdf) * PDB trajectory format (.pdb) * TINKER (.arc), * MDTRAJ (.hdf5) * LAMMPS trajectory format (.lammpstrj) Raw data can be in the following format: * tabulated ASCII (.dat, .txt) * binary python (.npy, .npz) features : MDFeaturizer, optional, default = None a featurizer object specifying how molecular dynamics files should be read (e.g. intramolecular distances, angles, dihedrals, etc). top : str, mdtraj.Trajectory or mdtraj.Topology, optional, default = None A molecular topology file, e.g. in PDB (.pdb) format or an already loaded mdtraj.Topology object. 
If it is an mdtraj.Trajectory object, the topology will be extracted from it. stride : int, optional, default = 1 Load only every stride'th frame. By default, every frame is loaded chunksize: int, default=None Number of data frames to process at once. Choose a higher value here, to optimize thread usage and gain processing speed. If None is passed, use the default value of the underlying reader/data source. Choose zero to disable chunking at all. Returns ------- data : ndarray or list of ndarray If a single filename was given as an input (and unless the format is .npz), the return will be a single ndarray of size (T, d), where T is the number of time steps in the trajectory and d is the number of features (coordinates, observables). When reading from molecular dynamics data without a specific featurizer, each feature vector will have size d=3N and will hold the Cartesian coordinates in the sequence (x1, y1, z1, x2, y2, z2, ...). If multiple filenames were given, or if the file is a .npz holding multiple arrays, the result is a list of appropriately shaped arrays See also -------- :func:`pyemma.coordinates.source` if your memory is not big enough, specify data source and put it into your transformation or clustering algorithms instead of the loaded data. This will stream the data and save memory on the cost of longer processing times. 
Examples -------- >>> from pyemma.coordinates import load >>> files = ['traj01.xtc', 'traj02.xtc'] # doctest: +SKIP >>> output = load(files, top='my_structure.pdb') # doctest: +SKIP """ from pyemma.coordinates.data.util.reader_utils import create_file_reader from pyemma.util.reflection import get_default_args cs = _check_old_chunksize_arg(chunksize, get_default_args(load)['chunksize'], **kw) if isinstance(trajfiles, _string_types) or ( isinstance(trajfiles, (list, tuple)) and (any(isinstance(item, (list, tuple, str)) for item in trajfiles) or len(trajfiles) is 0)): reader = create_file_reader(trajfiles, top, features, chunksize=cs, **kw) trajs = reader.get_output(stride=stride) if len(trajs) == 1: return trajs[0] else: return trajs else: raise ValueError('unsupported type (%s) of input' % type(trajfiles))
[ "def", "load", "(", "trajfiles", ",", "features", "=", "None", ",", "top", "=", "None", ",", "stride", "=", "1", ",", "chunksize", "=", "None", ",", "*", "*", "kw", ")", ":", "from", "pyemma", ".", "coordinates", ".", "data", ".", "util", ".", "reader_utils", "import", "create_file_reader", "from", "pyemma", ".", "util", ".", "reflection", "import", "get_default_args", "cs", "=", "_check_old_chunksize_arg", "(", "chunksize", ",", "get_default_args", "(", "load", ")", "[", "'chunksize'", "]", ",", "*", "*", "kw", ")", "if", "isinstance", "(", "trajfiles", ",", "_string_types", ")", "or", "(", "isinstance", "(", "trajfiles", ",", "(", "list", ",", "tuple", ")", ")", "and", "(", "any", "(", "isinstance", "(", "item", ",", "(", "list", ",", "tuple", ",", "str", ")", ")", "for", "item", "in", "trajfiles", ")", "or", "len", "(", "trajfiles", ")", "is", "0", ")", ")", ":", "reader", "=", "create_file_reader", "(", "trajfiles", ",", "top", ",", "features", ",", "chunksize", "=", "cs", ",", "*", "*", "kw", ")", "trajs", "=", "reader", ".", "get_output", "(", "stride", "=", "stride", ")", "if", "len", "(", "trajs", ")", "==", "1", ":", "return", "trajs", "[", "0", "]", "else", ":", "return", "trajs", "else", ":", "raise", "ValueError", "(", "'unsupported type (%s) of input'", "%", "type", "(", "trajfiles", ")", ")" ]
44.245283
26.622642
def convert_simple_rnn(builder, layer, input_names, output_names, keras_layer): """Convert an SimpleRNN layer from keras to coreml. Parameters ---------- keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ # Get input and output names hidden_size = keras_layer.output_dim input_size = keras_layer.input_shape[-1] output_all = keras_layer.return_sequences reverse_input = keras_layer.go_backwards if keras_layer.consume_less not in ['cpu', 'gpu']: raise ValueError('Cannot convert Keras layer with consume_less = %s' % keras_layer.consume_less) W_h = np.zeros((hidden_size, hidden_size)) W_x = np.zeros((hidden_size, input_size)) b = np.zeros((hidden_size,)) if keras_layer.consume_less == 'cpu': W_h = keras_layer.get_weights()[1].T W_x = keras_layer.get_weights()[0].T b = keras_layer.get_weights()[2] else: W_h = keras_layer.get_weights()[1].T W_x = keras_layer.get_weights()[0].T b = keras_layer.get_weights()[2] # Set actication type activation_str = _get_recurrent_activation_name_from_keras(keras_layer.activation) # Add to the network builder.add_simple_rnn( name = layer, W_h = W_h, W_x = W_x, b = b, hidden_size = hidden_size, input_size = input_size, activation = activation_str, input_names = input_names, output_names = output_names, output_all=output_all, reverse_input=reverse_input)
[ "def", "convert_simple_rnn", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "# Get input and output names", "hidden_size", "=", "keras_layer", ".", "output_dim", "input_size", "=", "keras_layer", ".", "input_shape", "[", "-", "1", "]", "output_all", "=", "keras_layer", ".", "return_sequences", "reverse_input", "=", "keras_layer", ".", "go_backwards", "if", "keras_layer", ".", "consume_less", "not", "in", "[", "'cpu'", ",", "'gpu'", "]", ":", "raise", "ValueError", "(", "'Cannot convert Keras layer with consume_less = %s'", "%", "keras_layer", ".", "consume_less", ")", "W_h", "=", "np", ".", "zeros", "(", "(", "hidden_size", ",", "hidden_size", ")", ")", "W_x", "=", "np", ".", "zeros", "(", "(", "hidden_size", ",", "input_size", ")", ")", "b", "=", "np", ".", "zeros", "(", "(", "hidden_size", ",", ")", ")", "if", "keras_layer", ".", "consume_less", "==", "'cpu'", ":", "W_h", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "1", "]", ".", "T", "W_x", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "0", "]", ".", "T", "b", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "2", "]", "else", ":", "W_h", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "1", "]", ".", "T", "W_x", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "0", "]", ".", "T", "b", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "2", "]", "# Set actication type", "activation_str", "=", "_get_recurrent_activation_name_from_keras", "(", "keras_layer", ".", "activation", ")", "# Add to the network", "builder", ".", "add_simple_rnn", "(", "name", "=", "layer", ",", "W_h", "=", "W_h", ",", "W_x", "=", "W_x", ",", "b", "=", "b", ",", "hidden_size", "=", "hidden_size", ",", "input_size", "=", "input_size", ",", "activation", "=", "activation_str", ",", "input_names", "=", "input_names", ",", "output_names", "=", "output_names", ",", "output_all", "=", "output_all", ",", "reverse_input", "=", "reverse_input", ")" ]
32.125
16
def write_url (self, url_data): """Write url_data.base_url.""" self.writeln(u"<tr>") self.writeln(u'<td class="url">%s</td>' % self.part("url")) self.write(u'<td class="url">') self.write(u"`%s'" % cgi.escape(url_data.base_url)) self.writeln(u"</td></tr>")
[ "def", "write_url", "(", "self", ",", "url_data", ")", ":", "self", ".", "writeln", "(", "u\"<tr>\"", ")", "self", ".", "writeln", "(", "u'<td class=\"url\">%s</td>'", "%", "self", ".", "part", "(", "\"url\"", ")", ")", "self", ".", "write", "(", "u'<td class=\"url\">'", ")", "self", ".", "write", "(", "u\"`%s'\"", "%", "cgi", ".", "escape", "(", "url_data", ".", "base_url", ")", ")", "self", ".", "writeln", "(", "u\"</td></tr>\"", ")" ]
42.571429
10.285714
def check_case(institute_id, case_name): """Mark a case that is has been checked. This means to set case['needs_check'] to False """ institute_obj, case_obj = institute_and_case(store, institute_id, case_name) store.case_collection.find_one_and_update({'_id':case_obj['_id']}, {'$set': {'needs_check': False}}) return redirect(request.referrer)
[ "def", "check_case", "(", "institute_id", ",", "case_name", ")", ":", "institute_obj", ",", "case_obj", "=", "institute_and_case", "(", "store", ",", "institute_id", ",", "case_name", ")", "store", ".", "case_collection", ".", "find_one_and_update", "(", "{", "'_id'", ":", "case_obj", "[", "'_id'", "]", "}", ",", "{", "'$set'", ":", "{", "'needs_check'", ":", "False", "}", "}", ")", "return", "redirect", "(", "request", ".", "referrer", ")" ]
52.142857
17.142857
def acm_certificate_arn(self, lookup, default=None): """ Args: lookup: region/domain on the certificate to be looked up default: the optional value to return if lookup failed; returns None if not set Returns: ARN of a certificate with status "Issued" for the region/domain, if found, or default/None if no match If more than one "Issued" certificate matches the region/domain: - if any matching cert was issued by Amazon, returns ARN of certificate with most recent IssuedAt timestamp - if no certs were issued by Amazon, returns ARN of an arbitrary matching certificate - certificates issued by Amazon take precedence over certificates not issued by Amazon """ # @todo: Only searches the first 100 certificates in the account try: # This a region-specific client, so we'll make a new client in the right place using existing SESSION region_name, domain_name = lookup.split("/") acm_client = EFAwsResolver.__CLIENTS["SESSION"].client(service_name="acm", region_name=region_name) response = acm_client.list_certificates( CertificateStatuses=['ISSUED'], MaxItems=100 ) except Exception: return default # No certificates if len(response["CertificateSummaryList"]) < 1: return default # One or more certificates - find cert with latest IssuedAt date or an arbitrary cert if none are dated best_match_cert = None for cert_handle in response["CertificateSummaryList"]: if cert_handle["DomainName"] == domain_name: cert = acm_client.describe_certificate(CertificateArn=cert_handle["CertificateArn"])["Certificate"] # Patch up cert if there is no IssuedAt (i.e. cert was not issued by Amazon) if not cert.has_key("IssuedAt"): cert[u"IssuedAt"] = datetime.datetime(1970, 1, 1, 0, 0) if best_match_cert is None: best_match_cert = cert elif cert["IssuedAt"] > best_match_cert["IssuedAt"]: best_match_cert = cert if best_match_cert is not None: return best_match_cert["CertificateArn"] return default
[ "def", "acm_certificate_arn", "(", "self", ",", "lookup", ",", "default", "=", "None", ")", ":", "# @todo: Only searches the first 100 certificates in the account", "try", ":", "# This a region-specific client, so we'll make a new client in the right place using existing SESSION", "region_name", ",", "domain_name", "=", "lookup", ".", "split", "(", "\"/\"", ")", "acm_client", "=", "EFAwsResolver", ".", "__CLIENTS", "[", "\"SESSION\"", "]", ".", "client", "(", "service_name", "=", "\"acm\"", ",", "region_name", "=", "region_name", ")", "response", "=", "acm_client", ".", "list_certificates", "(", "CertificateStatuses", "=", "[", "'ISSUED'", "]", ",", "MaxItems", "=", "100", ")", "except", "Exception", ":", "return", "default", "# No certificates", "if", "len", "(", "response", "[", "\"CertificateSummaryList\"", "]", ")", "<", "1", ":", "return", "default", "# One or more certificates - find cert with latest IssuedAt date or an arbitrary cert if none are dated", "best_match_cert", "=", "None", "for", "cert_handle", "in", "response", "[", "\"CertificateSummaryList\"", "]", ":", "if", "cert_handle", "[", "\"DomainName\"", "]", "==", "domain_name", ":", "cert", "=", "acm_client", ".", "describe_certificate", "(", "CertificateArn", "=", "cert_handle", "[", "\"CertificateArn\"", "]", ")", "[", "\"Certificate\"", "]", "# Patch up cert if there is no IssuedAt (i.e. cert was not issued by Amazon)", "if", "not", "cert", ".", "has_key", "(", "\"IssuedAt\"", ")", ":", "cert", "[", "u\"IssuedAt\"", "]", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ",", "0", ",", "0", ")", "if", "best_match_cert", "is", "None", ":", "best_match_cert", "=", "cert", "elif", "cert", "[", "\"IssuedAt\"", "]", ">", "best_match_cert", "[", "\"IssuedAt\"", "]", ":", "best_match_cert", "=", "cert", "if", "best_match_cert", "is", "not", "None", ":", "return", "best_match_cert", "[", "\"CertificateArn\"", "]", "return", "default" ]
49.809524
26.857143
def pinc(lat): """ calculate paleoinclination from latitude using dipole formula: tan(I) = 2tan(lat) Parameters ________________ lat : either a single value or an array of latitudes Returns ------- array of inclinations """ tanl = np.tan(np.radians(lat)) inc = np.arctan(2. * tanl) return np.degrees(inc)
[ "def", "pinc", "(", "lat", ")", ":", "tanl", "=", "np", ".", "tan", "(", "np", ".", "radians", "(", "lat", ")", ")", "inc", "=", "np", ".", "arctan", "(", "2.", "*", "tanl", ")", "return", "np", ".", "degrees", "(", "inc", ")" ]
21.25
22.25
def default_52xhandler(response, resource, url, params): """ Default 52x handler that loops every second until a non 52x response is received. :param response: The response of the last executed api request. :param resource: The resource of the last executed api request. :param url: The url of the last executed api request sans encoded query parameters. :param params: The query params of the last executed api request in dictionary format. """ time.sleep(1) return resource.execute(url, params)
[ "def", "default_52xhandler", "(", "response", ",", "resource", ",", "url", ",", "params", ")", ":", "time", ".", "sleep", "(", "1", ")", "return", "resource", ".", "execute", "(", "url", ",", "params", ")" ]
52.3
23.5
def _infodict(cls, name): """load the info dictionary for the given name""" info = cls._dataset_info.get(name, None) if info is None: raise ValueError('No such dataset {0} exists, ' 'use list_datasets() to get a list ' 'of available datasets.'.format(name)) return info
[ "def", "_infodict", "(", "cls", ",", "name", ")", ":", "info", "=", "cls", ".", "_dataset_info", ".", "get", "(", "name", ",", "None", ")", "if", "info", "is", "None", ":", "raise", "ValueError", "(", "'No such dataset {0} exists, '", "'use list_datasets() to get a list '", "'of available datasets.'", ".", "format", "(", "name", ")", ")", "return", "info" ]
45.5
16.375
def extract_locals(trcback): """ Extracts the frames locals of given traceback. :param trcback: Traceback. :type trcback: Traceback :return: Frames locals. :rtype: list """ output = [] stack = extract_stack(get_inner_most_frame(trcback)) for frame, file_name, line_number, name, context, index in stack: args_names, nameless, keyword = extract_arguments(frame) arguments, nameless_args, keyword_args, locals = OrderedDict(), [], {}, {} for key, data in frame.f_locals.iteritems(): if key == nameless: nameless_args = map(repr, frame.f_locals.get(nameless, ())) elif key == keyword: keyword_args = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems()) elif key in args_names: arguments[key] = repr(data) else: locals[key] = repr(data) output.append(((name, file_name, line_number), (arguments, nameless_args, keyword_args, locals))) return output
[ "def", "extract_locals", "(", "trcback", ")", ":", "output", "=", "[", "]", "stack", "=", "extract_stack", "(", "get_inner_most_frame", "(", "trcback", ")", ")", "for", "frame", ",", "file_name", ",", "line_number", ",", "name", ",", "context", ",", "index", "in", "stack", ":", "args_names", ",", "nameless", ",", "keyword", "=", "extract_arguments", "(", "frame", ")", "arguments", ",", "nameless_args", ",", "keyword_args", ",", "locals", "=", "OrderedDict", "(", ")", ",", "[", "]", ",", "{", "}", ",", "{", "}", "for", "key", ",", "data", "in", "frame", ".", "f_locals", ".", "iteritems", "(", ")", ":", "if", "key", "==", "nameless", ":", "nameless_args", "=", "map", "(", "repr", ",", "frame", ".", "f_locals", ".", "get", "(", "nameless", ",", "(", ")", ")", ")", "elif", "key", "==", "keyword", ":", "keyword_args", "=", "dict", "(", "(", "arg", ",", "repr", "(", "value", ")", ")", "for", "arg", ",", "value", "in", "frame", ".", "f_locals", ".", "get", "(", "keyword", ",", "{", "}", ")", ".", "iteritems", "(", ")", ")", "elif", "key", "in", "args_names", ":", "arguments", "[", "key", "]", "=", "repr", "(", "data", ")", "else", ":", "locals", "[", "key", "]", "=", "repr", "(", "data", ")", "output", ".", "append", "(", "(", "(", "name", ",", "file_name", ",", "line_number", ")", ",", "(", "arguments", ",", "nameless_args", ",", "keyword_args", ",", "locals", ")", ")", ")", "return", "output" ]
40.115385
21.423077
def connect(self): """ Returns a new socket connection to this server. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(("127.0.0.1", self._port)) return sock
[ "def", "connect", "(", "self", ")", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "connect", "(", "(", "\"127.0.0.1\"", ",", "self", ".", "_port", ")", ")", "return", "sock" ]
39
14
def get_process_pids(self, process): """Returns PIDs of all processes with process name. If the process doesn't exist, returns an empty list""" pids = [] cmd_line_glob = "/proc/[0-9]*/cmdline" cmd_line_paths = glob.glob(cmd_line_glob) for path in cmd_line_paths: try: with open(path, 'r') as f: cmd_line = f.read().strip() if process in cmd_line: pids.append(path.split("/")[2]) except IOError as e: continue return pids
[ "def", "get_process_pids", "(", "self", ",", "process", ")", ":", "pids", "=", "[", "]", "cmd_line_glob", "=", "\"/proc/[0-9]*/cmdline\"", "cmd_line_paths", "=", "glob", ".", "glob", "(", "cmd_line_glob", ")", "for", "path", "in", "cmd_line_paths", ":", "try", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "cmd_line", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "if", "process", "in", "cmd_line", ":", "pids", ".", "append", "(", "path", ".", "split", "(", "\"/\"", ")", "[", "2", "]", ")", "except", "IOError", "as", "e", ":", "continue", "return", "pids" ]
38.8
9.533333
async def _handle_conversation_delta(self, conversation): """Receive Conversation delta and create or update the conversation. Args: conversation: hangouts_pb2.Conversation instance Raises: NetworkError: A request to fetch the complete conversation failed. """ conv_id = conversation.conversation_id.id conv = self._conv_dict.get(conv_id, None) if conv is None: # Ignore the delta and fetch the complete conversation. await self._get_or_fetch_conversation(conv_id) else: # Update conversation using the delta. conv.update_conversation(conversation)
[ "async", "def", "_handle_conversation_delta", "(", "self", ",", "conversation", ")", ":", "conv_id", "=", "conversation", ".", "conversation_id", ".", "id", "conv", "=", "self", ".", "_conv_dict", ".", "get", "(", "conv_id", ",", "None", ")", "if", "conv", "is", "None", ":", "# Ignore the delta and fetch the complete conversation.", "await", "self", ".", "_get_or_fetch_conversation", "(", "conv_id", ")", "else", ":", "# Update conversation using the delta.", "conv", ".", "update_conversation", "(", "conversation", ")" ]
39.411765
19.588235
def install_hook(dialog=SimpleExceptionDialog, invoke_old_hook=False, **extra): """ install the configured exception hook wrapping the old exception hook don't use it twice :oparam dialog: a different exception dialog class :oparam invoke_old_hook: should we invoke the old exception hook? """ global _old_hook assert _old_hook is None def new_hook(etype, eval, trace): gobject.idle_add(dialog_handler, dialog, etype, eval, trace, extra) if invoke_old_hook: _old_hook(etype, eval, trace) _old_hook = sys.excepthook sys.excepthook = new_hook
[ "def", "install_hook", "(", "dialog", "=", "SimpleExceptionDialog", ",", "invoke_old_hook", "=", "False", ",", "*", "*", "extra", ")", ":", "global", "_old_hook", "assert", "_old_hook", "is", "None", "def", "new_hook", "(", "etype", ",", "eval", ",", "trace", ")", ":", "gobject", ".", "idle_add", "(", "dialog_handler", ",", "dialog", ",", "etype", ",", "eval", ",", "trace", ",", "extra", ")", "if", "invoke_old_hook", ":", "_old_hook", "(", "etype", ",", "eval", ",", "trace", ")", "_old_hook", "=", "sys", ".", "excepthook", "sys", ".", "excepthook", "=", "new_hook" ]
31.473684
20.947368
def polygon(self, *coords, color="black", outline=False, outline_color="black"): """ Draws a polygon from an list of co-ordinates :param int *coords: Pairs of x and y positions which make up the polygon. :param str color: The color of the shape. Defaults to `"black"`. :param int outline: `0` or `False` is no outline. `True` or value > 1 sets an outline. Defaults to `False`. :param str outline_color: The color of the outline. Defaults to `"black"`. :return: The id of the shape. """ return self.tk.create_polygon( *coords, outline = utils.convert_color(outline_color) if outline else "", width = int(outline), fill = "" if color is None else utils.convert_color(color) )
[ "def", "polygon", "(", "self", ",", "*", "coords", ",", "color", "=", "\"black\"", ",", "outline", "=", "False", ",", "outline_color", "=", "\"black\"", ")", ":", "return", "self", ".", "tk", ".", "create_polygon", "(", "*", "coords", ",", "outline", "=", "utils", ".", "convert_color", "(", "outline_color", ")", "if", "outline", "else", "\"\"", ",", "width", "=", "int", "(", "outline", ")", ",", "fill", "=", "\"\"", "if", "color", "is", "None", "else", "utils", ".", "convert_color", "(", "color", ")", ")" ]
33.88
23
def set_bucket_notification(self, bucket_name, notifications): """ Set the given notifications on the bucket. :param bucket_name: Bucket name. :param notifications: Notifications structure """ is_valid_bucket_name(bucket_name) is_valid_bucket_notification_config(notifications) content = xml_marshal_bucket_notifications(notifications) headers = { 'Content-Length': str(len(content)), 'Content-Md5': get_md5_base64digest(content) } content_sha256_hex = get_sha256_hexdigest(content) self._url_open( 'PUT', bucket_name=bucket_name, query={"notification": ""}, headers=headers, body=content, content_sha256=content_sha256_hex )
[ "def", "set_bucket_notification", "(", "self", ",", "bucket_name", ",", "notifications", ")", ":", "is_valid_bucket_name", "(", "bucket_name", ")", "is_valid_bucket_notification_config", "(", "notifications", ")", "content", "=", "xml_marshal_bucket_notifications", "(", "notifications", ")", "headers", "=", "{", "'Content-Length'", ":", "str", "(", "len", "(", "content", ")", ")", ",", "'Content-Md5'", ":", "get_md5_base64digest", "(", "content", ")", "}", "content_sha256_hex", "=", "get_sha256_hexdigest", "(", "content", ")", "self", ".", "_url_open", "(", "'PUT'", ",", "bucket_name", "=", "bucket_name", ",", "query", "=", "{", "\"notification\"", ":", "\"\"", "}", ",", "headers", "=", "headers", ",", "body", "=", "content", ",", "content_sha256", "=", "content_sha256_hex", ")" ]
33.5
15.416667
def create_secondary_zone(self, account_name, zone_name, master, tsig_key=None, key_value=None):
    """Creates a new secondary zone.

    Arguments:
    account_name -- The name of the account.
    zone_name -- The name of the zone.
    master -- Primary name server IP address.

    Keyword Arguments:
    tsig_key -- For TSIG-enabled zones: The transaction signature key.
                NOTE: Requires key_value.
    key_value -- TSIG key secret.
    """
    primary = {"ip": master}
    # TSIG information is only attached when both pieces are present.
    if tsig_key is not None and key_value is not None:
        primary["tsigKey"] = tsig_key
        primary["tsigKeyValue"] = key_value

    zone_data = {
        "properties": {
            "name": zone_name,
            "accountName": account_name,
            "type": "SECONDARY",
        },
        "secondaryCreateInfo": {
            "primaryNameServers": {
                "nameServerIpList": {"nameServerIp1": primary},
            },
        },
    }
    return self.rest_api_connection.post("/v1/zones", json.dumps(zone_data))
[ "def", "create_secondary_zone", "(", "self", ",", "account_name", ",", "zone_name", ",", "master", ",", "tsig_key", "=", "None", ",", "key_value", "=", "None", ")", ":", "zone_properties", "=", "{", "\"name\"", ":", "zone_name", ",", "\"accountName\"", ":", "account_name", ",", "\"type\"", ":", "\"SECONDARY\"", "}", "if", "tsig_key", "is", "not", "None", "and", "key_value", "is", "not", "None", ":", "name_server_info", "=", "{", "\"ip\"", ":", "master", ",", "\"tsigKey\"", ":", "tsig_key", ",", "\"tsigKeyValue\"", ":", "key_value", "}", "else", ":", "name_server_info", "=", "{", "\"ip\"", ":", "master", "}", "name_server_ip_1", "=", "{", "\"nameServerIp1\"", ":", "name_server_info", "}", "name_server_ip_list", "=", "{", "\"nameServerIpList\"", ":", "name_server_ip_1", "}", "secondary_zone_info", "=", "{", "\"primaryNameServers\"", ":", "name_server_ip_list", "}", "zone_data", "=", "{", "\"properties\"", ":", "zone_properties", ",", "\"secondaryCreateInfo\"", ":", "secondary_zone_info", "}", "return", "self", ".", "rest_api_connection", ".", "post", "(", "\"/v1/zones\"", ",", "json", ".", "dumps", "(", "zone_data", ")", ")" ]
48.666667
25.375
def get_group_headers(self, table_name, group_name):
    """ Return a list of all headers for a given group """
    # Select the rows whose 'group' column matches and expose their index.
    table = self.dm[table_name]
    return table[table['group'] == group_name].index
[ "def", "get_group_headers", "(", "self", ",", "table_name", ",", "group_name", ")", ":", "# get all headers of a particular group", "df", "=", "self", ".", "dm", "[", "table_name", "]", "cond", "=", "df", "[", "'group'", "]", "==", "group_name", "return", "df", "[", "cond", "]", ".", "index" ]
34.5
6.5
def setValue(self, value):
    """setter function to _lineEdit.text. Sets minimum/maximum as new
    value if value is out of bounds.

    Args:
        value (int/long): new value to set.

    Returns
        True if all went fine.

    """
    low = self.minimum()
    high = self.maximum()
    # Out-of-range values are replaced by the violated bound.
    if low <= value <= high:
        text = str(value)
    elif value < low:
        text = str(low)
    else:
        text = str(high)
    self._lineEdit.setText(text)
    return True
[ "def", "setValue", "(", "self", ",", "value", ")", ":", "if", "value", ">=", "self", ".", "minimum", "(", ")", "and", "value", "<=", "self", ".", "maximum", "(", ")", ":", "self", ".", "_lineEdit", ".", "setText", "(", "str", "(", "value", ")", ")", "elif", "value", "<", "self", ".", "minimum", "(", ")", ":", "self", ".", "_lineEdit", ".", "setText", "(", "str", "(", "self", ".", "minimum", "(", ")", ")", ")", "elif", "value", ">", "self", ".", "maximum", "(", ")", ":", "self", ".", "_lineEdit", ".", "setText", "(", "str", "(", "self", ".", "maximum", "(", ")", ")", ")", "return", "True" ]
35.1875
15.4375
def reset_sequence(app_label):
    """
    Reset the primary key sequence for the tables in an application.
    This is necessary if any local edits have happened to the table.
    """
    puts("Resetting primary key sequence for {0}".format(app_label))
    # Build the database-specific reset statement and run it.
    cursor = connection.cursor()
    reset_cmd = get_reset_command(app_label)
    cursor.execute(reset_cmd)
[ "def", "reset_sequence", "(", "app_label", ")", ":", "puts", "(", "\"Resetting primary key sequence for {0}\"", ".", "format", "(", "app_label", ")", ")", "cursor", "=", "connection", ".", "cursor", "(", ")", "cmd", "=", "get_reset_command", "(", "app_label", ")", "cursor", ".", "execute", "(", "cmd", ")" ]
31
18.272727
def p_simple_list(p):
    '''simple_list : simple_list1
                   | simple_list1 AMPERSAND
                   | simple_list1 SEMICOLON'''
    tok = p.lexer
    # Any pending heredoc bodies are consumed once the list is complete.
    heredoc.gatherheredocuments(tok)

    if len(p) == 3 or len(p[1]) > 1:
        # Either a trailing '&'/';' separator is present (len(p) == 3)
        # or there are multiple parts: wrap everything in a 'list' node.
        parts = p[1]
        if len(p) == 3:
            # The trailing separator becomes an explicit operator node.
            parts.append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2)))
        p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts))
    else:
        # A single bare command: unwrap it instead of building a list node.
        assert len(p[1]) == 1
        p[0] = p[1][0]

    # Inside a command substitution, reaching the shell EOF token means
    # the embedded input is complete — stop the parser here.
    if (len(p) == 2 and p.lexer._parserstate & flags.parser.CMDSUBST
            and p.lexer._current_token.nopos() == p.lexer._shell_eof_token):
        # accept the input
        p.accept()
[ "def", "p_simple_list", "(", "p", ")", ":", "tok", "=", "p", ".", "lexer", "heredoc", ".", "gatherheredocuments", "(", "tok", ")", "if", "len", "(", "p", ")", "==", "3", "or", "len", "(", "p", "[", "1", "]", ")", ">", "1", ":", "parts", "=", "p", "[", "1", "]", "if", "len", "(", "p", ")", "==", "3", ":", "parts", ".", "append", "(", "ast", ".", "node", "(", "kind", "=", "'operator'", ",", "op", "=", "p", "[", "2", "]", ",", "pos", "=", "p", ".", "lexspan", "(", "2", ")", ")", ")", "p", "[", "0", "]", "=", "ast", ".", "node", "(", "kind", "=", "'list'", ",", "parts", "=", "parts", ",", "pos", "=", "_partsspan", "(", "parts", ")", ")", "else", ":", "assert", "len", "(", "p", "[", "1", "]", ")", "==", "1", "p", "[", "0", "]", "=", "p", "[", "1", "]", "[", "0", "]", "if", "(", "len", "(", "p", ")", "==", "2", "and", "p", ".", "lexer", ".", "_parserstate", "&", "flags", ".", "parser", ".", "CMDSUBST", "and", "p", ".", "lexer", ".", "_current_token", ".", "nopos", "(", ")", "==", "p", ".", "lexer", ".", "_shell_eof_token", ")", ":", "# accept the input", "p", ".", "accept", "(", ")" ]
33.65
20.65
def load_configuration(configuration):
    """Return a configuration dictionary.

    Accepts either a dictionary (returned unchanged) or a path to a
    JSON file, which is opened and parsed.

    :param configuration: dict, or path to a JSON file.
    :return: the configuration dictionary.
    """
    if isinstance(configuration, dict):
        return configuration
    # JSON is defined to be UTF-8; be explicit instead of relying on
    # the platform default encoding.
    with open(configuration, encoding="utf-8") as configfile:
        return json.load(configfile)
[ "def", "load_configuration", "(", "configuration", ")", ":", "if", "isinstance", "(", "configuration", ",", "dict", ")", ":", "return", "configuration", "else", ":", "with", "open", "(", "configuration", ")", "as", "configfile", ":", "return", "json", ".", "load", "(", "configfile", ")" ]
39.857143
7.571429
def create_pie_chart(self, snapshot, filename=''):
    """
    Create a pie chart that depicts the distribution of the allocated
    memory for a given `snapshot`. The chart is saved to `filename`.

    :param snapshot: snapshot whose per-class memory statistics are
        plotted.
    :param filename: path the chart image is written to.
    :return: an HTML img tag referencing the chart, a message if pylab
        is unavailable, or '' if the snapshot has no tracked data.
    """
    try:
        from pylab import figure, title, pie, axes, savefig
        from pylab import sum as pylab_sum
    except ImportError:
        return self.nopylab_msg % ("pie_chart")

    # Don't bother illustrating a pie without pieces.
    if not snapshot.tracked_total:
        return ''

    classlist = []
    sizelist = []
    # Only classes above 3% of the total get their own slice; the
    # remainder is aggregated into a leading 'Other' slice.
    for k, v in list(snapshot.classes.items()):
        if v['pct'] > 3.0:
            classlist.append(k)
            sizelist.append(v['sum'])
    sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
    classlist.insert(0, 'Other')

    # BUG FIX: the figure must be created *before* the title is set.
    # Previously title() was called first, attaching the title to the
    # implicit/previous figure, and it was lost when figure() created
    # a new one.
    figure(figsize=(8, 8))
    axes([0.1, 0.1, 0.8, 0.8])
    title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
    pie(sizelist, labels=classlist)
    savefig(filename, dpi=50)

    return self.chart_tag % (self.relative_path(filename))
[ "def", "create_pie_chart", "(", "self", ",", "snapshot", ",", "filename", "=", "''", ")", ":", "try", ":", "from", "pylab", "import", "figure", ",", "title", ",", "pie", ",", "axes", ",", "savefig", "from", "pylab", "import", "sum", "as", "pylab_sum", "except", "ImportError", ":", "return", "self", ".", "nopylab_msg", "%", "(", "\"pie_chart\"", ")", "# Don't bother illustrating a pie without pieces.", "if", "not", "snapshot", ".", "tracked_total", ":", "return", "''", "classlist", "=", "[", "]", "sizelist", "=", "[", "]", "for", "k", ",", "v", "in", "list", "(", "snapshot", ".", "classes", ".", "items", "(", ")", ")", ":", "if", "v", "[", "'pct'", "]", ">", "3.0", ":", "classlist", ".", "append", "(", "k", ")", "sizelist", ".", "append", "(", "v", "[", "'sum'", "]", ")", "sizelist", ".", "insert", "(", "0", ",", "snapshot", ".", "asizeof_total", "-", "pylab_sum", "(", "sizelist", ")", ")", "classlist", ".", "insert", "(", "0", ",", "'Other'", ")", "#sizelist = [x*0.01 for x in sizelist]", "title", "(", "\"Snapshot (%s) Memory Distribution\"", "%", "(", "snapshot", ".", "desc", ")", ")", "figure", "(", "figsize", "=", "(", "8", ",", "8", ")", ")", "axes", "(", "[", "0.1", ",", "0.1", ",", "0.8", ",", "0.8", "]", ")", "pie", "(", "sizelist", ",", "labels", "=", "classlist", ")", "savefig", "(", "filename", ",", "dpi", "=", "50", ")", "return", "self", ".", "chart_tag", "%", "(", "self", ".", "relative_path", "(", "filename", ")", ")" ]
35.96875
16.71875
def date_asn_block(self, ip, announce_date=None):
    """
    Get the ASN and the IP Block announcing the IP at a specific date.

    :param ip: IP address to search for
    :param announce_date: Date of the announcement

    :rtype: tuple

    .. code-block:: python

        (announce_date, asn, block)

    .. note::

        the returned announce_date might be different of the one
        given in parameter because some raw files are missing and
        we don't have the information. In this case, the nearest
        known date will be chosen.
    """
    assignations, announce_date, keys = self.run(ip, announce_date)
    # Scan for the first (most specific) block carrying an assignation.
    for pos, asn in enumerate(assignations):
        if asn is None:
            continue
        block = keys[pos]
        # The catch-all block does not count as a real announcement.
        if block != '0.0.0.0/0':
            return announce_date, asn, block
        break
    return None
[ "def", "date_asn_block", "(", "self", ",", "ip", ",", "announce_date", "=", "None", ")", ":", "assignations", ",", "announce_date", ",", "keys", "=", "self", ".", "run", "(", "ip", ",", "announce_date", ")", "pos", "=", "next", "(", "(", "i", "for", "i", ",", "j", "in", "enumerate", "(", "assignations", ")", "if", "j", "is", "not", "None", ")", ",", "None", ")", "if", "pos", "is", "not", "None", ":", "block", "=", "keys", "[", "pos", "]", "if", "block", "!=", "'0.0.0.0/0'", ":", "return", "announce_date", ",", "assignations", "[", "pos", "]", ",", "block", "return", "None" ]
35.925926
22.444444
def walker(top, names):
    """
    Walks a directory and records all packages and file extensions.
    """
    global packages, extensions
    # Skip any directory matching an exclusion pattern.
    for exc in excludes:
        if exc in top:
            return

    # Derive the dotted package path starting at the 'holoviews' root.
    package = top[top.rfind('holoviews'):].replace(os.path.sep, '.')
    packages.append(package)

    for name in names:
        ext = '.'.join(name.split('.')[1:])
        pattern = '*.%s' % ext
        if not ext or ext in excludes:
            continue
        if pattern not in extensions[package]:
            extensions[package].append(pattern)
[ "def", "walker", "(", "top", ",", "names", ")", ":", "global", "packages", ",", "extensions", "if", "any", "(", "exc", "in", "top", "for", "exc", "in", "excludes", ")", ":", "return", "package", "=", "top", "[", "top", ".", "rfind", "(", "'holoviews'", ")", ":", "]", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "'.'", ")", "packages", ".", "append", "(", "package", ")", "for", "name", "in", "names", ":", "ext", "=", "'.'", ".", "join", "(", "name", ".", "split", "(", "'.'", ")", "[", "1", ":", "]", ")", "ext_str", "=", "'*.%s'", "%", "ext", "if", "ext", "and", "ext", "not", "in", "excludes", "and", "ext_str", "not", "in", "extensions", "[", "package", "]", ":", "extensions", "[", "package", "]", ".", "append", "(", "ext_str", ")" ]
36.285714
14.142857
def transform(self, img, transformation, params):
    '''Apply transformations to the image.

    New transformations can be defined as methods::

        def do__transformationname(self, img, transformation, params):
            'returns new image with transformation applied'
            ...

        def new_size__transformationname(self, size, target_size, params):
            'dry run, returns a size of image if transformation is applied'
            ...
    '''
    # Transformations MUST be idempotent: due to how image upload is
    # implemented in iktomi.cms the same transformation can run twice —
    # once on upload after crop (when the TransientFile is created) and
    # again on object save (when the PersistentFile is created).
    handler = getattr(self, 'do__' + transformation)
    return handler(img, transformation, params)
[ "def", "transform", "(", "self", ",", "img", ",", "transformation", ",", "params", ")", ":", "# Transformations MUST be idempotent.", "# The limitation is caused by implementation of", "# image upload in iktomi.cms.", "# The transformation can be applied twice:", "# on image upload after crop (when TransientFile is created)", "# and on object save (when PersistentFile is created).", "method", "=", "getattr", "(", "self", ",", "'do__'", "+", "transformation", ")", "return", "method", "(", "img", ",", "transformation", ",", "params", ")" ]
41.954545
22.227273
def get_session(self, sid, namespace=None):
    """Return the user session for a client.

    :param sid: The session id of the client.
    :param namespace: The Socket.IO namespace. If this argument is
                      omitted the default namespace is used.

    The return value is a dictionary. Modifications made to this
    dictionary are not guaranteed to be preserved unless
    ``save_session()`` is called, or when the ``session`` context
    manager is used.
    """
    # Each namespace owns its own sub-dict inside the engine.io session;
    # create it on first access.
    eio_session = self.eio.get_session(sid)
    return eio_session.setdefault(namespace or '/', {})
[ "def", "get_session", "(", "self", ",", "sid", ",", "namespace", "=", "None", ")", ":", "namespace", "=", "namespace", "or", "'/'", "eio_session", "=", "self", ".", "eio", ".", "get_session", "(", "sid", ")", "return", "eio_session", ".", "setdefault", "(", "namespace", ",", "{", "}", ")" ]
42.733333
18.533333
def datasets_org_count(self):
    """Return the number of datasets of user's organizations."""
    from udata.models import Dataset  # Circular imports.
    total = 0
    for org in self.organizations:
        total += Dataset.objects(organization=org).visible().count()
    return total
[ "def", "datasets_org_count", "(", "self", ")", ":", "from", "udata", ".", "models", "import", "Dataset", "# Circular imports.", "return", "sum", "(", "Dataset", ".", "objects", "(", "organization", "=", "org", ")", ".", "visible", "(", ")", ".", "count", "(", ")", "for", "org", "in", "self", ".", "organizations", ")" ]
55.4
14.2
def _logsum(a_n): """Compute the log of a sum of exponentiated terms exp(a_n) in a numerically-stable manner. NOTE: this function has been deprecated in favor of logsumexp. Parameters ---------- a_n : np.ndarray, shape=(n_samples) a_n[n] is the nth exponential argument Returns ------- a_n : np.ndarray, shape=(n_samples) a_n[n] is the nth exponential argument Notes ----- _logsum a_n = max_arg + \log \sum_{n=1}^N \exp[a_n - max_arg] where max_arg = max_n a_n. This is mathematically (but not numerically) equivalent to _logsum a_n = \log \sum_{n=1}^N \exp[a_n] Example ------- >>> a_n = np.array([0.0, 1.0, 1.2], np.float64) >>> print('%.3e' % _logsum(a_n)) 1.951e+00 """ # Compute the maximum argument. max_log_term = np.max(a_n) # Compute the reduced terms. terms = np.exp(a_n - max_log_term) # Compute the log sum. log_sum = np.log(np.sum(terms)) + max_log_term return log_sum
[ "def", "_logsum", "(", "a_n", ")", ":", "# Compute the maximum argument.", "max_log_term", "=", "np", ".", "max", "(", "a_n", ")", "# Compute the reduced terms.", "terms", "=", "np", ".", "exp", "(", "a_n", "-", "max_log_term", ")", "# Compute the log sum.", "log_sum", "=", "np", ".", "log", "(", "np", ".", "sum", "(", "terms", ")", ")", "+", "max_log_term", "return", "log_sum" ]
23.756098
23.560976
def _activate_outbound(self):
    """switch on newly negotiated encryption parameters for outbound
    traffic"""
    # Tell the peer we are switching to the freshly negotiated keys.
    m = Message()
    m.add_byte(cMSG_NEWKEYS)
    self._send_message(m)
    block_size = self._cipher_info[self.local_cipher]["block-size"]
    # Key-derivation letters differ by direction: the server encrypts
    # outbound traffic with the "B" IV / "D" key, the client with "A"/"C".
    if self.server_mode:
        IV_out = self._compute_key("B", block_size)
        key_out = self._compute_key(
            "D", self._cipher_info[self.local_cipher]["key-size"]
        )
    else:
        IV_out = self._compute_key("A", block_size)
        key_out = self._compute_key(
            "C", self._cipher_info[self.local_cipher]["key-size"]
        )
    engine = self._get_cipher(
        self.local_cipher, key_out, IV_out, self._ENCRYPT
    )
    mac_size = self._mac_info[self.local_mac]["size"]
    mac_engine = self._mac_info[self.local_mac]["class"]
    # initial mac keys are done in the hash's natural size (not the
    # potentially truncated transmission size)
    if self.server_mode:
        mac_key = self._compute_key("F", mac_engine().digest_size)
    else:
        mac_key = self._compute_key("E", mac_engine().digest_size)
    # CTR-mode ciphers are flagged so the packetizer handles the counter.
    sdctr = self.local_cipher.endswith("-ctr")
    self.packetizer.set_outbound_cipher(
        engine, block_size, mac_engine, mac_size, mac_key, sdctr
    )
    compress_out = self._compression_info[self.local_compression][0]
    # Delayed compression (zlib@openssh.com) is only switched on after
    # user authentication has completed.
    if compress_out is not None and (
        self.local_compression != "zlib@openssh.com" or self.authenticated
    ):
        self._log(DEBUG, "Switching on outbound compression ...")
        self.packetizer.set_outbound_compressor(compress_out())
    if not self.packetizer.need_rekey():
        self.in_kex = False
    # we always expect to receive NEWKEYS now
    self._expect_packet(MSG_NEWKEYS)
[ "def", "_activate_outbound", "(", "self", ")", ":", "m", "=", "Message", "(", ")", "m", ".", "add_byte", "(", "cMSG_NEWKEYS", ")", "self", ".", "_send_message", "(", "m", ")", "block_size", "=", "self", ".", "_cipher_info", "[", "self", ".", "local_cipher", "]", "[", "\"block-size\"", "]", "if", "self", ".", "server_mode", ":", "IV_out", "=", "self", ".", "_compute_key", "(", "\"B\"", ",", "block_size", ")", "key_out", "=", "self", ".", "_compute_key", "(", "\"D\"", ",", "self", ".", "_cipher_info", "[", "self", ".", "local_cipher", "]", "[", "\"key-size\"", "]", ")", "else", ":", "IV_out", "=", "self", ".", "_compute_key", "(", "\"A\"", ",", "block_size", ")", "key_out", "=", "self", ".", "_compute_key", "(", "\"C\"", ",", "self", ".", "_cipher_info", "[", "self", ".", "local_cipher", "]", "[", "\"key-size\"", "]", ")", "engine", "=", "self", ".", "_get_cipher", "(", "self", ".", "local_cipher", ",", "key_out", ",", "IV_out", ",", "self", ".", "_ENCRYPT", ")", "mac_size", "=", "self", ".", "_mac_info", "[", "self", ".", "local_mac", "]", "[", "\"size\"", "]", "mac_engine", "=", "self", ".", "_mac_info", "[", "self", ".", "local_mac", "]", "[", "\"class\"", "]", "# initial mac keys are done in the hash's natural size (not the", "# potentially truncated transmission size)", "if", "self", ".", "server_mode", ":", "mac_key", "=", "self", ".", "_compute_key", "(", "\"F\"", ",", "mac_engine", "(", ")", ".", "digest_size", ")", "else", ":", "mac_key", "=", "self", ".", "_compute_key", "(", "\"E\"", ",", "mac_engine", "(", ")", ".", "digest_size", ")", "sdctr", "=", "self", ".", "local_cipher", ".", "endswith", "(", "\"-ctr\"", ")", "self", ".", "packetizer", ".", "set_outbound_cipher", "(", "engine", ",", "block_size", ",", "mac_engine", ",", "mac_size", ",", "mac_key", ",", "sdctr", ")", "compress_out", "=", "self", ".", "_compression_info", "[", "self", ".", "local_compression", "]", "[", "0", "]", "if", "compress_out", "is", "not", "None", "and", "(", "self", ".", 
"local_compression", "!=", "\"zlib@openssh.com\"", "or", "self", ".", "authenticated", ")", ":", "self", ".", "_log", "(", "DEBUG", ",", "\"Switching on outbound compression ...\"", ")", "self", ".", "packetizer", ".", "set_outbound_compressor", "(", "compress_out", "(", ")", ")", "if", "not", "self", ".", "packetizer", ".", "need_rekey", "(", ")", ":", "self", ".", "in_kex", "=", "False", "# we always expect to receive NEWKEYS now", "self", ".", "_expect_packet", "(", "MSG_NEWKEYS", ")" ]
44.357143
17.809524
def xenon_interactive_worker(
        machine, worker_config, input_queue=None, stderr_sink=None):
    """Uses Xenon to run a single remote interactive worker.

    Jobs are read from stdin, and results written to stdout.

    :param machine: Specification of the machine on which to run.
    :type machine: noodles.run.xenon.Machine

    :param worker_config: Job configuration. Specifies the command to
        be run remotely.
    :type worker_config: noodles.run.xenon.XenonJobConfig

    :param input_queue: queue supplying jobs to send to the worker;
        a fresh ``Queue`` is created when omitted.
    :param stderr_sink: sink receiving the remote job's stderr lines;
        defaults to echoing them locally, prefixed with the worker name.
    :returns: a ``Connection`` whose source yields deserialised results
        read from the job's stdout, whose sink feeds the input queue,
        and whose ``aux`` is the submitted Xenon job.
    """
    if input_queue is None:
        input_queue = Queue()

    registry = worker_config.registry()

    @pull_map
    def serialise(obj):
        """Serialise incoming objects, yielding strings."""
        if isinstance(obj, JobMessage):
            # Trace which node is being shipped to the worker.
            print('serializing:', str(obj.node), file=sys.stderr)
        return (registry.to_json(obj, host='scheduler') + '\n').encode()

    @pull_map
    def echo(line):
        # Debug trace of everything sent to the worker's stdin.
        print('{} input: {}'.format(worker_config.name, line),
              file=sys.stderr)
        return line

    def do_iterate(source):
        # Translate the queue's end marker into the worker protocol's
        # end-of-work message and stop.
        for x in source():
            if x is EndOfQueue:
                yield EndOfWork
                return
            yield x

    job, output_stream = machine.scheduler.submit_interactive_job(
        worker_config.xenon_job_description,
        echo(lambda: serialise(lambda: do_iterate(input_queue.source))))

    @sink_map
    def echo_stderr(text):
        """Print lines."""
        for line in text.split('\n'):
            print("{}: {}".format(worker_config.name, line),
                  file=sys.stderr)

    if stderr_sink is None:
        stderr_sink = echo_stderr()

    @pull
    def read_output(source):
        """Handle output from job, sending stderr data to given
        `stderr_sink`, passing on lines from stdout."""
        # Holds a partial line carried over between stdout chunks.
        line_buffer = ""
        try:
            for chunk in source():
                if chunk.stdout:
                    lines = chunk.stdout.decode().splitlines(keepends=True)
                    if not lines:
                        continue
                    # The first line may complete a previously buffered
                    # partial line.
                    if lines[0][-1] == '\n':
                        yield line_buffer + lines[0]
                        line_buffer = ""
                    else:
                        line_buffer += lines[0]
                    if len(lines) == 1:
                        continue
                    # Middle lines are guaranteed complete.
                    yield from lines[1:-1]
                    # The last line may be cut off; buffer it if so.
                    if lines[-1][-1] == '\n':
                        yield lines[-1]
                    else:
                        line_buffer = lines[-1]
                if chunk.stderr:
                    # Forward non-empty stderr lines to the sink.
                    for line in chunk.stderr.decode().split('\n'):
                        stripped_line = line.strip()
                        if stripped_line != '':
                            stderr_sink.send(stripped_line)
        except grpc.RpcError as e:
            # NOTE(review): the gRPC stream error is treated as normal
            # end-of-output here — presumably raised when the job
            # terminates; confirm no error cases need distinguishing.
            return

    @pull_map
    def deserialise(line):
        result = registry.from_json(line, deref=False)
        return result

    return Connection(
        lambda: deserialise(lambda: read_output(lambda: output_stream)),
        input_queue.sink, aux=job)
[ "def", "xenon_interactive_worker", "(", "machine", ",", "worker_config", ",", "input_queue", "=", "None", ",", "stderr_sink", "=", "None", ")", ":", "if", "input_queue", "is", "None", ":", "input_queue", "=", "Queue", "(", ")", "registry", "=", "worker_config", ".", "registry", "(", ")", "@", "pull_map", "def", "serialise", "(", "obj", ")", ":", "\"\"\"Serialise incoming objects, yielding strings.\"\"\"", "if", "isinstance", "(", "obj", ",", "JobMessage", ")", ":", "print", "(", "'serializing:'", ",", "str", "(", "obj", ".", "node", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "(", "registry", ".", "to_json", "(", "obj", ",", "host", "=", "'scheduler'", ")", "+", "'\\n'", ")", ".", "encode", "(", ")", "@", "pull_map", "def", "echo", "(", "line", ")", ":", "print", "(", "'{} input: {}'", ".", "format", "(", "worker_config", ".", "name", ",", "line", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "line", "def", "do_iterate", "(", "source", ")", ":", "for", "x", "in", "source", "(", ")", ":", "if", "x", "is", "EndOfQueue", ":", "yield", "EndOfWork", "return", "yield", "x", "job", ",", "output_stream", "=", "machine", ".", "scheduler", ".", "submit_interactive_job", "(", "worker_config", ".", "xenon_job_description", ",", "echo", "(", "lambda", ":", "serialise", "(", "lambda", ":", "do_iterate", "(", "input_queue", ".", "source", ")", ")", ")", ")", "@", "sink_map", "def", "echo_stderr", "(", "text", ")", ":", "\"\"\"Print lines.\"\"\"", "for", "line", "in", "text", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "\"{}: {}\"", ".", "format", "(", "worker_config", ".", "name", ",", "line", ")", ",", "file", "=", "sys", ".", "stderr", ")", "if", "stderr_sink", "is", "None", ":", "stderr_sink", "=", "echo_stderr", "(", ")", "@", "pull", "def", "read_output", "(", "source", ")", ":", "\"\"\"Handle output from job, sending stderr data to given\n `stderr_sink`, passing on lines from stdout.\"\"\"", "line_buffer", "=", "\"\"", "try", ":", 
"for", "chunk", "in", "source", "(", ")", ":", "if", "chunk", ".", "stdout", ":", "lines", "=", "chunk", ".", "stdout", ".", "decode", "(", ")", ".", "splitlines", "(", "keepends", "=", "True", ")", "if", "not", "lines", ":", "continue", "if", "lines", "[", "0", "]", "[", "-", "1", "]", "==", "'\\n'", ":", "yield", "line_buffer", "+", "lines", "[", "0", "]", "line_buffer", "=", "\"\"", "else", ":", "line_buffer", "+=", "lines", "[", "0", "]", "if", "len", "(", "lines", ")", "==", "1", ":", "continue", "yield", "from", "lines", "[", "1", ":", "-", "1", "]", "if", "lines", "[", "-", "1", "]", "[", "-", "1", "]", "==", "'\\n'", ":", "yield", "lines", "[", "-", "1", "]", "else", ":", "line_buffer", "=", "lines", "[", "-", "1", "]", "if", "chunk", ".", "stderr", ":", "for", "line", "in", "chunk", ".", "stderr", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ":", "stripped_line", "=", "line", ".", "strip", "(", ")", "if", "stripped_line", "!=", "''", ":", "stderr_sink", ".", "send", "(", "stripped_line", ")", "except", "grpc", ".", "RpcError", "as", "e", ":", "return", "@", "pull_map", "def", "deserialise", "(", "line", ")", ":", "result", "=", "registry", ".", "from_json", "(", "line", ",", "deref", "=", "False", ")", "return", "result", "return", "Connection", "(", "lambda", ":", "deserialise", "(", "lambda", ":", "read_output", "(", "lambda", ":", "output_stream", ")", ")", ",", "input_queue", ".", "sink", ",", "aux", "=", "job", ")" ]
30.814433
19.536082
def has_assessment_section_begun(self, assessment_section_id):
    """Tests if this assessment section has started.

    A section begins from the designated start time if a start time
    is defined. If no start time is defined the section may begin at
    any time. Assessment items cannot be accessed or submitted if the
    return for this method is ``false``.

    arg:    assessment_section_id (osid.id.Id): ``Id`` of the
            ``AssessmentSection``
    return: (boolean) - ``true`` if this assessment section has
            begun, ``false`` otherwise
    raise:  IllegalState - ``has_assessment_begun()`` is ``false or
            is_assessment_over()`` is ``true``
    raise:  NotFound - ``assessment_section_id`` is not found
    raise:  NullArgument - ``assessment_section_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Delegate to the taken assessment that owns this section.
    section = get_section_util(assessment_section_id, runtime=self._runtime)
    return section._assessment_taken.has_started()
[ "def", "has_assessment_section_begun", "(", "self", ",", "assessment_section_id", ")", ":", "return", "get_section_util", "(", "assessment_section_id", ",", "runtime", "=", "self", ".", "_runtime", ")", ".", "_assessment_taken", ".", "has_started", "(", ")" ]
51.173913
22.434783
def getSearchUrl(self, album, artist):
    """ See CoverSource.getSearchUrl. """
    # Build the ordered query parameters for a digital-music search.
    base_url = "%s/search" % (__class__.BASE_URL)
    params = collections.OrderedDict((
        ("search-alias", "digital-music"),
        ("field-keywords", " ".join((artist, album))),
        ("sort", "relevancerank"),
    ))
    return __class__.assembleUrl(base_url, params)
[ "def", "getSearchUrl", "(", "self", ",", "album", ",", "artist", ")", ":", "url", "=", "\"%s/search\"", "%", "(", "__class__", ".", "BASE_URL", ")", "params", "=", "collections", ".", "OrderedDict", "(", ")", "params", "[", "\"search-alias\"", "]", "=", "\"digital-music\"", "params", "[", "\"field-keywords\"", "]", "=", "\" \"", ".", "join", "(", "(", "artist", ",", "album", ")", ")", "params", "[", "\"sort\"", "]", "=", "\"relevancerank\"", "return", "__class__", ".", "assembleUrl", "(", "url", ",", "params", ")" ]
42.75
4.625
def have_cycle(graph:dict) -> frozenset:
    """Perform a topologic sort to detect any cycle.

    Return the set of unsortable nodes. If at least one item,
    then there is cycle in given graph.
    """
    # topological sort: repeatedly walk nodes whose predecessors are
    # all already walked, until no progress can be made.
    all_nodes = frozenset(it.chain(
        it.chain.from_iterable(graph.values()), graph.keys()))
    preds = reversed_graph(graph)  # succ: preds
    walked = set()  # walked nodes
    progress = True
    while progress:
        progress = False
        for node in all_nodes - walked:
            if not (preds.get(node, set()) - walked):
                walked.add(node)
                progress = True
    # Whatever could not be walked participates in (or depends on) a cycle.
    return frozenset(all_nodes - walked)
[ "def", "have_cycle", "(", "graph", ":", "dict", ")", "->", "frozenset", ":", "# topological sort", "walked", "=", "set", "(", ")", "# walked nodes", "nodes", "=", "frozenset", "(", "it", ".", "chain", "(", "it", ".", "chain", ".", "from_iterable", "(", "graph", ".", "values", "(", ")", ")", ",", "graph", ".", "keys", "(", ")", ")", ")", "# all nodes of the graph", "preds", "=", "reversed_graph", "(", "graph", ")", "# succ: preds", "last_walked_len", "=", "-", "1", "while", "last_walked_len", "!=", "len", "(", "walked", ")", ":", "last_walked_len", "=", "len", "(", "walked", ")", "for", "node", "in", "nodes", "-", "walked", ":", "if", "len", "(", "preds", ".", "get", "(", "node", ",", "set", "(", ")", ")", "-", "walked", ")", "==", "0", ":", "walked", ".", "add", "(", "node", ")", "return", "frozenset", "(", "nodes", "-", "walked", ")" ]
37.555556
14.388889
def match_msequence(self, tokens, item):
    """Matches a middle sequence."""
    series_type, head_matches, middle, _, last_matches = tokens
    head_len = len(head_matches)
    last_len = len(last_matches)

    self.add_check(
        "_coconut.isinstance({}, _coconut.abc.Sequence)".format(item))
    self.add_check(
        "_coconut.len({}) >= {}".format(item, head_len + last_len))

    if middle != wildcard:
        # Slice out whatever lies between the head and last matches;
        # an empty splice means `middle` captures the whole sequence.
        if head_len and last_len:
            splice = "[{}:{}]".format(head_len, -last_len)
        elif head_len:
            splice = "[{}:]".format(head_len)
        elif last_len:
            splice = "[:{}]".format(-last_len)
        else:
            splice = ""
        self.assign_to_series(middle, series_type, item + splice)

    self.match_all_in(head_matches, item)
    # Last matches are addressed with negative indices from the end.
    for i, match in enumerate(last_matches):
        self.match(match, "{}[{}]".format(item, i - last_len))
[ "def", "match_msequence", "(", "self", ",", "tokens", ",", "item", ")", ":", "series_type", ",", "head_matches", ",", "middle", ",", "_", ",", "last_matches", "=", "tokens", "self", ".", "add_check", "(", "\"_coconut.isinstance(\"", "+", "item", "+", "\", _coconut.abc.Sequence)\"", ")", "self", ".", "add_check", "(", "\"_coconut.len(\"", "+", "item", "+", "\") >= \"", "+", "str", "(", "len", "(", "head_matches", ")", "+", "len", "(", "last_matches", ")", ")", ")", "if", "middle", "!=", "wildcard", ":", "if", "len", "(", "head_matches", ")", "and", "len", "(", "last_matches", ")", ":", "splice", "=", "\"[\"", "+", "str", "(", "len", "(", "head_matches", ")", ")", "+", "\":\"", "+", "str", "(", "-", "len", "(", "last_matches", ")", ")", "+", "\"]\"", "elif", "len", "(", "head_matches", ")", ":", "splice", "=", "\"[\"", "+", "str", "(", "len", "(", "head_matches", ")", ")", "+", "\":]\"", "elif", "len", "(", "last_matches", ")", ":", "splice", "=", "\"[:\"", "+", "str", "(", "-", "len", "(", "last_matches", ")", ")", "+", "\"]\"", "else", ":", "splice", "=", "\"\"", "self", ".", "assign_to_series", "(", "middle", ",", "series_type", ",", "item", "+", "splice", ")", "self", ".", "match_all_in", "(", "head_matches", ",", "item", ")", "for", "i", ",", "match", "in", "enumerate", "(", "last_matches", ")", ":", "self", ".", "match", "(", "match", ",", "item", "+", "\"[\"", "+", "str", "(", "i", "-", "len", "(", "last_matches", ")", ")", "+", "\"]\"", ")" ]
54.388889
20.611111
def _migrate_resource(instance, migrations, version=''): """ Migrate a resource instance Subresources are migrated first, then the resource is recursively migrated :param instance: a perch.Document instance :param migrations: the migrations for a resource :param version: the current resource version to migrate """ if version not in migrations: return instance instance = _migrate_subresources( instance, migrations[version]['subresources'] ) for migration in migrations[version]['migrations']: instance = migration(instance) instance._resource['doc_version'] = unicode(migration.version) instance = _migrate_resource( instance, migrations, version=migration.version ) return instance
[ "def", "_migrate_resource", "(", "instance", ",", "migrations", ",", "version", "=", "''", ")", ":", "if", "version", "not", "in", "migrations", ":", "return", "instance", "instance", "=", "_migrate_subresources", "(", "instance", ",", "migrations", "[", "version", "]", "[", "'subresources'", "]", ")", "for", "migration", "in", "migrations", "[", "version", "]", "[", "'migrations'", "]", ":", "instance", "=", "migration", "(", "instance", ")", "instance", ".", "_resource", "[", "'doc_version'", "]", "=", "unicode", "(", "migration", ".", "version", ")", "instance", "=", "_migrate_resource", "(", "instance", ",", "migrations", ",", "version", "=", "migration", ".", "version", ")", "return", "instance" ]
27.689655
19.62069
def route_handler(context, content, pargs, kwargs): """ Route shortcode works a lot like rendering a page based on the url or route. This allows inserting in rendered HTML within another page. Activate it with the 'shortcodes' template filter. Within the content use the chill route shortcode: "[chill route /path/to/something/]" where the '[chill' and ']' are the shortcode starting and ending tags. And 'route' is this route handler that takes one argument which is the url. """ (node, rule_kw) = node_from_uri(pargs[0]) if node == None: return u"<!-- 404 '{0}' -->".format(pargs[0]) rule_kw.update( node ) values = rule_kw values.update( request.form.to_dict(flat=True) ) values.update( request.args.to_dict(flat=True) ) values['method'] = request.method noderequest = values.copy() noderequest.pop('node_id') noderequest.pop('name') noderequest.pop('value') rendered = render_node(node['id'], noderequest=noderequest, **values) if rendered: if not isinstance(rendered, (str, unicode, int, float)): # return a json string return encoder.encode(rendered) return rendered # Nothing to show, so nothing found return "<!-- 404 '{0}' -->".format(pargs[0])
[ "def", "route_handler", "(", "context", ",", "content", ",", "pargs", ",", "kwargs", ")", ":", "(", "node", ",", "rule_kw", ")", "=", "node_from_uri", "(", "pargs", "[", "0", "]", ")", "if", "node", "==", "None", ":", "return", "u\"<!-- 404 '{0}' -->\"", ".", "format", "(", "pargs", "[", "0", "]", ")", "rule_kw", ".", "update", "(", "node", ")", "values", "=", "rule_kw", "values", ".", "update", "(", "request", ".", "form", ".", "to_dict", "(", "flat", "=", "True", ")", ")", "values", ".", "update", "(", "request", ".", "args", ".", "to_dict", "(", "flat", "=", "True", ")", ")", "values", "[", "'method'", "]", "=", "request", ".", "method", "noderequest", "=", "values", ".", "copy", "(", ")", "noderequest", ".", "pop", "(", "'node_id'", ")", "noderequest", ".", "pop", "(", "'name'", ")", "noderequest", ".", "pop", "(", "'value'", ")", "rendered", "=", "render_node", "(", "node", "[", "'id'", "]", ",", "noderequest", "=", "noderequest", ",", "*", "*", "values", ")", "if", "rendered", ":", "if", "not", "isinstance", "(", "rendered", ",", "(", "str", ",", "unicode", ",", "int", ",", "float", ")", ")", ":", "# return a json string", "return", "encoder", ".", "encode", "(", "rendered", ")", "return", "rendered", "# Nothing to show, so nothing found", "return", "\"<!-- 404 '{0}' -->\"", ".", "format", "(", "pargs", "[", "0", "]", ")" ]
35.166667
20.833333
def get_batch_header_values(self): """Scrape the "Batch Header" values from the original input file """ lines = self.getOriginalFile().data.splitlines() reader = csv.reader(lines) batch_headers = batch_data = [] for row in reader: if not any(row): continue if row[0].strip().lower() == 'batch header': batch_headers = [x.strip() for x in row][1:] continue if row[0].strip().lower() == 'batch data': batch_data = [x.strip() for x in row][1:] break if not (batch_data or batch_headers): return None if not (batch_data and batch_headers): self.error("Missing batch headers or data") return None # Inject us out of here values = dict(zip(batch_headers, batch_data)) return values
[ "def", "get_batch_header_values", "(", "self", ")", ":", "lines", "=", "self", ".", "getOriginalFile", "(", ")", ".", "data", ".", "splitlines", "(", ")", "reader", "=", "csv", ".", "reader", "(", "lines", ")", "batch_headers", "=", "batch_data", "=", "[", "]", "for", "row", "in", "reader", ":", "if", "not", "any", "(", "row", ")", ":", "continue", "if", "row", "[", "0", "]", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'batch header'", ":", "batch_headers", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "row", "]", "[", "1", ":", "]", "continue", "if", "row", "[", "0", "]", ".", "strip", "(", ")", ".", "lower", "(", ")", "==", "'batch data'", ":", "batch_data", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "row", "]", "[", "1", ":", "]", "break", "if", "not", "(", "batch_data", "or", "batch_headers", ")", ":", "return", "None", "if", "not", "(", "batch_data", "and", "batch_headers", ")", ":", "self", ".", "error", "(", "\"Missing batch headers or data\"", ")", "return", "None", "# Inject us out of here", "values", "=", "dict", "(", "zip", "(", "batch_headers", ",", "batch_data", ")", ")", "return", "values" ]
38.826087
11.913043
def validateRegexStr(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None): """Raises ValidationException if value can't be used as a regular expression string. Returns the value argument as a regex object. If you want to check if a string matches a regular expression, call validateRegex(). * value (str): The value being validated as a regular expression string. * regex (str, regex): The regular expression to match the value against. * flags (int): Identical to the flags argument in re.compile(). Pass re.VERBOSE et al here. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. >>> import pysimplevalidate as pysv >>> pysv.validateRegexStr('(cat)|(dog)') re.compile('(cat)|(dog)') >>> pysv.validateRegexStr('"(.*?)"') re.compile('"(.*?)"') >>> pysv.validateRegexStr('"(.*?"') Traceback (most recent call last): ... pysimplevalidate.ValidationException: '"(.*?"' is not a valid regular expression: missing ), unterminated subpattern at position 1 """ # TODO - I'd be nice to check regexes in other languages, i.e. JS and Perl. 
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes) returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg) if returnNow: return value try: return re.compile(value) except Exception as ex: _raiseValidationException(_('%r is not a valid regular expression: %s') % (_errstr(value), ex), excMsg)
[ "def", "validateRegexStr", "(", "value", ",", "blank", "=", "False", ",", "strip", "=", "None", ",", "allowlistRegexes", "=", "None", ",", "blocklistRegexes", "=", "None", ",", "excMsg", "=", "None", ")", ":", "# TODO - I'd be nice to check regexes in other languages, i.e. JS and Perl.", "_validateGenericParameters", "(", "blank", "=", "blank", ",", "strip", "=", "strip", ",", "allowlistRegexes", "=", "allowlistRegexes", ",", "blocklistRegexes", "=", "blocklistRegexes", ")", "returnNow", ",", "value", "=", "_prevalidationCheck", "(", "value", ",", "blank", ",", "strip", ",", "allowlistRegexes", ",", "blocklistRegexes", ",", "excMsg", ")", "if", "returnNow", ":", "return", "value", "try", ":", "return", "re", ".", "compile", "(", "value", ")", "except", "Exception", "as", "ex", ":", "_raiseValidationException", "(", "_", "(", "'%r is not a valid regular expression: %s'", ")", "%", "(", "_errstr", "(", "value", ")", ",", "ex", ")", ",", "excMsg", ")" ]
55.842105
37.289474
def runEditor(self, dir=os.getcwd(), debug=False, args=[]): """ Runs the editor for the Unreal project in the specified directory (or without a project if dir is None) """ projectFile = self.getProjectDescriptor(dir) if dir is not None else '' extraFlags = ['-debug'] + args if debug == True else args Utility.run([self.getEditorBinary(True), projectFile, '-stdout', '-FullStdOutLogOutput'] + extraFlags, raiseOnError=True)
[ "def", "runEditor", "(", "self", ",", "dir", "=", "os", ".", "getcwd", "(", ")", ",", "debug", "=", "False", ",", "args", "=", "[", "]", ")", ":", "projectFile", "=", "self", ".", "getProjectDescriptor", "(", "dir", ")", "if", "dir", "is", "not", "None", "else", "''", "extraFlags", "=", "[", "'-debug'", "]", "+", "args", "if", "debug", "==", "True", "else", "args", "Utility", ".", "run", "(", "[", "self", ".", "getEditorBinary", "(", "True", ")", ",", "projectFile", ",", "'-stdout'", ",", "'-FullStdOutLogOutput'", "]", "+", "extraFlags", ",", "raiseOnError", "=", "True", ")" ]
61.285714
31.285714
def list(self, search_from=None, search_to=None, limit=None): """List all the objects saved elasticsearch. :param search_from: start offset of objects to return. :param search_to: last offset of objects to return. :param limit: max number of values to be returned. :return: list with transactions. """ self.logger.debug('elasticsearch::list') body = { 'sort': [ {"_id": "asc"}, ], 'query': { 'match_all': {} } } if search_from: body['from'] = search_from if search_to: body['size'] = search_to - search_from if limit: body['size'] = limit page = self.driver._es.search( index=self.driver._index, doc_type='_doc', body=body ) object_list = [] for x in page['hits']['hits']: object_list.append(x['_source']) return object_list
[ "def", "list", "(", "self", ",", "search_from", "=", "None", ",", "search_to", "=", "None", ",", "limit", "=", "None", ")", ":", "self", ".", "logger", ".", "debug", "(", "'elasticsearch::list'", ")", "body", "=", "{", "'sort'", ":", "[", "{", "\"_id\"", ":", "\"asc\"", "}", ",", "]", ",", "'query'", ":", "{", "'match_all'", ":", "{", "}", "}", "}", "if", "search_from", ":", "body", "[", "'from'", "]", "=", "search_from", "if", "search_to", ":", "body", "[", "'size'", "]", "=", "search_to", "-", "search_from", "if", "limit", ":", "body", "[", "'size'", "]", "=", "limit", "page", "=", "self", ".", "driver", ".", "_es", ".", "search", "(", "index", "=", "self", ".", "driver", ".", "_index", ",", "doc_type", "=", "'_doc'", ",", "body", "=", "body", ")", "object_list", "=", "[", "]", "for", "x", "in", "page", "[", "'hits'", "]", "[", "'hits'", "]", ":", "object_list", ".", "append", "(", "x", "[", "'_source'", "]", ")", "return", "object_list" ]
29.382353
16.382353
def login(ctx): """Add an API key (saved in ~/.onecodex)""" base_url = os.environ.get("ONE_CODEX_API_BASE", "https://app.onecodex.com") if not ctx.obj["API_KEY"]: _login(base_url) else: email = _login(base_url, api_key=ctx.obj["API_KEY"]) ocx = Api(api_key=ctx.obj["API_KEY"], telemetry=ctx.obj["TELEMETRY"]) # TODO: This should be protected or built in as a first class resource # with, e.g., connection error catching (it's not part of our formally documeted API at the moment) if ocx._client.Account.instances()["email"] != email: click.echo("Your login credentials do not match the provided email!", err=True) _remove_creds() ctx.exit(1)
[ "def", "login", "(", "ctx", ")", ":", "base_url", "=", "os", ".", "environ", ".", "get", "(", "\"ONE_CODEX_API_BASE\"", ",", "\"https://app.onecodex.com\"", ")", "if", "not", "ctx", ".", "obj", "[", "\"API_KEY\"", "]", ":", "_login", "(", "base_url", ")", "else", ":", "email", "=", "_login", "(", "base_url", ",", "api_key", "=", "ctx", ".", "obj", "[", "\"API_KEY\"", "]", ")", "ocx", "=", "Api", "(", "api_key", "=", "ctx", ".", "obj", "[", "\"API_KEY\"", "]", ",", "telemetry", "=", "ctx", ".", "obj", "[", "\"TELEMETRY\"", "]", ")", "# TODO: This should be protected or built in as a first class resource", "# with, e.g., connection error catching (it's not part of our formally documeted API at the moment)", "if", "ocx", ".", "_client", ".", "Account", ".", "instances", "(", ")", "[", "\"email\"", "]", "!=", "email", ":", "click", ".", "echo", "(", "\"Your login credentials do not match the provided email!\"", ",", "err", "=", "True", ")", "_remove_creds", "(", ")", "ctx", ".", "exit", "(", "1", ")" ]
48.533333
28.333333
def import_gene_history(file_handle, tax_id, tax_id_col, id_col, symbol_col): """ Read input gene history file into the database. Note that the arguments tax_id_col, id_col and symbol_col have been converted into 0-based column indexes. """ # Make sure that tax_id is not "" or " " if not tax_id or tax_id.isspace(): raise Exception("Input tax_id is blank") # Make sure that tax_id exists in Organism table in the database. try: organism = Organism.objects.get(taxonomy_id=tax_id) except Organism.DoesNotExist: raise Exception('Input tax_id %s does NOT exist in Organism table. ' 'Please add it into Organism table first.' % tax_id) if tax_id_col < 0 or id_col < 0 or symbol_col < 0: raise Exception( 'tax_id_col, id_col and symbol_col must be positive integers') for line_index, line in enumerate(file_handle): if line.startswith('#'): # Skip comment lines. continue fields = line.rstrip().split('\t') # Check input column numbers. chk_col_numbers(line_index + 1, len(fields), tax_id_col, id_col, symbol_col) # Skip lines whose tax_id's do not match input tax_id. if tax_id != fields[tax_id_col]: continue entrez_id = fields[id_col] # If the gene already exists in database, set its "obsolete" attribute # to True; otherwise create a new obsolete Gene record in database. try: gene = Gene.objects.get(entrezid=entrez_id) if not gene.obsolete: gene.obsolete = True gene.save() except Gene.DoesNotExist: Gene.objects.create(entrezid=entrez_id, organism=organism, systematic_name=fields[symbol_col], obsolete=True)
[ "def", "import_gene_history", "(", "file_handle", ",", "tax_id", ",", "tax_id_col", ",", "id_col", ",", "symbol_col", ")", ":", "# Make sure that tax_id is not \"\" or \" \"", "if", "not", "tax_id", "or", "tax_id", ".", "isspace", "(", ")", ":", "raise", "Exception", "(", "\"Input tax_id is blank\"", ")", "# Make sure that tax_id exists in Organism table in the database.", "try", ":", "organism", "=", "Organism", ".", "objects", ".", "get", "(", "taxonomy_id", "=", "tax_id", ")", "except", "Organism", ".", "DoesNotExist", ":", "raise", "Exception", "(", "'Input tax_id %s does NOT exist in Organism table. '", "'Please add it into Organism table first.'", "%", "tax_id", ")", "if", "tax_id_col", "<", "0", "or", "id_col", "<", "0", "or", "symbol_col", "<", "0", ":", "raise", "Exception", "(", "'tax_id_col, id_col and symbol_col must be positive integers'", ")", "for", "line_index", ",", "line", "in", "enumerate", "(", "file_handle", ")", ":", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "# Skip comment lines.", "continue", "fields", "=", "line", ".", "rstrip", "(", ")", ".", "split", "(", "'\\t'", ")", "# Check input column numbers.", "chk_col_numbers", "(", "line_index", "+", "1", ",", "len", "(", "fields", ")", ",", "tax_id_col", ",", "id_col", ",", "symbol_col", ")", "# Skip lines whose tax_id's do not match input tax_id.", "if", "tax_id", "!=", "fields", "[", "tax_id_col", "]", ":", "continue", "entrez_id", "=", "fields", "[", "id_col", "]", "# If the gene already exists in database, set its \"obsolete\" attribute", "# to True; otherwise create a new obsolete Gene record in database.", "try", ":", "gene", "=", "Gene", ".", "objects", ".", "get", "(", "entrezid", "=", "entrez_id", ")", "if", "not", "gene", ".", "obsolete", ":", "gene", ".", "obsolete", "=", "True", "gene", ".", "save", "(", ")", "except", "Gene", ".", "DoesNotExist", ":", "Gene", ".", "objects", ".", "create", "(", "entrezid", "=", "entrez_id", ",", "organism", "=", "organism", ",", 
"systematic_name", "=", "fields", "[", "symbol_col", "]", ",", "obsolete", "=", "True", ")" ]
39.553191
20.106383
def setPANID(self, xPAN): """set Thread Network PAN ID Args: xPAN: a given PAN ID in hex format Returns: True: successful to set the Thread Network PAN ID False: fail to set the Thread Network PAN ID """ print '%s call setPANID' % self.port print xPAN panid = '' try: if not isinstance(xPAN, str): panid = str(hex(xPAN)) print panid cmd = WPANCTL_CMD + 'setprop -s Network:PANID %s' % panid datasetCmd = WPANCTL_CMD + 'setprop Dataset:PanId %s' % panid self.hasActiveDatasetToCommit = True return self.__sendCommand(cmd)[0] != 'Fail' and self.__sendCommand(datasetCmd)[0] != 'Fail' except Exception, e: ModuleHelper.WriteIntoDebugLogger('setPANID() Error: ' + str(e))
[ "def", "setPANID", "(", "self", ",", "xPAN", ")", ":", "print", "'%s call setPANID'", "%", "self", ".", "port", "print", "xPAN", "panid", "=", "''", "try", ":", "if", "not", "isinstance", "(", "xPAN", ",", "str", ")", ":", "panid", "=", "str", "(", "hex", "(", "xPAN", ")", ")", "print", "panid", "cmd", "=", "WPANCTL_CMD", "+", "'setprop -s Network:PANID %s'", "%", "panid", "datasetCmd", "=", "WPANCTL_CMD", "+", "'setprop Dataset:PanId %s'", "%", "panid", "self", ".", "hasActiveDatasetToCommit", "=", "True", "return", "self", ".", "__sendCommand", "(", "cmd", ")", "[", "0", "]", "!=", "'Fail'", "and", "self", ".", "__sendCommand", "(", "datasetCmd", ")", "[", "0", "]", "!=", "'Fail'", "except", "Exception", ",", "e", ":", "ModuleHelper", ".", "WriteIntoDebugLogger", "(", "'setPANID() Error: '", "+", "str", "(", "e", ")", ")" ]
35.791667
20.916667
def main(): """Primary entry point; we supply '/', but the class brings '/metadata'""" app = apikit.APIFlask(name="Hello", version="0.0.1", repository="http://example.repo", description="Hello World App") # pylint: disable=unused-variable @app.route("/") def hello_world(): """The main route.""" return "Hello, World!" app.run()
[ "def", "main", "(", ")", ":", "app", "=", "apikit", ".", "APIFlask", "(", "name", "=", "\"Hello\"", ",", "version", "=", "\"0.0.1\"", ",", "repository", "=", "\"http://example.repo\"", ",", "description", "=", "\"Hello World App\"", ")", "# pylint: disable=unused-variable", "@", "app", ".", "route", "(", "\"/\"", ")", "def", "hello_world", "(", ")", ":", "\"\"\"The main route.\"\"\"", "return", "\"Hello, World!\"", "app", ".", "run", "(", ")" ]
31.285714
16.357143
def raise_for_missing_namespace(self, line: str, position: int, namespace: str, name: str) -> None: """Raise an exception if the namespace is not defined.""" if not self.has_namespace(namespace): raise UndefinedNamespaceWarning(self.get_line_number(), line, position, namespace, name)
[ "def", "raise_for_missing_namespace", "(", "self", ",", "line", ":", "str", ",", "position", ":", "int", ",", "namespace", ":", "str", ",", "name", ":", "str", ")", "->", "None", ":", "if", "not", "self", ".", "has_namespace", "(", "namespace", ")", ":", "raise", "UndefinedNamespaceWarning", "(", "self", ".", "get_line_number", "(", ")", ",", "line", ",", "position", ",", "namespace", ",", "name", ")" ]
77.25
31
def _canon_decode_tag(self, value, mn_tags): """ Decode Canon MakerNote tag based on offset within tag. See http://www.burren.cx/david/canon.html by David Burren """ for i in range(1, len(value)): tag = mn_tags.get(i, ('Unknown', )) name = tag[0] if len(tag) > 1: val = tag[1].get(value[i], 'Unknown') else: val = value[i] try: logger.debug(" %s %s %s", i, name, hex(value[i])) except TypeError: logger.debug(" %s %s %s", i, name, value[i]) # it's not a real IFD Tag but we fake one to make everybody # happy. this will have a "proprietary" type self.tags['MakerNote ' + name] = IfdTag(str(val), None, 0, None, None, None)
[ "def", "_canon_decode_tag", "(", "self", ",", "value", ",", "mn_tags", ")", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "value", ")", ")", ":", "tag", "=", "mn_tags", ".", "get", "(", "i", ",", "(", "'Unknown'", ",", ")", ")", "name", "=", "tag", "[", "0", "]", "if", "len", "(", "tag", ")", ">", "1", ":", "val", "=", "tag", "[", "1", "]", ".", "get", "(", "value", "[", "i", "]", ",", "'Unknown'", ")", "else", ":", "val", "=", "value", "[", "i", "]", "try", ":", "logger", ".", "debug", "(", "\" %s %s %s\"", ",", "i", ",", "name", ",", "hex", "(", "value", "[", "i", "]", ")", ")", "except", "TypeError", ":", "logger", ".", "debug", "(", "\" %s %s %s\"", ",", "i", ",", "name", ",", "value", "[", "i", "]", ")", "# it's not a real IFD Tag but we fake one to make everybody", "# happy. this will have a \"proprietary\" type", "self", ".", "tags", "[", "'MakerNote '", "+", "name", "]", "=", "IfdTag", "(", "str", "(", "val", ")", ",", "None", ",", "0", ",", "None", ",", "None", ",", "None", ")" ]
39.409091
18.136364
def from_csv(cls, path): """ Get box vectors from comma-separated values in file `path`. The csv file must containt only one line, which in turn can contain three values (orthogonal vectors) or nine values (triclinic box). The values should be in nanometers. Parameters ---------- path : str Path to CSV file Returns ------- vectors : simtk.unit.Quantity([3, 3], unit=nanometers """ with open(path) as f: fields = map(float, next(f).split(',')) if len(fields) == 3: return u.Quantity([[fields[0], 0, 0], [0, fields[1], 0], [0, 0, fields[2]]], unit=u.nanometers) elif len(fields) == 9: return u.Quantity([fields[0:3], fields[3:6], fields[6:9]], unit=u.nanometers) else: raise ValueError('This type of CSV is not supported. Please ' 'provide a comma-separated list of three or nine ' 'floats in a single-line file.')
[ "def", "from_csv", "(", "cls", ",", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "fields", "=", "map", "(", "float", ",", "next", "(", "f", ")", ".", "split", "(", "','", ")", ")", "if", "len", "(", "fields", ")", "==", "3", ":", "return", "u", ".", "Quantity", "(", "[", "[", "fields", "[", "0", "]", ",", "0", ",", "0", "]", ",", "[", "0", ",", "fields", "[", "1", "]", ",", "0", "]", ",", "[", "0", ",", "0", ",", "fields", "[", "2", "]", "]", "]", ",", "unit", "=", "u", ".", "nanometers", ")", "elif", "len", "(", "fields", ")", "==", "9", ":", "return", "u", ".", "Quantity", "(", "[", "fields", "[", "0", ":", "3", "]", ",", "fields", "[", "3", ":", "6", "]", ",", "fields", "[", "6", ":", "9", "]", "]", ",", "unit", "=", "u", ".", "nanometers", ")", "else", ":", "raise", "ValueError", "(", "'This type of CSV is not supported. Please '", "'provide a comma-separated list of three or nine '", "'floats in a single-line file.'", ")" ]
36.15625
20.71875
def ScanForStorageMediaImage(self, source_path_spec): """Scans the path specification for a supported storage media image format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: storage media image path specification or None if no supported storage media image type was found. Raises: BackEndError: if the source cannot be scanned or more than one storage media image type is found. """ try: type_indicators = analyzer.Analyzer.GetStorageMediaImageTypeIndicators( source_path_spec, resolver_context=self._resolver_context) except RuntimeError as exception: raise errors.BackEndError(( 'Unable to process source path specification with error: ' '{0!s}').format(exception)) if not type_indicators: # The RAW storage media image type cannot be detected based on # a signature so we try to detect it based on common file naming schemas. file_system = resolver.Resolver.OpenFileSystem( source_path_spec, resolver_context=self._resolver_context) raw_path_spec = path_spec_factory.Factory.NewPathSpec( definitions.TYPE_INDICATOR_RAW, parent=source_path_spec) try: # The RAW glob function will raise a PathSpecError if the path # specification is unsuitable for globbing. glob_results = raw.RawGlobPathSpec(file_system, raw_path_spec) except errors.PathSpecError: glob_results = None file_system.Close() if not glob_results: return None return raw_path_spec if len(type_indicators) > 1: raise errors.BackEndError( 'Unsupported source found more than one storage media image types.') return path_spec_factory.Factory.NewPathSpec( type_indicators[0], parent=source_path_spec)
[ "def", "ScanForStorageMediaImage", "(", "self", ",", "source_path_spec", ")", ":", "try", ":", "type_indicators", "=", "analyzer", ".", "Analyzer", ".", "GetStorageMediaImageTypeIndicators", "(", "source_path_spec", ",", "resolver_context", "=", "self", ".", "_resolver_context", ")", "except", "RuntimeError", "as", "exception", ":", "raise", "errors", ".", "BackEndError", "(", "(", "'Unable to process source path specification with error: '", "'{0!s}'", ")", ".", "format", "(", "exception", ")", ")", "if", "not", "type_indicators", ":", "# The RAW storage media image type cannot be detected based on", "# a signature so we try to detect it based on common file naming schemas.", "file_system", "=", "resolver", ".", "Resolver", ".", "OpenFileSystem", "(", "source_path_spec", ",", "resolver_context", "=", "self", ".", "_resolver_context", ")", "raw_path_spec", "=", "path_spec_factory", ".", "Factory", ".", "NewPathSpec", "(", "definitions", ".", "TYPE_INDICATOR_RAW", ",", "parent", "=", "source_path_spec", ")", "try", ":", "# The RAW glob function will raise a PathSpecError if the path", "# specification is unsuitable for globbing.", "glob_results", "=", "raw", ".", "RawGlobPathSpec", "(", "file_system", ",", "raw_path_spec", ")", "except", "errors", ".", "PathSpecError", ":", "glob_results", "=", "None", "file_system", ".", "Close", "(", ")", "if", "not", "glob_results", ":", "return", "None", "return", "raw_path_spec", "if", "len", "(", "type_indicators", ")", ">", "1", ":", "raise", "errors", ".", "BackEndError", "(", "'Unsupported source found more than one storage media image types.'", ")", "return", "path_spec_factory", ".", "Factory", ".", "NewPathSpec", "(", "type_indicators", "[", "0", "]", ",", "parent", "=", "source_path_spec", ")" ]
36.34
23.38
def url_converter(self, *args, **kwargs): """ Return the custom URL converter for the given file name. """ upstream_converter = super(PatchedManifestStaticFilesStorage, self).url_converter(*args, **kwargs) def converter(matchobj): try: upstream_converter(matchobj) except ValueError: # e.g. a static file 'static/media/logo.6a30f15f.svg' could not be found # because the upstream converter stripped 'static/' from the path matched, url = matchobj.groups() return matched return converter
[ "def", "url_converter", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "upstream_converter", "=", "super", "(", "PatchedManifestStaticFilesStorage", ",", "self", ")", ".", "url_converter", "(", "*", "args", ",", "*", "*", "kwargs", ")", "def", "converter", "(", "matchobj", ")", ":", "try", ":", "upstream_converter", "(", "matchobj", ")", "except", "ValueError", ":", "# e.g. a static file 'static/media/logo.6a30f15f.svg' could not be found", "# because the upstream converter stripped 'static/' from the path", "matched", ",", "url", "=", "matchobj", ".", "groups", "(", ")", "return", "matched", "return", "converter" ]
39.125
21.25
def json_get(json: JsonValue, path: str, expected_type: Any = ANY) -> Any: """Get a JSON value by path, optionally checking its type. >>> j = {"foo": {"num": 3.4, "s": "Text"}, "arr": [10, 20, 30]} >>> json_get(j, "/foo/num") 3.4 >>> json_get(j, "/arr[1]") 20 Raise ValueError if the path is not found: >>> json_get(j, "/foo/unknown") Traceback (most recent call last): ... ValueError: JSON path '/foo/unknown' not found Raise TypeError if the path contains a non-object element: >>> json_get(j, "/foo/num/bar") Traceback (most recent call last): ... TypeError: JSON path '/foo/num' is not an object Or a non-array element: >>> json_get(j, "/foo[2]") Traceback (most recent call last): ... TypeError: JSON path '/foo' is not an array Raise an IndexError if the array index is out of bounds: >>> json_get(j, "/arr[10]") Traceback (most recent call last): ... IndexError: JSON array '/arr' too small (3 <= 10) Recognized types are: str, int, float, bool, list, dict, and None. TypeError is raised if the type does not match. >>> json_get(j, "/foo/num", str) Traceback (most recent call last): ... TypeError: wrong JSON type str != float float will match any number, int will only match numbers without a fractional part. >>> json_get(j, "/foo/num", float) 3.4 >>> json_get(j, "/foo/num", int) Traceback (most recent call last): ... 
TypeError: wrong JSON type int != float """ elements = _parse_json_path(path) current = json current_path = "" for i, element in enumerate(elements): if isinstance(element, str): if not isinstance(current, dict): msg = "JSON path '{}' is not an object".format(current_path) raise TypeError(msg) from None if element not in current: raise ValueError("JSON path '{}' not found".format(path)) current_path += "/" + element current = current[element] else: if not isinstance(current, list): msg = "JSON path '{}' is not an array".format(current_path) raise TypeError(msg) from None if element >= len(current): msg = "JSON array '{}' too small ({} <= {})".format( current_path, len(current), element) raise IndexError(msg) current_path += "[{}]".format(i) current = current[element] if expected_type != ANY: assert_json_type(current, cast(JsonType, expected_type)) return current
[ "def", "json_get", "(", "json", ":", "JsonValue", ",", "path", ":", "str", ",", "expected_type", ":", "Any", "=", "ANY", ")", "->", "Any", ":", "elements", "=", "_parse_json_path", "(", "path", ")", "current", "=", "json", "current_path", "=", "\"\"", "for", "i", ",", "element", "in", "enumerate", "(", "elements", ")", ":", "if", "isinstance", "(", "element", ",", "str", ")", ":", "if", "not", "isinstance", "(", "current", ",", "dict", ")", ":", "msg", "=", "\"JSON path '{}' is not an object\"", ".", "format", "(", "current_path", ")", "raise", "TypeError", "(", "msg", ")", "from", "None", "if", "element", "not", "in", "current", ":", "raise", "ValueError", "(", "\"JSON path '{}' not found\"", ".", "format", "(", "path", ")", ")", "current_path", "+=", "\"/\"", "+", "element", "current", "=", "current", "[", "element", "]", "else", ":", "if", "not", "isinstance", "(", "current", ",", "list", ")", ":", "msg", "=", "\"JSON path '{}' is not an array\"", ".", "format", "(", "current_path", ")", "raise", "TypeError", "(", "msg", ")", "from", "None", "if", "element", ">=", "len", "(", "current", ")", ":", "msg", "=", "\"JSON array '{}' too small ({} <= {})\"", ".", "format", "(", "current_path", ",", "len", "(", "current", ")", ",", "element", ")", "raise", "IndexError", "(", "msg", ")", "current_path", "+=", "\"[{}]\"", ".", "format", "(", "i", ")", "current", "=", "current", "[", "element", "]", "if", "expected_type", "!=", "ANY", ":", "assert_json_type", "(", "current", ",", "cast", "(", "JsonType", ",", "expected_type", ")", ")", "return", "current" ]
31.731707
18.597561
def put(self, id, project_id=None): """put.""" result = db.session.query(Result).filter_by(id=id).first() if result is None: response = jsonify({ 'result': None, 'message': 'No interface defined for URL.' }) return response, 404 request_json = request.get_json() request_result = request_json.get('result') name = request_result.get('name', None) if name is not None: result.name = name is_unregistered = request_result.get('isUnregistered', None) if is_unregistered is not None: result.is_unregistered = is_unregistered db.session.add(result) db.session.commit() return jsonify({'result': result.serialize})
[ "def", "put", "(", "self", ",", "id", ",", "project_id", "=", "None", ")", ":", "result", "=", "db", ".", "session", ".", "query", "(", "Result", ")", ".", "filter_by", "(", "id", "=", "id", ")", ".", "first", "(", ")", "if", "result", "is", "None", ":", "response", "=", "jsonify", "(", "{", "'result'", ":", "None", ",", "'message'", ":", "'No interface defined for URL.'", "}", ")", "return", "response", ",", "404", "request_json", "=", "request", ".", "get_json", "(", ")", "request_result", "=", "request_json", ".", "get", "(", "'result'", ")", "name", "=", "request_result", ".", "get", "(", "'name'", ",", "None", ")", "if", "name", "is", "not", "None", ":", "result", ".", "name", "=", "name", "is_unregistered", "=", "request_result", ".", "get", "(", "'isUnregistered'", ",", "None", ")", "if", "is_unregistered", "is", "not", "None", ":", "result", ".", "is_unregistered", "=", "is_unregistered", "db", ".", "session", ".", "add", "(", "result", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "jsonify", "(", "{", "'result'", ":", "result", ".", "serialize", "}", ")" ]
31.75
18.25
def cmd_help(self, *args): """help [cmd] Get general help, or help for command `cmd`. """ if len(args) > 0: cmdname = args[0].lower() try: method = getattr(self, "cmd_" + cmdname) doc = method.__doc__ if doc is None: self.log("Sorry, no documentation found for '%s'" % ( cmdname)) else: self.log("%s: %s" % (cmdname, doc)) except AttributeError: self.log("No such command '%s'; type help for general help." % ( cmdname)) else: res = [] for attrname in dir(self): if attrname.startswith('cmd_'): method = getattr(self, attrname) doc = method.__doc__ cmdname = attrname[4:] if doc is None: doc = "no documentation" res.append("%s: %s" % (cmdname, doc)) self.log('\n'.join(res))
[ "def", "cmd_help", "(", "self", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", ">", "0", ":", "cmdname", "=", "args", "[", "0", "]", ".", "lower", "(", ")", "try", ":", "method", "=", "getattr", "(", "self", ",", "\"cmd_\"", "+", "cmdname", ")", "doc", "=", "method", ".", "__doc__", "if", "doc", "is", "None", ":", "self", ".", "log", "(", "\"Sorry, no documentation found for '%s'\"", "%", "(", "cmdname", ")", ")", "else", ":", "self", ".", "log", "(", "\"%s: %s\"", "%", "(", "cmdname", ",", "doc", ")", ")", "except", "AttributeError", ":", "self", ".", "log", "(", "\"No such command '%s'; type help for general help.\"", "%", "(", "cmdname", ")", ")", "else", ":", "res", "=", "[", "]", "for", "attrname", "in", "dir", "(", "self", ")", ":", "if", "attrname", ".", "startswith", "(", "'cmd_'", ")", ":", "method", "=", "getattr", "(", "self", ",", "attrname", ")", "doc", "=", "method", ".", "__doc__", "cmdname", "=", "attrname", "[", "4", ":", "]", "if", "doc", "is", "None", ":", "doc", "=", "\"no documentation\"", "res", ".", "append", "(", "\"%s: %s\"", "%", "(", "cmdname", ",", "doc", ")", ")", "self", ".", "log", "(", "'\\n'", ".", "join", "(", "res", ")", ")" ]
36.689655
12.827586
def this_year(self): """ Get AnnouncementRequests from this school year only. """ start_date, end_date = get_date_range_this_year() return self.filter(start_time__gte=start_date, start_time__lte=end_date)
[ "def", "this_year", "(", "self", ")", ":", "start_date", ",", "end_date", "=", "get_date_range_this_year", "(", ")", "return", "self", ".", "filter", "(", "start_time__gte", "=", "start_date", ",", "start_time__lte", "=", "end_date", ")" ]
56.25
19.25
def get_chi(self, scalar=None): """sqrt(Chi_Squared) statistic (see `mcc`, `phi`, or google 'Matthews Correlation Coefficient'""" phi = self.get_phi(scalar=scalar) return mcc_chi(phi, self._num_samples)
[ "def", "get_chi", "(", "self", ",", "scalar", "=", "None", ")", ":", "phi", "=", "self", ".", "get_phi", "(", "scalar", "=", "scalar", ")", "return", "mcc_chi", "(", "phi", ",", "self", ".", "_num_samples", ")" ]
55.75
4
def v_lift_valve_Crane(rho, D1=None, D2=None, style='swing check angled'): r'''Calculates the approximate minimum velocity required to lift the disk or other controlling element of a check valve to a fully open, stable, position according to the Crane method [1]_. .. math:: v_{min} = N\cdot \text{m/s} \cdot \sqrt{\frac{\text{kg/m}^3}{\rho}} .. math:: v_{min} = N\beta^2 \cdot \text{m/s} \cdot \sqrt{\frac{\text{kg/m}^3}{\rho}} See the notes for the definition of values of N and which check valves use which formulas. Parameters ---------- rho : float Density of the fluid [kg/m^3] D1 : float, optional Diameter of the valve bore (must be equal to or smaller than `D2`), [m] D2 : float, optional Diameter of the pipe attached to the valve, [m] style : str The type of valve; one of ['swing check angled', 'swing check straight', 'swing check UL', 'lift check straight', 'lift check angled', 'tilting check 5°', 'tilting check 15°', 'stop check globe 1', 'stop check angle 1', 'stop check globe 2', 'stop check angle 2', 'stop check globe 3', 'stop check angle 3', 'foot valve poppet disc', 'foot valve hinged disc'], [-] Returns ------- v_min : float Approximate minimum velocity required to keep the disc fully lifted, preventing chattering and wear [m/s] Notes ----- This equation is not dimensionless. 
+--------------------------+-----+------+ | Name/string | N | Full | +==========================+=====+======+ | 'swing check angled' | 45 | No | +--------------------------+-----+------+ | 'swing check straight' | 75 | No | +--------------------------+-----+------+ | 'swing check UL' | 120 | No | +--------------------------+-----+------+ | 'lift check straight' | 50 | Yes | +--------------------------+-----+------+ | 'lift check angled' | 170 | Yes | +--------------------------+-----+------+ | 'tilting check 5°' | 100 | No | +--------------------------+-----+------+ | 'tilting check 15°' | 40 | No | +--------------------------+-----+------+ | 'stop check globe 1' | 70 | Yes | +--------------------------+-----+------+ | 'stop check angle 1' | 95 | Yes | +--------------------------+-----+------+ | 'stop check globe 2' | 75 | Yes | +--------------------------+-----+------+ | 'stop check angle 2' | 75 | Yes | +--------------------------+-----+------+ | 'stop check globe 3' | 170 | Yes | +--------------------------+-----+------+ | 'stop check angle 3' | 170 | Yes | +--------------------------+-----+------+ | 'foot valve poppet disc' | 20 | No | +--------------------------+-----+------+ | 'foot valve hinged disc' | 45 | No | +--------------------------+-----+------+ Examples -------- >>> v_lift_valve_Crane(rho=998.2, D1=0.0627, D2=0.0779, style='lift check straight') 1.0252301935349286 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. 
''' specific_volume = 1./rho if D1 is not None and D2 is not None: beta = D1/D2 beta2 = beta*beta if style == 'swing check angled': return 45.0*specific_volume**0.5 elif style == 'swing check straight': return 75.0*specific_volume**0.5 elif style == 'swing check UL': return 120.0*specific_volume**0.5 elif style == 'lift check straight': return 50.0*beta2*specific_volume**0.5 elif style == 'lift check angled': return 170.0*beta2*specific_volume**0.5 elif style == 'tilting check 5°': return 100.0*specific_volume**0.5 elif style == 'tilting check 15°': return 40.0*specific_volume**0.5 elif style == 'stop check globe 1': return 70.0*beta2*specific_volume**0.5 elif style == 'stop check angle 1': return 95.0*beta2*specific_volume**0.5 elif style in ('stop check globe 2', 'stop check angle 2'): return 75.0*beta2*specific_volume**0.5 elif style in ('stop check globe 3', 'stop check angle 3'): return 170.0*beta2*specific_volume**0.5 elif style == 'foot valve poppet disc': return 20.0*specific_volume**0.5 elif style == 'foot valve hinged disc': return 45.0*specific_volume**0.5
[ "def", "v_lift_valve_Crane", "(", "rho", ",", "D1", "=", "None", ",", "D2", "=", "None", ",", "style", "=", "'swing check angled'", ")", ":", "specific_volume", "=", "1.", "/", "rho", "if", "D1", "is", "not", "None", "and", "D2", "is", "not", "None", ":", "beta", "=", "D1", "/", "D2", "beta2", "=", "beta", "*", "beta", "if", "style", "==", "'swing check angled'", ":", "return", "45.0", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'swing check straight'", ":", "return", "75.0", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'swing check UL'", ":", "return", "120.0", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'lift check straight'", ":", "return", "50.0", "*", "beta2", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'lift check angled'", ":", "return", "170.0", "*", "beta2", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'tilting check 5°':", "", "return", "100.0", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'tilting check 15°':", "", "return", "40.0", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'stop check globe 1'", ":", "return", "70.0", "*", "beta2", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'stop check angle 1'", ":", "return", "95.0", "*", "beta2", "*", "specific_volume", "**", "0.5", "elif", "style", "in", "(", "'stop check globe 2'", ",", "'stop check angle 2'", ")", ":", "return", "75.0", "*", "beta2", "*", "specific_volume", "**", "0.5", "elif", "style", "in", "(", "'stop check globe 3'", ",", "'stop check angle 3'", ")", ":", "return", "170.0", "*", "beta2", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'foot valve poppet disc'", ":", "return", "20.0", "*", "specific_volume", "**", "0.5", "elif", "style", "==", "'foot valve hinged disc'", ":", "return", "45.0", "*", "specific_volume", "**", "0.5" ]
38.878261
15.626087
async def handle_request(self, request): """Respond to request if PIN is correct.""" service_name = request.rel_url.query['servicename'] received_code = request.rel_url.query['pairingcode'].lower() _LOGGER.info('Got pairing request from %s with code %s', service_name, received_code) if self._verify_pin(received_code): cmpg = tags.uint64_tag('cmpg', int(self._pairing_guid, 16)) cmnm = tags.string_tag('cmnm', self._name) cmty = tags.string_tag('cmty', 'iPhone') response = tags.container_tag('cmpa', cmpg + cmnm + cmty) self._has_paired = True return web.Response(body=response) # Code did not match, generate an error return web.Response(status=500)
[ "async", "def", "handle_request", "(", "self", ",", "request", ")", ":", "service_name", "=", "request", ".", "rel_url", ".", "query", "[", "'servicename'", "]", "received_code", "=", "request", ".", "rel_url", ".", "query", "[", "'pairingcode'", "]", ".", "lower", "(", ")", "_LOGGER", ".", "info", "(", "'Got pairing request from %s with code %s'", ",", "service_name", ",", "received_code", ")", "if", "self", ".", "_verify_pin", "(", "received_code", ")", ":", "cmpg", "=", "tags", ".", "uint64_tag", "(", "'cmpg'", ",", "int", "(", "self", ".", "_pairing_guid", ",", "16", ")", ")", "cmnm", "=", "tags", ".", "string_tag", "(", "'cmnm'", ",", "self", ".", "_name", ")", "cmty", "=", "tags", ".", "string_tag", "(", "'cmty'", ",", "'iPhone'", ")", "response", "=", "tags", ".", "container_tag", "(", "'cmpa'", ",", "cmpg", "+", "cmnm", "+", "cmty", ")", "self", ".", "_has_paired", "=", "True", "return", "web", ".", "Response", "(", "body", "=", "response", ")", "# Code did not match, generate an error", "return", "web", ".", "Response", "(", "status", "=", "500", ")" ]
46.294118
15.764706
def mod_repo(repo, basedir=None, **kwargs): ''' Modify one or more values for a repo. If the repo does not exist, it will be created, so long as the following values are specified: repo name by which the yum refers to the repo name a human-readable name for the repo baseurl the URL for yum to reference mirrorlist the URL for yum to reference key_url the URL to gather the repo key from (salt:// or any other scheme supported by cp.cache_file) Key/Value pairs may also be removed from a repo's configuration by setting a key to a blank value. Bear in mind that a name cannot be deleted, and a baseurl can only be deleted if a mirrorlist is specified (or vice versa). CLI Examples: .. code-block:: bash salt '*' pkg.mod_repo reponame enabled=1 gpgcheck=1 salt '*' pkg.mod_repo reponame basedir=/path/to/dir enabled=1 salt '*' pkg.mod_repo reponame baseurl= mirrorlist=http://host.com/ ''' # Filter out '__pub' arguments, as well as saltenv repo_opts = dict( (x, kwargs[x]) for x in kwargs if not x.startswith('__') and x not in ('saltenv',) ) if all(x in repo_opts for x in ('mirrorlist', 'baseurl')): raise SaltInvocationError( 'Only one of \'mirrorlist\' and \'baseurl\' can be specified' ) # Build a list of keys to be deleted todelete = [] # list() of keys because the dict could be shrinking in the for loop. 
for key in list(repo_opts): if repo_opts[key] != 0 and not repo_opts[key]: del repo_opts[key] todelete.append(key) # Add baseurl or mirrorlist to the 'todelete' list if the other was # specified in the repo_opts if 'mirrorlist' in repo_opts: todelete.append('baseurl') elif 'baseurl' in repo_opts: todelete.append('mirrorlist') # Fail if the user tried to delete the name if 'name' in todelete: raise SaltInvocationError('The repo name cannot be deleted') # Give the user the ability to change the basedir repos = {} basedirs = _normalize_basedir(basedir) repos = list_repos(basedirs) repofile = '' header = '' filerepos = {} if repo not in repos: # If the repo doesn't exist, create it in a new file in the first # repo directory that exists newdir = None for d in basedirs: if os.path.exists(d): newdir = d break if not newdir: raise SaltInvocationError( 'The repo does not exist and needs to be created, but none ' 'of the following basedir directories exist: {0}'.format(basedirs) ) repofile = '{0}/{1}.repo'.format(newdir, repo) if 'name' not in repo_opts: raise SaltInvocationError( 'The repo does not exist and needs to be created, but a name ' 'was not given' ) if 'baseurl' not in repo_opts and 'mirrorlist' not in repo_opts: raise SaltInvocationError( 'The repo does not exist and needs to be created, but either ' 'a baseurl or a mirrorlist needs to be given' ) filerepos[repo] = {} else: # The repo does exist, open its file repofile = repos[repo]['file'] header, filerepos = _parse_repo_file(repofile) # Error out if they tried to delete baseurl or mirrorlist improperly if 'baseurl' in todelete: if 'mirrorlist' not in repo_opts and 'mirrorlist' \ not in filerepos[repo]: raise SaltInvocationError( 'Cannot delete baseurl without specifying mirrorlist' ) if 'mirrorlist' in todelete: if 'baseurl' not in repo_opts and 'baseurl' \ not in filerepos[repo]: raise SaltInvocationError( 'Cannot delete mirrorlist without specifying baseurl' ) # Import repository gpg key if 'key_url' in 
repo_opts: key_url = kwargs['key_url'] fn_ = __salt__['cp.cache_file'](key_url, saltenv=(kwargs['saltenv'] if 'saltenv' in kwargs else 'base')) if not fn_: raise CommandExecutionError( 'Error: Unable to copy key from URL {0} for repository {1}'.format(key_url, repo_opts['name']) ) cmd = ['rpm', '--import', fn_] out = __salt__['cmd.retcode'](cmd, python_shell=False, **kwargs) if out != salt.defaults.exitcodes.EX_OK: raise CommandExecutionError( 'Error: Unable to import key from URL {0} for repository {1}'.format(key_url, repo_opts['name']) ) del repo_opts['key_url'] # Delete anything in the todelete list for key in todelete: if key in six.iterkeys(filerepos[repo].copy()): del filerepos[repo][key] _bool_to_str = lambda x: '1' if x else '0' # Old file or new, write out the repos(s) filerepos[repo].update(repo_opts) content = header for stanza in six.iterkeys(filerepos): comments = salt.utils.pkg.rpm.combine_comments( filerepos[stanza].pop('comments', []) ) content += '[{0}]\n'.format(stanza) for line in six.iterkeys(filerepos[stanza]): content += '{0}={1}\n'.format( line, filerepos[stanza][line] if not isinstance(filerepos[stanza][line], bool) else _bool_to_str(filerepos[stanza][line]) ) content += comments + '\n' with salt.utils.files.fopen(repofile, 'w') as fileout: fileout.write(salt.utils.stringutils.to_str(content)) return {repofile: filerepos}
[ "def", "mod_repo", "(", "repo", ",", "basedir", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Filter out '__pub' arguments, as well as saltenv", "repo_opts", "=", "dict", "(", "(", "x", ",", "kwargs", "[", "x", "]", ")", "for", "x", "in", "kwargs", "if", "not", "x", ".", "startswith", "(", "'__'", ")", "and", "x", "not", "in", "(", "'saltenv'", ",", ")", ")", "if", "all", "(", "x", "in", "repo_opts", "for", "x", "in", "(", "'mirrorlist'", ",", "'baseurl'", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Only one of \\'mirrorlist\\' and \\'baseurl\\' can be specified'", ")", "# Build a list of keys to be deleted", "todelete", "=", "[", "]", "# list() of keys because the dict could be shrinking in the for loop.", "for", "key", "in", "list", "(", "repo_opts", ")", ":", "if", "repo_opts", "[", "key", "]", "!=", "0", "and", "not", "repo_opts", "[", "key", "]", ":", "del", "repo_opts", "[", "key", "]", "todelete", ".", "append", "(", "key", ")", "# Add baseurl or mirrorlist to the 'todelete' list if the other was", "# specified in the repo_opts", "if", "'mirrorlist'", "in", "repo_opts", ":", "todelete", ".", "append", "(", "'baseurl'", ")", "elif", "'baseurl'", "in", "repo_opts", ":", "todelete", ".", "append", "(", "'mirrorlist'", ")", "# Fail if the user tried to delete the name", "if", "'name'", "in", "todelete", ":", "raise", "SaltInvocationError", "(", "'The repo name cannot be deleted'", ")", "# Give the user the ability to change the basedir", "repos", "=", "{", "}", "basedirs", "=", "_normalize_basedir", "(", "basedir", ")", "repos", "=", "list_repos", "(", "basedirs", ")", "repofile", "=", "''", "header", "=", "''", "filerepos", "=", "{", "}", "if", "repo", "not", "in", "repos", ":", "# If the repo doesn't exist, create it in a new file in the first", "# repo directory that exists", "newdir", "=", "None", "for", "d", "in", "basedirs", ":", "if", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "newdir", "=", "d", "break", "if", "not", "newdir", 
":", "raise", "SaltInvocationError", "(", "'The repo does not exist and needs to be created, but none '", "'of the following basedir directories exist: {0}'", ".", "format", "(", "basedirs", ")", ")", "repofile", "=", "'{0}/{1}.repo'", ".", "format", "(", "newdir", ",", "repo", ")", "if", "'name'", "not", "in", "repo_opts", ":", "raise", "SaltInvocationError", "(", "'The repo does not exist and needs to be created, but a name '", "'was not given'", ")", "if", "'baseurl'", "not", "in", "repo_opts", "and", "'mirrorlist'", "not", "in", "repo_opts", ":", "raise", "SaltInvocationError", "(", "'The repo does not exist and needs to be created, but either '", "'a baseurl or a mirrorlist needs to be given'", ")", "filerepos", "[", "repo", "]", "=", "{", "}", "else", ":", "# The repo does exist, open its file", "repofile", "=", "repos", "[", "repo", "]", "[", "'file'", "]", "header", ",", "filerepos", "=", "_parse_repo_file", "(", "repofile", ")", "# Error out if they tried to delete baseurl or mirrorlist improperly", "if", "'baseurl'", "in", "todelete", ":", "if", "'mirrorlist'", "not", "in", "repo_opts", "and", "'mirrorlist'", "not", "in", "filerepos", "[", "repo", "]", ":", "raise", "SaltInvocationError", "(", "'Cannot delete baseurl without specifying mirrorlist'", ")", "if", "'mirrorlist'", "in", "todelete", ":", "if", "'baseurl'", "not", "in", "repo_opts", "and", "'baseurl'", "not", "in", "filerepos", "[", "repo", "]", ":", "raise", "SaltInvocationError", "(", "'Cannot delete mirrorlist without specifying baseurl'", ")", "# Import repository gpg key", "if", "'key_url'", "in", "repo_opts", ":", "key_url", "=", "kwargs", "[", "'key_url'", "]", "fn_", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "key_url", ",", "saltenv", "=", "(", "kwargs", "[", "'saltenv'", "]", "if", "'saltenv'", "in", "kwargs", "else", "'base'", ")", ")", "if", "not", "fn_", ":", "raise", "CommandExecutionError", "(", "'Error: Unable to copy key from URL {0} for repository {1}'", ".", "format", 
"(", "key_url", ",", "repo_opts", "[", "'name'", "]", ")", ")", "cmd", "=", "[", "'rpm'", ",", "'--import'", ",", "fn_", "]", "out", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "cmd", ",", "python_shell", "=", "False", ",", "*", "*", "kwargs", ")", "if", "out", "!=", "salt", ".", "defaults", ".", "exitcodes", ".", "EX_OK", ":", "raise", "CommandExecutionError", "(", "'Error: Unable to import key from URL {0} for repository {1}'", ".", "format", "(", "key_url", ",", "repo_opts", "[", "'name'", "]", ")", ")", "del", "repo_opts", "[", "'key_url'", "]", "# Delete anything in the todelete list", "for", "key", "in", "todelete", ":", "if", "key", "in", "six", ".", "iterkeys", "(", "filerepos", "[", "repo", "]", ".", "copy", "(", ")", ")", ":", "del", "filerepos", "[", "repo", "]", "[", "key", "]", "_bool_to_str", "=", "lambda", "x", ":", "'1'", "if", "x", "else", "'0'", "# Old file or new, write out the repos(s)", "filerepos", "[", "repo", "]", ".", "update", "(", "repo_opts", ")", "content", "=", "header", "for", "stanza", "in", "six", ".", "iterkeys", "(", "filerepos", ")", ":", "comments", "=", "salt", ".", "utils", ".", "pkg", ".", "rpm", ".", "combine_comments", "(", "filerepos", "[", "stanza", "]", ".", "pop", "(", "'comments'", ",", "[", "]", ")", ")", "content", "+=", "'[{0}]\\n'", ".", "format", "(", "stanza", ")", "for", "line", "in", "six", ".", "iterkeys", "(", "filerepos", "[", "stanza", "]", ")", ":", "content", "+=", "'{0}={1}\\n'", ".", "format", "(", "line", ",", "filerepos", "[", "stanza", "]", "[", "line", "]", "if", "not", "isinstance", "(", "filerepos", "[", "stanza", "]", "[", "line", "]", ",", "bool", ")", "else", "_bool_to_str", "(", "filerepos", "[", "stanza", "]", "[", "line", "]", ")", ")", "content", "+=", "comments", "+", "'\\n'", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "repofile", ",", "'w'", ")", "as", "fileout", ":", "fileout", ".", "write", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", 
"content", ")", ")", "return", "{", "repofile", ":", "filerepos", "}" ]
36.269231
21.051282
def delete(self, workflow_id, email_id): """ Removes an individual Automation workflow email. :param workflow_id: The unique id for the Automation workflow. :type workflow_id: :py:class:`str` :param email_id: The unique id for the Automation workflow email. :type email_id: :py:class:`str` """ self.workflow_id = workflow_id self.email_id = email_id return self._mc_client._delete(url=self._build_path(workflow_id, 'emails', email_id))
[ "def", "delete", "(", "self", ",", "workflow_id", ",", "email_id", ")", ":", "self", ".", "workflow_id", "=", "workflow_id", "self", ".", "email_id", "=", "email_id", "return", "self", ".", "_mc_client", ".", "_delete", "(", "url", "=", "self", ".", "_build_path", "(", "workflow_id", ",", "'emails'", ",", "email_id", ")", ")" ]
38.846154
17.307692
def SetTimelineName(self, timeline_name): """Sets the timeline name. Args: timeline_name (str): timeline name. """ self._timeline_name = timeline_name logger.info('Timeline name: {0:s}'.format(self._timeline_name))
[ "def", "SetTimelineName", "(", "self", ",", "timeline_name", ")", ":", "self", ".", "_timeline_name", "=", "timeline_name", "logger", ".", "info", "(", "'Timeline name: {0:s}'", ".", "format", "(", "self", ".", "_timeline_name", ")", ")" ]
29.25
12.625
def delete_by_external_id(self, api_objects): """ Delete (DELETE) one or more API objects by external_id. :param api_objects: """ if not isinstance(api_objects, collections.Iterable): api_objects = [api_objects] return CRUDRequest(self).delete(api_objects, destroy_many_external=True)
[ "def", "delete_by_external_id", "(", "self", ",", "api_objects", ")", ":", "if", "not", "isinstance", "(", "api_objects", ",", "collections", ".", "Iterable", ")", ":", "api_objects", "=", "[", "api_objects", "]", "return", "CRUDRequest", "(", "self", ")", ".", "delete", "(", "api_objects", ",", "destroy_many_external", "=", "True", ")" ]
37.444444
15.888889
def get_post_file(self, hdr, f_in, clen, post, files): """Reads from a multipart/form-data.""" lens = { 'clen': clen, 'push': [], } prefix = "boundary=" if not hdr.startswith(prefix): return None boundary = hdr[len(prefix):].strip().encode('utf8') if not boundary: return None boundary = b'--' + boundary raw_boundary = b'\r\n' + boundary end_boundary = boundary + b'--' def push_back(line): ln = BytesIO() ln.write(line) ln.flush() ln.seek(0) lens['clen'] += len(line) lens['push'].append(ln) def read_line(): line = b'' while not line.endswith(b'\n') and lens['push']: br = lens['push'].pop() line += br.readline() tmp = br.read(1) if tmp != b'': br.seek(br.tell() - 1) lens['push'].append(br) if not line.endswith(b'\n'): line += f_in.readline(lens['clen']) lens['clen'] -= len(line) if line == b'' or lens['clen'] < 0: raise ValueError("Unexpected EOF") return line.strip() def read(length): res = b'' while len(res) < length and lens['push']: br = lens['push'].pop() res += br.read(length - len(res)) tmp = br.read(1) if tmp != b'': br.seek(br.tell() - 1) lens['push'].append(br) if len(res) < length: res += f_in.read(length - len(res)) lens['clen'] -= len(res) if res == b'' or lens['clen'] < 0: raise ValueError("Unexpected EOF") return res def parse_file(): f = BytesIO() buff_size = 10 * 1024 def write_buff(buff): if f.tell() + len(buff) > self.server.max_file_size: raise PreventDefaultResponse( 413, "Uploaded file is too large! 
{0} > {1}".format( f.tell() + len(buff), self.server.max_file_size)) f.write(buff) f.flush() buff = b"" while True: buff += read(min(lens['clen'], buff_size)) bix = buff.find(raw_boundary) if bix >= 0: write_buff(buff[:bix]) push_back(buff[bix + len(raw_boundary) - len(boundary):]) break out_split = max(len(buff) - len(raw_boundary), 0) if out_split > 0: write_buff(buff[:out_split]) buff = buff[out_split:] f.seek(0) return f def parse_field(): return parse_file().read().decode('utf8') while True: line = read_line() if line == end_boundary: if lens['clen'] > 0: raise ValueError( "Expected EOF got: {0}".format( repr(f_in.read(lens['clen'])))) return if line != boundary: raise ValueError( "Expected boundary got: {0}".format(repr(line))) headers = {} while True: line = read_line() if not line: break key, value = line.split(b':', 1) headers[key.lower()] = value.strip() name = None if b'content-disposition' in headers: cdis = headers[b'content-disposition'] if not cdis.startswith(b'form-data'): raise ValueError( "Unknown content-disposition: {0}".format(repr(cdis))) name_field = b'name="' ix = cdis.find(name_field) if ix >= 0: name = cdis[ix + len(name_field):] name = name[:name.index(b'"')].decode('utf8') ctype = None if b'content-type' in headers: ctype = headers[b'content-type'] # b'application/octet-stream': # we treat all files the same if ctype is not None: files[name] = parse_file() else: post[name] = parse_field()
[ "def", "get_post_file", "(", "self", ",", "hdr", ",", "f_in", ",", "clen", ",", "post", ",", "files", ")", ":", "lens", "=", "{", "'clen'", ":", "clen", ",", "'push'", ":", "[", "]", ",", "}", "prefix", "=", "\"boundary=\"", "if", "not", "hdr", ".", "startswith", "(", "prefix", ")", ":", "return", "None", "boundary", "=", "hdr", "[", "len", "(", "prefix", ")", ":", "]", ".", "strip", "(", ")", ".", "encode", "(", "'utf8'", ")", "if", "not", "boundary", ":", "return", "None", "boundary", "=", "b'--'", "+", "boundary", "raw_boundary", "=", "b'\\r\\n'", "+", "boundary", "end_boundary", "=", "boundary", "+", "b'--'", "def", "push_back", "(", "line", ")", ":", "ln", "=", "BytesIO", "(", ")", "ln", ".", "write", "(", "line", ")", "ln", ".", "flush", "(", ")", "ln", ".", "seek", "(", "0", ")", "lens", "[", "'clen'", "]", "+=", "len", "(", "line", ")", "lens", "[", "'push'", "]", ".", "append", "(", "ln", ")", "def", "read_line", "(", ")", ":", "line", "=", "b''", "while", "not", "line", ".", "endswith", "(", "b'\\n'", ")", "and", "lens", "[", "'push'", "]", ":", "br", "=", "lens", "[", "'push'", "]", ".", "pop", "(", ")", "line", "+=", "br", ".", "readline", "(", ")", "tmp", "=", "br", ".", "read", "(", "1", ")", "if", "tmp", "!=", "b''", ":", "br", ".", "seek", "(", "br", ".", "tell", "(", ")", "-", "1", ")", "lens", "[", "'push'", "]", ".", "append", "(", "br", ")", "if", "not", "line", ".", "endswith", "(", "b'\\n'", ")", ":", "line", "+=", "f_in", ".", "readline", "(", "lens", "[", "'clen'", "]", ")", "lens", "[", "'clen'", "]", "-=", "len", "(", "line", ")", "if", "line", "==", "b''", "or", "lens", "[", "'clen'", "]", "<", "0", ":", "raise", "ValueError", "(", "\"Unexpected EOF\"", ")", "return", "line", ".", "strip", "(", ")", "def", "read", "(", "length", ")", ":", "res", "=", "b''", "while", "len", "(", "res", ")", "<", "length", "and", "lens", "[", "'push'", "]", ":", "br", "=", "lens", "[", "'push'", "]", ".", "pop", "(", ")", "res", "+=", "br", ".", "read", 
"(", "length", "-", "len", "(", "res", ")", ")", "tmp", "=", "br", ".", "read", "(", "1", ")", "if", "tmp", "!=", "b''", ":", "br", ".", "seek", "(", "br", ".", "tell", "(", ")", "-", "1", ")", "lens", "[", "'push'", "]", ".", "append", "(", "br", ")", "if", "len", "(", "res", ")", "<", "length", ":", "res", "+=", "f_in", ".", "read", "(", "length", "-", "len", "(", "res", ")", ")", "lens", "[", "'clen'", "]", "-=", "len", "(", "res", ")", "if", "res", "==", "b''", "or", "lens", "[", "'clen'", "]", "<", "0", ":", "raise", "ValueError", "(", "\"Unexpected EOF\"", ")", "return", "res", "def", "parse_file", "(", ")", ":", "f", "=", "BytesIO", "(", ")", "buff_size", "=", "10", "*", "1024", "def", "write_buff", "(", "buff", ")", ":", "if", "f", ".", "tell", "(", ")", "+", "len", "(", "buff", ")", ">", "self", ".", "server", ".", "max_file_size", ":", "raise", "PreventDefaultResponse", "(", "413", ",", "\"Uploaded file is too large! {0} > {1}\"", ".", "format", "(", "f", ".", "tell", "(", ")", "+", "len", "(", "buff", ")", ",", "self", ".", "server", ".", "max_file_size", ")", ")", "f", ".", "write", "(", "buff", ")", "f", ".", "flush", "(", ")", "buff", "=", "b\"\"", "while", "True", ":", "buff", "+=", "read", "(", "min", "(", "lens", "[", "'clen'", "]", ",", "buff_size", ")", ")", "bix", "=", "buff", ".", "find", "(", "raw_boundary", ")", "if", "bix", ">=", "0", ":", "write_buff", "(", "buff", "[", ":", "bix", "]", ")", "push_back", "(", "buff", "[", "bix", "+", "len", "(", "raw_boundary", ")", "-", "len", "(", "boundary", ")", ":", "]", ")", "break", "out_split", "=", "max", "(", "len", "(", "buff", ")", "-", "len", "(", "raw_boundary", ")", ",", "0", ")", "if", "out_split", ">", "0", ":", "write_buff", "(", "buff", "[", ":", "out_split", "]", ")", "buff", "=", "buff", "[", "out_split", ":", "]", "f", ".", "seek", "(", "0", ")", "return", "f", "def", "parse_field", "(", ")", ":", "return", "parse_file", "(", ")", ".", "read", "(", ")", ".", "decode", "(", "'utf8'", 
")", "while", "True", ":", "line", "=", "read_line", "(", ")", "if", "line", "==", "end_boundary", ":", "if", "lens", "[", "'clen'", "]", ">", "0", ":", "raise", "ValueError", "(", "\"Expected EOF got: {0}\"", ".", "format", "(", "repr", "(", "f_in", ".", "read", "(", "lens", "[", "'clen'", "]", ")", ")", ")", ")", "return", "if", "line", "!=", "boundary", ":", "raise", "ValueError", "(", "\"Expected boundary got: {0}\"", ".", "format", "(", "repr", "(", "line", ")", ")", ")", "headers", "=", "{", "}", "while", "True", ":", "line", "=", "read_line", "(", ")", "if", "not", "line", ":", "break", "key", ",", "value", "=", "line", ".", "split", "(", "b':'", ",", "1", ")", "headers", "[", "key", ".", "lower", "(", ")", "]", "=", "value", ".", "strip", "(", ")", "name", "=", "None", "if", "b'content-disposition'", "in", "headers", ":", "cdis", "=", "headers", "[", "b'content-disposition'", "]", "if", "not", "cdis", ".", "startswith", "(", "b'form-data'", ")", ":", "raise", "ValueError", "(", "\"Unknown content-disposition: {0}\"", ".", "format", "(", "repr", "(", "cdis", ")", ")", ")", "name_field", "=", "b'name=\"'", "ix", "=", "cdis", ".", "find", "(", "name_field", ")", "if", "ix", ">=", "0", ":", "name", "=", "cdis", "[", "ix", "+", "len", "(", "name_field", ")", ":", "]", "name", "=", "name", "[", ":", "name", ".", "index", "(", "b'\"'", ")", "]", ".", "decode", "(", "'utf8'", ")", "ctype", "=", "None", "if", "b'content-type'", "in", "headers", ":", "ctype", "=", "headers", "[", "b'content-type'", "]", "# b'application/octet-stream': # we treat all files the same", "if", "ctype", "is", "not", "None", ":", "files", "[", "name", "]", "=", "parse_file", "(", ")", "else", ":", "post", "[", "name", "]", "=", "parse_field", "(", ")" ]
35.829268
14.065041
def tree(self, tree_alias, context):
    """Return the item list rendered by the 'sitetree_tree' template tag.

    :param str|unicode tree_alias: alias of the tree to build
    :param Context context: template context the tag is rendered in
    :rtype: list|str  # empty string when the tree has no items
    """
    tree_alias, items = self.init_tree(tree_alias, context)

    # Nothing to render for an unknown or empty tree.
    if not items:
        return ''

    children = self.get_children(tree_alias, None)
    filtered = self.filter_items(children, 'sitetree')
    hooked = self.apply_hook(filtered, 'sitetree')
    self.update_has_children(tree_alias, hooked, 'sitetree')

    return hooked
[ "def", "tree", "(", "self", ",", "tree_alias", ",", "context", ")", ":", "tree_alias", ",", "sitetree_items", "=", "self", ".", "init_tree", "(", "tree_alias", ",", "context", ")", "if", "not", "sitetree_items", ":", "return", "''", "tree_items", "=", "self", ".", "filter_items", "(", "self", ".", "get_children", "(", "tree_alias", ",", "None", ")", ",", "'sitetree'", ")", "tree_items", "=", "self", ".", "apply_hook", "(", "tree_items", ",", "'sitetree'", ")", "self", ".", "update_has_children", "(", "tree_alias", ",", "tree_items", ",", "'sitetree'", ")", "return", "tree_items" ]
33.647059
21.294118
def contacts(self, *args, **kwargs):
    """Assess the cell-to-cell contacts recorded in the CellDataFrame.

    Returns:
        Contacts: holds cell-to-cell contact information for whatever
            phenotypes were in the CellDataFrame before execution.
    """
    result = Contacts.read_cellframe(self, prune_neighbors=True)
    # Caller-supplied overrides win; otherwise fall back to this frame's
    # own measurements (the fallbacks are only evaluated when needed).
    result.measured_regions = (
        kwargs['measured_regions'] if 'measured_regions' in kwargs
        else self.get_measured_regions())
    result.measured_phenotypes = (
        kwargs['measured_phenotypes'] if 'measured_phenotypes' in kwargs
        else self.phenotypes)
    result.microns_per_pixel = self.microns_per_pixel
    return result
[ "def", "contacts", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "n", "=", "Contacts", ".", "read_cellframe", "(", "self", ",", "prune_neighbors", "=", "True", ")", "if", "'measured_regions'", "in", "kwargs", ":", "n", ".", "measured_regions", "=", "kwargs", "[", "'measured_regions'", "]", "else", ":", "n", ".", "measured_regions", "=", "self", ".", "get_measured_regions", "(", ")", "if", "'measured_phenotypes'", "in", "kwargs", ":", "n", ".", "measured_phenotypes", "=", "kwargs", "[", "'measured_phenotypes'", "]", "else", ":", "n", ".", "measured_phenotypes", "=", "self", ".", "phenotypes", "n", ".", "microns_per_pixel", "=", "self", ".", "microns_per_pixel", "return", "n" ]
51.928571
29.5
def archive_sciobj(pid):
    """Mark the science object identified by ``pid`` as archived.

    Preconditions:
    - The object with the pid is verified to exist.
    - The object is not a replica.
    - The object is not archived.
    """
    model = d1_gmn.app.model_util.get_sci_model(pid)
    model.is_archived = True
    model.save()
    # Archiving counts as a modification for timestamp purposes.
    _update_modified_timestamp(model)
[ "def", "archive_sciobj", "(", "pid", ")", ":", "sciobj_model", "=", "d1_gmn", ".", "app", ".", "model_util", ".", "get_sci_model", "(", "pid", ")", "sciobj_model", ".", "is_archived", "=", "True", "sciobj_model", ".", "save", "(", ")", "_update_modified_timestamp", "(", "sciobj_model", ")" ]
28.846154
14.384615
def spin_in_roche(s, etheta, elongan, eincl):
    """Rotate the spin ``s`` of a star on a Keplerian orbit from the
    plane-of-sky reference frame into the Roche reference frame.

    :param s: spin vector in the plane-of-sky frame
    :param etheta: true anomaly
    :param elongan: longitude of the ascending node
    :param eincl: inclination
    """
    # The forward rotation is m = Rz(long).Rx(-incl).Rz(theta).Rz(pi);
    # applying its transpose performs the inverse (sky -> Roche) mapping.
    rot = euler_trans_matrix(etheta, elongan, eincl)
    return np.dot(rot.T, s)
[ "def", "spin_in_roche", "(", "s", ",", "etheta", ",", "elongan", ",", "eincl", ")", ":", "# m = Rz(long).Rx(-incl).Rz(theta).Rz(pi)", "m", "=", "euler_trans_matrix", "(", "etheta", ",", "elongan", ",", "eincl", ")", "return", "np", ".", "dot", "(", "m", ".", "T", ",", "s", ")" ]
26.733333
14.6
def _create_class_instance(class_name, _proxy):
    """
    Look for the class in .extensions in case it has already been imported
    (perhaps as a builtin extension hard compiled into unity_server).

    Falls back to resolving ``class_name`` as given when the extensions
    lookup fails.
    """
    try:
        return _class_instance_from_name(
            'turicreate.extensions.' + class_name, _proxy=_proxy)
    except Exception:
        # Deliberate best-effort: any failure just means the class is not a
        # builtin extension, so fall through to the plain lookup.
        # (Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.)
        pass
    return _class_instance_from_name(class_name, _proxy=_proxy)
[ "def", "_create_class_instance", "(", "class_name", ",", "_proxy", ")", ":", "try", ":", "return", "_class_instance_from_name", "(", "'turicreate.extensions.'", "+", "class_name", ",", "_proxy", "=", "_proxy", ")", "except", ":", "pass", "return", "_class_instance_from_name", "(", "class_name", ",", "_proxy", "=", "_proxy", ")" ]
39.3
23.7
def same_as(self, rows: List[Row], column: Column) -> List[Row]:
    """
    Takes a row and a column and returns a list of rows from the full set
    of rows that contain the same value under the given column as the
    given row.
    """
    # Only the first row of ``rows`` supplies the reference value.
    target = rows[0].values[column.name]
    return [table_row for table_row in self.table_data
            if table_row.values[column.name] == target]
[ "def", "same_as", "(", "self", ",", "rows", ":", "List", "[", "Row", "]", ",", "column", ":", "Column", ")", "->", "List", "[", "Row", "]", ":", "cell_value", "=", "rows", "[", "0", "]", ".", "values", "[", "column", ".", "name", "]", "return_list", "=", "[", "]", "for", "table_row", "in", "self", ".", "table_data", ":", "if", "table_row", ".", "values", "[", "column", ".", "name", "]", "==", "cell_value", ":", "return_list", ".", "append", "(", "table_row", ")", "return", "return_list" ]
44.545455
15.272727
def _matches_filters(self, msg): """Checks whether the given message matches at least one of the current filters. See :meth:`~can.BusABC.set_filters` for details on how the filters work. This method should not be overridden. :param can.Message msg: the message to check if matching :rtype: bool :return: whether the given message matches at least one filter """ # if no filters are set, all messages are matched if self._filters is None: return True for _filter in self._filters: # check if this filter even applies to the message if 'extended' in _filter and \ _filter['extended'] != msg.is_extended_id: continue # then check for the mask and id can_id = _filter['can_id'] can_mask = _filter['can_mask'] # basically, we compute # `msg.arbitration_id & can_mask == can_id & can_mask` # by using the shorter, but equivalent from below: if (can_id ^ msg.arbitration_id) & can_mask == 0: return True # nothing matched return False
[ "def", "_matches_filters", "(", "self", ",", "msg", ")", ":", "# if no filters are set, all messages are matched", "if", "self", ".", "_filters", "is", "None", ":", "return", "True", "for", "_filter", "in", "self", ".", "_filters", ":", "# check if this filter even applies to the message", "if", "'extended'", "in", "_filter", "and", "_filter", "[", "'extended'", "]", "!=", "msg", ".", "is_extended_id", ":", "continue", "# then check for the mask and id", "can_id", "=", "_filter", "[", "'can_id'", "]", "can_mask", "=", "_filter", "[", "'can_mask'", "]", "# basically, we compute", "# `msg.arbitration_id & can_mask == can_id & can_mask`", "# by using the shorter, but equivalent from below:", "if", "(", "can_id", "^", "msg", ".", "arbitration_id", ")", "&", "can_mask", "==", "0", ":", "return", "True", "# nothing matched", "return", "False" ]
33.942857
18.057143
def timezone(self):
    """timezone of GSSHA model"""
    if self._tz is None:
        # Derive the zone from the grid centroid, then cache the result
        # so subsequent accesses skip the lookup.
        cen_lat, cen_lon = self.centerLatLon()
        finder = TimezoneFinder()
        zone_name = finder.timezone_at(lng=cen_lon, lat=cen_lat)
        self._tz = timezone(zone_name)
    return self._tz
[ "def", "timezone", "(", "self", ")", ":", "if", "self", ".", "_tz", "is", "None", ":", "# GET CENTROID FROM GSSHA GRID", "cen_lat", ",", "cen_lon", "=", "self", ".", "centerLatLon", "(", ")", "# update time zone", "tf", "=", "TimezoneFinder", "(", ")", "tz_name", "=", "tf", ".", "timezone_at", "(", "lng", "=", "cen_lon", ",", "lat", "=", "cen_lat", ")", "self", ".", "_tz", "=", "timezone", "(", "tz_name", ")", "return", "self", ".", "_tz" ]
29.230769
11.538462
def create_char_dataframe(words):
    """
    Give list of input tokenized words, create dataframe of characters
    where first character of the word is tagged as 1, otherwise 0

    Example
    =======
    ['กิน', 'หมด'] to dataframe of
    [{'char': 'ก', 'type': ..., 'target': 1}, ...,
     {'char': 'ด', 'type': ..., 'target': 0}]
    """
    # The two original branches built identical dicts differing only in
    # 'target'; ``i == 0`` expresses "first character of the word" directly.
    char_dict = [
        {
            'char': char,
            'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
            'target': i == 0,
        }
        for word in words
        for i, char in enumerate(word)
    ]
    return pd.DataFrame(char_dict)
[ "def", "create_char_dataframe", "(", "words", ")", ":", "char_dict", "=", "[", "]", "for", "word", "in", "words", ":", "for", "i", ",", "char", "in", "enumerate", "(", "word", ")", ":", "if", "i", "==", "0", ":", "char_dict", ".", "append", "(", "{", "'char'", ":", "char", ",", "'type'", ":", "CHAR_TYPE_FLATTEN", ".", "get", "(", "char", ",", "'o'", ")", ",", "'target'", ":", "True", "}", ")", "else", ":", "char_dict", ".", "append", "(", "{", "'char'", ":", "char", ",", "'type'", ":", "CHAR_TYPE_FLATTEN", ".", "get", "(", "char", ",", "'o'", ")", ",", "'target'", ":", "False", "}", ")", "return", "pd", ".", "DataFrame", "(", "char_dict", ")" ]
34.708333
14.125
def _addPredecessor(self, predecessorJob): """ Adds a predecessor job to the set of predecessor jobs. Raises a \ RuntimeError if the job is already a predecessor. """ if predecessorJob in self._directPredecessors: raise RuntimeError("The given job is already a predecessor of this job") self._directPredecessors.add(predecessorJob)
[ "def", "_addPredecessor", "(", "self", ",", "predecessorJob", ")", ":", "if", "predecessorJob", "in", "self", ".", "_directPredecessors", ":", "raise", "RuntimeError", "(", "\"The given job is already a predecessor of this job\"", ")", "self", ".", "_directPredecessors", ".", "add", "(", "predecessorJob", ")" ]
48
15.25
def update(self, vips):
    """Update the given VIP requests.

    :param vips: list of dicts describing the VIPs to update
    :return: the API response of the PUT request
    """
    payload = {'vips': vips}
    # The endpoint addresses all targets at once via a ';'-joined id list.
    id_list = ';'.join(str(vip.get('id')) for vip in vips)
    return super(ApiVipRequest, self).put(
        'api/v3/vip-request/%s/' % id_list, payload)
[ "def", "update", "(", "self", ",", "vips", ")", ":", "data", "=", "{", "'vips'", ":", "vips", "}", "vips_ids", "=", "[", "str", "(", "vip", ".", "get", "(", "'id'", ")", ")", "for", "vip", "in", "vips", "]", "return", "super", "(", "ApiVipRequest", ",", "self", ")", ".", "put", "(", "'api/v3/vip-request/%s/'", "%", "';'", ".", "join", "(", "vips_ids", ")", ",", "data", ")" ]
29.538462
21.230769
def p_jsonpath_operator_jsonpath(self, p): """jsonpath : NUMBER operator NUMBER | FLOAT operator FLOAT | ID operator ID | NUMBER operator jsonpath | FLOAT operator jsonpath | jsonpath operator NUMBER | jsonpath operator FLOAT | jsonpath operator jsonpath """ # NOTE(sileht): If we have choice between a field or a string we # always choice string, because field can be full qualified # like $.foo == foo and where string can't. for i in [1, 3]: if (isinstance(p[i], jsonpath_rw.Fields) and len(p[i].fields) == 1): p[i] = p[i].fields[0] p[0] = _arithmetic.Operation(p[1], p[2], p[3])
[ "def", "p_jsonpath_operator_jsonpath", "(", "self", ",", "p", ")", ":", "# NOTE(sileht): If we have choice between a field or a string we", "# always choice string, because field can be full qualified", "# like $.foo == foo and where string can't.", "for", "i", "in", "[", "1", ",", "3", "]", ":", "if", "(", "isinstance", "(", "p", "[", "i", "]", ",", "jsonpath_rw", ".", "Fields", ")", "and", "len", "(", "p", "[", "i", "]", ".", "fields", ")", "==", "1", ")", ":", "p", "[", "i", "]", "=", "p", "[", "i", "]", ".", "fields", "[", "0", "]", "p", "[", "0", "]", "=", "_arithmetic", ".", "Operation", "(", "p", "[", "1", "]", ",", "p", "[", "2", "]", ",", "p", "[", "3", "]", ")" ]
40.45
12
def delete(self, campaign_id):
    """
    Remove a campaign from your MailChimp account.

    :param campaign_id: The unique id for the campaign.
    :type campaign_id: :py:class:`str`
    """
    # Remember the id on the instance before issuing the request.
    self.campaign_id = campaign_id
    target_url = self._build_path(campaign_id)
    return self._mc_client._delete(url=target_url)
[ "def", "delete", "(", "self", ",", "campaign_id", ")", ":", "self", ".", "campaign_id", "=", "campaign_id", "return", "self", ".", "_mc_client", ".", "_delete", "(", "url", "=", "self", ".", "_build_path", "(", "campaign_id", ")", ")" ]
35.333333
13.333333
def preview_article(self, request, object_id, language):
    """Redirecting preview function based on draft_id
    """
    article = get_object_or_404(self.model, id=object_id)
    # Force edit mode and the requested language via query parameters.
    query = '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
    query += '&language=' + language
    with force_language(language):
        target = article.get_absolute_url(language) + query
    return HttpResponseRedirect(target)
[ "def", "preview_article", "(", "self", ",", "request", ",", "object_id", ",", "language", ")", ":", "article", "=", "get_object_or_404", "(", "self", ".", "model", ",", "id", "=", "object_id", ")", "attrs", "=", "'?%s'", "%", "get_cms_setting", "(", "'CMS_TOOLBAR_URL__EDIT_ON'", ")", "attrs", "+=", "'&language='", "+", "language", "with", "force_language", "(", "language", ")", ":", "url", "=", "article", ".", "get_absolute_url", "(", "language", ")", "+", "attrs", "return", "HttpResponseRedirect", "(", "url", ")" ]
47.777778
9.555556
def run_hooks(obj, hooks, *args):
    """Run each function in `hooks` with args, stopping at the first one
    that returns a truthy value.

    Returns True if some hook returned truthy, False otherwise.
    """
    return any(hook(obj, *args) for hook in hooks)
[ "def", "run_hooks", "(", "obj", ",", "hooks", ",", "*", "args", ")", ":", "for", "hook", "in", "hooks", ":", "if", "hook", "(", "obj", ",", "*", "args", ")", ":", "return", "True", "pass", "return", "False" ]
28.5
12.833333
def _calc_digest(self, origin):
    """Calculate the digest for the given file or readable/seekable object.

    Args:
        origin -- could be the path of a file or a readable/seekable
            object (file object, stream, StringIO...)

    Returns:
        String representing the digest for the given origin
    """
    algorithm = self._conf['hash_alg']
    is_stream = hasattr(origin, 'read') and hasattr(origin, 'seek')
    if not is_stream:
        return hashtools.calc_file_digest(origin, algorithm=algorithm)
    # Remember the stream position so the caller observes no side effect.
    saved_pos = origin.tell()
    digest = hashtools.calc_digest(origin, algorithm=algorithm)
    origin.seek(saved_pos)
    return digest
[ "def", "_calc_digest", "(", "self", ",", "origin", ")", ":", "if", "hasattr", "(", "origin", ",", "'read'", ")", "and", "hasattr", "(", "origin", ",", "'seek'", ")", ":", "pos", "=", "origin", ".", "tell", "(", ")", "digest", "=", "hashtools", ".", "calc_digest", "(", "origin", ",", "algorithm", "=", "self", ".", "_conf", "[", "'hash_alg'", "]", ")", "origin", ".", "seek", "(", "pos", ")", "else", ":", "digest", "=", "hashtools", ".", "calc_file_digest", "(", "origin", ",", "algorithm", "=", "self", ".", "_conf", "[", "'hash_alg'", "]", ")", "return", "digest" ]
43.533333
25.2
def AgregarComprobanteAAjustar(self, tipo_cbte, pto_vta, nro_cbte):
    "Add a receipt (comprobante) to the adjustment request"
    entry = dict(
        tipoComprobante=tipo_cbte,
        puntoVenta=pto_vta,
        nroComprobante=nro_cbte,
    )
    self.solicitud['liquidacion']["comprobanteAAjustar"].append(entry)
    return True
[ "def", "AgregarComprobanteAAjustar", "(", "self", ",", "tipo_cbte", ",", "pto_vta", ",", "nro_cbte", ")", ":", "cbte", "=", "dict", "(", "tipoComprobante", "=", "tipo_cbte", ",", "puntoVenta", "=", "pto_vta", ",", "nroComprobante", "=", "nro_cbte", ")", "self", ".", "solicitud", "[", "'liquidacion'", "]", "[", "\"comprobanteAAjustar\"", "]", ".", "append", "(", "cbte", ")", "return", "True" ]
57.6
26.8
def get_sym_srv_debug_entry_client_key(self, debug_entry_client_key):
    """GetSymSrvDebugEntryClientKey.
    [Preview API] Given a client key, returns the best matched debug entry.
    :param str debug_entry_client_key: A "client key" used by both ends of Microsoft's symbol protocol to identify a debug entry. The semantics of client key is governed by symsrv and is beyond the scope of this documentation.
    """
    # NOTE(review): the response of self._send() is discarded, so this
    # method actually returns None despite the docstring's claim that the
    # matched debug entry is returned -- confirm whether the response
    # should be deserialized and returned.
    route_values = {}
    if debug_entry_client_key is not None:
        # Encode the client key into the URL route.
        route_values['debugEntryClientKey'] = self._serialize.url('debug_entry_client_key', debug_entry_client_key, 'str')
    self._send(http_method='GET',
               location_id='9648e256-c9f9-4f16-8a27-630b06396942',
               version='5.0-preview.1',
               route_values=route_values)
[ "def", "get_sym_srv_debug_entry_client_key", "(", "self", ",", "debug_entry_client_key", ")", ":", "route_values", "=", "{", "}", "if", "debug_entry_client_key", "is", "not", "None", ":", "route_values", "[", "'debugEntryClientKey'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'debug_entry_client_key'", ",", "debug_entry_client_key", ",", "'str'", ")", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'9648e256-c9f9-4f16-8a27-630b06396942'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ")" ]
68.416667
33.833333