text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def post_parse_action(self, entry):
    """Separate hosts and ports after an entry is parsed.

    For each of the ``source_host`` / ``dest_host`` keys present in
    *entry*, run ``self.ip_port_regex`` against the value; on a match the
    value is assumed to be dotted-quad-plus-port ("a.b.c.d.port"), the
    host field is rewritten to just the IP and a matching ``*_port`` key
    is added. Entries with no regex match are left untouched.

    :param entry: dict of parsed fields; mutated in place.
    :return: the same (possibly mutated) entry dict.
    """
    # The source/dest handling was previously duplicated verbatim;
    # loop over the two prefixes instead. Behavior is unchanged.
    for prefix in ('source', 'dest'):
        host_key = prefix + '_host'
        if host_key not in entry:
            continue
        matches = self.ip_port_regex.findall(entry[host_key])
        if matches:
            parts = matches[0].split('.')
            # First four dotted components are the IPv4 address,
            # the last component is the port.
            entry[host_key] = '.'.join(parts[:4])
            entry[prefix + '_port'] = parts[-1]
    return entry
[ "def", "post_parse_action", "(", "self", ",", "entry", ")", ":", "if", "'source_host'", "in", "entry", ".", "keys", "(", ")", ":", "host", "=", "self", ".", "ip_port_regex", ".", "findall", "(", "entry", "[", "'source_host'", "]", ")", "if", "host", ":", "hlist", "=", "host", "[", "0", "]", ".", "split", "(", "'.'", ")", "entry", "[", "'source_host'", "]", "=", "'.'", ".", "join", "(", "hlist", "[", ":", "4", "]", ")", "entry", "[", "'source_port'", "]", "=", "hlist", "[", "-", "1", "]", "if", "'dest_host'", "in", "entry", ".", "keys", "(", ")", ":", "host", "=", "self", ".", "ip_port_regex", ".", "findall", "(", "entry", "[", "'dest_host'", "]", ")", "if", "host", ":", "hlist", "=", "host", "[", "0", "]", ".", "split", "(", "'.'", ")", "entry", "[", "'dest_host'", "]", "=", "'.'", ".", "join", "(", "hlist", "[", ":", "4", "]", ")", "entry", "[", "'dest_port'", "]", "=", "hlist", "[", "-", "1", "]", "return", "entry" ]
41.1875
13.1875
def process_file(source_file):
    """
    Extract text from a file (pdf, txt, eml, csv, json)

    :param source_file path to file to read
    :return text from file
    """
    # PDF files go through the dedicated extractor.
    if source_file.endswith(('.pdf', '.PDF')):
        return extract_pdf(source_file)

    # Plain-text formats are read straight off disk.
    if source_file.endswith(('.txt', '.eml', '.csv', '.json')):
        with open(source_file, 'r') as handle:
            return handle.read()

    # Anything else is unsupported: log and return an empty string.
    logger.info("Unsupported file extension for file {}".format(source_file))
    return ""
[ "def", "process_file", "(", "source_file", ")", ":", "if", "source_file", ".", "endswith", "(", "(", "'.pdf'", ",", "'.PDF'", ")", ")", ":", "txt", "=", "extract_pdf", "(", "source_file", ")", "elif", "source_file", ".", "endswith", "(", "(", "'.txt'", ",", "'.eml'", ",", "'.csv'", ",", "'.json'", ")", ")", ":", "with", "open", "(", "source_file", ",", "'r'", ")", "as", "f", ":", "txt", "=", "f", ".", "read", "(", ")", "else", ":", "logger", ".", "info", "(", "\"Unsupported file extension for file {}\"", ".", "format", "(", "source_file", ")", ")", "return", "\"\"", "return", "txt" ]
33.666667
14.066667
def srotate(self): """Single rotation. Assumes that balance is +-2.""" # self save save # save 3 -> 1 self -> 1 self.rot() # 1 2 2 3 # # self save save # 3 save -> self 1 -> self.rot() 1 # 2 1 3 2 #assert(self.balance != 0) heavy = self.balance > 0 light = not heavy save = self[heavy] #print("srotate: bal={},{}".format(self.balance, save.balance)) #self.print_structure() self[heavy] = save[light] # 2 #assert(save[light]) save[light] = self.rotate() # Needed to ensure the 2 and 3 are balanced under new subnode # Some intervals may overlap both self.x_center and save.x_center # Promote those to the new tip of the tree promotees = [iv for iv in save[light].s_center if save.center_hit(iv)] if promotees: for iv in promotees: save[light] = save[light].remove(iv) # may trigger pruning # TODO: Use Node.add() here, to simplify future balancing improvements. # For now, this is the same as augmenting save.s_center, but that may # change. save.s_center.update(promotees) save.refresh_balance() return save
[ "def", "srotate", "(", "self", ")", ":", "# self save save", "# save 3 -> 1 self -> 1 self.rot()", "# 1 2 2 3", "#", "# self save save", "# 3 save -> self 1 -> self.rot() 1", "# 2 1 3 2", "#assert(self.balance != 0)", "heavy", "=", "self", ".", "balance", ">", "0", "light", "=", "not", "heavy", "save", "=", "self", "[", "heavy", "]", "#print(\"srotate: bal={},{}\".format(self.balance, save.balance))", "#self.print_structure()", "self", "[", "heavy", "]", "=", "save", "[", "light", "]", "# 2", "#assert(save[light])", "save", "[", "light", "]", "=", "self", ".", "rotate", "(", ")", "# Needed to ensure the 2 and 3 are balanced under new subnode", "# Some intervals may overlap both self.x_center and save.x_center", "# Promote those to the new tip of the tree", "promotees", "=", "[", "iv", "for", "iv", "in", "save", "[", "light", "]", ".", "s_center", "if", "save", ".", "center_hit", "(", "iv", ")", "]", "if", "promotees", ":", "for", "iv", "in", "promotees", ":", "save", "[", "light", "]", "=", "save", "[", "light", "]", ".", "remove", "(", "iv", ")", "# may trigger pruning", "# TODO: Use Node.add() here, to simplify future balancing improvements.", "# For now, this is the same as augmenting save.s_center, but that may", "# change.", "save", ".", "s_center", ".", "update", "(", "promotees", ")", "save", ".", "refresh_balance", "(", ")", "return", "save" ]
41.71875
19.5
def _exec_query(self): """ Executes solr query if it hasn't already executed. Returns: Self. """ if not self._solr_locked: if not self.compiled_query: self._compile_query() try: solr_params = self._process_params() if settings.DEBUG: t1 = time.time() self._solr_cache = self.bucket.search(self.compiled_query, self.index_name, **solr_params) # if DEBUG is on and DEBUG_LEVEL set to a value higher than 5 # print query in to console. if settings.DEBUG and settings.DEBUG_LEVEL >= 5: print("QRY => %s\nSOLR_PARAMS => %s" % (self.compiled_query, solr_params)) except riak.RiakError as err: err.value += self._get_debug_data() raise self._solr_locked = True return self._solr_cache['docs']
[ "def", "_exec_query", "(", "self", ")", ":", "if", "not", "self", ".", "_solr_locked", ":", "if", "not", "self", ".", "compiled_query", ":", "self", ".", "_compile_query", "(", ")", "try", ":", "solr_params", "=", "self", ".", "_process_params", "(", ")", "if", "settings", ".", "DEBUG", ":", "t1", "=", "time", ".", "time", "(", ")", "self", ".", "_solr_cache", "=", "self", ".", "bucket", ".", "search", "(", "self", ".", "compiled_query", ",", "self", ".", "index_name", ",", "*", "*", "solr_params", ")", "# if DEBUG is on and DEBUG_LEVEL set to a value higher than 5", "# print query in to console.", "if", "settings", ".", "DEBUG", "and", "settings", ".", "DEBUG_LEVEL", ">=", "5", ":", "print", "(", "\"QRY => %s\\nSOLR_PARAMS => %s\"", "%", "(", "self", ".", "compiled_query", ",", "solr_params", ")", ")", "except", "riak", ".", "RiakError", "as", "err", ":", "err", ".", "value", "+=", "self", ".", "_get_debug_data", "(", ")", "raise", "self", ".", "_solr_locked", "=", "True", "return", "self", ".", "_solr_cache", "[", "'docs'", "]" ]
39.444444
17.37037
def permutations(x):
    '''Given a listlike, x, return all permutations of x

    Returns the permutations of x in the lexical order of their indices:
    e.g.
    >>> x = [ 1, 2, 3, 4 ]
    >>> for p in permutations(x):
    >>>   print p
    [ 1, 2, 3, 4 ]
    [ 1, 2, 4, 3 ]
    [ 1, 3, 2, 4 ]
    [ 1, 3, 4, 2 ]
    [ 1, 4, 2, 3 ]
    [ 1, 4, 3, 2 ]
    [ 2, 1, 3, 4 ]
    ...
    [ 4, 3, 2, 1 ]
    '''
    #
    # The algorithm is attributed to Narayana Pandit from his
    # Ganita Kaumundi (1356). The following is from
    #
    # http://en.wikipedia.org/wiki/Permutation#Systematic_generation_of_all_permutations
    #
    # 1. Find the largest index k such that a[k] < a[k + 1].
    #    If no such index exists, the permutation is the last permutation.
    # 2. Find the largest index l such that a[k] < a[l].
    #    Since k + 1 is such an index, l is well defined and satisfies k < l.
    # 3. Swap a[k] with a[l].
    # 4. Reverse the sequence from a[k + 1] up to and including the final
    #    element a[n].
    #
    yield list(x)  # don't forget to do the first one
    x = np.array(x)
    a = np.arange(len(x))
    while True:
        # 1 - find largest or stop
        ak_lt_ak_next = np.argwhere(a[:-1] < a[1:])
        if len(ak_lt_ak_next) == 0:
            # BUG FIX: this previously did `raise StopIteration()`.
            # Under PEP 479 (Python 3.7+) raising StopIteration inside a
            # generator is converted to RuntimeError; a plain return ends
            # the generator cleanly.
            return
        k = ak_lt_ak_next[-1, 0]
        # 2 - find largest a[l] < a[k]
        ak_lt_al = np.argwhere(a[k] < a)
        l = ak_lt_al[-1, 0]
        # 3 - swap
        a[k], a[l] = (a[l], a[k])
        # 4 - reverse
        if k < len(x)-1:
            a[k+1:] = a[:k:-1].copy()
        yield x[a].tolist()
[ "def", "permutations", "(", "x", ")", ":", "#", "# The algorithm is attributed to Narayana Pandit from his", "# Ganita Kaumundi (1356). The following is from", "#", "# http://en.wikipedia.org/wiki/Permutation#Systematic_generation_of_all_permutations", "#", "# 1. Find the largest index k such that a[k] < a[k + 1].", "# If no such index exists, the permutation is the last permutation.", "# 2. Find the largest index l such that a[k] < a[l].", "# Since k + 1 is such an index, l is well defined and satisfies k < l.", "# 3. Swap a[k] with a[l].", "# 4. Reverse the sequence from a[k + 1] up to and including the final", "# element a[n].", "#", "yield", "list", "(", "x", ")", "# don't forget to do the first one", "x", "=", "np", ".", "array", "(", "x", ")", "a", "=", "np", ".", "arange", "(", "len", "(", "x", ")", ")", "while", "True", ":", "# 1 - find largest or stop", "ak_lt_ak_next", "=", "np", ".", "argwhere", "(", "a", "[", ":", "-", "1", "]", "<", "a", "[", "1", ":", "]", ")", "if", "len", "(", "ak_lt_ak_next", ")", "==", "0", ":", "raise", "StopIteration", "(", ")", "k", "=", "ak_lt_ak_next", "[", "-", "1", ",", "0", "]", "# 2 - find largest a[l] < a[k]", "ak_lt_al", "=", "np", ".", "argwhere", "(", "a", "[", "k", "]", "<", "a", ")", "l", "=", "ak_lt_al", "[", "-", "1", ",", "0", "]", "# 3 - swap", "a", "[", "k", "]", ",", "a", "[", "l", "]", "=", "(", "a", "[", "l", "]", ",", "a", "[", "k", "]", ")", "# 4 - reverse", "if", "k", "<", "len", "(", "x", ")", "-", "1", ":", "a", "[", "k", "+", "1", ":", "]", "=", "a", "[", ":", "k", ":", "-", "1", "]", ".", "copy", "(", ")", "yield", "x", "[", "a", "]", ".", "tolist", "(", ")" ]
31.28
20.36
def statistical_axes(fit, **kw):
    """
    Hyperbolic error using a statistical process (either sampling
    or noise errors)

    Integrates covariance with error level and degrees of freedom
    for plotting confidence intervals.

    Degrees of freedom is set to 2, which is the relevant number of
    independent dimensions to planar fitting of *a priori* centered data.

    Keyword args:
        method: 'noise' (default) or 'sampling' — covariance estimator.
        confidence_level: float, default 0.95.
        dof: degrees of freedom, default 2.
        chisq: if truthy, use the chi2 statistic instead of Fisher's F.

    Raises:
        ValueError: if `method` is not one of the supported estimators.
    """
    method = kw.pop('method', 'noise')
    confidence_level = kw.pop('confidence_level', 0.95)
    dof = kw.pop('dof', 2)
    nominal = fit.eigenvalues

    if method == 'sampling':
        cov = sampling_covariance(fit, **kw)
    elif method == 'noise':
        cov = noise_covariance(fit, **kw)
    else:
        # Previously an unknown method fell through to an
        # UnboundLocalError on `cov`; fail fast with a clear message.
        raise ValueError("Unknown covariance method: {!r}".format(method))

    if kw.pop('chisq', False):
        # Model the incorrect behaviour of using the
        # Chi2 distribution instead of the Fisher
        # distribution (which is a measure of the
        # ratio between the two).
        z = chi2.ppf(confidence_level, dof)
    else:
        z = fisher_statistic(fit.n, confidence_level, dof=dof)

    # Apply two fisher F parameters (one along each axis)
    # Since we apply to each axis without division,
    # it is as if we are applying N.sqrt(2*F) to the entire
    # distribution, aligning us with (Francq, 2014)
    err = z*N.sqrt(cov)
    return apply_error_scaling(nominal, err, n=fit.n, **kw)
[ "def", "statistical_axes", "(", "fit", ",", "*", "*", "kw", ")", ":", "method", "=", "kw", ".", "pop", "(", "'method'", ",", "'noise'", ")", "confidence_level", "=", "kw", ".", "pop", "(", "'confidence_level'", ",", "0.95", ")", "dof", "=", "kw", ".", "pop", "(", "'dof'", ",", "2", ")", "nominal", "=", "fit", ".", "eigenvalues", "if", "method", "==", "'sampling'", ":", "cov", "=", "sampling_covariance", "(", "fit", ",", "*", "*", "kw", ")", "elif", "method", "==", "'noise'", ":", "cov", "=", "noise_covariance", "(", "fit", ",", "*", "*", "kw", ")", "if", "kw", ".", "pop", "(", "'chisq'", ",", "False", ")", ":", "# Model the incorrect behaviour of using the", "# Chi2 distribution instead of the Fisher", "# distribution (which is a measure of the", "# ratio between the two).", "z", "=", "chi2", ".", "ppf", "(", "confidence_level", ",", "dof", ")", "else", ":", "z", "=", "fisher_statistic", "(", "fit", ".", "n", ",", "confidence_level", ",", "dof", "=", "dof", ")", "# Apply two fisher F parameters (one along each axis)", "# Since we apply to each axis without division,", "# it is as if we are applying N.sqrt(2*F) to the entire", "# distribution, aligning us with (Francq, 2014)", "err", "=", "z", "*", "N", ".", "sqrt", "(", "cov", ")", "return", "apply_error_scaling", "(", "nominal", ",", "err", ",", "n", "=", "fit", ".", "n", ",", "*", "*", "kw", ")" ]
33.051282
15.358974
def p_gate_op_1(self, program):
    """
    gate_op : CX id ',' id ';'
    """
    # NOTE: the docstring above appears to be a yacc/PLY grammar
    # production (p_* naming convention) — the parser generator reads it,
    # so its text must not be edited casually.
    # Build the CNOT node from the two id operands, then validate that
    # both bits were declared and are distinct.
    program[0] = node.Cnot([program[2], program[4]])
    self.verify_declared_bit(program[2])
    self.verify_declared_bit(program[4])
    self.verify_distinct([program[2], program[4]])
[ "def", "p_gate_op_1", "(", "self", ",", "program", ")", ":", "program", "[", "0", "]", "=", "node", ".", "Cnot", "(", "[", "program", "[", "2", "]", ",", "program", "[", "4", "]", "]", ")", "self", ".", "verify_declared_bit", "(", "program", "[", "2", "]", ")", "self", ".", "verify_declared_bit", "(", "program", "[", "4", "]", ")", "self", ".", "verify_distinct", "(", "[", "program", "[", "2", "]", ",", "program", "[", "4", "]", "]", ")" ]
35.625
6.625
async def servo_config(self, pin, min_pulse=544, max_pulse=2400):
    """
    Configure a pin as a servo pin. Set pulse min, max in ms.

    Use this method (not set_pin_mode) to configure a pin for servo
    operation.

    :param pin: Servo Pin.
    :param min_pulse: Min pulse width in ms.
    :param max_pulse: Max pulse width in ms.
    :returns: No return value
    """
    # Each pulse width is split into two 7-bit bytes, low byte first.
    command = [
        pin,
        min_pulse & 0x7f,
        (min_pulse >> 7) & 0x7f,
        max_pulse & 0x7f,
        (max_pulse >> 7) & 0x7f,
    ]
    await self._send_sysex(PrivateConstants.SERVO_CONFIG, command)
[ "async", "def", "servo_config", "(", "self", ",", "pin", ",", "min_pulse", "=", "544", ",", "max_pulse", "=", "2400", ")", ":", "command", "=", "[", "pin", ",", "min_pulse", "&", "0x7f", ",", "(", "min_pulse", ">>", "7", ")", "&", "0x7f", ",", "max_pulse", "&", "0x7f", ",", "(", "max_pulse", ">>", "7", ")", "&", "0x7f", "]", "await", "self", ".", "_send_sysex", "(", "PrivateConstants", ".", "SERVO_CONFIG", ",", "command", ")" ]
33.166667
22.944444
def condition_from_text(text) -> Condition:
    """
    Return a Condition instance with PEG grammar from text

    :param text: PEG parsable string
    :return:
    """
    try:
        return pypeg2.parse(text, output.Condition)
    except SyntaxError:
        # Invalid conditions can occur in the wild, see
        # https://github.com/duniter/duniter/issues/1156
        # Such conditions are stored as an empty PEG grammar object and
        # considered unlockable.
        return Condition(text)
[ "def", "condition_from_text", "(", "text", ")", "->", "Condition", ":", "try", ":", "condition", "=", "pypeg2", ".", "parse", "(", "text", ",", "output", ".", "Condition", ")", "except", "SyntaxError", ":", "# Invalid conditions are possible, see https://github.com/duniter/duniter/issues/1156", "# In such a case, they are store as empty PEG grammar object and considered unlockable", "condition", "=", "Condition", "(", "text", ")", "return", "condition" ]
38.571429
20.142857
def merge_all_models_into_first_model(biop_structure):
    """Merge all existing models into a Structure's first_model attribute.

    This directly modifies the Biopython Structure object. Merged-in chains
    are re-lettered from ascii_uppercase starting at index 1 — i.e. 'B',
    'C', ... NOTE(review): the original docstring said chain IDs "start
    from A", but idx starts at 1; presumably this avoids clashing with the
    first model's own chain 'A' — confirm intent. Also note this indexes
    ascii_uppercase directly, so more than 25 merged chains would raise
    IndexError.

    Args:
        biop_structure (Structure): Structure with multiple models that should be merged
    """
    from string import ascii_uppercase

    # idx = 1 means the first merged chain is named 'B', not 'A'.
    idx = 1
    first_model = biop_structure[0]
    for m in biop_structure.get_models():
        # Don't duplicate the original model
        if first_model.id == m.id:
            continue
        for c in m.get_chains():
            c.id = ascii_uppercase[idx]
            first_model.add(c)
            idx += 1
[ "def", "merge_all_models_into_first_model", "(", "biop_structure", ")", ":", "from", "string", "import", "ascii_uppercase", "idx", "=", "1", "first_model", "=", "biop_structure", "[", "0", "]", "for", "m", "in", "biop_structure", ".", "get_models", "(", ")", ":", "# Don't duplicate the original model", "if", "first_model", ".", "id", "==", "m", ".", "id", ":", "continue", "for", "c", "in", "m", ".", "get_chains", "(", ")", ":", "c", ".", "id", "=", "ascii_uppercase", "[", "idx", "]", "first_model", ".", "add", "(", "c", ")", "idx", "+=", "1" ]
32.818182
20.045455
def resolve_url(self, resource_name):
    """Return a URL to a local copy of a resource, suitable for get_generator()

    For Package URLS, resolution involves generating a URL to a data file
    from the package URL and the value of a resource. The resource value,
    the url, can be one of:

    - An absolute URL, with a web scheme
    - A relative URL, relative to the package, with a file scheme.

    URLs with non-file schemes are returned. File scheme are assumed to be
    relative to the package, and are resolved according to the type of
    resource.

    :param resource_name: name (or relative url) of the resource to resolve.
    :raises ResourceError: when the resource is missing or its download fails.
    """
    u = parse_app_url(resource_name)

    if u.scheme != 'file':
        # Absolute web URL: nothing to resolve.
        t = u
    elif self.target_format == 'csv' and self.target_file != DEFAULT_METATAB_FILE:
        # There are two forms for CSV package URLS:
        # - A CSV package, which can only have absolute URLs
        # - A Filesystem package, which can have relative URLs.

        # The complication is that the filesystem package usually has a
        # metadata file named DEFAULT_METATAB_FILE, which can distinguish it
        # from a CSV package, but it's also possible to have a filesystem
        # package with a non standard package name.

        # So, this clause can happen for two cases: A CSV package or a
        # Filesystem package with a nonstandard metadata file name.

        # For CSV packages, need to get the package and open it to get the
        # resource URL, because they are always absolute web URLs and may
        # not be related to the location of the metadata.
        s = self.get_resource()
        rs = s.metadata_url.doc.resource(resource_name)

        if rs is not None:
            t = parse_app_url(rs.url)
        else:
            raise ResourceError("No resource for '{}' in '{}' ".format(resource_name, self))
    else:
        # Filesystem package: join the resource onto the package URL and
        # fetch it (which may download).
        jt = self.join_target(resource_name)

        try:
            rs = jt.get_resource()
        except DownloadError:
            raise ResourceError(
                "Failed to download resource for '{}' for '{}' in '{}'".format(jt, resource_name, self))

        t = rs.get_target()

    return t
[ "def", "resolve_url", "(", "self", ",", "resource_name", ")", ":", "u", "=", "parse_app_url", "(", "resource_name", ")", "if", "u", ".", "scheme", "!=", "'file'", ":", "t", "=", "u", "elif", "self", ".", "target_format", "==", "'csv'", "and", "self", ".", "target_file", "!=", "DEFAULT_METATAB_FILE", ":", "# Thre are two forms for CSV package URLS:", "# - A CSV package, which can only have absolute URLs", "# - A Filesystem package, which can have relative URLs.", "# The complication is that the filsystem package usually has a metadata file named", "# DEFAULT_METATAB_FILE, which can distinguish it from a CSV package, but it's also possible", "# to have a filesystem package with a non standard package name.", "# So, this clause can happed for two cases: A CSV package or a Filesystem package with a nonstandard", "# metadata file name.", "# For CSV packages, need to get the package and open it to get the resource URL, because", "# they are always absolute web URLs and may not be related to the location of the metadata.", "s", "=", "self", ".", "get_resource", "(", ")", "rs", "=", "s", ".", "metadata_url", ".", "doc", ".", "resource", "(", "resource_name", ")", "if", "rs", "is", "not", "None", ":", "t", "=", "parse_app_url", "(", "rs", ".", "url", ")", "else", ":", "raise", "ResourceError", "(", "\"No resource for '{}' in '{}' \"", ".", "format", "(", "resource_name", ",", "self", ")", ")", "else", ":", "jt", "=", "self", ".", "join_target", "(", "resource_name", ")", "try", ":", "rs", "=", "jt", ".", "get_resource", "(", ")", "except", "DownloadError", ":", "raise", "ResourceError", "(", "\"Failed to download resource for '{}' for '{}' in '{}'\"", ".", "format", "(", "jt", ",", "resource_name", ",", "self", ")", ")", "t", "=", "rs", ".", "get_target", "(", ")", "return", "t" ]
41.942308
30.057692
def get_exception_information(self, index):
    """
    @type  index: int
    @param index: Index into the exception information block.

    @rtype:  int
    @return: Exception information DWORD.

    @raise IndexError: The index is outside the parameter array.
    """
    # Valid slots are 0 .. EXCEPTION_MAXIMUM_PARAMETERS - 1. The original
    # check used `>`, which let index == EXCEPTION_MAXIMUM_PARAMETERS
    # through — one past the end of the ExceptionInformation array.
    if index < 0 or index >= win32.EXCEPTION_MAXIMUM_PARAMETERS:
        raise IndexError("Array index out of range: %s" % repr(index))
    info = self.raw.u.Exception.ExceptionRecord.ExceptionInformation
    value = info[index]
    # Unused trailing slots may be None; normalize to 0.
    if value is None:
        value = 0
    return value
[ "def", "get_exception_information", "(", "self", ",", "index", ")", ":", "if", "index", "<", "0", "or", "index", ">", "win32", ".", "EXCEPTION_MAXIMUM_PARAMETERS", ":", "raise", "IndexError", "(", "\"Array index out of range: %s\"", "%", "repr", "(", "index", ")", ")", "info", "=", "self", ".", "raw", ".", "u", ".", "Exception", ".", "ExceptionRecord", ".", "ExceptionInformation", "value", "=", "info", "[", "index", "]", "if", "value", "is", "None", ":", "value", "=", "0", "return", "value" ]
35.066667
17.866667
def safe_unicode(string):
    """If Python 2, replace non-ascii characters and return encoded string."""
    if PY3:
        # Python 3 strings are already unicode; pass through untouched.
        return string
    # Python 2: swap the right single quotation mark for an ASCII
    # apostrophe, then return UTF-8 encoded bytes.
    return string.replace(u'\u2019', "'").encode('utf-8')
[ "def", "safe_unicode", "(", "string", ")", ":", "if", "not", "PY3", ":", "uni", "=", "string", ".", "replace", "(", "u'\\u2019'", ",", "\"'\"", ")", "return", "uni", ".", "encode", "(", "'utf-8'", ")", "return", "string" ]
31.571429
15
def delete(self, path=None, headers=None):
    """
    HTTP DELETE

    - path: string, optional additional path appended to the URI
    - headers: dict, optional headers added to the HTTP request

    Returns whatever self.request returns for the DELETE call.
    """
    return self.request("DELETE", path=path, headers=headers)
[ "def", "delete", "(", "self", ",", "path", "=", "None", ",", "headers", "=", "None", ")", ":", "return", "self", ".", "request", "(", "\"DELETE\"", ",", "path", "=", "path", ",", "headers", "=", "headers", ")" ]
42.625
9.125
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types):
    """
    Dispatch to the GMPE registered for the given IMT and return its
    mean and standard deviations.
    """
    # self.kwargs maps the string form of each IMT to its GMPE instance.
    gmpe = self.kwargs[str(imt)]
    return gmpe.get_mean_and_stddevs(sctx, rctx, dctx, imt, stddev_types)
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sctx", ",", "rctx", ",", "dctx", ",", "imt", ",", "stddev_types", ")", ":", "return", "self", ".", "kwargs", "[", "str", "(", "imt", ")", "]", ".", "get_mean_and_stddevs", "(", "sctx", ",", "rctx", ",", "dctx", ",", "imt", ",", "stddev_types", ")" ]
44.666667
14.333333
def get_followers(self, auth_secret):
    """Get the follower list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the follower list is successfully obtained, False otherwise.
    result
        A dict containing the follower list with the key FOLLOWER_LIST_KEY
        if the follower list is successfully obtained, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Get the list of followers' userids.
    # Followers are kept in a per-user sorted set; zrange(0, -1) fetches all.
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    follower_userids = self._rc.zrange(follower_zset_key, 0, -1)

    if follower_userids is None or not follower_userids:
        result[pytwis_constants.FOLLOWER_LIST_KEY] = []
        return (True, result)

    # Get the list of followers' usernames from their userids.
    # Batch the per-follower profile lookups into one pipelined round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for follower_userid in follower_userids:
            follower_userid_profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
            pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()

    return (True, result)
[ "def", "get_followers", "(", "self", ",", "auth_secret", ")", ":", "result", "=", "{", "pytwis_constants", ".", "ERROR_KEY", ":", "None", "}", "# Check if the user is logged in.", "loggedin", ",", "userid", "=", "self", ".", "_is_loggedin", "(", "auth_secret", ")", "if", "not", "loggedin", ":", "result", "[", "pytwis_constants", ".", "ERROR_KEY", "]", "=", "pytwis_constants", ".", "ERROR_NOT_LOGGED_IN", "return", "(", "False", ",", "result", ")", "# Get the list of followers' userids.", "follower_zset_key", "=", "pytwis_constants", ".", "FOLLOWER_KEY_FORMAT", ".", "format", "(", "userid", ")", "follower_userids", "=", "self", ".", "_rc", ".", "zrange", "(", "follower_zset_key", ",", "0", ",", "-", "1", ")", "if", "follower_userids", "is", "None", "or", "not", "follower_userids", ":", "result", "[", "pytwis_constants", ".", "FOLLOWER_LIST_KEY", "]", "=", "[", "]", "return", "(", "True", ",", "result", ")", "# Get the list of followers' usernames from their userids.", "with", "self", ".", "_rc", ".", "pipeline", "(", ")", "as", "pipe", ":", "pipe", ".", "multi", "(", ")", "for", "follower_userid", "in", "follower_userids", ":", "follower_userid_profile_key", "=", "pytwis_constants", ".", "USER_PROFILE_KEY_FORMAT", ".", "format", "(", "follower_userid", ")", "pipe", ".", "hget", "(", "follower_userid_profile_key", ",", "pytwis_constants", ".", "USERNAME_KEY", ")", "result", "[", "pytwis_constants", ".", "FOLLOWER_LIST_KEY", "]", "=", "pipe", ".", "execute", "(", ")", "return", "(", "True", ",", "result", ")" ]
35.54902
24.45098
def lowest(self, rtol=1.e-5, atol=1.e-8):
    """Return a sample set containing the lowest-energy samples.

    A sample is kept when its energy is within tolerance of the minimum
    energy present in this sample set, as judged by
    absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) — see
    :func:`numpy.isclose`.

    Args:
        rtol (float, optional, default=1.e-5): Relative tolerance.
        atol (float, optional, default=1.e-8): Absolute tolerance.

    Returns:
        :obj:`.SampleSet`: A new sample set restricted to the rows whose
        energy is within tolerance of the lowest energy found here.

    Note:
        "Lowest energy" is the lowest energy in the sample set, which is
        not necessarily the ground energy of the underlying binary
        quadratic model.
    """
    if len(self) == 0:
        # Nothing to filter; every (zero) sample is trivially lowest.
        return self.copy()

    rec = self.record
    minimum = np.min(rec.energy)
    # Boolean mask of rows whose energy is close enough to the minimum.
    keep = np.isclose(rec.energy, minimum, rtol=rtol, atol=atol)
    filtered = rec[keep]
    return type(self)(filtered, self.variables,
                      copy.deepcopy(self.info), self.vartype)
[ "def", "lowest", "(", "self", ",", "rtol", "=", "1.e-5", ",", "atol", "=", "1.e-8", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "# empty so all are lowest", "return", "self", ".", "copy", "(", ")", "record", "=", "self", ".", "record", "# want all the rows within tolerance of the minimal energy", "close", "=", "np", ".", "isclose", "(", "record", ".", "energy", ",", "np", ".", "min", "(", "record", ".", "energy", ")", ",", "rtol", "=", "rtol", ",", "atol", "=", "atol", ")", "record", "=", "record", "[", "close", "]", "return", "type", "(", "self", ")", "(", "record", ",", "self", ".", "variables", ",", "copy", ".", "deepcopy", "(", "self", ".", "info", ")", ",", "self", ".", "vartype", ")" ]
36.526316
21.245614
def put(self, transfer_id, amount, created_timestamp, receipt):
    """
    :param transfer_id: int of the account_id to deposit the money to
    :param amount: float of the amount to transfer
    :param created_timestamp: str of the validated receipt that money has
        been received
    :param receipt: str of the receipt
    :return: Transfer dict
    """
    payload = dict(transfer_id=transfer_id,
                   amount=amount,
                   created_timestamp=created_timestamp,
                   receipt=receipt)
    return self.connection.put('account/transfer/claim', data=payload)
[ "def", "put", "(", "self", ",", "transfer_id", ",", "amount", ",", "created_timestamp", ",", "receipt", ")", ":", "return", "self", ".", "connection", ".", "put", "(", "'account/transfer/claim'", ",", "data", "=", "dict", "(", "transfer_id", "=", "transfer_id", ",", "amount", "=", "amount", ",", "created_timestamp", "=", "created_timestamp", ",", "receipt", "=", "receipt", ")", ")" ]
58.076923
22.538462
def call_path(self, basepath):
    """return that path to be able to call this script from the passed
    in basename

    example -- 
        basepath = /foo/bar
        self.path = /foo/bar/che/baz.py
        self.call_path(basepath) # che/baz.py

    basepath -- string -- the directory you would be calling this script in
    return -- string -- the minimum path that you could use to execute this
        script in basepath
    """
    rel_filepath = self.path
    if basepath:
        rel_filepath = os.path.relpath(self.path, basepath)

    # Package entry points are invoked via their containing directory,
    # not the file itself.
    # (idiom fix: was `set(['__init__.py', '__main__.py'])`; a tuple
    # membership test is the conventional form for two literals.)
    if self.name in ('__init__.py', '__main__.py'):
        rel_filepath = os.path.dirname(rel_filepath)

    return rel_filepath
[ "def", "call_path", "(", "self", ",", "basepath", ")", ":", "rel_filepath", "=", "self", ".", "path", "if", "basepath", ":", "rel_filepath", "=", "os", ".", "path", ".", "relpath", "(", "self", ".", "path", ",", "basepath", ")", "basename", "=", "self", ".", "name", "if", "basename", "in", "set", "(", "[", "'__init__.py'", ",", "'__main__.py'", "]", ")", ":", "rel_filepath", "=", "os", ".", "path", ".", "dirname", "(", "rel_filepath", ")", "return", "rel_filepath" ]
34.045455
20.409091
def _get_dependencies(sql): """ Return the list of variables referenced in this SQL. """ dependencies = [] for (_, placeholder, dollar, _) in SqlStatement._get_tokens(sql): if placeholder: variable = placeholder[1:] if variable not in dependencies: dependencies.append(variable) elif dollar: raise Exception('Invalid sql; $ with no following $ or identifier: %s.' % sql) return dependencies
[ "def", "_get_dependencies", "(", "sql", ")", ":", "dependencies", "=", "[", "]", "for", "(", "_", ",", "placeholder", ",", "dollar", ",", "_", ")", "in", "SqlStatement", ".", "_get_tokens", "(", "sql", ")", ":", "if", "placeholder", ":", "variable", "=", "placeholder", "[", "1", ":", "]", "if", "variable", "not", "in", "dependencies", ":", "dependencies", ".", "append", "(", "variable", ")", "elif", "dollar", ":", "raise", "Exception", "(", "'Invalid sql; $ with no following $ or identifier: %s.'", "%", "sql", ")", "return", "dependencies" ]
40.181818
15.636364
def sum_to_n(n, size, limit=None):
    #from http://stackoverflow.com/questions/2065553/python-get-all-numbers-that-add-up-to-a-number
    """Produce all lists of `size` positive integers in decreasing order
    that add up to `n`."""
    if size == 1:
        yield [n]
        return
    if limit is None:
        limit = n
    # The leading element runs from ceil(n / size) (so the remaining tail
    # can stay non-increasing) up to the largest value that still leaves
    # room for `size - 1` positive entries, capped by `limit`.
    lo = (n + size - 1) // size
    hi = min(limit, n - size + 1)
    for head in range(lo, hi + 1):
        for tail in sum_to_n(n - head, size - 1, head):
            yield [head] + tail
[ "def", "sum_to_n", "(", "n", ",", "size", ",", "limit", "=", "None", ")", ":", "#from http://stackoverflow.com/questions/2065553/python-get-all-numbers-that-add-up-to-a-number", "if", "size", "==", "1", ":", "yield", "[", "n", "]", "return", "if", "limit", "is", "None", ":", "limit", "=", "n", "start", "=", "(", "n", "+", "size", "-", "1", ")", "//", "size", "stop", "=", "min", "(", "limit", ",", "n", "-", "size", "+", "1", ")", "+", "1", "for", "i", "in", "range", "(", "start", ",", "stop", ")", ":", "for", "tail", "in", "sum_to_n", "(", "n", "-", "i", ",", "size", "-", "1", ",", "i", ")", ":", "yield", "[", "i", "]", "+", "tail" ]
38.153846
18.461538
def clean_slug(self): """slug title if is not provided """ slug = self.cleaned_data.get('slug', None) if slug is None or len(slug) == 0 and 'title' in self.cleaned_data: slug = slugify(self.cleaned_data['title']) return slug
[ "def", "clean_slug", "(", "self", ")", ":", "slug", "=", "self", ".", "cleaned_data", ".", "get", "(", "'slug'", ",", "None", ")", "if", "slug", "is", "None", "or", "len", "(", "slug", ")", "==", "0", "and", "'title'", "in", "self", ".", "cleaned_data", ":", "slug", "=", "slugify", "(", "self", ".", "cleaned_data", "[", "'title'", "]", ")", "return", "slug" ]
38.571429
14.142857
def generate(self): """Generate a new string and return it.""" key = self._propose_new_key() while self.key_exists(key): _logger.warning('Previous candidate was used.' ' Regenerating another...') key = self._propose_new_key() return key
[ "def", "generate", "(", "self", ")", ":", "key", "=", "self", ".", "_propose_new_key", "(", ")", "while", "self", ".", "key_exists", "(", "key", ")", ":", "_logger", ".", "warning", "(", "'Previous candidate was used.'", "' Regenerating another...'", ")", "key", "=", "self", ".", "_propose_new_key", "(", ")", "return", "key" ]
39.125
10.625
def tz_file(name): """ Open a timezone file from the zoneinfo subdir for reading. :param name: The name of the timezone. :type name: str :rtype: file """ try: filepath = tz_path(name) return open(filepath, 'rb') except TimezoneNotFound: # http://bugs.launchpad.net/bugs/383171 - we avoid using this # unless absolutely necessary to help when a broken version of # pkg_resources is installed. try: from pkg_resources import resource_stream except ImportError: resource_stream = None if resource_stream is not None: try: return resource_stream(__name__, 'zoneinfo/' + name) except FileNotFoundError: return tz_path(name) raise
[ "def", "tz_file", "(", "name", ")", ":", "try", ":", "filepath", "=", "tz_path", "(", "name", ")", "return", "open", "(", "filepath", ",", "'rb'", ")", "except", "TimezoneNotFound", ":", "# http://bugs.launchpad.net/bugs/383171 - we avoid using this", "# unless absolutely necessary to help when a broken version of", "# pkg_resources is installed.", "try", ":", "from", "pkg_resources", "import", "resource_stream", "except", "ImportError", ":", "resource_stream", "=", "None", "if", "resource_stream", "is", "not", "None", ":", "try", ":", "return", "resource_stream", "(", "__name__", ",", "'zoneinfo/'", "+", "name", ")", "except", "FileNotFoundError", ":", "return", "tz_path", "(", "name", ")", "raise" ]
27.068966
19.206897
def get_base_modules(): " Get list of installed modules. " return sorted(filter( lambda x: op.isdir(op.join(MOD_DIR, x)), listdir(MOD_DIR)))
[ "def", "get_base_modules", "(", ")", ":", "return", "sorted", "(", "filter", "(", "lambda", "x", ":", "op", ".", "isdir", "(", "op", ".", "join", "(", "MOD_DIR", ",", "x", ")", ")", ",", "listdir", "(", "MOD_DIR", ")", ")", ")" ]
30
16
def _exec(self, cmd, url, json_data=None): """ execute a command at the device using the RESTful API :param str cmd: one of the REST commands, e.g. GET or POST :param str url: URL of the REST API the command should be applied to :param dict json_data: json data that should be attached to the command """ assert(cmd in ("GET", "POST", "PUT", "DELETE")) assert(self.dev is not None) if json_data is None: json_data = {} # add device address to the URL url = url.format(self.dev["ipv4_internal"]) # set basic authentication auth = HTTPBasicAuth("dev", self.dev["api_key"]) # execute HTTP request res = None if cmd == "GET": res = self._local_session.session.get( url, auth=auth, verify=False ) elif cmd == "POST": res = self._local_session.session.post( url, auth=auth, json=json_data, verify=False ) elif cmd == "PUT": res = self._local_session.session.put( url, auth=auth, json=json_data, verify=False ) elif cmd == "DELETE": res = self._local_session.session.delete( url, auth=auth, verify=False ) if res is not None: # raise an exception on error res.raise_for_status() return res.json()
[ "def", "_exec", "(", "self", ",", "cmd", ",", "url", ",", "json_data", "=", "None", ")", ":", "assert", "(", "cmd", "in", "(", "\"GET\"", ",", "\"POST\"", ",", "\"PUT\"", ",", "\"DELETE\"", ")", ")", "assert", "(", "self", ".", "dev", "is", "not", "None", ")", "if", "json_data", "is", "None", ":", "json_data", "=", "{", "}", "# add device address to the URL", "url", "=", "url", ".", "format", "(", "self", ".", "dev", "[", "\"ipv4_internal\"", "]", ")", "# set basic authentication", "auth", "=", "HTTPBasicAuth", "(", "\"dev\"", ",", "self", ".", "dev", "[", "\"api_key\"", "]", ")", "# execute HTTP request", "res", "=", "None", "if", "cmd", "==", "\"GET\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "get", "(", "url", ",", "auth", "=", "auth", ",", "verify", "=", "False", ")", "elif", "cmd", "==", "\"POST\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "post", "(", "url", ",", "auth", "=", "auth", ",", "json", "=", "json_data", ",", "verify", "=", "False", ")", "elif", "cmd", "==", "\"PUT\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "put", "(", "url", ",", "auth", "=", "auth", ",", "json", "=", "json_data", ",", "verify", "=", "False", ")", "elif", "cmd", "==", "\"DELETE\"", ":", "res", "=", "self", ".", "_local_session", ".", "session", ".", "delete", "(", "url", ",", "auth", "=", "auth", ",", "verify", "=", "False", ")", "if", "res", "is", "not", "None", ":", "# raise an exception on error", "res", ".", "raise_for_status", "(", ")", "return", "res", ".", "json", "(", ")" ]
30.148936
19.638298
def points(self, points): """ set points without copying """ if not isinstance(points, np.ndarray): raise TypeError('Points must be a numpy array') vtk_points = vtki.vtk_points(points, False) self.SetPoints(vtk_points) self.GetPoints().Modified() self.Modified()
[ "def", "points", "(", "self", ",", "points", ")", ":", "if", "not", "isinstance", "(", "points", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "'Points must be a numpy array'", ")", "vtk_points", "=", "vtki", ".", "vtk_points", "(", "points", ",", "False", ")", "self", ".", "SetPoints", "(", "vtk_points", ")", "self", ".", "GetPoints", "(", ")", ".", "Modified", "(", ")", "self", ".", "Modified", "(", ")" ]
39.375
9.875
def reduce_logsumexp(x, reduced_dim, extra_logit=None, name=None): """Numerically stable version of log(reduce_sum(exp(x))). Unlike other reductions, the output has the same shape as the input. Note: with a minor change, we could allow multiple reduced dimensions. Args: x: a Tensor reduced_dim: a dimension in x extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim) name: an optional string Returns: a Tensor with the same shape and dtype as x. """ reduced_dim = convert_to_dimension(reduced_dim) with tf.variable_scope(name, default_name="reduce_logsumexp"): reduced_shape = x.shape - reduced_dim max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape) if extra_logit is not None: if isinstance(extra_logit, Tensor): extra_logit = stop_gradient(extra_logit) max_logit = maximum(max_logit, extra_logit) x -= max_logit exp_x = exp(x) sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape) if extra_logit is not None: sum_exp_x += exp(extra_logit - max_logit) return log(sum_exp_x) + max_logit
[ "def", "reduce_logsumexp", "(", "x", ",", "reduced_dim", ",", "extra_logit", "=", "None", ",", "name", "=", "None", ")", ":", "reduced_dim", "=", "convert_to_dimension", "(", "reduced_dim", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"reduce_logsumexp\"", ")", ":", "reduced_shape", "=", "x", ".", "shape", "-", "reduced_dim", "max_logit", "=", "reduce_max", "(", "stop_gradient", "(", "x", ")", ",", "output_shape", "=", "reduced_shape", ")", "if", "extra_logit", "is", "not", "None", ":", "if", "isinstance", "(", "extra_logit", ",", "Tensor", ")", ":", "extra_logit", "=", "stop_gradient", "(", "extra_logit", ")", "max_logit", "=", "maximum", "(", "max_logit", ",", "extra_logit", ")", "x", "-=", "max_logit", "exp_x", "=", "exp", "(", "x", ")", "sum_exp_x", "=", "reduce_sum", "(", "exp_x", ",", "output_shape", "=", "reduced_shape", ")", "if", "extra_logit", "is", "not", "None", ":", "sum_exp_x", "+=", "exp", "(", "extra_logit", "-", "max_logit", ")", "return", "log", "(", "sum_exp_x", ")", "+", "max_logit" ]
39.142857
17.714286
def zone_position(self): """ Returns the card's position (1-indexed) in its zone, or 0 if not available. """ if self.zone == Zone.HAND: return self.controller.hand.index(self) + 1 return 0
[ "def", "zone_position", "(", "self", ")", ":", "if", "self", ".", "zone", "==", "Zone", ".", "HAND", ":", "return", "self", ".", "controller", ".", "hand", ".", "index", "(", "self", ")", "+", "1", "return", "0" ]
27.857143
14.428571
def play_sync(self): """ Play the video and block whilst the video is playing """ self.play() logger.info("Playing synchronously") try: time.sleep(0.05) logger.debug("Wait for playing to start") while self.is_playing(): time.sleep(0.05) except DBusException: logger.error( "Cannot play synchronously any longer as DBus calls timed out." )
[ "def", "play_sync", "(", "self", ")", ":", "self", ".", "play", "(", ")", "logger", ".", "info", "(", "\"Playing synchronously\"", ")", "try", ":", "time", ".", "sleep", "(", "0.05", ")", "logger", ".", "debug", "(", "\"Wait for playing to start\"", ")", "while", "self", ".", "is_playing", "(", ")", ":", "time", ".", "sleep", "(", "0.05", ")", "except", "DBusException", ":", "logger", ".", "error", "(", "\"Cannot play synchronously any longer as DBus calls timed out.\"", ")" ]
31.466667
14.8
def _attach(self, purrlog, watchdirs=None): """Attaches Purr to a purrlog directory, and loads content. Returns False if nothing new has been loaded (because directory is the same), or True otherwise.""" purrlog = os.path.abspath(purrlog) dprint(1, "attaching to purrlog", purrlog) self.logdir = purrlog self.indexfile = os.path.join(self.logdir, "index.html") self.logtitle = "Unnamed log" self.timestamp = self.last_scan_timestamp = time.time() self._initIndexDir() # reset internal state self.ignorelistfile = None self.autopounce = False self.watched_dirs = [] self.entries = [] self._default_dp_props = {} self.watchers = {} self.temp_watchers = {} self.attached = False self._watching_state = {} # check that we hold a lock on the directory self.lockfile = os.path.join(self.logdir, ".purrlock") # try to open lock file for r/w try: self.lockfile_fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT) except: raise Purrer.LockFailError("failed to open lock file %s for writing" % self.lockfile) # try to acquire lock on the lock file try: fcntl.lockf(self.lockfile_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except: other_lock = os.fdopen(self.lockfile_fd, 'r').read() self.lockfile_fd = None raise Purrer.LockedError(other_lock) # got lock, write our ID to the lock file global _lockstring try: self.lockfile_fobj = os.fdopen(self.lockfile_fd, 'w') self.lockfile_fobj.write(_lockstring) self.lockfile_fobj.flush() os.fsync(self.lockfile_fd) except: raise # raise Purrer.LockFailError("cannot write to lock file %s"%self.lockfile) # load log state if log directory already exists if os.path.exists(self.logdir): _busy = Purr.BusyIndicator() if os.path.exists(self.indexfile): try: parser = Purr.Parsers.LogIndexParser() for line in open(self.indexfile): parser.feed(line) self.logtitle = parser.title or self.logtitle self.timestamp = parser.timestamp or self.timestamp dprintf(2, "attached log '%s', timestamp %s\n", self.logtitle, time.strftime("%x %X", time.localtime(self.timestamp))) except: 
traceback.print_exc() print("Error parsing %s, reverting to defaults" % self.indexfile) # load log entries entries = [] for fname in os.listdir(self.logdir): pathname = os.path.join(self.logdir, fname) if Purr.LogEntry.isValidPathname(pathname): try: entry = Purr.LogEntry(load=pathname) dprint(2, "loaded log entry", pathname) except: print("Error loading entry %s, skipping" % fname) traceback.print_exc() continue entries.append(entry) else: dprint(2, fname, "is not a valid Purr entry") # sort log entires by timestamp entries.sort(lambda a, b: cmp(a.timestamp, b.timestamp)) self.setLogEntries(entries, save=False) # update own timestamp if entries: self.timestamp = max(self.timestamp, entries[-1].timestamp) # else logfile doesn't exist, create it else: self._initIndexDir() # load configuration if it exists # init config file self.dirconfig = configparser.RawConfigParser() self.dirconfigfile = os.path.join(self.logdir, "dirconfig") if os.path.exists(self.dirconfigfile): try: self.dirconfig.read(self.dirconfigfile) except: print("Error loading config file %s" % self.dirconfigfile) traceback.print_exc() # load directory configuration for dirname in self.dirconfig.sections(): try: watching = self.dirconfig.getint(dirname, "watching") except: watching = Purr.WATCHED dirname = os.path.expanduser(dirname) self.addWatchedDirectory(dirname, watching, save_config=False) # start watching the specified directories for name in (watchdirs or []): self.addWatchedDirectory(name, watching=None) # Finally, go through list of ignored files and mark their watchers accordingly. # The ignorelist is a list of lines of the form "timestamp filename", giving the timestamp when a # file was last "ignored" by the purrlog user. 
self.ignorelistfile = os.path.join(self.logdir, "ignorelist") if os.path.exists(self.ignorelistfile): # read lines from file, ignore exceptions ignores = {} try: for line in open(self.ignorelistfile).readlines(): timestamp, policy, filename = line.strip().split(" ", 2) # update dictiornary with latest timestamp ignores[filename] = int(timestamp), policy except: print("Error reading %s" % self.ignorelistfile) traceback.print_exc() # now scan all listed files, and make sure their watchers' mtime is no older than the given # last-ignore-timestamp. This ensures that we don't pounce on these files after restarting purr. for filename, (timestamp, policy) in ignores.items(): watcher = self.watchers.get(filename, None) if watcher: watcher.mtime = max(watcher.mtime, timestamp) # init complete self.attached = True return True
[ "def", "_attach", "(", "self", ",", "purrlog", ",", "watchdirs", "=", "None", ")", ":", "purrlog", "=", "os", ".", "path", ".", "abspath", "(", "purrlog", ")", "dprint", "(", "1", ",", "\"attaching to purrlog\"", ",", "purrlog", ")", "self", ".", "logdir", "=", "purrlog", "self", ".", "indexfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "\"index.html\"", ")", "self", ".", "logtitle", "=", "\"Unnamed log\"", "self", ".", "timestamp", "=", "self", ".", "last_scan_timestamp", "=", "time", ".", "time", "(", ")", "self", ".", "_initIndexDir", "(", ")", "# reset internal state", "self", ".", "ignorelistfile", "=", "None", "self", ".", "autopounce", "=", "False", "self", ".", "watched_dirs", "=", "[", "]", "self", ".", "entries", "=", "[", "]", "self", ".", "_default_dp_props", "=", "{", "}", "self", ".", "watchers", "=", "{", "}", "self", ".", "temp_watchers", "=", "{", "}", "self", ".", "attached", "=", "False", "self", ".", "_watching_state", "=", "{", "}", "# check that we hold a lock on the directory", "self", ".", "lockfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "\".purrlock\"", ")", "# try to open lock file for r/w", "try", ":", "self", ".", "lockfile_fd", "=", "os", ".", "open", "(", "self", ".", "lockfile", ",", "os", ".", "O_RDWR", "|", "os", ".", "O_CREAT", ")", "except", ":", "raise", "Purrer", ".", "LockFailError", "(", "\"failed to open lock file %s for writing\"", "%", "self", ".", "lockfile", ")", "# try to acquire lock on the lock file", "try", ":", "fcntl", ".", "lockf", "(", "self", ".", "lockfile_fd", ",", "fcntl", ".", "LOCK_EX", "|", "fcntl", ".", "LOCK_NB", ")", "except", ":", "other_lock", "=", "os", ".", "fdopen", "(", "self", ".", "lockfile_fd", ",", "'r'", ")", ".", "read", "(", ")", "self", ".", "lockfile_fd", "=", "None", "raise", "Purrer", ".", "LockedError", "(", "other_lock", ")", "# got lock, write our ID to the lock file", "global", "_lockstring", "try", ":", "self", ".", 
"lockfile_fobj", "=", "os", ".", "fdopen", "(", "self", ".", "lockfile_fd", ",", "'w'", ")", "self", ".", "lockfile_fobj", ".", "write", "(", "_lockstring", ")", "self", ".", "lockfile_fobj", ".", "flush", "(", ")", "os", ".", "fsync", "(", "self", ".", "lockfile_fd", ")", "except", ":", "raise", "# raise Purrer.LockFailError(\"cannot write to lock file %s\"%self.lockfile)", "# load log state if log directory already exists", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "logdir", ")", ":", "_busy", "=", "Purr", ".", "BusyIndicator", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "indexfile", ")", ":", "try", ":", "parser", "=", "Purr", ".", "Parsers", ".", "LogIndexParser", "(", ")", "for", "line", "in", "open", "(", "self", ".", "indexfile", ")", ":", "parser", ".", "feed", "(", "line", ")", "self", ".", "logtitle", "=", "parser", ".", "title", "or", "self", ".", "logtitle", "self", ".", "timestamp", "=", "parser", ".", "timestamp", "or", "self", ".", "timestamp", "dprintf", "(", "2", ",", "\"attached log '%s', timestamp %s\\n\"", ",", "self", ".", "logtitle", ",", "time", ".", "strftime", "(", "\"%x %X\"", ",", "time", ".", "localtime", "(", "self", ".", "timestamp", ")", ")", ")", "except", ":", "traceback", ".", "print_exc", "(", ")", "print", "(", "\"Error parsing %s, reverting to defaults\"", "%", "self", ".", "indexfile", ")", "# load log entries", "entries", "=", "[", "]", "for", "fname", "in", "os", ".", "listdir", "(", "self", ".", "logdir", ")", ":", "pathname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "fname", ")", "if", "Purr", ".", "LogEntry", ".", "isValidPathname", "(", "pathname", ")", ":", "try", ":", "entry", "=", "Purr", ".", "LogEntry", "(", "load", "=", "pathname", ")", "dprint", "(", "2", ",", "\"loaded log entry\"", ",", "pathname", ")", "except", ":", "print", "(", "\"Error loading entry %s, skipping\"", "%", "fname", ")", "traceback", ".", "print_exc", "(", ")", "continue", 
"entries", ".", "append", "(", "entry", ")", "else", ":", "dprint", "(", "2", ",", "fname", ",", "\"is not a valid Purr entry\"", ")", "# sort log entires by timestamp", "entries", ".", "sort", "(", "lambda", "a", ",", "b", ":", "cmp", "(", "a", ".", "timestamp", ",", "b", ".", "timestamp", ")", ")", "self", ".", "setLogEntries", "(", "entries", ",", "save", "=", "False", ")", "# update own timestamp", "if", "entries", ":", "self", ".", "timestamp", "=", "max", "(", "self", ".", "timestamp", ",", "entries", "[", "-", "1", "]", ".", "timestamp", ")", "# else logfile doesn't exist, create it", "else", ":", "self", ".", "_initIndexDir", "(", ")", "# load configuration if it exists", "# init config file", "self", ".", "dirconfig", "=", "configparser", ".", "RawConfigParser", "(", ")", "self", ".", "dirconfigfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "\"dirconfig\"", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "dirconfigfile", ")", ":", "try", ":", "self", ".", "dirconfig", ".", "read", "(", "self", ".", "dirconfigfile", ")", "except", ":", "print", "(", "\"Error loading config file %s\"", "%", "self", ".", "dirconfigfile", ")", "traceback", ".", "print_exc", "(", ")", "# load directory configuration", "for", "dirname", "in", "self", ".", "dirconfig", ".", "sections", "(", ")", ":", "try", ":", "watching", "=", "self", ".", "dirconfig", ".", "getint", "(", "dirname", ",", "\"watching\"", ")", "except", ":", "watching", "=", "Purr", ".", "WATCHED", "dirname", "=", "os", ".", "path", ".", "expanduser", "(", "dirname", ")", "self", ".", "addWatchedDirectory", "(", "dirname", ",", "watching", ",", "save_config", "=", "False", ")", "# start watching the specified directories", "for", "name", "in", "(", "watchdirs", "or", "[", "]", ")", ":", "self", ".", "addWatchedDirectory", "(", "name", ",", "watching", "=", "None", ")", "# Finally, go through list of ignored files and mark their watchers accordingly.", "# The ignorelist 
is a list of lines of the form \"timestamp filename\", giving the timestamp when a", "# file was last \"ignored\" by the purrlog user.", "self", ".", "ignorelistfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "logdir", ",", "\"ignorelist\"", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "ignorelistfile", ")", ":", "# read lines from file, ignore exceptions", "ignores", "=", "{", "}", "try", ":", "for", "line", "in", "open", "(", "self", ".", "ignorelistfile", ")", ".", "readlines", "(", ")", ":", "timestamp", ",", "policy", ",", "filename", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ",", "2", ")", "# update dictiornary with latest timestamp", "ignores", "[", "filename", "]", "=", "int", "(", "timestamp", ")", ",", "policy", "except", ":", "print", "(", "\"Error reading %s\"", "%", "self", ".", "ignorelistfile", ")", "traceback", ".", "print_exc", "(", ")", "# now scan all listed files, and make sure their watchers' mtime is no older than the given", "# last-ignore-timestamp. This ensures that we don't pounce on these files after restarting purr.", "for", "filename", ",", "(", "timestamp", ",", "policy", ")", "in", "ignores", ".", "items", "(", ")", ":", "watcher", "=", "self", ".", "watchers", ".", "get", "(", "filename", ",", "None", ")", "if", "watcher", ":", "watcher", ".", "mtime", "=", "max", "(", "watcher", ".", "mtime", ",", "timestamp", ")", "# init complete", "self", ".", "attached", "=", "True", "return", "True" ]
46.883721
18.139535
def mean_otu_pct_abundance(ra, otuIDs): """ Calculate the mean OTU abundance percentage. :type ra: Dict :param ra: 'ra' refers to a dictionary keyed on SampleIDs, and the values are dictionaries keyed on OTUID's and their values represent the relative abundance of that OTUID in that SampleID. 'ra' is the output of relative_abundance() function. :type otuIDs: List :param otuIDs: A list of OTUID's for which the percentage abundance needs to be measured. :rtype: dict :return: A dictionary of OTUID and their percent relative abundance as key/value pair. """ sids = ra.keys() otumeans = defaultdict(int) for oid in otuIDs: otumeans[oid] = sum([ra[sid][oid] for sid in sids if oid in ra[sid]]) / len(sids) * 100 return otumeans
[ "def", "mean_otu_pct_abundance", "(", "ra", ",", "otuIDs", ")", ":", "sids", "=", "ra", ".", "keys", "(", ")", "otumeans", "=", "defaultdict", "(", "int", ")", "for", "oid", "in", "otuIDs", ":", "otumeans", "[", "oid", "]", "=", "sum", "(", "[", "ra", "[", "sid", "]", "[", "oid", "]", "for", "sid", "in", "sids", "if", "oid", "in", "ra", "[", "sid", "]", "]", ")", "/", "len", "(", "sids", ")", "*", "100", "return", "otumeans" ]
35.875
24.041667
def attach_usage_plan_to_apis(plan_id, apis, region=None, key=None, keyid=None, profile=None): ''' Attaches given usage plan to each of the apis provided in a list of apiId and stage values .. versionadded:: 2017.7.0 apis a list of dictionaries, where each dictionary contains the following: apiId a string, which is the id of the created API in AWS ApiGateway stage a string, which is the stage that the created API is deployed to. CLI Example: .. code-block:: bash salt myminion boto_apigateway.attach_usage_plan_to_apis plan_id='usage plan id' apis='[{"apiId": "some id 1", "stage": "some stage 1"}]' ''' return _update_usage_plan_apis(plan_id, apis, 'add', region=region, key=key, keyid=keyid, profile=profile)
[ "def", "attach_usage_plan_to_apis", "(", "plan_id", ",", "apis", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "return", "_update_usage_plan_apis", "(", "plan_id", ",", "apis", ",", "'add'", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")" ]
34.26087
39.652174
def tokenize(self, s): """Splits a string into tokens.""" s = tf.compat.as_text(s) if self.reserved_tokens: # First split out the reserved tokens substrs = self._reserved_tokens_re.split(s) else: substrs = [s] toks = [] for substr in substrs: if substr in self.reserved_tokens: toks.append(substr) else: toks.extend(self._alphanum_re.split(substr)) # Filter out empty strings toks = [t for t in toks if t] return toks
[ "def", "tokenize", "(", "self", ",", "s", ")", ":", "s", "=", "tf", ".", "compat", ".", "as_text", "(", "s", ")", "if", "self", ".", "reserved_tokens", ":", "# First split out the reserved tokens", "substrs", "=", "self", ".", "_reserved_tokens_re", ".", "split", "(", "s", ")", "else", ":", "substrs", "=", "[", "s", "]", "toks", "=", "[", "]", "for", "substr", "in", "substrs", ":", "if", "substr", "in", "self", ".", "reserved_tokens", ":", "toks", ".", "append", "(", "substr", ")", "else", ":", "toks", ".", "extend", "(", "self", ".", "_alphanum_re", ".", "split", "(", "substr", ")", ")", "# Filter out empty strings", "toks", "=", "[", "t", "for", "t", "in", "toks", "if", "t", "]", "return", "toks" ]
24.15
18.15
def pack(self, value=None): """Pack the value as a binary representation. Returns: bytes: The binary representation. Raises: ValueError: If value can't be represented with bytes """ if value is None: value = self._value if hasattr(value, 'pack') and callable(value.pack): return value.pack() elif isinstance(value, bytes): return value elif value is None: return b'' else: raise ValueError(f"BinaryData can't be {type(value)} = '{value}'")
[ "def", "pack", "(", "self", ",", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "value", "=", "self", ".", "_value", "if", "hasattr", "(", "value", ",", "'pack'", ")", "and", "callable", "(", "value", ".", "pack", ")", ":", "return", "value", ".", "pack", "(", ")", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "return", "value", "elif", "value", "is", "None", ":", "return", "b''", "else", ":", "raise", "ValueError", "(", "f\"BinaryData can't be {type(value)} = '{value}'\"", ")" ]
27.571429
19.857143
def write_svg(matrix, version, out, scale=1, border=None, color='#000', background=None, xmldecl=True, svgns=True, title=None, desc=None, svgid=None, svgclass='segno', lineclass='qrline', omitsize=False, unit=None, encoding='utf-8', svgversion=None, nl=True): """\ Serializes the QR Code as SVG document. :param matrix: The matrix to serialize. :param int version: The (Micro) QR code version :param out: Filename or a file-like object supporting to write bytes. :param scale: Indicates the size of a single module (default: 1 which corresponds to 1 x 1 pixel per module). :param int border: Integer indicating the size of the quiet zone. If set to ``None`` (default), the recommended border size will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes). :param color: Color of the modules (default: ``#000``). Any value which is supported by SVG can be used. In addition, ``None`` is a valid value. The resulting path won't have a ``stroke`` attribute. :param background: Optional background color (default: ``None`` = no background color). See `color` for valid values. :param bool xmldecl: Inidcates if the XML declaration header should be written (default: ``True``) :param bool svgns: Indicates if the SVG namespace should be written (default: ``True``). :param str title: Optional title of the generated SVG document. :param str desc: Optional description of the generated SVG document. :param svgid: The ID of the SVG document (if set to ``None`` (default), the SVG element won't have an ID). :param svgclass: The CSS class of the SVG document (if set to ``None``, the SVG element won't have a class). :param lineclass: The CSS class of the path element (which draws the "black" modules (if set to ``None``, the path won't have a class). :param bool omitsize: Indicates if width and height attributes should be omitted (default: ``False``). If these attributes are omitted, a ``viewBox`` attribute will be added to the document. 
:param str unit: Unit for width / height and other coordinates. By default, the unit is unspecified and all values are in the user space. Valid values: em, ex, px, pt, pc, cm, mm, in, and percentages :param str encoding: Encoding of the XML document. "utf-8" by default. :param float svgversion: SVG version (default: None) :param bool nl: Indicates if the document should have a trailing newline (default: ``True``) """ check_valid_scale(scale) check_valid_border(border) unit = unit or '' if unit and omitsize: raise ValueError('The unit "{0}" has no effect if the size ' '(width and height) is omitted.'.format(unit)) with writable(out, 'wt', encoding=encoding) as f: write = f.write # Write the document header if xmldecl: write('<?xml version="1.0" encoding="{0}"?>\n'.format(encoding)) write('<svg') if svgns: write(' xmlns="http://www.w3.org/2000/svg"') if svgversion is not None and svgversion < 2.0: write(' version={0}'.format(quoteattr(str(svgversion)))) border = get_border(version, border) width, height = get_symbol_size(version, scale, border) if not omitsize: write(' width="{0}{2}" height="{1}{2}"'.format(width, height, unit)) if omitsize or unit: write(' viewBox="0 0 {0} {1}"'.format(width, height)) if svgid: write(' id={0}'.format(quoteattr(svgid))) if svgclass: write(' class={0}'.format(quoteattr(svgclass))) write('>') if title is not None: write('<title>{0}</title>'.format(escape(title))) if desc is not None: write('<desc>{0}</desc>'.format(escape(desc))) allow_css3_colors = svgversion is not None and svgversion >= 2.0 if background is not None: bg_color = colors.color_to_webcolor(background, allow_css3_colors=allow_css3_colors) fill_opacity = '' if isinstance(bg_color, tuple): bg_color, opacity = bg_color fill_opacity = ' fill-opacity={0}'.format(quoteattr(str(opacity))) write('<path fill="{0}"{1} d="M0 0h{2}v{3}h-{2}z"/>' .format(bg_color, fill_opacity, width, height)) write('<path') if scale != 1: write(' transform="scale({0})"'.format(scale)) if color is 
not None: opacity = None stroke_color = colors.color_to_webcolor(color, allow_css3_colors=allow_css3_colors) if isinstance(stroke_color, tuple): stroke_color, opacity = stroke_color write(' stroke={0}'.format(quoteattr(stroke_color))) if opacity is not None: write(' stroke-opacity={0}'.format(quoteattr(str(opacity)))) if lineclass: write(' class={0}'.format(quoteattr(lineclass))) write(' d="') # Current pen pointer position x, y = border, border + .5 # .5 == stroke-width / 2 line_iter = matrix_to_lines(matrix, x, y) # 1st coord is absolute (x1, y1), (x2, y2) = next(line_iter) coord = ['M{0} {1}h{2}'.format(x1, y1, x2 - x1)] append_coord = coord.append x, y = x2, y2 for (x1, y1), (x2, y2) in line_iter: append_coord('m{0} {1}h{2}'.format(x1 - x, int(y1 - y), x2 - x1)) x, y = x2, y2 write(''.join(coord)) # Close path and doc write('"/></svg>') if nl: write('\n')
[ "def", "write_svg", "(", "matrix", ",", "version", ",", "out", ",", "scale", "=", "1", ",", "border", "=", "None", ",", "color", "=", "'#000'", ",", "background", "=", "None", ",", "xmldecl", "=", "True", ",", "svgns", "=", "True", ",", "title", "=", "None", ",", "desc", "=", "None", ",", "svgid", "=", "None", ",", "svgclass", "=", "'segno'", ",", "lineclass", "=", "'qrline'", ",", "omitsize", "=", "False", ",", "unit", "=", "None", ",", "encoding", "=", "'utf-8'", ",", "svgversion", "=", "None", ",", "nl", "=", "True", ")", ":", "check_valid_scale", "(", "scale", ")", "check_valid_border", "(", "border", ")", "unit", "=", "unit", "or", "''", "if", "unit", "and", "omitsize", ":", "raise", "ValueError", "(", "'The unit \"{0}\" has no effect if the size '", "'(width and height) is omitted.'", ".", "format", "(", "unit", ")", ")", "with", "writable", "(", "out", ",", "'wt'", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "write", "=", "f", ".", "write", "# Write the document header", "if", "xmldecl", ":", "write", "(", "'<?xml version=\"1.0\" encoding=\"{0}\"?>\\n'", ".", "format", "(", "encoding", ")", ")", "write", "(", "'<svg'", ")", "if", "svgns", ":", "write", "(", "' xmlns=\"http://www.w3.org/2000/svg\"'", ")", "if", "svgversion", "is", "not", "None", "and", "svgversion", "<", "2.0", ":", "write", "(", "' version={0}'", ".", "format", "(", "quoteattr", "(", "str", "(", "svgversion", ")", ")", ")", ")", "border", "=", "get_border", "(", "version", ",", "border", ")", "width", ",", "height", "=", "get_symbol_size", "(", "version", ",", "scale", ",", "border", ")", "if", "not", "omitsize", ":", "write", "(", "' width=\"{0}{2}\" height=\"{1}{2}\"'", ".", "format", "(", "width", ",", "height", ",", "unit", ")", ")", "if", "omitsize", "or", "unit", ":", "write", "(", "' viewBox=\"0 0 {0} {1}\"'", ".", "format", "(", "width", ",", "height", ")", ")", "if", "svgid", ":", "write", "(", "' id={0}'", ".", "format", "(", "quoteattr", "(", "svgid", ")", ")", ")", 
"if", "svgclass", ":", "write", "(", "' class={0}'", ".", "format", "(", "quoteattr", "(", "svgclass", ")", ")", ")", "write", "(", "'>'", ")", "if", "title", "is", "not", "None", ":", "write", "(", "'<title>{0}</title>'", ".", "format", "(", "escape", "(", "title", ")", ")", ")", "if", "desc", "is", "not", "None", ":", "write", "(", "'<desc>{0}</desc>'", ".", "format", "(", "escape", "(", "desc", ")", ")", ")", "allow_css3_colors", "=", "svgversion", "is", "not", "None", "and", "svgversion", ">=", "2.0", "if", "background", "is", "not", "None", ":", "bg_color", "=", "colors", ".", "color_to_webcolor", "(", "background", ",", "allow_css3_colors", "=", "allow_css3_colors", ")", "fill_opacity", "=", "''", "if", "isinstance", "(", "bg_color", ",", "tuple", ")", ":", "bg_color", ",", "opacity", "=", "bg_color", "fill_opacity", "=", "' fill-opacity={0}'", ".", "format", "(", "quoteattr", "(", "str", "(", "opacity", ")", ")", ")", "write", "(", "'<path fill=\"{0}\"{1} d=\"M0 0h{2}v{3}h-{2}z\"/>'", ".", "format", "(", "bg_color", ",", "fill_opacity", ",", "width", ",", "height", ")", ")", "write", "(", "'<path'", ")", "if", "scale", "!=", "1", ":", "write", "(", "' transform=\"scale({0})\"'", ".", "format", "(", "scale", ")", ")", "if", "color", "is", "not", "None", ":", "opacity", "=", "None", "stroke_color", "=", "colors", ".", "color_to_webcolor", "(", "color", ",", "allow_css3_colors", "=", "allow_css3_colors", ")", "if", "isinstance", "(", "stroke_color", ",", "tuple", ")", ":", "stroke_color", ",", "opacity", "=", "stroke_color", "write", "(", "' stroke={0}'", ".", "format", "(", "quoteattr", "(", "stroke_color", ")", ")", ")", "if", "opacity", "is", "not", "None", ":", "write", "(", "' stroke-opacity={0}'", ".", "format", "(", "quoteattr", "(", "str", "(", "opacity", ")", ")", ")", ")", "if", "lineclass", ":", "write", "(", "' class={0}'", ".", "format", "(", "quoteattr", "(", "lineclass", ")", ")", ")", "write", "(", "' d=\"'", ")", "# Current pen pointer 
position", "x", ",", "y", "=", "border", ",", "border", "+", ".5", "# .5 == stroke-width / 2", "line_iter", "=", "matrix_to_lines", "(", "matrix", ",", "x", ",", "y", ")", "# 1st coord is absolute", "(", "x1", ",", "y1", ")", ",", "(", "x2", ",", "y2", ")", "=", "next", "(", "line_iter", ")", "coord", "=", "[", "'M{0} {1}h{2}'", ".", "format", "(", "x1", ",", "y1", ",", "x2", "-", "x1", ")", "]", "append_coord", "=", "coord", ".", "append", "x", ",", "y", "=", "x2", ",", "y2", "for", "(", "x1", ",", "y1", ")", ",", "(", "x2", ",", "y2", ")", "in", "line_iter", ":", "append_coord", "(", "'m{0} {1}h{2}'", ".", "format", "(", "x1", "-", "x", ",", "int", "(", "y1", "-", "y", ")", ",", "x2", "-", "x1", ")", ")", "x", ",", "y", "=", "x2", ",", "y2", "write", "(", "''", ".", "join", "(", "coord", ")", ")", "# Close path and doc", "write", "(", "'\"/></svg>'", ")", "if", "nl", ":", "write", "(", "'\\n'", ")" ]
49.86087
20.756522
def maps_get_default_rules_output_rules_action(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") maps_get_default_rules = ET.Element("maps_get_default_rules") config = maps_get_default_rules output = ET.SubElement(maps_get_default_rules, "output") rules = ET.SubElement(output, "rules") action = ET.SubElement(rules, "action") action.text = kwargs.pop('action') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "maps_get_default_rules_output_rules_action", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "maps_get_default_rules", "=", "ET", ".", "Element", "(", "\"maps_get_default_rules\"", ")", "config", "=", "maps_get_default_rules", "output", "=", "ET", ".", "SubElement", "(", "maps_get_default_rules", ",", "\"output\"", ")", "rules", "=", "ET", ".", "SubElement", "(", "output", ",", "\"rules\"", ")", "action", "=", "ET", ".", "SubElement", "(", "rules", ",", "\"action\"", ")", "action", ".", "text", "=", "kwargs", ".", "pop", "(", "'action'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
41.230769
12.384615
def image_from_simplestreams(server, alias, remote_addr=None, cert=None, key=None, verify_cert=True, aliases=None, public=False, auto_update=False, _raw=False): ''' Create an image from simplestreams server : Simplestreams server URI alias : The alias of the image to retrieve remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. aliases : [] List of aliases to append to the copied image public : False Make this image public available auto_update : False Should LXD auto update that image? _raw : False Return the raw pylxd object or a dict of the image? CLI Examples: ..code-block:: bash $ salt '*' lxd.image_from_simplestreams "https://cloud-images.ubuntu.com/releases" "trusty/amd64" aliases='["t", "trusty/amd64"]' auto_update=True ''' if aliases is None: aliases = [] client = pylxd_client_get(remote_addr, cert, key, verify_cert) try: image = client.images.create_from_simplestreams( server, alias, public=public, auto_update=auto_update ) except pylxd.exceptions.LXDAPIException as e: raise CommandExecutionError(six.text_type(e)) # Aliases support for alias in aliases: image_alias_add(image, alias) if _raw: return image return _pylxd_model_to_dict(image)
[ "def", "image_from_simplestreams", "(", "server", ",", "alias", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ",", "aliases", "=", "None", ",", "public", "=", "False", ",", "auto_update", "=", "False", ",", "_raw", "=", "False", ")", ":", "if", "aliases", "is", "None", ":", "aliases", "=", "[", "]", "client", "=", "pylxd_client_get", "(", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ")", "try", ":", "image", "=", "client", ".", "images", ".", "create_from_simplestreams", "(", "server", ",", "alias", ",", "public", "=", "public", ",", "auto_update", "=", "auto_update", ")", "except", "pylxd", ".", "exceptions", ".", "LXDAPIException", "as", "e", ":", "raise", "CommandExecutionError", "(", "six", ".", "text_type", "(", "e", ")", ")", "# Aliases support", "for", "alias", "in", "aliases", ":", "image_alias_add", "(", "image", ",", "alias", ")", "if", "_raw", ":", "return", "image", "return", "_pylxd_model_to_dict", "(", "image", ")" ]
27.950617
22
def write(self, filename, header=None): """ Writes the AST as a configuration file. `filename` Filename to save configuration file to. `header` Header string to use for the file. Returns boolean. """ origfile = self._filename try: with open(filename, 'w') as _file: self.writestream(_file, header) self._filename = filename return True except IOError: self._filename = origfile return False
[ "def", "write", "(", "self", ",", "filename", ",", "header", "=", "None", ")", ":", "origfile", "=", "self", ".", "_filename", "try", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "_file", ":", "self", ".", "writestream", "(", "_file", ",", "header", ")", "self", ".", "_filename", "=", "filename", "return", "True", "except", "IOError", ":", "self", ".", "_filename", "=", "origfile", "return", "False" ]
25.727273
17.181818
def _summary(self, name, tensor): """Create a scalar or histogram summary matching the rank of the tensor. Args: name: Name for the summary. tensor: Tensor to summarize. Returns: Summary tensor. """ if tensor.shape.ndims == 0: return tf.summary.scalar(name, tensor) else: return tf.summary.histogram(name, tensor)
[ "def", "_summary", "(", "self", ",", "name", ",", "tensor", ")", ":", "if", "tensor", ".", "shape", ".", "ndims", "==", "0", ":", "return", "tf", ".", "summary", ".", "scalar", "(", "name", ",", "tensor", ")", "else", ":", "return", "tf", ".", "summary", ".", "histogram", "(", "name", ",", "tensor", ")" ]
25.428571
16.357143
def dateAt( self, point ): """ Returns the date at the given point. :param point | <QPoint> """ for date, data in self._dateGrid.items(): if ( data[1].contains(point) ): return QDate.fromJulianDay(date) return QDate()
[ "def", "dateAt", "(", "self", ",", "point", ")", ":", "for", "date", ",", "data", "in", "self", ".", "_dateGrid", ".", "items", "(", ")", ":", "if", "(", "data", "[", "1", "]", ".", "contains", "(", "point", ")", ")", ":", "return", "QDate", ".", "fromJulianDay", "(", "date", ")", "return", "QDate", "(", ")" ]
30.7
9.3
def start_centroid_distance(item_a, item_b, max_value): """ Distance between the centroids of the first step in each object. Args: item_a: STObject from the first set in TrackMatcher item_b: STObject from the second set in TrackMatcher max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1. """ start_a = item_a.center_of_mass(item_a.times[0]) start_b = item_b.center_of_mass(item_b.times[0]) start_distance = np.sqrt((start_a[0] - start_b[0]) ** 2 + (start_a[1] - start_b[1]) ** 2) return np.minimum(start_distance, max_value) / float(max_value)
[ "def", "start_centroid_distance", "(", "item_a", ",", "item_b", ",", "max_value", ")", ":", "start_a", "=", "item_a", ".", "center_of_mass", "(", "item_a", ".", "times", "[", "0", "]", ")", "start_b", "=", "item_b", ".", "center_of_mass", "(", "item_b", ".", "times", "[", "0", "]", ")", "start_distance", "=", "np", ".", "sqrt", "(", "(", "start_a", "[", "0", "]", "-", "start_b", "[", "0", "]", ")", "**", "2", "+", "(", "start_a", "[", "1", "]", "-", "start_b", "[", "1", "]", ")", "**", "2", ")", "return", "np", ".", "minimum", "(", "start_distance", ",", "max_value", ")", "/", "float", "(", "max_value", ")" ]
41.5625
23.1875
def libvlc_video_set_scale(p_mi, f_factor): '''Set the video scaling factor. That is the ratio of the number of pixels on screen to the number of pixels in the original decoded video in each dimension. Zero is a special value; it will adjust the video to the output window/drawable (in windowed mode) or the entire screen. Note that not all video outputs support scaling. @param p_mi: the media player. @param f_factor: the scaling factor, or zero. ''' f = _Cfunctions.get('libvlc_video_set_scale', None) or \ _Cfunction('libvlc_video_set_scale', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_float) return f(p_mi, f_factor)
[ "def", "libvlc_video_set_scale", "(", "p_mi", ",", "f_factor", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_video_set_scale'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_set_scale'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "MediaPlayer", ",", "ctypes", ".", "c_float", ")", "return", "f", "(", "p_mi", ",", "f_factor", ")" ]
52.538462
20.384615
def do_batch(args): """Runs the batch list, batch show or batch status command, printing output to the console Args: args: The parsed arguments sent to the command at runtime """ if args.subcommand == 'list': do_batch_list(args) if args.subcommand == 'show': do_batch_show(args) if args.subcommand == 'status': do_batch_status(args) if args.subcommand == 'submit': do_batch_submit(args)
[ "def", "do_batch", "(", "args", ")", ":", "if", "args", ".", "subcommand", "==", "'list'", ":", "do_batch_list", "(", "args", ")", "if", "args", ".", "subcommand", "==", "'show'", ":", "do_batch_show", "(", "args", ")", "if", "args", ".", "subcommand", "==", "'status'", ":", "do_batch_status", "(", "args", ")", "if", "args", ".", "subcommand", "==", "'submit'", ":", "do_batch_submit", "(", "args", ")" ]
25.166667
18.388889
def libvlc_audio_equalizer_get_band_frequency(u_index): '''Get a particular equalizer band frequency. This value can be used, for example, to create a label for an equalizer band control in a user interface. @param u_index: index of the band, counting from zero. @return: equalizer band frequency (Hz), or -1 if there is no such band. @version: LibVLC 2.2.0 or later. ''' f = _Cfunctions.get('libvlc_audio_equalizer_get_band_frequency', None) or \ _Cfunction('libvlc_audio_equalizer_get_band_frequency', ((1,),), None, ctypes.c_float, ctypes.c_uint) return f(u_index)
[ "def", "libvlc_audio_equalizer_get_band_frequency", "(", "u_index", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_audio_equalizer_get_band_frequency'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_audio_equalizer_get_band_frequency'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_float", ",", "ctypes", ".", "c_uint", ")", "return", "f", "(", "u_index", ")" ]
51.666667
23.666667
def _update_sys_path(self, package_path=None): """Updates and adds current directory to sys path""" self.package_path = package_path if not self.package_path in sys.path: sys.path.append(self.package_path)
[ "def", "_update_sys_path", "(", "self", ",", "package_path", "=", "None", ")", ":", "self", ".", "package_path", "=", "package_path", "if", "not", "self", ".", "package_path", "in", "sys", ".", "path", ":", "sys", ".", "path", ".", "append", "(", "self", ".", "package_path", ")" ]
47.4
3.4
def filter_images_urls(image_urls, image_filter, common_image_filter=None): ''' 图片链接过滤器,根据传入的过滤器规则,对图片链接列表进行过滤并返回结果列表 :param list(str) image_urls: 图片链接字串列表 :param list(str) image_filter: 过滤器字串列表 :param list(str) common_image_filter: 可选,通用的基础过滤器, 会在定制过滤器前对传入图片应用 :return: 过滤后的结果链接列表,以及被过滤掉的链接列表 :rtype: list(str), list(str) :raises TypeError: image_filter 不为字串或列表 :raises ValueError: image_filter 中存在空值 ''' common_image_filter = common_image_filter or [] # 对图片过滤器进行完整性验证 image_filter = json.loads(image_filter, encoding='utf-8') if not isinstance(image_filter, (str, list)): raise TypeError('image_filter not str or list') if isinstance(image_filter, str): image_filter = [image_filter] if not all(image_filter): raise ValueError('image_filter 中存在空值:{}'.format(image_filter)) rc = copy.deepcopy(image_urls) rc_removed = [] for i in image_urls: # 执行内置过滤器 for f in common_image_filter: if re.search(f, i): rc.remove(i) rc_removed.append(i) # 执行具体文章源的定制过滤器 for f in image_filter: if re.search(f, i): rc.remove(i) rc_removed.append(i) return rc, rc_removed
[ "def", "filter_images_urls", "(", "image_urls", ",", "image_filter", ",", "common_image_filter", "=", "None", ")", ":", "common_image_filter", "=", "common_image_filter", "or", "[", "]", "# 对图片过滤器进行完整性验证", "image_filter", "=", "json", ".", "loads", "(", "image_filter", ",", "encoding", "=", "'utf-8'", ")", "if", "not", "isinstance", "(", "image_filter", ",", "(", "str", ",", "list", ")", ")", ":", "raise", "TypeError", "(", "'image_filter not str or list'", ")", "if", "isinstance", "(", "image_filter", ",", "str", ")", ":", "image_filter", "=", "[", "image_filter", "]", "if", "not", "all", "(", "image_filter", ")", ":", "raise", "ValueError", "(", "'image_filter 中存在空值:{}'.format(imag", "e", "_filte", "r", "))", "", "", "rc", "=", "copy", ".", "deepcopy", "(", "image_urls", ")", "rc_removed", "=", "[", "]", "for", "i", "in", "image_urls", ":", "# 执行内置过滤器", "for", "f", "in", "common_image_filter", ":", "if", "re", ".", "search", "(", "f", ",", "i", ")", ":", "rc", ".", "remove", "(", "i", ")", "rc_removed", ".", "append", "(", "i", ")", "# 执行具体文章源的定制过滤器", "for", "f", "in", "image_filter", ":", "if", "re", ".", "search", "(", "f", ",", "i", ")", ":", "rc", ".", "remove", "(", "i", ")", "rc_removed", ".", "append", "(", "i", ")", "return", "rc", ",", "rc_removed" ]
35.512821
14.282051
def derivative(self, x): """Derivative of the broadcast operator. Parameters ---------- x : `domain` element The point to take the derivative in Returns ------- adjoint : linear `BroadcastOperator` The derivative Examples -------- Example with an affine operator: >>> I = odl.IdentityOperator(odl.rn(3)) >>> residual_op = I - I.domain.element([1, 1, 1]) >>> op = BroadcastOperator(residual_op, 2 * residual_op) Calling operator offsets by ``[1, 1, 1]``: >>> x = [1, 2, 3] >>> op(x) ProductSpace(rn(3), 2).element([ [ 0., 1., 2.], [ 0., 2., 4.] ]) The derivative of this affine operator does not have an offset: >>> op.derivative(x)(x) ProductSpace(rn(3), 2).element([ [ 1., 2., 3.], [ 2., 4., 6.] ]) """ return BroadcastOperator(*[op.derivative(x) for op in self.operators])
[ "def", "derivative", "(", "self", ",", "x", ")", ":", "return", "BroadcastOperator", "(", "*", "[", "op", ".", "derivative", "(", "x", ")", "for", "op", "in", "self", ".", "operators", "]", ")" ]
26.25
19.825
def Dir(self, name, create=True): """ Looks up or creates a directory node named 'name' relative to this directory. """ return self.fs.Dir(name, self, create)
[ "def", "Dir", "(", "self", ",", "name", ",", "create", "=", "True", ")", ":", "return", "self", ".", "fs", ".", "Dir", "(", "name", ",", "self", ",", "create", ")" ]
32.166667
9.833333
def create_class(self, data, options=None, path=None, **kwargs): """ Return instance of class based on Roslyn type property Data keys handled here: type Set the object class items Recurse into :py:meth:`create_class` to create child object instances :param data: dictionary data from Roslyn output artifact """ obj_map = dict((cls.type, cls) for cls in ALL_CLASSES) try: cls = obj_map[data["type"].lower()] except KeyError: LOGGER.warning("Unknown type: %s" % data) else: obj = cls( data, jinja_env=self.jinja_env, options=options, url_root=self.url_root, **kwargs ) # Append child objects # TODO this should recurse in the case we're getting back more # complex argument listings yield obj
[ "def", "create_class", "(", "self", ",", "data", ",", "options", "=", "None", ",", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "obj_map", "=", "dict", "(", "(", "cls", ".", "type", ",", "cls", ")", "for", "cls", "in", "ALL_CLASSES", ")", "try", ":", "cls", "=", "obj_map", "[", "data", "[", "\"type\"", "]", ".", "lower", "(", ")", "]", "except", "KeyError", ":", "LOGGER", ".", "warning", "(", "\"Unknown type: %s\"", "%", "data", ")", "else", ":", "obj", "=", "cls", "(", "data", ",", "jinja_env", "=", "self", ".", "jinja_env", ",", "options", "=", "options", ",", "url_root", "=", "self", ".", "url_root", ",", "*", "*", "kwargs", ")", "# Append child objects", "# TODO this should recurse in the case we're getting back more", "# complex argument listings", "yield", "obj" ]
28.085714
20.657143
def reset(): """Resets Logger to its initial state""" Logger.journal = [] Logger.fatal_warnings = False Logger._ignored_codes = set() Logger._ignored_domains = set() Logger._verbosity = 2 Logger._last_checkpoint = 0
[ "def", "reset", "(", ")", ":", "Logger", ".", "journal", "=", "[", "]", "Logger", ".", "fatal_warnings", "=", "False", "Logger", ".", "_ignored_codes", "=", "set", "(", ")", "Logger", ".", "_ignored_domains", "=", "set", "(", ")", "Logger", ".", "_verbosity", "=", "2", "Logger", ".", "_last_checkpoint", "=", "0" ]
33
8
def egg_info_writer(cmd, basename, filename): # type: (setuptools.command.egg_info.egg_info, str, str) -> None """Read rcli configuration and write it out to the egg info. Args: cmd: An egg info command instance to use for writing. basename: The basename of the file to write. filename: The full path of the file to write into the egg info. """ setupcfg = next((f for f in setuptools.findall() if os.path.basename(f) == 'setup.cfg'), None) if not setupcfg: return parser = six.moves.configparser.ConfigParser() # type: ignore parser.read(setupcfg) if not parser.has_section('rcli') or not parser.items('rcli'): return config = dict(parser.items('rcli')) # type: typing.Dict[str, typing.Any] for k, v in six.iteritems(config): if v.lower() in ('y', 'yes', 'true'): config[k] = True elif v.lower() in ('n', 'no', 'false'): config[k] = False else: try: config[k] = json.loads(v) except ValueError: pass cmd.write_file(basename, filename, json.dumps(config))
[ "def", "egg_info_writer", "(", "cmd", ",", "basename", ",", "filename", ")", ":", "# type: (setuptools.command.egg_info.egg_info, str, str) -> None", "setupcfg", "=", "next", "(", "(", "f", "for", "f", "in", "setuptools", ".", "findall", "(", ")", "if", "os", ".", "path", ".", "basename", "(", "f", ")", "==", "'setup.cfg'", ")", ",", "None", ")", "if", "not", "setupcfg", ":", "return", "parser", "=", "six", ".", "moves", ".", "configparser", ".", "ConfigParser", "(", ")", "# type: ignore", "parser", ".", "read", "(", "setupcfg", ")", "if", "not", "parser", ".", "has_section", "(", "'rcli'", ")", "or", "not", "parser", ".", "items", "(", "'rcli'", ")", ":", "return", "config", "=", "dict", "(", "parser", ".", "items", "(", "'rcli'", ")", ")", "# type: typing.Dict[str, typing.Any]", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "config", ")", ":", "if", "v", ".", "lower", "(", ")", "in", "(", "'y'", ",", "'yes'", ",", "'true'", ")", ":", "config", "[", "k", "]", "=", "True", "elif", "v", ".", "lower", "(", ")", "in", "(", "'n'", ",", "'no'", ",", "'false'", ")", ":", "config", "[", "k", "]", "=", "False", "else", ":", "try", ":", "config", "[", "k", "]", "=", "json", ".", "loads", "(", "v", ")", "except", "ValueError", ":", "pass", "cmd", ".", "write_file", "(", "basename", ",", "filename", ",", "json", ".", "dumps", "(", "config", ")", ")" ]
39.37931
17.896552
def time_str (time_t, slug = False): '''Converts floating point number a'la time.time() using DEFAULT_TIMEFORMAT ''' return datetime.fromtimestamp (int (time_t)).strftime ( DEFAULT_SLUGFORMAT if slug else DEFAULT_TIMEFORMAT)
[ "def", "time_str", "(", "time_t", ",", "slug", "=", "False", ")", ":", "return", "datetime", ".", "fromtimestamp", "(", "int", "(", "time_t", ")", ")", ".", "strftime", "(", "DEFAULT_SLUGFORMAT", "if", "slug", "else", "DEFAULT_TIMEFORMAT", ")" ]
38.5
16.166667
def matching_files(self): """ Find files. Returns: list: the list of matching files. """ matching = [] matcher = self.file_path_regex pieces = self.file_path_regex.pattern.split(sep) partial_matchers = list(map(re.compile, ( sep.join(pieces[:i + 1]) for i in range(len(pieces))))) for root, dirs, files in walk(self.top_dir, topdown=True): for i in reversed(range(len(dirs))): dirname = relpath(join(root, dirs[i]), self.top_dir) dirlevel = dirname.count(sep) if not partial_matchers[dirlevel].match(dirname): del dirs[i] for filename in files: if matcher.match(filename): matching.append(abspath(join(root, filename))) return matching
[ "def", "matching_files", "(", "self", ")", ":", "matching", "=", "[", "]", "matcher", "=", "self", ".", "file_path_regex", "pieces", "=", "self", ".", "file_path_regex", ".", "pattern", ".", "split", "(", "sep", ")", "partial_matchers", "=", "list", "(", "map", "(", "re", ".", "compile", ",", "(", "sep", ".", "join", "(", "pieces", "[", ":", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "len", "(", "pieces", ")", ")", ")", ")", ")", "for", "root", ",", "dirs", ",", "files", "in", "walk", "(", "self", ".", "top_dir", ",", "topdown", "=", "True", ")", ":", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "dirs", ")", ")", ")", ":", "dirname", "=", "relpath", "(", "join", "(", "root", ",", "dirs", "[", "i", "]", ")", ",", "self", ".", "top_dir", ")", "dirlevel", "=", "dirname", ".", "count", "(", "sep", ")", "if", "not", "partial_matchers", "[", "dirlevel", "]", ".", "match", "(", "dirname", ")", ":", "del", "dirs", "[", "i", "]", "for", "filename", "in", "files", ":", "if", "matcher", ".", "match", "(", "filename", ")", ":", "matching", ".", "append", "(", "abspath", "(", "join", "(", "root", ",", "filename", ")", ")", ")", "return", "matching" ]
33.88
18.04
def define_example_values(self, http_method, route, values, update=False): """Define example values for a given request. By default, example values are determined from the example properties in the schema. But if you want to change the example used in the documentation for a specific route, and this method lets you do that. :param str http_method: An HTTP method, like "get". :param str route: The route to match. :param dict values: A dictionary of parameters for the example request. :param bool update: If True, the values will be merged into the default example values for the request. If False, the values will replace the default example values. """ self.defined_example_values[(http_method.lower(), route)] = { 'update': update, 'values': values }
[ "def", "define_example_values", "(", "self", ",", "http_method", ",", "route", ",", "values", ",", "update", "=", "False", ")", ":", "self", ".", "defined_example_values", "[", "(", "http_method", ".", "lower", "(", ")", ",", "route", ")", "]", "=", "{", "'update'", ":", "update", ",", "'values'", ":", "values", "}" ]
48.722222
24.611111
def get_file(self, project, build_id, artifact_name, file_id, file_name, **kwargs): """GetFile. Gets a file from the build. :param str project: Project ID or project name :param int build_id: The ID of the build. :param str artifact_name: The name of the artifact. :param str file_id: The primary key for the file. :param str file_name: The name that the file will be set to. :rtype: object """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if artifact_name is not None: query_parameters['artifactName'] = self._serialize.query('artifact_name', artifact_name, 'str') if file_id is not None: query_parameters['fileId'] = self._serialize.query('file_id', file_id, 'str') if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') response = self._send(http_method='GET', location_id='1db06c96-014e-44e1-ac91-90b2d4b3e984', version='5.0', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback)
[ "def", "get_file", "(", "self", ",", "project", ",", "build_id", ",", "artifact_name", ",", "file_id", ",", "file_name", ",", "*", "*", "kwargs", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "build_id", "is", "not", "None", ":", "route_values", "[", "'buildId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'build_id'", ",", "build_id", ",", "'int'", ")", "query_parameters", "=", "{", "}", "if", "artifact_name", "is", "not", "None", ":", "query_parameters", "[", "'artifactName'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'artifact_name'", ",", "artifact_name", ",", "'str'", ")", "if", "file_id", "is", "not", "None", ":", "query_parameters", "[", "'fileId'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'file_id'", ",", "file_id", ",", "'str'", ")", "if", "file_name", "is", "not", "None", ":", "query_parameters", "[", "'fileName'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'file_name'", ",", "file_name", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'1db06c96-014e-44e1-ac91-90b2d4b3e984'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ",", "accept_media_type", "=", "'application/octet-stream'", ")", "if", "\"callback\"", "in", "kwargs", ":", "callback", "=", "kwargs", "[", "\"callback\"", "]", "else", ":", "callback", "=", "None", "return", "self", ".", "_client", ".", "stream_download", "(", "response", ",", "callback", "=", "callback", ")" ]
51.181818
20.757576
def add_record(self, length): # type: (int) -> None ''' Add some more length to this CE record. Used when a new record is going to get recorded into the CE (rather than the DR). Parameters: length - The length to add to this CE record. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('CE record not yet initialized!') self.len_cont_area += length
[ "def", "add_record", "(", "self", ",", "length", ")", ":", "# type: (int) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'CE record not yet initialized!'", ")", "self", ".", "len_cont_area", "+=", "length" ]
32.066667
23.933333
def affine_zoom_matrix(zoom_range=(0.8, 1.1)): """Create an affine transform matrix for zooming/scaling an image's height and width. OpenCV format, x is width. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). zoom_range : float or tuple of 2 floats The zooming/scaling ratio, greater than 1 means larger. - float, a fixed ratio. - tuple of 2 floats, randomly sample a value as the ratio between these 2 values. Returns ------- numpy.array An affine transform matrix. """ if isinstance(zoom_range, (float, int)): scale = zoom_range elif isinstance(zoom_range, tuple): scale = np.random.uniform(zoom_range[0], zoom_range[1]) else: raise Exception("zoom_range: float or tuple of 2 floats") zoom_matrix = np.array([[scale, 0, 0], \ [0, scale, 0], \ [0, 0, 1]]) return zoom_matrix
[ "def", "affine_zoom_matrix", "(", "zoom_range", "=", "(", "0.8", ",", "1.1", ")", ")", ":", "if", "isinstance", "(", "zoom_range", ",", "(", "float", ",", "int", ")", ")", ":", "scale", "=", "zoom_range", "elif", "isinstance", "(", "zoom_range", ",", "tuple", ")", ":", "scale", "=", "np", ".", "random", ".", "uniform", "(", "zoom_range", "[", "0", "]", ",", "zoom_range", "[", "1", "]", ")", "else", ":", "raise", "Exception", "(", "\"zoom_range: float or tuple of 2 floats\"", ")", "zoom_matrix", "=", "np", ".", "array", "(", "[", "[", "scale", ",", "0", ",", "0", "]", ",", "[", "0", ",", "scale", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", "]", "]", ")", "return", "zoom_matrix" ]
31.806452
19.677419
def to_binary_string(self): """Pack the feedback to binary form and return it as string.""" timestamp = datetime_to_timestamp(self.when) token = binascii.unhexlify(self.token) return struct.pack(self.FORMAT_PREFIX + '{0}s'.format(len(token)), timestamp, len(token), token)
[ "def", "to_binary_string", "(", "self", ")", ":", "timestamp", "=", "datetime_to_timestamp", "(", "self", ".", "when", ")", "token", "=", "binascii", ".", "unhexlify", "(", "self", ".", "token", ")", "return", "struct", ".", "pack", "(", "self", ".", "FORMAT_PREFIX", "+", "'{0}s'", ".", "format", "(", "len", "(", "token", ")", ")", ",", "timestamp", ",", "len", "(", "token", ")", ",", "token", ")" ]
54.333333
13.5
def smooth(x,window_len,window='bartlett'): """smooth the data using a sliding window with requested size. This method is based on the convolution of a scaled window with the signal. The signal is prepared by padding the beginning and the end of the signal with average of the first (last) ten values of the signal, to evoid jumps at the beggining/end input: x: the input signal, equaly spaced! window_len: the dimension of the smoothing window window: type of window from numpy library ['flat','hanning','hamming','bartlett','blackman'] -flat window will produce a moving average smoothing. -Bartlett window is very similar to triangular window, but always ends with zeros at points 1 and n, -hanning,hamming,blackman are used for smoothing the Fourier transfrom for curie temperature calculation the default is Bartlett output: aray of the smoothed signal """ if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.") if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.") if window_len<3: return x # numpy available windows if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") # padding the beggining and the end of the signal with an average value to evoid edge effect start=[average(x[0:10])]*window_len end=[average(x[-10:])]*window_len s=start+list(x)+end #s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]] if window == 'flat': #moving average w=ones(window_len,'d') else: w=eval('numpy.'+window+'(window_len)') y=numpy.convolve(old_div(w,w.sum()),s,mode='same') return array(y[window_len:-window_len])
[ "def", "smooth", "(", "x", ",", "window_len", ",", "window", "=", "'bartlett'", ")", ":", "if", "x", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"smooth only accepts 1 dimension arrays.\"", ")", "if", "x", ".", "size", "<", "window_len", ":", "raise", "ValueError", "(", "\"Input vector needs to be bigger than window size.\"", ")", "if", "window_len", "<", "3", ":", "return", "x", "# numpy available windows", "if", "not", "window", "in", "[", "'flat'", ",", "'hanning'", ",", "'hamming'", ",", "'bartlett'", ",", "'blackman'", "]", ":", "raise", "ValueError", "(", "\"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"", ")", "# padding the beggining and the end of the signal with an average value to evoid edge effect", "start", "=", "[", "average", "(", "x", "[", "0", ":", "10", "]", ")", "]", "*", "window_len", "end", "=", "[", "average", "(", "x", "[", "-", "10", ":", "]", ")", "]", "*", "window_len", "s", "=", "start", "+", "list", "(", "x", ")", "+", "end", "#s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]", "if", "window", "==", "'flat'", ":", "#moving average", "w", "=", "ones", "(", "window_len", ",", "'d'", ")", "else", ":", "w", "=", "eval", "(", "'numpy.'", "+", "window", "+", "'(window_len)'", ")", "y", "=", "numpy", ".", "convolve", "(", "old_div", "(", "w", ",", "w", ".", "sum", "(", ")", ")", ",", "s", ",", "mode", "=", "'same'", ")", "return", "array", "(", "y", "[", "window_len", ":", "-", "window_len", "]", ")" ]
37.8
26.38
def cmd_tool(args=None): """ Command line tool for plotting and viewing info on filterbank files """ from argparse import ArgumentParser parser = ArgumentParser(description="Command line utility for reading and plotting filterbank files.") parser.add_argument('-p', action='store', default='ank', dest='what_to_plot', type=str, help='Show: "w" waterfall (freq vs. time) plot; "s" integrated spectrum plot; \ "t" for time series; "mm" for spectrum including min max; "k" for kurtosis; \ "a" for all available plots and information; and "ank" for all but kurtosis.') parser.add_argument('filename', type=str, help='Name of file to read') parser.add_argument('-b', action='store', default=None, dest='f_start', type=float, help='Start frequency (begin), in MHz') parser.add_argument('-e', action='store', default=None, dest='f_stop', type=float, help='Stop frequency (end), in MHz') parser.add_argument('-B', action='store', default=None, dest='t_start', type=int, help='Start integration (begin) ID') parser.add_argument('-E', action='store', default=None, dest='t_stop', type=int, help='Stop integration (end) ID') parser.add_argument('-i', action='store_true', default=False, dest='info_only', help='Show info only') parser.add_argument('-a', action='store_true', default=False, dest='average', help='average along time axis (plot spectrum only)') parser.add_argument('-s', action='store', default='', dest='plt_filename', type=str, help='save plot graphic to file (give filename as argument)') parser.add_argument('-S', action='store_true', default=False, dest='save_only', help='Turn off plotting of data and only save to file.') parser.add_argument('-D', action='store_false', default=True, dest='blank_dc', help='Use to not blank DC bin.') parser.add_argument('-c', action='store_true', default=False, dest='calibrate_band_pass', help='Calibrate band pass.') args = parser.parse_args() # Open blimpy data filename = args.filename load_data = not args.info_only # only load one integration if 
looking at spectrum wtp = args.what_to_plot if not wtp or 's' in wtp: if args.t_start == None: t_start = 0 else: t_start = args.t_start t_stop = t_start + 1 if args.average: t_start = None t_stop = None else: t_start = args.t_start t_stop = args.t_stop if args.info_only: args.blank_dc = False args.calibrate_band_pass = False fil = Filterbank(filename, f_start=args.f_start, f_stop=args.f_stop, t_start=t_start, t_stop=t_stop, load_data=load_data,blank_dc=args.blank_dc, cal_band_pass=args.calibrate_band_pass) fil.info() # And if we want to plot data, then plot data. if not args.info_only: # check start & stop frequencies make sense #try: # if args.f_start: # print "Start freq: %2.2f" % args.f_start # assert args.f_start >= fil.freqs[0] or np.isclose(args.f_start, fil.freqs[0]) # # if args.f_stop: # print "Stop freq: %2.2f" % args.f_stop # assert args.f_stop <= fil.freqs[-1] or np.isclose(args.f_stop, fil.freqs[-1]) #except AssertionError: # print "Error: Start and stop frequencies must lie inside file's frequency range." # print "i.e. between %2.2f-%2.2f MHz." 
% (fil.freqs[0], fil.freqs[-1]) # exit() if args.what_to_plot == "w": plt.figure("waterfall", figsize=(8, 6)) fil.plot_waterfall(f_start=args.f_start, f_stop=args.f_stop) elif args.what_to_plot == "s": plt.figure("Spectrum", figsize=(8, 6)) fil.plot_spectrum(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all') elif args.what_to_plot == "mm": plt.figure("min max", figsize=(8, 6)) fil.plot_spectrum_min_max(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all') elif args.what_to_plot == "k": plt.figure("kurtosis", figsize=(8, 6)) fil.plot_kurtosis(f_start=args.f_start, f_stop=args.f_stop) elif args.what_to_plot == "t": plt.figure("Time Series", figsize=(8, 6)) fil.plot_time_series(f_start=args.f_start, f_stop=args.f_stop,orientation='h') elif args.what_to_plot == "a": plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white') fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all') elif args.what_to_plot == "ank": plt.figure("Multiple diagnostic plots", figsize=(12, 9),facecolor='white') fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='all',kurtosis=False) if args.plt_filename != '': plt.savefig(args.plt_filename) if not args.save_only: if 'DISPLAY' in os.environ.keys(): plt.show() else: print("No $DISPLAY available.")
[ "def", "cmd_tool", "(", "args", "=", "None", ")", ":", "from", "argparse", "import", "ArgumentParser", "parser", "=", "ArgumentParser", "(", "description", "=", "\"Command line utility for reading and plotting filterbank files.\"", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "action", "=", "'store'", ",", "default", "=", "'ank'", ",", "dest", "=", "'what_to_plot'", ",", "type", "=", "str", ",", "help", "=", "'Show: \"w\" waterfall (freq vs. time) plot; \"s\" integrated spectrum plot; \\\n \"t\" for time series; \"mm\" for spectrum including min max; \"k\" for kurtosis; \\\n \"a\" for all available plots and information; and \"ank\" for all but kurtosis.'", ")", "parser", ".", "add_argument", "(", "'filename'", ",", "type", "=", "str", ",", "help", "=", "'Name of file to read'", ")", "parser", ".", "add_argument", "(", "'-b'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "dest", "=", "'f_start'", ",", "type", "=", "float", ",", "help", "=", "'Start frequency (begin), in MHz'", ")", "parser", ".", "add_argument", "(", "'-e'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "dest", "=", "'f_stop'", ",", "type", "=", "float", ",", "help", "=", "'Stop frequency (end), in MHz'", ")", "parser", ".", "add_argument", "(", "'-B'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "dest", "=", "'t_start'", ",", "type", "=", "int", ",", "help", "=", "'Start integration (begin) ID'", ")", "parser", ".", "add_argument", "(", "'-E'", ",", "action", "=", "'store'", ",", "default", "=", "None", ",", "dest", "=", "'t_stop'", ",", "type", "=", "int", ",", "help", "=", "'Stop integration (end) ID'", ")", "parser", ".", "add_argument", "(", "'-i'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "dest", "=", "'info_only'", ",", "help", "=", "'Show info only'", ")", "parser", ".", "add_argument", "(", "'-a'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "dest", "=", 
"'average'", ",", "help", "=", "'average along time axis (plot spectrum only)'", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "action", "=", "'store'", ",", "default", "=", "''", ",", "dest", "=", "'plt_filename'", ",", "type", "=", "str", ",", "help", "=", "'save plot graphic to file (give filename as argument)'", ")", "parser", ".", "add_argument", "(", "'-S'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "dest", "=", "'save_only'", ",", "help", "=", "'Turn off plotting of data and only save to file.'", ")", "parser", ".", "add_argument", "(", "'-D'", ",", "action", "=", "'store_false'", ",", "default", "=", "True", ",", "dest", "=", "'blank_dc'", ",", "help", "=", "'Use to not blank DC bin.'", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "dest", "=", "'calibrate_band_pass'", ",", "help", "=", "'Calibrate band pass.'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "# Open blimpy data", "filename", "=", "args", ".", "filename", "load_data", "=", "not", "args", ".", "info_only", "# only load one integration if looking at spectrum", "wtp", "=", "args", ".", "what_to_plot", "if", "not", "wtp", "or", "'s'", "in", "wtp", ":", "if", "args", ".", "t_start", "==", "None", ":", "t_start", "=", "0", "else", ":", "t_start", "=", "args", ".", "t_start", "t_stop", "=", "t_start", "+", "1", "if", "args", ".", "average", ":", "t_start", "=", "None", "t_stop", "=", "None", "else", ":", "t_start", "=", "args", ".", "t_start", "t_stop", "=", "args", ".", "t_stop", "if", "args", ".", "info_only", ":", "args", ".", "blank_dc", "=", "False", "args", ".", "calibrate_band_pass", "=", "False", "fil", "=", "Filterbank", "(", "filename", ",", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ",", "t_start", "=", "t_start", ",", "t_stop", "=", "t_stop", ",", "load_data", "=", "load_data", ",", "blank_dc", "=", "args", ".", "blank_dc", ",", 
"cal_band_pass", "=", "args", ".", "calibrate_band_pass", ")", "fil", ".", "info", "(", ")", "# And if we want to plot data, then plot data.", "if", "not", "args", ".", "info_only", ":", "# check start & stop frequencies make sense", "#try:", "# if args.f_start:", "# print \"Start freq: %2.2f\" % args.f_start", "# assert args.f_start >= fil.freqs[0] or np.isclose(args.f_start, fil.freqs[0])", "#", "# if args.f_stop:", "# print \"Stop freq: %2.2f\" % args.f_stop", "# assert args.f_stop <= fil.freqs[-1] or np.isclose(args.f_stop, fil.freqs[-1])", "#except AssertionError:", "# print \"Error: Start and stop frequencies must lie inside file's frequency range.\"", "# print \"i.e. between %2.2f-%2.2f MHz.\" % (fil.freqs[0], fil.freqs[-1])", "# exit()", "if", "args", ".", "what_to_plot", "==", "\"w\"", ":", "plt", ".", "figure", "(", "\"waterfall\"", ",", "figsize", "=", "(", "8", ",", "6", ")", ")", "fil", ".", "plot_waterfall", "(", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ")", "elif", "args", ".", "what_to_plot", "==", "\"s\"", ":", "plt", ".", "figure", "(", "\"Spectrum\"", ",", "figsize", "=", "(", "8", ",", "6", ")", ")", "fil", ".", "plot_spectrum", "(", "logged", "=", "True", ",", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ",", "t", "=", "'all'", ")", "elif", "args", ".", "what_to_plot", "==", "\"mm\"", ":", "plt", ".", "figure", "(", "\"min max\"", ",", "figsize", "=", "(", "8", ",", "6", ")", ")", "fil", ".", "plot_spectrum_min_max", "(", "logged", "=", "True", ",", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ",", "t", "=", "'all'", ")", "elif", "args", ".", "what_to_plot", "==", "\"k\"", ":", "plt", ".", "figure", "(", "\"kurtosis\"", ",", "figsize", "=", "(", "8", ",", "6", ")", ")", "fil", ".", "plot_kurtosis", "(", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ")", "elif", "args", ".", "what_to_plot", 
"==", "\"t\"", ":", "plt", ".", "figure", "(", "\"Time Series\"", ",", "figsize", "=", "(", "8", ",", "6", ")", ")", "fil", ".", "plot_time_series", "(", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ",", "orientation", "=", "'h'", ")", "elif", "args", ".", "what_to_plot", "==", "\"a\"", ":", "plt", ".", "figure", "(", "\"Multiple diagnostic plots\"", ",", "figsize", "=", "(", "12", ",", "9", ")", ",", "facecolor", "=", "'white'", ")", "fil", ".", "plot_all", "(", "logged", "=", "True", ",", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ",", "t", "=", "'all'", ")", "elif", "args", ".", "what_to_plot", "==", "\"ank\"", ":", "plt", ".", "figure", "(", "\"Multiple diagnostic plots\"", ",", "figsize", "=", "(", "12", ",", "9", ")", ",", "facecolor", "=", "'white'", ")", "fil", ".", "plot_all", "(", "logged", "=", "True", ",", "f_start", "=", "args", ".", "f_start", ",", "f_stop", "=", "args", ".", "f_stop", ",", "t", "=", "'all'", ",", "kurtosis", "=", "False", ")", "if", "args", ".", "plt_filename", "!=", "''", ":", "plt", ".", "savefig", "(", "args", ".", "plt_filename", ")", "if", "not", "args", ".", "save_only", ":", "if", "'DISPLAY'", "in", "os", ".", "environ", ".", "keys", "(", ")", ":", "plt", ".", "show", "(", ")", "else", ":", "print", "(", "\"No $DISPLAY available.\"", ")" ]
48.315315
25.567568
def setup(use_latex=False, overwrite=False): """Set up matplotlib imports and settings. Parameters ---------- use_latex: bool, optional Determine if Latex output should be used. Latex will only be enable if a 'latex' binary is found in the system. overwrite: bool, optional Overwrite some matplotlib config values. Returns ------- plt: :mod:`pylab` pylab module imported as plt mpl: :mod:`matplotlib` matplotlib module imported as mpl """ # just make sure we can access matplotlib as mpl import matplotlib as mpl # general settings if overwrite: mpl.rcParams["lines.linewidth"] = 2.0 mpl.rcParams["lines.markeredgewidth"] = 3.0 mpl.rcParams["lines.markersize"] = 3.0 mpl.rcParams["font.size"] = 12 mpl.rcParams['mathtext.default'] = 'regular' if latex and use_latex: mpl.rcParams['text.usetex'] = True mpl.rc( 'text.latex', preamble=''.join(( # r'\usepackage{droidsans} r'\usepackage[T1]{fontenc} ', r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}', r'\renewcommand\familydefault{\sfdefault} ', r'\usepackage{mathastext} ' )) ) else: mpl.rcParams['text.usetex'] = False import matplotlib.pyplot as plt return plt, mpl
[ "def", "setup", "(", "use_latex", "=", "False", ",", "overwrite", "=", "False", ")", ":", "# just make sure we can access matplotlib as mpl", "import", "matplotlib", "as", "mpl", "# general settings", "if", "overwrite", ":", "mpl", ".", "rcParams", "[", "\"lines.linewidth\"", "]", "=", "2.0", "mpl", ".", "rcParams", "[", "\"lines.markeredgewidth\"", "]", "=", "3.0", "mpl", ".", "rcParams", "[", "\"lines.markersize\"", "]", "=", "3.0", "mpl", ".", "rcParams", "[", "\"font.size\"", "]", "=", "12", "mpl", ".", "rcParams", "[", "'mathtext.default'", "]", "=", "'regular'", "if", "latex", "and", "use_latex", ":", "mpl", ".", "rcParams", "[", "'text.usetex'", "]", "=", "True", "mpl", ".", "rc", "(", "'text.latex'", ",", "preamble", "=", "''", ".", "join", "(", "(", "# r'\\usepackage{droidsans}", "r'\\usepackage[T1]{fontenc} '", ",", "r'\\usepackage{sfmath} \\renewcommand{\\rmfamily}{\\sffamily}'", ",", "r'\\renewcommand\\familydefault{\\sfdefault} '", ",", "r'\\usepackage{mathastext} '", ")", ")", ")", "else", ":", "mpl", ".", "rcParams", "[", "'text.usetex'", "]", "=", "False", "import", "matplotlib", ".", "pyplot", "as", "plt", "return", "plt", ",", "mpl" ]
30.326087
17.043478
def parse(uri, user=None, port=22): """ parses ssh connection uri-like sentences. ex: - root@google.com -> (root, google.com, 22) - noreply@facebook.com:22 -> (noreply, facebook.com, 22) - facebook.com:3306 -> ($USER, facebook.com, 3306) - twitter.com -> ($USER, twitter.com, 22) default port: 22 default user: $USER (getpass.getuser()) """ uri = uri.strip() if not user: user = getpass.getuser() # get user if '@' in uri: user = uri.split("@")[0] # get port if ':' in uri: port = uri.split(":")[-1] try: port = int(port) except ValueError: raise ValueError("port must be numeric.") # get host uri = re.sub(":.*", "", uri) uri = re.sub(".*@", "", uri) host = uri return ( user, host, port, )
[ "def", "parse", "(", "uri", ",", "user", "=", "None", ",", "port", "=", "22", ")", ":", "uri", "=", "uri", ".", "strip", "(", ")", "if", "not", "user", ":", "user", "=", "getpass", ".", "getuser", "(", ")", "# get user", "if", "'@'", "in", "uri", ":", "user", "=", "uri", ".", "split", "(", "\"@\"", ")", "[", "0", "]", "# get port", "if", "':'", "in", "uri", ":", "port", "=", "uri", ".", "split", "(", "\":\"", ")", "[", "-", "1", "]", "try", ":", "port", "=", "int", "(", "port", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"port must be numeric.\"", ")", "# get host", "uri", "=", "re", ".", "sub", "(", "\":.*\"", ",", "\"\"", ",", "uri", ")", "uri", "=", "re", ".", "sub", "(", "\".*@\"", ",", "\"\"", ",", "uri", ")", "host", "=", "uri", "return", "(", "user", ",", "host", ",", "port", ",", ")" ]
20.365854
21.878049
def mstmap(args): """ %prog mstmap LMD50.snps.genotype.txt Convert LMDs to MSTMAP input. """ from jcvi.assembly.geneticmap import MSTMatrix p = OptionParser(mstmap.__doc__) p.add_option("--population_type", default="RIL6", help="Type of population, possible values are DH and RILd") p.add_option("--missing_threshold", default=.5, help="Missing threshold, .25 excludes any marker with >25% missing") p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) lmd, = args fp = open(lmd) next(fp) # Header table = {"0": "-", "1": "A", "2": "B", "3": "X"} mh = ["locus_name"] + fp.next().split()[4:] genotypes = [] for row in fp: atoms = row.split() chr, pos, ref, alt = atoms[:4] locus_name = ".".join((chr, pos)) codes = [table[x] for x in atoms[4:]] genotypes.append([locus_name] + codes) mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold) mm.write(opts.outfile, header=True)
[ "def", "mstmap", "(", "args", ")", ":", "from", "jcvi", ".", "assembly", ".", "geneticmap", "import", "MSTMatrix", "p", "=", "OptionParser", "(", "mstmap", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--population_type\"", ",", "default", "=", "\"RIL6\"", ",", "help", "=", "\"Type of population, possible values are DH and RILd\"", ")", "p", ".", "add_option", "(", "\"--missing_threshold\"", ",", "default", "=", ".5", ",", "help", "=", "\"Missing threshold, .25 excludes any marker with >25% missing\"", ")", "p", ".", "set_outfile", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "lmd", ",", "=", "args", "fp", "=", "open", "(", "lmd", ")", "next", "(", "fp", ")", "# Header", "table", "=", "{", "\"0\"", ":", "\"-\"", ",", "\"1\"", ":", "\"A\"", ",", "\"2\"", ":", "\"B\"", ",", "\"3\"", ":", "\"X\"", "}", "mh", "=", "[", "\"locus_name\"", "]", "+", "fp", ".", "next", "(", ")", ".", "split", "(", ")", "[", "4", ":", "]", "genotypes", "=", "[", "]", "for", "row", "in", "fp", ":", "atoms", "=", "row", ".", "split", "(", ")", "chr", ",", "pos", ",", "ref", ",", "alt", "=", "atoms", "[", ":", "4", "]", "locus_name", "=", "\".\"", ".", "join", "(", "(", "chr", ",", "pos", ")", ")", "codes", "=", "[", "table", "[", "x", "]", "for", "x", "in", "atoms", "[", "4", ":", "]", "]", "genotypes", ".", "append", "(", "[", "locus_name", "]", "+", "codes", ")", "mm", "=", "MSTMatrix", "(", "genotypes", ",", "mh", ",", "opts", ".", "population_type", ",", "opts", ".", "missing_threshold", ")", "mm", ".", "write", "(", "opts", ".", "outfile", ",", "header", "=", "True", ")" ]
31.529412
17.411765
def __compute_evolution( df, id_cols, value_col, date_col=None, freq=1, compare_to=None, method='abs', format='column', offseted_suffix='_offseted', evolution_col_name='evolution_computed', how='left', fillna=None, raise_duplicate_error=True ): """ Compute an evolution column : - against a period distant from a fixed frequency. - against a part of the df Unfortunately, pandas doesn't allow .change() and .pct_change() to be executed with a MultiIndex. Args: df (pd.DataFrame): id_cols (list(str)): value_col (str): date_col (str/dict): default None freq (int/pd.DateOffset/pd.Serie): default 1 compare_to (str): default None method (str): default ``'abs'`` can be also ``'pct'`` format(str): default 'column' can be also 'df' offseted_suffix(str): default '_offseted' evolution_col_name(str): default 'evolution_computed' how(str): default 'left' fillna(str/int): default None """ if date_col is not None: is_date_to_format = isinstance(date_col, dict) or (df[date_col].dtype == np.object) if is_date_to_format: if isinstance(date_col, dict): date_format = date_col.get('format', None) date_col = date_col['selector'] else: date_format = None df['_'+date_col + '_copy_'] = pd.to_datetime(df[date_col], format=date_format) date_col = '_'+date_col + '_copy_' is_freq_dict = isinstance(freq, dict) if is_freq_dict: freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()}) check_params_columns_duplicate(id_cols + [value_col, date_col]) # create df_offseted group_cols = id_cols + [date_col] df_offseted = df[group_cols + [value_col]].copy() df_offseted[date_col] += freq df_with_offseted_values = apply_merge( df, df_offseted, group_cols, how, offseted_suffix, raise_duplicate_error ) if is_date_to_format: del df_with_offseted_values[date_col] elif compare_to is not None: # create df_offseted check_params_columns_duplicate(id_cols + [value_col]) group_cols = id_cols df_offseted = df.query(compare_to).copy() df_offseted = df_offseted[group_cols + [value_col]] df_with_offseted_values = 
apply_merge( df, df_offseted, group_cols, how, offseted_suffix, raise_duplicate_error ) apply_fillna(df_with_offseted_values, value_col, offseted_suffix, fillna) apply_method(df_with_offseted_values, evolution_col_name, value_col, offseted_suffix, method) return apply_format(df_with_offseted_values, evolution_col_name, format)
[ "def", "__compute_evolution", "(", "df", ",", "id_cols", ",", "value_col", ",", "date_col", "=", "None", ",", "freq", "=", "1", ",", "compare_to", "=", "None", ",", "method", "=", "'abs'", ",", "format", "=", "'column'", ",", "offseted_suffix", "=", "'_offseted'", ",", "evolution_col_name", "=", "'evolution_computed'", ",", "how", "=", "'left'", ",", "fillna", "=", "None", ",", "raise_duplicate_error", "=", "True", ")", ":", "if", "date_col", "is", "not", "None", ":", "is_date_to_format", "=", "isinstance", "(", "date_col", ",", "dict", ")", "or", "(", "df", "[", "date_col", "]", ".", "dtype", "==", "np", ".", "object", ")", "if", "is_date_to_format", ":", "if", "isinstance", "(", "date_col", ",", "dict", ")", ":", "date_format", "=", "date_col", ".", "get", "(", "'format'", ",", "None", ")", "date_col", "=", "date_col", "[", "'selector'", "]", "else", ":", "date_format", "=", "None", "df", "[", "'_'", "+", "date_col", "+", "'_copy_'", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "date_col", "]", ",", "format", "=", "date_format", ")", "date_col", "=", "'_'", "+", "date_col", "+", "'_copy_'", "is_freq_dict", "=", "isinstance", "(", "freq", ",", "dict", ")", "if", "is_freq_dict", ":", "freq", "=", "pd", ".", "DateOffset", "(", "*", "*", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "freq", ".", "items", "(", ")", "}", ")", "check_params_columns_duplicate", "(", "id_cols", "+", "[", "value_col", ",", "date_col", "]", ")", "# create df_offseted", "group_cols", "=", "id_cols", "+", "[", "date_col", "]", "df_offseted", "=", "df", "[", "group_cols", "+", "[", "value_col", "]", "]", ".", "copy", "(", ")", "df_offseted", "[", "date_col", "]", "+=", "freq", "df_with_offseted_values", "=", "apply_merge", "(", "df", ",", "df_offseted", ",", "group_cols", ",", "how", ",", "offseted_suffix", ",", "raise_duplicate_error", ")", "if", "is_date_to_format", ":", "del", "df_with_offseted_values", "[", "date_col", "]", "elif", "compare_to", "is", "not", 
"None", ":", "# create df_offseted", "check_params_columns_duplicate", "(", "id_cols", "+", "[", "value_col", "]", ")", "group_cols", "=", "id_cols", "df_offseted", "=", "df", ".", "query", "(", "compare_to", ")", ".", "copy", "(", ")", "df_offseted", "=", "df_offseted", "[", "group_cols", "+", "[", "value_col", "]", "]", "df_with_offseted_values", "=", "apply_merge", "(", "df", ",", "df_offseted", ",", "group_cols", ",", "how", ",", "offseted_suffix", ",", "raise_duplicate_error", ")", "apply_fillna", "(", "df_with_offseted_values", ",", "value_col", ",", "offseted_suffix", ",", "fillna", ")", "apply_method", "(", "df_with_offseted_values", ",", "evolution_col_name", ",", "value_col", ",", "offseted_suffix", ",", "method", ")", "return", "apply_format", "(", "df_with_offseted_values", ",", "evolution_col_name", ",", "format", ")" ]
34.7375
19.3625
def execute(self): """Resolves the specified confs for the configured targets and returns an iterator over tuples of (conf, jar path). """ if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy': return compile_classpath = self.context.products.get_data('compile_classpath', init_func=ClasspathProducts.init_func(self.get_options().pants_workdir)) targets = self.context.targets() if all(not isinstance(target, JarLibrary) for target in targets): if self._report: self.context.log.info("Not generating a report. No resolution performed.") return executor = self.create_java_executor() results = self.resolve(executor=executor, targets=targets, classpath_products=compile_classpath, confs=self.get_options().confs, extra_args=self._args) if self._report: results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts] if not results_with_resolved_artifacts: self.context.log.info("Not generating a report. No resolution performed.") else: for result in results_with_resolved_artifacts: self._generate_ivy_report(result)
[ "def", "execute", "(", "self", ")", ":", "if", "JvmResolveSubsystem", ".", "global_instance", "(", ")", ".", "get_options", "(", ")", ".", "resolver", "!=", "'ivy'", ":", "return", "compile_classpath", "=", "self", ".", "context", ".", "products", ".", "get_data", "(", "'compile_classpath'", ",", "init_func", "=", "ClasspathProducts", ".", "init_func", "(", "self", ".", "get_options", "(", ")", ".", "pants_workdir", ")", ")", "targets", "=", "self", ".", "context", ".", "targets", "(", ")", "if", "all", "(", "not", "isinstance", "(", "target", ",", "JarLibrary", ")", "for", "target", "in", "targets", ")", ":", "if", "self", ".", "_report", ":", "self", ".", "context", ".", "log", ".", "info", "(", "\"Not generating a report. No resolution performed.\"", ")", "return", "executor", "=", "self", ".", "create_java_executor", "(", ")", "results", "=", "self", ".", "resolve", "(", "executor", "=", "executor", ",", "targets", "=", "targets", ",", "classpath_products", "=", "compile_classpath", ",", "confs", "=", "self", ".", "get_options", "(", ")", ".", "confs", ",", "extra_args", "=", "self", ".", "_args", ")", "if", "self", ".", "_report", ":", "results_with_resolved_artifacts", "=", "[", "r", "for", "r", "in", "results", "if", "r", ".", "has_resolved_artifacts", "]", "if", "not", "results_with_resolved_artifacts", ":", "self", ".", "context", ".", "log", ".", "info", "(", "\"Not generating a report. No resolution performed.\"", ")", "else", ":", "for", "result", "in", "results_with_resolved_artifacts", ":", "self", ".", "_generate_ivy_report", "(", "result", ")" ]
43.333333
23.933333
def conv(self,field_name,conv_func): """When a record is returned by a SELECT, ask conversion of specified field value with the specified function""" if field_name not in self.fields: raise NameError,"Unknown field %s" %field_name self.conv_func[field_name] = conv_func
[ "def", "conv", "(", "self", ",", "field_name", ",", "conv_func", ")", ":", "if", "field_name", "not", "in", "self", ".", "fields", ":", "raise", "NameError", ",", "\"Unknown field %s\"", "%", "field_name", "self", ".", "conv_func", "[", "field_name", "]", "=", "conv_func" ]
52.166667
5
def set_auxiliary_basis_set(self, folder, auxiliary_folder, auxiliary_basis_set_type="aug_cc_pvtz"): """ copy in the desired folder the needed auxiliary basis set "X2.ion" where X is a specie. :param auxiliary_folder: folder where the auxiliary basis sets are stored :param auxiliary_basis_set_type: type of basis set (string to be found in the extension of the file name; must be in lower case) ex: C2.ion_aug_cc_pvtz_RI_Weigend find "aug_cc_pvtz" """ list_files = os.listdir(auxiliary_folder) for specie in self._mol.symbol_set: for file in list_files: if file.upper().find( specie.upper() + "2") != -1 and file.lower().find( auxiliary_basis_set_type) != -1: shutil.copyfile(auxiliary_folder + "/" + file, folder + "/" + specie + "2.ion")
[ "def", "set_auxiliary_basis_set", "(", "self", ",", "folder", ",", "auxiliary_folder", ",", "auxiliary_basis_set_type", "=", "\"aug_cc_pvtz\"", ")", ":", "list_files", "=", "os", ".", "listdir", "(", "auxiliary_folder", ")", "for", "specie", "in", "self", ".", "_mol", ".", "symbol_set", ":", "for", "file", "in", "list_files", ":", "if", "file", ".", "upper", "(", ")", ".", "find", "(", "specie", ".", "upper", "(", ")", "+", "\"2\"", ")", "!=", "-", "1", "and", "file", ".", "lower", "(", ")", ".", "find", "(", "auxiliary_basis_set_type", ")", "!=", "-", "1", ":", "shutil", ".", "copyfile", "(", "auxiliary_folder", "+", "\"/\"", "+", "file", ",", "folder", "+", "\"/\"", "+", "specie", "+", "\"2.ion\"", ")" ]
53.777778
26.777778
def copy(self): """ Returns a copy of JunctionTree. Returns ------- JunctionTree : copy of JunctionTree Examples -------- >>> import numpy as np >>> from pgmpy.factors.discrete import DiscreteFactor >>> from pgmpy.models import JunctionTree >>> G = JunctionTree() >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')), (('a', 'b', 'c'), ('a', 'c'))]) >>> phi1 = DiscreteFactor(['a', 'b'], [1, 2], np.random.rand(2)) >>> phi2 = DiscreteFactor(['a', 'c'], [1, 2], np.random.rand(2)) >>> G.add_factors(phi1,phi2) >>> modelCopy = G.copy() >>> modelCopy.edges() [(('a', 'b'), ('a', 'b', 'c')), (('a', 'c'), ('a', 'b', 'c'))] >>> G.factors [<DiscreteFactor representing phi(a:1, b:2) at 0xb720ee4c>, <DiscreteFactor representing phi(a:1, c:2) at 0xb4e1e06c>] >>> modelCopy.factors [<DiscreteFactor representing phi(a:1, b:2) at 0xb4bd11ec>, <DiscreteFactor representing phi(a:1, c:2) at 0xb4bd138c>] """ copy = JunctionTree(self.edges()) copy.add_nodes_from(self.nodes()) if self.factors: factors_copy = [factor.copy() for factor in self.factors] copy.add_factors(*factors_copy) return copy
[ "def", "copy", "(", "self", ")", ":", "copy", "=", "JunctionTree", "(", "self", ".", "edges", "(", ")", ")", "copy", ".", "add_nodes_from", "(", "self", ".", "nodes", "(", ")", ")", "if", "self", ".", "factors", ":", "factors_copy", "=", "[", "factor", ".", "copy", "(", ")", "for", "factor", "in", "self", ".", "factors", "]", "copy", ".", "add_factors", "(", "*", "factors_copy", ")", "return", "copy" ]
37.4
19.285714
def define_function(self, function, name=None): """Define the Python function within the CLIPS environment. If a name is given, it will be the function name within CLIPS. Otherwise, the name of the Python function will be used. The Python function will be accessible within CLIPS via its name as if it was defined via the `deffunction` construct. """ name = name if name is not None else function.__name__ ENVIRONMENT_DATA[self._env].user_functions[name] = function self.build(DEFFUNCTION.format(name))
[ "def", "define_function", "(", "self", ",", "function", ",", "name", "=", "None", ")", ":", "name", "=", "name", "if", "name", "is", "not", "None", "else", "function", ".", "__name__", "ENVIRONMENT_DATA", "[", "self", ".", "_env", "]", ".", "user_functions", "[", "name", "]", "=", "function", "self", ".", "build", "(", "DEFFUNCTION", ".", "format", "(", "name", ")", ")" ]
37.666667
24.466667
def _get_core_keywords(skw_matches, ckw_matches, spires=False): """Return the output for the field codes. :var skw_matches: dict of {keyword: [info,...]} :var ckw_matches: dict of {keyword: [info,...]} :keyword spires: bool, to get the spires output :return: list of formatted core keywords """ output = {} category = {} def _get_value_kw(kw): """Help to sort the Core keywords.""" i = 0 while kw[i].isdigit(): i += 1 if i > 0: return int(kw[:i]) else: return 0 for skw, info in skw_matches: if skw.core: output[skw.output(spires)] = len(info[0]) category[skw.output(spires)] = skw.type for ckw, info in ckw_matches: if ckw.core: output[ckw.output(spires)] = len(info[0]) else: # test if one of the components is not core i = 0 for c in ckw.getComponents(): if c.core: output[c.output(spires)] = info[1][i] i += 1 output = [{'keyword': key, 'number': value} for key, value in output.iteritems()] return sorted(output, key=lambda x: x['number'], reverse=True)
[ "def", "_get_core_keywords", "(", "skw_matches", ",", "ckw_matches", ",", "spires", "=", "False", ")", ":", "output", "=", "{", "}", "category", "=", "{", "}", "def", "_get_value_kw", "(", "kw", ")", ":", "\"\"\"Help to sort the Core keywords.\"\"\"", "i", "=", "0", "while", "kw", "[", "i", "]", ".", "isdigit", "(", ")", ":", "i", "+=", "1", "if", "i", ">", "0", ":", "return", "int", "(", "kw", "[", ":", "i", "]", ")", "else", ":", "return", "0", "for", "skw", ",", "info", "in", "skw_matches", ":", "if", "skw", ".", "core", ":", "output", "[", "skw", ".", "output", "(", "spires", ")", "]", "=", "len", "(", "info", "[", "0", "]", ")", "category", "[", "skw", ".", "output", "(", "spires", ")", "]", "=", "skw", ".", "type", "for", "ckw", ",", "info", "in", "ckw_matches", ":", "if", "ckw", ".", "core", ":", "output", "[", "ckw", ".", "output", "(", "spires", ")", "]", "=", "len", "(", "info", "[", "0", "]", ")", "else", ":", "# test if one of the components is not core", "i", "=", "0", "for", "c", "in", "ckw", ".", "getComponents", "(", ")", ":", "if", "c", ".", "core", ":", "output", "[", "c", ".", "output", "(", "spires", ")", "]", "=", "info", "[", "1", "]", "[", "i", "]", "i", "+=", "1", "output", "=", "[", "{", "'keyword'", ":", "key", ",", "'number'", ":", "value", "}", "for", "key", ",", "value", "in", "output", ".", "iteritems", "(", ")", "]", "return", "sorted", "(", "output", ",", "key", "=", "lambda", "x", ":", "x", "[", "'number'", "]", ",", "reverse", "=", "True", ")" ]
31.973684
16.631579
def read(self, size): """ Read from the current offset a total number of `size` bytes and increment the offset by `size` :param int size: length of bytes to read :rtype: bytearray """ if isinstance(size, SV): size = size.value buff = self.__buff[self.__idx:self.__idx + size] self.__idx += size return buff
[ "def", "read", "(", "self", ",", "size", ")", ":", "if", "isinstance", "(", "size", ",", "SV", ")", ":", "size", "=", "size", ".", "value", "buff", "=", "self", ".", "__buff", "[", "self", ".", "__idx", ":", "self", ".", "__idx", "+", "size", "]", "self", ".", "__idx", "+=", "size", "return", "buff" ]
25.8
17.4
def unbind_opr(self, opr, path=None): """ 接触操作员与权限关联 """ if path: self.routes[path]['oprs'].remove(opr) else: for path in self.routes: route = self.routes.get(path) if route and opr in route['oprs']: route['oprs'].remove(opr)
[ "def", "unbind_opr", "(", "self", ",", "opr", ",", "path", "=", "None", ")", ":", "if", "path", ":", "self", ".", "routes", "[", "path", "]", "[", "'oprs'", "]", ".", "remove", "(", "opr", ")", "else", ":", "for", "path", "in", "self", ".", "routes", ":", "route", "=", "self", ".", "routes", ".", "get", "(", "path", ")", "if", "route", "and", "opr", "in", "route", "[", "'oprs'", "]", ":", "route", "[", "'oprs'", "]", ".", "remove", "(", "opr", ")" ]
32.4
8.7
def create(self, alert_config, occurrence_frequency_count=None, occurrence_frequency_unit=None, alert_frequency_count=None, alert_frequency_unit=None): """ Create a new alert :param alert_config: A list of AlertConfig classes (Ex: ``[EmailAlertConfig('me@mydomain.com')]``) :type alert_config: list of :class:`PagerDutyAlertConfig<logentries_api.alerts.PagerDutyAlertConfig>`, :class:`WebHookAlertConfig<logentries_api.alerts.WebHookAlertConfig>`, :class:`EmailAlertConfig<logentries_api.alerts.EmailAlertConfig>`, :class:`SlackAlertConfig<logentries_api.alerts.SlackAlertConfig>`, or :class:`HipChatAlertConfig<logentries_api.alerts.HipChatAlertConfig>` :param occurrence_frequency_count: How many times per ``alert_frequency_unit`` for a match before issuing an alert. Defaults to 1 :type occurrence_frequency_count: int :param occurrence_frequency_unit: The time period to monitor for sending an alert. Must be 'day', or 'hour'. Defaults to 'hour' :type occurrence_frequency_unit: str :param alert_frequency_count: How many times per ``alert_frequency_unit`` to issue an alert. Defaults to 1 :type alert_frequency_count: int :param alert_frequency_unit: How often to regulate sending alerts. Must be 'day', or 'hour'. Defaults to 'hour' :type alert_frequency_unit: str :returns: The response of your post :rtype: dict :raises: This will raise a :class:`ServerException<logentries_api.exceptions.ServerException>` if there is an error from Logentries """ data = { 'rate_count': occurrence_frequency_count or 1, 'rate_range': occurrence_frequency_unit or 'hour', 'limit_count': alert_frequency_count or 1, 'limit_range': alert_frequency_unit or 'hour', 'schedule': [], 'enabled': True, } data.update(alert_config.args()) # Yes, it's confusing. the `/actions/` endpoint is used for alerts, while # the /tags/ endpoint is used for labels. return self._post( request=ApiActions.CREATE.value, uri=ApiUri.ACTIONS.value, params=data )
[ "def", "create", "(", "self", ",", "alert_config", ",", "occurrence_frequency_count", "=", "None", ",", "occurrence_frequency_unit", "=", "None", ",", "alert_frequency_count", "=", "None", ",", "alert_frequency_unit", "=", "None", ")", ":", "data", "=", "{", "'rate_count'", ":", "occurrence_frequency_count", "or", "1", ",", "'rate_range'", ":", "occurrence_frequency_unit", "or", "'hour'", ",", "'limit_count'", ":", "alert_frequency_count", "or", "1", ",", "'limit_range'", ":", "alert_frequency_unit", "or", "'hour'", ",", "'schedule'", ":", "[", "]", ",", "'enabled'", ":", "True", ",", "}", "data", ".", "update", "(", "alert_config", ".", "args", "(", ")", ")", "# Yes, it's confusing. the `/actions/` endpoint is used for alerts, while", "# the /tags/ endpoint is used for labels.", "return", "self", ".", "_post", "(", "request", "=", "ApiActions", ".", "CREATE", ".", "value", ",", "uri", "=", "ApiUri", ".", "ACTIONS", ".", "value", ",", "params", "=", "data", ")" ]
40.711864
20.745763
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ raise NotImplementedError
[ "def", "send", "(", "self", ",", "request", ",", "stream", "=", "False", ",", "timeout", "=", "None", ",", "verify", "=", "True", ",", "cert", "=", "None", ",", "proxies", "=", "None", ")", ":", "raise", "NotImplementedError" ]
57.117647
24.294118
def random_deletion(self,fastq,rate): """Perform the permutation on the sequence :param fastq: FASTQ sequence to permute :type fastq: format.fastq.FASTQ :param rate: how frequently to permute :type rate: float :return: Permutted FASTQ :rtype: format.fastq.FASTQ """ sequence = fastq.sequence quality = fastq.qual seq = '' qual = None if quality: qual = '' for i in range(len(sequence)): # check context prev = None if i >= 1: prev = sequence[i-1] next = None if i < len(sequence)-1: next = sequence[i+1] if self._before_base and (not prev or prev != self._before_base): seq+=sequence[i] if quality: qual+=quality[i] continue if self._after_base and (not next or next != self._after_base): seq+=sequence[i] if quality: qual+=quality[i] continue if self._observed_base and (sequence[i] != self._observed_base): seq+=sequence[i] if quality: qual+=quality[i] continue rnum = self.random.random() if rnum >= rate: seq += sequence[i] if quality: qual+=quality[i] return FASTQ('@'+fastq.header+"\n"+seq+"\n+\n"+qual+"\n")
[ "def", "random_deletion", "(", "self", ",", "fastq", ",", "rate", ")", ":", "sequence", "=", "fastq", ".", "sequence", "quality", "=", "fastq", ".", "qual", "seq", "=", "''", "qual", "=", "None", "if", "quality", ":", "qual", "=", "''", "for", "i", "in", "range", "(", "len", "(", "sequence", ")", ")", ":", "# check context", "prev", "=", "None", "if", "i", ">=", "1", ":", "prev", "=", "sequence", "[", "i", "-", "1", "]", "next", "=", "None", "if", "i", "<", "len", "(", "sequence", ")", "-", "1", ":", "next", "=", "sequence", "[", "i", "+", "1", "]", "if", "self", ".", "_before_base", "and", "(", "not", "prev", "or", "prev", "!=", "self", ".", "_before_base", ")", ":", "seq", "+=", "sequence", "[", "i", "]", "if", "quality", ":", "qual", "+=", "quality", "[", "i", "]", "continue", "if", "self", ".", "_after_base", "and", "(", "not", "next", "or", "next", "!=", "self", ".", "_after_base", ")", ":", "seq", "+=", "sequence", "[", "i", "]", "if", "quality", ":", "qual", "+=", "quality", "[", "i", "]", "continue", "if", "self", ".", "_observed_base", "and", "(", "sequence", "[", "i", "]", "!=", "self", ".", "_observed_base", ")", ":", "seq", "+=", "sequence", "[", "i", "]", "if", "quality", ":", "qual", "+=", "quality", "[", "i", "]", "continue", "rnum", "=", "self", ".", "random", ".", "random", "(", ")", "if", "rnum", ">=", "rate", ":", "seq", "+=", "sequence", "[", "i", "]", "if", "quality", ":", "qual", "+=", "quality", "[", "i", "]", "return", "FASTQ", "(", "'@'", "+", "fastq", ".", "header", "+", "\"\\n\"", "+", "seq", "+", "\"\\n+\\n\"", "+", "qual", "+", "\"\\n\"", ")" ]
30.461538
15.410256
def find_optconf(self, pconfs): """Find the optimal Parallel configuration.""" # Save pconfs for future reference. self.set_pconfs(pconfs) # Select the partition on which we'll be running and set MPI/OMP cores. optconf = self.manager.select_qadapter(pconfs) return optconf
[ "def", "find_optconf", "(", "self", ",", "pconfs", ")", ":", "# Save pconfs for future reference.", "self", ".", "set_pconfs", "(", "pconfs", ")", "# Select the partition on which we'll be running and set MPI/OMP cores.", "optconf", "=", "self", ".", "manager", ".", "select_qadapter", "(", "pconfs", ")", "return", "optconf" ]
39.25
16.5
def qloguniform(low, high, q, random_state): ''' low: an float that represent an lower bound high: an float that represent an upper bound q: sample step random_state: an object of numpy.random.RandomState ''' return np.round(loguniform(low, high, random_state) / q) * q
[ "def", "qloguniform", "(", "low", ",", "high", ",", "q", ",", "random_state", ")", ":", "return", "np", ".", "round", "(", "loguniform", "(", "low", ",", "high", ",", "random_state", ")", "/", "q", ")", "*", "q" ]
36.25
18.25
def _onError(self, message): """Memorizies a parser error message""" self.isOK = False if message.strip() != "": self.errors.append(message)
[ "def", "_onError", "(", "self", ",", "message", ")", ":", "self", ".", "isOK", "=", "False", "if", "message", ".", "strip", "(", ")", "!=", "\"\"", ":", "self", ".", "errors", ".", "append", "(", "message", ")" ]
34.4
7
def zip_(zip_file, sources, template=None, cwd=None, runas=None, zip64=False): ''' Uses the ``zipfile`` Python module to create zip files .. versionchanged:: 2015.5.0 This function was rewritten to use Python's native zip file support. The old functionality has been preserved in the new function :mod:`archive.cmd_zip <salt.modules.archive.cmd_zip>`. For versions 2014.7.x and earlier, see the :mod:`archive.cmd_zip <salt.modules.archive.cmd_zip>` documentation. zip_file Path of zip file to be created sources Comma-separated list of sources to include in the zip file. Sources can also be passed in a Python list. .. versionchanged:: 2017.7.0 Globbing is now supported for this argument template : None Can be set to 'jinja' or another supported template engine to render the command arguments before execution: .. code-block:: bash salt '*' archive.zip template=jinja /tmp/zipfile.zip /tmp/sourcefile1,/tmp/{{grains.id}}.txt cwd : None Use this argument along with relative paths in ``sources`` to create zip files which do not contain the leading directories. If not specified, the zip file will be created as if the cwd was ``/``, and creating a zip file of ``/foo/bar/baz.txt`` will contain the parent directories ``foo`` and ``bar``. To create a zip file containing just ``baz.txt``, the following command would be used: .. code-block:: bash salt '*' archive.zip /tmp/baz.zip baz.txt cwd=/foo/bar runas : None Create the zip file as the specified user. Defaults to the user under which the minion is running. zip64 : False Used to enable ZIP64 support, necessary to create archives larger than 4 GByte in size. If true, will create ZIP file with the ZIPp64 extension when the zipfile is larger than 2 GB. ZIP64 extension is disabled by default in the Python native zip support because the default zip and unzip commands on Unix (the InfoZIP utilities) don't support these extensions. CLI Example: .. 
code-block:: bash salt '*' archive.zip /tmp/zipfile.zip /tmp/sourcefile1,/tmp/sourcefile2 # Globbing for sources (2017.7.0 and later) salt '*' archive.zip /tmp/zipfile.zip '/tmp/sourcefile*' ''' if runas: euid = os.geteuid() egid = os.getegid() uinfo = __salt__['user.info'](runas) if not uinfo: raise SaltInvocationError( 'User \'{0}\' does not exist'.format(runas) ) zip_file, sources = _render_filenames(zip_file, sources, None, template) sources = _expand_sources(sources) if not cwd: for src in sources: if not os.path.isabs(src): raise SaltInvocationError( 'Relative paths require the \'cwd\' parameter' ) else: err_msg = 'cwd must be absolute' try: if not os.path.isabs(cwd): raise SaltInvocationError(err_msg) except AttributeError: raise SaltInvocationError(err_msg) if runas and (euid != uinfo['uid'] or egid != uinfo['gid']): # Change the egid first, as changing it after the euid will fail # if the runas user is non-privileged. os.setegid(uinfo['gid']) os.seteuid(uinfo['uid']) try: exc = None archived_files = [] with contextlib.closing(zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED, zip64)) as zfile: for src in sources: if cwd: src = os.path.join(cwd, src) if os.path.exists(src): if os.path.isabs(src): rel_root = '/' else: rel_root = cwd if cwd is not None else '/' if os.path.isdir(src): for dir_name, sub_dirs, files in salt.utils.path.os_walk(src): if cwd and dir_name.startswith(cwd): arc_dir = os.path.relpath(dir_name, cwd) else: arc_dir = os.path.relpath(dir_name, rel_root) if arc_dir: archived_files.append(arc_dir + '/') zfile.write(dir_name, arc_dir) for filename in files: abs_name = os.path.join(dir_name, filename) arc_name = os.path.join(arc_dir, filename) archived_files.append(arc_name) zfile.write(abs_name, arc_name) else: if cwd and src.startswith(cwd): arc_name = os.path.relpath(src, cwd) else: arc_name = os.path.relpath(src, rel_root) archived_files.append(arc_name) zfile.write(src, arc_name) except Exception as exc: pass finally: # Restore the 
euid/egid if runas: os.seteuid(euid) os.setegid(egid) if exc is not None: # Wait to raise the exception until euid/egid are restored to avoid # permission errors in writing to minion log. if exc == zipfile.LargeZipFile: raise CommandExecutionError( 'Resulting zip file too large, would require ZIP64 support' 'which has not been enabled. Rerun command with zip64=True' ) else: raise CommandExecutionError( 'Exception encountered creating zipfile: {0}'.format(exc) ) return archived_files
[ "def", "zip_", "(", "zip_file", ",", "sources", ",", "template", "=", "None", ",", "cwd", "=", "None", ",", "runas", "=", "None", ",", "zip64", "=", "False", ")", ":", "if", "runas", ":", "euid", "=", "os", ".", "geteuid", "(", ")", "egid", "=", "os", ".", "getegid", "(", ")", "uinfo", "=", "__salt__", "[", "'user.info'", "]", "(", "runas", ")", "if", "not", "uinfo", ":", "raise", "SaltInvocationError", "(", "'User \\'{0}\\' does not exist'", ".", "format", "(", "runas", ")", ")", "zip_file", ",", "sources", "=", "_render_filenames", "(", "zip_file", ",", "sources", ",", "None", ",", "template", ")", "sources", "=", "_expand_sources", "(", "sources", ")", "if", "not", "cwd", ":", "for", "src", "in", "sources", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "src", ")", ":", "raise", "SaltInvocationError", "(", "'Relative paths require the \\'cwd\\' parameter'", ")", "else", ":", "err_msg", "=", "'cwd must be absolute'", "try", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "cwd", ")", ":", "raise", "SaltInvocationError", "(", "err_msg", ")", "except", "AttributeError", ":", "raise", "SaltInvocationError", "(", "err_msg", ")", "if", "runas", "and", "(", "euid", "!=", "uinfo", "[", "'uid'", "]", "or", "egid", "!=", "uinfo", "[", "'gid'", "]", ")", ":", "# Change the egid first, as changing it after the euid will fail", "# if the runas user is non-privileged.", "os", ".", "setegid", "(", "uinfo", "[", "'gid'", "]", ")", "os", ".", "seteuid", "(", "uinfo", "[", "'uid'", "]", ")", "try", ":", "exc", "=", "None", "archived_files", "=", "[", "]", "with", "contextlib", ".", "closing", "(", "zipfile", ".", "ZipFile", "(", "zip_file", ",", "'w'", ",", "zipfile", ".", "ZIP_DEFLATED", ",", "zip64", ")", ")", "as", "zfile", ":", "for", "src", "in", "sources", ":", "if", "cwd", ":", "src", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "src", ")", "if", "os", ".", "path", ".", "exists", "(", "src", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", 
"src", ")", ":", "rel_root", "=", "'/'", "else", ":", "rel_root", "=", "cwd", "if", "cwd", "is", "not", "None", "else", "'/'", "if", "os", ".", "path", ".", "isdir", "(", "src", ")", ":", "for", "dir_name", ",", "sub_dirs", ",", "files", "in", "salt", ".", "utils", ".", "path", ".", "os_walk", "(", "src", ")", ":", "if", "cwd", "and", "dir_name", ".", "startswith", "(", "cwd", ")", ":", "arc_dir", "=", "os", ".", "path", ".", "relpath", "(", "dir_name", ",", "cwd", ")", "else", ":", "arc_dir", "=", "os", ".", "path", ".", "relpath", "(", "dir_name", ",", "rel_root", ")", "if", "arc_dir", ":", "archived_files", ".", "append", "(", "arc_dir", "+", "'/'", ")", "zfile", ".", "write", "(", "dir_name", ",", "arc_dir", ")", "for", "filename", "in", "files", ":", "abs_name", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "filename", ")", "arc_name", "=", "os", ".", "path", ".", "join", "(", "arc_dir", ",", "filename", ")", "archived_files", ".", "append", "(", "arc_name", ")", "zfile", ".", "write", "(", "abs_name", ",", "arc_name", ")", "else", ":", "if", "cwd", "and", "src", ".", "startswith", "(", "cwd", ")", ":", "arc_name", "=", "os", ".", "path", ".", "relpath", "(", "src", ",", "cwd", ")", "else", ":", "arc_name", "=", "os", ".", "path", ".", "relpath", "(", "src", ",", "rel_root", ")", "archived_files", ".", "append", "(", "arc_name", ")", "zfile", ".", "write", "(", "src", ",", "arc_name", ")", "except", "Exception", "as", "exc", ":", "pass", "finally", ":", "# Restore the euid/egid", "if", "runas", ":", "os", ".", "seteuid", "(", "euid", ")", "os", ".", "setegid", "(", "egid", ")", "if", "exc", "is", "not", "None", ":", "# Wait to raise the exception until euid/egid are restored to avoid", "# permission errors in writing to minion log.", "if", "exc", "==", "zipfile", ".", "LargeZipFile", ":", "raise", "CommandExecutionError", "(", "'Resulting zip file too large, would require ZIP64 support'", "'which has not been enabled. 
Rerun command with zip64=True'", ")", "else", ":", "raise", "CommandExecutionError", "(", "'Exception encountered creating zipfile: {0}'", ".", "format", "(", "exc", ")", ")", "return", "archived_files" ]
39.614865
23.222973
def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an RDS subnet group exists. CLI example:: salt myminion boto_rds.subnet_group_exists my-param-group \ region=us-east-1 ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return {'exists': bool(conn)} rds = conn.describe_db_subnet_groups(DBSubnetGroupName=name) return {'exists': bool(rds)} except ClientError as e: if "DBSubnetGroupNotFoundFault" in e.message: return {'exists': False} else: return {'error': __utils__['boto3.get_error'](e)}
[ "def", "subnet_group_exists", "(", "name", ",", "tags", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "conn", ":", "return", "{", "'exists'", ":", "bool", "(", "conn", ")", "}", "rds", "=", "conn", ".", "describe_db_subnet_groups", "(", "DBSubnetGroupName", "=", "name", ")", "return", "{", "'exists'", ":", "bool", "(", "rds", ")", "}", "except", "ClientError", "as", "e", ":", "if", "\"DBSubnetGroupNotFoundFault\"", "in", "e", ".", "message", ":", "return", "{", "'exists'", ":", "False", "}", "else", ":", "return", "{", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
33.272727
22.181818
def sum(x, axis=None, keepdims=False): """Reduction along axes with sum operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which the sum is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array. """ from .function_bases import sum as sum_base if axis is None: axis = range(x.ndim) elif not hasattr(axis, '__iter__'): axis = [axis] return sum_base(x, axis, keepdims)
[ "def", "sum", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "from", ".", "function_bases", "import", "sum", "as", "sum_base", "if", "axis", "is", "None", ":", "axis", "=", "range", "(", "x", ".", "ndim", ")", "elif", "not", "hasattr", "(", "axis", ",", "'__iter__'", ")", ":", "axis", "=", "[", "axis", "]", "return", "sum_base", "(", "x", ",", "axis", ",", "keepdims", ")" ]
35.444444
19
def restore(self, filename): """Restore object from mat-file. TODO: determine format specification """ matfile = loadmat(filename) if matfile['dim'] == 1: matfile['solution'] = matfile['solution'][0, :] self.elapsed_time = matfile['elapsed_time'][0, 0] self.solution = matfile['solution'] return self
[ "def", "restore", "(", "self", ",", "filename", ")", ":", "matfile", "=", "loadmat", "(", "filename", ")", "if", "matfile", "[", "'dim'", "]", "==", "1", ":", "matfile", "[", "'solution'", "]", "=", "matfile", "[", "'solution'", "]", "[", "0", ",", ":", "]", "self", ".", "elapsed_time", "=", "matfile", "[", "'elapsed_time'", "]", "[", "0", ",", "0", "]", "self", ".", "solution", "=", "matfile", "[", "'solution'", "]", "return", "self" ]
30
17.166667
def main(argv=None): """The entry point of the application.""" if argv is None: argv = sys.argv[1:] usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:]) version = 'Gitpress ' + __version__ # Parse options args = docopt(usage, argv=argv, version=version) # Execute command try: return execute(args) except RepositoryNotFoundError as ex: error('No Gitpress repository found at', ex.directory)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "usage", "=", "'\\n\\n\\n'", ".", "join", "(", "__doc__", ".", "split", "(", "'\\n\\n\\n'", ")", "[", "1", ":", "]", ")", "version", "=", "'Gitpress '", "+", "__version__", "# Parse options", "args", "=", "docopt", "(", "usage", ",", "argv", "=", "argv", ",", "version", "=", "version", ")", "# Execute command", "try", ":", "return", "execute", "(", "args", ")", "except", "RepositoryNotFoundError", "as", "ex", ":", "error", "(", "'No Gitpress repository found at'", ",", "ex", ".", "directory", ")" ]
29.066667
17.8
def datatype(self, value): """ Args: value (string): 'uint8', 'uint16', 'uint64' Raises: ValueError """ self._datatype = self.validate_datatype(value) self._cutout_ready = True
[ "def", "datatype", "(", "self", ",", "value", ")", ":", "self", ".", "_datatype", "=", "self", ".", "validate_datatype", "(", "value", ")", "self", ".", "_cutout_ready", "=", "True" ]
26.666667
13.333333
def load(pipette_model: str, pipette_id: str = None) -> pipette_config: """ Load pipette config data This function loads from a combination of - the pipetteModelSpecs.json file in the wheel (should never be edited) - the pipetteNameSpecs.json file in the wheel(should never be edited) - any config overrides found in ``opentrons.config.CONFIG['pipette_config_overrides_dir']`` This function reads from disk each time, so changes to the overrides will be picked up in subsequent calls. :param str pipette_model: The pipette model name (i.e. "p10_single_v1.3") for which to load configuration :param pipette_id: An (optional) unique ID for the pipette to locate config overrides. If the ID is not specified, the system assumes this is a simulated pipette and does not save settings. If the ID is specified but no overrides corresponding to the ID are found, the system creates a new overrides file for it. :type pipette_id: str or None :raises KeyError: if ``pipette_model`` is not in the top-level keys of pipetteModeLSpecs.json (and therefore not in :py:attr:`configs`) :returns pipette_config: The configuration, loaded and checked """ # Load the model config and update with the name config cfg = copy.deepcopy(configs[pipette_model]) cfg.update(copy.deepcopy(name_config()[cfg['name']])) # Load overrides if we have a pipette id if pipette_id: try: override = load_overrides(pipette_id) except FileNotFoundError: save_overrides(pipette_id, {}, pipette_model) log.info( "Save defaults for pipette model {} and id {}".format( pipette_model, pipette_id)) else: cfg.update(override) # the ulPerMm functions are structured in pipetteModelSpecs.json as # a list sorted from oldest to newest. That means the latest functions # are always the last element and, as of right now, the older ones are # the first element (for models that only have one function, the first # and last elements are the same, which is fine). 
If we add more in the # future, we’ll have to change this code to select items more # intelligently if ff.use_old_aspiration_functions(): log.info("Using old aspiration functions") ul_per_mm = cfg['ulPerMm'][0] else: log.info("Using new aspiration functions") ul_per_mm = cfg['ulPerMm'][-1] res = pipette_config( top=ensure_value( cfg, 'top', mutable_configs), bottom=ensure_value( cfg, 'bottom', mutable_configs), blow_out=ensure_value( cfg, 'blowout', mutable_configs), drop_tip=ensure_value( cfg, 'dropTip', mutable_configs), pick_up_current=ensure_value(cfg, 'pickUpCurrent', mutable_configs), pick_up_distance=ensure_value(cfg, 'pickUpDistance', mutable_configs), pick_up_increment=ensure_value( cfg, 'pickUpIncrement', mutable_configs), pick_up_presses=ensure_value(cfg, 'pickUpPresses', mutable_configs), pick_up_speed=ensure_value(cfg, 'pickUpSpeed', mutable_configs), aspirate_flow_rate=ensure_value( cfg, 'defaultAspirateFlowRate', mutable_configs), dispense_flow_rate=ensure_value( cfg, 'defaultDispenseFlowRate', mutable_configs), channels=ensure_value(cfg, 'channels', mutable_configs), model_offset=ensure_value(cfg, 'modelOffset', mutable_configs), plunger_current=ensure_value(cfg, 'plungerCurrent', mutable_configs), drop_tip_current=ensure_value(cfg, 'dropTipCurrent', mutable_configs), drop_tip_speed=ensure_value(cfg, 'dropTipSpeed', mutable_configs), min_volume=ensure_value(cfg, 'minVolume', mutable_configs), max_volume=ensure_value(cfg, 'maxVolume', mutable_configs), ul_per_mm=ul_per_mm, quirks=ensure_value(cfg, 'quirks', mutable_configs), tip_length=ensure_value(cfg, 'tipLength', mutable_configs), display_name=ensure_value(cfg, 'displayName', mutable_configs) ) return res
[ "def", "load", "(", "pipette_model", ":", "str", ",", "pipette_id", ":", "str", "=", "None", ")", "->", "pipette_config", ":", "# Load the model config and update with the name config", "cfg", "=", "copy", ".", "deepcopy", "(", "configs", "[", "pipette_model", "]", ")", "cfg", ".", "update", "(", "copy", ".", "deepcopy", "(", "name_config", "(", ")", "[", "cfg", "[", "'name'", "]", "]", ")", ")", "# Load overrides if we have a pipette id", "if", "pipette_id", ":", "try", ":", "override", "=", "load_overrides", "(", "pipette_id", ")", "except", "FileNotFoundError", ":", "save_overrides", "(", "pipette_id", ",", "{", "}", ",", "pipette_model", ")", "log", ".", "info", "(", "\"Save defaults for pipette model {} and id {}\"", ".", "format", "(", "pipette_model", ",", "pipette_id", ")", ")", "else", ":", "cfg", ".", "update", "(", "override", ")", "# the ulPerMm functions are structured in pipetteModelSpecs.json as", "# a list sorted from oldest to newest. That means the latest functions", "# are always the last element and, as of right now, the older ones are", "# the first element (for models that only have one function, the first", "# and last elements are the same, which is fine). 
If we add more in the", "# future, we’ll have to change this code to select items more", "# intelligently", "if", "ff", ".", "use_old_aspiration_functions", "(", ")", ":", "log", ".", "info", "(", "\"Using old aspiration functions\"", ")", "ul_per_mm", "=", "cfg", "[", "'ulPerMm'", "]", "[", "0", "]", "else", ":", "log", ".", "info", "(", "\"Using new aspiration functions\"", ")", "ul_per_mm", "=", "cfg", "[", "'ulPerMm'", "]", "[", "-", "1", "]", "res", "=", "pipette_config", "(", "top", "=", "ensure_value", "(", "cfg", ",", "'top'", ",", "mutable_configs", ")", ",", "bottom", "=", "ensure_value", "(", "cfg", ",", "'bottom'", ",", "mutable_configs", ")", ",", "blow_out", "=", "ensure_value", "(", "cfg", ",", "'blowout'", ",", "mutable_configs", ")", ",", "drop_tip", "=", "ensure_value", "(", "cfg", ",", "'dropTip'", ",", "mutable_configs", ")", ",", "pick_up_current", "=", "ensure_value", "(", "cfg", ",", "'pickUpCurrent'", ",", "mutable_configs", ")", ",", "pick_up_distance", "=", "ensure_value", "(", "cfg", ",", "'pickUpDistance'", ",", "mutable_configs", ")", ",", "pick_up_increment", "=", "ensure_value", "(", "cfg", ",", "'pickUpIncrement'", ",", "mutable_configs", ")", ",", "pick_up_presses", "=", "ensure_value", "(", "cfg", ",", "'pickUpPresses'", ",", "mutable_configs", ")", ",", "pick_up_speed", "=", "ensure_value", "(", "cfg", ",", "'pickUpSpeed'", ",", "mutable_configs", ")", ",", "aspirate_flow_rate", "=", "ensure_value", "(", "cfg", ",", "'defaultAspirateFlowRate'", ",", "mutable_configs", ")", ",", "dispense_flow_rate", "=", "ensure_value", "(", "cfg", ",", "'defaultDispenseFlowRate'", ",", "mutable_configs", ")", ",", "channels", "=", "ensure_value", "(", "cfg", ",", "'channels'", ",", "mutable_configs", ")", ",", "model_offset", "=", "ensure_value", "(", "cfg", ",", "'modelOffset'", ",", "mutable_configs", ")", ",", "plunger_current", "=", "ensure_value", "(", "cfg", ",", "'plungerCurrent'", ",", "mutable_configs", ")", ",", "drop_tip_current", "=", 
"ensure_value", "(", "cfg", ",", "'dropTipCurrent'", ",", "mutable_configs", ")", ",", "drop_tip_speed", "=", "ensure_value", "(", "cfg", ",", "'dropTipSpeed'", ",", "mutable_configs", ")", ",", "min_volume", "=", "ensure_value", "(", "cfg", ",", "'minVolume'", ",", "mutable_configs", ")", ",", "max_volume", "=", "ensure_value", "(", "cfg", ",", "'maxVolume'", ",", "mutable_configs", ")", ",", "ul_per_mm", "=", "ul_per_mm", ",", "quirks", "=", "ensure_value", "(", "cfg", ",", "'quirks'", ",", "mutable_configs", ")", ",", "tip_length", "=", "ensure_value", "(", "cfg", ",", "'tipLength'", ",", "mutable_configs", ")", ",", "display_name", "=", "ensure_value", "(", "cfg", ",", "'displayName'", ",", "mutable_configs", ")", ")", "return", "res" ]
45.709677
22.075269
def chain_future(a: "Future[_T]", b: "Future[_T]") -> None: """Chain two futures together so that when one completes, so does the other. The result (success or failure) of ``a`` will be copied to ``b``, unless ``b`` has already been completed or cancelled by the time ``a`` finishes. .. versionchanged:: 5.0 Now accepts both Tornado/asyncio `Future` objects and `concurrent.futures.Future`. """ def copy(future: "Future[_T]") -> None: assert future is a if b.done(): return if hasattr(a, "exc_info") and a.exc_info() is not None: # type: ignore future_set_exc_info(b, a.exc_info()) # type: ignore elif a.exception() is not None: b.set_exception(a.exception()) else: b.set_result(a.result()) if isinstance(a, Future): future_add_done_callback(a, copy) else: # concurrent.futures.Future from tornado.ioloop import IOLoop IOLoop.current().add_future(a, copy)
[ "def", "chain_future", "(", "a", ":", "\"Future[_T]\"", ",", "b", ":", "\"Future[_T]\"", ")", "->", "None", ":", "def", "copy", "(", "future", ":", "\"Future[_T]\"", ")", "->", "None", ":", "assert", "future", "is", "a", "if", "b", ".", "done", "(", ")", ":", "return", "if", "hasattr", "(", "a", ",", "\"exc_info\"", ")", "and", "a", ".", "exc_info", "(", ")", "is", "not", "None", ":", "# type: ignore", "future_set_exc_info", "(", "b", ",", "a", ".", "exc_info", "(", ")", ")", "# type: ignore", "elif", "a", ".", "exception", "(", ")", "is", "not", "None", ":", "b", ".", "set_exception", "(", "a", ".", "exception", "(", ")", ")", "else", ":", "b", ".", "set_result", "(", "a", ".", "result", "(", ")", ")", "if", "isinstance", "(", "a", ",", "Future", ")", ":", "future_add_done_callback", "(", "a", ",", "copy", ")", "else", ":", "# concurrent.futures.Future", "from", "tornado", ".", "ioloop", "import", "IOLoop", "IOLoop", ".", "current", "(", ")", ".", "add_future", "(", "a", ",", "copy", ")" ]
32.258065
19.967742
def memoize(func=None, maxlen=None): """Cache a function's return value each time it is called. This function serves as a function decorator to provide a caching of evaluated fitness values. If called later with the same arguments, the cached value is returned instead of being re-evaluated. This decorator assumes that candidates are individually pickleable, and their pickled values are used for hashing into a dictionary. It should be used when evaluating an *expensive* fitness function to avoid costly re-evaluation of those fitnesses. The typical usage is as follows:: @memoize def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass It is also possible to provide the named argument *maxlen*, which specifies the size of the memoization cache to use. (If *maxlen* is ``None``, then an unbounded cache is used.) Once the size of the cache has reached *maxlen*, the oldest element is replaced by the newest element in order to keep the size constant. This usage is as follows:: @memoize(maxlen=100) def expensive_fitness_function(candidates, args): # Implementation of expensive fitness calculation pass .. warning:: The ``maxlen`` parameter must be passed as a named keyword argument, or an ``AttributeError`` will be raised (e.g., saying ``@memoize(100)`` will cause an error). """ if func is not None: cache = BoundedOrderedDict(maxlen=maxlen) @functools.wraps(func) def memo_target(candidates, args): fitness = [] for candidate in candidates: lookup_value = pickle.dumps(candidate, 1) if lookup_value not in cache: cache[lookup_value] = func([candidate], args)[0] fitness.append(cache[lookup_value]) return fitness return memo_target else: def memoize_factory(func): return memoize(func, maxlen=maxlen) return memoize_factory
[ "def", "memoize", "(", "func", "=", "None", ",", "maxlen", "=", "None", ")", ":", "if", "func", "is", "not", "None", ":", "cache", "=", "BoundedOrderedDict", "(", "maxlen", "=", "maxlen", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "memo_target", "(", "candidates", ",", "args", ")", ":", "fitness", "=", "[", "]", "for", "candidate", "in", "candidates", ":", "lookup_value", "=", "pickle", ".", "dumps", "(", "candidate", ",", "1", ")", "if", "lookup_value", "not", "in", "cache", ":", "cache", "[", "lookup_value", "]", "=", "func", "(", "[", "candidate", "]", ",", "args", ")", "[", "0", "]", "fitness", ".", "append", "(", "cache", "[", "lookup_value", "]", ")", "return", "fitness", "return", "memo_target", "else", ":", "def", "memoize_factory", "(", "func", ")", ":", "return", "memoize", "(", "func", ",", "maxlen", "=", "maxlen", ")", "return", "memoize_factory" ]
42.44
20.58
def get_filename(file): """ Safe method to retrieve only the name of the file. :param file: Path of the file to retrieve the name from. :return: None if the file is non-existant, otherwise the filename (extension included) :rtype: None, str """ if not os.path.exists(file): return None return "%s%s" % os.path.splitext(file)
[ "def", "get_filename", "(", "file", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "return", "None", "return", "\"%s%s\"", "%", "os", ".", "path", ".", "splitext", "(", "file", ")" ]
35.5
15.1
def merge_entity(self, table_name, entity, if_match='*', timeout=None): ''' Updates an existing entity by merging the entity's properties. Throws if the entity does not exist. This operation does not replace the existing entity as the update_entity operation does. A property cannot be removed with merge_entity. Any properties with null values are ignored. All other properties will be updated or added. :param str table_name: The name of the table containing the entity to merge. :param entity: The entity to merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: dict or :class:`~azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The merge operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional merge, set If-Match to the wildcard character (*). :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _merge_entity(entity, if_match, self.require_encryption, self.key_encryption_key) request.host_locations = self._get_host_locations() request.query['timeout'] = _int_to_str(timeout) request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) return self._perform_request(request, _extract_etag)
[ "def", "merge_entity", "(", "self", ",", "table_name", ",", "entity", ",", "if_match", "=", "'*'", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'table_name'", ",", "table_name", ")", "request", "=", "_merge_entity", "(", "entity", ",", "if_match", ",", "self", ".", "require_encryption", ",", "self", ".", "key_encryption_key", ")", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", ")", "request", ".", "query", "[", "'timeout'", "]", "=", "_int_to_str", "(", "timeout", ")", "request", ".", "path", "=", "_get_entity_path", "(", "table_name", ",", "entity", "[", "'PartitionKey'", "]", ",", "entity", "[", "'RowKey'", "]", ")", "return", "self", ".", "_perform_request", "(", "request", ",", "_extract_etag", ")" ]
48.5
27.15
def child_added(self, child): """ Handle the child added event from the declaration. This handler will unparent the child toolkit widget. Subclasses which need more control should reimplement this method. """ super(AndroidViewGroup, self).child_added(child) widget = self.widget #: TODO: Should index be cached? for i, child_widget in enumerate(self.child_widgets()): if child_widget == child.widget: if child.layout_params: widget.addView_(child_widget, i, child.layout_params) else: widget.addView(child_widget, i)
[ "def", "child_added", "(", "self", ",", "child", ")", ":", "super", "(", "AndroidViewGroup", ",", "self", ")", ".", "child_added", "(", "child", ")", "widget", "=", "self", ".", "widget", "#: TODO: Should index be cached?", "for", "i", ",", "child_widget", "in", "enumerate", "(", "self", ".", "child_widgets", "(", ")", ")", ":", "if", "child_widget", "==", "child", ".", "widget", ":", "if", "child", ".", "layout_params", ":", "widget", ".", "addView_", "(", "child_widget", ",", "i", ",", "child", ".", "layout_params", ")", "else", ":", "widget", ".", "addView", "(", "child_widget", ",", "i", ")" ]
38.611111
17.111111
def set_verify_depth(self, depth): """ Set the maximum depth for the certificate chain verification that shall be allowed for this Context object. :param depth: An integer specifying the verify depth :return: None """ if not isinstance(depth, integer_types): raise TypeError("depth must be an integer") _lib.SSL_CTX_set_verify_depth(self._context, depth)
[ "def", "set_verify_depth", "(", "self", ",", "depth", ")", ":", "if", "not", "isinstance", "(", "depth", ",", "integer_types", ")", ":", "raise", "TypeError", "(", "\"depth must be an integer\"", ")", "_lib", ".", "SSL_CTX_set_verify_depth", "(", "self", ".", "_context", ",", "depth", ")" ]
35.083333
17.416667
def from_config(cls, cp, section, outputs, skip_opts=None, additional_opts=None): """Initializes a transform from the given section. Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser A parsed configuration file that contains the transform options. section : str Name of the section in the configuration file. outputs : str The names of the parameters that are output by this transformation, separated by `VARARGS_DELIM`. These must appear in the "tag" part of the section header. skip_opts : list, optional Do not read options in the given list. additional_opts : dict, optional Any additional arguments to pass to the class. If an option is provided that also exists in the config file, the value provided will be used instead of being read from the file. Returns ------- cls An instance of the class. """ tag = outputs if skip_opts is None: skip_opts = [] if additional_opts is None: additional_opts = {} else: additional_opts = additional_opts.copy() outputs = set(outputs.split(VARARGS_DELIM)) special_args = ['name'] + skip_opts + additional_opts.keys() # get any extra arguments to pass to init extra_args = {} for opt in cp.options("-".join([section, tag])): if opt in special_args: continue # check if option can be cast as a float val = cp.get_opt_tag(section, opt, tag) try: val = float(val) except ValueError: pass # add option extra_args.update({opt:val}) extra_args.update(additional_opts) out = cls(**extra_args) # check that the outputs matches if outputs-out.outputs != set() or out.outputs-outputs != set(): raise ValueError("outputs of class do not match outputs specified " "in section") return out
[ "def", "from_config", "(", "cls", ",", "cp", ",", "section", ",", "outputs", ",", "skip_opts", "=", "None", ",", "additional_opts", "=", "None", ")", ":", "tag", "=", "outputs", "if", "skip_opts", "is", "None", ":", "skip_opts", "=", "[", "]", "if", "additional_opts", "is", "None", ":", "additional_opts", "=", "{", "}", "else", ":", "additional_opts", "=", "additional_opts", ".", "copy", "(", ")", "outputs", "=", "set", "(", "outputs", ".", "split", "(", "VARARGS_DELIM", ")", ")", "special_args", "=", "[", "'name'", "]", "+", "skip_opts", "+", "additional_opts", ".", "keys", "(", ")", "# get any extra arguments to pass to init", "extra_args", "=", "{", "}", "for", "opt", "in", "cp", ".", "options", "(", "\"-\"", ".", "join", "(", "[", "section", ",", "tag", "]", ")", ")", ":", "if", "opt", "in", "special_args", ":", "continue", "# check if option can be cast as a float", "val", "=", "cp", ".", "get_opt_tag", "(", "section", ",", "opt", ",", "tag", ")", "try", ":", "val", "=", "float", "(", "val", ")", "except", "ValueError", ":", "pass", "# add option", "extra_args", ".", "update", "(", "{", "opt", ":", "val", "}", ")", "extra_args", ".", "update", "(", "additional_opts", ")", "out", "=", "cls", "(", "*", "*", "extra_args", ")", "# check that the outputs matches", "if", "outputs", "-", "out", ".", "outputs", "!=", "set", "(", ")", "or", "out", ".", "outputs", "-", "outputs", "!=", "set", "(", ")", ":", "raise", "ValueError", "(", "\"outputs of class do not match outputs specified \"", "\"in section\"", ")", "return", "out" ]
38.818182
16.727273
def resolve_path(self, address): ''' Resolve the given address in this tree branch ''' match = self.find_one(address) if not match: return [self] # Go further up the tree if possible if isinstance(match, ContainerNode): return match.resolve_path(address) + [self] # This is as far as we go return [match, self]
[ "def", "resolve_path", "(", "self", ",", "address", ")", ":", "match", "=", "self", ".", "find_one", "(", "address", ")", "if", "not", "match", ":", "return", "[", "self", "]", "# Go further up the tree if possible", "if", "isinstance", "(", "match", ",", "ContainerNode", ")", ":", "return", "match", ".", "resolve_path", "(", "address", ")", "+", "[", "self", "]", "# This is as far as we go", "return", "[", "match", ",", "self", "]" ]
28.214286
16.928571
def Print(self, output_writer): """Prints a human readable version of the filter. Args: output_writer (CLIOutputWriter): output writer. """ if self._filters: output_writer.Write('Filters:\n') for file_entry_filter in self._filters: file_entry_filter.Print(output_writer)
[ "def", "Print", "(", "self", ",", "output_writer", ")", ":", "if", "self", ".", "_filters", ":", "output_writer", ".", "Write", "(", "'Filters:\\n'", ")", "for", "file_entry_filter", "in", "self", ".", "_filters", ":", "file_entry_filter", ".", "Print", "(", "output_writer", ")" ]
30.4
12.4
def list(self): """Returns a list of the users gists as GistInfo objects Returns: a list of GistInfo objects """ # Define the basic request. The per_page parameter is set to 100, which # is the maximum github allows. If the user has more than one page of # gists, this request object will be modified to retrieve each # successive page of gists. request = requests.Request( 'GET', 'https://api.github.com/gists', headers={ 'Accept-Encoding': 'identity, deflate, compress, gzip', 'User-Agent': 'python-requests/1.2.0', 'Accept': 'application/vnd.github.v3.base64', }, params={ 'access_token': self.token, 'per_page': 100, }, ) # Github provides a 'link' header that contains information to # navigate through a users page of gists. This regex is used to # extract the URLs contained in this header, and to find the next page # of gists. pattern = re.compile(r'<([^>]*)>; rel="([^"]*)"') gists = [] while True: # Retrieve the next page of gists try: response = self.send(request).json() except Exception: break # Extract the list of gists for gist in response: try: gists.append( GistInfo( gist['id'], gist['public'], gist['description'], ) ) except KeyError: continue try: link = response.headers['link'] # Search for the next page of gist. If a 'next' page is found, # the URL is set to this new page and the iteration continues. # If there is no next page, return the list of gists. for result in pattern.finditer(link): url = result.group(1) rel = result.group(2) if rel == 'next': request.url = url break else: return gists except Exception: break return gists
[ "def", "list", "(", "self", ")", ":", "# Define the basic request. The per_page parameter is set to 100, which", "# is the maximum github allows. If the user has more than one page of", "# gists, this request object will be modified to retrieve each", "# successive page of gists.", "request", "=", "requests", ".", "Request", "(", "'GET'", ",", "'https://api.github.com/gists'", ",", "headers", "=", "{", "'Accept-Encoding'", ":", "'identity, deflate, compress, gzip'", ",", "'User-Agent'", ":", "'python-requests/1.2.0'", ",", "'Accept'", ":", "'application/vnd.github.v3.base64'", ",", "}", ",", "params", "=", "{", "'access_token'", ":", "self", ".", "token", ",", "'per_page'", ":", "100", ",", "}", ",", ")", "# Github provides a 'link' header that contains information to", "# navigate through a users page of gists. This regex is used to", "# extract the URLs contained in this header, and to find the next page", "# of gists.", "pattern", "=", "re", ".", "compile", "(", "r'<([^>]*)>; rel=\"([^\"]*)\"'", ")", "gists", "=", "[", "]", "while", "True", ":", "# Retrieve the next page of gists", "try", ":", "response", "=", "self", ".", "send", "(", "request", ")", ".", "json", "(", ")", "except", "Exception", ":", "break", "# Extract the list of gists", "for", "gist", "in", "response", ":", "try", ":", "gists", ".", "append", "(", "GistInfo", "(", "gist", "[", "'id'", "]", ",", "gist", "[", "'public'", "]", ",", "gist", "[", "'description'", "]", ",", ")", ")", "except", "KeyError", ":", "continue", "try", ":", "link", "=", "response", ".", "headers", "[", "'link'", "]", "# Search for the next page of gist. 
If a 'next' page is found,", "# the URL is set to this new page and the iteration continues.", "# If there is no next page, return the list of gists.", "for", "result", "in", "pattern", ".", "finditer", "(", "link", ")", ":", "url", "=", "result", ".", "group", "(", "1", ")", "rel", "=", "result", ".", "group", "(", "2", ")", "if", "rel", "==", "'next'", ":", "request", ".", "url", "=", "url", "break", "else", ":", "return", "gists", "except", "Exception", ":", "break", "return", "gists" ]
33.432432
19.5
def _interpolate(self, kind='linear'): """Apply scipy.interpolate.interp1d along resampling dimension.""" # drop any existing non-dimension coordinates along the resampling # dimension dummy = self._obj.copy() for k, v in self._obj.coords.items(): if k != self._dim and self._dim in v.dims: dummy = dummy.drop(k) return dummy.interp(assume_sorted=True, method=kind, kwargs={'bounds_error': False}, **{self._dim: self._full_index})
[ "def", "_interpolate", "(", "self", ",", "kind", "=", "'linear'", ")", ":", "# drop any existing non-dimension coordinates along the resampling", "# dimension", "dummy", "=", "self", ".", "_obj", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "self", ".", "_obj", ".", "coords", ".", "items", "(", ")", ":", "if", "k", "!=", "self", ".", "_dim", "and", "self", ".", "_dim", "in", "v", ".", "dims", ":", "dummy", "=", "dummy", ".", "drop", "(", "k", ")", "return", "dummy", ".", "interp", "(", "assume_sorted", "=", "True", ",", "method", "=", "kind", ",", "kwargs", "=", "{", "'bounds_error'", ":", "False", "}", ",", "*", "*", "{", "self", ".", "_dim", ":", "self", ".", "_full_index", "}", ")" ]
50.181818
13.272727
def register_integration(package_folder): """Register a honeycomb integration. :param package_folder: Path to folder with integration to load :returns: Validated integration object :rtype: :func:`honeycomb.utils.defs.Integration` """ logger.debug("registering integration %s", package_folder) package_folder = os.path.realpath(package_folder) if not os.path.exists(package_folder): raise IntegrationNotFound(os.path.basename(package_folder)) json_config_path = os.path.join(package_folder, CONFIG_FILE_NAME) if not os.path.exists(json_config_path): raise ConfigFileNotFound(json_config_path) with open(json_config_path, "r") as f: config_json = json.load(f) # Validate integration and alert config validate_config(config_json, defs.INTEGRATION_VALIDATE_CONFIG_FIELDS) validate_config_parameters(config_json, defs.INTEGRATION_PARAMETERS_ALLOWED_KEYS, defs.INTEGRATION_PARAMETERS_ALLOWED_TYPES) integration_type = _create_integration_object(config_json) return integration_type
[ "def", "register_integration", "(", "package_folder", ")", ":", "logger", ".", "debug", "(", "\"registering integration %s\"", ",", "package_folder", ")", "package_folder", "=", "os", ".", "path", ".", "realpath", "(", "package_folder", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "package_folder", ")", ":", "raise", "IntegrationNotFound", "(", "os", ".", "path", ".", "basename", "(", "package_folder", ")", ")", "json_config_path", "=", "os", ".", "path", ".", "join", "(", "package_folder", ",", "CONFIG_FILE_NAME", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "json_config_path", ")", ":", "raise", "ConfigFileNotFound", "(", "json_config_path", ")", "with", "open", "(", "json_config_path", ",", "\"r\"", ")", "as", "f", ":", "config_json", "=", "json", ".", "load", "(", "f", ")", "# Validate integration and alert config", "validate_config", "(", "config_json", ",", "defs", ".", "INTEGRATION_VALIDATE_CONFIG_FIELDS", ")", "validate_config_parameters", "(", "config_json", ",", "defs", ".", "INTEGRATION_PARAMETERS_ALLOWED_KEYS", ",", "defs", ".", "INTEGRATION_PARAMETERS_ALLOWED_TYPES", ")", "integration_type", "=", "_create_integration_object", "(", "config_json", ")", "return", "integration_type" ]
39.428571
19.107143
def set_amount_cb(self, widget, val): """This method is called when 'Zoom Amount' control is adjusted. """ self.zoom_amount = val zoomlevel = self.fitsimage_focus.get_zoom() self._zoomset(self.fitsimage_focus, zoomlevel)
[ "def", "set_amount_cb", "(", "self", ",", "widget", ",", "val", ")", ":", "self", ".", "zoom_amount", "=", "val", "zoomlevel", "=", "self", ".", "fitsimage_focus", ".", "get_zoom", "(", ")", "self", ".", "_zoomset", "(", "self", ".", "fitsimage_focus", ",", "zoomlevel", ")" ]
42.5
6.333333
def get_users_in_organization(self, organization_id, start=0, limit=50): """ Get all the users of a specified organization :param organization_id: str :param start: OPTIONAL: int :param limit: OPTIONAL: int :return: Users list in organization """ url = 'rest/servicedeskapi/organization/{}/user'.format(organization_id) params = {} if start is not None: params['start'] = int(start) if limit is not None: params['limit'] = int(limit) return self.get(url, headers=self.experimental_headers, params=params)
[ "def", "get_users_in_organization", "(", "self", ",", "organization_id", ",", "start", "=", "0", ",", "limit", "=", "50", ")", ":", "url", "=", "'rest/servicedeskapi/organization/{}/user'", ".", "format", "(", "organization_id", ")", "params", "=", "{", "}", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "int", "(", "start", ")", "if", "limit", "is", "not", "None", ":", "params", "[", "'limit'", "]", "=", "int", "(", "limit", ")", "return", "self", ".", "get", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "params", "=", "params", ")" ]
35.882353
15.529412