repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
tensorflow/tensor2tensor
tensor2tensor/data_generators/multi_problem_v2.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L183-L197
def generate_data_with_shared_vocab(self, data_dir, tmp_dir, task_id=-1):
  """Generates TF-Records for problems using a global vocabulary file."""
  shared_vocab_path = os.path.join(data_dir, self.vocab_filename)
  if not tf.gfile.Exists(shared_vocab_path):
    raise ValueError(
        'Global vocabulary file: %s does not exist, '
        'please create one using build_vocab.py' % shared_vocab_path)
  # Each child problem expects a vocab file at its own location; copying the
  # shared vocab there is disk-wasteful but keeps the text-to-text API intact.
  for problem in self.problems:
    child_vocab_path = os.path.join(data_dir, problem.vocab_filename)
    if not tf.gfile.Exists(child_vocab_path):
      tf.gfile.Copy(shared_vocab_path, child_vocab_path)
    problem.generate_data(data_dir, tmp_dir, task_id)
[ "def", "generate_data_with_shared_vocab", "(", "self", ",", "data_dir", ",", "tmp_dir", ",", "task_id", "=", "-", "1", ")", ":", "global_vocab_filename", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "self", ".", "vocab_filename", ")", "if", "...
Generates TF-Records for problems using a global vocabulary file.
[ "Generates", "TF", "-", "Records", "for", "problems", "using", "a", "global", "vocabulary", "file", "." ]
python
train
Yelp/venv-update
pip_faster.py
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L230-L241
def pip_get_installed():
    """Code extracted from the middle of the pip freeze command.

    FIXME: does not list anything installed via -e
    """
    from pip._internal.utils.misc import dist_is_local

    installed = []
    for dist in fresh_working_set():
        if not dist_is_local(dist):
            continue
        if dist.key == 'python':  # See #220
            continue
        installed.append(dist_to_req(dist))
    return tuple(installed)
[ "def", "pip_get_installed", "(", ")", ":", "from", "pip", ".", "_internal", ".", "utils", ".", "misc", "import", "dist_is_local", "return", "tuple", "(", "dist_to_req", "(", "dist", ")", "for", "dist", "in", "fresh_working_set", "(", ")", "if", "dist_is_loca...
Code extracted from the middle of the pip freeze command. FIXME: does not list anything installed via -e
[ "Code", "extracted", "from", "the", "middle", "of", "the", "pip", "freeze", "command", ".", "FIXME", ":", "does", "not", "list", "anything", "installed", "via", "-", "e" ]
python
train
stuaxo/vext
vext/cmdline/__init__.py
https://github.com/stuaxo/vext/blob/fa98a21ecfbbc1c3d1b84085d69ec42defdd2f69/vext/cmdline/__init__.py#L32-L63
def do_enable():
    """Uncomment any lines that start with #import in the .pth file.

    Rewrites ``vext_pth`` through a ``.tmp`` file and a pair of renames so the
    update is atomic; the previous version is kept as ``<vext_pth>~``.  If the
    .pth file does not exist (IOError errno 2) it is recreated via
    ``create_pth()``.
    """
    try:
        _lines = []
        with open(vext_pth, mode='r') as f:
            for line in f.readlines():
                # '#import ...' (spaces allowed after '#') -> 'import ...'
                if line.startswith('#') and line[1:].lstrip().startswith('import '):
                    _lines.append(line[1:].lstrip())
                else:
                    _lines.append(line)

        # Best-effort removal of a stale temp file.  Catch OSError only: a
        # bare 'except:' would also swallow KeyboardInterrupt/SystemExit.
        try:
            os.unlink('%s.tmp' % vext_pth)
        except OSError:
            pass

        with open('%s.tmp' % vext_pth, mode='w+') as f:
            f.writelines(_lines)

        # Best-effort removal of a stale backup before renaming over it.
        try:
            os.unlink('%s~' % vext_pth)
        except OSError:
            pass

        os.rename(vext_pth, '%s~' % vext_pth)
        os.rename('%s.tmp' % vext_pth, vext_pth)
    except IOError as e:
        if e.errno == 2:
            # vext file doesn't exist, recreate it.
            create_pth()
[ "def", "do_enable", "(", ")", ":", "try", ":", "_lines", "=", "[", "]", "with", "open", "(", "vext_pth", ",", "mode", "=", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "...
Uncomment any lines that start with #import in the .pth file
[ "Uncomment", "any", "lines", "that", "start", "with", "#import", "in", "the", ".", "pth", "file" ]
python
train
DeepHorizons/iarm
iarm_kernel/iarmkernel.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm_kernel/iarmkernel.py#L183-L215
def magic_memory(self, line):
    """
    Print out the current value of memory

    Usage:
        Pass in the byte of memory to read, separated by spaced
        A list of memory contents can be entered by separating them by a hyphen
        `%mem 4 5` or `%mem 8-12`
    """
    # TODO add support for directives
    chunks = []
    for token in [tok.strip() for tok in line.replace(',', '').split()]:
        if '-' in token:
            # Hyphenated token means an inclusive range (n-k)
            lo_txt, hi_txt = token.split('-')
            lo = self.interpreter.convert_to_integer(
                re.search(self.interpreter.IMMEDIATE_NUMBER, lo_txt).groups()[0])
            hi = self.interpreter.convert_to_integer(
                re.search(self.interpreter.IMMEDIATE_NUMBER, hi_txt).groups()[0])
            for addr in range(lo, hi + 1):
                rendered = self.convert_representation(self.interpreter.memory[addr])
                chunks.append("{}: {}\n".format(str(addr), rendered))
        else:
            # TODO fix what is the key for memory (currently it's an int, but registers are strings, should it be the same?)
            rendered = self.convert_representation(
                self.interpreter.memory[self.interpreter.convert_to_integer(token)])
            chunks.append("{}: {}\n".format(token, rendered))
    stream_content = {'name': 'stdout', 'text': ''.join(chunks)}
    self.send_response(self.iopub_socket, 'stream', stream_content)
[ "def", "magic_memory", "(", "self", ",", "line", ")", ":", "# TODO add support for directives", "message", "=", "\"\"", "for", "address", "in", "[", "i", ".", "strip", "(", ")", "for", "i", "in", "line", ".", "replace", "(", "','", ",", "''", ")", ".",...
Print out the current value of memory Usage: Pass in the byte of memory to read, separated by spaced A list of memory contents can be entered by separating them by a hyphen `%mem 4 5` or `%mem 8-12`
[ "Print", "out", "the", "current", "value", "of", "memory" ]
python
train
mabuchilab/QNET
src/qnet/algebra/core/super_operator_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/super_operator_algebra.py#L351-L367
def anti_commutator(A, B=None):
    r"""If ``B != None``, return the anti-commutator :math:`\{A,B\}`, otherwise
    return the super-operator :math:`\{A,\cdot\}`. The super-operator
    :math:`\{A,\cdot\}` maps any other operator ``B`` to the anti-commutator
    :math:`\{A, B\} = A B + B A`.

    Args:
        A: The first operator to form all anti-commutators of.
        B: The second operator to form the anti-commutator of, or None.

    Returns:
        SuperOperator: The linear superoperator :math:`\{A,\cdot\}`
    """
    # Compare against None explicitly: the documented contract is "B != None",
    # and a falsy second operand (e.g. a zero operator) must still take the
    # binary branch instead of being treated as "no B given".
    if B is not None:
        return A * B + B * A
    return SPre(A) + SPost(A)
[ "def", "anti_commutator", "(", "A", ",", "B", "=", "None", ")", ":", "if", "B", ":", "return", "A", "*", "B", "+", "B", "*", "A", "return", "SPre", "(", "A", ")", "+", "SPost", "(", "A", ")" ]
If ``B != None``, return the anti-commutator :math:`\{A,B\}`, otherwise return the super-operator :math:`\{A,\cdot\}`. The super-operator :math:`\{A,\cdot\}` maps any other operator ``B`` to the anti-commutator :math:`\{A, B\} = A B + B A`. Args: A: The first operator to form all anti-commutators of. B: The second operator to form the anti-commutator of, or None. Returns: SuperOperator: The linear superoperator :math:`[A,\cdot]`
[ "If", "B", "!", "=", "None", "return", "the", "anti", "-", "commutator", ":", "math", ":", "\\", "{", "A", "B", "\\", "}", "otherwise", "return", "the", "super", "-", "operator", ":", "math", ":", "\\", "{", "A", "\\", "cdot", "\\", "}", ".", "...
python
train
RedHatInsights/insights-core
insights/client/config.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L456-L491
def _load_command_line(self, conf_only=False):
    '''
    Load config from command line switches.
    NOTE: Not all config is available on the command line.

    :param conf_only: when True and --conf was specified, apply only the
        'conf' option (early pass used to locate the config file).
    '''
    # did we already parse cli (i.e. to get conf file)? don't run twice
    if self._cli_opts:
        self._update_dict(self._cli_opts)
        return
    parser = argparse.ArgumentParser()
    debug_grp = parser.add_argument_group('Debug options')
    platf_grp = parser.add_argument_group('Platform options')
    cli_options = dict((k, v) for k, v in DEFAULT_OPTS.items() if (
                       'opt' in v))
    for _, o in cli_options.items():
        # Work on a copy: the pop()/assignment below must not mutate the
        # shared DEFAULT_OPTS entries, otherwise a later parse in the same
        # process would see specs stripped of 'opt'/'group'.
        o = dict(o)
        group = o.pop('group', None)
        if group == 'debug':
            g = debug_grp
        elif group == 'platform':
            g = platf_grp
        else:
            g = parser
        optnames = o.pop('opt')
        # use argparse.SUPPRESS as CLI defaults so it won't parse
        # options that weren't specified
        o['default'] = argparse.SUPPRESS
        g.add_argument(*optnames, **o)

    options = parser.parse_args()
    self._cli_opts = vars(options)
    if conf_only and 'conf' in self._cli_opts:
        self._update_dict({'conf': self._cli_opts['conf']})
        return
    self._update_dict(self._cli_opts)
[ "def", "_load_command_line", "(", "self", ",", "conf_only", "=", "False", ")", ":", "# did we already parse cli (i.e. to get conf file)? don't run twice", "if", "self", ".", "_cli_opts", ":", "self", ".", "_update_dict", "(", "self", ".", "_cli_opts", ")", "return", ...
Load config from command line switches. NOTE: Not all config is available on the command line.
[ "Load", "config", "from", "command", "line", "switches", ".", "NOTE", ":", "Not", "all", "config", "is", "available", "on", "the", "command", "line", "." ]
python
train
csparpa/pyowm
pyowm/weatherapi25/historian.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/historian.py#L58-L67
def humidity_series(self):
    """Returns the humidity time series relative to the meteostation, in the
    form of a list of tuples, each one containing the couple timestamp-value

    :returns: a list of tuples
    """
    measurements = self._station_history.get_measurements()
    return [(timestamp, measurements[timestamp]['humidity'])
            for timestamp in measurements]
[ "def", "humidity_series", "(", "self", ")", ":", "return", "[", "(", "tstamp", ",", "self", ".", "_station_history", ".", "get_measurements", "(", ")", "[", "tstamp", "]", "[", "'humidity'", "]", ")", "for", "tstamp", "in", "self", ".", "_station_history",...
Returns the humidity time series relative to the meteostation, in the form of a list of tuples, each one containing the couple timestamp-value :returns: a list of tuples
[ "Returns", "the", "humidity", "time", "series", "relative", "to", "the", "meteostation", "in", "the", "form", "of", "a", "list", "of", "tuples", "each", "one", "containing", "the", "couple", "timestamp", "-", "value" ]
python
train
saltstack/salt
salt/modules/libcloud_storage.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/libcloud_storage.py#L152-L177
def create_container(container_name, profile, **libcloud_kwargs):
    '''
    Create a container in the cloud

    :param container_name: Container name
    :type  container_name: ``str``

    :param profile: The profile key
    :type  profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's create_container method
    :type  libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.create_container MyFolder profile1
    '''
    driver = _get_driver(profile=profile)
    # Drop any __pub_* style kwargs injected by salt before calling the driver.
    extra_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    new_container = driver.create_container(container_name, **extra_kwargs)
    return {
        'name': new_container.name,
        'extra': new_container.extra,
    }
[ "def", "create_container", "(", "container_name", ",", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "("...
Create a container in the cloud :param container_name: Container name :type container_name: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's create_container method :type libcloud_kwargs: ``dict`` CLI Example: .. code-block:: bash salt myminion libcloud_storage.create_container MyFolder profile1
[ "Create", "a", "container", "in", "the", "cloud" ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3430-L3438
def noNsProp(self, name):
    """Search and get the value of an attribute associated to a node
       This does the entity substitution. This function looks in DTD
       attribute declaration for #FIXED or default declaration values
       unless DTD use has been turned off. This function is similar to
       xmlGetProp except it will accept only an attribute in no namespace. """
    return libxml2mod.xmlGetNoNsProp(self._o, name)
[ "def", "noNsProp", "(", "self", ",", "name", ")", ":", "ret", "=", "libxml2mod", ".", "xmlGetNoNsProp", "(", "self", ".", "_o", ",", "name", ")", "return", "ret" ]
Search and get the value of an attribute associated to a node This does the entity substitution. This function looks in DTD attribute declaration for #FIXED or default declaration values unless DTD use has been turned off. This function is similar to xmlGetProp except it will accept only an attribute in no namespace.
[ "Search", "and", "get", "the", "value", "of", "an", "attribute", "associated", "to", "a", "node", "This", "does", "the", "entity", "substitution", ".", "This", "function", "looks", "in", "DTD", "attribute", "declaration", "for", "#FIXED", "or", "default", "d...
python
train
shoeffner/pandoc-source-exec
pandoc_source_exec.py
https://github.com/shoeffner/pandoc-source-exec/blob/9a13b9054d629a60b63196a906fafe2673722d13/pandoc_source_exec.py#L60-L85
def execute_code_block(elem, doc):
    """Executes a code block by passing it to the executor.

    Args:
        elem The AST element.
        doc  The document.

    Returns:
        The output of the command.
    """
    argv = select_executor(elem, doc).split(' ')

    source = elem.text
    # Plot-tagged blocks get wrapped so the figure is saved instead of shown.
    if 'plt' in elem.attributes or 'plt' in elem.classes:
        source = save_plot(source, elem)
    argv.append(source)

    if 'args' in elem.attributes:
        argv.extend(elem.attributes['args'].split())

    workdir = None
    if 'wd' in elem.attributes:
        workdir = elem.attributes['wd']

    completed = subprocess.run(argv, encoding='utf8',
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, cwd=workdir)
    return completed.stdout
[ "def", "execute_code_block", "(", "elem", ",", "doc", ")", ":", "command", "=", "select_executor", "(", "elem", ",", "doc", ")", ".", "split", "(", "' '", ")", "code", "=", "elem", ".", "text", "if", "'plt'", "in", "elem", ".", "attributes", "or", "'...
Executes a code block by passing it to the executor. Args: elem The AST element. doc The document. Returns: The output of the command.
[ "Executes", "a", "code", "block", "by", "passing", "it", "to", "the", "executor", "." ]
python
train
Othernet-Project/hwd
hwd/network.py
https://github.com/Othernet-Project/hwd/blob/7f4445bac61aa2305806aa80338e2ce5baa1093c/hwd/network.py#L58-L67
def _get_ipv6addrs(self):
    """
    Returns the IPv6 addresses associated with this NIC. If no IPv6
    addresses are used, empty dict is returned.
    """
    ipv6 = self._get_addrs().get(netifaces.AF_INET6)
    # Only the first address entry is reported, matching the IPv4 accessor.
    return ipv6[0] if ipv6 else {}
[ "def", "_get_ipv6addrs", "(", "self", ")", ":", "addrs", "=", "self", ".", "_get_addrs", "(", ")", "ipv6addrs", "=", "addrs", ".", "get", "(", "netifaces", ".", "AF_INET6", ")", "if", "not", "ipv6addrs", ":", "return", "{", "}", "return", "ipv6addrs", ...
Returns the IPv6 addresses associated with this NIC. If no IPv6 addresses are used, empty dict is returned.
[ "Returns", "the", "IPv6", "addresses", "associated", "with", "this", "NIC", ".", "If", "no", "IPv6", "addresses", "are", "used", "empty", "dict", "is", "returned", "." ]
python
train
jrief/djangocms-cascade
cmsplugin_cascade/plugin_base.py
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L35-L53
def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None):
    """
    Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.
    """
    from django.apps import apps

    class Meta:
        proxy = True
        app_label = 'cmsplugin_cascade'

    name = str(name + 'Model')
    try:
        # Reuse the model if Django already registered one under this name.
        proxy_model = apps.get_registered_model(Meta.app_label, name)
    except LookupError:
        base_classes = model_mixins + (base_model,)
        class_attrs = dict(attrs or {}, Meta=Meta, __module__=module)
        proxy_model = type(name, base_classes, class_attrs)
        fake_proxy_models[name] = base_classes
    return proxy_model
[ "def", "create_proxy_model", "(", "name", ",", "model_mixins", ",", "base_model", ",", "attrs", "=", "None", ",", "module", "=", "None", ")", ":", "from", "django", ".", "apps", "import", "apps", "class", "Meta", ":", "proxy", "=", "True", "app_label", "...
Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.
[ "Create", "a", "Django", "Proxy", "Model", "on", "the", "fly", "to", "be", "used", "by", "any", "Cascade", "Plugin", "." ]
python
train
globality-corp/microcosm
microcosm/loaders/keys.py
https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/loaders/keys.py#L7-L37
def expand_config(dct,
                  separator='.',
                  skip_to=0,
                  key_func=lambda key: key.lower(),
                  key_parts_filter=lambda key_parts: True,
                  value_func=lambda value: value):
    """
    Expand a dictionary recursively by splitting keys along the separator.

    :param dct: a non-recursive dictionary
    :param separator: a separator character for splitting dictionary keys;
        may also be a callable taking the key and returning its separator
    :param skip_to: index to start splitting keys on;
                    can be used to skip over a key prefix
    :param key_func: a key mapping function
    :param key_parts_filter: a filter function for excluding keys
    :param value_func: a value mapping func
    """
    expanded = {}
    for raw_key, raw_value in dct.items():
        sep = separator(raw_key) if callable(separator) else separator
        parts = raw_key.split(sep)
        if not key_parts_filter(parts):
            continue
        # Walk (creating as needed) one nested dict per intermediate part,
        # skipping any prefix parts before skip_to.
        node = expanded
        for part in parts[skip_to:-1]:
            node = node.setdefault(key_func(part), dict())
        node[key_func(parts[-1])] = value_func(raw_value)
    return expanded
[ "def", "expand_config", "(", "dct", ",", "separator", "=", "'.'", ",", "skip_to", "=", "0", ",", "key_func", "=", "lambda", "key", ":", "key", ".", "lower", "(", ")", ",", "key_parts_filter", "=", "lambda", "key_parts", ":", "True", ",", "value_func", ...
Expand a dictionary recursively by splitting keys along the separator. :param dct: a non-recursive dictionary :param separator: a separator character for splitting dictionary keys :param skip_to: index to start splitting keys on; can be used to skip over a key prefix :param key_func: a key mapping function :param key_parts_filter: a filter function for excluding keys :param value_func: a value mapping func
[ "Expand", "a", "dictionary", "recursively", "by", "splitting", "keys", "along", "the", "separator", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_snmp.py#L201-L213
def snmp_server_user_ipv6_acl(self, **kwargs):
    """Auto Generated Code
    """
    # Build <config><snmp-server><user><username/><ipv6-acl/></user>... XML.
    root = ET.Element("config")
    snmp = ET.SubElement(root, "snmp-server",
                         xmlns="urn:brocade.com:mgmt:brocade-snmp")
    user_el = ET.SubElement(snmp, "user")
    ET.SubElement(user_el, "username").text = kwargs.pop('username')
    ET.SubElement(user_el, "ipv6-acl").text = kwargs.pop('ipv6_acl')
    emit = kwargs.pop('callback', self._callback)
    return emit(root)
[ "def", "snmp_server_user_ipv6_acl", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "snmp_server", "=", "ET", ".", "SubElement", "(", "config", ",", "\"snmp-server\"", ",", "xmlns", "=", "\"urn:b...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
pkgw/pwkit
pwkit/cli/latexdriver.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/cli/latexdriver.py#L105-L123
def write_bibtex_dict(stream, entries):
    """bibtexparser.write converts the entire database to one big string and
    writes it out in one go. I'm sure it will always all fit in RAM but some
    things just will not stand.
    """
    from bibtexparser.bwriter import BibTexWriter

    writer = BibTexWriter()
    writer.indent = ' '
    writer.entry_separator = ''

    for index, record in enumerate(entries):
        # Blank line between consecutive entries, none before the first.
        if index:
            stream.write(b'\n')
        stream.write(writer._entry_to_bibtex(record).encode('utf8'))
[ "def", "write_bibtex_dict", "(", "stream", ",", "entries", ")", ":", "from", "bibtexparser", ".", "bwriter", "import", "BibTexWriter", "writer", "=", "BibTexWriter", "(", ")", "writer", ".", "indent", "=", "' '", "writer", ".", "entry_separator", "=", "''", ...
bibtexparser.write converts the entire database to one big string and writes it out in one go. I'm sure it will always all fit in RAM but some things just will not stand.
[ "bibtexparser", ".", "write", "converts", "the", "entire", "database", "to", "one", "big", "string", "and", "writes", "it", "out", "in", "one", "go", ".", "I", "m", "sure", "it", "will", "always", "all", "fit", "in", "RAM", "but", "some", "things", "ju...
python
train
materialsproject/pymatgen
pymatgen/analysis/adsorption.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/adsorption.py#L503-L564
def generate_substitution_structures(self, atom, target_species=[],
                                     sub_both_sides=False, range_tol=1e-2,
                                     dist_from_surf=0):
    """
    Function that performs substitution-type doping on the surface and
        returns all possible configurations where one dopant is substituted
        per surface. Can substitute one surface or both.

    Args:
        atom (str): atom corresponding to substitutional dopant
        sub_both_sides (bool): If true, substitute an equivalent
            site on the other surface
        target_species (list): List of specific species to substitute
        range_tol (float): Find viable substitution sites at a specific
            distance from the surface +- this tolerance
        dist_from_surf (float): Distance from the surface to find viable
            substitution sites, defaults to 0 to substitute at the surface
    """
    # NOTE(review): `target_species=[]` is a mutable default argument; it is
    # only read below (never mutated), so it is harmless in practice.

    # Get symmetrized structure in case we want to substitue both sides
    sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()

    # Define a function for substituting a site
    def substitute(site, i):
        # Substitution is done on a copy; site_properties comes from the
        # original slab and is updated before being re-attached below.
        slab = self.slab.copy()
        props = self.slab.site_properties
        if sub_both_sides:
            # Find an equivalent site on the other surface
            eq_indices = [indices for indices in
                          sym_slab.equivalent_indices if i in indices][0]
            for ii in eq_indices:
                # Compare fractional z (rounded to 6 decimals) so the chosen
                # equivalent site lies on the *opposite* surface from `site`.
                if "%.6f" % (sym_slab[ii].frac_coords[2]) != \
                        "%.6f" % (site.frac_coords[2]):
                    props["surface_properties"][ii] = "substitute"
                    slab.replace(ii, atom)
                    break

        props["surface_properties"][i] = "substitute"
        slab.replace(i, atom)
        slab.add_site_property("surface_properties",
                               props["surface_properties"])
        return slab

    # Get all possible substitution sites
    substituted_slabs = []
    # Sort sites so that we can define a range relative to the position of the
    # surface atoms, i.e. search for sites above (below) the bottom (top)
    # surface
    sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2])
    if sorted_sites[0].surface_properties == "surface":
        d = sorted_sites[0].frac_coords[2] + dist_from_surf
    else:
        d = sorted_sites[-1].frac_coords[2] - dist_from_surf

    # Substitute every site whose fractional z lies within range_tol of d,
    # optionally restricted to the requested target_species.
    for i, site in enumerate(sym_slab):
        if d - range_tol < site.frac_coords[2] < d + range_tol:
            if target_species and site.species_string in target_species:
                substituted_slabs.append(substitute(site, i))
            elif not target_species:
                substituted_slabs.append(substitute(site, i))

    # Deduplicate symmetry-equivalent configurations, keeping one
    # representative per group found by StructureMatcher.
    matcher = StructureMatcher()
    return [s[0] for s in matcher.group_structures(substituted_slabs)]
[ "def", "generate_substitution_structures", "(", "self", ",", "atom", ",", "target_species", "=", "[", "]", ",", "sub_both_sides", "=", "False", ",", "range_tol", "=", "1e-2", ",", "dist_from_surf", "=", "0", ")", ":", "# Get symmetrized structure in case we want to ...
Function that performs substitution-type doping on the surface and returns all possible configurations where one dopant is substituted per surface. Can substitute one surface or both. Args: atom (str): atom corresponding to substitutional dopant sub_both_sides (bool): If true, substitute an equivalent site on the other surface target_species (list): List of specific species to substitute range_tol (float): Find viable substitution sites at a specific distance from the surface +- this tolerance dist_from_surf (float): Distance from the surface to find viable substitution sites, defaults to 0 to substitute at the surface
[ "Function", "that", "performs", "substitution", "-", "type", "doping", "on", "the", "surface", "and", "returns", "all", "possible", "configurations", "where", "one", "dopant", "is", "substituted", "per", "surface", ".", "Can", "substitute", "one", "surface", "or...
python
train
duointeractive/sea-cucumber
seacucumber/backend.py
https://github.com/duointeractive/sea-cucumber/blob/069637e2cbab561116e23b6723cfc30e779fce03/seacucumber/backend.py#L18-L42
def send_messages(self, email_messages):
    """
    Sends one or more EmailMessage objects and returns the number of
    email messages sent.

    :param EmailMessage email_messages: A list of Django's EmailMessage
        object instances.
    :rtype: int
    :returns: The number of EmailMessage objects that were successfully
        queued up. Note that these are not in a state where we can
        guarantee delivery just yet.
    """
    queue = getattr(settings, 'CUCUMBER_ROUTE_QUEUE', '')
    num_sent = 0
    for message in email_messages:
        # message().as_string() returns bytes on Python 2 but str on
        # Python 3 (where .decode('utf8') would raise AttributeError);
        # normalize to text before handing it to the task.
        raw_message = message.message().as_string()
        if isinstance(raw_message, bytes):
            raw_message = raw_message.decode('utf8')
        # Hand this off to a celery task.
        SendEmailTask.apply_async(
            args=[
                message.from_email,
                message.recipients(),
                raw_message,
            ],
            queue=queue,
        )
        num_sent += 1
    return num_sent
[ "def", "send_messages", "(", "self", ",", "email_messages", ")", ":", "queue", "=", "getattr", "(", "settings", ",", "'CUCUMBER_ROUTE_QUEUE'", ",", "''", ")", "num_sent", "=", "0", "for", "message", "in", "email_messages", ":", "# Hand this off to a celery task.",...
Sends one or more EmailMessage objects and returns the number of email messages sent. :param EmailMessage email_messages: A list of Django's EmailMessage object instances. :rtype: int :returns: The number of EmailMessage objects that were successfully queued up. Note that these are not in a state where we can guarantee delivery just yet.
[ "Sends", "one", "or", "more", "EmailMessage", "objects", "and", "returns", "the", "number", "of", "email", "messages", "sent", "." ]
python
train
unt-libraries/codalib
codalib/bagatom.py
https://github.com/unt-libraries/codalib/blob/458d117bb48938c1a0e26d9161cb5f730461b4c7/codalib/bagatom.py#L71-L84
def getOxum(dataPath):
    """
    Calculate the oxum for a given path.

    Walks *dataPath* recursively and returns "<total bytes>.<file count>".
    (The Python-2-only ``0L`` literals are replaced by ``0``: Python 2 ints
    auto-promote to long, so behavior is identical there, and the code now
    also runs on Python 3.)
    """
    fileCount = 0
    fileSizeTotal = 0
    for root, dirs, files in os.walk(dataPath):
        for fileName in files:
            fullName = os.path.join(root, fileName)
            stats = os.stat(fullName)
            fileSizeTotal += stats.st_size
            fileCount += 1
    return "%s.%s" % (fileSizeTotal, fileCount)
[ "def", "getOxum", "(", "dataPath", ")", ":", "fileCount", "=", "0L", "fileSizeTotal", "=", "0L", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "dataPath", ")", ":", "for", "fileName", "in", "files", ":", "fullName", "=", "os...
Calculate the oxum for a given path
[ "Calculate", "the", "oxum", "for", "a", "given", "path" ]
python
train
taizilongxu/douban.fm
doubanfm/controller/manager_controller.py
https://github.com/taizilongxu/douban.fm/blob/d65126d3bd3e12d8a7109137caff8da0efc22b2f/doubanfm/controller/manager_controller.py#L29-L37
def run(self, switch_queue):
    """Entry point required of every controller: store the switch queue and
    start the watcher threads.

    (Original docstring: 每个controller需要提供run方法, 来提供启动 — every
    controller must provide a ``run`` method as its startup hook.)
    """
    self.switch_queue = switch_queue
    self.quit = False

    # One thread watches the command queue, the other the playback timer.
    for worker in (self._watchdog_queue, self._watchdog_time):
        Thread(target=worker).start()
[ "def", "run", "(", "self", ",", "switch_queue", ")", ":", "self", ".", "switch_queue", "=", "switch_queue", "self", ".", "quit", "=", "False", "Thread", "(", "target", "=", "self", ".", "_watchdog_queue", ")", ".", "start", "(", ")", "Thread", "(", "ta...
每个controller需要提供run方法, 来提供启动
[ "每个controller需要提供run方法", "来提供启动" ]
python
train
inveniosoftware/invenio-communities
invenio_communities/permissions.py
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/permissions.py#L46-L49
def can(self):
    """Grant permission if owner or admin."""
    is_owner = str(current_user.get_id()) == str(self.community.id_user)
    # Short-circuits: the admin-access check only runs for non-owners.
    return is_owner or DynamicPermission(ActionNeed('admin-access')).can()
[ "def", "can", "(", "self", ")", ":", "return", "str", "(", "current_user", ".", "get_id", "(", ")", ")", "==", "str", "(", "self", ".", "community", ".", "id_user", ")", "or", "DynamicPermission", "(", "ActionNeed", "(", "'admin-access'", ")", ")", "."...
Grant permission if owner or admin.
[ "Grant", "permission", "if", "owner", "or", "admin", "." ]
python
train
MisterY/asset-allocation
asset_allocation/app.py
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/app.py#L26-L39
def add_stock_to_class(self, assetclass_id: int, symbol: str):
    """ Add a stock link to an asset class """
    assert isinstance(symbol, str)
    assert isinstance(assetclass_id, int)

    link = AssetClassStock()
    link.assetclassid = assetclass_id
    link.symbol = symbol

    db_session = self.open_session()
    db_session.add(link)
    self.save()

    return link
[ "def", "add_stock_to_class", "(", "self", ",", "assetclass_id", ":", "int", ",", "symbol", ":", "str", ")", ":", "assert", "isinstance", "(", "symbol", ",", "str", ")", "assert", "isinstance", "(", "assetclass_id", ",", "int", ")", "item", "=", "AssetClass...
Add a stock link to an asset class
[ "Add", "a", "stock", "link", "to", "an", "asset", "class" ]
python
train
DeV1doR/aioethereum
aioethereum/management/eth.py
https://github.com/DeV1doR/aioethereum/blob/85eb46550d862b3ccc309914ea871ca1c7b42157/aioethereum/management/eth.py#L550-L575
def eth_newFilter(self, from_block=BLOCK_TAG_LATEST,
                  to_block=BLOCK_TAG_LATEST, address=None, topics=None):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter

    :param from_block: Block tag or number (optional)
    :type from_block: int or BLOCK_TAGS

    :param to_block: Block tag or number (optional)
    :type to_block: int or BLOCK_TAGS

    :param address: Contract address (optional)
    :type address: str

    :param topics: Topics (optional)
    :type topics: list

    :return: filter_id
    :rtype: str
    """
    filter_params = {
        'fromBlock': validate_block(from_block),
        'toBlock': validate_block(to_block),
        'address': address,
        'topics': topics,
    }
    result = yield from self.rpc_call('eth_newFilter', [filter_params])
    return result
[ "def", "eth_newFilter", "(", "self", ",", "from_block", "=", "BLOCK_TAG_LATEST", ",", "to_block", "=", "BLOCK_TAG_LATEST", ",", "address", "=", "None", ",", "topics", "=", "None", ")", ":", "obj", "=", "{", "'fromBlock'", ":", "validate_block", "(", "from_bl...
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter :param from_block: Block tag or number (optional) :type from_block: int or BLOCK_TAGS :param to_block: Block tag or number (optional) :type to_block: int or BLOCK_TAGS :param address: Contract address (optional) :type address: str :param topics: Topics (optional) :type topics: list :return: filter_id :rtype: str
[ "https", ":", "//", "github", ".", "com", "/", "ethereum", "/", "wiki", "/", "wiki", "/", "JSON", "-", "RPC#eth_newfilter" ]
python
train
prompt-toolkit/pymux
pymux/commands/completer.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/completer.py#L135-L182
def parse(cls, text): """ Parse the given text. Returns a tuple: (list_of_parts, start_pos_of_the_last_part). """ OUTSIDE, IN_DOUBLE, IN_SINGLE = 0, 1, 2 iterator = enumerate(text) state = OUTSIDE parts = [] current_part = '' part_start_pos = 0 for i, c in iterator: # XXX: correctly handle empty strings. if state == OUTSIDE: if c.isspace(): # New part. if current_part: parts.append(current_part) part_start_pos = i + 1 current_part = '' elif c == '"': state = IN_DOUBLE elif c == "'": state = IN_SINGLE else: current_part += c elif state == IN_SINGLE: if c == "'": state = OUTSIDE elif c == "\\": next(iterator) current_part += c else: current_part += c elif state == IN_DOUBLE: if c == '"': state = OUTSIDE elif c == "\\": next(iterator) current_part += c else: current_part += c parts.append(current_part) return parts, part_start_pos
[ "def", "parse", "(", "cls", ",", "text", ")", ":", "OUTSIDE", ",", "IN_DOUBLE", ",", "IN_SINGLE", "=", "0", ",", "1", ",", "2", "iterator", "=", "enumerate", "(", "text", ")", "state", "=", "OUTSIDE", "parts", "=", "[", "]", "current_part", "=", "'...
Parse the given text. Returns a tuple: (list_of_parts, start_pos_of_the_last_part).
[ "Parse", "the", "given", "text", ".", "Returns", "a", "tuple", ":", "(", "list_of_parts", "start_pos_of_the_last_part", ")", "." ]
python
train
rflamary/POT
ot/stochastic.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/stochastic.py#L550-L642
def sgd_entropic_regularization(a, b, M, reg, batch_size, numItermax, lr): ''' Compute the sgd algorithm to solve the regularized discrete measures optimal transport dual problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) Parameters ---------- a : np.ndarray(ns,) source measure b : np.ndarray(nt,) target measure M : np.ndarray(ns, nt) cost matrix reg : float number Regularization term > 0 batch_size : int number size of the batch numItermax : int number number of iteration lr : float number learning rate Returns ------- alpha : np.ndarray(ns,) dual variable beta : np.ndarray(nt,) dual variable Examples -------- >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 20000 >>> lr = 0.1 >>> batch_size = 3 >>> log = True >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> sgd_dual_pi, log = stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax, lr, log) >>> print(log['alpha'], log['beta']) >>> print(sgd_dual_pi) References ---------- [Seguy et al., 2018] : International Conference on Learning Representation (2018), arXiv preprint arxiv:1711.02283. 
''' n_source = np.shape(M)[0] n_target = np.shape(M)[1] cur_alpha = np.zeros(n_source) cur_beta = np.zeros(n_target) for cur_iter in range(numItermax): k = np.sqrt(cur_iter + 1) batch_alpha = np.random.choice(n_source, batch_size, replace=False) batch_beta = np.random.choice(n_target, batch_size, replace=False) update_alpha, update_beta = batch_grad_dual(a, b, M, reg, cur_alpha, cur_beta, batch_size, batch_alpha, batch_beta) cur_alpha[batch_alpha] += (lr / k) * update_alpha[batch_alpha] cur_beta[batch_beta] += (lr / k) * update_beta[batch_beta] return cur_alpha, cur_beta
[ "def", "sgd_entropic_regularization", "(", "a", ",", "b", ",", "M", ",", "reg", ",", "batch_size", ",", "numItermax", ",", "lr", ")", ":", "n_source", "=", "np", ".", "shape", "(", "M", ")", "[", "0", "]", "n_target", "=", "np", ".", "shape", "(", ...
Compute the sgd algorithm to solve the regularized discrete measures optimal transport dual problem The function solves the following optimization problem: .. math:: \gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma) s.t. \gamma 1 = a \gamma^T 1= b \gamma \geq 0 Where : - M is the (ns,nt) metric cost matrix - :math:`\Omega` is the entropic regularization term with :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})` - a and b are source and target weights (sum to 1) Parameters ---------- a : np.ndarray(ns,) source measure b : np.ndarray(nt,) target measure M : np.ndarray(ns, nt) cost matrix reg : float number Regularization term > 0 batch_size : int number size of the batch numItermax : int number number of iteration lr : float number learning rate Returns ------- alpha : np.ndarray(ns,) dual variable beta : np.ndarray(nt,) dual variable Examples -------- >>> n_source = 7 >>> n_target = 4 >>> reg = 1 >>> numItermax = 20000 >>> lr = 0.1 >>> batch_size = 3 >>> log = True >>> a = ot.utils.unif(n_source) >>> b = ot.utils.unif(n_target) >>> rng = np.random.RandomState(0) >>> X_source = rng.randn(n_source, 2) >>> Y_target = rng.randn(n_target, 2) >>> M = ot.dist(X_source, Y_target) >>> sgd_dual_pi, log = stochastic.solve_dual_entropic(a, b, M, reg, batch_size, numItermax, lr, log) >>> print(log['alpha'], log['beta']) >>> print(sgd_dual_pi) References ---------- [Seguy et al., 2018] : International Conference on Learning Representation (2018), arXiv preprint arxiv:1711.02283.
[ "Compute", "the", "sgd", "algorithm", "to", "solve", "the", "regularized", "discrete", "measures", "optimal", "transport", "dual", "problem" ]
python
train
draperjames/qtpandas
qtpandas/models/DataFrameModel.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/models/DataFrameModel.py#L36-L47
def read_file(filepath, **kwargs): """ Read a data file into a DataFrameModel. :param filepath: The rows/columns filepath to read. :param kwargs: xls/x files - see pandas.read_excel(**kwargs) .csv/.txt/etc - see pandas.read_csv(**kwargs) :return: DataFrameModel """ return DataFrameModel(dataFrame=superReadFile(filepath, **kwargs), filePath=filepath)
[ "def", "read_file", "(", "filepath", ",", "*", "*", "kwargs", ")", ":", "return", "DataFrameModel", "(", "dataFrame", "=", "superReadFile", "(", "filepath", ",", "*", "*", "kwargs", ")", ",", "filePath", "=", "filepath", ")" ]
Read a data file into a DataFrameModel. :param filepath: The rows/columns filepath to read. :param kwargs: xls/x files - see pandas.read_excel(**kwargs) .csv/.txt/etc - see pandas.read_csv(**kwargs) :return: DataFrameModel
[ "Read", "a", "data", "file", "into", "a", "DataFrameModel", "." ]
python
train
soravux/scoop
scoop/broker/brokerzmq.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/broker/brokerzmq.py#L173-L182
def processConfig(self, worker_config): """Update the pool configuration with a worker configuration. """ self.config['headless'] |= worker_config.get("headless", False) if self.config['headless']: # Launch discovery process if not self.discovery_thread: self.discovery_thread = discovery.Advertise( port=",".join(str(a) for a in self.getPorts()), )
[ "def", "processConfig", "(", "self", ",", "worker_config", ")", ":", "self", ".", "config", "[", "'headless'", "]", "|=", "worker_config", ".", "get", "(", "\"headless\"", ",", "False", ")", "if", "self", ".", "config", "[", "'headless'", "]", ":", "# La...
Update the pool configuration with a worker configuration.
[ "Update", "the", "pool", "configuration", "with", "a", "worker", "configuration", "." ]
python
train
django-danceschool/django-danceschool
danceschool/core/mixins.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/mixins.py#L446-L457
def get_ordering(self, reverseTime=False): ''' This method provides the tuple for ordering of querysets. However, this will only work if the annotations generated by the get_annotations() method above have been added to the queryset. Otherwise, the use of this ordering tuple will fail because the appropriate column names will not exist to sort with. ''' # Reverse ordering can be optionally specified in the view class definition. reverseTime = getattr(self,'reverse_time_ordering',reverseTime) timeParameter = '-startTime' if reverseTime is True else 'startTime' return ('nullParam', 'paramOne', 'paramTwo', timeParameter)
[ "def", "get_ordering", "(", "self", ",", "reverseTime", "=", "False", ")", ":", "# Reverse ordering can be optionally specified in the view class definition.\r", "reverseTime", "=", "getattr", "(", "self", ",", "'reverse_time_ordering'", ",", "reverseTime", ")", "timeParame...
This method provides the tuple for ordering of querysets. However, this will only work if the annotations generated by the get_annotations() method above have been added to the queryset. Otherwise, the use of this ordering tuple will fail because the appropriate column names will not exist to sort with.
[ "This", "method", "provides", "the", "tuple", "for", "ordering", "of", "querysets", ".", "However", "this", "will", "only", "work", "if", "the", "annotations", "generated", "by", "the", "get_annotations", "()", "method", "above", "have", "been", "added", "to",...
python
train
saltstack/salt
salt/modules/boto_s3.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_s3.py#L135-L169
def upload_file( source, name, extra_args=None, region=None, key=None, keyid=None, profile=None, ): ''' Upload a local file as an S3 object. CLI Example: .. code-block:: bash salt myminion boto_s3.upload_file \\ /path/to/local/file \\ my_bucket/path/to/object \\ region=us-east-1 \\ key=key \\ keyid=keyid \\ profile=profile \\ ''' bucket, _, s3_key = name.partition('/') conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.upload_file(source, bucket, s3_key, ExtraArgs=extra_args) except boto3.exceptions.S3UploadFailedError as e: return {'error': __utils__['boto3.get_error'](e)} log.info('S3 object uploaded to %s', name) return {'result': True}
[ "def", "upload_file", "(", "source", ",", "name", ",", "extra_args", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", ")", ":", "bucket", ",", "_", ",", "s3_key", "=", ...
Upload a local file as an S3 object. CLI Example: .. code-block:: bash salt myminion boto_s3.upload_file \\ /path/to/local/file \\ my_bucket/path/to/object \\ region=us-east-1 \\ key=key \\ keyid=keyid \\ profile=profile \\
[ "Upload", "a", "local", "file", "as", "an", "S3", "object", "." ]
python
train
stevelittlefish/littlefish
littlefish/pager.py
https://github.com/stevelittlefish/littlefish/blob/6deee7f81fab30716c743efe2e94e786c6e17016/littlefish/pager.py#L171-L179
def last_item_number(self): """ :return: The last "item number", used when displaying messages to the user like "Displaying items 1 to 10 of 123" - in this example 10 would be returned """ n = self.first_item_number + self.page_size - 1 if n > self.total_items: return self.total_items return n
[ "def", "last_item_number", "(", "self", ")", ":", "n", "=", "self", ".", "first_item_number", "+", "self", ".", "page_size", "-", "1", "if", "n", ">", "self", ".", "total_items", ":", "return", "self", ".", "total_items", "return", "n" ]
:return: The last "item number", used when displaying messages to the user like "Displaying items 1 to 10 of 123" - in this example 10 would be returned
[ ":", "return", ":", "The", "last", "item", "number", "used", "when", "displaying", "messages", "to", "the", "user", "like", "Displaying", "items", "1", "to", "10", "of", "123", "-", "in", "this", "example", "10", "would", "be", "returned" ]
python
test
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/model.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/model.py#L940-L970
def process_attrib(element, msg): '''process_attrib High-level api: Delete four attributes from an ElementTree node if they exist: operation, insert, value and key. Then a new attribute 'diff' is added. Parameters ---------- element : `Element` A node needs to be looked at. msg : `str` Message to be added in attribute 'diff'. Returns ------- Element Argument 'element' is returned after processing. ''' attrib_required = ['type', 'access', 'mandatory'] for node in element.iter(): for attrib in node.attrib.keys(): if attrib not in attrib_required: del node.attrib[attrib] if msg: node.attrib['diff'] = msg return element
[ "def", "process_attrib", "(", "element", ",", "msg", ")", ":", "attrib_required", "=", "[", "'type'", ",", "'access'", ",", "'mandatory'", "]", "for", "node", "in", "element", ".", "iter", "(", ")", ":", "for", "attrib", "in", "node", ".", "attrib", "....
process_attrib High-level api: Delete four attributes from an ElementTree node if they exist: operation, insert, value and key. Then a new attribute 'diff' is added. Parameters ---------- element : `Element` A node needs to be looked at. msg : `str` Message to be added in attribute 'diff'. Returns ------- Element Argument 'element' is returned after processing.
[ "process_attrib" ]
python
train
boriel/zxbasic
zxbparser.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L196-L208
def make_builtin(lineno, fname, operands, func=None, type_=None): """ Wrapper: returns a Builtin function node. Can be a Symbol, tuple or list of Symbols If operand is an iterable, they will be expanded. """ if operands is None: operands = [] assert isinstance(operands, Symbol) or isinstance(operands, tuple) or isinstance(operands, list) # TODO: In the future, builtin functions will be implemented in an external library, like POINT or ATTR __DEBUG__('Creating BUILTIN "{}"'.format(fname), 1) if not isinstance(operands, collections.Iterable): operands = [operands] return symbols.BUILTIN.make_node(lineno, fname, func, type_, *operands)
[ "def", "make_builtin", "(", "lineno", ",", "fname", ",", "operands", ",", "func", "=", "None", ",", "type_", "=", "None", ")", ":", "if", "operands", "is", "None", ":", "operands", "=", "[", "]", "assert", "isinstance", "(", "operands", ",", "Symbol", ...
Wrapper: returns a Builtin function node. Can be a Symbol, tuple or list of Symbols If operand is an iterable, they will be expanded.
[ "Wrapper", ":", "returns", "a", "Builtin", "function", "node", ".", "Can", "be", "a", "Symbol", "tuple", "or", "list", "of", "Symbols", "If", "operand", "is", "an", "iterable", "they", "will", "be", "expanded", "." ]
python
train
molmod/molmod
molmod/minimizer.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/minimizer.py#L820-L835
def get_header(self): """Returns the header for screen logging of the minimization""" result = " " if self.step_rms is not None: result += " Step RMS" if self.step_max is not None: result += " Step MAX" if self.grad_rms is not None: result += " Grad RMS" if self.grad_max is not None: result += " Grad MAX" if self.rel_grad_rms is not None: result += " Grad/F RMS" if self.rel_grad_max is not None: result += " Grad/F MAX" return result
[ "def", "get_header", "(", "self", ")", ":", "result", "=", "\" \"", "if", "self", ".", "step_rms", "is", "not", "None", ":", "result", "+=", "\" Step RMS\"", "if", "self", ".", "step_max", "is", "not", "None", ":", "result", "+=", "\" Step MAX\"", ...
Returns the header for screen logging of the minimization
[ "Returns", "the", "header", "for", "screen", "logging", "of", "the", "minimization" ]
python
train
stevearc/dynamo3
dynamo3/fields.py
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/fields.py#L174-L176
def all(cls, name, hash_key, range_key=None, throughput=None): """ Create an index that projects all attributes """ return cls(cls.ALL, name, hash_key, range_key, throughput=throughput)
[ "def", "all", "(", "cls", ",", "name", ",", "hash_key", ",", "range_key", "=", "None", ",", "throughput", "=", "None", ")", ":", "return", "cls", "(", "cls", ".", "ALL", ",", "name", ",", "hash_key", ",", "range_key", ",", "throughput", "=", "through...
Create an index that projects all attributes
[ "Create", "an", "index", "that", "projects", "all", "attributes" ]
python
train
PyCQA/pylint
pylint/config.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/config.py#L240-L250
def _validate(value, optdict, name=""): """return a validated value for an option according to its type optional argument name is only used for error message formatting """ try: _type = optdict["type"] except KeyError: # FIXME return value return _call_validator(_type, optdict, name, value)
[ "def", "_validate", "(", "value", ",", "optdict", ",", "name", "=", "\"\"", ")", ":", "try", ":", "_type", "=", "optdict", "[", "\"type\"", "]", "except", "KeyError", ":", "# FIXME", "return", "value", "return", "_call_validator", "(", "_type", ",", "opt...
return a validated value for an option according to its type optional argument name is only used for error message formatting
[ "return", "a", "validated", "value", "for", "an", "option", "according", "to", "its", "type" ]
python
test
rosenbrockc/fortpy
fortpy/scripts/analyze.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L185-L202
def _redirect_split(self, args): """Determines whether the specified shell args have a redirect specification for the output. Returns a tuple of (usable args, filename, append). """ if ">>" in args: append = True usable, filename = args.split(">>") elif ">" in args: append = False usable, filename = args.split(">") else: append = filename = None usable = args if filename is not None: return (usable, filename.strip(), append) else: return (usable, filename, append)
[ "def", "_redirect_split", "(", "self", ",", "args", ")", ":", "if", "\">>\"", "in", "args", ":", "append", "=", "True", "usable", ",", "filename", "=", "args", ".", "split", "(", "\">>\"", ")", "elif", "\">\"", "in", "args", ":", "append", "=", "Fals...
Determines whether the specified shell args have a redirect specification for the output. Returns a tuple of (usable args, filename, append).
[ "Determines", "whether", "the", "specified", "shell", "args", "have", "a", "redirect", "specification", "for", "the", "output", ".", "Returns", "a", "tuple", "of", "(", "usable", "args", "filename", "append", ")", "." ]
python
train
ambitioninc/newrelic-api
newrelic_api/base.py
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/base.py#L94-L112
def _delete(self, *args, **kwargs): """ A wrapper for deleting things :returns: The response of your delete :rtype: dict :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>` if there is an error from New Relic """ response = requests.delete(*args, **kwargs) if not response.ok: raise NewRelicAPIServerException('{}: {}'.format(response.status_code, response.text)) if response.text: return response.json() return {}
[ "def", "_delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response", "=", "requests", ".", "delete", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "response", ".", "ok", ":", "raise", "NewRelicAPIServerExcept...
A wrapper for deleting things :returns: The response of your delete :rtype: dict :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>` if there is an error from New Relic
[ "A", "wrapper", "for", "deleting", "things" ]
python
train
markovmodel/PyEMMA
pyemma/coordinates/transform/vamp.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/transform/vamp.py#L264-L301
def _diagonalize(self): """Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map. """ L0 = spd_inv_split(self.C00, epsilon=self.epsilon) self._rank0 = L0.shape[1] if L0.ndim == 2 else 1 Lt = spd_inv_split(self.Ctt, epsilon=self.epsilon) self._rankt = Lt.shape[1] if Lt.ndim == 2 else 1 W = np.dot(L0.T, self.C0t).dot(Lt) from scipy.linalg import svd A, s, BT = svd(W, compute_uv=True, lapack_driver='gesvd') self._singular_values = s # don't pass any values in the argument list that call _diagonalize again!!! m = VAMPModel._dimension(self._rank0, self._rankt, self.dim, self._singular_values) U = np.dot(L0, A[:, :m]) V = np.dot(Lt, BT[:m, :].T) # scale vectors if self.scaling is not None: U *= s[np.newaxis, 0:m] # scaled left singular functions induce a kinetic map V *= s[np.newaxis, 0:m] # scaled right singular functions induce a kinetic map wrt. backward propagator self._U = U self._V = V self._svd_performed = True
[ "def", "_diagonalize", "(", "self", ")", ":", "L0", "=", "spd_inv_split", "(", "self", ".", "C00", ",", "epsilon", "=", "self", ".", "epsilon", ")", "self", ".", "_rank0", "=", "L0", ".", "shape", "[", "1", "]", "if", "L0", ".", "ndim", "==", "2"...
Performs SVD on covariance matrices and save left, right singular vectors and values in the model. Parameters ---------- scaling : None or string, default=None Scaling to be applied to the VAMP modes upon transformation * None: no scaling will be applied, variance of the singular functions is 1 * 'kinetic map' or 'km': singular functions are scaled by singular value. Note that only the left singular functions induce a kinetic map.
[ "Performs", "SVD", "on", "covariance", "matrices", "and", "save", "left", "right", "singular", "vectors", "and", "values", "in", "the", "model", "." ]
python
train
apache/incubator-heron
third_party/python/cpplint/cpplint.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/third_party/python/cpplint/cpplint.py#L6308-L6407
def ParseArguments(args): """Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments: Returns: The list of filenames to lint. """ try: (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'repository=', 'linelength=', 'extensions=', 'exclude=', 'headers=', 'quiet', 'recursive']) except getopt.GetoptError: PrintUsage('Invalid arguments.') verbosity = _VerboseLevel() output_format = _OutputFormat() filters = '' counting_style = '' recursive = False for (opt, val) in opts: if opt == '--help': PrintUsage(None) elif opt == '--output': if val not in ('emacs', 'vs7', 'eclipse', 'junit'): PrintUsage('The only allowed output formats are emacs, vs7, eclipse ' 'and junit.') output_format = val elif opt == '--verbose': verbosity = int(val) elif opt == '--filter': filters = val if not filters: PrintCategories() elif opt == '--counting': if val not in ('total', 'toplevel', 'detailed'): PrintUsage('Valid counting options are total, toplevel, and detailed') counting_style = val elif opt == '--root': global _root _root = val elif opt == '--repository': global _repository _repository = val elif opt == '--linelength': global _line_length try: _line_length = int(val) except ValueError: PrintUsage('Line length must be digits.') elif opt == '--exclude': global _excludes if not _excludes: _excludes = set() _excludes.update(glob.glob(val)) elif opt == '--extensions': global _valid_extensions try: _valid_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be comma seperated list.') elif opt == '--headers': global _header_extensions try: _header_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be comma seperated list.') elif opt == '--recursive': recursive = True elif opt == '--quiet': global _quiet _quiet = True if not filenames: PrintUsage('No files were specified.') if recursive: filenames 
= _ExpandDirectories(filenames) if _excludes: filenames = _FilterExcludedFiles(filenames) _SetOutputFormat(output_format) _SetVerboseLevel(verbosity) _SetFilters(filters) _SetCountingStyle(counting_style) return filenames
[ "def", "ParseArguments", "(", "args", ")", ":", "try", ":", "(", "opts", ",", "filenames", ")", "=", "getopt", ".", "getopt", "(", "args", ",", "''", ",", "[", "'help'", ",", "'output='", ",", "'verbose='", ",", "'counting='", ",", "'filter='", ",", ...
Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments: Returns: The list of filenames to lint.
[ "Parses", "the", "command", "line", "arguments", "." ]
python
valid
allianceauth/allianceauth
allianceauth/timerboard/views.py
https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/timerboard/views.py#L58-L64
def get_form_kwargs(self): """ Inject the request user into the kwargs passed to the form """ kwargs = super(AddUpdateMixin, self).get_form_kwargs() kwargs.update({'user': self.request.user}) return kwargs
[ "def", "get_form_kwargs", "(", "self", ")", ":", "kwargs", "=", "super", "(", "AddUpdateMixin", ",", "self", ")", ".", "get_form_kwargs", "(", ")", "kwargs", ".", "update", "(", "{", "'user'", ":", "self", ".", "request", ".", "user", "}", ")", "return...
Inject the request user into the kwargs passed to the form
[ "Inject", "the", "request", "user", "into", "the", "kwargs", "passed", "to", "the", "form" ]
python
train
StellarCN/py-stellar-base
stellar_base/operation.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/operation.py#L1054-L1071
def to_xdr_object(self): """Creates an XDR Operation object that represents this :class:`ManageData`. """ data_name = bytearray(self.data_name, encoding='utf-8') if self.data_value is not None: if isinstance(self.data_value, bytes): data_value = [bytearray(self.data_value)] else: data_value = [bytearray(self.data_value, 'utf-8')] else: data_value = [] manage_data_op = Xdr.types.ManageDataOp(data_name, data_value) self.body.type = Xdr.const.MANAGE_DATA self.body.manageDataOp = manage_data_op return super(ManageData, self).to_xdr_object()
[ "def", "to_xdr_object", "(", "self", ")", ":", "data_name", "=", "bytearray", "(", "self", ".", "data_name", ",", "encoding", "=", "'utf-8'", ")", "if", "self", ".", "data_value", "is", "not", "None", ":", "if", "isinstance", "(", "self", ".", "data_valu...
Creates an XDR Operation object that represents this :class:`ManageData`.
[ "Creates", "an", "XDR", "Operation", "object", "that", "represents", "this", ":", "class", ":", "ManageData", "." ]
python
train
wummel/linkchecker
linkcheck/configuration/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/configuration/__init__.py#L637-L647
def resolve_indirect (data, key, splithosts=False): """Replace name of environment variable with its value.""" value = data[key] env_value = os.environ.get(value) if env_value: if splithosts: data[key] = split_hosts(env_value) else: data[key] = env_value else: del data[key]
[ "def", "resolve_indirect", "(", "data", ",", "key", ",", "splithosts", "=", "False", ")", ":", "value", "=", "data", "[", "key", "]", "env_value", "=", "os", ".", "environ", ".", "get", "(", "value", ")", "if", "env_value", ":", "if", "splithosts", "...
Replace name of environment variable with its value.
[ "Replace", "name", "of", "environment", "variable", "with", "its", "value", "." ]
python
train
cltk/cltk
cltk/utils/file_operations.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/utils/file_operations.py#L13-L27
def make_cltk_path(*fp_list): """Take arbitrary number of str arguments (not list) and return expanded, absolute path to a user's cltk_data dir. Example: In [8]: make_cltk_path('greek', 'model', 'greek_models_cltk') Out[8]: '/Users/kyle/cltk_data/greek/model/greek_models_cltk' :type fp_list: str positional arguments :param: : fp_list tokens to join together beginning from cltk_root folder :rtype: str """ home = os.path.expanduser('~') return os.path.join(home, 'cltk_data', *fp_list)
[ "def", "make_cltk_path", "(", "*", "fp_list", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "return", "os", ".", "path", ".", "join", "(", "home", ",", "'cltk_data'", ",", "*", "fp_list", ")" ]
Take arbitrary number of str arguments (not list) and return expanded, absolute path to a user's cltk_data dir. Example: In [8]: make_cltk_path('greek', 'model', 'greek_models_cltk') Out[8]: '/Users/kyle/cltk_data/greek/model/greek_models_cltk' :type fp_list: str positional arguments :param: : fp_list tokens to join together beginning from cltk_root folder :rtype: str
[ "Take", "arbitrary", "number", "of", "str", "arguments", "(", "not", "list", ")", "and", "return", "expanded", "absolute", "path", "to", "a", "user", "s", "cltk_data", "dir", "." ]
python
train
ianmiell/shutit
shutit_pexpect.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_pexpect.py#L2656-L2933
def send(self, sendspec): """Send string as a shell command, and wait until the expected output is seen (either a string or any from a list of strings) before returning. The expected string will default to the currently-set default expected string (see get_default_shutit_pexpect_session_expect) Returns the pexpect return value (ie which expected string in the list matched) @return: The pexpect return value (ie which expected string in the list matched). If return is -1, the task was backgrounded. See also multisend. @rtype: int """ shutit = self.shutit shutit.log('In session: ' + self.pexpect_session_id + ', trying to send: ' + str(sendspec.send), level=logging.DEBUG) if self._check_blocked(sendspec): shutit.log('In send for ' + str(sendspec.send) + ', check_blocked called and returned True.', level=logging.DEBUG) # _check_blocked will add to the list of background tasks and handle dupes, so leave there. return -1 shutit = self.shutit cfg = shutit.cfg # Set up what we expect. sendspec.expect = sendspec.expect or self.default_expect assert sendspec.send is not None, shutit_util.print_debug() if sendspec.send.strip() == '': sendspec.fail_on_empty_before=False sendspec.check_exit=False if isinstance(sendspec.expect, dict): return self.multisend(ShutItSendSpec(self, send=sendspec.send, send_dict=sendspec.expect, expect=shutit.get_default_shutit_pexpect_session_expect(), timeout=sendspec.timeout, check_exit=sendspec.check_exit, fail_on_empty_before=sendspec.fail_on_empty_before, record_command=sendspec.record_command, exit_values=sendspec.exit_values, echo=sendspec.echo, note=sendspec.note, secret=sendspec.secret, check_sudo=sendspec.check_sudo, nonewline=sendspec.nonewline, loglevel=sendspec.loglevel)) # Before gathering expect, detect whether this is a sudo command and act accordingly. 
command_list = sendspec.send.strip().split() # If there is a first command, there is a sudo in there (we ignore # whether it's quoted in the command), and we do not have sudo rights # cached... # TODO: check for sudo in pipelines, eg 'cmd | sudo' or 'cmd |sudo' but not 'echo " sudo "' if sendspec.check_sudo and command_list and command_list[0] == 'sudo' and not self.check_sudo(): sudo_pass = self.get_sudo_pass_if_needed(shutit) # Turn expect into a dict. return self.multisend(ShutItSendSpec(self, send=sendspec.send, send_dict={'assword':[sudo_pass, True]}, expect=shutit.get_default_shutit_pexpect_session_expect(), timeout=sendspec.timeout, check_exit=sendspec.check_exit, fail_on_empty_before=sendspec.fail_on_empty_before, record_command=sendspec.record_command, exit_values=sendspec.exit_values, echo=sendspec.echo, note=sendspec.note, check_sudo=False, nonewline=sendspec.nonewline, loglevel=sendspec.loglevel)) shutit.handle_note(sendspec.note, command=str(sendspec.send), training_input=str(sendspec.send)) if sendspec.timeout is None: sendspec.timeout = 3600 sendspec.echo = shutit.get_echo_override(sendspec.echo) # Handle OSX to get the GNU version of the command if sendspec.assume_gnu: sendspec.send = shutit.get_send_command(sendspec.send) # If check_exit is not passed in # - if the expect matches the default, use the default check exit # - otherwise, default to doing the check if sendspec.check_exit is None: # If we are in video mode, ignore exit value if (shutit.build['video'] != -1 or shutit.build['video'] is True) or shutit.build['training'] or shutit.build['walkthrough'] or shutit.build['exam']: sendspec.check_exit = False elif sendspec.expect == shutit.get_default_shutit_pexpect_session_expect(): sendspec.check_exit = shutit.get_default_shutit_pexpect_session_check_exit() else: # If expect given doesn't match the defaults and no argument # was passed in (ie check_exit was passed in as None), set # check_exit to true iff it matches a prompt. 
expect_matches_prompt = False for prompt in shutit.expect_prompts: if prompt == sendspec.expect: expect_matches_prompt = True if not expect_matches_prompt: sendspec.check_exit = False else: sendspec.check_exit = True # Determine whether we record this command. ok_to_record = False if not sendspec.echo and sendspec.record_command is None: sendspec.record_command = False if sendspec.record_command is None or sendspec.record_command: ok_to_record = True for i in cfg.keys(): if isinstance(cfg[i], dict): for j in cfg[i].keys(): if (j == 'password' or j == 'passphrase') and cfg[i][j] == sendspec.send: shutit.build['shutit_command_history'].append ('#redacted command, password') ok_to_record = False break if not ok_to_record or sendspec.send in shutit_global.shutit_global_object.secret_words_set: sendspec.secret = True break if ok_to_record: shutit.build['shutit_command_history'].append(sendspec.send) # Log - tho not if secret. if sendspec.send != None: shutit.log('================================================================================', level=logging.DEBUG) send_and_expect_summary_msg = '' if not sendspec.echo and not sendspec.secret: send_and_expect_summary_msg += 'Sending: ' + sendspec.send elif not sendspec.echo and sendspec.secret: send_and_expect_summary_msg += 'Sending: ' + sendspec.send if not sendspec.secret: send_and_expect_summary_msg += 'Sending>>>' + sendspec.send + '<<<' else: send_and_expect_summary_msg += 'Sending>>>[SECRET]<<<' send_and_expect_summary_msg += ', expecting>>>' + str(sendspec.expect) + '<<<' shutit.log(send_and_expect_summary_msg, level=logging.DEBUG) while sendspec.retry > 0: if sendspec.escape: escaped_str = "eval $'" _count = 7 for char in sendspec.send: if char in string.ascii_letters: escaped_str += char _count += 1 else: escaped_str += shutit_util.get_wide_hex(char) _count += 4 if _count > shutit_global.shutit_global_object.line_limit: # The newline here is deliberate! 
escaped_str += r"""'\ $'""" _count = 0 escaped_str += "'" if sendspec.secret: shutit.log('The string was sent safely.', level=logging.DEBUG) else: shutit.log('This string was sent safely: ' + sendspec.send, level=logging.DEBUG) string_to_send = escaped_str else: string_to_send = sendspec.send if string_to_send is not None: if len(string_to_send) > shutit_global.shutit_global_object.line_limit: fname = self._create_command_file(sendspec.expect,string_to_send) res = self.send(ShutItSendSpec(self, send=' command source ' + fname, expect=sendspec.expect, timeout=sendspec.timeout, check_exit=sendspec.check_exit, fail_on_empty_before=False, record_command=False, exit_values=sendspec.exit_values, echo=False, escape=False, retry=sendspec.retry, loglevel=sendspec.loglevel, follow_on_commands=sendspec.follow_on_commands, delaybeforesend=sendspec.delaybeforesend, nonewline=sendspec.nonewline, run_in_background=sendspec.run_in_background, ignore_background=True, block_other_commands=sendspec.block_other_commands)) if not self.sendline(ShutItSendSpec(self, send=' rm -f ' + fname, nonewline=sendspec.nonewline, run_in_background=sendspec.run_in_background, echo=False, ignore_background=True)): self.expect(sendspec.expect, searchwindowsize=sendspec.searchwindowsize, maxread=sendspec.maxread) # Before returning, determine whether we are now in a shell by comparing the expect we have with the originally-created 'shell_expect'. 
self.in_shell = sendspec.expect == self.shell_expect return res else: if sendspec.echo: shutit.divert_output(sys.stdout) if not self.sendline(sendspec): expect_res = shutit.expect_allow_interrupt(self.pexpect_child, sendspec.expect, sendspec.timeout) else: expect_res = -1 if sendspec.echo: shutit.divert_output(None) else: expect_res = shutit.expect_allow_interrupt(self.pexpect_child, sendspec.expect, sendspec.timeout) if isinstance(self.pexpect_child.after, type) or isinstance(self.pexpect_child.before, type): shutit.log('End of pexpect session detected, bailing.', level=logging.CRITICAL) shutit_global.shutit_global_object.handle_exit(exit_code=1) # Massage the output for summary sending. logged_output = ''.join((self.pexpect_child.before + str(self.pexpect_child.after)).split('\n')).replace(sendspec.send,'',1).replace('\r','')[:160] + ' [...]' if not sendspec.secret: if not sendspec.echo: shutit.log('Output (squashed): ' + logged_output, level=logging.DEBUG) if shutit_global.shutit_global_object.ispy3: shutit.log('pexpect: buffer: ' + str(base64.b64encode(bytes(self.pexpect_child.buffer,shutit_global.shutit_global_object.default_encoding))) + ' before: ' + str(base64.b64encode(bytes(self.pexpect_child.before,shutit_global.shutit_global_object.default_encoding))) + ' after: ' + str(base64.b64encode(bytes(self.pexpect_child.after,shutit_global.shutit_global_object.default_encoding))), level=logging.DEBUG) else: shutit.log('pexpect: buffer: ' + base64.b64encode(self.pexpect_child.buffer) + ' before: ' + base64.b64encode(self.pexpect_child.before) + ' after: ' + base64.b64encode(self.pexpect_child.after), level=logging.DEBUG) else: shutit.log('[Send was marked secret; getting output debug will require code change]', level=logging.DEBUG) if sendspec.fail_on_empty_before: if self.pexpect_child.before.strip() == '': shutit.fail('before empty after sending: ' + str(sendspec.send) + '\n\nThis is expected after some commands that take a password.\nIf so, add 
fail_on_empty_before=False to the send call.\n\nIf that is not the problem, did you send an empty string to a prompt by mistake?', shutit_pexpect_child=self.pexpect_child) # pragma: no cover else: # Don't check exit if fail_on_empty_before is False sendspec.check_exit = False for prompt in shutit.expect_prompts: if prompt == sendspec.expect: # Reset prompt self.setup_prompt('reset_tmp_prompt') self.revert_prompt('reset_tmp_prompt', sendspec.expect) break # Last output - remove the first line, as it is the previous command. # Get this before we check exit. last_output = '\n'.join(self.pexpect_child.before.split('\n')[1:]) if sendspec.check_exit: # store the output if not self.check_last_exit_values(sendspec.send, check_exit=sendspec.check_exit, expect=sendspec.expect, exit_values=sendspec.exit_values, retry=sendspec.retry): if not sendspec.secret: shutit.log('Sending: ' + sendspec.send + ' : failed, retrying', level=logging.DEBUG) else: shutit.log('Send failed, retrying', level=logging.DEBUG) sendspec.retry -= 1 assert sendspec.retry > 0, shutit_util.print_debug() continue break # check self.pexpect_child.before for matches for follow-on commands if sendspec.follow_on_commands is not None: for match in sendspec.follow_on_commands: sendspec.send = sendspec.follow_on_commands[match] if shutit.match_string(last_output, match): # send (with no follow-on commands) self.send(ShutItSendSpec(self, send=sendspec.send, expect=sendspec.expect, timeout=sendspec.timeout, check_exit=sendspec.check_exit, fail_on_empty_before=False, record_command=sendspec.record_command, exit_values=sendspec.exit_values, echo=sendspec.echo, escape=sendspec.escape, retry=sendspec.retry, loglevel=sendspec.loglevel, delaybeforesend=sendspec.delaybeforesend, run_in_background=False, ignore_background=True, block_other_commands=sendspec.block_other_commands)) if shutit.build['step_through']: self.pause_point('pause point: stepping through') if shutit.build['ctrlc_stop']: shutit.build['ctrlc_stop'] = 
False self.pause_point('pause point: interrupted by CTRL-c') shutit.handle_note_after(note=sendspec.note, training_input=str(sendspec.send)) # Before returning, determine whether we are now in a shell by comparing the expect we have with the originally-created 'shell_expect'. self.in_shell = sendspec.expect == self.shell_expect return expect_res
[ "def", "send", "(", "self", ",", "sendspec", ")", ":", "shutit", "=", "self", ".", "shutit", "shutit", ".", "log", "(", "'In session: '", "+", "self", ".", "pexpect_session_id", "+", "', trying to send: '", "+", "str", "(", "sendspec", ".", "send", ")", ...
Send string as a shell command, and wait until the expected output is seen (either a string or any from a list of strings) before returning. The expected string will default to the currently-set default expected string (see get_default_shutit_pexpect_session_expect) Returns the pexpect return value (ie which expected string in the list matched) @return: The pexpect return value (ie which expected string in the list matched). If return is -1, the task was backgrounded. See also multisend. @rtype: int
[ "Send", "string", "as", "a", "shell", "command", "and", "wait", "until", "the", "expected", "output", "is", "seen", "(", "either", "a", "string", "or", "any", "from", "a", "list", "of", "strings", ")", "before", "returning", ".", "The", "expected", "stri...
python
train
sony/nnabla
python/src/nnabla/utils/data_source.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/data_source.py#L181-L272
def _save_cache_to_file(self): ''' Store cache data into file. Data will be stored as hdf5 format, placed at config.. Cache file name format is "cache_START_END.h5" ''' if self._cache_dir is None: raise DataSourceWithFileCacheError( 'Use this class with "with statement" if you don\'t specify cache dir.') cache_data = OrderedDict() def get_data(args): pos = args[0] q = args[1] retry = 1 while True: if retry > 10: logger.log( 99, '_get_current_data() retry count over give up.') raise d = self._data_source._get_data(pos) if d is not None: break logger.log(99, '_get_data() fails. retrying count {}/10.'.format( retry)) retry += 1 q.put((pos, d)) q = Queue() with closing(ThreadPool(processes=self._num_of_threads)) as pool: pool.map(get_data, [(pos, q) for pos in self._cache_positions]) while len(cache_data) < len(self._cache_positions): index, data = q.get() cache_data[index] = data start_position = self.position - len(cache_data) + 1 end_position = self.position cache_filename = os.path.join( self._cache_dir, '{}_{:08d}_{:08d}{}'.format(self._cache_file_name_prefix, start_position, end_position, self._cache_file_format)) data = OrderedDict([(n, []) for n in self._data_source.variables]) for pos in sorted(cache_data): cd = cache_data[pos] for i, n in enumerate(self._data_source.variables): if isinstance(cd[i], numpy.ndarray): d = cd[i] else: d = numpy.array(cd[i]).astype(numpy.float32) data[n].append(d) logger.info('Creating cache file {}'.format(cache_filename)) try: if self._cache_file_format == ".h5": h5 = h5py.File(cache_filename, 'w') for k, v in data.items(): h5.create_dataset(k, data=v) h5.close() else: retry_count = 1 is_create_cache_imcomplete = True while is_create_cache_imcomplete: try: with open(cache_filename, 'wb') as f: for v in data.values(): numpy.save(f, v) is_create_cache_imcomplete = False except OSError: retry_count += 1 if retry_count > 10: raise logger.info( 'Creating cache retry {}/10'.format(retry_count)) except: logger.critical( 'An error 
occurred while creating cache file from dataset.') for k, v in data.items(): size = v[0].shape for d in v: if size != d.shape: logger.critical('The sizes of data "{}" are not the same. ({} != {})'.format( k, size, d.shape)) raise self._cache_file_names.append(cache_filename) self._cache_file_order.append(len(self._cache_file_order)) self._cache_file_data_orders.append(list(range(len(cache_data)))) self._cache_positions = []
[ "def", "_save_cache_to_file", "(", "self", ")", ":", "if", "self", ".", "_cache_dir", "is", "None", ":", "raise", "DataSourceWithFileCacheError", "(", "'Use this class with \"with statement\" if you don\\'t specify cache dir.'", ")", "cache_data", "=", "OrderedDict", "(", ...
Store cache data into file. Data will be stored as hdf5 format, placed at config.. Cache file name format is "cache_START_END.h5"
[ "Store", "cache", "data", "into", "file", "." ]
python
train
PSPC-SPAC-buyandsell/von_agent
von_agent/agent/issuer.py
https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/issuer.py#L139-L178
async def _sync_revoc(self, rr_id: str, rr_size: int = None) -> None:
    """
    Create revoc registry if need be for input revocation registry identifier;
    open and cache tails file reader.

    :param rr_id: revocation registry identifier
    :param rr_size: if new revocation registry necessary, its size
        (default as per _create_rev_reg())
    :raises AbsentCredDef: if no credential definition for the registry's
        cred def id is found on the ledger
    """
    LOGGER.debug('Issuer._sync_revoc >>> rr_id: %s, rr_size: %s', rr_id, rr_size)

    # Split the registry id into its credential-definition id and tag parts.
    (cd_id, tag) = rev_reg_id2cred_def_id__tag(rr_id)
    try:
        await self.get_cred_def(cd_id)
    except AbsentCredDef:
        LOGGER.debug(
            'Issuer._sync_revoc: <!< tails tree %s may be for another ledger; no cred def found on %s',
            self._dir_tails,
            cd_id)
        raise AbsentCredDef('Tails tree {} may be for another ledger; no cred def found on {}'.format(
            self._dir_tails,
            cd_id))
    # Hold the cache lock for the whole check-then-create sequence so that
    # concurrent callers cannot race to create the same registry twice.
    with REVO_CACHE.lock:
        revo_cache_entry = REVO_CACHE.get(rr_id, None)
        tails = None if revo_cache_entry is None else revo_cache_entry.tails
        if tails is None:  # it's a new revocation registry, or not yet set in cache
            try:
                tails = await Tails(self._dir_tails, cd_id, tag).open()
            except AbsentTails:
                await self._create_rev_reg(rr_id, rr_size)  # it's a new revocation registry
                tails = await Tails(self._dir_tails, cd_id, tag).open()  # symlink should exist now
            # Insert a fresh cache entry or patch the tails reader onto the
            # existing one, whichever applies.
            if revo_cache_entry is None:
                REVO_CACHE[rr_id] = RevoCacheEntry(None, tails)
            else:
                REVO_CACHE[rr_id].tails = tails

    LOGGER.debug('Issuer._sync_revoc <<<')
[ "async", "def", "_sync_revoc", "(", "self", ",", "rr_id", ":", "str", ",", "rr_size", ":", "int", "=", "None", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'Issuer._sync_revoc >>> rr_id: %s, rr_size: %s'", ",", "rr_id", ",", "rr_size", ")", "(", "...
Create revoc registry if need be for input revocation registry identifier; open and cache tails file reader. :param rr_id: revocation registry identifier :param rr_size: if new revocation registry necessary, its size (default as per _create_rev_reg())
[ "Create", "revoc", "registry", "if", "need", "be", "for", "input", "revocation", "registry", "identifier", ";", "open", "and", "cache", "tails", "file", "reader", "." ]
python
train
BlueBrain/hpcbench
hpcbench/cli/bennett.py
https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/bennett.py#L19-L25
def main(argv=None):
    """ben-nett entry point"""
    arguments = cli_common(__doc__, argv=argv)
    campaign = BeNet(arguments['CAMPAIGN_FILE'])
    campaign.run()
    if argv is None:
        return None
    # Hand the driver back for programmatic callers (mainly tests).
    return campaign
[ "def", "main", "(", "argv", "=", "None", ")", ":", "arguments", "=", "cli_common", "(", "__doc__", ",", "argv", "=", "argv", ")", "benet", "=", "BeNet", "(", "arguments", "[", "'CAMPAIGN_FILE'", "]", ")", "benet", ".", "run", "(", ")", "if", "argv", ...
ben-nett entry point
[ "ben", "-", "nett", "entry", "point" ]
python
train
bwohlberg/sporco
sporco/cnvrep.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/cnvrep.py#L655-L677
def normalise(v, dimN=2):
    r"""Normalise vectors, corresponding to slices along the specified
    number of initial spatial dimensions of an array, to have unit
    :math:`\ell_2` norm.  The remaining axes enumerate the distinct
    vectors to be normalised.

    Parameters
    ----------
    v : array_like
      Array with components to be normalised
    dimN : int, optional (default 2)
      Number of initial dimensions over which norm should be computed

    Returns
    -------
    vnrm : ndarray
      Normalised array
    """
    sum_axes = tuple(range(dimN))
    norms = np.sqrt((v ** 2).sum(axis=sum_axes, keepdims=True))
    # Leave zero-norm slices untouched instead of dividing by zero.
    norms[norms == 0] = 1.0
    return np.asarray(v / norms, dtype=v.dtype)
[ "def", "normalise", "(", "v", ",", "dimN", "=", "2", ")", ":", "axisN", "=", "tuple", "(", "range", "(", "0", ",", "dimN", ")", ")", "vn", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "v", "**", "2", ",", "axisN", ",", "keepdims", "=...
r"""Normalise vectors, corresponding to slices along specified number of initial spatial dimensions of an array, to have unit :math:`\ell_2` norm. The remaining axes enumerate the distinct vectors to be normalised. Parameters ---------- v : array_like Array with components to be normalised dimN : int, optional (default 2) Number of initial dimensions over which norm should be computed Returns ------- vnrm : ndarray Normalised array
[ "r", "Normalise", "vectors", "corresponding", "to", "slices", "along", "specified", "number", "of", "initial", "spatial", "dimensions", "of", "an", "array", "to", "have", "unit", ":", "math", ":", "\\", "ell_2", "norm", ".", "The", "remaining", "axes", "enum...
python
train
Fuyukai/ConfigMaster
configmaster/ConfigFile.py
https://github.com/Fuyukai/ConfigMaster/blob/8018aa415da55c84edaa8a49664f674758a14edd/configmaster/ConfigFile.py#L81-L90
def apply_defaults(self, other_config):
    """
    Merge default values from *other_config* into this ConfigObject.

    Keys already present on this object win: only missing keys are
    filled in from the defaults.  *other_config* may be another
    instance of this class or a plain mapping of defaults.
    """
    defaults = other_config.config if isinstance(other_config, self.__class__) else other_config
    self.config.load_from_dict(defaults, overwrite=False)
[ "def", "apply_defaults", "(", "self", ",", "other_config", ")", ":", "if", "isinstance", "(", "other_config", ",", "self", ".", "__class__", ")", ":", "self", ".", "config", ".", "load_from_dict", "(", "other_config", ".", "config", ",", "overwrite", "=", ...
Applies default values from a different ConfigObject or ConfigKey object to this ConfigObject. If there are any values in this object that are also in the default object, it will use the values from this object.
[ "Applies", "default", "values", "from", "a", "different", "ConfigObject", "or", "ConfigKey", "object", "to", "this", "ConfigObject", "." ]
python
train
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L600-L627
def Copy(self,
         old_urn,
         new_urn,
         age=NEWEST_TIME,
         limit=None,
         update_timestamps=False):
    """Make a copy of one AFF4 object to a different URN.

    Args:
      old_urn: source URN to copy attributes from.
      new_urn: destination URN; coerced to an RDFURN.
      age: age specification controlling which attribute versions are read.
      limit: maximum number of attribute rows to copy, or None for all.
      update_timestamps: if True, write attributes with fresh timestamps
        instead of preserving the originals.

    Raises:
      ValueError: if update_timestamps is set while reading anything other
        than the newest version (stale values would get new timestamps).
    """
    new_urn = rdfvalue.RDFURN(new_urn)

    if update_timestamps and age != NEWEST_TIME:
        raise ValueError(
            "Can't update timestamps unless reading the latest version.")

    # Gather every matching attribute of the source object, keyed by
    # predicate, keeping (value, timestamp) pairs per predicate.
    values = {}
    for predicate, value, ts in data_store.DB.ResolvePrefix(
        old_urn,
        AFF4_PREFIXES,
        timestamp=self.ParseAgeSpecification(age),
        limit=limit):
      if update_timestamps:
        # A timestamp of None makes the data store assign the write time.
        values.setdefault(predicate, []).append((value, None))
      else:
        values.setdefault(predicate, []).append((value, ts))

    if values:
      with data_store.DB.GetMutationPool() as pool:
        pool.MultiSet(new_urn, values, replace=False)
        # Register the copy under its parent so directory listings find it.
        self._UpdateChildIndex(new_urn, pool)
[ "def", "Copy", "(", "self", ",", "old_urn", ",", "new_urn", ",", "age", "=", "NEWEST_TIME", ",", "limit", "=", "None", ",", "update_timestamps", "=", "False", ")", ":", "new_urn", "=", "rdfvalue", ".", "RDFURN", "(", "new_urn", ")", "if", "update_timesta...
Make a copy of one AFF4 object to a different URN.
[ "Make", "a", "copy", "of", "one", "AFF4", "object", "to", "a", "different", "URN", "." ]
python
train
tanghaibao/jcvi
jcvi/compara/synteny.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L379-L390
def batch_scan(points, xdist=20, ydist=20, N=5):
    """
    runs synteny_scan() per chromosome pair
    """
    grouped = group_hits(points)

    clusters = []
    for pair_key in sorted(grouped):
        clusters.extend(synteny_scan(grouped[pair_key], xdist, ydist, N))

    return clusters
[ "def", "batch_scan", "(", "points", ",", "xdist", "=", "20", ",", "ydist", "=", "20", ",", "N", "=", "5", ")", ":", "chr_pair_points", "=", "group_hits", "(", "points", ")", "clusters", "=", "[", "]", "for", "chr_pair", "in", "sorted", "(", "chr_pair...
runs synteny_scan() per chromosome pair
[ "runs", "synteny_scan", "()", "per", "chromosome", "pair" ]
python
train
LuminosoInsight/langcodes
langcodes/__init__.py
https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L662-L727
def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
    """
    Return a dictionary that describes a given language tag in a specified
    natural language.

    See `language_name` and related methods for more specific versions of this.

    The desired `language` will in fact be matched against the available
    options using the matching technique that this module provides.  For
    example, describing Shavian script in English::

        >>> from pprint import pprint
        >>> shaw = Language.make(script='Shaw').maximize()
        >>> pprint(shaw.describe('en'))
        {'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}

    When there is no localization for the requested language, the result
    falls back on 'und', which just shows the language codes::

        >>> pprint(shaw.describe('lol'))
        {'language': 'en', 'region': 'GB', 'script': 'Shaw'}
    """
    # Each section is (output key, presence flag, localized-name getter);
    # only populated subtags produce an entry, in this fixed order.
    sections = (
        ('language', self.language, self.language_name),
        ('script', self.script, self.script_name),
        ('region', self.region, self.region_name),
        ('variants', self.variants, self.variant_names),
    )
    return {
        key: getter(language, min_score)
        for key, present, getter in sections
        if present
    }
[ "def", "describe", "(", "self", ",", "language", "=", "DEFAULT_LANGUAGE", ",", "min_score", ":", "int", "=", "75", ")", "->", "dict", ":", "names", "=", "{", "}", "if", "self", ".", "language", ":", "names", "[", "'language'", "]", "=", "self", ".", ...
Return a dictionary that describes a given language tag in a specified natural language. See `language_name` and related methods for more specific versions of this. The desired `language` will in fact be matched against the available options using the matching technique that this module provides. We can illustrate many aspects of this by asking for a description of Shavian script (a script devised by author George Bernard Shaw), and where you might find it, in various languages. >>> from pprint import pprint >>> shaw = Language.make(script='Shaw').maximize() >>> pprint(shaw.describe('en')) {'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'} >>> pprint(shaw.describe('fr')) {'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'} >>> pprint(shaw.describe('es')) {'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'} >>> pprint(shaw.describe('pt')) {'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'} >>> pprint(shaw.describe('uk')) {'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'} >>> pprint(shaw.describe('arb')) {'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'} >>> pprint(shaw.describe('th')) {'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'} >>> pprint(shaw.describe('zh-Hans')) {'language': '英语', 'region': '英国', 'script': '萧伯纳式文'} >>> pprint(shaw.describe('zh-Hant')) {'language': '英文', 'region': '英國', 'script': '簫柏納字符'} >>> pprint(shaw.describe('ja')) {'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'} When we don't have a localization for the language, we fall back on 'und', which just shows the language codes. >>> pprint(shaw.describe('lol')) {'language': 'en', 'region': 'GB', 'script': 'Shaw'} Wait, is that a real language? >>> pprint(Language.get('lol').maximize().describe()) {'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
[ "Return", "a", "dictionary", "that", "describes", "a", "given", "language", "tag", "in", "a", "specified", "natural", "language", "." ]
python
train
ibis-project/ibis
ibis/impala/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/impala/client.py#L631-L664
def alter(
    self,
    location=None,
    format=None,
    tbl_properties=None,
    serde_properties=None,
):
    """
    Change setting and parameters of the table.

    Parameters
    ----------
    location : string, optional
      For partitioned tables, you may want the alter_partition function
    format : string, optional
    tbl_properties : dict, optional
    serde_properties : dict, optional

    Returns
    -------
    None (for now)
    """

    def execute_alter(**kwds):
        # Build an ALTER TABLE statement for this table and run it.
        return self._execute(ddl.AlterTable(self._qualified_name, **kwds))

    return self._alter_table_helper(
        execute_alter,
        location=location,
        format=format,
        tbl_properties=tbl_properties,
        serde_properties=serde_properties,
    )
[ "def", "alter", "(", "self", ",", "location", "=", "None", ",", "format", "=", "None", ",", "tbl_properties", "=", "None", ",", "serde_properties", "=", "None", ",", ")", ":", "def", "_run_ddl", "(", "*", "*", "kwds", ")", ":", "stmt", "=", "ddl", ...
Change setting and parameters of the table. Parameters ---------- location : string, optional For partitioned tables, you may want the alter_partition function format : string, optional tbl_properties : dict, optional serde_properties : dict, optional Returns ------- None (for now)
[ "Change", "setting", "and", "parameters", "of", "the", "table", "." ]
python
train
pinax/pinax-cli
pinaxcli/cli.py
https://github.com/pinax/pinax-cli/blob/7dac21907a2ac22a0efd06054ddea56f562efbaf/pinaxcli/cli.py#L27-L30
def list_commands(self, ctx):
    """Override for showing commands in particular order.

    :param ctx: click context
    :return: list of command names, ordered by ``order_manually``
    """
    commands = super(PinaxGroup, self).list_commands(ctx)
    # order_manually may return any iterable; materialize it directly with
    # list() instead of copying element-by-element via a redundant
    # identity comprehension.
    return list(order_manually(commands))
[ "def", "list_commands", "(", "self", ",", "ctx", ")", ":", "commands", "=", "super", "(", "PinaxGroup", ",", "self", ")", ".", "list_commands", "(", "ctx", ")", "return", "[", "cmd", "for", "cmd", "in", "order_manually", "(", "commands", ")", "]" ]
Override for showing commands in particular order
[ "Override", "for", "showing", "commands", "in", "particular", "order" ]
python
train
CyberZHG/keras-transformer
keras_transformer/transformer.py
https://github.com/CyberZHG/keras-transformer/blob/4c42baa030539c62ef5ace92df0408b13f26d928/keras_transformer/transformer.py#L95-L142
def get_encoder_component(name,
                          input_layer,
                          head_num,
                          hidden_dim,
                          attention_activation=None,
                          feed_forward_activation='relu',
                          dropout_rate=0.0,
                          trainable=True):
    """Build one encoder block: multi-head self-attention followed by a
    position-wise feed-forward layer, each wrapped with dropout/residual/norm.

    :param name: Prefix of names for internal layers.
    :param input_layer: Input layer.
    :param head_num: Number of heads in multi-head self-attention.
    :param hidden_dim: Hidden dimension of feed forward layer.
    :param attention_activation: Activation for multi-head self-attention.
    :param feed_forward_activation: Activation for feed-forward layer.
    :param dropout_rate: Dropout rate.
    :param trainable: Whether the layers are trainable.
    :return: Output layer.
    """
    att_name = '%s-MultiHeadSelfAttention' % name
    ffn_name = '%s-FeedForward' % name

    # Self-attention sub-layer (history_only=False: full bidirectional
    # attention, as appropriate for an encoder).
    attended = _wrap_layer(
        name=att_name,
        input_layer=input_layer,
        build_func=attention_builder(
            name=att_name,
            head_num=head_num,
            activation=attention_activation,
            history_only=False,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
    # Feed-forward sub-layer on top of the attention output.
    return _wrap_layer(
        name=ffn_name,
        input_layer=attended,
        build_func=feed_forward_builder(
            name=ffn_name,
            hidden_dim=hidden_dim,
            activation=feed_forward_activation,
            trainable=trainable,
        ),
        dropout_rate=dropout_rate,
        trainable=trainable,
    )
[ "def", "get_encoder_component", "(", "name", ",", "input_layer", ",", "head_num", ",", "hidden_dim", ",", "attention_activation", "=", "None", ",", "feed_forward_activation", "=", "'relu'", ",", "dropout_rate", "=", "0.0", ",", "trainable", "=", "True", ")", ":"...
Multi-head self-attention and feed-forward layer. :param name: Prefix of names for internal layers. :param input_layer: Input layer. :param head_num: Number of heads in multi-head self-attention. :param hidden_dim: Hidden dimension of feed forward layer. :param attention_activation: Activation for multi-head self-attention. :param feed_forward_activation: Activation for feed-forward layer. :param dropout_rate: Dropout rate. :param trainable: Whether the layers are trainable. :return: Output layer.
[ "Multi", "-", "head", "self", "-", "attention", "and", "feed", "-", "forward", "layer", "." ]
python
train
rwl/pylon
pylon/io/matpower.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/matpower.py#L779-L804
def write_bus_data(self, file): """ Writes bus data in MATPOWER format. """ # labels = ["bus_id", "type", "Pd", "Qd", "Gs", "Bs", "area", "Vm", "Va", # "baseKV", "Vmax", "Vmin"] bus_attrs = ["_i", "type", "p_demand", "q_demand", "g_shunt","b_shunt", "area", "v_magnitude", "v_angle", "v_base", "zone", "v_max", "v_min", "p_lmbda", "q_lmbda", "mu_vmin", "mu_vmax"] file.write("\n%%%% bus data\n") file.write("%%\tbus_i\ttype\tPd\tQd\tGs\tBs\tarea\tVm\tVa\tbaseKV" "\tzone\tVmax\tVmin\tlam_P\tlam_Q\tmu_Vmax\tmu_Vmin") file.write("\n%sbus = [\n" % self._prefix) for bus in self.case.buses: vals = [getattr(bus, a) for a in bus_attrs] d = {PQ: 1, PV: 2, REFERENCE: 3, ISOLATED: 4} vals[1] = d[vals[1]] assert len(vals) == 17 file.write("\t%d\t%d\t%g\t%g\t%g\t%g\t%d\t%.8g\t%.8g\t%g\t%d\t%g" "\t%g\t%.4f\t%.4f\t%.4f\t%.4f;\n" % tuple(vals[:])) file.write("];\n")
[ "def", "write_bus_data", "(", "self", ",", "file", ")", ":", "# labels = [\"bus_id\", \"type\", \"Pd\", \"Qd\", \"Gs\", \"Bs\", \"area\", \"Vm\", \"Va\",", "# \"baseKV\", \"Vmax\", \"Vmin\"]", "bus_attrs", "=", "[", "\"_i\"", ",", "\"type\"", ",", "\"p_demand\"", ...
Writes bus data in MATPOWER format.
[ "Writes", "bus", "data", "in", "MATPOWER", "format", "." ]
python
train
chaoss/grimoirelab-elk
grimoire_elk/enriched/enrich.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/enrich.py#L658-L674
def get_enrollment(self, uuid, item_date):
    """ Get the enrollment for the uuid when the item was done """
    # The interval comparisons below need offset-naive datetimes, so
    # normalize an aware datetime to naive UTC first.
    if item_date and item_date.tzinfo:
        item_date = (item_date - item_date.utcoffset()).replace(tzinfo=None)

    enroll = self.unaffiliated_group
    for enrollment in self.get_enrollments(uuid) or []:
        if not item_date:
            # No date given: take the first enrollment found.
            enroll = enrollment.organization.name
            break
        if enrollment.start <= item_date <= enrollment.end:
            enroll = enrollment.organization.name
            break
    return enroll
[ "def", "get_enrollment", "(", "self", ",", "uuid", ",", "item_date", ")", ":", "# item_date must be offset-naive (utc)", "if", "item_date", "and", "item_date", ".", "tzinfo", ":", "item_date", "=", "(", "item_date", "-", "item_date", ".", "utcoffset", "(", ")", ...
Get the enrollment for the uuid when the item was done
[ "Get", "the", "enrollment", "for", "the", "uuid", "when", "the", "item", "was", "done" ]
python
train
Rapptz/discord.py
discord/ext/tasks/__init__.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/tasks/__init__.py#L101-L129
def start(self, *args, **kwargs):
    r"""Starts the internal task in the event loop.

    Parameters
    ------------
    \*args
        The arguments to use.
    \*\*kwargs
        The keyword arguments to use.

    Raises
    --------
    RuntimeError
        A task has already been launched.

    Returns
    ---------
    :class:`asyncio.Task`
        The task that has been created.
    """
    if self._task is not None:
        raise RuntimeError('Task is already launched.')

    # Prepend the injected instance (cog) as the implicit first argument.
    if self._injected is not None:
        args = (self._injected,) + args

    coro = self._loop(*args, **kwargs)
    task = self.loop.create_task(coro)
    self._task = task
    return task
[ "def", "start", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_task", "is", "not", "None", ":", "raise", "RuntimeError", "(", "'Task is already launched.'", ")", "if", "self", ".", "_injected", "is", "not", "None...
r"""Starts the internal task in the event loop. Parameters ------------ \*args The arguments to to use. \*\*kwargs The keyword arguments to use. Raises -------- RuntimeError A task has already been launched. Returns --------- :class:`asyncio.Task` The task that has been created.
[ "r", "Starts", "the", "internal", "task", "in", "the", "event", "loop", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L867-L873
def org_find_members(object_id, input_params=None, always_retry=True, **kwargs):
    """
    Invokes the /org-xxxx/findMembers API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindMembers

    :param object_id: ID of the org (e.g. "org-xxxx").
    :param input_params: Input hash for the API call; defaults to an empty dict.
    :param always_retry: Whether the request is safe to retry unconditionally.
    :returns: Server response from DXHTTPRequest.
    """
    # Bug fix: the original used a mutable default argument (input_params={}),
    # a single dict shared across every call; build a fresh one per call.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/findMembers' % object_id, input_params, always_retry=always_retry, **kwargs)
[ "def", "org_find_members", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/findMembers'", "%", "object_id", ",", "input_params", ",", "always_retry...
Invokes the /org-xxxx/findMembers API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FfindMembers
[ "Invokes", "the", "/", "org", "-", "xxxx", "/", "findMembers", "API", "method", "." ]
python
train
zetaops/zengine
zengine/wf_daemon.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/wf_daemon.py#L76-L87
def clear_queue(self):
    """ clear outs all messages from INPUT_QUEUE_NAME

        Consumes (and discards) every message; blocks until interrupted,
        at which point the worker shuts down cleanly.
    """
    def remove_message(ch, method, properties, body):
        # no_ack=True below means messages are gone once delivered; this
        # print is the only trace of the discarded payload.
        print("Removed message: %s" % body)

    self.input_channel.basic_consume(remove_message, queue=self.INPUT_QUEUE_NAME, no_ack=True)
    try:
        # start_consuming() blocks this thread until interrupted.
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
[ "def", "clear_queue", "(", "self", ")", ":", "def", "remove_message", "(", "ch", ",", "method", ",", "properties", ",", "body", ")", ":", "print", "(", "\"Removed message: %s\"", "%", "body", ")", "self", ".", "input_channel", ".", "basic_consume", "(", "r...
clear outs all messages from INPUT_QUEUE_NAME
[ "clear", "outs", "all", "messages", "from", "INPUT_QUEUE_NAME" ]
python
train
myaooo/pysbrl
pysbrl/utils.py
https://github.com/myaooo/pysbrl/blob/74bba8c6913a7f82e32313108f8c3e025b89d9c7/pysbrl/utils.py#L29-L97
def categorical2pysbrl_data(
        x,
        y,
        data_filename,
        label_filename,
        method='eclat',
        supp=0.05,
        zmin=1,
        zmax=3):
    """
    Run a frequent item mining algorithm to extract candidate rules.

    :param x: 2D np.ndarray, categorical data of shape [n_instances, n_features]
    :param y: 1D np.ndarray, label array of shape [n_instances, ]
    :param data_filename: the path to store data file
    :param label_filename: the path to store label file
    :param method: a str denoting the method to use, default to 'eclat'
    :param supp: the minimum support of a rule (item)
    :param zmin: passed through to the itemset miner (presumably the
        minimum itemset size -- TODO confirm against the fim backend)
    :param zmax: passed through to the itemset miner (presumably the
        maximum itemset size -- TODO confirm against the fim backend)
    :return: list of (features, categories) rules written to the data file
    """
    # Safely cast data types.
    # NOTE(review): np.int is deprecated (removed in NumPy >= 1.24) --
    # confirm the pinned NumPy version or switch to plain `int`.
    x = x.astype(np.int, casting='safe')
    y = y.astype(np.int, casting='safe')

    labels = np.unique(y)
    # Densify label ids to 0..max; assumes labels are non-negative ints.
    labels = np.arange(np.max(labels) + 1)
    # assert max(labels) + 1 == len(labels)
    mine = get_fim_method(method)

    # Mine frequent itemsets separately per class, then merge.
    x_by_labels = []
    for label in labels:
        x_by_labels.append(x[y == label])
    transactions_by_labels = [categorical2transactions(_x) for _x in x_by_labels]
    itemsets = transactions2freqitems(transactions_by_labels, mine, supp=supp, zmin=zmin, zmax=zmax)
    rules = [itemset2feature_categories(itemset) for itemset in itemsets]
    # One boolean mask per rule: which samples satisfy that rule.
    data_by_rule = []
    for features, categories in rules:
        satisfied = rule_satisfied(x, features, categories)
        data_by_rule.append(satisfied)

    # Write data file: header, then per rule "{item,...}" plus a 0/1 bitmap
    # over the samples.
    before_save(data_filename)
    with open(data_filename, 'w') as f:
        f.write('n_items: %d\n' % len(itemsets))
        f.write('n_samples: %d\n' % len(y))
        for itemset, data in zip(itemsets, data_by_rule):
            rule_str = '{' + ','.join(itemset) + '}' + ' '
            f.write(rule_str)
            bit_s = ' '.join(['1' if bit else '0' for bit in data])
            f.write(bit_s)
            f.write('\n')

    # Write label file: per label, a 0/1 bitmap marking samples of that label.
    before_save(label_filename)
    with open(label_filename, 'w') as f:
        f.write('n_items: %d\n' % len(labels))
        f.write('n_samples: %d\n' % len(y))
        for label in labels:
            f.write('{label=%d} ' % label)
            bits = np.equal(y, label)
            bit_s = ' '.join(['1' if bit else '0' for bit in bits])
            f.write(bit_s)
            f.write('\n')
    return rules
[ "def", "categorical2pysbrl_data", "(", "x", ",", "y", ",", "data_filename", ",", "label_filename", ",", "method", "=", "'eclat'", ",", "supp", "=", "0.05", ",", "zmin", "=", "1", ",", "zmax", "=", "3", ")", ":", "# Safely cast data types", "x", "=", "x",...
Run a frequent item mining algorithm to extract candidate rules. :param x: 2D np.ndarray, categorical data of shape [n_instances, n_features] :param y: 1D np.ndarray, label array of shape [n_instances, ] :param data_filename: the path to store data file :param label_filename: the path to store label file :param method: a str denoting the method to use, default to 'eclat' :param supp: the minimum support of a rule (item) :param zmin: :param zmax: :return:
[ "Run", "a", "frequent", "item", "mining", "algorithm", "to", "extract", "candidate", "rules", ".", ":", "param", "x", ":", "2D", "np", ".", "ndarray", "categorical", "data", "of", "shape", "[", "n_instances", "n_features", "]", ":", "param", "y", ":", "1...
python
train
theislab/scvelo
scvelo/tools/velocity.py
https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/velocity.py#L186-L217
def velocity_genes(data, vkey='velocity', min_r2=0.01, highly_variable=None, copy=False):
    """Estimates velocities in a gene-specific manner

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    vkey: `str` (default: `'velocity'`)
        Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
    min_r2: `float` (default: 0.01)
        Minimum threshold for coefficient of determination
    highly_variable: `bool` (default: `None`)
        Whether to include highly variable genes only.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to `adata`.

    Returns
    -------
    Updates `adata` attributes
    velocity_genes: `.var`
        genes to be used for further velocity analysis (velocity graph and embedding)
    """
    adata = data.copy() if copy else data
    genes_key = vkey + '_genes'

    # Compute velocities first if they are not present yet.
    if genes_key not in adata.var.keys():
        velocity(data, vkey)

    # Keep only genes whose fit quality passes the r2 threshold.
    mask = np.array(adata.var[genes_key], dtype=bool)
    adata.var[genes_key] = mask & (adata.var[vkey + '_r2'] > min_r2)

    if highly_variable and 'highly_variable' in adata.var.keys():
        adata.var[genes_key] &= adata.var['highly_variable']

    logg.info('Number of obtained velocity_genes:', np.sum(adata.var[genes_key]))

    return adata if copy else None
[ "def", "velocity_genes", "(", "data", ",", "vkey", "=", "'velocity'", ",", "min_r2", "=", "0.01", ",", "highly_variable", "=", "None", ",", "copy", "=", "False", ")", ":", "adata", "=", "data", ".", "copy", "(", ")", "if", "copy", "else", "data", "if...
Estimates velocities in a gene-specific manner Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. vkey: `str` (default: `'velocity'`) Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`. min_r2: `float` (default: 0.01) Minimum threshold for coefficient of determination highly_variable: `bool` (default: `None`) Whether to include highly variable genes only. copy: `bool` (default: `False`) Return a copy instead of writing to `adata`. Returns ------- Updates `adata` attributes velocity_genes: `.var` genes to be used for further velocity analysis (velocity graph and embedding)
[ "Estimates", "velocities", "in", "a", "gene", "-", "specific", "manner" ]
python
train
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L834-L866
def MultiOpenOrdered(self, urns, **kwargs):
    """Opens many URNs and returns handles in the same order.

    `MultiOpen` can return file handles in arbitrary order. This makes it more
    efficient and in most cases the order does not matter. However, there are
    cases where order is important and this function should be used instead.

    Args:
      urns: A list of URNs to open.
      **kwargs: Same keyword arguments as in `MultiOpen`.

    Returns:
      A list of file-like objects corresponding to the specified URNs.

    Raises:
      IOError: If one of the specified URNs does not correspond to the AFF4
               object.
    """
    precondition.AssertIterableType(urns, rdfvalue.RDFURN)

    # Index the (unordered) results by URN, then re-emit in request order.
    by_urn = {fd.urn: fd for fd in self.MultiOpen(urns, **kwargs)}

    ordered = []
    for urn in urns:
        try:
            ordered.append(by_urn[urn])
        except KeyError:
            raise IOError("No associated AFF4 object for `%s`" % urn)
    return ordered
[ "def", "MultiOpenOrdered", "(", "self", ",", "urns", ",", "*", "*", "kwargs", ")", ":", "precondition", ".", "AssertIterableType", "(", "urns", ",", "rdfvalue", ".", "RDFURN", ")", "urn_filedescs", "=", "{", "}", "for", "filedesc", "in", "self", ".", "Mu...
Opens many URNs and returns handles in the same order. `MultiOpen` can return file handles in arbitrary order. This makes it more efficient and in most cases the order does not matter. However, there are cases where order is important and this function should be used instead. Args: urns: A list of URNs to open. **kwargs: Same keyword arguments as in `MultiOpen`. Returns: A list of file-like objects corresponding to the specified URNs. Raises: IOError: If one of the specified URNs does not correspond to the AFF4 object.
[ "Opens", "many", "URNs", "and", "returns", "handles", "in", "the", "same", "order", "." ]
python
train
F5Networks/f5-common-python
f5/bigip/tm/ltm/pool.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/ltm/pool.py#L86-L91
def modify(self, **patch):
    """Custom modify method to implement monitor parameter formatting."""
    # The monitor value needs BIG-IP-specific formatting before submission.
    if 'monitor' in patch:
        patch['monitor'] = self._format_monitor_parameter(patch['monitor'])
    return super(Pool, self)._modify(**patch)
[ "def", "modify", "(", "self", ",", "*", "*", "patch", ")", ":", "if", "'monitor'", "in", "patch", ":", "value", "=", "self", ".", "_format_monitor_parameter", "(", "patch", "[", "'monitor'", "]", ")", "patch", "[", "'monitor'", "]", "=", "value", "retu...
Custom modify method to implement monitor parameter formatting.
[ "Custom", "modify", "method", "to", "implement", "monitor", "parameter", "formatting", "." ]
python
train
quantmind/pulsar
pulsar/apps/wsgi/wrappers.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/wrappers.py#L344-L351
def get_client_address(self, use_x_forwarded=True):
    """Obtain the client IP address.

    When ``use_x_forwarded`` is true and an ``X-Forwarded-For`` header is
    present, the last hop in the chain is returned; otherwise the socket
    peer address (``REMOTE_ADDR``) is used.
    """
    forwarded = self.environ.get('HTTP_X_FORWARDED_FOR')
    if use_x_forwarded and forwarded:
        # Take the last (closest) hop in the X-Forwarded-For chain.
        return forwarded.rsplit(',', 1)[-1].strip()
    return self.environ['REMOTE_ADDR']
[ "def", "get_client_address", "(", "self", ",", "use_x_forwarded", "=", "True", ")", ":", "xfor", "=", "self", ".", "environ", ".", "get", "(", "'HTTP_X_FORWARDED_FOR'", ")", "if", "use_x_forwarded", "and", "xfor", ":", "return", "xfor", ".", "split", "(", ...
Obtain the client IP address
[ "Obtain", "the", "client", "IP", "address" ]
python
train
slickqa/python-client
slickqa/micromodels/packages/PySO8601/durations.py
https://github.com/slickqa/python-client/blob/1d36b4977cd4140d7d24917cab2b3f82b60739c2/slickqa/micromodels/packages/PySO8601/durations.py#L58-L83
def parse_duration(duration):
    """Attempts to parse an ISO8601 formatted ``duration``.

    Returns a ``datetime.timedelta`` object.

    Raises ``ParseError`` when ``duration`` matches neither the simple nor
    the combined ISO8601 duration pattern.
    """
    duration = str(duration).upper().strip()

    elements = ELEMENTS.copy()

    for pattern in (SIMPLE_DURATION, COMBINED_DURATION):
        # Match once and reuse (the original matched the same pattern twice).
        match = pattern.match(duration)
        if not match:
            continue
        found = match.groupdict()
        del found['time']

        elements.update(dict((k, int(v or 0))
                             for k, v
                             in found.items()))

        return datetime.timedelta(days=(elements['days'] +
                                        _months_to_days(elements['months']) +
                                        _years_to_days(elements['years'])),
                                  hours=elements['hours'],
                                  minutes=elements['minutes'],
                                  seconds=elements['seconds'])

    # Bug fix: the original *returned* a ParseError instance instead of
    # raising it, silently handing callers an exception object in place of
    # a timedelta.
    raise ParseError()
[ "def", "parse_duration", "(", "duration", ")", ":", "duration", "=", "str", "(", "duration", ")", ".", "upper", "(", ")", ".", "strip", "(", ")", "elements", "=", "ELEMENTS", ".", "copy", "(", ")", "for", "pattern", "in", "(", "SIMPLE_DURATION", ",", ...
Attempts to parse an ISO8601 formatted ``duration``.

Returns a ``datetime.timedelta`` object.
[ "Attepmts", "to", "parse", "an", "ISO8601", "formatted", "duration", "." ]
python
train
arkottke/pysra
pysra/site.py
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/site.py#L129-L149
def _update(self): """Initialize the 1D interpolation.""" if self.strains.size and self.strains.size == self.values.size: x = np.log(self.strains) y = self.values if x.size < 4: self._interpolater = interp1d( x, y, 'linear', bounds_error=False, fill_value=(y[0], y[-1])) else: self._interpolater = interp1d( x, y, 'cubic', bounds_error=False, fill_value=(y[0], y[-1]))
[ "def", "_update", "(", "self", ")", ":", "if", "self", ".", "strains", ".", "size", "and", "self", ".", "strains", ".", "size", "==", "self", ".", "values", ".", "size", ":", "x", "=", "np", ".", "log", "(", "self", ".", "strains", ")", "y", "=...
Initialize the 1D interpolation.
[ "Initialize", "the", "1D", "interpolation", "." ]
python
train
chimera0/accel-brain-code
Automatic-Summarization/pysummarization/vectorizabletoken/tfidf_vectorizer.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Automatic-Summarization/pysummarization/vectorizabletoken/tfidf_vectorizer.py#L22-L33
def vectorize(self, token_list):
    '''
    Vectorize a token list via TF-IDF.

    Args:
        token_list:    The list of tokens.

    Returns:
        [vector of token, vector of token, vector of token, ...]
    '''
    collection = self.__collection
    return [collection.tf_idf(token, collection) for token in token_list]
[ "def", "vectorize", "(", "self", ",", "token_list", ")", ":", "vector_list", "=", "[", "self", ".", "__collection", ".", "tf_idf", "(", "token", ",", "self", ".", "__collection", ")", "for", "token", "in", "token_list", "]", "return", "vector_list" ]
Tokenize token list. Args: token_list: The list of tokens.. Returns: [vector of token, vector of token, vector of token, ...]
[ "Tokenize", "token", "list", ".", "Args", ":", "token_list", ":", "The", "list", "of", "tokens", "..", "Returns", ":", "[", "vector", "of", "token", "vector", "of", "token", "vector", "of", "token", "...", "]" ]
python
train
facetoe/zenpy
zenpy/lib/cache.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/cache.py#L147-L156
def get(self, object_type, cache_key):
    """ Query the cache for a Zenpy object.

    Returns the cached object on a hit, or None when caching is disabled,
    the object type has no cache, or the key is absent.
    """
    if object_type not in self.mapping or self.disabled:
        return None
    cache = self.mapping[object_type]
    if cache_key in cache:
        # Use lazy %-style logging args so the message is only formatted
        # when DEBUG logging is actually enabled (the original formatted
        # eagerly with `%` on every call).
        log.debug("Cache HIT: [%s %s]", object_type.capitalize(), cache_key)
        return cache[cache_key]
    log.debug('Cache MISS: [%s %s]', object_type.capitalize(), cache_key)
[ "def", "get", "(", "self", ",", "object_type", ",", "cache_key", ")", ":", "if", "object_type", "not", "in", "self", ".", "mapping", "or", "self", ".", "disabled", ":", "return", "None", "cache", "=", "self", ".", "mapping", "[", "object_type", "]", "i...
Query the cache for a Zenpy object
[ "Query", "the", "cache", "for", "a", "Zenpy", "object" ]
python
train
ipinfo/python
ipinfo/handler.py
https://github.com/ipinfo/python/blob/62fef9136069eab280806cc772dc578d3f1d8d63/ipinfo/handler.py#L86-L93
def _read_country_names(self, countries_file=None): """Read list of countries from specified country file or default file.""" if not countries_file: countries_file = os.path.join(os.path.dirname(__file__), self.COUNTRY_FILE_DEFAULT) with open(countries_file) as f: countries_json = f.read() return json.loads(countries_json)
[ "def", "_read_country_names", "(", "self", ",", "countries_file", "=", "None", ")", ":", "if", "not", "countries_file", ":", "countries_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "self",...
Read list of countries from specified country file or default file.
[ "Read", "list", "of", "countries", "from", "specified", "country", "file", "or", "default", "file", "." ]
python
train
authomatic/liveandletdie
liveandletdie/__init__.py
https://github.com/authomatic/liveandletdie/blob/bf3bcdbd679452ec7c248e9910d85c7fcdca586b/liveandletdie/__init__.py#L411-L454
def wrap(cls, app):
    """
    Adds test live server capability to a Flask app module.

    Parses host/port (and the --ssl flag) from the command line; when a
    host was given, runs the app as a live server and exits the process.

    :param app:
        A :class:`flask.Flask` app instance.
    """
    host, port = cls.parse_args()
    ssl = cls._argument_parser.parse_args().ssl
    ssl_context = None
    if host:
        if ssl:
            # Flask's adhoc SSL context requires pyopenssl; probe for it,
            # extending sys.path with known system locations on failure.
            try:
                import OpenSSL
            except ImportError:
                # OSX fix
                sys.path.append(
                    '/System/Library/Frameworks/Python.framework/Versions/'
                    '{0}.{1}/Extras/lib/python/'
                    .format(sys.version_info.major, sys.version_info.minor)
                )
                try:
                    import OpenSSL
                except ImportError:
                    # Linux fix
                    sys.path.append(
                        '/usr/lib/python{0}.{1}/dist-packages/'
                        .format(sys.version_info.major,
                                sys.version_info.minor)
                    )
                    try:
                        import OpenSSL
                    except ImportError:
                        raise LiveAndLetDieError(
                            'Flask app could not be launched because the pyopenssl '
                            'library is not installed on your system!'
                        )
            ssl_context = 'adhoc'
        # Blocks serving requests; the process terminates when run() returns.
        app.run(host=host, port=port, ssl_context=ssl_context)
        sys.exit()
[ "def", "wrap", "(", "cls", ",", "app", ")", ":", "host", ",", "port", "=", "cls", ".", "parse_args", "(", ")", "ssl", "=", "cls", ".", "_argument_parser", ".", "parse_args", "(", ")", ".", "ssl", "ssl_context", "=", "None", "if", "host", ":", "if",...
Adds test live server capability to a Flask app module. :param app: A :class:`flask.Flask` app instance.
[ "Adds", "test", "live", "server", "capability", "to", "a", "Flask", "app", "module", ".", ":", "param", "app", ":", "A", ":", "class", ":", "flask", ".", "Flask", "app", "instance", "." ]
python
train
awslabs/sockeye
sockeye/lexicon.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/lexicon.py#L59-L85
def read_lexicon(path: str, vocab_source: Dict[str, int], vocab_target: Dict[str, int]) -> np.ndarray:
    """
    Loads lexical translation probabilities from a translation table of format: src, trg, logprob.
    Source words unknown to vocab_source are discarded.
    Target words unknown to vocab_target contribute to p(unk|source_word).
    (This is Incorporating Discrete Translation Lexicons into Neural Machine Translation,
    Section 3.1 & Equation 5 (https://arxiv.org/pdf/1606.02006.pdf))

    :param path: Path to lexicon file.
    :param vocab_source: Source vocabulary.
    :param vocab_target: Target vocabulary.
    :return: Lexicon array. Shape: (vocab_source_size, vocab_target_size).
    """
    src_unk_id = vocab_source[C.UNK_SYMBOL]
    trg_unk_id = vocab_target[C.UNK_SYMBOL]
    lexicon = np.zeros((len(vocab_source), len(vocab_target)))
    n = 0
    for src_id, trg_id, prob in lexicon_iterator(path, vocab_source, vocab_target):
        # Unknown source words are discarded entirely.
        if src_id == src_unk_id:
            continue
        if trg_id == trg_unk_id:
            # Unknown target words accumulate into p(unk | source_word).
            lexicon[src_id, trg_unk_id] += prob
        else:
            lexicon[src_id, trg_id] = prob
        # NOTE(review): n counts every kept (non-unk-source) entry, including
        # ones folded into the unk column -- confirm this is the intended
        # meaning of "entries" in the log line below.
        n += 1
    logger.info("Loaded lexicon from '%s' with %d entries", path, n)
    return lexicon
[ "def", "read_lexicon", "(", "path", ":", "str", ",", "vocab_source", ":", "Dict", "[", "str", ",", "int", "]", ",", "vocab_target", ":", "Dict", "[", "str", ",", "int", "]", ")", "->", "np", ".", "ndarray", ":", "src_unk_id", "=", "vocab_source", "["...
Loads lexical translation probabilities from a translation table of format: src, trg, logprob. Source words unknown to vocab_source are discarded. Target words unknown to vocab_target contribute to p(unk|source_word). See Incorporating Discrete Translation Lexicons into Neural Machine Translation, Section 3.1 & Equation 5 (https://arxiv.org/pdf/1606.02006.pdf)) :param path: Path to lexicon file. :param vocab_source: Source vocabulary. :param vocab_target: Target vocabulary. :return: Lexicon array. Shape: (vocab_source_size, vocab_target_size).
[ "Loads", "lexical", "translation", "probabilities", "from", "a", "translation", "table", "of", "format", ":", "src", "trg", "logprob", ".", "Source", "words", "unknown", "to", "vocab_source", "are", "discarded", ".", "Target", "words", "unknown", "to", "vocab_ta...
python
train
polyaxon/polyaxon
polyaxon/event_manager/event_service.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/event_manager/event_service.py#L27-L46
def record(self,
           event_type: str,
           event_data: Mapping = None,
           instance: Any = None,
           **kwargs) -> 'Event':
    """
    Validate and record an event.

    Returns the recorded event, or None when the service is not set up or
    cannot handle this event type.

    >>> record('event.action', object_instance)
    """
    # Short-circuit: skip entirely when not set up or the type is unhandled.
    if not self.is_setup or not self.can_handle(event_type=event_type):
        return

    new_event = self.get_event(event_type=event_type,
                               event_data=event_data,
                               instance=instance,
                               **kwargs)
    self.record_event(new_event)
    return new_event
[ "def", "record", "(", "self", ",", "event_type", ":", "str", ",", "event_data", ":", "Mapping", "=", "None", ",", "instance", ":", "Any", "=", "None", ",", "*", "*", "kwargs", ")", "->", "'Event'", ":", "if", "not", "self", ".", "is_setup", ":", "r...
Validate and record an event. >>> record('event.action', object_instance)
[ "Validate", "and", "record", "an", "event", "." ]
python
train
Bystroushaak/pyDHTMLParser
src/dhtmlparser/__init__.py
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L43-L150
def _raw_split(itxt):
    """
    Parse HTML from text into array filled with tags end text.

    Source code is little bit unintutive, because it is state machine parser.

    For better understanding, look at http://bit.ly/1rXRcJj

    Example::

        >>> dhtmlparser._raw_split('<html><tag params="true"></html>')
        ['<html>', '<tag params="true">', '</html>']

    Args:
        itxt (str): Input HTML text, which will be parsed.

    Returns:
        list: List of strings (input splitted to tags and text).
    """
    echr = ""                      # quote character currently open (' or ")
    buff = ["", "", "", ""]        # last 4 characters seen, newest first
    content = ""                   # characters accumulated for current token
    array = []                     # finished tokens
    next_state = 0                 # presumably StateEnum.content -- TODO confirm
    inside_tag = False
    escaped = False

    # Reversed-order signatures matched against `buff` (newest char first):
    # "<!-" for a comment opener, "--" for a comment closer.
    COMMENT_START = ["-", "!", "<"]
    COMMENT_END = ["-", "-"]

    # Disable GC while building many small strings/lists; re-enabled below.
    gc.disable()

    for c in itxt:
        # content
        if next_state == StateEnum.content:
            if c == "<":
                if content:
                    array.append(content)
                content = c
                next_state = StateEnum.tag
                inside_tag = False
            else:
                content += c

        # html tag
        elif next_state == StateEnum.tag:
            if c == ">":
                array.append(content + c)
                content = ""
                next_state = StateEnum.content
            elif c == "'" or c == '"':
                echr = c
                content += c
                next_state = StateEnum.parameter
            elif c == "-" and buff[:3] == COMMENT_START:
                # "<!--" just completed: emit whatever preceded "<!-".
                if content[:-3]:
                    array.append(content[:-3])
                content = content[-3:] + c
                next_state = StateEnum.comment
            else:
                if c == "<":   # jump back into tag instead of content
                    array.append(content)
                    inside_tag = True
                    content = ""
                content += c

        # quotes "" / ''
        elif next_state == StateEnum.parameter:
            if c == echr and not escaped:  # end of quotes
                next_state = StateEnum.tag

            # unescaped end of line - this is good for invalid HTML like
            # <a href=something">..., because it allows recovery
            if c == "\n" and not escaped and buff[0] == ">":
                next_state = StateEnum.content
                inside_tag = False

            content += c
            escaped = not escaped if c == "\\" else False

        # html comments
        elif next_state == StateEnum.comment:
            if c == ">" and buff[:2] == COMMENT_END:
                next_state = StateEnum.tag if inside_tag else StateEnum.content
                inside_tag = False

                array.append(content + c)
                content = ""
            else:
                content += c

        # rotate buffer
        buff = _rotate_buff(buff)
        buff[0] = c

    gc.enable()

    if content:
        array.append(content)

    return array
[ "def", "_raw_split", "(", "itxt", ")", ":", "echr", "=", "\"\"", "buff", "=", "[", "\"\"", ",", "\"\"", ",", "\"\"", ",", "\"\"", "]", "content", "=", "\"\"", "array", "=", "[", "]", "next_state", "=", "0", "inside_tag", "=", "False", "escaped", "=...
Parse HTML from text into array filled with tags end text. Source code is little bit unintutive, because it is state machine parser. For better understanding, look at http://bit.ly/1rXRcJj Example:: >>> dhtmlparser._raw_split('<html><tag params="true"></html>') ['<html>', '<tag params="true">', '</html>'] Args: itxt (str): Input HTML text, which will be parsed. Returns: list: List of strings (input splitted to tags and text).
[ "Parse", "HTML", "from", "text", "into", "array", "filled", "with", "tags", "end", "text", "." ]
python
train
earlye/nephele
nephele/AwsProcessor.py
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsProcessor.py#L193-L219
def do_profile(self,args):
    """ Select nephele profile

        profile -h for more details
    """
    # NOTE(review): the bare `print` statement below makes this Python 2
    # only code.
    parser = CommandArgumentParser("profile")
    parser.add_argument(dest="profile",help="Profile name")
    parser.add_argument('-v','--verbose',dest="verbose",action='store_true',help='verbose')
    args = vars(parser.parse_args(args))

    profile = args['profile']
    verbose = args['verbose']
    if verbose:
        print "Selecting profile '{}'".format(profile)

    # Look up the named profile in the loaded config; unknown names fall
    # back to an empty profile that still records its own name.
    # NOTE(review): when the profile exists, selectedProfile aliases the
    # config entry, so setting 'name' below also mutates
    # Config.config['profiles'][profile] -- confirm that is intended.
    selectedProfile = {}
    if profile in Config.config['profiles']:
        selectedProfile = Config.config['profiles'][profile]
    selectedProfile['name'] = profile
    Config.config['selectedProfile'] = selectedProfile

    # A nephele profile may map to a differently named AWS credentials
    # profile via the optional 'awsProfile' key.
    awsProfile = profile
    if 'awsProfile' in selectedProfile:
        awsProfile = selectedProfile['awsProfile']

    AwsConnectionFactory.resetInstance(profile=awsProfile)
[ "def", "do_profile", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", "\"profile\"", ")", "parser", ".", "add_argument", "(", "dest", "=", "\"profile\"", ",", "help", "=", "\"Profile name\"", ")", "parser", ".", "add_argument", ...
Select nephele profile profile -h for more details
[ "Select", "nephele", "profile" ]
python
train
rigetti/grove
grove/tomography/process_tomography.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/tomography/process_tomography.py#L263-L272
def plot(self): """ Visualize the process. :return: The generated figure. :rtype: matplotlib.Figure """ fig, (ax1) = plt.subplots(1, 1, figsize=(10, 8)) self.plot_pauli_transfer_matrix(ax1) return fig
[ "def", "plot", "(", "self", ")", ":", "fig", ",", "(", "ax1", ")", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "(", "10", ",", "8", ")", ")", "self", ".", "plot_pauli_transfer_matrix", "(", "ax1", ")", "return", "fig" ]
Visualize the process. :return: The generated figure. :rtype: matplotlib.Figure
[ "Visualize", "the", "process", "." ]
python
train
sporsh/carnifex
carnifex/ssh/userauth.py
https://github.com/sporsh/carnifex/blob/82dd3bd2bc134dfb69a78f43171e227f2127060b/carnifex/ssh/userauth.py#L20-L28
def getGenericAnswers(self, name, instruction, prompts): """Called when the server requests keyboard interactive authentication """ responses = [] for prompt, _echo in prompts: password = self.getPassword(prompt) responses.append(password) return defer.succeed(responses)
[ "def", "getGenericAnswers", "(", "self", ",", "name", ",", "instruction", ",", "prompts", ")", ":", "responses", "=", "[", "]", "for", "prompt", ",", "_echo", "in", "prompts", ":", "password", "=", "self", ".", "getPassword", "(", "prompt", ")", "respons...
Called when the server requests keyboard interactive authentication
[ "Called", "when", "the", "server", "requests", "keyboard", "interactive", "authentication" ]
python
train
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L3906-L3917
def libvlc_event_attach(p_event_manager, i_event_type, f_callback, user_data): '''Register for an event notification. @param p_event_manager: the event manager to which you want to attach to. Generally it is obtained by vlc_my_object_event_manager() where my_object is the object you want to listen to. @param i_event_type: the desired event to which we want to listen. @param f_callback: the function to call when i_event_type occurs. @param user_data: user provided data to carry with the event. @return: 0 on success, ENOMEM on error. ''' f = _Cfunctions.get('libvlc_event_attach', None) or \ _Cfunction('libvlc_event_attach', ((1,), (1,), (1,), (1,),), None, ctypes.c_int, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p) return f(p_event_manager, i_event_type, f_callback, user_data)
[ "def", "libvlc_event_attach", "(", "p_event_manager", ",", "i_event_type", ",", "f_callback", ",", "user_data", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_event_attach'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_event_attach'", ",", ...
Register for an event notification. @param p_event_manager: the event manager to which you want to attach to. Generally it is obtained by vlc_my_object_event_manager() where my_object is the object you want to listen to. @param i_event_type: the desired event to which we want to listen. @param f_callback: the function to call when i_event_type occurs. @param user_data: user provided data to carry with the event. @return: 0 on success, ENOMEM on error.
[ "Register", "for", "an", "event", "notification", "." ]
python
train
wtsi-hgi/python-baton-wrapper
baton/_baton/_baton_runner.py
https://github.com/wtsi-hgi/python-baton-wrapper/blob/ae0c9e3630e2c4729a0614cc86f493688436b0b7/baton/_baton/_baton_runner.py#L56-L78
def _raise_any_errors_given_in_baton_out(baton_out_as_json: List[Dict]): """ Raises any errors that baton has expressed in its output. :param baton_out_as_json: the output baton gave as parsed serialization """ if not isinstance(baton_out_as_json, list): baton_out_as_json = [baton_out_as_json] for baton_item_as_json in baton_out_as_json: if BATON_ERROR_PROPERTY in baton_item_as_json: error = baton_item_as_json[BATON_ERROR_PROPERTY] error_message = error[BATON_ERROR_MESSAGE_KEY] error_code = error[BATON_ERROR_CODE_KEY] # Working around baton issue: https://github.com/wtsi-npg/baton/issues/155 if error_code == IRODS_ERROR_USER_FILE_DOES_NOT_EXIST or \ (error_code == IRODS_ERROR_CAT_INVALID_ARGUMENT and "Failed to modify permissions" in error_message): raise FileNotFoundError(error_message) elif error_code == IRODS_ERROR_CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME \ or error_code == IRODS_ERROR_CAT_SUCCESS_BUT_WITH_NO_INFO: raise KeyError(error_message) else: raise RuntimeError(error_message)
[ "def", "_raise_any_errors_given_in_baton_out", "(", "baton_out_as_json", ":", "List", "[", "Dict", "]", ")", ":", "if", "not", "isinstance", "(", "baton_out_as_json", ",", "list", ")", ":", "baton_out_as_json", "=", "[", "baton_out_as_json", "]", "for", "baton_ite...
Raises any errors that baton has expressed in its output. :param baton_out_as_json: the output baton gave as parsed serialization
[ "Raises", "any", "errors", "that", "baton", "has", "expressed", "in", "its", "output", ".", ":", "param", "baton_out_as_json", ":", "the", "output", "baton", "gave", "as", "parsed", "serialization" ]
python
train
SheffieldML/GPy
GPy/models/gradient_checker.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/models/gradient_checker.py#L196-L289
def checkgrad_block(self, analytic_hess, numeric_hess, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False): """ Checkgrad a block matrix """ if analytic_hess.dtype is np.dtype('object'): #Make numeric hessian also into a block matrix real_size = get_block_shapes(analytic_hess) num_elements = np.sum(real_size) if (num_elements, num_elements) == numeric_hess.shape: #If the sizes are the same we assume they are the same #(we have not fixed any values so the numeric is the whole hessian) numeric_hess = get_blocks(numeric_hess, real_size) else: #Make a fake empty matrix and fill out the correct block tmp_numeric_hess = get_blocks(np.zeros((num_elements, num_elements)), real_size) tmp_numeric_hess[block_indices] = numeric_hess.copy() numeric_hess = tmp_numeric_hess if block_indices is not None: #Extract the right block analytic_hess = analytic_hess[block_indices] numeric_hess = numeric_hess[block_indices] else: #Unblock them if they are in blocks and you aren't checking a single block (checking whole hessian) if analytic_hess.dtype is np.dtype('object'): analytic_hess = unblock(analytic_hess) numeric_hess = unblock(numeric_hess) ratio = numeric_hess / (numpy.where(analytic_hess==0, 1e-10, analytic_hess)) difference = numpy.abs(analytic_hess - numeric_hess) check_passed = numpy.all((numpy.abs(1 - ratio)) < tolerance) or numpy.allclose(numeric_hess, analytic_hess, atol = tolerance) if verbose: if block_indices: print("\nBlock {}".format(block_indices)) else: print("\nAll blocks") header = ['Checked', 'Max-Ratio', 'Min-Ratio', 'Min-Difference', 'Max-Difference'] header_string = map(lambda x: ' | '.join(header), [header]) separator = '-' * len(header_string[0]) print('\n'.join([header_string[0], separator])) min_r = '%.6f' % float(numpy.min(ratio)) max_r = '%.6f' % float(numpy.max(ratio)) max_d = '%.6f' % float(numpy.max(difference)) min_d = '%.6f' % float(numpy.min(difference)) cols = [max_r, min_r, min_d, max_d] if check_passed: checked = "\033[92m 
True \033[0m" else: checked = "\033[91m False \033[0m" grad_string = "{} | {} | {} | {} | {} ".format(checked, cols[0], cols[1], cols[2], cols[3]) print(grad_string) if plot: from matplotlib import pyplot as pb fig, axes = pb.subplots(2, 2) max_lim = numpy.max(numpy.vstack((analytic_hess, numeric_hess))) min_lim = numpy.min(numpy.vstack((analytic_hess, numeric_hess))) msa = axes[0,0].matshow(analytic_hess, vmin=min_lim, vmax=max_lim) axes[0,0].set_title('Analytic hessian') axes[0,0].xaxis.set_ticklabels([None]) axes[0,0].yaxis.set_ticklabels([None]) axes[0,0].xaxis.set_ticks([None]) axes[0,0].yaxis.set_ticks([None]) msn = axes[0,1].matshow(numeric_hess, vmin=min_lim, vmax=max_lim) pb.colorbar(msn, ax=axes[0,1]) axes[0,1].set_title('Numeric hessian') axes[0,1].xaxis.set_ticklabels([None]) axes[0,1].yaxis.set_ticklabels([None]) axes[0,1].xaxis.set_ticks([None]) axes[0,1].yaxis.set_ticks([None]) msr = axes[1,0].matshow(ratio) pb.colorbar(msr, ax=axes[1,0]) axes[1,0].set_title('Ratio') axes[1,0].xaxis.set_ticklabels([None]) axes[1,0].yaxis.set_ticklabels([None]) axes[1,0].xaxis.set_ticks([None]) axes[1,0].yaxis.set_ticks([None]) msd = axes[1,1].matshow(difference) pb.colorbar(msd, ax=axes[1,1]) axes[1,1].set_title('difference') axes[1,1].xaxis.set_ticklabels([None]) axes[1,1].yaxis.set_ticklabels([None]) axes[1,1].xaxis.set_ticks([None]) axes[1,1].yaxis.set_ticks([None]) if block_indices: fig.suptitle("Block: {}".format(block_indices)) pb.show() return check_passed
[ "def", "checkgrad_block", "(", "self", ",", "analytic_hess", ",", "numeric_hess", ",", "verbose", "=", "False", ",", "step", "=", "1e-6", ",", "tolerance", "=", "1e-3", ",", "block_indices", "=", "None", ",", "plot", "=", "False", ")", ":", "if", "analyt...
Checkgrad a block matrix
[ "Checkgrad", "a", "block", "matrix" ]
python
train
benoitkugler/abstractDataLibrary
pyDLib/Core/sql.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/sql.py#L259-L272
def jsonise(dic): """Renvoie un dictionnaire dont les champs dont compatibles avec SQL Utilise Json. Attention à None : il faut laisser None et non pas null""" d = {} for k, v in dic.items(): if type(v) in abstractRequetesSQL.TYPES_PERMIS: d[k] = v else: try: d[k] = json.dumps(v, ensure_ascii=False, cls=formats.JsonEncoder) except ValueError as e: logging.exception("Erreur d'encodage JSON !") raise e return d
[ "def", "jsonise", "(", "dic", ")", ":", "d", "=", "{", "}", "for", "k", ",", "v", "in", "dic", ".", "items", "(", ")", ":", "if", "type", "(", "v", ")", "in", "abstractRequetesSQL", ".", "TYPES_PERMIS", ":", "d", "[", "k", "]", "=", "v", "els...
Renvoie un dictionnaire dont les champs dont compatibles avec SQL Utilise Json. Attention à None : il faut laisser None et non pas null
[ "Renvoie", "un", "dictionnaire", "dont", "les", "champs", "dont", "compatibles", "avec", "SQL", "Utilise", "Json", ".", "Attention", "à", "None", ":", "il", "faut", "laisser", "None", "et", "non", "pas", "null" ]
python
train
tanghaibao/goatools
goatools/grouper/sorter_gos.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/sorter_gos.py#L33-L38
def sortby(self, ntd): """Return function for sorting.""" if 'reldepth' in self.grprobj.gosubdag.prt_attr['flds']: return [ntd.NS, -1*ntd.dcnt, ntd.reldepth] else: return [ntd.NS, -1*ntd.dcnt, ntd.depth]
[ "def", "sortby", "(", "self", ",", "ntd", ")", ":", "if", "'reldepth'", "in", "self", ".", "grprobj", ".", "gosubdag", ".", "prt_attr", "[", "'flds'", "]", ":", "return", "[", "ntd", ".", "NS", ",", "-", "1", "*", "ntd", ".", "dcnt", ",", "ntd", ...
Return function for sorting.
[ "Return", "function", "for", "sorting", "." ]
python
train
wangwenpei/cliez
cliez/component.py
https://github.com/wangwenpei/cliez/blob/d6fe775544cd380735c56c8a4a79bc2ad22cb6c4/cliez/component.py#L257-L280
def load_description(name, root=''): """ .. warning:: Experiment feature. BE CAREFUL! WE MAY REMOVE THIS FEATURE! Load resource file as description, if resource file not exist,will return empty string. :param str path: name resource path :param str root: same as `load_resource` root :return: `str` """ desc = '' try: desc = Component.load_resource(name, root=root) except (IOError, ImportError): pass return desc
[ "def", "load_description", "(", "name", ",", "root", "=", "''", ")", ":", "desc", "=", "''", "try", ":", "desc", "=", "Component", ".", "load_resource", "(", "name", ",", "root", "=", "root", ")", "except", "(", "IOError", ",", "ImportError", ")", ":...
.. warning:: Experiment feature. BE CAREFUL! WE MAY REMOVE THIS FEATURE! Load resource file as description, if resource file not exist,will return empty string. :param str path: name resource path :param str root: same as `load_resource` root :return: `str`
[ "..", "warning", "::" ]
python
valid
gem/oq-engine
openquake/hazardlib/gsim/abrahamson_2015.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_2015.py#L134-L148
def _compute_magnitude_term(self, C, dc1, mag): """ Computes the magnitude scaling term given by equation (2) """ base = C['theta1'] + (self.CONSTS['theta4'] * dc1) dmag = self.CONSTS["C1"] + dc1 if mag > dmag: f_mag = (self.CONSTS['theta5'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) else: f_mag = (self.CONSTS['theta4'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) return base + f_mag
[ "def", "_compute_magnitude_term", "(", "self", ",", "C", ",", "dc1", ",", "mag", ")", ":", "base", "=", "C", "[", "'theta1'", "]", "+", "(", "self", ".", "CONSTS", "[", "'theta4'", "]", "*", "dc1", ")", "dmag", "=", "self", ".", "CONSTS", "[", "\...
Computes the magnitude scaling term given by equation (2)
[ "Computes", "the", "magnitude", "scaling", "term", "given", "by", "equation", "(", "2", ")" ]
python
train
PythonRails/rails
rails/views/__init__.py
https://github.com/PythonRails/rails/blob/1e199b9da4da5b24fef39fc6212d71fc9fbb18a5/rails/views/__init__.py#L18-L29
def _load_view(self, template_engine_name, template_dir): """ Load view by name and return an instance. """ file_name = template_engine_name.lower() class_name = "{}View".format(template_engine_name.title()) try: view_module = import_module("rails.views.{}".format(file_name)) except ImportError: raise Exception("Template engine '{}' not found in 'rails.views'".format(file_name)) view_class = getattr(view_module, class_name) return view_class(template_dir)
[ "def", "_load_view", "(", "self", ",", "template_engine_name", ",", "template_dir", ")", ":", "file_name", "=", "template_engine_name", ".", "lower", "(", ")", "class_name", "=", "\"{}View\"", ".", "format", "(", "template_engine_name", ".", "title", "(", ")", ...
Load view by name and return an instance.
[ "Load", "view", "by", "name", "and", "return", "an", "instance", "." ]
python
train
fhcrc/seqmagick
seqmagick/subcommands/quality_filter.py
https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L575-L583
def filter_record(self, record): """ Filter record, dropping any that don't meet minimum length """ if len(record) >= self.min_length: return record else: raise FailedFilter(len(record))
[ "def", "filter_record", "(", "self", ",", "record", ")", ":", "if", "len", "(", "record", ")", ">=", "self", ".", "min_length", ":", "return", "record", "else", ":", "raise", "FailedFilter", "(", "len", "(", "record", ")", ")" ]
Filter record, dropping any that don't meet minimum length
[ "Filter", "record", "dropping", "any", "that", "don", "t", "meet", "minimum", "length" ]
python
train
pyviz/holoviews
holoviews/core/spaces.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/spaces.py#L1435-L1461
def relabel(self, label=None, group=None, depth=1): """Clone object and apply new group and/or label. Applies relabeling to children up to the supplied depth. Args: label (str, optional): New label to apply to returned object group (str, optional): New group to apply to returned object depth (int, optional): Depth to which relabel will be applied If applied to container allows applying relabeling to contained objects up to the specified depth Returns: Returns relabelled object """ relabelled = super(DynamicMap, self).relabel(label, group, depth) if depth > 0: from ..util import Dynamic def dynamic_relabel(obj, **dynkwargs): return obj.relabel(group=group, label=label, depth=depth-1) dmap = Dynamic(self, streams=self.streams, operation=dynamic_relabel) dmap.data = relabelled.data with util.disable_constant(dmap): dmap.group = relabelled.group dmap.label = relabelled.label return dmap return relabelled
[ "def", "relabel", "(", "self", ",", "label", "=", "None", ",", "group", "=", "None", ",", "depth", "=", "1", ")", ":", "relabelled", "=", "super", "(", "DynamicMap", ",", "self", ")", ".", "relabel", "(", "label", ",", "group", ",", "depth", ")", ...
Clone object and apply new group and/or label. Applies relabeling to children up to the supplied depth. Args: label (str, optional): New label to apply to returned object group (str, optional): New group to apply to returned object depth (int, optional): Depth to which relabel will be applied If applied to container allows applying relabeling to contained objects up to the specified depth Returns: Returns relabelled object
[ "Clone", "object", "and", "apply", "new", "group", "and", "/", "or", "label", "." ]
python
train
jterrace/pyssim
ssim/ssimlib.py
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/ssimlib.py#L193-L246
def main(): """Main function for pyssim.""" description = '\n'.join([ 'Compares an image with a list of images using the SSIM metric.', ' Example:', ' pyssim test-images/test1-1.png "test-images/*"' ]) parser = argparse.ArgumentParser( prog='pyssim', formatter_class=argparse.RawTextHelpFormatter, description=description) parser.add_argument('--cw', help='compute the complex wavelet SSIM', action='store_true') parser.add_argument( 'base_image', metavar='image1.png', type=argparse.FileType('r')) parser.add_argument( 'comparison_images', metavar='image path with* or image2.png') parser.add_argument('--width', type=int, default=None, help='scales the image before computing SSIM') parser.add_argument('--height', type=int, default=None, help='scales the image before computing SSIM') args = parser.parse_args() if args.width and args.height: size = (args.width, args.height) else: size = None if not args.cw: gaussian_kernel_sigma = 1.5 gaussian_kernel_width = 11 gaussian_kernel_1d = get_gaussian_kernel( gaussian_kernel_width, gaussian_kernel_sigma) comparison_images = glob.glob(args.comparison_images) is_a_single_image = len(comparison_images) == 1 for comparison_image in comparison_images: if args.cw: ssim = SSIM(args.base_image.name, size=size) ssim_value = ssim.cw_ssim_value(comparison_image) else: ssim = SSIM(args.base_image.name, gaussian_kernel_1d, size=size) ssim_value = ssim.ssim_value(comparison_image) if is_a_single_image: sys.stdout.write('%.7g' % ssim_value) else: sys.stdout.write('%s - %s: %.7g' % ( args.base_image.name, comparison_image, ssim_value)) sys.stdout.write('\n')
[ "def", "main", "(", ")", ":", "description", "=", "'\\n'", ".", "join", "(", "[", "'Compares an image with a list of images using the SSIM metric.'", ",", "' Example:'", ",", "' pyssim test-images/test1-1.png \"test-images/*\"'", "]", ")", "parser", "=", "argparse", "...
Main function for pyssim.
[ "Main", "function", "for", "pyssim", "." ]
python
test
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/nfw.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/nfw.py#L286-L306
def g_(self, X): """ computes h() :param X: :return: """ if self._interpol: if not hasattr(self, '_g_interp'): if self._lookup: x = self._x_lookup g_x = self._g_lookup else: x = np.linspace(0, self._max_interp_X, self._num_interp_X) g_x = self._g(x) self._g_interp = interp.interp1d(x, g_x, kind='linear', axis=-1, copy=False, bounds_error=False, fill_value=0, assume_sorted=True) return self._g_interp(X) else: return self._g(X)
[ "def", "g_", "(", "self", ",", "X", ")", ":", "if", "self", ".", "_interpol", ":", "if", "not", "hasattr", "(", "self", ",", "'_g_interp'", ")", ":", "if", "self", ".", "_lookup", ":", "x", "=", "self", ".", "_x_lookup", "g_x", "=", "self", ".", ...
computes h() :param X: :return:
[ "computes", "h", "()" ]
python
train
xi/ldif3
ldif3.py
https://github.com/xi/ldif3/blob/debc4222bb48492de0d3edcc3c71fdae5bc612a4/ldif3.py#L159-L181
def _unparse_change_record(self, modlist): """ :type modlist: List[Tuple] :param modlist: List of additions (2-tuple) or modifications (3-tuple) """ mod_len = len(modlist[0]) self._unparse_changetype(mod_len) for mod in modlist: if len(mod) != mod_len: raise ValueError("Subsequent modlist item of wrong length") if mod_len == 2: mod_type, mod_vals = mod elif mod_len == 3: mod_op, mod_type, mod_vals = mod self._unparse_attr(MOD_OPS[mod_op], mod_type) for mod_val in mod_vals: self._unparse_attr(mod_type, mod_val) if mod_len == 3: self._output_file.write(b'-' + self._line_sep)
[ "def", "_unparse_change_record", "(", "self", ",", "modlist", ")", ":", "mod_len", "=", "len", "(", "modlist", "[", "0", "]", ")", "self", ".", "_unparse_changetype", "(", "mod_len", ")", "for", "mod", "in", "modlist", ":", "if", "len", "(", "mod", ")"...
:type modlist: List[Tuple] :param modlist: List of additions (2-tuple) or modifications (3-tuple)
[ ":", "type", "modlist", ":", "List", "[", "Tuple", "]", ":", "param", "modlist", ":", "List", "of", "additions", "(", "2", "-", "tuple", ")", "or", "modifications", "(", "3", "-", "tuple", ")" ]
python
train
elastic/apm-agent-python
elasticapm/metrics/base_metrics.py
https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/metrics/base_metrics.py#L128-L143
def gauge(self, name): """ Returns an existing or creates and returns a new gauge :param name: name of the gauge :return: the gauge object """ with self._lock: if name not in self._gauges: if self._registry._ignore_patterns and any( pattern.match(name) for pattern in self._registry._ignore_patterns ): gauge = noop_metric else: gauge = Gauge(name) self._gauges[name] = gauge return self._gauges[name]
[ "def", "gauge", "(", "self", ",", "name", ")", ":", "with", "self", ".", "_lock", ":", "if", "name", "not", "in", "self", ".", "_gauges", ":", "if", "self", ".", "_registry", ".", "_ignore_patterns", "and", "any", "(", "pattern", ".", "match", "(", ...
Returns an existing or creates and returns a new gauge :param name: name of the gauge :return: the gauge object
[ "Returns", "an", "existing", "or", "creates", "and", "returns", "a", "new", "gauge", ":", "param", "name", ":", "name", "of", "the", "gauge", ":", "return", ":", "the", "gauge", "object" ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/c14n.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/c14n.py#L413-L433
def Canonicalize(node, output=None, **kw): '''Canonicalize(node, output=None, **kw) -> UTF-8 Canonicalize a DOM document/element node and all descendents. Return the text; if output is specified then output.write will be called to output the text and None will be returned Keyword parameters: nsdict: a dictionary of prefix:uri namespace entries assumed to exist in the surrounding context comments: keep comments if non-zero (default is 0) subset: Canonical XML subsetting resulting from XPath (default is []) unsuppressedPrefixes: do exclusive C14N, and this specifies the prefixes that should be inherited. ''' if output: apply(_implementation, (node, output.write), kw) else: s = StringIO.StringIO() apply(_implementation, (node, s.write), kw) return s.getvalue()
[ "def", "Canonicalize", "(", "node", ",", "output", "=", "None", ",", "*", "*", "kw", ")", ":", "if", "output", ":", "apply", "(", "_implementation", ",", "(", "node", ",", "output", ".", "write", ")", ",", "kw", ")", "else", ":", "s", "=", "Strin...
Canonicalize(node, output=None, **kw) -> UTF-8 Canonicalize a DOM document/element node and all descendents. Return the text; if output is specified then output.write will be called to output the text and None will be returned Keyword parameters: nsdict: a dictionary of prefix:uri namespace entries assumed to exist in the surrounding context comments: keep comments if non-zero (default is 0) subset: Canonical XML subsetting resulting from XPath (default is []) unsuppressedPrefixes: do exclusive C14N, and this specifies the prefixes that should be inherited.
[ "Canonicalize", "(", "node", "output", "=", "None", "**", "kw", ")", "-", ">", "UTF", "-", "8" ]
python
train
treethought/flask-assistant
flask_assistant/core.py
https://github.com/treethought/flask-assistant/blob/9331b9796644dfa987bcd97a13e78e9ab62923d3/flask_assistant/core.py#L275-L311
def action( self, intent_name, is_fallback=False, mapping={}, convert={}, default={}, with_context=[], events=[], *args, **kw ): """Decorates an intent_name's Action view function. The wrapped function is called when a request with the given intent_name is recieved along with all required parameters. """ def decorator(f): action_funcs = self._intent_action_funcs.get(intent_name, []) action_funcs.append(f) self._intent_action_funcs[intent_name] = action_funcs self._intent_mappings[intent_name] = mapping self._intent_converts[intent_name] = convert self._intent_defaults[intent_name] = default self._intent_fallbacks[intent_name] = is_fallback self._intent_events[intent_name] = events self._register_context_to_func(intent_name, with_context) @wraps(f) def wrapper(*args, **kw): self._flask_assitant_view_func(*args, **kw) return f return decorator
[ "def", "action", "(", "self", ",", "intent_name", ",", "is_fallback", "=", "False", ",", "mapping", "=", "{", "}", ",", "convert", "=", "{", "}", ",", "default", "=", "{", "}", ",", "with_context", "=", "[", "]", ",", "events", "=", "[", "]", ","...
Decorates an intent_name's Action view function. The wrapped function is called when a request with the given intent_name is recieved along with all required parameters.
[ "Decorates", "an", "intent_name", "s", "Action", "view", "function", "." ]
python
train
pytroll/satpy
satpy/writers/__init__.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/writers/__init__.py#L641-L684
def save_datasets(self, datasets, compute=True, **kwargs): """Save all datasets to one or more files. Subclasses can use this method to save all datasets to one single file or optimize the writing of individual datasets. By default this simply calls `save_dataset` for each dataset provided. Args: datasets (iterable): Iterable of `xarray.DataArray` objects to save using this writer. compute (bool): If `True` (default), compute all of the saves to disk. If `False` then the return value is either a `dask.delayed.Delayed` object or two lists to be passed to a `dask.array.store` call. See return values below for more details. **kwargs: Keyword arguments to pass to `save_dataset`. See that documentation for more details. Returns: Value returned depends on `compute` keyword argument. If `compute` is `True` the value is the result of a either a `dask.array.store` operation or a `dask.delayed.Delayed` compute, typically this is `None`. If `compute` is `False` then the result is either a `dask.delayed.Delayed` object that can be computed with `delayed.compute()` or a two element tuple of sources and targets to be passed to `dask.array.store`. If `targets` is provided then it is the caller's responsibility to close any objects that have a "close" method. """ results = [] for ds in datasets: results.append(self.save_dataset(ds, compute=False, **kwargs)) if compute: LOG.info("Computing and writing results...") return compute_writer_results([results]) targets, sources, delayeds = split_results([results]) if delayeds: # This writer had only delayed writes return delayeds else: return targets, sources
[ "def", "save_datasets", "(", "self", ",", "datasets", ",", "compute", "=", "True", ",", "*", "*", "kwargs", ")", ":", "results", "=", "[", "]", "for", "ds", "in", "datasets", ":", "results", ".", "append", "(", "self", ".", "save_dataset", "(", "ds",...
Save all datasets to one or more files. Subclasses can use this method to save all datasets to one single file or optimize the writing of individual datasets. By default this simply calls `save_dataset` for each dataset provided. Args: datasets (iterable): Iterable of `xarray.DataArray` objects to save using this writer. compute (bool): If `True` (default), compute all of the saves to disk. If `False` then the return value is either a `dask.delayed.Delayed` object or two lists to be passed to a `dask.array.store` call. See return values below for more details. **kwargs: Keyword arguments to pass to `save_dataset`. See that documentation for more details. Returns: Value returned depends on `compute` keyword argument. If `compute` is `True` the value is the result of a either a `dask.array.store` operation or a `dask.delayed.Delayed` compute, typically this is `None`. If `compute` is `False` then the result is either a `dask.delayed.Delayed` object that can be computed with `delayed.compute()` or a two element tuple of sources and targets to be passed to `dask.array.store`. If `targets` is provided then it is the caller's responsibility to close any objects that have a "close" method.
[ "Save", "all", "datasets", "to", "one", "or", "more", "files", "." ]
python
train
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/common.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/common.py#L105-L123
def disconnect_node(node, src=True, dst=True): """Disconnect all connections from node :param node: the node to disconnect :type node: str :returns: None :rtype: None :raises: None """ if dst: destconns = cmds.listConnections(node, connections=True, plugs=True, source=False) or [] for i in range(0, len(destconns), 2): source, dest = destconns[i], destconns[i+1] cmds.disconnectAttr(source, dest) if src: srcconns = cmds.listConnections(node, connections=True, plugs=True, destination=False) or [] for i in range(0, len(srcconns), 2): source, dest = srcconns[i+1], srcconns[i] cmds.disconnectAttr(source, dest)
[ "def", "disconnect_node", "(", "node", ",", "src", "=", "True", ",", "dst", "=", "True", ")", ":", "if", "dst", ":", "destconns", "=", "cmds", ".", "listConnections", "(", "node", ",", "connections", "=", "True", ",", "plugs", "=", "True", ",", "sour...
Disconnect all connections from node :param node: the node to disconnect :type node: str :returns: None :rtype: None :raises: None
[ "Disconnect", "all", "connections", "from", "node" ]
python
train
lambdamusic/Ontospy
ontospy/core/entities.py
https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/entities.py#L243-L253
def annotations(self, qname=True): """ wrapper that returns all triples for an onto. By default resources URIs are transformed into qnames """ if qname: return sorted([(uri2niceString(x, self.namespaces) ), (uri2niceString(y, self.namespaces)), z] for x, y, z in self.triples) else: return sorted(self.triples)
[ "def", "annotations", "(", "self", ",", "qname", "=", "True", ")", ":", "if", "qname", ":", "return", "sorted", "(", "[", "(", "uri2niceString", "(", "x", ",", "self", ".", "namespaces", ")", ")", ",", "(", "uri2niceString", "(", "y", ",", "self", ...
wrapper that returns all triples for an onto. By default resources URIs are transformed into qnames
[ "wrapper", "that", "returns", "all", "triples", "for", "an", "onto", ".", "By", "default", "resources", "URIs", "are", "transformed", "into", "qnames" ]
python
train
shapiromatron/bmds
bmds/reporter.py
https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/reporter.py#L312-L319
def _get_session_for_table(self, base_session): """ Only present session for modeling when doses were dropped if it's succesful; otherwise show the original modeling session. """ if base_session.recommended_model is None and base_session.doses_dropped > 0: return base_session.doses_dropped_sessions[0] return base_session
[ "def", "_get_session_for_table", "(", "self", ",", "base_session", ")", ":", "if", "base_session", ".", "recommended_model", "is", "None", "and", "base_session", ".", "doses_dropped", ">", "0", ":", "return", "base_session", ".", "doses_dropped_sessions", "[", "0"...
Only present session for modeling when doses were dropped if it's succesful; otherwise show the original modeling session.
[ "Only", "present", "session", "for", "modeling", "when", "doses", "were", "dropped", "if", "it", "s", "succesful", ";", "otherwise", "show", "the", "original", "modeling", "session", "." ]
python
train
ethereum/py-evm
eth/chains/base.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L579-L599
def build_block_with_transactions( self, transactions: Tuple[BaseTransaction, ...], parent_header: BlockHeader=None ) -> Tuple[BaseBlock, Tuple[Receipt, ...], Tuple[BaseComputation, ...]]: """ Generate a block with the provided transactions. This does *not* import that block into your chain. If you want this new block in your chain, run :meth:`~import_block` with the result block from this method. :param transactions: an iterable of transactions to insert to the block :param parent_header: parent of the new block -- or canonical head if ``None`` :return: (new block, receipts, computations) """ base_header = self.ensure_header(parent_header) vm = self.get_vm(base_header) new_header, receipts, computations = vm.apply_all_transactions(transactions, base_header) new_block = vm.set_block_transactions(vm.block, new_header, transactions, receipts) return new_block, receipts, computations
[ "def", "build_block_with_transactions", "(", "self", ",", "transactions", ":", "Tuple", "[", "BaseTransaction", ",", "...", "]", ",", "parent_header", ":", "BlockHeader", "=", "None", ")", "->", "Tuple", "[", "BaseBlock", ",", "Tuple", "[", "Receipt", ",", "...
Generate a block with the provided transactions. This does *not* import that block into your chain. If you want this new block in your chain, run :meth:`~import_block` with the result block from this method. :param transactions: an iterable of transactions to insert to the block :param parent_header: parent of the new block -- or canonical head if ``None`` :return: (new block, receipts, computations)
[ "Generate", "a", "block", "with", "the", "provided", "transactions", ".", "This", "does", "*", "not", "*", "import", "that", "block", "into", "your", "chain", ".", "If", "you", "want", "this", "new", "block", "in", "your", "chain", "run", ":", "meth", ...
python
train
foremast/foremast
src/foremast/s3/s3apps.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/s3/s3apps.py#L177-L190
def _put_bucket_encryption(self): """Adds bucket encryption configuration.""" if self.s3props['encryption']['enabled']: encryption_config = {'Rules': [{}]} encryption_config = { 'Rules': self.s3props['encryption']['encryption_rules'] } LOG.debug(encryption_config) _response = self.s3client.put_bucket_encryption(Bucket=self.bucket, ServerSideEncryptionConfiguration=encryption_config) else: _response = self.s3client.delete_bucket_encryption(Bucket=self.bucket) LOG.debug('Response setting up S3 encryption: %s', _response) LOG.info('S3 encryption configuration updated')
[ "def", "_put_bucket_encryption", "(", "self", ")", ":", "if", "self", ".", "s3props", "[", "'encryption'", "]", "[", "'enabled'", "]", ":", "encryption_config", "=", "{", "'Rules'", ":", "[", "{", "}", "]", "}", "encryption_config", "=", "{", "'Rules'", ...
Adds bucket encryption configuration.
[ "Adds", "bucket", "encryption", "configuration", "." ]
python
train
streamlink/streamlink
src/streamlink/utils/url.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/utils/url.py#L66-L97
def update_qsd(url, qsd=None, remove=None): """ Update or remove keys from a query string in a URL :param url: URL to update :param qsd: dict of keys to update, a None value leaves it unchanged :param remove: list of keys to remove, or "*" to remove all note: updated keys are never removed, even if unchanged :return: updated URL """ qsd = qsd or {} remove = remove or [] # parse current query string parsed = urlparse(url) current_qsd = OrderedDict(parse_qsl(parsed.query)) # * removes all possible keys if remove == "*": remove = list(current_qsd.keys()) # remove keys before updating, but leave updated keys untouched for key in remove: if key not in qsd: del current_qsd[key] # and update the query string for key, value in qsd.items(): if value: current_qsd[key] = value return parsed._replace(query=urlencode(current_qsd)).geturl()
[ "def", "update_qsd", "(", "url", ",", "qsd", "=", "None", ",", "remove", "=", "None", ")", ":", "qsd", "=", "qsd", "or", "{", "}", "remove", "=", "remove", "or", "[", "]", "# parse current query string", "parsed", "=", "urlparse", "(", "url", ")", "c...
Update or remove keys from a query string in a URL :param url: URL to update :param qsd: dict of keys to update, a None value leaves it unchanged :param remove: list of keys to remove, or "*" to remove all note: updated keys are never removed, even if unchanged :return: updated URL
[ "Update", "or", "remove", "keys", "from", "a", "query", "string", "in", "a", "URL" ]
python
test
hubo1016/vlcp
vlcp/utils/walkerlib.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/walkerlib.py#L9-L19
def ensure_keys(walk, *keys): """ Use walk to try to retrieve all keys """ all_retrieved = True for k in keys: try: walk(k) except WalkKeyNotRetrieved: all_retrieved = False return all_retrieved
[ "def", "ensure_keys", "(", "walk", ",", "*", "keys", ")", ":", "all_retrieved", "=", "True", "for", "k", "in", "keys", ":", "try", ":", "walk", "(", "k", ")", "except", "WalkKeyNotRetrieved", ":", "all_retrieved", "=", "False", "return", "all_retrieved" ]
Use walk to try to retrieve all keys
[ "Use", "walk", "to", "try", "to", "retrieve", "all", "keys" ]
python
train