repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Yelp/detect-secrets
detect_secrets/core/secrets_collection.py
https://github.com/Yelp/detect-secrets/blob/473923ea71f1ac2b5ea1eacc49b98f97967e3d05/detect_secrets/core/secrets_collection.py#L213-L247
def get_secret(self, filename, secret, type_=None):
    """Look up a secret in the collection.

    :type filename: str
    :param filename: the file to search in.

    :type secret: str
    :param secret: secret hash of secret to search for.

    :type type_: str
    :param type_: type of secret, if known.

    :rtype: PotentialSecret|None
    """
    secrets_for_file = self.data.get(filename)
    if secrets_for_file is None:
        return None

    if type_:
        # Fast path: knowing the type lets us build an equivalent key
        # and do a direct hash lookup instead of scanning the set.
        probe = PotentialSecret(type_, filename, secret='will be overriden')
        probe.secret_hash = secret
        return secrets_for_file.get(probe)

    # Slow path: without the type we must scan for a matching hash.
    for candidate in secrets_for_file:
        if candidate.secret_hash == secret:
            return candidate
    return None
[ "def", "get_secret", "(", "self", ",", "filename", ",", "secret", ",", "type_", "=", "None", ")", ":", "if", "filename", "not", "in", "self", ".", "data", ":", "return", "None", "if", "type_", ":", "# Optimized lookup, because we know the type of secret", "# (...
Checks to see whether a secret is found in the collection. :type filename: str :param filename: the file to search in. :type secret: str :param secret: secret hash of secret to search for. :type type_: str :param type_: type of secret, if known. :rtype: PotentialSecret|None
[ "Checks", "to", "see", "whether", "a", "secret", "is", "found", "in", "the", "collection", "." ]
python
train
QuantEcon/QuantEcon.py
quantecon/game_theory/repeated_game.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/game_theory/repeated_game.py#L178-L202
def _best_dev_gains(rpg): """ Calculate the normalized payoff gains from deviating from the current action to the best response for each player. Parameters ---------- rpg : RepeatedGame Two player repeated game. Returns ------- best_dev_gains : tuple(ndarray(float, ndim=2)) The normalized best deviation payoff gain arrays. best_dev_gains[i][ai, a-i] is normalized payoff gain player i can get if originally players are choosing ai and a-i, and player i deviates to the best response action. """ sg, delta = rpg.sg, rpg.delta best_dev_gains = ((1-delta)/delta * (np.max(sg.payoff_arrays[i], 0) - sg.payoff_arrays[i]) for i in range(2)) return tuple(best_dev_gains)
[ "def", "_best_dev_gains", "(", "rpg", ")", ":", "sg", ",", "delta", "=", "rpg", ".", "sg", ",", "rpg", ".", "delta", "best_dev_gains", "=", "(", "(", "1", "-", "delta", ")", "/", "delta", "*", "(", "np", ".", "max", "(", "sg", ".", "payoff_arrays...
Calculate the normalized payoff gains from deviating from the current action to the best response for each player. Parameters ---------- rpg : RepeatedGame Two player repeated game. Returns ------- best_dev_gains : tuple(ndarray(float, ndim=2)) The normalized best deviation payoff gain arrays. best_dev_gains[i][ai, a-i] is normalized payoff gain player i can get if originally players are choosing ai and a-i, and player i deviates to the best response action.
[ "Calculate", "the", "normalized", "payoff", "gains", "from", "deviating", "from", "the", "current", "action", "to", "the", "best", "response", "for", "each", "player", "." ]
python
train
timofurrer/w1thermsensor
w1thermsensor/cli.py
https://github.com/timofurrer/w1thermsensor/blob/8ac4fbb85e0c247dbb39e8b178cca0a975adc332/w1thermsensor/cli.py#L65-L86
def ls(types, as_json):  # pylint: disable=invalid-name
    """List all available sensors"""
    sensors = W1ThermSensor.get_available_sensors(types)

    if not as_json:
        # Human-readable listing, one numbered line per sensor.
        click.echo(
            "Found {0} sensors:".format(click.style(str(len(sensors)), bold=True))
        )
        for index, sensor in enumerate(sensors, 1):
            click.echo(
                " {0}. HWID: {1} Type: {2}".format(
                    click.style(str(index), bold=True),
                    click.style(sensor.id, bold=True),
                    click.style(sensor.type_name, bold=True),
                )
            )
        return

    # Machine-readable listing as a JSON array of sensor records.
    payload = [
        {"id": index, "hwid": sensor.id, "type": sensor.type_name}
        for index, sensor in enumerate(sensors, 1)
    ]
    click.echo(json.dumps(payload, indent=4, sort_keys=True))
[ "def", "ls", "(", "types", ",", "as_json", ")", ":", "# pylint: disable=invalid-name", "sensors", "=", "W1ThermSensor", ".", "get_available_sensors", "(", "types", ")", "if", "as_json", ":", "data", "=", "[", "{", "\"id\"", ":", "i", ",", "\"hwid\"", ":", ...
List all available sensors
[ "List", "all", "available", "sensors" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/envs/env_problem_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/envs/env_problem_utils.py#L30-L46
def play_env_problem_randomly(env_problem, num_steps):
    """Plays the env problem by randomly sampling actions for `num_steps`."""
    # Start every environment in the batch from a fresh state.
    env_problem.reset()

    for _ in range(num_steps):
        # One random action per environment, stacked into a batch.
        actions = np.stack(
            [env_problem.action_space.sample()
             for _ in range(env_problem.batch_size)])

        # Step the batch; observations are tracked inside `env_problem`.
        _, _, dones, _ = env_problem.step(actions)

        # Restart only the environments that finished on this step.
        env_problem.reset(indices=done_indices(dones))
[ "def", "play_env_problem_randomly", "(", "env_problem", ",", "num_steps", ")", ":", "# Reset all environments.", "env_problem", ".", "reset", "(", ")", "# Play all environments, sampling random actions each time.", "for", "_", "in", "range", "(", "num_steps", ")", ":", ...
Plays the env problem by randomly sampling actions for `num_steps`.
[ "Plays", "the", "env", "problem", "by", "randomly", "sampling", "actions", "for", "num_steps", "." ]
python
train
alphatwirl/alphatwirl
alphatwirl/parallel/build.py
https://github.com/alphatwirl/alphatwirl/blob/5138eeba6cd8a334ba52d6c2c022b33c61e3ba38/alphatwirl/parallel/build.py#L14-L66
def build_parallel(parallel_mode, quiet=True, processes=4,
                   user_modules=None, dispatcher_options=None):
    """initializes `Parallel`

    Parameters
    ----------
    parallel_mode : str
        "multiprocessing" (default), "htcondor" or "subprocess"
    quiet : bool, optional
        if True, progress bars will not be shown in the
        "multiprocessing" mode.
    processes : int, optional
        The number of processes when ``parallel_mode`` is
        "multiprocessing"
    user_modules : list, optional
        The names of modules to be sent to worker nodes when
        parallel_mode is "htcondor"
    dispatcher_options : dict, optional
        Options to dispatcher

    Returns
    -------
    parallel
        an instance of the class `Parallel`

    """
    user_modules = [] if user_modules is None else user_modules
    dispatcher_options = {} if dispatcher_options is None else dispatcher_options

    dispatchers = ('subprocess', 'htcondor')
    parallel_modes = ('multiprocessing', ) + dispatchers
    default_parallel_mode = 'multiprocessing'

    if parallel_mode not in parallel_modes:
        # Unknown mode: warn and fall back rather than failing outright.
        logger = logging.getLogger(__name__)
        logger.warning('unknown parallel_mode "{}", use default "{}"'.format(
            parallel_mode, default_parallel_mode
        ))
        parallel_mode = default_parallel_mode

    if parallel_mode == 'multiprocessing':
        if quiet:
            atpbar.disable()
        return _build_parallel_multiprocessing(processes=processes)

    # Remaining modes ("subprocess", "htcondor") go through the dropbox
    # dispatcher builder.
    return _build_parallel_dropbox(
        parallel_mode=parallel_mode,
        user_modules=user_modules,
        dispatcher_options=dispatcher_options
    )
[ "def", "build_parallel", "(", "parallel_mode", ",", "quiet", "=", "True", ",", "processes", "=", "4", ",", "user_modules", "=", "None", ",", "dispatcher_options", "=", "None", ")", ":", "if", "user_modules", "is", "None", ":", "user_modules", "=", "[", "]"...
initializes `Parallel` Parameters ---------- parallel_mode : str "multiprocessing" (default), "htcondor" or "subprocess" quiet : bool, optional if True, progress bars will not be shown in the "multiprocessing" mode. process : int, optional The number of processes when ``parallel_mode`` is "multiprocessing" user_modules : list, optional The names of modules to be sent to worker nodes when parallel_mode is "htcondor" dispatcher_options : dict, optional Options to dispatcher Returns ------- parallel an instance of the class `Parallel`
[ "initializes", "Parallel" ]
python
valid
EasyPost/pystalk
pystalk/client.py
https://github.com/EasyPost/pystalk/blob/96759ad1fda264b9897ee5346eef7926892a3a4c/pystalk/client.py#L536-L559
def using(self, tube):
    """Context-manager to insert jobs into a specific tube

    :param tube: Tube to insert to

    Yields out an instance of :class:`BeanstalkInsertingProxy` to insert
    items into that tube

    .. seealso::

       :func:`use()`
          Change the default tube

       :func:`put_job()`
          Put a job into whatever the current tube is

       :func:`put_job_into()`
          Put a job into a specific tube
    """
    try:
        # Remember whichever tube was active so it can be restored.
        previous_tube = self.current_tube
        self.use(tube)
        yield BeanstalkInsertingProxy(self, tube)
    finally:
        # Always switch back, even if the body raised.
        self.use(previous_tube)
[ "def", "using", "(", "self", ",", "tube", ")", ":", "try", ":", "current_tube", "=", "self", ".", "current_tube", "self", ".", "use", "(", "tube", ")", "yield", "BeanstalkInsertingProxy", "(", "self", ",", "tube", ")", "finally", ":", "self", ".", "use...
Context-manager to insert jobs into a specific tube :param tube: Tube to insert to Yields out an instance of :class:`BeanstalkInsertingProxy` to insert items into that tube .. seealso:: :func:`use()` Change the default tube :func:`put_job()` Put a job into whatever the current tube is :func:`put_job_into()` Put a job into a specific tube
[ "Context", "-", "manager", "to", "insert", "jobs", "into", "a", "specific", "tube" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_speech.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_speech.py#L27-L42
def kill_speech_dispatcher(self):
    '''Kill any running speech dispatcher process (best effort).

    Looks for the dispatcher pid file under ``$HOME/.speech-dispatcher``
    and, if the recorded process exists, sends it SIGINT. All errors
    (stale pid file, permissions, parse failures) are deliberately
    ignored: this is opportunistic cleanup, not a hard requirement.
    '''
    if 'HOME' not in os.environ:
        return
    pidpath = os.path.join(os.environ['HOME'], '.speech-dispatcher',
                           'pid', 'speech-dispatcher.pid')
    if not os.path.exists(pidpath):
        return
    try:
        import signal
        # BUGFIX: close the pid file instead of leaking the handle.
        with open(pidpath) as pidfile:
            pid = int(pidfile.read())
        # os.kill(pid, 0) probes for process existence without sending
        # a signal; it returns None when the process exists.
        if pid > 1 and os.kill(pid, 0) is None:
            print("Killing speech dispatcher pid %u" % pid)
            os.kill(pid, signal.SIGINT)
            time.sleep(1)
    except Exception:
        # Best effort only; swallow all failures as the original did.
        pass
[ "def", "kill_speech_dispatcher", "(", "self", ")", ":", "if", "not", "'HOME'", "in", "os", ".", "environ", ":", "return", "pidpath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "environ", "[", "'HOME'", "]", ",", "'.speech-dispatcher'", ",", "...
kill speech dispatcher processs
[ "kill", "speech", "dispatcher", "processs" ]
python
train
palantir/typedjsonrpc
contrib/multi-module-example/typedjsonrpc_example/valid.py
https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/contrib/multi-module-example/typedjsonrpc_example/valid.py#L22-L36
def histogram(data):
    """Returns a histogram of your data.

    :param data: The data to histogram
    :type data: list[object]
    :return: The histogram
    :rtype: dict[object, int]
    """
    counts = {}
    for item in data:
        # Missing keys start from zero via dict.get.
        counts[item] = counts.get(item, 0) + 1
    return counts
[ "def", "histogram", "(", "data", ")", ":", "ret", "=", "{", "}", "for", "datum", "in", "data", ":", "if", "datum", "in", "ret", ":", "ret", "[", "datum", "]", "+=", "1", "else", ":", "ret", "[", "datum", "]", "=", "1", "return", "ret" ]
Returns a histogram of your data. :param data: The data to histogram :type data: list[object] :return: The histogram :rtype: dict[object, int]
[ "Returns", "a", "histogram", "of", "your", "data", "." ]
python
train
marl/jams
jams/display.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/display.py#L32-L61
def pprint_jobject(obj, **kwargs):
    '''Pretty-print a jobject.

    Parameters
    ----------
    obj : jams.JObject

    kwargs
        additional parameters to `json.dumps`

    Returns
    -------
    string
        A simplified display of `obj` contents.
    '''
    # Drop empty/falsy fields before serializing.
    compact = {key: value
               for key, value in six.iteritems(obj.__json__)
               if value}

    rendered = json.dumps(compact, **kwargs)

    # Strip JSON punctuation ({, }, ") to get a plain-text display.
    rendered = re.sub(r'[{}"]', '', rendered)
    # Remove trailing commas at line ends.
    rendered = re.sub(r',\n', '\n', rendered)
    # NOTE(review): without re.MULTILINE this only matches a fully empty
    # string, not blank interior lines — preserved as-is to keep behavior.
    rendered = re.sub(r'^\s*$', '', rendered)

    return rendered
[ "def", "pprint_jobject", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "obj_simple", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "obj", ".", "__json__", ")", "if", "v", "}", "string", "=", "json", ".", "d...
Pretty-print a jobject. Parameters ---------- obj : jams.JObject kwargs additional parameters to `json.dumps` Returns ------- string A simplified display of `obj` contents.
[ "Pretty", "-", "print", "a", "jobject", "." ]
python
valid
stanfordnlp/stanza
stanza/monitoring/trigger.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/monitoring/trigger.py#L124-L131
def slope(self):
    """
    :return: the estimated slope for points in the current window
    """
    xs = range(self.window_size)
    ys = self.vals
    # Fit a degree-1 polynomial; the leading coefficient is the slope
    # (the intercept is discarded).
    coefficients = np.polyfit(xs, ys, 1)
    return coefficients[0]
[ "def", "slope", "(", "self", ")", ":", "x", "=", "range", "(", "self", ".", "window_size", ")", "y", "=", "self", ".", "vals", "slope", ",", "bias", "=", "np", ".", "polyfit", "(", "x", ",", "y", ",", "1", ")", "return", "slope" ]
:return: the esitmated slope for points in the current window
[ ":", "return", ":", "the", "esitmated", "slope", "for", "points", "in", "the", "current", "window" ]
python
train
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3407-L3429
def read_axes(self, where, **kwargs):
    """create and return the axes sniffed from the table: return boolean
    for success
    """
    # Refuse to read data written by an incompatible version.
    self.validate_version(where)

    # Bail out if the data kind cannot be inferred from the table.
    if not self.infer_axes():
        return False

    # Build the selection from the `where` criteria and materialize it.
    self.selection = Selection(self, where=where, **kwargs)
    values = self.selection.select()

    # Convert each axis back to pandas form using the stored metadata.
    for axis in self.axes:
        axis.set_info(self.info)
        axis.convert(values, nan_rep=self.nan_rep, encoding=self.encoding,
                     errors=self.errors)

    return True
[ "def", "read_axes", "(", "self", ",", "where", ",", "*", "*", "kwargs", ")", ":", "# validate the version", "self", ".", "validate_version", "(", "where", ")", "# infer the data kind", "if", "not", "self", ".", "infer_axes", "(", ")", ":", "return", "False",...
create and return the axes sniffed from the table: return boolean for success
[ "create", "and", "return", "the", "axes", "sniffed", "from", "the", "table", ":", "return", "boolean", "for", "success" ]
python
train
osrg/ryu
ryu/services/protocols/bgp/speaker.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/speaker.py#L300-L357
def _data_received(self, next_bytes):
    """Maintains buffer of bytes received from peer and extracts bgp
    message from this buffer if enough data is received.

    Validates bgp message marker, length, type and data and constructs
    appropriate bgp message instance and calls handler.

    :Parameters:
        - `next_bytes`: next set of bytes received from peer.
    """
    # Accumulate everything received so far; a message may span reads.
    self._recv_buff += next_bytes

    # Drain as many complete messages as the buffer holds.
    while True:
        # Need at least a full fixed-size header before parsing.
        if len(self._recv_buff) < BGP_MIN_MSG_LEN:
            return

        # Split the header into marker, total length and message type.
        auth, length, ptype = BgpProtocol.parse_msg_header(
            self._recv_buff[:BGP_MIN_MSG_LEN])

        # Only the default marker is valid since no authentication is
        # supported.
        if auth != BgpProtocol.MESSAGE_MARKER:
            LOG.error('Invalid message marker received: %s', auth)
            raise bgp.NotSync()

        # Per-type length validation (RFC 4271):
        #  - every message fits within [BGP_MIN_MSG_LEN, BGP_MAX_MSG_LEN]
        #  - OPEN is at least 29 octets (header included)
        #  - KEEPALIVE is exactly the 19-octet header
        #  - UPDATE is at least 23 octets
        bad_length = (
            length < BGP_MIN_MSG_LEN or length > BGP_MAX_MSG_LEN
            or (ptype == BGP_MSG_OPEN and length < BGPOpen._MIN_LEN)
            or (ptype == BGP_MSG_KEEPALIVE
                and length != BGPKeepAlive._MIN_LEN)
            or (ptype == BGP_MSG_UPDATE and length < BGPUpdate._MIN_LEN)
        )
        if bad_length:
            raise bgp.BadLen(ptype, length)

        # Only part of the body has arrived; wait for more bytes.
        if len(self._recv_buff) < length:
            return

        # Parse one full message, keep the remainder, and dispatch.
        msg, _, rest = BGPMessage.parser(self._recv_buff)
        self._recv_buff = rest
        self._handle_msg(msg)
[ "def", "_data_received", "(", "self", ",", "next_bytes", ")", ":", "# Append buffer with received bytes.", "self", ".", "_recv_buff", "+=", "next_bytes", "while", "True", ":", "# If current buffer size is less then minimum bgp message size, we", "# return as we do not have a comp...
Maintains buffer of bytes received from peer and extracts bgp message from this buffer if enough data is received. Validates bgp message marker, length, type and data and constructs appropriate bgp message instance and calls handler. :Parameters: - `next_bytes`: next set of bytes received from peer.
[ "Maintains", "buffer", "of", "bytes", "received", "from", "peer", "and", "extracts", "bgp", "message", "from", "this", "buffer", "if", "enough", "data", "is", "received", "." ]
python
train
qacafe/cdrouter.py
cdrouter/packages.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/packages.py#L241-L253
def create(self, resource):
    """Create a new package.

    :param resource: :class:`packages.Package <packages.Package>` object
    :return: :class:`packages.Package <packages.Package>` object
    :rtype: packages.Package
    """
    # Server-managed fields must not be sent on creation.
    write_schema = PackageSchema(
        exclude=('id', 'created', 'updated', 'test_count', 'agent_id',
                 'result_id'))
    payload = self.service.encode(write_schema, resource)

    resp = self.service.create(self.base, payload)
    # Decode the full record (including server-managed fields) back.
    return self.service.decode(PackageSchema(), resp)
[ "def", "create", "(", "self", ",", "resource", ")", ":", "schema", "=", "PackageSchema", "(", "exclude", "=", "(", "'id'", ",", "'created'", ",", "'updated'", ",", "'test_count'", ",", "'agent_id'", ",", "'result_id'", ")", ")", "json", "=", "self", ".",...
Create a new package. :param resource: :class:`packages.Package <packages.Package>` object :return: :class:`packages.Package <packages.Package>` object :rtype: packages.Package
[ "Create", "a", "new", "package", "." ]
python
train
saltstack/salt
salt/modules/elasticsearch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/elasticsearch.py#L754-L773
def index_template_delete(name, hosts=None, profile=None):
    '''
    Delete an index template (type) along with its data

    name
        Index template name

    CLI example::

        salt myminion elasticsearch.index_template_delete testindex_templ user
    '''
    es = _get_instance(hosts, profile)

    try:
        result = es.indices.delete_template(name=name)
    except elasticsearch.exceptions.NotFoundError:
        # A missing template already satisfies the delete request.
        return True
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot delete template {0}, server returned code {1} with message {2}".format(name, e.status_code, e.error))
    return result.get('acknowledged', False)
[ "def", "index_template_delete", "(", "name", ",", "hosts", "=", "None", ",", "profile", "=", "None", ")", ":", "es", "=", "_get_instance", "(", "hosts", ",", "profile", ")", "try", ":", "result", "=", "es", ".", "indices", ".", "delete_template", "(", ...
Delete an index template (type) along with its data name Index template name CLI example:: salt myminion elasticsearch.index_template_delete testindex_templ user
[ "Delete", "an", "index", "template", "(", "type", ")", "along", "with", "its", "data" ]
python
train
edx/opaque-keys
opaque_keys/edx/locator.py
https://github.com/edx/opaque-keys/blob/9807168660c12e0551c8fdd58fd1bc6b0bcb0a54/opaque_keys/edx/locator.py#L946-L955
def _to_deprecated_string(self): """ Returns an old-style location, represented as: i4x://org/course/category/name[@revision] # Revision is optional """ # pylint: disable=missing-format-attribute url = u"{0.DEPRECATED_TAG}://{0.course_key.org}/{0.course_key.course}/{0.block_type}/{0.block_id}".format(self) if self.course_key.branch: url += u"@{rev}".format(rev=self.course_key.branch) return url
[ "def", "_to_deprecated_string", "(", "self", ")", ":", "# pylint: disable=missing-format-attribute", "url", "=", "u\"{0.DEPRECATED_TAG}://{0.course_key.org}/{0.course_key.course}/{0.block_type}/{0.block_id}\"", ".", "format", "(", "self", ")", "if", "self", ".", "course_key", "...
Returns an old-style location, represented as: i4x://org/course/category/name[@revision] # Revision is optional
[ "Returns", "an", "old", "-", "style", "location", "represented", "as", ":", "i4x", ":", "//", "org", "/", "course", "/", "category", "/", "name", "[" ]
python
train
jaumebonet/libconfig
libconfig/config.py
https://github.com/jaumebonet/libconfig/blob/9b34cefcbaf9a326e3f3cd517896c2933cf61a3b/libconfig/config.py#L72-L89
def unregister_option(self, key, subkey):
    """Removes an option from the manager.

    :param str key: First identifier of the option.
    :param str subkey: Second identifier of the option.

    raise:
        :NotRegisteredError: If ``key`` or ``subkey`` do not define any
            option.
    """
    # A closed manager silently ignores modification requests.
    if not self.open:
        return

    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)

    # Keep every row except the (key, subkey) pair being removed.
    keep_mask = ~((self.gc['k1'] == key) & (self.gc['k2'] == subkey))
    self.gc = self.gc[keep_mask]
[ "def", "unregister_option", "(", "self", ",", "key", ",", "subkey", ")", ":", "if", "not", "self", ".", "open", ":", "return", "key", ",", "subkey", "=", "_lower_keys", "(", "key", ",", "subkey", ")", "_entry_must_exist", "(", "self", ".", "gc", ",", ...
Removes an option from the manager. :param str key: First identifier of the option. :param str subkey: Second identifier of the option. raise: :NotRegisteredError: If ``key`` or ``subkey`` do not define any option.
[ "Removes", "an", "option", "from", "the", "manager", "." ]
python
train
rosenbrockc/acorn
acorn/logging/decoration.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/decoration.py#L818-L849
def _safe_setattr(obj, name, value):
    """Safely sets the attribute of the specified object. This includes
    not setting attributes for final objects and setting __func__ for
    instancemethod typed objects.

    Args:
        obj: object to set an attribute for.
        name (str): new attribute name.
        value: new attribute value.

    Returns:
        bool: True if the set attribute was successful.
    """
    okey = id(obj)
    # Skip objects that previously failed or are marked final.
    if okey in _set_failures or okey in _final_objs:
        return False

    import inspect
    try:
        if inspect.ismethod(obj):
            # Bound methods store attributes on the underlying function.
            setattr(obj.__func__, name, value)
        elif isinstance(obj, dict):  # pragma: no cover
            obj[name] = value
        else:
            setattr(obj, name, value)
        return True
    except (TypeError, AttributeError):  # pragma: no cover
        # Remember the failure so this object is never retried.
        _set_failures.append(okey)
        msg.warn("Failed {}:{} attribute set on {}.".format(name, value, obj))
        return False
[ "def", "_safe_setattr", "(", "obj", ",", "name", ",", "value", ")", ":", "okey", "=", "id", "(", "obj", ")", "if", "okey", "in", "_set_failures", "or", "okey", "in", "_final_objs", ":", "return", "False", "import", "inspect", "try", ":", "if", "inspect...
Safely sets the attribute of the specified object. This includes not setting attributes for final objects and setting __func__ for instancemethod typed objects. Args: obj: object to set an attribute for. name (str): new attribute name. value: new attribute value. Returns: bool: True if the set attribute was successful.
[ "Safely", "sets", "the", "attribute", "of", "the", "specified", "object", ".", "This", "includes", "not", "setting", "attributes", "for", "final", "objects", "and", "setting", "__func__", "for", "instancemethod", "typed", "objects", "." ]
python
train
elifesciences/elife-tools
elifetools/parseJATS.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L2303-L2309
def body_block_paragraph_content(text):
    "for formatting of simple paragraphs of text only, and check if it is all whitespace"
    tag_content = OrderedDict()
    # Only non-empty text produces a paragraph block; empty or falsy
    # input yields an empty mapping.
    if text:
        tag_content["type"] = "paragraph"
        tag_content["text"] = clean_whitespace(text)
    return tag_content
[ "def", "body_block_paragraph_content", "(", "text", ")", ":", "tag_content", "=", "OrderedDict", "(", ")", "if", "text", "and", "text", "!=", "''", ":", "tag_content", "[", "\"type\"", "]", "=", "\"paragraph\"", "tag_content", "[", "\"text\"", "]", "=", "cle...
for formatting of simple paragraphs of text only, and check if it is all whitespace
[ "for", "formatting", "of", "simple", "paragraphs", "of", "text", "only", "and", "check", "if", "it", "is", "all", "whitespace" ]
python
train
tensorflow/lucid
lucid/misc/io/serialize_array.py
https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/serialize_array.py#L31-L77
def _normalize_array(array, domain=(0, 1)): """Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image """ # first copy the input so we're never mutating the user's data array = np.array(array) # squeeze helps both with batch=1 and B/W and PIL's mode inference array = np.squeeze(array) assert len(array.shape) <= 3 assert np.issubdtype(array.dtype, np.number) assert not np.isnan(array).any() low, high = np.min(array), np.max(array) if domain is None: message = "No domain specified, normalizing from measured (~%.2f, ~%.2f)" log.debug(message, low, high) domain = (low, high) # clip values if domain was specified and array contains values outside of it if low < domain[0] or high > domain[1]: message = "Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f})." log.info(message.format(low, high, domain[0], domain[1])) array = array.clip(*domain) min_value, max_value = np.iinfo(np.uint8).min, np.iinfo(np.uint8).max # 0, 255 # convert signed to unsigned if needed if np.issubdtype(array.dtype, np.inexact): offset = domain[0] if offset != 0: array -= offset log.debug("Converting inexact array by subtracting -%.2f.", offset) scalar = max_value / (domain[1] - domain[0]) if scalar != 1: array *= scalar log.debug("Converting inexact array by scaling by %.2f.", scalar) return array.clip(min_value, max_value).astype(np.uint8)
[ "def", "_normalize_array", "(", "array", ",", "domain", "=", "(", "0", ",", "1", ")", ")", ":", "# first copy the input so we're never mutating the user's data", "array", "=", "np", ".", "array", "(", "array", ")", "# squeeze helps both with batch=1 and B/W and PIL's mo...
Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image
[ "Given", "an", "arbitrary", "rank", "-", "3", "NumPy", "array", "produce", "one", "representing", "an", "image", "." ]
python
train
combust/mleap
python/mleap/sklearn/preprocessing/data.py
https://github.com/combust/mleap/blob/dc6b79db03ec27a0ba08b289842551e73d517ab3/python/mleap/sklearn/preprocessing/data.py#L907-L926
def transform(self, y):
    """
    Transform features per specified math function.
    :param y:
    :return:
    """
    # Dispatch table replaces the if/elif chain; each entry is the
    # NumPy ufunc matching a supported transform name.
    operations = {
        'log': np.log,
        'exp': np.exp,
        'sqrt': np.sqrt,
        'sin': np.sin,
        'cos': np.cos,
        'tan': np.tan,
        'abs': np.abs,
    }
    func = operations.get(self.transform_type)
    if func is not None:
        return func(y)
    # Unknown transform types fall through to None, as before.
    return None
[ "def", "transform", "(", "self", ",", "y", ")", ":", "if", "self", ".", "transform_type", "==", "'log'", ":", "return", "np", ".", "log", "(", "y", ")", "elif", "self", ".", "transform_type", "==", "'exp'", ":", "return", "np", ".", "exp", "(", "y"...
Transform features per specified math function. :param y: :return:
[ "Transform", "features", "per", "specified", "math", "function", ".", ":", "param", "y", ":", ":", "return", ":" ]
python
train
mabuchilab/QNET
src/qnet/printing/base.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/base.py#L99-L112
def _isinstance(expr, classname): """Check whether `expr` is an instance of the class with name `classname` This is like the builtin `isinstance`, but it take the `classname` a string, instead of the class directly. Useful for when we don't want to import the class for which we want to check (also, remember that printer choose rendering method based on the class name, so this is totally ok) """ for cls in type(expr).__mro__: if cls.__name__ == classname: return True return False
[ "def", "_isinstance", "(", "expr", ",", "classname", ")", ":", "for", "cls", "in", "type", "(", "expr", ")", ".", "__mro__", ":", "if", "cls", ".", "__name__", "==", "classname", ":", "return", "True", "return", "False" ]
Check whether `expr` is an instance of the class with name `classname` This is like the builtin `isinstance`, but it take the `classname` a string, instead of the class directly. Useful for when we don't want to import the class for which we want to check (also, remember that printer choose rendering method based on the class name, so this is totally ok)
[ "Check", "whether", "expr", "is", "an", "instance", "of", "the", "class", "with", "name", "classname" ]
python
train
DataONEorg/d1_python
lib_client/src/d1_client/solr_client.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/solr_client.py#L185-L192
def get_ids(self, start=0, rows=1000, **query_dict):
    """Retrieve a list of identifiers for documents matching the query."""
    response = self._get_query(start=start, rows=rows, **query_dict)['response']
    return {
        'matches': response['numFound'],
        'start': start,
        'ids': [doc['id'] for doc in response['docs']],
    }
[ "def", "get_ids", "(", "self", ",", "start", "=", "0", ",", "rows", "=", "1000", ",", "*", "*", "query_dict", ")", ":", "resp_dict", "=", "self", ".", "_get_query", "(", "start", "=", "start", ",", "rows", "=", "rows", ",", "*", "*", "query_dict", ...
Retrieve a list of identifiers for documents matching the query.
[ "Retrieve", "a", "list", "of", "identifiers", "for", "documents", "matching", "the", "query", "." ]
python
train
learningequality/ricecooker
ricecooker/utils/tokens.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/tokens.py#L45-L55
def prompt_token(domain): """ Prompt user to enter content curation server authentication token. Args: domain (str): domain to authenticate user Returns: token """ token = input("\nEnter content curation server token ('q' to quit): ").lower() if token == 'q': sys.exit() else: return token.strip()
[ "def", "prompt_token", "(", "domain", ")", ":", "token", "=", "input", "(", "\"\\nEnter content curation server token ('q' to quit): \"", ")", ".", "lower", "(", ")", "if", "token", "==", "'q'", ":", "sys", ".", "exit", "(", ")", "else", ":", "return", "toke...
Prompt user to enter content curation server authentication token. Args: domain (str): domain to authenticate user Returns: token
[ "Prompt", "user", "to", "enter", "content", "curation", "server", "authentication", "token", ".", "Args", ":", "domain", "(", "str", ")", ":", "domain", "to", "authenticate", "user", "Returns", ":", "token" ]
python
train
ponty/pyscreenshot
pyscreenshot/__init__.py
https://github.com/ponty/pyscreenshot/blob/51010195cbb5361dcd4b414ff132b87244c9e1cb/pyscreenshot/__init__.py#L70-L81
def grab_to_file(filename, childprocess=None, backend=None): """Copy the contents of the screen to a file. Internal function! Use PIL.Image.save() for saving image to file. :param filename: file for saving :param childprocess: see :py:func:`grab` :param backend: see :py:func:`grab` """ if childprocess is None: childprocess = childprocess_default_value() return _grab(to_file=True, childprocess=childprocess, backend=backend, filename=filename)
[ "def", "grab_to_file", "(", "filename", ",", "childprocess", "=", "None", ",", "backend", "=", "None", ")", ":", "if", "childprocess", "is", "None", ":", "childprocess", "=", "childprocess_default_value", "(", ")", "return", "_grab", "(", "to_file", "=", "Tr...
Copy the contents of the screen to a file. Internal function! Use PIL.Image.save() for saving image to file. :param filename: file for saving :param childprocess: see :py:func:`grab` :param backend: see :py:func:`grab`
[ "Copy", "the", "contents", "of", "the", "screen", "to", "a", "file", ".", "Internal", "function!", "Use", "PIL", ".", "Image", ".", "save", "()", "for", "saving", "image", "to", "file", "." ]
python
valid
pebble/libpebble2
libpebble2/protocol/base/__init__.py
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/protocol/base/__init__.py#L112-L141
def serialise(self, default_endianness=None): """ Serialise a message, without including any framing. :param default_endianness: The default endianness, unless overridden by the fields or class metadata. Should usually be left at ``None``. Otherwise, use ``'<'`` for little endian and ``'>'`` for big endian. :type default_endianness: str :return: The serialised message. :rtype: bytes """ # Figure out an endianness. endianness = (default_endianness or DEFAULT_ENDIANNESS) if hasattr(self, '_Meta'): endianness = self._Meta.get('endianness', endianness) inferred_fields = set() for k, v in iteritems(self._type_mapping): inferred_fields |= {x._name for x in v.dependent_fields()} for field in inferred_fields: setattr(self, field, None) # Some fields want to manipulate other fields that appear before them (e.g. Unions) for k, v in iteritems(self._type_mapping): v.prepare(self, getattr(self, k)) message = b'' for k, v in iteritems(self._type_mapping): message += v.value_to_bytes(self, getattr(self, k), default_endianness=endianness) return message
[ "def", "serialise", "(", "self", ",", "default_endianness", "=", "None", ")", ":", "# Figure out an endianness.", "endianness", "=", "(", "default_endianness", "or", "DEFAULT_ENDIANNESS", ")", "if", "hasattr", "(", "self", ",", "'_Meta'", ")", ":", "endianness", ...
Serialise a message, without including any framing. :param default_endianness: The default endianness, unless overridden by the fields or class metadata. Should usually be left at ``None``. Otherwise, use ``'<'`` for little endian and ``'>'`` for big endian. :type default_endianness: str :return: The serialised message. :rtype: bytes
[ "Serialise", "a", "message", "without", "including", "any", "framing", "." ]
python
train
peterldowns/djoauth2
djoauth2/authorization.py
https://github.com/peterldowns/djoauth2/blob/151c7619d1d7a91d720397cfecf3a29fcc9747a9/djoauth2/authorization.py#L359-L416
def make_authorization_endpoint(missing_redirect_uri, authorization_endpoint_uri, authorization_template_name): """ Returns a endpoint that handles OAuth authorization requests. The template described by ``authorization_template_name`` is rendered with a Django ``RequestContext`` with the following variables: * ``form``: a Django ``Form`` that may hold data internal to the ``djoauth2`` application. * ``client``: The :py:class:`djoauth2.models.Client` requesting access to the user's scopes. * ``scopes``: A list of :py:class:`djoauth2.models.Scope`, one for each of the scopes requested by the client. * ``form_action``: The URI to which the form should be submitted -- use this value in the ``action=""`` attribute on a ``<form>`` element. :param missing_redirect_uri: a string, the URI to which to redirect the user when the request is made by a client without a valid redirect URI. :param authorization_endpoint_uri: a string, the URI of this endpoint. Used by the authorization form so that the form is submitted to this same endpoint. :param authorization_template_name: a string, the name of the template to render when handling authorization requests. :rtype: A view function endpoint. 
""" @login_required @require_http_methods(['GET', 'POST']) def authorization_endpoint(request): auth_code_generator = AuthorizationCodeGenerator(missing_redirect_uri) try: auth_code_generator.validate(request) except AuthorizationError as authorization_error: return auth_code_generator.make_error_redirect(authorization_error) if request.method == 'GET': return render(request, authorization_template_name, { 'form': Form(), 'client': auth_code_generator.client, 'scopes': auth_code_generator.valid_scope_objects, 'form_action': update_parameters( authorization_endpoint_uri, auth_code_generator.get_request_uri_parameters(as_dict=True)), }) if request.method == 'POST': form = Form(request) if form.is_valid() and request.POST.get('user_action') == 'Accept': return auth_code_generator.make_success_redirect() else: return auth_code_generator.make_error_redirect() return authorization_endpoint
[ "def", "make_authorization_endpoint", "(", "missing_redirect_uri", ",", "authorization_endpoint_uri", ",", "authorization_template_name", ")", ":", "@", "login_required", "@", "require_http_methods", "(", "[", "'GET'", ",", "'POST'", "]", ")", "def", "authorization_endpoi...
Returns a endpoint that handles OAuth authorization requests. The template described by ``authorization_template_name`` is rendered with a Django ``RequestContext`` with the following variables: * ``form``: a Django ``Form`` that may hold data internal to the ``djoauth2`` application. * ``client``: The :py:class:`djoauth2.models.Client` requesting access to the user's scopes. * ``scopes``: A list of :py:class:`djoauth2.models.Scope`, one for each of the scopes requested by the client. * ``form_action``: The URI to which the form should be submitted -- use this value in the ``action=""`` attribute on a ``<form>`` element. :param missing_redirect_uri: a string, the URI to which to redirect the user when the request is made by a client without a valid redirect URI. :param authorization_endpoint_uri: a string, the URI of this endpoint. Used by the authorization form so that the form is submitted to this same endpoint. :param authorization_template_name: a string, the name of the template to render when handling authorization requests. :rtype: A view function endpoint.
[ "Returns", "a", "endpoint", "that", "handles", "OAuth", "authorization", "requests", "." ]
python
train
python-openxml/python-docx
docx/image/helpers.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/image/helpers.py#L43-L51
def read_long(self, base, offset=0): """ Return the int value of the four bytes at the file position defined by self._base_offset + *base* + *offset*. If *base* is None, the long is read from the current position in the stream. The endian setting of this instance is used to interpret the byte layout of the long. """ fmt = '<L' if self._byte_order is LITTLE_ENDIAN else '>L' return self._read_int(fmt, base, offset)
[ "def", "read_long", "(", "self", ",", "base", ",", "offset", "=", "0", ")", ":", "fmt", "=", "'<L'", "if", "self", ".", "_byte_order", "is", "LITTLE_ENDIAN", "else", "'>L'", "return", "self", ".", "_read_int", "(", "fmt", ",", "base", ",", "offset", ...
Return the int value of the four bytes at the file position defined by self._base_offset + *base* + *offset*. If *base* is None, the long is read from the current position in the stream. The endian setting of this instance is used to interpret the byte layout of the long.
[ "Return", "the", "int", "value", "of", "the", "four", "bytes", "at", "the", "file", "position", "defined", "by", "self", ".", "_base_offset", "+", "*", "base", "*", "+", "*", "offset", "*", ".", "If", "*", "base", "*", "is", "None", "the", "long", ...
python
train
saltstack/salt
salt/modules/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L112-L131
def _binary_replace(old, new): ''' This function does NOT do any diffing, it just checks the old and new files to see if either is binary, and provides an appropriate string noting the difference between the two files. If neither file is binary, an empty string is returned. This function should only be run AFTER it has been determined that the files differ. ''' old_isbin = not __utils__['files.is_text'](old) new_isbin = not __utils__['files.is_text'](new) if any((old_isbin, new_isbin)): if all((old_isbin, new_isbin)): return 'Replace binary file' elif old_isbin: return 'Replace binary file with text file' elif new_isbin: return 'Replace text file with binary file' return ''
[ "def", "_binary_replace", "(", "old", ",", "new", ")", ":", "old_isbin", "=", "not", "__utils__", "[", "'files.is_text'", "]", "(", "old", ")", "new_isbin", "=", "not", "__utils__", "[", "'files.is_text'", "]", "(", "new", ")", "if", "any", "(", "(", "...
This function does NOT do any diffing, it just checks the old and new files to see if either is binary, and provides an appropriate string noting the difference between the two files. If neither file is binary, an empty string is returned. This function should only be run AFTER it has been determined that the files differ.
[ "This", "function", "does", "NOT", "do", "any", "diffing", "it", "just", "checks", "the", "old", "and", "new", "files", "to", "see", "if", "either", "is", "binary", "and", "provides", "an", "appropriate", "string", "noting", "the", "difference", "between", ...
python
train
coinbase/coinbase-python
coinbase/wallet/client.py
https://github.com/coinbase/coinbase-python/blob/497c28158f529e8c7d0228521b4386a890baf088/coinbase/wallet/client.py#L217-L221
def get_buy_price(self, **params): """https://developers.coinbase.com/api/v2#get-buy-price""" currency_pair = params.get('currency_pair', 'BTC-USD') response = self._get('v2', 'prices', currency_pair, 'buy', params=params) return self._make_api_object(response, APIObject)
[ "def", "get_buy_price", "(", "self", ",", "*", "*", "params", ")", ":", "currency_pair", "=", "params", ".", "get", "(", "'currency_pair'", ",", "'BTC-USD'", ")", "response", "=", "self", ".", "_get", "(", "'v2'", ",", "'prices'", ",", "currency_pair", "...
https://developers.coinbase.com/api/v2#get-buy-price
[ "https", ":", "//", "developers", ".", "coinbase", ".", "com", "/", "api", "/", "v2#get", "-", "buy", "-", "price" ]
python
train
inasafe/inasafe
safe/definitions/earthquake.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/earthquake.py#L357-L369
def current_earthquake_model_name(): """Human friendly name for the currently active earthquake fatality model. :returns: Name of the current EQ fatality model as defined in users settings. """ default_earthquake_function = setting( 'earthquake_function', EARTHQUAKE_FUNCTIONS[0]['key'], str) current_function = None for model in EARTHQUAKE_FUNCTIONS: if model['key'] == default_earthquake_function: current_function = model['name'] return current_function
[ "def", "current_earthquake_model_name", "(", ")", ":", "default_earthquake_function", "=", "setting", "(", "'earthquake_function'", ",", "EARTHQUAKE_FUNCTIONS", "[", "0", "]", "[", "'key'", "]", ",", "str", ")", "current_function", "=", "None", "for", "model", "in...
Human friendly name for the currently active earthquake fatality model. :returns: Name of the current EQ fatality model as defined in users settings.
[ "Human", "friendly", "name", "for", "the", "currently", "active", "earthquake", "fatality", "model", "." ]
python
train
KelSolaar/Umbra
umbra/ui/completers.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/completers.py#L91-L102
def language(self, value): """ Setter for **self.__language** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "language", value) self.__language = value
[ "def", "language", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"language\"", ",", "value", ...
Setter for **self.__language** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__language", "**", "attribute", "." ]
python
train
pavlin-policar/openTSNE
openTSNE/initialization.py
https://github.com/pavlin-policar/openTSNE/blob/28513a0d669f2f20e7b971c0c6373dc375f72771/openTSNE/initialization.py#L64-L89
def weighted_mean(X, embedding, neighbors, distances): """Initialize points onto an existing embedding by placing them in the weighted mean position of their nearest neighbors on the reference embedding. Parameters ---------- X: np.ndarray embedding: TSNEEmbedding neighbors: np.ndarray distances: np.ndarray Returns ------- np.ndarray """ n_samples = X.shape[0] n_components = embedding.shape[1] partial_embedding = np.zeros((n_samples, n_components)) for i in range(n_samples): partial_embedding[i] = np.average( embedding[neighbors[i]], axis=0, weights=distances[i], ) return partial_embedding
[ "def", "weighted_mean", "(", "X", ",", "embedding", ",", "neighbors", ",", "distances", ")", ":", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "n_components", "=", "embedding", ".", "shape", "[", "1", "]", "partial_embedding", "=", "np", ".", "z...
Initialize points onto an existing embedding by placing them in the weighted mean position of their nearest neighbors on the reference embedding. Parameters ---------- X: np.ndarray embedding: TSNEEmbedding neighbors: np.ndarray distances: np.ndarray Returns ------- np.ndarray
[ "Initialize", "points", "onto", "an", "existing", "embedding", "by", "placing", "them", "in", "the", "weighted", "mean", "position", "of", "their", "nearest", "neighbors", "on", "the", "reference", "embedding", "." ]
python
train
open511/open511
open511/converter/__init__.py
https://github.com/open511/open511/blob/3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8/open511/converter/__init__.py#L42-L59
def open511_convert(input_doc, output_format, serialize=True, **kwargs): """ Convert an Open511 document between formats. input_doc - either an lxml open511 Element or a deserialized JSON dict output_format - short string name of a valid output format, as listed above """ try: output_format_info = FORMATS[output_format] except KeyError: raise ValueError("Unrecognized output format %s" % output_format) input_doc = ensure_format(input_doc, output_format_info.input_format) result = output_format_info.func(input_doc, **kwargs) if serialize: result = output_format_info.serializer(result) return result
[ "def", "open511_convert", "(", "input_doc", ",", "output_format", ",", "serialize", "=", "True", ",", "*", "*", "kwargs", ")", ":", "try", ":", "output_format_info", "=", "FORMATS", "[", "output_format", "]", "except", "KeyError", ":", "raise", "ValueError", ...
Convert an Open511 document between formats. input_doc - either an lxml open511 Element or a deserialized JSON dict output_format - short string name of a valid output format, as listed above
[ "Convert", "an", "Open511", "document", "between", "formats", ".", "input_doc", "-", "either", "an", "lxml", "open511", "Element", "or", "a", "deserialized", "JSON", "dict", "output_format", "-", "short", "string", "name", "of", "a", "valid", "output", "format...
python
valid
soravux/scoop
scoop/_comm/scooptcp.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/_comm/scooptcp.py#L294-L313
def sendFuture(self, future): """Send a Future to be executed remotely.""" try: if shared.getConst(hash(future.callable), timeout=0): # Enforce name reference passing if already shared future.callable = SharedElementEncapsulation(hash(future.callable)) self.socket.send_multipart([b"TASK", pickle.dumps(future, pickle.HIGHEST_PROTOCOL)]) except pickle.PicklingError as e: # If element not picklable, pickle its name # TODO: use its fully qualified name scoop.logger.warn("Pickling Error: {0}".format(e)) previousCallable = future.callable future.callable = hash(future.callable) self.socket.send_multipart([b"TASK", pickle.dumps(future, pickle.HIGHEST_PROTOCOL)]) future.callable = previousCallable
[ "def", "sendFuture", "(", "self", ",", "future", ")", ":", "try", ":", "if", "shared", ".", "getConst", "(", "hash", "(", "future", ".", "callable", ")", ",", "timeout", "=", "0", ")", ":", "# Enforce name reference passing if already shared\r", "future", "....
Send a Future to be executed remotely.
[ "Send", "a", "Future", "to", "be", "executed", "remotely", "." ]
python
train
eyeseast/python-tablefu
table_fu/__init__.py
https://github.com/eyeseast/python-tablefu/blob/d8761c1f87e3f89d9b89b0b6b9283fc4738b6676/table_fu/__init__.py#L326-L333
def get(self, column_name, default=None): """ Return the Datum for column_name, or default. """ if column_name in self.table.default_columns: index = self.table.default_columns.index(column_name) return Datum(self.cells[index], self.row_num, column_name, self.table) return default
[ "def", "get", "(", "self", ",", "column_name", ",", "default", "=", "None", ")", ":", "if", "column_name", "in", "self", ".", "table", ".", "default_columns", ":", "index", "=", "self", ".", "table", ".", "default_columns", ".", "index", "(", "column_nam...
Return the Datum for column_name, or default.
[ "Return", "the", "Datum", "for", "column_name", "or", "default", "." ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/services.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L1150-L1174
def enable_oozie_ha(self, new_oozie_server_host_ids, new_oozie_server_role_names=None, zk_service_name=None, load_balancer_host_port=None): """ Enable high availability for Oozie. @param new_oozie_server_host_ids: List of IDs of the hosts on which new Oozie Servers will be added. @param new_oozie_server_role_names: List of names of the new Oozie Servers. This is an optional argument, but if provided, it should match the length of host IDs provided. @param zk_service_name: Name of the ZooKeeper service that will be used for Oozie HA. This is an optional parameter if the Oozie to ZooKeeper dependency is already set. @param load_balancer_host_port: Address and port of the load balancer used for Oozie HA. This is an optional parameter if this config is already set. @return: Reference to the submitted command. @since: API v6 """ args = dict( newOozieServerHostIds = new_oozie_server_host_ids, newOozieServerRoleNames = new_oozie_server_role_names, zkServiceName = zk_service_name, loadBalancerHostPort = load_balancer_host_port ) return self._cmd('oozieEnableHa', data=args, api_version=6)
[ "def", "enable_oozie_ha", "(", "self", ",", "new_oozie_server_host_ids", ",", "new_oozie_server_role_names", "=", "None", ",", "zk_service_name", "=", "None", ",", "load_balancer_host_port", "=", "None", ")", ":", "args", "=", "dict", "(", "newOozieServerHostIds", "...
Enable high availability for Oozie. @param new_oozie_server_host_ids: List of IDs of the hosts on which new Oozie Servers will be added. @param new_oozie_server_role_names: List of names of the new Oozie Servers. This is an optional argument, but if provided, it should match the length of host IDs provided. @param zk_service_name: Name of the ZooKeeper service that will be used for Oozie HA. This is an optional parameter if the Oozie to ZooKeeper dependency is already set. @param load_balancer_host_port: Address and port of the load balancer used for Oozie HA. This is an optional parameter if this config is already set. @return: Reference to the submitted command. @since: API v6
[ "Enable", "high", "availability", "for", "Oozie", "." ]
python
train
snipsco/snipsmanagercore
snipsmanagercore/thread_handler.py
https://github.com/snipsco/snipsmanagercore/blob/93eaaa665887f790a30ba86af5ffee394bfd8ede/snipsmanagercore/thread_handler.py#L19-L30
def run(self, target, args=()): """ Run a function in a separate thread. :param target: the function to run. :param args: the parameters to pass to the function. """ run_event = threading.Event() run_event.set() thread = threading.Thread(target=target, args=args + (run_event, )) self.thread_pool.append(thread) self.run_events.append(run_event) thread.start()
[ "def", "run", "(", "self", ",", "target", ",", "args", "=", "(", ")", ")", ":", "run_event", "=", "threading", ".", "Event", "(", ")", "run_event", ".", "set", "(", ")", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "target", ",", ...
Run a function in a separate thread. :param target: the function to run. :param args: the parameters to pass to the function.
[ "Run", "a", "function", "in", "a", "separate", "thread", "." ]
python
train
serge-sans-paille/pythran
pythran/transformations/expand_imports.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/transformations/expand_imports.py#L63-L73
def visit_Import(self, node): """ Register imported modules and usage symbols. """ for alias in node.names: alias_name = tuple(alias.name.split('.')) self.imports.add(alias_name[0]) if alias.asname: self.symbols[alias.asname] = alias_name else: self.symbols[alias_name[0]] = alias_name[:1] self.update = True return None
[ "def", "visit_Import", "(", "self", ",", "node", ")", ":", "for", "alias", "in", "node", ".", "names", ":", "alias_name", "=", "tuple", "(", "alias", ".", "name", ".", "split", "(", "'.'", ")", ")", "self", ".", "imports", ".", "add", "(", "alias_n...
Register imported modules and usage symbols.
[ "Register", "imported", "modules", "and", "usage", "symbols", "." ]
python
train
klmitch/tendril
tendril/framers.py
https://github.com/klmitch/tendril/blob/207102c83e88f8f1fa7ba605ef0aab2ae9078b36/tendril/framers.py#L603-L650
def streamify(self, state, frame): """Prepare frame for output as a COBS-encoded stream.""" # Get the encoding table enc_tab = self._tables[1][:] # Need the special un-trailed block length and code untrail_len, untrail_code = enc_tab.pop(0) # Set up a repository to receive the encoded blocks result = [] # Break the frame into blocks blocks = frame.split('\0') # Now, walk the block list; done carefully because we need # look-ahead in some cases skip = False for i in range(len(blocks)): # Skip handled blocks if skip: skip = False continue blk = blocks[i] # Encode un-trailed blocks while len(blk) >= untrail_len - 1: result.append(untrail_code + blk[:untrail_len - 1]) blk = blk[untrail_len - 1:] # Do we care about look-ahead? if (len(enc_tab) > 1 and i + 1 < len(blocks) and blocks[i + 1] == '' and len(blk) <= 30): # Use the second encoder table tab = enc_tab[1] # Skip the following empty block skip = True else: # Use the regular encoder table tab = enc_tab[0] # Encode the block result.append(tab[len(blk) + 1] + blk) # Stitch together the result blocks return ''.join(result) + '\0'
[ "def", "streamify", "(", "self", ",", "state", ",", "frame", ")", ":", "# Get the encoding table", "enc_tab", "=", "self", ".", "_tables", "[", "1", "]", "[", ":", "]", "# Need the special un-trailed block length and code", "untrail_len", ",", "untrail_code", "=",...
Prepare frame for output as a COBS-encoded stream.
[ "Prepare", "frame", "for", "output", "as", "a", "COBS", "-", "encoded", "stream", "." ]
python
train
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1412-L1431
def _store_parameters(self): """Store startup params and config in datadir/.mlaunch_startup.""" datapath = self.dir out_dict = { 'protocol_version': 2, 'mtools_version': __version__, 'parsed_args': self.args, 'unknown_args': self.unknown_args, 'startup_info': self.startup_info } if not os.path.exists(datapath): os.makedirs(datapath) try: json.dump(out_dict, open(os.path.join(datapath, '.mlaunch_startup'), 'w'), indent=-1) except Exception as ex: print("ERROR STORING Parameters:", ex)
[ "def", "_store_parameters", "(", "self", ")", ":", "datapath", "=", "self", ".", "dir", "out_dict", "=", "{", "'protocol_version'", ":", "2", ",", "'mtools_version'", ":", "__version__", ",", "'parsed_args'", ":", "self", ".", "args", ",", "'unknown_args'", ...
Store startup params and config in datadir/.mlaunch_startup.
[ "Store", "startup", "params", "and", "config", "in", "datadir", "/", ".", "mlaunch_startup", "." ]
python
train
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L9008-L9033
def delete_namespace(self, name, **kwargs): """ delete a Namespace This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespace(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Namespace (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespace_with_http_info(name, **kwargs) else: (data) = self.delete_namespace_with_http_info(name, **kwargs) return data
[ "def", "delete_namespace", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_namespace_with_http_...
delete a Namespace This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespace(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Namespace (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "a", "Namespace", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "delete_n...
python
train
Azure/blobxfer
blobxfer/models/metadata.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/metadata.py#L92-L111
def get_md5_from_metadata(ase): # type: (blobxfer.models.azure.StorageEntity) -> str """Get MD5 from properties or metadata :param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity :rtype: str or None :return: md5 """ # if encryption metadata is present, check for pre-encryption # md5 in blobxfer extensions md5 = None if ase.is_encrypted: try: md5 = ase.encryption_metadata.blobxfer_extensions.\ pre_encrypted_content_md5 except AttributeError: # this can happen if partial metadata is present md5 = None if blobxfer.util.is_none_or_empty(md5): md5 = ase.md5 return md5
[ "def", "get_md5_from_metadata", "(", "ase", ")", ":", "# type: (blobxfer.models.azure.StorageEntity) -> str", "# if encryption metadata is present, check for pre-encryption", "# md5 in blobxfer extensions", "md5", "=", "None", "if", "ase", ".", "is_encrypted", ":", "try", ":", ...
Get MD5 from properties or metadata :param blobxfer.models.azure.StorageEntity ase: Azure Storage Entity :rtype: str or None :return: md5
[ "Get", "MD5", "from", "properties", "or", "metadata", ":", "param", "blobxfer", ".", "models", ".", "azure", ".", "StorageEntity", "ase", ":", "Azure", "Storage", "Entity", ":", "rtype", ":", "str", "or", "None", ":", "return", ":", "md5" ]
python
train
Jarn/jarn.viewdoc
jarn/viewdoc/viewdoc.py
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L509-L520
def list_styles(self): """Print available styles and exit. """ known = sorted(self.defaults.known_styles) if not known: err_exit('No styles', 0) for style in known: if style == self.defaults.default_style: print(style, '(default)') else: print(style) sys.exit(0)
[ "def", "list_styles", "(", "self", ")", ":", "known", "=", "sorted", "(", "self", ".", "defaults", ".", "known_styles", ")", "if", "not", "known", ":", "err_exit", "(", "'No styles'", ",", "0", ")", "for", "style", "in", "known", ":", "if", "style", ...
Print available styles and exit.
[ "Print", "available", "styles", "and", "exit", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L14112-L14137
def twovec(axdef, indexa, plndef, indexp): """ Find the transformation to the right-handed frame having a given vector as a specified axis and having a second given vector lying in a specified coordinate plane. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/twovec_c.html :param axdef: Vector defining a principal axis. :type axdef: 3-Element Array of floats :param indexa: Principal axis number of axdef (X=1, Y=2, Z=3). :type indexa: int :param plndef: Vector defining (with axdef) a principal plane. :type plndef: 3-Element Array of floats :param indexp: Second axis number (with indexa) of principal plane. :type indexp: int :return: Output rotation matrix. :rtype: 3x3-Element Array of floats """ axdef = stypes.toDoubleVector(axdef) indexa = ctypes.c_int(indexa) plndef = stypes.toDoubleVector(plndef) indexp = ctypes.c_int(indexp) mout = stypes.emptyDoubleMatrix() libspice.twovec_c(axdef, indexa, plndef, indexp, mout) return stypes.cMatrixToNumpy(mout)
[ "def", "twovec", "(", "axdef", ",", "indexa", ",", "plndef", ",", "indexp", ")", ":", "axdef", "=", "stypes", ".", "toDoubleVector", "(", "axdef", ")", "indexa", "=", "ctypes", ".", "c_int", "(", "indexa", ")", "plndef", "=", "stypes", ".", "toDoubleVe...
Find the transformation to the right-handed frame having a given vector as a specified axis and having a second given vector lying in a specified coordinate plane. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/twovec_c.html :param axdef: Vector defining a principal axis. :type axdef: 3-Element Array of floats :param indexa: Principal axis number of axdef (X=1, Y=2, Z=3). :type indexa: int :param plndef: Vector defining (with axdef) a principal plane. :type plndef: 3-Element Array of floats :param indexp: Second axis number (with indexa) of principal plane. :type indexp: int :return: Output rotation matrix. :rtype: 3x3-Element Array of floats
[ "Find", "the", "transformation", "to", "the", "right", "-", "handed", "frame", "having", "a", "given", "vector", "as", "a", "specified", "axis", "and", "having", "a", "second", "given", "vector", "lying", "in", "a", "specified", "coordinate", "plane", "." ]
python
train
elkan1788/ppytools
ppytools/ip2location2.py
https://github.com/elkan1788/ppytools/blob/117aeed9f669ae46e0dd6cb11c5687a5f797816c/ppytools/ip2location2.py#L111-L194
def btreeSearch(self, ip): """ " b-tree search method " param: ip """ if not ip.isdigit(): ip = self.ip2Long(ip) if len(self.__headerSip) < 1: #pass the super block self.__f.seek(8) #read the header block b = self.__f.read(8192) #parse the header block sip = None ptr = None for i in range(0, len(b)-1, 8): sip = self.getLong(b, i) ptr = self.getLong(b, i+4) if ptr == 0: break self.__headerSip.append(sip) self.__headerPtr.append(ptr) headerLen = len(self.__headerSip) - 1 l, h, sptr, eptr = (0, headerLen, 0, 0) while l <= h: m = int((l+h)/2) if ip == self.__headerSip[m]: if m > 0: sptr = self.__headerPtr[m-1] eptr = self.__headerPtr[m] break; else: sptr = self.__headerPtr[m] eptr = self.__headerPtr[m+1] break; if ip > self.__headerSip[m]: if m == headerLen: sptr = self.__headerPtr[m-1] eptr = self.__headerPtr[m] break; elif ip < self.__headerSip[m+1]: sptr = self.__headerPtr[m] eptr = self.__headerPtr[m+1] break; l = m + 1 else: if m == 0: sptr = self.__headerPtr[m] eptr = self.__headerPtr[m+1] break; elif ip > self.__headerSip[m-1]: sptr = self.__headerPtr[m-1] eptr = self.__headerPtr[m] break; h = m - 1 if sptr == 0: return "N1" indexLen = eptr - sptr self.__f.seek(sptr) b = self.__f.read(indexLen + 12) l, h, mixPtr = (0, int(indexLen/12), 0) while l <= h: m = int((l+h)/2) offset = m * 12 if ip >= self.getLong(b, offset): if ip > self.getLong(b, offset+4): l = m + 1 else: mixPtr = self.getLong(b, offset+8) break; else: h = m - 1 if mixPtr == 0: return "N2" return self.returnData(mixPtr)
[ "def", "btreeSearch", "(", "self", ",", "ip", ")", ":", "if", "not", "ip", ".", "isdigit", "(", ")", ":", "ip", "=", "self", ".", "ip2Long", "(", "ip", ")", "if", "len", "(", "self", ".", "__headerSip", ")", "<", "1", ":", "#pass the super block", ...
" b-tree search method " param: ip
[ "b", "-", "tree", "search", "method", "param", ":", "ip" ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L289-L295
def p_declare_list(p): '''declare_list : STRING EQUALS static_scalar | declare_list COMMA STRING EQUALS static_scalar''' if len(p) == 4: p[0] = [ast.Directive(p[1], p[3], lineno=p.lineno(1))] else: p[0] = p[1] + [ast.Directive(p[3], p[5], lineno=p.lineno(2))]
[ "def", "p_declare_list", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "[", "ast", ".", "Directive", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(",...
declare_list : STRING EQUALS static_scalar | declare_list COMMA STRING EQUALS static_scalar
[ "declare_list", ":", "STRING", "EQUALS", "static_scalar", "|", "declare_list", "COMMA", "STRING", "EQUALS", "static_scalar" ]
python
train
horazont/aioxmpp
aioxmpp/roster/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/roster/service.py#L612-L668
def set_entry(self, jid, *, name=_Sentinel, add_to_groups=frozenset(), remove_from_groups=frozenset(), timeout=None): """ Set properties of a roster entry or add a new roster entry. The roster entry is identified by its bare `jid`. If an entry already exists, all values default to those stored in the existing entry. For example, if no `name` is given, the current name of the entry is re-used, if any. If the entry does not exist, it will be created on the server side. The `remove_from_groups` and `add_to_groups` arguments have to be based on the locally cached state, as XMPP does not support sending diffs. `remove_from_groups` takes precedence over `add_to_groups`. `timeout` is the time in seconds to wait for a confirmation by the server. Note that the changes may not be visible immediately after his coroutine returns in the :attr:`items` and :attr:`groups` attributes. The :class:`Service` waits for the "official" roster push from the server for updating the data structures and firing events, to ensure that consistent state with other clients is achieved. This may raise arbitrary :class:`.errors.XMPPError` exceptions if the server replies with an error and also any kind of connection error if the connection gets fatally terminated while waiting for a response. """ existing = self.items.get(jid, Item(jid)) post_groups = (existing.groups | add_to_groups) - remove_from_groups post_name = existing.name if name is not _Sentinel: post_name = name item = roster_xso.Item( jid=jid, name=post_name, groups=[ roster_xso.Group(name=group_name) for group_name in post_groups ]) yield from self.client.send( stanza.IQ( structs.IQType.SET, payload=roster_xso.Query(items=[ item ]) ), timeout=timeout )
[ "def", "set_entry", "(", "self", ",", "jid", ",", "*", ",", "name", "=", "_Sentinel", ",", "add_to_groups", "=", "frozenset", "(", ")", ",", "remove_from_groups", "=", "frozenset", "(", ")", ",", "timeout", "=", "None", ")", ":", "existing", "=", "self...
Set properties of a roster entry or add a new roster entry. The roster entry is identified by its bare `jid`. If an entry already exists, all values default to those stored in the existing entry. For example, if no `name` is given, the current name of the entry is re-used, if any. If the entry does not exist, it will be created on the server side. The `remove_from_groups` and `add_to_groups` arguments have to be based on the locally cached state, as XMPP does not support sending diffs. `remove_from_groups` takes precedence over `add_to_groups`. `timeout` is the time in seconds to wait for a confirmation by the server. Note that the changes may not be visible immediately after his coroutine returns in the :attr:`items` and :attr:`groups` attributes. The :class:`Service` waits for the "official" roster push from the server for updating the data structures and firing events, to ensure that consistent state with other clients is achieved. This may raise arbitrary :class:`.errors.XMPPError` exceptions if the server replies with an error and also any kind of connection error if the connection gets fatally terminated while waiting for a response.
[ "Set", "properties", "of", "a", "roster", "entry", "or", "add", "a", "new", "roster", "entry", ".", "The", "roster", "entry", "is", "identified", "by", "its", "bare", "jid", "." ]
python
train
indico/indico-plugins
livesync/indico_livesync/uploader.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/livesync/indico_livesync/uploader.py#L57-L69
def run_initial(self, events): """Runs the initial batch upload :param events: an iterable containing events """ self_name = type(self).__name__ for i, batch in enumerate(grouper(events, self.INITIAL_BATCH_SIZE, skip_missing=True), 1): self.logger.debug('%s processing initial batch %d', self_name, i) for j, processed_batch in enumerate(grouper( batch, self.BATCH_SIZE, skip_missing=True), 1): self.logger.info('%s uploading initial chunk #%d (batch %d)', self_name, j, i) self.upload_records(processed_batch, from_queue=False)
[ "def", "run_initial", "(", "self", ",", "events", ")", ":", "self_name", "=", "type", "(", "self", ")", ".", "__name__", "for", "i", ",", "batch", "in", "enumerate", "(", "grouper", "(", "events", ",", "self", ".", "INITIAL_BATCH_SIZE", ",", "skip_missin...
Runs the initial batch upload :param events: an iterable containing events
[ "Runs", "the", "initial", "batch", "upload" ]
python
train
ssato/python-anyconfig
src/anyconfig/schema.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/schema.py#L174-L202
def gen_schema(data, **options): """ Generate a node represents JSON schema object with type annotation added for given object node. :param data: Configuration data object (dict[-like] or namedtuple) :param options: Other keyword options such as: - ac_schema_strict: True if more strict (precise) schema is needed - ac_schema_typemap: Type to JSON schema type mappings :return: A dict represents JSON schema of this node """ if data is None: return dict(type="null") _type = type(data) if _type in _SIMPLE_TYPES: typemap = options.get("ac_schema_typemap", _SIMPLETYPE_MAP) scm = dict(type=typemap[_type]) elif anyconfig.utils.is_dict_like(data): scm = object_to_schema(data, **options) elif anyconfig.utils.is_list_like(data): scm = array_to_schema(data, **options) return scm
[ "def", "gen_schema", "(", "data", ",", "*", "*", "options", ")", ":", "if", "data", "is", "None", ":", "return", "dict", "(", "type", "=", "\"null\"", ")", "_type", "=", "type", "(", "data", ")", "if", "_type", "in", "_SIMPLE_TYPES", ":", "typemap", ...
Generate a node represents JSON schema object with type annotation added for given object node. :param data: Configuration data object (dict[-like] or namedtuple) :param options: Other keyword options such as: - ac_schema_strict: True if more strict (precise) schema is needed - ac_schema_typemap: Type to JSON schema type mappings :return: A dict represents JSON schema of this node
[ "Generate", "a", "node", "represents", "JSON", "schema", "object", "with", "type", "annotation", "added", "for", "given", "object", "node", "." ]
python
train
AguaClara/aguaclara
aguaclara/design/cdc.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/cdc.py#L61-L73
def viscosity_kinematic_chem(conc_chem, temp, en_chem): """Return the dynamic viscosity of water at a given temperature. If given units, the function will automatically convert to Kelvin. If not given units, the function will assume Kelvin. """ if en_chem == 0: nu = viscosity_kinematic_alum(conc_chem, temp).magnitude if en_chem == 1: nu = viscosity_kinematic_pacl(conc_chem, temp).magnitude if en_chem not in [0,1]: nu = pc.viscosity_kinematic(temp).magnitude return nu
[ "def", "viscosity_kinematic_chem", "(", "conc_chem", ",", "temp", ",", "en_chem", ")", ":", "if", "en_chem", "==", "0", ":", "nu", "=", "viscosity_kinematic_alum", "(", "conc_chem", ",", "temp", ")", ".", "magnitude", "if", "en_chem", "==", "1", ":", "nu",...
Return the dynamic viscosity of water at a given temperature. If given units, the function will automatically convert to Kelvin. If not given units, the function will assume Kelvin.
[ "Return", "the", "dynamic", "viscosity", "of", "water", "at", "a", "given", "temperature", "." ]
python
train
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L673-L682
def show_preview(viewer_path, pdf_file_name): """Run the PDF viewer at the path viewer_path on the file pdf_file_name.""" try: cmd = [viewer_path, pdf_file_name] run_external_subprocess_in_background(cmd) except (subprocess.CalledProcessError, OSError, IOError) as e: print("\nWarning from pdfCropMargins: The argument to the '--viewer' option:" "\n ", viewer_path, "\nwas not found or failed to execute correctly.\n", file=sys.stderr) return
[ "def", "show_preview", "(", "viewer_path", ",", "pdf_file_name", ")", ":", "try", ":", "cmd", "=", "[", "viewer_path", ",", "pdf_file_name", "]", "run_external_subprocess_in_background", "(", "cmd", ")", "except", "(", "subprocess", ".", "CalledProcessError", ",",...
Run the PDF viewer at the path viewer_path on the file pdf_file_name.
[ "Run", "the", "PDF", "viewer", "at", "the", "path", "viewer_path", "on", "the", "file", "pdf_file_name", "." ]
python
train
OSSOS/MOP
src/ossos/core/scripts/update_header.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/scripts/update_header.py#L48-L108
def main(): """Do the script.""" parser = argparse.ArgumentParser( description='replace image header') parser.add_argument('--extname', help='name of extension to in header') parser.add_argument('expnum', type=str, help='exposure to update') parser.add_argument('-r', '--replace', action='store_true', help='store modified image back to VOSpace?') parser.add_argument('-v', '--verbose', action='store_true') parser.add_argument('--debug', action='store_true') parser.add_argument('--force', action='store_true', help="Re-run even if previous success recorded") parser.add_argument('--dbimages', help="VOSpace DATA storage area.", default="vos:OSSOS/dbimages") args = parser.parse_args() task = util.task() dependency = 'preproc' prefix = "" storage.DBIMAGES = args.dbimages level = logging.CRITICAL message_format = "%(message)s" if args.verbose: level = logging.INFO if args.debug: level = logging.DEBUG message_format = "%(module)s %(funcName)s %(lineno)s %(message)s" logging.basicConfig(level=level, format=message_format) storage.set_logger(task, prefix, args.expnum, None, None, False) message = storage.SUCCESS expnum = args.expnum exit_status = 0 try: # skip if already succeeded and not in force mode if storage.get_status(task, prefix, expnum, "p", 36) and not args.force: logging.info("Already updated, skipping") sys.exit(0) image_hdulist = storage.get_image(args.expnum, return_file=False) ast_hdulist = storage.get_astheader(expnum, ccd=None) run_update_header(image_hdulist, ast_hdulist) image_filename = os.path.basename(storage.get_uri(expnum)) image_hdulist.writeto(image_filename) if args.replace: dest = storage.dbimages_uri(expnum) storage.copy(image_filename, dest) storage.set_status('update_header', "", expnum, 'p', 36, message) except Exception as e: message = str(e) if args.replace: storage.set_status(task, prefix, expnum, 'p', 36, message) exit_status = message logging.error(message) return exit_status
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'replace image header'", ")", "parser", ".", "add_argument", "(", "'--extname'", ",", "help", "=", "'name of extension to in header'", ")", "parser", ".", "...
Do the script.
[ "Do", "the", "script", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/Analysis/lens_analysis.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Analysis/lens_analysis.py#L42-L66
def ellipticity_lens_light(self, kwargs_lens_light, center_x=0, center_y=0, model_bool_list=None, deltaPix=None, numPix=None): """ make sure that the window covers all the light, otherwise the moments may give to low answers. :param kwargs_lens_light: :param center_x: :param center_y: :param model_bool_list: :param deltaPix: :param numPix: :return: """ if model_bool_list is None: model_bool_list = [True] * len(kwargs_lens_light) if numPix is None: numPix = 100 if deltaPix is None: deltaPix = 0.05 x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix) x_grid += center_x y_grid += center_y I_xy = self._lens_light_internal(x_grid, y_grid, kwargs_lens_light, model_bool_list=model_bool_list) e1, e2 = analysis_util.ellipticities(I_xy, x_grid, y_grid) return e1, e2
[ "def", "ellipticity_lens_light", "(", "self", ",", "kwargs_lens_light", ",", "center_x", "=", "0", ",", "center_y", "=", "0", ",", "model_bool_list", "=", "None", ",", "deltaPix", "=", "None", ",", "numPix", "=", "None", ")", ":", "if", "model_bool_list", ...
make sure that the window covers all the light, otherwise the moments may give to low answers. :param kwargs_lens_light: :param center_x: :param center_y: :param model_bool_list: :param deltaPix: :param numPix: :return:
[ "make", "sure", "that", "the", "window", "covers", "all", "the", "light", "otherwise", "the", "moments", "may", "give", "to", "low", "answers", "." ]
python
train
OzymandiasTheGreat/python-libinput
libinput/device.py
https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/device.py#L1735-L1762
def set_seat_logical_name(self, seat): """Change the logical seat associated with this device by removing the device and adding it to the new seat. This command is identical to physically unplugging the device, then re-plugging it as a member of the new seat. libinput will generate a :attr:`~libinput.constant.EventType.DEVICE_REMOVED` event and this :class:`Device` is considered removed from the context; it will not generate further events. A :attr:`~libinput.constant.EventType.DEVICE_ADDED` event is generated with a new :class:`Device`. It is the caller's responsibility to update references to the new device accordingly. If the logical seat name already exists in the device's physical seat, the device is added to this seat. Otherwise, a new seat is created. Note: This change applies to this device until removal or :meth:`~libinput.LibInput.suspend`, whichever happens earlier. Args: seat (str): The new logical seat name. Raises: AssertionError """ rc = self._libinput.libinput_device_set_seat_logical_name( self._handle, seat.encode()) assert rc == 0, 'Cannot assign device to {}'.format(seat)
[ "def", "set_seat_logical_name", "(", "self", ",", "seat", ")", ":", "rc", "=", "self", ".", "_libinput", ".", "libinput_device_set_seat_logical_name", "(", "self", ".", "_handle", ",", "seat", ".", "encode", "(", ")", ")", "assert", "rc", "==", "0", ",", ...
Change the logical seat associated with this device by removing the device and adding it to the new seat. This command is identical to physically unplugging the device, then re-plugging it as a member of the new seat. libinput will generate a :attr:`~libinput.constant.EventType.DEVICE_REMOVED` event and this :class:`Device` is considered removed from the context; it will not generate further events. A :attr:`~libinput.constant.EventType.DEVICE_ADDED` event is generated with a new :class:`Device`. It is the caller's responsibility to update references to the new device accordingly. If the logical seat name already exists in the device's physical seat, the device is added to this seat. Otherwise, a new seat is created. Note: This change applies to this device until removal or :meth:`~libinput.LibInput.suspend`, whichever happens earlier. Args: seat (str): The new logical seat name. Raises: AssertionError
[ "Change", "the", "logical", "seat", "associated", "with", "this", "device", "by", "removing", "the", "device", "and", "adding", "it", "to", "the", "new", "seat", "." ]
python
train
althonos/pronto
pronto/term.py
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/term.py#L290-L325
def rparents(self, level=-1, intermediate=True): """Create a recursive list of children. Note that the :param:`intermediate` can be used to include every parents to the returned list, not only the most nested ones. Parameters: level (int): The depth level to continue fetching parents from (default is -1, to get parents to the utter depths) intermediate (bool): Also include the intermediate parents (default is True) Returns: :obj:`pronto.TermList`: The recursive children of the Term following the parameters """ try: return self._rparents[(level, intermediate)] except KeyError: rparents = [] if self.parents and level: if intermediate or level==1: rparents.extend(self.parents) for parent in self.parents: rparents.extend(parent.rparents(level=level-1, intermediate=intermediate)) rparents = TermList(unique_everseen(rparents)) self._rparents[(level, intermediate)] = rparents return rparents
[ "def", "rparents", "(", "self", ",", "level", "=", "-", "1", ",", "intermediate", "=", "True", ")", ":", "try", ":", "return", "self", ".", "_rparents", "[", "(", "level", ",", "intermediate", ")", "]", "except", "KeyError", ":", "rparents", "=", "["...
Create a recursive list of children. Note that the :param:`intermediate` can be used to include every parents to the returned list, not only the most nested ones. Parameters: level (int): The depth level to continue fetching parents from (default is -1, to get parents to the utter depths) intermediate (bool): Also include the intermediate parents (default is True) Returns: :obj:`pronto.TermList`: The recursive children of the Term following the parameters
[ "Create", "a", "recursive", "list", "of", "children", "." ]
python
train
rosenbrockc/ci
pyci/config.py
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L118-L127
def _parse_repo(self, xml): """Parses a <repo> tag to update settings on this Repository instance. :arg xml: the <repo> tag XMLElement. """ self.name = get_attrib(xml, "name", "repo") self.username = get_attrib(xml, "user", "repo") self.apikey = get_attrib(xml, "apikey", "repo") self.organization = get_attrib(xml, "organization") self.staging = get_attrib(xml, "staging", "repo")
[ "def", "_parse_repo", "(", "self", ",", "xml", ")", ":", "self", ".", "name", "=", "get_attrib", "(", "xml", ",", "\"name\"", ",", "\"repo\"", ")", "self", ".", "username", "=", "get_attrib", "(", "xml", ",", "\"user\"", ",", "\"repo\"", ")", "self", ...
Parses a <repo> tag to update settings on this Repository instance. :arg xml: the <repo> tag XMLElement.
[ "Parses", "a", "<repo", ">", "tag", "to", "update", "settings", "on", "this", "Repository", "instance", ".", ":", "arg", "xml", ":", "the", "<repo", ">", "tag", "XMLElement", "." ]
python
train
saltstack/salt
salt/cloud/clouds/virtualbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/virtualbox.py#L327-L368
def destroy(name, call=None): """ This function irreversibly destroys a virtual machine on the cloud provider. Before doing so, it should fire an event on the Salt event bus. The tag for this event is `salt/cloud/<vm name>/destroying`. Once the virtual machine has been destroyed, another event is fired. The tag for that event is `salt/cloud/<vm name>/destroyed`. Dependencies: list_nodes @param name: @type name: str @param call: @type call: @return: True if all went well, otherwise an error message @rtype: bool|str """ log.info("Attempting to delete instance %s", name) if not vb_machine_exists(name): return "{0} doesn't exist and can't be deleted".format(name) __utils__['cloud.fire_event']( 'event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) vb_destroy_machine(name) __utils__['cloud.fire_event']( 'event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] )
[ "def", "destroy", "(", "name", ",", "call", "=", "None", ")", ":", "log", ".", "info", "(", "\"Attempting to delete instance %s\"", ",", "name", ")", "if", "not", "vb_machine_exists", "(", "name", ")", ":", "return", "\"{0} doesn't exist and can't be deleted\"", ...
This function irreversibly destroys a virtual machine on the cloud provider. Before doing so, it should fire an event on the Salt event bus. The tag for this event is `salt/cloud/<vm name>/destroying`. Once the virtual machine has been destroyed, another event is fired. The tag for that event is `salt/cloud/<vm name>/destroyed`. Dependencies: list_nodes @param name: @type name: str @param call: @type call: @return: True if all went well, otherwise an error message @rtype: bool|str
[ "This", "function", "irreversibly", "destroys", "a", "virtual", "machine", "on", "the", "cloud", "provider", ".", "Before", "doing", "so", "it", "should", "fire", "an", "event", "on", "the", "Salt", "event", "bus", "." ]
python
train
saltstack/salt
salt/modules/win_file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_file.py#L759-L799
def chgrp(path, group): ''' Change the group of a file Under Windows, this will do nothing. While a file in Windows does have a 'primary group', this rarely used attribute generally has no bearing on permissions unless intentionally configured and is only used to support Unix compatibility features (e.g. Services For Unix, NFS services). Salt, therefore, remaps this function to do nothing while still being compatible with Unix behavior. When managing Windows systems, this function is superfluous and will generate an info level log entry if used directly. If you do actually want to set the 'primary group' of a file, use ``file .chpgrp``. To set group permissions use ``file.set_perms`` Args: path (str): The path to the file or directory group (str): The group (unused) Returns: None CLI Example: .. code-block:: bash salt '*' file.chpgrp c:\\temp\\test.txt administrators ''' func_name = '{0}.chgrp'.format(__virtualname__) if __opts__.get('fun', '') == func_name: log.info('The function %s should not be used on Windows systems; see ' 'function docs for details.', func_name) log.debug('win_file.py %s Doing nothing for %s', func_name, path) return None
[ "def", "chgrp", "(", "path", ",", "group", ")", ":", "func_name", "=", "'{0}.chgrp'", ".", "format", "(", "__virtualname__", ")", "if", "__opts__", ".", "get", "(", "'fun'", ",", "''", ")", "==", "func_name", ":", "log", ".", "info", "(", "'The functio...
Change the group of a file Under Windows, this will do nothing. While a file in Windows does have a 'primary group', this rarely used attribute generally has no bearing on permissions unless intentionally configured and is only used to support Unix compatibility features (e.g. Services For Unix, NFS services). Salt, therefore, remaps this function to do nothing while still being compatible with Unix behavior. When managing Windows systems, this function is superfluous and will generate an info level log entry if used directly. If you do actually want to set the 'primary group' of a file, use ``file .chpgrp``. To set group permissions use ``file.set_perms`` Args: path (str): The path to the file or directory group (str): The group (unused) Returns: None CLI Example: .. code-block:: bash salt '*' file.chpgrp c:\\temp\\test.txt administrators
[ "Change", "the", "group", "of", "a", "file" ]
python
train
chrisrink10/basilisp
src/basilisp/lang/compiler/optimizer.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/optimizer.py#L31-L45
def visit_Expr(self, node: ast.Expr) -> Optional[ast.Expr]: """Eliminate no-op constant expressions which are in the tree as standalone statements.""" if isinstance( node.value, ( ast.Constant, # type: ignore ast.Name, ast.NameConstant, ast.Num, ast.Str, ), ): return None return node
[ "def", "visit_Expr", "(", "self", ",", "node", ":", "ast", ".", "Expr", ")", "->", "Optional", "[", "ast", ".", "Expr", "]", ":", "if", "isinstance", "(", "node", ".", "value", ",", "(", "ast", ".", "Constant", ",", "# type: ignore", "ast", ".", "N...
Eliminate no-op constant expressions which are in the tree as standalone statements.
[ "Eliminate", "no", "-", "op", "constant", "expressions", "which", "are", "in", "the", "tree", "as", "standalone", "statements", "." ]
python
test
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L392-L450
def beatExtraction(st_features, win_len, PLOT=False): """ This function extracts an estimate of the beat rate for a musical signal. ARGUMENTS: - st_features: a numpy array (n_feats x numOfShortTermWindows) - win_len: window size in seconds RETURNS: - BPM: estimates of beats per minute - Ratio: a confidence measure """ # Features that are related to the beat tracking task: toWatch = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] max_beat_time = int(round(2.0 / win_len)) hist_all = numpy.zeros((max_beat_time,)) for ii, i in enumerate(toWatch): # for each feature DifThres = 2.0 * (numpy.abs(st_features[i, 0:-1] - st_features[i, 1::])).mean() # dif threshold (3 x Mean of Difs) if DifThres<=0: DifThres = 0.0000000000000001 [pos1, _] = utilities.peakdet(st_features[i, :], DifThres) # detect local maxima posDifs = [] # compute histograms of local maxima changes for j in range(len(pos1)-1): posDifs.append(pos1[j+1]-pos1[j]) [hist_times, HistEdges] = numpy.histogram(posDifs, numpy.arange(0.5, max_beat_time + 1.5)) hist_centers = (HistEdges[0:-1] + HistEdges[1::]) / 2.0 hist_times = hist_times.astype(float) / st_features.shape[1] hist_all += hist_times if PLOT: plt.subplot(9, 2, ii + 1) plt.plot(st_features[i, :], 'k') for k in pos1: plt.plot(k, st_features[i, k], 'k*') f1 = plt.gca() f1.axes.get_xaxis().set_ticks([]) f1.axes.get_yaxis().set_ticks([]) if PLOT: plt.show(block=False) plt.figure() # Get beat as the argmax of the agregated histogram: I = numpy.argmax(hist_all) bpms = 60 / (hist_centers * win_len) BPM = bpms[I] # ... and the beat ratio: Ratio = hist_all[I] / hist_all.sum() if PLOT: # filter out >500 beats from plotting: hist_all = hist_all[bpms < 500] bpms = bpms[bpms < 500] plt.plot(bpms, hist_all, 'k') plt.xlabel('Beats per minute') plt.ylabel('Freq Count') plt.show(block=True) return BPM, Ratio
[ "def", "beatExtraction", "(", "st_features", ",", "win_len", ",", "PLOT", "=", "False", ")", ":", "# Features that are related to the beat tracking task:", "toWatch", "=", "[", "0", ",", "1", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",...
This function extracts an estimate of the beat rate for a musical signal. ARGUMENTS: - st_features: a numpy array (n_feats x numOfShortTermWindows) - win_len: window size in seconds RETURNS: - BPM: estimates of beats per minute - Ratio: a confidence measure
[ "This", "function", "extracts", "an", "estimate", "of", "the", "beat", "rate", "for", "a", "musical", "signal", ".", "ARGUMENTS", ":", "-", "st_features", ":", "a", "numpy", "array", "(", "n_feats", "x", "numOfShortTermWindows", ")", "-", "win_len", ":", "...
python
train
StackStorm/pybind
pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/address_family/vpnv6/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/routing_system/router/router_bgp/address_family/vpnv6/__init__.py#L92-L113
def _set_vpnv6_unicast(self, v, load=False): """ Setter method for vpnv6_unicast, mapped from YANG variable /routing_system/router/router_bgp/address_family/vpnv6/vpnv6_unicast (container) If this variable is read-only (config: false) in the source YANG file, then _set_vpnv6_unicast is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vpnv6_unicast() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vpnv6_unicast.vpnv6_unicast, is_container='container', presence=True, yang_name="vpnv6-unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VPNV6 Address Family', u'callpoint': u'AfVpnV6Ucast', u'cli-add-mode': None, u'cli-full-command': None, u'alt-name': u'unicast', u'cli-mode-name': u'config-bgp-vpnv6u'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vpnv6_unicast must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vpnv6_unicast.vpnv6_unicast, is_container='container', presence=True, yang_name="vpnv6-unicast", rest_name="unicast", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VPNV6 Address Family', u'callpoint': u'AfVpnV6Ucast', u'cli-add-mode': None, u'cli-full-command': None, u'alt-name': u'unicast', u'cli-mode-name': u'config-bgp-vpnv6u'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", }) self.__vpnv6_unicast = t if hasattr(self, '_set'): self._set()
[ "def", "_set_vpnv6_unicast", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "...
Setter method for vpnv6_unicast, mapped from YANG variable /routing_system/router/router_bgp/address_family/vpnv6/vpnv6_unicast (container) If this variable is read-only (config: false) in the source YANG file, then _set_vpnv6_unicast is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vpnv6_unicast() directly.
[ "Setter", "method", "for", "vpnv6_unicast", "mapped", "from", "YANG", "variable", "/", "routing_system", "/", "router", "/", "router_bgp", "/", "address_family", "/", "vpnv6", "/", "vpnv6_unicast", "(", "container", ")", "If", "this", "variable", "is", "read", ...
python
train
saltstack/salt
salt/modules/junos.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/junos.py#L1002-L1112
def install_os(path=None, **kwargs): ''' Installs the given image on the device. After the installation is complete\ the device is rebooted, if reboot=True is given as a keyworded argument. path (required) Path where the image file is present on the proxy minion remote_path : If the value of path is a file path on the local (Salt host's) filesystem, then the image is copied from the local filesystem to the :remote_path: directory on the target Junos device. The default is ``/var/tmp``. If the value of :path: or is a URL, then the value of :remote_path: is unused. dev_timeout : 30 The NETCONF RPC timeout (in seconds). This argument was added since most of the time the "package add" RPC takes a significant amount of time. The default RPC timeout is 30 seconds. So this :timeout: value will be used in the context of the SW installation process. Defaults to 30 minutes (30*60=1800) reboot : False Whether to reboot after installation no_copy : False If ``True`` the software package will not be SCP’d to the device bool validate: When ``True`` this method will perform a config validation against the new image bool issu: When ``True`` allows unified in-service software upgrade (ISSU) feature enables you to upgrade between two different Junos OS releases with no disruption on the control plane and with minimal disruption of traffic. bool nssu: When ``True`` allows nonstop software upgrade (NSSU) enables you to upgrade the software running on a Juniper Networks EX Series Virtual Chassis or a Juniper Networks EX Series Ethernet Switch with redundant Routing Engines with a single command and minimal disruption to network traffic. CLI Examples: .. 
code-block:: bash salt 'device_name' junos.install_os 'salt://images/junos_image.tgz' reboot=True salt 'device_name' junos.install_os 'salt://junos_16_1.tgz' dev_timeout=300 ''' conn = __proxy__['junos.conn']() ret = {} ret['out'] = True op = {} if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) no_copy_ = op.get('no_copy', False) if path is None: ret['message'] = \ 'Please provide the salt path where the junos image is present.' ret['out'] = False return ret if not no_copy_: image_cached_path = salt.utils.files.mkstemp() __salt__['cp.get_file'](path, image_cached_path) if not os.path.isfile(image_cached_path): ret['message'] = 'Invalid image path.' ret['out'] = False return ret if os.path.getsize(image_cached_path) == 0: ret['message'] = 'Failed to copy image' ret['out'] = False return ret path = image_cached_path try: conn.sw.install(path, progress=True, **op) ret['message'] = 'Installed the os.' except Exception as exception: ret['message'] = 'Installation failed due to: "{0}"'.format(exception) ret['out'] = False return ret finally: if not no_copy_: salt.utils.files.safe_rm(image_cached_path) if 'reboot' in op and op['reboot'] is True: try: conn.sw.reboot() except Exception as exception: ret['message'] = \ 'Installation successful but reboot failed due to : "{0}"' \ .format(exception) ret['out'] = False return ret ret['message'] = 'Successfully installed and rebooted!' return ret
[ "def", "install_os", "(", "path", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__proxy__", "[", "'junos.conn'", "]", "(", ")", "ret", "=", "{", "}", "ret", "[", "'out'", "]", "=", "True", "op", "=", "{", "}", "if", "'__pub_arg'",...
Installs the given image on the device. After the installation is complete\ the device is rebooted, if reboot=True is given as a keyworded argument. path (required) Path where the image file is present on the proxy minion remote_path : If the value of path is a file path on the local (Salt host's) filesystem, then the image is copied from the local filesystem to the :remote_path: directory on the target Junos device. The default is ``/var/tmp``. If the value of :path: or is a URL, then the value of :remote_path: is unused. dev_timeout : 30 The NETCONF RPC timeout (in seconds). This argument was added since most of the time the "package add" RPC takes a significant amount of time. The default RPC timeout is 30 seconds. So this :timeout: value will be used in the context of the SW installation process. Defaults to 30 minutes (30*60=1800) reboot : False Whether to reboot after installation no_copy : False If ``True`` the software package will not be SCP’d to the device bool validate: When ``True`` this method will perform a config validation against the new image bool issu: When ``True`` allows unified in-service software upgrade (ISSU) feature enables you to upgrade between two different Junos OS releases with no disruption on the control plane and with minimal disruption of traffic. bool nssu: When ``True`` allows nonstop software upgrade (NSSU) enables you to upgrade the software running on a Juniper Networks EX Series Virtual Chassis or a Juniper Networks EX Series Ethernet Switch with redundant Routing Engines with a single command and minimal disruption to network traffic. CLI Examples: .. code-block:: bash salt 'device_name' junos.install_os 'salt://images/junos_image.tgz' reboot=True salt 'device_name' junos.install_os 'salt://junos_16_1.tgz' dev_timeout=300
[ "Installs", "the", "given", "image", "on", "the", "device", ".", "After", "the", "installation", "is", "complete", "\\", "the", "device", "is", "rebooted", "if", "reboot", "=", "True", "is", "given", "as", "a", "keyworded", "argument", "." ]
python
train
pydata/xarray
xarray/plot/utils.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/plot/utils.py#L652-L704
def _process_cmap_cbar_kwargs(func, kwargs, data): """ Parameters ========== func : plotting function kwargs : dict, Dictionary with arguments that need to be parsed data : ndarray, Data values Returns ======= cmap_params cbar_kwargs """ cmap = kwargs.pop('cmap', None) colors = kwargs.pop('colors', None) cbar_kwargs = kwargs.pop('cbar_kwargs', {}) cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs) levels = kwargs.pop('levels', None) if 'contour' in func.__name__ and levels is None: levels = 7 # this is the matplotlib default # colors is mutually exclusive with cmap if cmap and colors: raise ValueError("Can't specify both cmap and colors.") # colors is only valid when levels is supplied or the plot is of type # contour or contourf if colors and (('contour' not in func.__name__) and (not levels)): raise ValueError("Can only specify colors with contour or levels") # we should not be getting a list of colors in cmap anymore # is there a better way to do this test? if isinstance(cmap, (list, tuple)): warnings.warn("Specifying a list of colors in cmap is deprecated. " "Use colors keyword instead.", DeprecationWarning, stacklevel=3) cmap_kwargs = {'plot_data': data, 'levels': levels, 'cmap': colors if colors else cmap, 'filled': func.__name__ != 'contour'} cmap_args = getfullargspec(_determine_cmap_params).args cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs) cmap_params = _determine_cmap_params(**cmap_kwargs) return cmap_params, cbar_kwargs
[ "def", "_process_cmap_cbar_kwargs", "(", "func", ",", "kwargs", ",", "data", ")", ":", "cmap", "=", "kwargs", ".", "pop", "(", "'cmap'", ",", "None", ")", "colors", "=", "kwargs", ".", "pop", "(", "'colors'", ",", "None", ")", "cbar_kwargs", "=", "kwar...
Parameters ========== func : plotting function kwargs : dict, Dictionary with arguments that need to be parsed data : ndarray, Data values Returns ======= cmap_params cbar_kwargs
[ "Parameters", "==========", "func", ":", "plotting", "function", "kwargs", ":", "dict", "Dictionary", "with", "arguments", "that", "need", "to", "be", "parsed", "data", ":", "ndarray", "Data", "values" ]
python
train
piglei/uwsgi-sloth
uwsgi_sloth/analyzer.py
https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/analyzer.py#L69-L77
def classify(self, url_path): """Classify an url""" for dict_api_url in self.user_defined_rules: api_url = dict_api_url['str'] re_api_url = dict_api_url['re'] if re_api_url.match(url_path[1:]): return api_url return self.RE_SIMPLIFY_URL.sub(r'(\\d+)/', url_path)
[ "def", "classify", "(", "self", ",", "url_path", ")", ":", "for", "dict_api_url", "in", "self", ".", "user_defined_rules", ":", "api_url", "=", "dict_api_url", "[", "'str'", "]", "re_api_url", "=", "dict_api_url", "[", "'re'", "]", "if", "re_api_url", ".", ...
Classify an url
[ "Classify", "an", "url" ]
python
train
Kane610/axis
axis/rtsp.py
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/rtsp.py#L290-L294
def transport(self): """Generate transport string.""" transport = "Transport: RTP/AVP;unicast;client_port={}-{}\r\n" return transport.format( str(self.session.rtp_port), str(self.session.rtcp_port))
[ "def", "transport", "(", "self", ")", ":", "transport", "=", "\"Transport: RTP/AVP;unicast;client_port={}-{}\\r\\n\"", "return", "transport", ".", "format", "(", "str", "(", "self", ".", "session", ".", "rtp_port", ")", ",", "str", "(", "self", ".", "session", ...
Generate transport string.
[ "Generate", "transport", "string", "." ]
python
train
pytorch/vision
torchvision/transforms/functional.py
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L468-L499
def five_crop(img, size): """Crop the given PIL Image into four corners and the central crop. .. Note:: This transform returns a tuple of images and there may be a mismatch in the number of inputs and targets your ``Dataset`` returns. Args: size (sequence or int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made. Returns: tuple: tuple (tl, tr, bl, br, center) Corresponding top left, top right, bottom left, bottom right and center crop. """ if isinstance(size, numbers.Number): size = (int(size), int(size)) else: assert len(size) == 2, "Please provide only two dimensions (h, w) for size." w, h = img.size crop_h, crop_w = size if crop_w > w or crop_h > h: raise ValueError("Requested crop size {} is bigger than input size {}".format(size, (h, w))) tl = img.crop((0, 0, crop_w, crop_h)) tr = img.crop((w - crop_w, 0, w, crop_h)) bl = img.crop((0, h - crop_h, crop_w, h)) br = img.crop((w - crop_w, h - crop_h, w, h)) center = center_crop(img, (crop_h, crop_w)) return (tl, tr, bl, br, center)
[ "def", "five_crop", "(", "img", ",", "size", ")", ":", "if", "isinstance", "(", "size", ",", "numbers", ".", "Number", ")", ":", "size", "=", "(", "int", "(", "size", ")", ",", "int", "(", "size", ")", ")", "else", ":", "assert", "len", "(", "s...
Crop the given PIL Image into four corners and the central crop. .. Note:: This transform returns a tuple of images and there may be a mismatch in the number of inputs and targets your ``Dataset`` returns. Args: size (sequence or int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made. Returns: tuple: tuple (tl, tr, bl, br, center) Corresponding top left, top right, bottom left, bottom right and center crop.
[ "Crop", "the", "given", "PIL", "Image", "into", "four", "corners", "and", "the", "central", "crop", "." ]
python
test
simoninireland/epyc
epyc/labnotebook.py
https://github.com/simoninireland/epyc/blob/b3b61007741a0ab3de64df89070a6f30de8ec268/epyc/labnotebook.py#L175-L194
def cancelPendingResult( self, jobid ): """Cancel a particular pending result. Note that this only affects the notebook's record, not any job running in a lab. :param jobid: job id for pending result""" if jobid in self._pending.keys(): k = self._pending[jobid] del self._pending[jobid] if k in self._results.keys(): rs = self._results[k] j = rs.index(jobid) del rs[j] else: # we've screwed-up the internal data structures raise RuntimeError('Internal structure error for {j} -> {ps}'.format(j = jobid, ps = k)) else: # no such job # sd: should this just fail silently? raise KeyError('No pending result with id {j}'.format(j = jobid))
[ "def", "cancelPendingResult", "(", "self", ",", "jobid", ")", ":", "if", "jobid", "in", "self", ".", "_pending", ".", "keys", "(", ")", ":", "k", "=", "self", ".", "_pending", "[", "jobid", "]", "del", "self", ".", "_pending", "[", "jobid", "]", "i...
Cancel a particular pending result. Note that this only affects the notebook's record, not any job running in a lab. :param jobid: job id for pending result
[ "Cancel", "a", "particular", "pending", "result", ".", "Note", "that", "this", "only", "affects", "the", "notebook", "s", "record", "not", "any", "job", "running", "in", "a", "lab", "." ]
python
train
MAVENSDC/cdflib
cdflib/cdfread.py
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfread.py#L1726-L1741
def _convert_option(self): ''' Determines how to convert CDF byte ordering to the system byte ordering. ''' if sys.byteorder == 'little' and self._endian() == 'big-endian': # big->little order = '>' elif sys.byteorder == 'big' and self._endian() == 'little-endian': # little->big order = '<' else: # no conversion order = '=' return order
[ "def", "_convert_option", "(", "self", ")", ":", "if", "sys", ".", "byteorder", "==", "'little'", "and", "self", ".", "_endian", "(", ")", "==", "'big-endian'", ":", "# big->little", "order", "=", "'>'", "elif", "sys", ".", "byteorder", "==", "'big'", "a...
Determines how to convert CDF byte ordering to the system byte ordering.
[ "Determines", "how", "to", "convert", "CDF", "byte", "ordering", "to", "the", "system", "byte", "ordering", "." ]
python
train
Dallinger/Dallinger
dallinger/experiment_server/experiment_server.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/experiment_server/experiment_server.py#L967-L1011
def create_question(participant_id): """Send a POST request to the question table. Questions store information at the participant level, not the node level. You should pass the question (string) number (int) and response (string) as arguments. """ # Get the participant. try: ppt = models.Participant.query.filter_by(id=participant_id).one() except NoResultFound: return error_response( error_type="/question POST no participant found", status=403 ) question = request_parameter(parameter="question") response = request_parameter(parameter="response") number = request_parameter(parameter="number", parameter_type="int") for x in [question, response, number]: if isinstance(x, Response): return x # Consult the recruiter regarding whether to accept a questionnaire # from the participant: rejection = ppt.recruiter.rejects_questionnaire_from(ppt) if rejection: return error_response( error_type="/question POST, status = {}, reason: {}".format( ppt.status, rejection ), participant=ppt, ) try: # execute the request models.Question( participant=ppt, question=question, response=response, number=number ) session.commit() except Exception: return error_response(error_type="/question POST server error", status=403) # return the data return success_response()
[ "def", "create_question", "(", "participant_id", ")", ":", "# Get the participant.", "try", ":", "ppt", "=", "models", ".", "Participant", ".", "query", ".", "filter_by", "(", "id", "=", "participant_id", ")", ".", "one", "(", ")", "except", "NoResultFound", ...
Send a POST request to the question table. Questions store information at the participant level, not the node level. You should pass the question (string) number (int) and response (string) as arguments.
[ "Send", "a", "POST", "request", "to", "the", "question", "table", "." ]
python
train
lowandrew/OLCTools
sipprCommon/sippingmethods.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/sipprCommon/sippingmethods.py#L232-L264
def subsample_reads(self): """ Subsampling of reads to 20X coverage of rMLST genes (roughly). To be called after rMLST extraction and read trimming, in that order. """ logging.info('Subsampling {at} reads'.format(at=self.analysistype)) with progressbar(self.runmetadata) as bar: for sample in bar: if sample.general.bestassemblyfile != 'NA': # Create the name of the subsampled read file sample[self.analysistype].subsampledreads = os.path.join( sample[self.analysistype].outputdir, '{at}_targetMatches_subsampled.fastq.gz'.format(at=self.analysistype)) # Set the reformat.sh command. It will be run multiple times, overwrite previous iterations # each time. Use samplebasestarget to provide an approximate number of bases to include in the # subsampled reads e.g. for rMLST: 700000 (approx. 35000 bp total length of genes x 20X coverage) sample[self.analysistype].subsamplecmd = \ 'reformat.sh in={bf} out={ssr} overwrite samplebasestarget=700000' \ .format(bf=sample[self.analysistype].baitedfastq, ssr=sample[self.analysistype].subsampledreads) if not os.path.isfile(sample[self.analysistype].subsampledreads): # Run the call out, err = run_subprocess(sample[self.analysistype].subsamplecmd) write_to_logfile(sample[self.analysistype].subsamplecmd, sample[self.analysistype].subsamplecmd, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr) # Update the variable to store the baited reads sample[self.analysistype].baitedfastq = sample[self.analysistype].subsampledreads
[ "def", "subsample_reads", "(", "self", ")", ":", "logging", ".", "info", "(", "'Subsampling {at} reads'", ".", "format", "(", "at", "=", "self", ".", "analysistype", ")", ")", "with", "progressbar", "(", "self", ".", "runmetadata", ")", "as", "bar", ":", ...
Subsampling of reads to 20X coverage of rMLST genes (roughly). To be called after rMLST extraction and read trimming, in that order.
[ "Subsampling", "of", "reads", "to", "20X", "coverage", "of", "rMLST", "genes", "(", "roughly", ")", ".", "To", "be", "called", "after", "rMLST", "extraction", "and", "read", "trimming", "in", "that", "order", "." ]
python
train
LasLabs/python-five9
five9/models/base_model.py
https://github.com/LasLabs/python-five9/blob/ef53160d6658604524a2577391280d2b4501a7ce/five9/models/base_model.py#L180-L184
def _zeep_to_dict(cls, obj): """Convert a zeep object to a dictionary.""" res = serialize_object(obj) res = cls._get_non_empty_dict(res) return res
[ "def", "_zeep_to_dict", "(", "cls", ",", "obj", ")", ":", "res", "=", "serialize_object", "(", "obj", ")", "res", "=", "cls", ".", "_get_non_empty_dict", "(", "res", ")", "return", "res" ]
Convert a zeep object to a dictionary.
[ "Convert", "a", "zeep", "object", "to", "a", "dictionary", "." ]
python
train
proycon/pynlpl
pynlpl/formats/giza.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/giza.py#L108-L126
def getalignedtarget(self, index): """Returns target range only if source index aligns to a single consecutive range of target tokens.""" targetindices = [] target = None foundindex = -1 for sourceindex, targetindex in self.alignment: if sourceindex == index: targetindices.append(targetindex) if len(targetindices) > 1: for i in range(1,len(targetindices)): if abs(targetindices[i] - targetindices[i-1]) != 1: break # not consecutive foundindex = (min(targetindices), max(targetindices)) target = ' '.join(self.target[min(targetindices):max(targetindices)+1]) elif targetindices: foundindex = targetindices[0] target = self.target[foundindex] return target, foundindex
[ "def", "getalignedtarget", "(", "self", ",", "index", ")", ":", "targetindices", "=", "[", "]", "target", "=", "None", "foundindex", "=", "-", "1", "for", "sourceindex", ",", "targetindex", "in", "self", ".", "alignment", ":", "if", "sourceindex", "==", ...
Returns target range only if source index aligns to a single consecutive range of target tokens.
[ "Returns", "target", "range", "only", "if", "source", "index", "aligns", "to", "a", "single", "consecutive", "range", "of", "target", "tokens", "." ]
python
train
brainiak/brainiak
brainiak/factoranalysis/tfa.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L309-L326
def get_map_offset(self): """Compute offset of prior/posterior Returns ------- map_offest : 1D array The offset to different fields in prior/posterior """ nfield = 4 self.map_offset = np.zeros(nfield).astype(int) field_size = self.K * np.array([self.n_dim, 1, self.cov_vec_size, 1]) for i in np.arange(nfield - 1) + 1: self.map_offset[i] = self.map_offset[i - 1] + field_size[i - 1] return self.map_offset
[ "def", "get_map_offset", "(", "self", ")", ":", "nfield", "=", "4", "self", ".", "map_offset", "=", "np", ".", "zeros", "(", "nfield", ")", ".", "astype", "(", "int", ")", "field_size", "=", "self", ".", "K", "*", "np", ".", "array", "(", "[", "s...
Compute offset of prior/posterior Returns ------- map_offest : 1D array The offset to different fields in prior/posterior
[ "Compute", "offset", "of", "prior", "/", "posterior" ]
python
train
googleapis/google-cloud-python
trace/google/cloud/trace/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/trace/google/cloud/trace/client.py#L103-L226
def create_span( self, name, span_id, display_name, start_time, end_time, parent_span_id=None, attributes=None, stack_trace=None, time_events=None, links=None, status=None, same_process_as_parent_span=None, child_span_count=None, retry=None, timeout=None, ): """ Creates a new Span. Example: >>> from google.cloud import trace_v2 >>> >>> client = trace_v2.Client() >>> >>> name = 'projects/{project}/traces/{trace_id}/spans/{span_id}'. format('[PROJECT]', '[TRACE_ID]', '[SPAN_ID]') >>> span_id = '[SPAN_ID]' >>> display_name = {} >>> start_time = {} >>> end_time = {} >>> >>> response = client.create_span(name, span_id, display_name, start_time, end_time) Args: name (str): The resource name of the span in the following format: :: projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] [TRACE_ID] is a unique identifier for a trace within a project. [SPAN_ID] is a unique identifier for a span within a trace, assigned when the span is created. span_id (str): The [SPAN_ID] portion of the span's resource name. The ID is a 16-character hexadecimal encoding of an 8-byte array. display_name (dict): A description of the span's operation (up to 128 bytes). Stackdriver Trace displays the description in the {% dynamic print site_values.console_name %}. For example, the display name can be a qualified method name or a file name and a line number where the operation is called. A best practice is to use the same display name within an application and at the same call point. This makes it easier to correlate spans in different traces. Contains two fields, value is the truncated name, truncatedByteCount is the number of bytes removed from the original string. If 0, then the string was not shortened. start_time (:class:`~datetime.datetime`): The start time of the span. On the client side, this is the time kept by the local machine where the span execution starts. On the server side, this is the time when the server's application handler starts running. 
end_time (:class:`~datetime.datetime`): The end time of the span. On the client side, this is the time kept by the local machine where the span execution ends. On the server side, this is the time when the server application handler stops running. parent_span_id (str): The [SPAN_ID] of this span's parent span. If this is a root span, then this field must be empty. attributes (dict): A set of attributes on the span. There is a limit of 32 attributes per span. stack_trace (dict): Stack trace captured at the start of the span. Contains two fields, stackFrames is a list of stack frames in this call stack, a maximum of 128 frames are allowed per StackFrame; stackTraceHashId is used to conserve network bandwidth for duplicate stack traces within a single trace. time_events (dict): The included time events. There can be up to 32 annotations and 128 message events per span. links (dict): A maximum of 128 links are allowed per Span. status (dict): An optional final status for this span. same_process_as_parent_span (bool): A highly recommended but not required flag that identifies when a trace crosses a process boundary. True when the parent_span belongs to the same process as the current span. child_span_count (int): An optional number of child spans that were generated while this span was active. If set, allows implementation to detect missing child spans. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. Returns: A :class:`~google.cloud.trace_v2.types.Span` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. 
ValueError: If the parameters are invalid. """ return self.trace_api.create_span( name=name, span_id=span_id, display_name=display_name, start_time=start_time, end_time=end_time, parent_span_id=parent_span_id, attributes=attributes, stack_trace=stack_trace, time_events=time_events, links=links, status=status, same_process_as_parent_span=same_process_as_parent_span, child_span_count=child_span_count, )
[ "def", "create_span", "(", "self", ",", "name", ",", "span_id", ",", "display_name", ",", "start_time", ",", "end_time", ",", "parent_span_id", "=", "None", ",", "attributes", "=", "None", ",", "stack_trace", "=", "None", ",", "time_events", "=", "None", "...
Creates a new Span. Example: >>> from google.cloud import trace_v2 >>> >>> client = trace_v2.Client() >>> >>> name = 'projects/{project}/traces/{trace_id}/spans/{span_id}'. format('[PROJECT]', '[TRACE_ID]', '[SPAN_ID]') >>> span_id = '[SPAN_ID]' >>> display_name = {} >>> start_time = {} >>> end_time = {} >>> >>> response = client.create_span(name, span_id, display_name, start_time, end_time) Args: name (str): The resource name of the span in the following format: :: projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID] [TRACE_ID] is a unique identifier for a trace within a project. [SPAN_ID] is a unique identifier for a span within a trace, assigned when the span is created. span_id (str): The [SPAN_ID] portion of the span's resource name. The ID is a 16-character hexadecimal encoding of an 8-byte array. display_name (dict): A description of the span's operation (up to 128 bytes). Stackdriver Trace displays the description in the {% dynamic print site_values.console_name %}. For example, the display name can be a qualified method name or a file name and a line number where the operation is called. A best practice is to use the same display name within an application and at the same call point. This makes it easier to correlate spans in different traces. Contains two fields, value is the truncated name, truncatedByteCount is the number of bytes removed from the original string. If 0, then the string was not shortened. start_time (:class:`~datetime.datetime`): The start time of the span. On the client side, this is the time kept by the local machine where the span execution starts. On the server side, this is the time when the server's application handler starts running. end_time (:class:`~datetime.datetime`): The end time of the span. On the client side, this is the time kept by the local machine where the span execution ends. On the server side, this is the time when the server application handler stops running. 
parent_span_id (str): The [SPAN_ID] of this span's parent span. If this is a root span, then this field must be empty. attributes (dict): A set of attributes on the span. There is a limit of 32 attributes per span. stack_trace (dict): Stack trace captured at the start of the span. Contains two fields, stackFrames is a list of stack frames in this call stack, a maximum of 128 frames are allowed per StackFrame; stackTraceHashId is used to conserve network bandwidth for duplicate stack traces within a single trace. time_events (dict): The included time events. There can be up to 32 annotations and 128 message events per span. links (dict): A maximum of 128 links are allowed per Span. status (dict): An optional final status for this span. same_process_as_parent_span (bool): A highly recommended but not required flag that identifies when a trace crosses a process boundary. True when the parent_span belongs to the same process as the current span. child_span_count (int): An optional number of child spans that were generated while this span was active. If set, allows implementation to detect missing child spans. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. Returns: A :class:`~google.cloud.trace_v2.types.Span` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Creates", "a", "new", "Span", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L1577-L1599
def cposr(string, chars, start): """ Find the first occurrence in a string of a character belonging to a collection of characters, starting at a specified location, searching in reverse. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cposr_c.html :param string: Any character string. :type string: str :param chars: A collection of characters. :type chars: str :param start: Position to begin looking for one of chars. :type start: int :return: The index of the last character of str at or before index start that is in the collection chars. :rtype: int """ string = stypes.stringToCharP(string) chars = stypes.stringToCharP(chars) start = ctypes.c_int(start) return libspice.cposr_c(string, chars, start)
[ "def", "cposr", "(", "string", ",", "chars", ",", "start", ")", ":", "string", "=", "stypes", ".", "stringToCharP", "(", "string", ")", "chars", "=", "stypes", ".", "stringToCharP", "(", "chars", ")", "start", "=", "ctypes", ".", "c_int", "(", "start",...
Find the first occurrence in a string of a character belonging to a collection of characters, starting at a specified location, searching in reverse. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cposr_c.html :param string: Any character string. :type string: str :param chars: A collection of characters. :type chars: str :param start: Position to begin looking for one of chars. :type start: int :return: The index of the last character of str at or before index start that is in the collection chars. :rtype: int
[ "Find", "the", "first", "occurrence", "in", "a", "string", "of", "a", "character", "belonging", "to", "a", "collection", "of", "characters", "starting", "at", "a", "specified", "location", "searching", "in", "reverse", "." ]
python
train
CalebBell/thermo
thermo/utils.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/utils.py#L3368-L3403
def calculate_derivative_P(self, P, T, zs, ws, method, order=1): r'''Method to calculate a derivative of a mixture property with respect to pressure at constant temperature and composition of a given order using a specified method. Uses SciPy's derivative function, with a delta of 0.01 Pa and a number of points equal to 2*order + 1. This method can be overwritten by subclasses who may perfer to add analytical methods for some or all methods as this is much faster. If the calculation does not succeed, returns the actual error encountered. Parameters ---------- P : float Pressure at which to calculate the derivative, [Pa] T : float Temperature at which to calculate the derivative, [K] zs : list[float] Mole fractions of all species in the mixture, [-] ws : list[float] Weight fractions of all species in the mixture, [-] method : str Method for which to find the derivative order : int Order of the derivative, >= 1 Returns ------- d_prop_d_P_at_T : float Calculated derivative property at constant temperature, [`units/Pa^order`] ''' f = lambda P: self.calculate(T, P, zs, ws, method) return derivative(f, P, dx=1e-2, n=order, order=1+order*2)
[ "def", "calculate_derivative_P", "(", "self", ",", "P", ",", "T", ",", "zs", ",", "ws", ",", "method", ",", "order", "=", "1", ")", ":", "f", "=", "lambda", "P", ":", "self", ".", "calculate", "(", "T", ",", "P", ",", "zs", ",", "ws", ",", "m...
r'''Method to calculate a derivative of a mixture property with respect to pressure at constant temperature and composition of a given order using a specified method. Uses SciPy's derivative function, with a delta of 0.01 Pa and a number of points equal to 2*order + 1. This method can be overwritten by subclasses who may perfer to add analytical methods for some or all methods as this is much faster. If the calculation does not succeed, returns the actual error encountered. Parameters ---------- P : float Pressure at which to calculate the derivative, [Pa] T : float Temperature at which to calculate the derivative, [K] zs : list[float] Mole fractions of all species in the mixture, [-] ws : list[float] Weight fractions of all species in the mixture, [-] method : str Method for which to find the derivative order : int Order of the derivative, >= 1 Returns ------- d_prop_d_P_at_T : float Calculated derivative property at constant temperature, [`units/Pa^order`]
[ "r", "Method", "to", "calculate", "a", "derivative", "of", "a", "mixture", "property", "with", "respect", "to", "pressure", "at", "constant", "temperature", "and", "composition", "of", "a", "given", "order", "using", "a", "specified", "method", ".", "Uses", ...
python
valid
pandas-dev/pandas
pandas/io/formats/style.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1217-L1243
def from_custom_template(cls, searchpath, name): """ Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set. """ loader = ChoiceLoader([ FileSystemLoader(searchpath), cls.loader, ]) class MyStyler(cls): env = Environment(loader=loader) template = env.get_template(name) return MyStyler
[ "def", "from_custom_template", "(", "cls", ",", "searchpath", ",", "name", ")", ":", "loader", "=", "ChoiceLoader", "(", "[", "FileSystemLoader", "(", "searchpath", ")", ",", "cls", ".", "loader", ",", "]", ")", "class", "MyStyler", "(", "cls", ")", ":",...
Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set.
[ "Factory", "function", "for", "creating", "a", "subclass", "of", "Styler", "with", "a", "custom", "template", "and", "Jinja", "environment", "." ]
python
train
alejandroautalan/pygubu
pygubudesigner/previewer.py
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubudesigner/previewer.py#L453-L464
def _over_resizer(self, x, y): "Returns True if mouse is over a resizer" over_resizer = False c = self.canvas ids = c.find_overlapping(x, y, x, y) if ids: o = ids[0] tags = c.gettags(o) if 'resizer' in tags: over_resizer = True return over_resizer
[ "def", "_over_resizer", "(", "self", ",", "x", ",", "y", ")", ":", "over_resizer", "=", "False", "c", "=", "self", ".", "canvas", "ids", "=", "c", ".", "find_overlapping", "(", "x", ",", "y", ",", "x", ",", "y", ")", "if", "ids", ":", "o", "=",...
Returns True if mouse is over a resizer
[ "Returns", "True", "if", "mouse", "is", "over", "a", "resizer" ]
python
train
stanfordnlp/stanza
stanza/research/iterators.py
https://github.com/stanfordnlp/stanza/blob/920c55d8eaa1e7105971059c66eb448a74c100d6/stanza/research/iterators.py#L47-L76
def gen_batches(iterable, batch_size): ''' Returns a generator object that yields batches from `iterable`. See `iter_batches` for more details and caveats. Note that `iter_batches` returns an iterator, which never supports `len()`, `gen_batches` returns an iterable which supports `len()` if and only if `iterable` does. This *may* be an iterator, but could be a `SizedGenerator` object. To obtain an iterator (for example, to use the `next()` function), call `iter()` on this iterable. >>> batches = gen_batches('abcdefghijkl', batch_size=5) >>> len(batches) 3 >>> for batch in batches: ... print(list(batch)) ['a', 'b', 'c', 'd', 'e'] ['f', 'g', 'h', 'i', 'j'] ['k', 'l'] ''' def batches_thunk(): return iter_batches(iterable, batch_size) try: length = len(iterable) except TypeError: return batches_thunk() num_batches = (length - 1) // batch_size + 1 return SizedGenerator(batches_thunk, length=num_batches)
[ "def", "gen_batches", "(", "iterable", ",", "batch_size", ")", ":", "def", "batches_thunk", "(", ")", ":", "return", "iter_batches", "(", "iterable", ",", "batch_size", ")", "try", ":", "length", "=", "len", "(", "iterable", ")", "except", "TypeError", ":"...
Returns a generator object that yields batches from `iterable`. See `iter_batches` for more details and caveats. Note that `iter_batches` returns an iterator, which never supports `len()`, `gen_batches` returns an iterable which supports `len()` if and only if `iterable` does. This *may* be an iterator, but could be a `SizedGenerator` object. To obtain an iterator (for example, to use the `next()` function), call `iter()` on this iterable. >>> batches = gen_batches('abcdefghijkl', batch_size=5) >>> len(batches) 3 >>> for batch in batches: ... print(list(batch)) ['a', 'b', 'c', 'd', 'e'] ['f', 'g', 'h', 'i', 'j'] ['k', 'l']
[ "Returns", "a", "generator", "object", "that", "yields", "batches", "from", "iterable", ".", "See", "iter_batches", "for", "more", "details", "and", "caveats", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/library_tree.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/library_tree.py#L338-L398
def menu_item_remove_libraries_or_root_clicked(self, menu_item): """Removes library from hard drive after request second confirmation""" menu_item_text = self.get_menu_item_text(menu_item) logger.info("Delete item '{0}' pressed.".format(menu_item_text)) model, path = self.view.get_selection().get_selected() if path: # Second confirmation to delete library tree_m_row = self.tree_store[path] library_os_path, library_path, library_name, item_key = self.extract_library_properties_from_selected_row() # assert isinstance(tree_m_row[self.ITEM_STORAGE_ID], str) library_file_system_path = library_os_path if "root" in menu_item_text: button_texts = [menu_item_text + "from tree and config", "Cancel"] partial_message = "This will remove the library root from your configuration (config.yaml)." else: button_texts = [menu_item_text, "Cancel"] partial_message = "This folder will be removed from hard drive! You really wanna do that?" message_string = "You choose to {2} with " \ "\n\nlibrary tree path: {0}" \ "\n\nphysical path: {1}.\n\n\n"\ "{3}" \ "".format(os.path.join(self.convert_if_human_readable(tree_m_row[self.LIB_PATH_STORAGE_ID]), item_key), library_file_system_path, menu_item_text.lower(), partial_message) width = 8*len("physical path: " + library_file_system_path) dialog = RAFCONButtonDialog(message_string, button_texts, message_type=Gtk.MessageType.QUESTION, parent=self.get_root_window(), width=min(width, 1400)) response_id = dialog.run() dialog.destroy() if response_id == 1: if "root" in menu_item_text: logger.info("Remove library root key '{0}' from config.".format(item_key)) from rafcon.gui.singleton import global_config library_paths = global_config.get_config_value('LIBRARY_PATHS') del library_paths[tree_m_row[self.LIB_KEY_STORAGE_ID]] global_config.save_configuration() self.model.library_manager.refresh_libraries() elif "libraries" in menu_item_text: logger.debug("Remove of all libraries in {} is triggered.".format(library_os_path)) import shutil 
shutil.rmtree(library_os_path) self.model.library_manager.refresh_libraries() else: logger.debug("Remove of Library {} is triggered.".format(library_os_path)) self.model.library_manager.remove_library_from_file_system(library_path, library_name) elif response_id in [2, -4]: pass else: logger.warning("Response id: {} is not considered".format(response_id)) return True return False
[ "def", "menu_item_remove_libraries_or_root_clicked", "(", "self", ",", "menu_item", ")", ":", "menu_item_text", "=", "self", ".", "get_menu_item_text", "(", "menu_item", ")", "logger", ".", "info", "(", "\"Delete item '{0}' pressed.\"", ".", "format", "(", "menu_item_...
Removes library from hard drive after request second confirmation
[ "Removes", "library", "from", "hard", "drive", "after", "request", "second", "confirmation" ]
python
train
ModisWorks/modis
modis/discord_modis/modules/help/on_message.py
https://github.com/ModisWorks/modis/blob/1f1225c9841835ec1d1831fc196306527567db8b/modis/discord_modis/modules/help/on_message.py#L8-L59
async def on_message(message): """The on_message event handler for this module Args: message (discord.Message): Input message """ # Simplify message info server = message.server author = message.author channel = message.channel content = message.content data = datatools.get_data() # Only reply to server messages and don't reply to myself if server is not None and author != channel.server.me: # Commands section prefix = data["discord"]["servers"][server.id]["prefix"] if content.startswith(prefix): # Parse message package = content.split(" ") command = package[0][len(prefix):] args = package[1:] arg = ' '.join(args) # Commands if command == 'help': if args: # Parse message datapacks = api_help.get_help_datapacks(arg, prefix) # Create embed UI if datapacks: await client.send_typing(channel) embed = ui_embed.success(channel, arg, datapacks) try: await embed.send() except discord.errors.HTTPException: embed = ui_embed.http_exception(channel, arg) await embed.send() else: # Parse message datapacks = api_help.get_help_commands(prefix) # Create embed UI if datapacks: await client.send_typing(channel) embed = ui_embed.success(channel, arg, datapacks) try: await embed.send() except discord.errors.HTTPException: embed = ui_embed.http_exception(channel, arg) await embed.send()
[ "async", "def", "on_message", "(", "message", ")", ":", "# Simplify message info", "server", "=", "message", ".", "server", "author", "=", "message", ".", "author", "channel", "=", "message", ".", "channel", "content", "=", "message", ".", "content", "data", ...
The on_message event handler for this module Args: message (discord.Message): Input message
[ "The", "on_message", "event", "handler", "for", "this", "module" ]
python
train
pypyr/pypyr-cli
pypyr/dsl.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L538-L578
def retry_loop(self, context, step_method): """Run step inside a retry loop. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context) """ logger.debug("starting") context['retryCounter'] = 0 sleep = context.get_formatted_as_type(self.sleep, out_type=float) if self.max: max = context.get_formatted_as_type(self.max, out_type=int) logger.info(f"retry decorator will try {max} times at {sleep}s " "intervals.") else: max = None logger.info(f"retry decorator will try indefinitely at {sleep}s " "intervals.") # this will never be false. because on counter == max, # exec_iteration raises an exception, breaking out of the loop. # pragma because cov doesn't know the implied else is impossible. # unit test cov is 100%, though. if poll.while_until_true(interval=sleep, max_attempts=max)( self.exec_iteration)(context=context, step_method=step_method ): # pragma: no cover logger.debug("retry loop complete, reporting success.") logger.debug("retry loop done") logger.debug("done")
[ "def", "retry_loop", "(", "self", ",", "context", ",", "step_method", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "context", "[", "'retryCounter'", "]", "=", "0", "sleep", "=", "context", ".", "get_formatted_as_type", "(", "self", ".", "sle...
Run step inside a retry loop. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context)
[ "Run", "step", "inside", "a", "retry", "loop", "." ]
python
train
nvbn/thefuck
thefuck/rules/brew_unknown_command.py
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/rules/brew_unknown_command.py#L21-L41
def _get_brew_tap_specific_commands(brew_path_prefix): """To get tap's specific commands https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115""" commands = [] brew_taps_path = brew_path_prefix + TAP_PATH for user in _get_directory_names_only(brew_taps_path): taps = _get_directory_names_only(brew_taps_path + '/%s' % user) # Brew Taps's naming rule # https://github.com/Homebrew/homebrew/blob/master/share/doc/homebrew/brew-tap.md#naming-conventions-and-limitations taps = (tap for tap in taps if tap.startswith('homebrew-')) for tap in taps: tap_cmd_path = brew_taps_path + TAP_CMD_PATH % (user, tap) if os.path.isdir(tap_cmd_path): commands += (name.replace('brew-', '').replace('.rb', '') for name in os.listdir(tap_cmd_path) if _is_brew_tap_cmd_naming(name)) return commands
[ "def", "_get_brew_tap_specific_commands", "(", "brew_path_prefix", ")", ":", "commands", "=", "[", "]", "brew_taps_path", "=", "brew_path_prefix", "+", "TAP_PATH", "for", "user", "in", "_get_directory_names_only", "(", "brew_taps_path", ")", ":", "taps", "=", "_get_...
To get tap's specific commands https://github.com/Homebrew/homebrew/blob/master/Library/brew.rb#L115
[ "To", "get", "tap", "s", "specific", "commands", "https", ":", "//", "github", ".", "com", "/", "Homebrew", "/", "homebrew", "/", "blob", "/", "master", "/", "Library", "/", "brew", ".", "rb#L115" ]
python
train
jtwhite79/pyemu
pyemu/logger.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/logger.py#L94-L108
def warn(self,message): """write a warning to the log file. Parameters ---------- message : str the warning text """ s = str(datetime.now()) + " WARNING: " + message + '\n' if self.echo: print(s,end='') if self.filename: self.f.write(s) self.f.flush warnings.warn(s,PyemuWarning)
[ "def", "warn", "(", "self", ",", "message", ")", ":", "s", "=", "str", "(", "datetime", ".", "now", "(", ")", ")", "+", "\" WARNING: \"", "+", "message", "+", "'\\n'", "if", "self", ".", "echo", ":", "print", "(", "s", ",", "end", "=", "''", ")...
write a warning to the log file. Parameters ---------- message : str the warning text
[ "write", "a", "warning", "to", "the", "log", "file", "." ]
python
train
olitheolix/qtmacs
qtmacs/extensions/qtmacsscintilla_macros.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_macros.py#L1271-L1286
def reverseCommit(self): """ Re-insert the previously deleted line. """ if self.markerPos is None: return # Remove the specified string from the same position in every line # in between the mark and the cursor (inclusive). col = min((self.markerPos[1], self.cursorPos[1])) for line in range(self.markerPos[0], self.cursorPos[0] + 1): self.qteWidget.setSelection(line, col, line, col + len(self.text)) self.baseClass.removeSelectedText() self.qteWidget.setCursorPosition(*self.cursorPos)
[ "def", "reverseCommit", "(", "self", ")", ":", "if", "self", ".", "markerPos", "is", "None", ":", "return", "# Remove the specified string from the same position in every line", "# in between the mark and the cursor (inclusive).", "col", "=", "min", "(", "(", "self", ".",...
Re-insert the previously deleted line.
[ "Re", "-", "insert", "the", "previously", "deleted", "line", "." ]
python
train
mdiener/grace
grace/py27/slimit/parser.py
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/slimit/parser.py#L334-L348
def p_member_expr(self, p): """member_expr : primary_expr | function_expr | member_expr LBRACKET expr RBRACKET | member_expr PERIOD identifier | NEW member_expr arguments """ if len(p) == 2: p[0] = p[1] elif p[1] == 'new': p[0] = ast.NewExpr(p[2], p[3]) elif p[2] == '.': p[0] = ast.DotAccessor(p[1], p[3]) else: p[0] = ast.BracketAccessor(p[1], p[3])
[ "def", "p_member_expr", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "elif", "p", "[", "1", "]", "==", "'new'", ":", "p", "[", "0", "]", "=", "ast", ".", "NewE...
member_expr : primary_expr | function_expr | member_expr LBRACKET expr RBRACKET | member_expr PERIOD identifier | NEW member_expr arguments
[ "member_expr", ":", "primary_expr", "|", "function_expr", "|", "member_expr", "LBRACKET", "expr", "RBRACKET", "|", "member_expr", "PERIOD", "identifier", "|", "NEW", "member_expr", "arguments" ]
python
train
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/subscriber/message.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/subscriber/message.py#L147-L157
def publish_time(self): """Return the time that the message was originally published. Returns: datetime: The date and time that the message was published. """ timestamp = self._message.publish_time delta = datetime.timedelta( seconds=timestamp.seconds, microseconds=timestamp.nanos // 1000 ) return datetime_helpers._UTC_EPOCH + delta
[ "def", "publish_time", "(", "self", ")", ":", "timestamp", "=", "self", ".", "_message", ".", "publish_time", "delta", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ".", "seconds", ",", "microseconds", "=", "timestamp", ".", "nanos", ...
Return the time that the message was originally published. Returns: datetime: The date and time that the message was published.
[ "Return", "the", "time", "that", "the", "message", "was", "originally", "published", "." ]
python
train
mikicz/arca
arca/backend/docker.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L665-L684
def start_container(self, image, container_name: str, repo_path: Path): """ Starts a container with the image and name ``container_name`` and copies the repository into the container. :type image: docker.models.images.Image :rtype: docker.models.container.Container """ command = "bash -i" if self.inherit_image: command = "sh -i" container = self.client.containers.run(image, command=command, detach=True, tty=True, name=container_name, working_dir=str((Path("/srv/data") / self.cwd).resolve()), auto_remove=True) container.exec_run(["mkdir", "-p", "/srv/scripts"]) container.put_archive("/srv", self.tar_files(repo_path)) container.put_archive("/srv/scripts", self.tar_runner()) return container
[ "def", "start_container", "(", "self", ",", "image", ",", "container_name", ":", "str", ",", "repo_path", ":", "Path", ")", ":", "command", "=", "\"bash -i\"", "if", "self", ".", "inherit_image", ":", "command", "=", "\"sh -i\"", "container", "=", "self", ...
Starts a container with the image and name ``container_name`` and copies the repository into the container. :type image: docker.models.images.Image :rtype: docker.models.container.Container
[ "Starts", "a", "container", "with", "the", "image", "and", "name", "container_name", "and", "copies", "the", "repository", "into", "the", "container", "." ]
python
train
minimind/dispatch-on-value-for-python
dispatchonvalue/dispatchonvalue.py
https://github.com/minimind/dispatch-on-value-for-python/blob/87e510ee00948854752dfca65264a3eefda365ca/dispatchonvalue/dispatchonvalue.py#L105-L119
def dispatch_strict(self, stream, *args, **kwargs): """ Dispatch to function held internally depending upon the value of stream. Matching on directories is strict. This means dictionaries will match if they are exactly the same. """ for f, pat in self.functions: matched, matched_stream = self._match(stream, pat, {'strict': True}, {}) if matched: return f(matched_stream, *args, **kwargs) raise DispatchFailed()
[ "def", "dispatch_strict", "(", "self", ",", "stream", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "f", ",", "pat", "in", "self", ".", "functions", ":", "matched", ",", "matched_stream", "=", "self", ".", "_match", "(", "stream", ",", ...
Dispatch to function held internally depending upon the value of stream. Matching on directories is strict. This means dictionaries will match if they are exactly the same.
[ "Dispatch", "to", "function", "held", "internally", "depending", "upon", "the", "value", "of", "stream", "." ]
python
train
saltstack/salt
salt/utils/minions.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/minions.py#L345-L353
def _check_pillar_pcre_minions(self, expr, delimiter, greedy): ''' Return the minions found by looking via pillar with PCRE ''' return self._check_cache_minions(expr, delimiter, greedy, 'pillar', regex_match=True)
[ "def", "_check_pillar_pcre_minions", "(", "self", ",", "expr", ",", "delimiter", ",", "greedy", ")", ":", "return", "self", ".", "_check_cache_minions", "(", "expr", ",", "delimiter", ",", "greedy", ",", "'pillar'", ",", "regex_match", "=", "True", ")" ]
Return the minions found by looking via pillar with PCRE
[ "Return", "the", "minions", "found", "by", "looking", "via", "pillar", "with", "PCRE" ]
python
train
dossier/dossier.models
dossier/models/web/routes.py
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/web/routes.py#L397-L689
def v1_highlights_post(request, response, kvlclient, tfidf, min_delay=3, task_master=None): '''Obtain highlights for a document POSTed inside a JSON object. Get our Diffeo Highlighter browser extension here: https://chrome.google.com/webstore/detail/jgfcplgdmjkdepnmbdkmgohaldaiplpo While you're at it, pre-register for a beta account on http://diffeo.com. `min_delay` and `task_master` are used by tests. The route for this endpoint is: ``POST /dossier/v1/highlights``. The expected input structure is a JSON encoded string of an object with these keys: .. code-block:: javascript { // only text/html is supported at this time; hopefully PDF.js // enables this to support PDF rendering too. "content-type": "text/html", // URL of the page (after resolving all redirects) "content-location": "http://...", // If provided by the original host, this will be populated, // otherwise it is empty. "last-modified": "datetime string or empty string", // Boolean indicating whether the content may be stored by the // server. If set to `false`, then server must respond // synchronously with a newly computed response payload, and // must purge any stored copies of this `content-location`. // If `true`, server may respond with `state` of `pending`. "store": false, // full page contents obtained by Javascript in the browser // extension accessing `document.documentElement.innerHTML`. // This must be UTF-8 encoded. // N.B. This needs experimentation to figure out whether the // browser will always encode this as Unicode. "body": "... the body content ...", } The output structure is a JSON UTF-8 encoded string of an object with these keys: .. code-block:: javascript { "highlights": [Highlight, Highlight, ...], "state": State, "id": StoreID, "delay": 10.0, "error": Error } where a `State` is one of these strings: `completed`, `stored`, `pending`, or `error`. 
The `StoreID` is an opaque string computed by the backend that the client can use to poll this end point with `GET` requests for a `pending` request. The `delay` value is a number of seconds that the client should wait before beginning polling, e.g. ten seconds. An `Error` object has this structure: .. code-block:: javascript { // Error codes are (0, wrong content type), (1, empty body), // (2, JSON decode error), (3, payload structure incorrect), // (4, payload missing required keys), (5, invalid // content-location), (6, too small body content), (7, // internal error), (8, internal time out), (9, file_id does // not exist) "code": 0, "message": "wrong content_type" } A `Highlight` object has this structure: .. code-block:: javascript { // float in the range [0, 1] "score": 0.7 // a string presented with a check box inside the options // bubble when the user clicks the extension icon to choose // which categories of highlights should be displayed. "category": "Organization", // `queries` are strings that are to be presented as // suggestions to the user, and the extension enables the user // to click any of the configured search engines to see // results for a selected query string. "queries": [], // zero or more strings to match in the document and highlight // with a single color. "strings": [], // zero or more xpath highlight objects to lookup in the document // and highlight with a single color. "xranges": [], // zero or more Regex objects to compile and // execute to find spans to highlight with a single color. "regexes": [] } where a Regex object is: .. code-block:: javascript { "regex": "...", // e.g., "[0-9]" "flags": "..." // e.g., "i" for case insensitive } where an xpath highlight object is: .. code-block:: javascript { "range": XPathRange } where an XpathRange object is: .. code-block:: javascript { "start": XPathOffset, "end": XPathOffset } where an XpathOffset object is: .. 
code-block:: javascript { "node": "/html[1]/body[1]/p[1]/text()[2]", "idx": 4, } All of the `strings`, `ranges`, and `regexes` in a `Highlight` object should be given the same highlight color. A `Highlight` object can provide values in any of the three `strings`, `ranges`, or `regexes` lists, and all should be highlighted. ''' tfidf = tfidf or None content_type = request.headers.get('content-type', '') if not content_type.startswith('application/json'): logger.critical('content-type=%r', content_type) response.status = 415 return { 'state': ERROR, 'error': { 'code': 0, 'message': 'content_type=%r and should be ' 'application/json' % content_type, }, } body = request.body.read() if len(body) == 0: response.status = 400 return { 'state': ERROR, 'error': {'code': 1, 'message': 'empty body'} } try: data = json.loads(body.decode('utf-8')) except Exception, exc: response.status = 400 return { 'state': ERROR, 'error': { 'code': 2, 'message': 'failed to read JSON body: %s' % exc, }, } if not isinstance(data, dict): response.status = 400 return { 'state': ERROR, 'error': { 'code': 3, 'message': 'JSON request payload deserialized to' ' other than an object: %r' % type(data), }, } expected_keys = set([ 'content-type', 'content-location', 'last-modified', 'body', 'store', ]) if set(data.keys()) != expected_keys: response.status = 400 return { 'state': ERROR, 'error': { 'code': 4, 'message': 'other than expected keys in JSON object. 
' 'Expected %r and received %r' % (sorted(expected_keys), sorted(data.keys())), }, } if len(data['content-location']) < 3: response.status = 400 return { 'state': ERROR, 'error': { 'code': 5, 'message': 'received invalid content-location=%r' % data['content-location'], }, } if len(data['body']) < 3: response.status = 400 return { 'state': ERROR, 'error': { 'code': 6, 'message': 'received too little body=%r' % data['body'], }, } if data['last-modified']: try: last_modified = int(datetime.datetime(*eut.parsedate(data['last-modified'])[:6]).strftime('%s')) except Exception, exc: logger.info('failed to parse last-modified=%r', data['last-modified']) last_modified = 0 else: last_modified = 0 doc_id = md5(data['content-location']).hexdigest() content_hash = Nilsimsa(data['body']).hexdigest() file_id = (doc_id, last_modified, content_hash) file_id_str = '%s-%d-%s' % file_id kvlclient.setup_namespace(highlights_kvlayer_tables) if data['store'] is False: kvlclient.delete('files', (file_id[0],)) kvlclient.delete('highlights', (file_id[0],)) logger.info('cleared all store records related to doc_id=%r', file_id[0]) else: # storing is allowed payload_strs = list(kvlclient.get('highlights', file_id)) if payload_strs and payload_strs[0][1]: payload_str = payload_strs[0][1] try: payload = json.loads(payload_str) except Exception, exc: logger.critical('failed to decode out of %r', payload_str, exc_info=True) if payload['state'] != ERROR: logger.info('returning stored payload for %r', file_id) return payload else: logger.info('previously stored data was an error so trying again') delay = len(data['body']) / 5000 # one second per 5KB if delay > min_delay: # store the data in `files` table kvlclient.put('files', (file_id, json.dumps(data))) payload = { 'state': HIGHLIGHTS_PENDING, 'id': file_id_str, 'delay': delay, 'start': time.time() } # store the payload, so that it gets returned during # polling until replaced by the work unit. 
payload_str = json.dumps(payload) kvlclient.put('highlights', (file_id, payload_str)) logger.info('launching highlights async work unit') if task_master is None: conf = yakonfig.get_global_config('coordinate') task_master = coordinate.TaskMaster(conf) task_master.add_work_units('highlights', [(file_id_str, {})]) return payload return maybe_store_highlights(file_id, data, tfidf, kvlclient)
[ "def", "v1_highlights_post", "(", "request", ",", "response", ",", "kvlclient", ",", "tfidf", ",", "min_delay", "=", "3", ",", "task_master", "=", "None", ")", ":", "tfidf", "=", "tfidf", "or", "None", "content_type", "=", "request", ".", "headers", ".", ...
Obtain highlights for a document POSTed inside a JSON object. Get our Diffeo Highlighter browser extension here: https://chrome.google.com/webstore/detail/jgfcplgdmjkdepnmbdkmgohaldaiplpo While you're at it, pre-register for a beta account on http://diffeo.com. `min_delay` and `task_master` are used by tests. The route for this endpoint is: ``POST /dossier/v1/highlights``. The expected input structure is a JSON encoded string of an object with these keys: .. code-block:: javascript { // only text/html is supported at this time; hopefully PDF.js // enables this to support PDF rendering too. "content-type": "text/html", // URL of the page (after resolving all redirects) "content-location": "http://...", // If provided by the original host, this will be populated, // otherwise it is empty. "last-modified": "datetime string or empty string", // Boolean indicating whether the content may be stored by the // server. If set to `false`, then server must respond // synchronously with a newly computed response payload, and // must purge any stored copies of this `content-location`. // If `true`, server may respond with `state` of `pending`. "store": false, // full page contents obtained by Javascript in the browser // extension accessing `document.documentElement.innerHTML`. // This must be UTF-8 encoded. // N.B. This needs experimentation to figure out whether the // browser will always encode this as Unicode. "body": "... the body content ...", } The output structure is a JSON UTF-8 encoded string of an object with these keys: .. code-block:: javascript { "highlights": [Highlight, Highlight, ...], "state": State, "id": StoreID, "delay": 10.0, "error": Error } where a `State` is one of these strings: `completed`, `stored`, `pending`, or `error`. The `StoreID` is an opaque string computed by the backend that the client can use to poll this end point with `GET` requests for a `pending` request. 
The `delay` value is a number of seconds that the client should wait before beginning polling, e.g. ten seconds. An `Error` object has this structure: .. code-block:: javascript { // Error codes are (0, wrong content type), (1, empty body), // (2, JSON decode error), (3, payload structure incorrect), // (4, payload missing required keys), (5, invalid // content-location), (6, too small body content), (7, // internal error), (8, internal time out), (9, file_id does // not exist) "code": 0, "message": "wrong content_type" } A `Highlight` object has this structure: .. code-block:: javascript { // float in the range [0, 1] "score": 0.7 // a string presented with a check box inside the options // bubble when the user clicks the extension icon to choose // which categories of highlights should be displayed. "category": "Organization", // `queries` are strings that are to be presented as // suggestions to the user, and the extension enables the user // to click any of the configured search engines to see // results for a selected query string. "queries": [], // zero or more strings to match in the document and highlight // with a single color. "strings": [], // zero or more xpath highlight objects to lookup in the document // and highlight with a single color. "xranges": [], // zero or more Regex objects to compile and // execute to find spans to highlight with a single color. "regexes": [] } where a Regex object is: .. code-block:: javascript { "regex": "...", // e.g., "[0-9]" "flags": "..." // e.g., "i" for case insensitive } where an xpath highlight object is: .. code-block:: javascript { "range": XPathRange } where an XpathRange object is: .. code-block:: javascript { "start": XPathOffset, "end": XPathOffset } where an XpathOffset object is: .. code-block:: javascript { "node": "/html[1]/body[1]/p[1]/text()[2]", "idx": 4, } All of the `strings`, `ranges`, and `regexes` in a `Highlight` object should be given the same highlight color. 
A `Highlight` object can provide values in any of the three `strings`, `ranges`, or `regexes` lists, and all should be highlighted.
[ "Obtain", "highlights", "for", "a", "document", "POSTed", "inside", "a", "JSON", "object", "." ]
python
train
sods/ods
pods/datasets.py
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/datasets.py#L1321-L1343
def movie_body_count_r_classify(data_set='movie_body_count'): """Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R.""" data = movie_body_count()['Y'] import pandas as pd import numpy as np X = data[['Year', 'Body_Count']] Y = data['MPAA_Rating']=='R' # set label to be positive for R rated films. # Create series of movie genres with the relevant index s = data['Genre'].str.split('|').apply(pd.Series, 1).stack() s.index = s.index.droplevel(-1) # to line up with df's index # Extract from the series the unique list of genres. genres = s.unique() # For each genre extract the indices where it is present and add a column to X for genre in genres: index = s[s==genre].index.tolist() values = pd.Series(np.zeros(X.shape[0]), index=X.index) values[index] = 1 X[genre] = values return data_details_return({'X': X, 'Y': Y, 'info' : "Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R. In this variant we aim to classify whether the film is rated R or not depending on the genre, the years and the body count.", }, data_set)
[ "def", "movie_body_count_r_classify", "(", "data_set", "=", "'movie_body_count'", ")", ":", "data", "=", "movie_body_count", "(", ")", "[", "'Y'", "]", "import", "pandas", "as", "pd", "import", "numpy", "as", "np", "X", "=", "data", "[", "[", "'Year'", ","...
Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by Simon Garnier and Randy Olson for exploring differences between Python and R.
[ "Data", "set", "of", "movies", "and", "body", "count", "for", "movies", "scraped", "from", "www", ".", "MovieBodyCounts", ".", "com", "created", "by", "Simon", "Garnier", "and", "Randy", "Olson", "for", "exploring", "differences", "between", "Python", "and", ...
python
train
rchatterjee/pwmodels
src/pwmodel/models.py
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/models.py#L335-L353
def _gen_next(self, history): """Generate next character sampled from the distribution of characters next. """ orig_history = history if not history: return helper.START history = history[-(self._n-1):] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) while total == 0 and len(history) > 0: history = history[1:] kv = [(k, v) for k, v in self._T.items(history) if k not in reserved_words] total = sum(v for k, v in kv) assert total > 0, "Sorry there is no n-gram with {!r}".format(orig_history) _, sampled_k = list(helper.sample_following_dist(kv, 1, total))[0] # print(">>>", repr(sampled_k), len(history)) return sampled_k[len(history)]
[ "def", "_gen_next", "(", "self", ",", "history", ")", ":", "orig_history", "=", "history", "if", "not", "history", ":", "return", "helper", ".", "START", "history", "=", "history", "[", "-", "(", "self", ".", "_n", "-", "1", ")", ":", "]", "kv", "=...
Generate next character sampled from the distribution of characters next.
[ "Generate", "next", "character", "sampled", "from", "the", "distribution", "of", "characters", "next", "." ]
python
train
uralbash/pyramid_pages
pyramid_pages/routes.py
https://github.com/uralbash/pyramid_pages/blob/545b1ecb2e5dee5742135ba2a689b9635dd4efa1/pyramid_pages/routes.py#L31-L92
def page_factory(request): """ Page factory. Config models example: .. code-block:: python models = { '': [WebPage, CatalogResource], 'catalogue': CatalogResource, 'news': NewsResource, } """ prefix = request.matchdict['prefix'] # /{prefix}/page1/page2/page3... settings = request.registry.settings dbsession = settings[CONFIG_DBSESSION] config = settings[CONFIG_MODELS] if prefix not in config: # prepend {prefix} to *traverse request.matchdict['traverse'] =\ tuple([prefix] + list(request.matchdict['traverse'])) prefix = None # Get all resources and models from config with the same prefix. resources = config.get( prefix, config.get( # 1. get resources with prefix same as URL prefix '', config.get( # 2. if not, then try to get empty prefix '/', None))) # 3. else try to get prefix '/' otherwise None if not hasattr(resources, '__iter__'): resources = (resources, ) tree = {} if not resources: return tree # Add top level nodes of resources in the tree for resource in resources: table = None if not hasattr(resource, '__table__')\ and hasattr(resource, 'model'): table = resource.model else: table = resource if not hasattr(table, 'slug'): continue nodes = dbsession.query(table) if hasattr(table, 'parent_id'): nodes = nodes.filter(or_( table.parent_id == None, # noqa table.parent.has(table.slug == '/') )) for node in nodes: if not node.slug: continue resource = resource_of_node(resources, node) tree[node.slug] = resource(node, prefix=prefix) return tree
[ "def", "page_factory", "(", "request", ")", ":", "prefix", "=", "request", ".", "matchdict", "[", "'prefix'", "]", "# /{prefix}/page1/page2/page3...", "settings", "=", "request", ".", "registry", ".", "settings", "dbsession", "=", "settings", "[", "CONFIG_DBSESSIO...
Page factory. Config models example: .. code-block:: python models = { '': [WebPage, CatalogResource], 'catalogue': CatalogResource, 'news': NewsResource, }
[ "Page", "factory", "." ]
python
train
lorien/grab
grab/deprecated.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/deprecated.py#L173-L185
def css_text(self, path, default=NULL, smart=False, normalize_space=True): """ Get normalized text of node which matches the css path. """ try: return get_node_text(self.css_one(path), smart=smart, normalize_space=normalize_space) except IndexError: if default is NULL: raise else: return default
[ "def", "css_text", "(", "self", ",", "path", ",", "default", "=", "NULL", ",", "smart", "=", "False", ",", "normalize_space", "=", "True", ")", ":", "try", ":", "return", "get_node_text", "(", "self", ".", "css_one", "(", "path", ")", ",", "smart", "...
Get normalized text of node which matches the css path.
[ "Get", "normalized", "text", "of", "node", "which", "matches", "the", "css", "path", "." ]
python
train
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L3943-L4008
def create_dvs(dvs_dict, dvs_name, service_instance=None): ''' Creates a distributed virtual switch (DVS). Note: The ``dvs_name`` param will override any name set in ``dvs_dict``. dvs_dict Dict representation of the new DVS (example in salt.states.dvs) dvs_name Name of the DVS to be created. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.create_dvs dvs dict=$dvs_dict dvs_name=dvs_name ''' log.trace('Creating dvs \'%s\' with dict = %s', dvs_name, dvs_dict) proxy_type = get_proxy_type() if proxy_type == 'esxdatacenter': datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] dc_ref = _get_proxy_target(service_instance) elif proxy_type == 'esxcluster': datacenter = __salt__['esxcluster.get_details']()['datacenter'] dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) # Make the name of the DVS consistent with the call dvs_dict['name'] = dvs_name # Build the config spec from the input dvs_create_spec = vim.DVSCreateSpec() dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec() _apply_dvs_config(dvs_create_spec.configSpec, dvs_dict) if dvs_dict.get('product_info'): dvs_create_spec.productInfo = vim.DistributedVirtualSwitchProductSpec() _apply_dvs_product_info(dvs_create_spec.productInfo, dvs_dict['product_info']) if dvs_dict.get('capability'): dvs_create_spec.capability = vim.DVSCapability() _apply_dvs_capability(dvs_create_spec.capability, dvs_dict['capability']) if dvs_dict.get('link_discovery_protocol'): dvs_create_spec.configSpec.linkDiscoveryProtocolConfig = \ vim.LinkDiscoveryProtocolConfig() _apply_dvs_link_discovery_protocol( dvs_create_spec.configSpec.linkDiscoveryProtocolConfig, dvs_dict['link_discovery_protocol']) if dvs_dict.get('infrastructure_traffic_resource_pools'): dvs_create_spec.configSpec.infrastructureTrafficResourceConfig = [] _apply_dvs_infrastructure_traffic_resources( dvs_create_spec.configSpec.infrastructureTrafficResourceConfig, 
dvs_dict['infrastructure_traffic_resource_pools']) log.trace('dvs_create_spec = %s', dvs_create_spec) salt.utils.vmware.create_dvs(dc_ref, dvs_name, dvs_create_spec) if 'network_resource_management_enabled' in dvs_dict: dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs_name]) if not dvs_refs: raise VMwareObjectRetrievalError( 'DVS \'{0}\' wasn\'t found in datacenter \'{1}\'' ''.format(dvs_name, datacenter)) dvs_ref = dvs_refs[0] salt.utils.vmware.set_dvs_network_resource_management_enabled( dvs_ref, dvs_dict['network_resource_management_enabled']) return True
[ "def", "create_dvs", "(", "dvs_dict", ",", "dvs_name", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Creating dvs \\'%s\\' with dict = %s'", ",", "dvs_name", ",", "dvs_dict", ")", "proxy_type", "=", "get_proxy_type", "(", ")", "if",...
Creates a distributed virtual switch (DVS). Note: The ``dvs_name`` param will override any name set in ``dvs_dict``. dvs_dict Dict representation of the new DVS (example in salt.states.dvs) dvs_name Name of the DVS to be created. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.create_dvs dvs dict=$dvs_dict dvs_name=dvs_name
[ "Creates", "a", "distributed", "virtual", "switch", "(", "DVS", ")", "." ]
python
train
ionelmc/python-cogen
cogen/magic/corolets.py
https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/magic/corolets.py#L77-L142
def run_op(self, op, sched): """ Handle the operation: * if coro is in STATE_RUNNING, send or throw the given op * if coro is in STATE_NEED_INIT, call the init function and if it doesn't return a generator, set STATE_COMPLETED and set the result to whatever the function returned. * if StopIteration is raised, set STATE_COMPLETED and return self. * if any other exception is raised, set STATE_FAILED, handle error or send it to the caller, return self """ if op is self: import warnings warnings.warn("Running coro %s with itself. Something is fishy."%op) assert self.state < self.STATE_COMPLETED, \ "%s called with %s op %r, coroutine state (%s) should be less than %s!" % ( self, isinstance(op, CoroutineException) and op or (hasattr(op, 'state') and {0:'RUNNING', 1:'FINALIZED', 2:'ERRORED'}[op.state] or 'NOP'), op, self._state_names[self.state], self._state_names[self.STATE_COMPLETED] ) coroutines.ident = self try: if self.state == self.STATE_RUNNING: if self.debug: traceback.print_stack(self.coro.gr_frame) if isinstance(op, CoroutineException): rop = self.coro.throw(*op.args) else: rop = self.coro.switch(op and op.finalize(sched)) elif self.state == self.STATE_NEED_INIT: assert op is None rop = self.coro.switch(*self.f_args, **self.f_kws) self.state = self.STATE_RUNNING del self.f_args del self.f_kws else: return None except StopIteration, e: self.state = self.STATE_COMPLETED self.result = e.args and e.args[0] #~ del self.coro rop = self except (KeyboardInterrupt, GeneratorExit, SystemExit): raise except: self.state = self.STATE_FAILED self.result = None self.exception = sys.exc_info() if not self.caller: self.handle_error() rop = self sys.exc_clear() #~ del self.coro finally: coroutines.ident = None return rop
[ "def", "run_op", "(", "self", ",", "op", ",", "sched", ")", ":", "if", "op", "is", "self", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"Running coro %s with itself. Something is fishy.\"", "%", "op", ")", "assert", "self", ".", "state", "<", ...
Handle the operation: * if coro is in STATE_RUNNING, send or throw the given op * if coro is in STATE_NEED_INIT, call the init function and if it doesn't return a generator, set STATE_COMPLETED and set the result to whatever the function returned. * if StopIteration is raised, set STATE_COMPLETED and return self. * if any other exception is raised, set STATE_FAILED, handle error or send it to the caller, return self
[ "Handle", "the", "operation", ":", "*", "if", "coro", "is", "in", "STATE_RUNNING", "send", "or", "throw", "the", "given", "op", "*", "if", "coro", "is", "in", "STATE_NEED_INIT", "call", "the", "init", "function", "and", "if", "it", "doesn", "t", "return"...
python
train
nerdvegas/rez
src/rez/vendor/version/requirement.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/version/requirement.py#L196-L216
def conflicts_with(self, other): """Returns True if this requirement conflicts with another `Requirement` or `VersionedObject`.""" if isinstance(other, Requirement): if (self.name_ != other.name_) or (self.range is None) \ or (other.range is None): return False elif self.conflict: return False if other.conflict \ else self.range_.issuperset(other.range_) elif other.conflict: return other.range_.issuperset(self.range_) else: return not self.range_.intersects(other.range_) else: # VersionedObject if (self.name_ != other.name_) or (self.range is None): return False if self.conflict: return (other.version_ in self.range_) else: return (other.version_ not in self.range_)
[ "def", "conflicts_with", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "Requirement", ")", ":", "if", "(", "self", ".", "name_", "!=", "other", ".", "name_", ")", "or", "(", "self", ".", "range", "is", "None", ")", "or...
Returns True if this requirement conflicts with another `Requirement` or `VersionedObject`.
[ "Returns", "True", "if", "this", "requirement", "conflicts", "with", "another", "Requirement", "or", "VersionedObject", "." ]
python
train
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L492-L504
def group(self): """ The group this event belongs to. Adding the event as a child of a group automatically assigns the event to that group. """ retval = None parent = self.get_parent() Group = get_group_model() if issubclass(parent.specific_class, Group): retval = parent.specific if retval is None: retval = self.group_page return retval
[ "def", "group", "(", "self", ")", ":", "retval", "=", "None", "parent", "=", "self", ".", "get_parent", "(", ")", "Group", "=", "get_group_model", "(", ")", "if", "issubclass", "(", "parent", ".", "specific_class", ",", "Group", ")", ":", "retval", "="...
The group this event belongs to. Adding the event as a child of a group automatically assigns the event to that group.
[ "The", "group", "this", "event", "belongs", "to", ".", "Adding", "the", "event", "as", "a", "child", "of", "a", "group", "automatically", "assigns", "the", "event", "to", "that", "group", "." ]
python
train
salsita/flask-ecstatic
flask_ecstatic.py
https://github.com/salsita/flask-ecstatic/blob/27f67f36eee3afe5855958e4187ff1268d7a5e69/flask_ecstatic.py#L19-L65
def add(app, url = None, path = None, endpoint=None, decorate=None, index='index.html', **options): """Adds static files endpoint with optional directory index.""" url = url or app.static_url_path or '' path = os.path.abspath(path or app.static_folder or '.') endpoint = endpoint or 'static_' + os.path.basename(path) decorate = decorate or (lambda f: f) endpoints = {} if path == app.static_folder: raise ValueError('Files in `{}` path are already automatically served on `{}` URL by Flask.' ' Set Flask app static_folder to None, if you want to serve them using Flask Ecstatic at `{}` URL' .format(path, app.static_url_path, url)) @app.route(url + '/<path:filename>', endpoint = endpoint) @handle404 @decorate def static_files(filename): if index: filename = safe_join(path, filename) if os.path.isdir(filename): filename = os.path.join(filename, index) return send_file(filename, **options) else: return send_from_directory(path, filename, **options) endpoints[endpoint] = static_files if index: @app.route(url + '/', endpoint = endpoint + '_index') @handle404 @decorate def static_index(): return send_from_directory(path, index, **options) endpoints[endpoint + '_index'] = static_index if url: @app.route(url, endpoint = endpoint + '_index_bare') @handle404 @decorate def static_index_bare(): return send_from_directory(path, index, **options) endpoints[endpoint + '_index_bare'] = static_index_bare return endpoints
[ "def", "add", "(", "app", ",", "url", "=", "None", ",", "path", "=", "None", ",", "endpoint", "=", "None", ",", "decorate", "=", "None", ",", "index", "=", "'index.html'", ",", "*", "*", "options", ")", ":", "url", "=", "url", "or", "app", ".", ...
Adds static files endpoint with optional directory index.
[ "Adds", "static", "files", "endpoint", "with", "optional", "directory", "index", "." ]
python
train