repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
python-bonobo/bonobo
bonobo/nodes/basics.py
https://github.com/python-bonobo/bonobo/blob/70c8e62c4a88576976e5b52e58d380d6e3227ab4/bonobo/nodes/basics.py#L320-L356
def MapFields(function, key=True): """ Transformation factory that maps `function` on the values of a row. It can be applied either to 1. all columns (`key=True`), 2. no column (`key=False`), or 3. a subset of columns by passing a callable, which takes column name and returns `bool` (same as the parameter `function` in `filter`). :param function: callable :param key: bool or callable :return: callable """ @use_raw_input def _MapFields(bag): try: factory = type(bag)._make except AttributeError: factory = type(bag) if callable(key): try: fields = bag._fields except AttributeError as e: raise UnrecoverableAttributeError( 'This transformation works only on objects with named' ' fields (namedtuple, BagType, ...).') from e return factory( function(value) if key(key_) else value for key_, value in zip(fields, bag) ) elif key: return factory(function(value) for value in bag) else: return NOT_MODIFIED return _MapFields
[ "def", "MapFields", "(", "function", ",", "key", "=", "True", ")", ":", "@", "use_raw_input", "def", "_MapFields", "(", "bag", ")", ":", "try", ":", "factory", "=", "type", "(", "bag", ")", ".", "_make", "except", "AttributeError", ":", "factory", "=",...
Transformation factory that maps `function` on the values of a row. It can be applied either to 1. all columns (`key=True`), 2. no column (`key=False`), or 3. a subset of columns by passing a callable, which takes column name and returns `bool` (same as the parameter `function` in `filter`). :param function: callable :param key: bool or callable :return: callable
[ "Transformation", "factory", "that", "maps", "function", "on", "the", "values", "of", "a", "row", ".", "It", "can", "be", "applied", "either", "to", "1", ".", "all", "columns", "(", "key", "=", "True", ")", "2", ".", "no", "column", "(", "key", "=", ...
python
train
anlutro/diay.py
diay/__init__.py
https://github.com/anlutro/diay.py/blob/78cfd2b53c8dca3dbac468d620eaa0bb7af08275/diay/__init__.py#L173-L187
def call(self, func, *args, **kwargs): """ Call a function, resolving any type-hinted arguments. """ guessed_kwargs = self._guess_kwargs(func) for key, val in guessed_kwargs.items(): kwargs.setdefault(key, val) try: return func(*args, **kwargs) except TypeError as exc: msg = ( "tried calling function %r but failed, probably " "because it takes arguments that cannot be resolved" ) % func raise DiayException(msg) from exc
[ "def", "call", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "guessed_kwargs", "=", "self", ".", "_guess_kwargs", "(", "func", ")", "for", "key", ",", "val", "in", "guessed_kwargs", ".", "items", "(", ")", ":", "kwa...
Call a function, resolving any type-hinted arguments.
[ "Call", "a", "function", "resolving", "any", "type", "-", "hinted", "arguments", "." ]
python
train
toidi/hadoop-yarn-api-python-client
yarn_api_client/resource_manager.py
https://github.com/toidi/hadoop-yarn-api-python-client/blob/d245bd41808879be6637acfd7460633c0c7dfdd6/yarn_api_client/resource_manager.py#L68-L120
def cluster_applications(self, state=None, final_status=None, user=None, queue=None, limit=None, started_time_begin=None, started_time_end=None, finished_time_begin=None, finished_time_end=None): """ With the Applications API, you can obtain a collection of resources, each of which represents an application. :param str state: state of the application :param str final_status: the final status of the application - reported by the application itself :param str user: user name :param str queue: queue name :param str limit: total number of app objects to be returned :param str started_time_begin: applications with start time beginning with this time, specified in ms since epoch :param str started_time_end: applications with start time ending with this time, specified in ms since epoch :param str finished_time_begin: applications with finish time beginning with this time, specified in ms since epoch :param str finished_time_end: applications with finish time ending with this time, specified in ms since epoch :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response` :raises yarn_api_client.errors.IllegalArgumentError: if `state` or `final_status` incorrect """ path = '/ws/v1/cluster/apps' legal_states = set([s for s, _ in YarnApplicationState]) if state is not None and state not in legal_states: msg = 'Yarn Application State %s is illegal' % (state,) raise IllegalArgumentError(msg) legal_final_statuses = set([s for s, _ in FinalApplicationStatus]) if final_status is not None and final_status not in legal_final_statuses: msg = 'Final Application Status %s is illegal' % (final_status,) raise IllegalArgumentError(msg) loc_args = ( ('state', state), ('finalStatus', final_status), ('user', user), ('queue', queue), ('limit', limit), ('startedTimeBegin', started_time_begin), ('startedTimeEnd', started_time_end), ('finishedTimeBegin', finished_time_begin), ('finishedTimeEnd', finished_time_end)) params = self.construct_parameters(loc_args) 
return self.request(path, **params)
[ "def", "cluster_applications", "(", "self", ",", "state", "=", "None", ",", "final_status", "=", "None", ",", "user", "=", "None", ",", "queue", "=", "None", ",", "limit", "=", "None", ",", "started_time_begin", "=", "None", ",", "started_time_end", "=", ...
With the Applications API, you can obtain a collection of resources, each of which represents an application. :param str state: state of the application :param str final_status: the final status of the application - reported by the application itself :param str user: user name :param str queue: queue name :param str limit: total number of app objects to be returned :param str started_time_begin: applications with start time beginning with this time, specified in ms since epoch :param str started_time_end: applications with start time ending with this time, specified in ms since epoch :param str finished_time_begin: applications with finish time beginning with this time, specified in ms since epoch :param str finished_time_end: applications with finish time ending with this time, specified in ms since epoch :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response` :raises yarn_api_client.errors.IllegalArgumentError: if `state` or `final_status` incorrect
[ "With", "the", "Applications", "API", "you", "can", "obtain", "a", "collection", "of", "resources", "each", "of", "which", "represents", "an", "application", "." ]
python
train
benhoff/vexbot
vexbot/messaging.py
https://github.com/benhoff/vexbot/blob/9b844eb20e84eea92a0e7db7d86a90094956c38f/vexbot/messaging.py#L252-L267
def send_request(self, target: str, *args, **kwargs): """ address must a list instance. Or a string which will be transformed into a address """ # TODO: Log error here if not found? address = self._get_address_from_source(target) if address is None: return args = json.dumps(args).encode('utf8') kwargs = json.dumps(kwargs).encode('utf8') # TODO: test that this works # NOTE: pop out command? frame = (*address, b'', b'MSG', args, kwargs) self.add_callback(self.command_socket.send_multipart, frame)
[ "def", "send_request", "(", "self", ",", "target", ":", "str", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: Log error here if not found?", "address", "=", "self", ".", "_get_address_from_source", "(", "target", ")", "if", "address", "is", "N...
address must a list instance. Or a string which will be transformed into a address
[ "address", "must", "a", "list", "instance", ".", "Or", "a", "string", "which", "will", "be", "transformed", "into", "a", "address" ]
python
train
Julian/Filesystems
filesystems/common.py
https://github.com/Julian/Filesystems/blob/f366e877d6970712bb91d47167209ee2d1e489c5/filesystems/common.py#L173-L186
def _is_dir(fs, path): """ Check that the given path is a directory. Note that unlike `os.path.isdir`, we *do* propagate file system errors other than a non-existent path or non-existent directory component. E.g., should EPERM or ELOOP be raised, an exception will bubble up. """ try: return stat.S_ISDIR(fs.stat(path).st_mode) except exceptions.FileNotFound: return False
[ "def", "_is_dir", "(", "fs", ",", "path", ")", ":", "try", ":", "return", "stat", ".", "S_ISDIR", "(", "fs", ".", "stat", "(", "path", ")", ".", "st_mode", ")", "except", "exceptions", ".", "FileNotFound", ":", "return", "False" ]
Check that the given path is a directory. Note that unlike `os.path.isdir`, we *do* propagate file system errors other than a non-existent path or non-existent directory component. E.g., should EPERM or ELOOP be raised, an exception will bubble up.
[ "Check", "that", "the", "given", "path", "is", "a", "directory", "." ]
python
train
Spinmob/spinmob
egg/_gui.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/egg/_gui.py#L2162-L2209
def get_value(self, name): """ Returns the value of the parameter with the specified name. """ # first clean up the name name = self._clean_up_name(name) # now get the parameter object x = self._find_parameter(name.split('/')) # quit if it pooped. if x == None: return None # get the value and test the bounds value = x.value() # handles the two versions of pyqtgraph bounds = None # For lists, just make sure it's a valid value if x.opts['type'] == 'list': # If it's not one from the master list, choose # and return the default value. if not value in x.opts['values']: # Only choose a default if there exists one if len(x.opts('values')): self.set_value(name, x.opts['values'][0]) return x.opts['values'][0] # Otherwise, just return None and do nothing else: return None # For strings, make sure the returned value is always a string. elif x.opts['type'] in ['str']: return str(value) # Otherwise assume it is a value with bounds or limits (depending on # the version of pyqtgraph) else: if 'limits' in x.opts: bounds = x.opts['limits'] elif 'bounds' in x.opts: bounds = x.opts['bounds'] if not bounds == None: if not bounds[1]==None and value > bounds[1]: value = bounds[1] if not bounds[0]==None and value < bounds[0]: value = bounds[0] # return it return value
[ "def", "get_value", "(", "self", ",", "name", ")", ":", "# first clean up the name", "name", "=", "self", ".", "_clean_up_name", "(", "name", ")", "# now get the parameter object", "x", "=", "self", ".", "_find_parameter", "(", "name", ".", "split", "(", "'/'"...
Returns the value of the parameter with the specified name.
[ "Returns", "the", "value", "of", "the", "parameter", "with", "the", "specified", "name", "." ]
python
train
nerdvegas/rez
src/rez/packages_.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/packages_.py#L563-L575
def get_package_from_string(txt, paths=None): """Get a package given a string. Args: txt (str): String such as 'foo', 'bah-1.3'. paths (list of str, optional): paths to search for package, defaults to `config.packages_path`. Returns: `Package` instance, or None if no package was found. """ o = VersionedObject(txt) return get_package(o.name, o.version, paths=paths)
[ "def", "get_package_from_string", "(", "txt", ",", "paths", "=", "None", ")", ":", "o", "=", "VersionedObject", "(", "txt", ")", "return", "get_package", "(", "o", ".", "name", ",", "o", ".", "version", ",", "paths", "=", "paths", ")" ]
Get a package given a string. Args: txt (str): String such as 'foo', 'bah-1.3'. paths (list of str, optional): paths to search for package, defaults to `config.packages_path`. Returns: `Package` instance, or None if no package was found.
[ "Get", "a", "package", "given", "a", "string", "." ]
python
train
pybel/pybel
src/pybel/struct/graph.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/graph.py#L558-L560
def has_edge_evidence(self, u: BaseEntity, v: BaseEntity, key: str) -> bool: """Check if the given edge has an evidence.""" return self._has_edge_attr(u, v, key, EVIDENCE)
[ "def", "has_edge_evidence", "(", "self", ",", "u", ":", "BaseEntity", ",", "v", ":", "BaseEntity", ",", "key", ":", "str", ")", "->", "bool", ":", "return", "self", ".", "_has_edge_attr", "(", "u", ",", "v", ",", "key", ",", "EVIDENCE", ")" ]
Check if the given edge has an evidence.
[ "Check", "if", "the", "given", "edge", "has", "an", "evidence", "." ]
python
train
PSPC-SPAC-buyandsell/von_anchor
von_anchor/wallet/wallet.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/wallet.py#L492-L524
async def create_link_secret(self, label: str) -> None: """ Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the current link secret does not already correspond to the input link secret label. Raise WalletState if wallet is closed, or any other IndyError causing failure to set link secret in wallet. :param label: label for link secret; indy-sdk uses label to generate link secret """ LOGGER.debug('Wallet.create_link_secret >>> label: %s', label) if not self.handle: LOGGER.debug('Wallet.create_link_secret <!< Wallet %s is closed', self.name) raise WalletState('Wallet {} is closed'.format(self.name)) try: await anoncreds.prover_create_master_secret(self.handle, label) await self._write_link_secret_label(label) except IndyError as x_indy: if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError: LOGGER.warning( 'Wallet %s link secret already current: abstaining from updating label record', self.name) await self._write_link_secret_label(label) else: LOGGER.debug( 'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s', self.name, x_indy.error_code) raise LOGGER.debug('Wallet.create_link_secret <<<')
[ "async", "def", "create_link_secret", "(", "self", ",", "label", ":", "str", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'Wallet.create_link_secret >>> label: %s'", ",", "label", ")", "if", "not", "self", ".", "handle", ":", "LOGGER", ".", "debug",...
Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the current link secret does not already correspond to the input link secret label. Raise WalletState if wallet is closed, or any other IndyError causing failure to set link secret in wallet. :param label: label for link secret; indy-sdk uses label to generate link secret
[ "Create", "link", "secret", "(", "a", ".", "k", ".", "a", ".", "master", "secret", ")", "used", "in", "proofs", "by", "HolderProver", "if", "the", "current", "link", "secret", "does", "not", "already", "correspond", "to", "the", "input", "link", "secret"...
python
train
theolind/pymysensors
mysensors/__init__.py
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/__init__.py#L236-L243
def _poll_queue(self): """Poll the queue for work.""" while not self._stop_event.is_set(): reply = self.run_job() self.send(reply) if self.queue: continue time.sleep(0.02)
[ "def", "_poll_queue", "(", "self", ")", ":", "while", "not", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "reply", "=", "self", ".", "run_job", "(", ")", "self", ".", "send", "(", "reply", ")", "if", "self", ".", "queue", ":", "continu...
Poll the queue for work.
[ "Poll", "the", "queue", "for", "work", "." ]
python
train
python-openxml/python-docx
docx/image/tiff.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/image/tiff.py#L197-L205
def iter_entries(self): """ Generate an |_IfdEntry| instance corresponding to each entry in the directory. """ for idx in range(self._entry_count): dir_entry_offset = self._offset + 2 + (idx*12) ifd_entry = _IfdEntryFactory(self._stream_rdr, dir_entry_offset) yield ifd_entry
[ "def", "iter_entries", "(", "self", ")", ":", "for", "idx", "in", "range", "(", "self", ".", "_entry_count", ")", ":", "dir_entry_offset", "=", "self", ".", "_offset", "+", "2", "+", "(", "idx", "*", "12", ")", "ifd_entry", "=", "_IfdEntryFactory", "("...
Generate an |_IfdEntry| instance corresponding to each entry in the directory.
[ "Generate", "an", "|_IfdEntry|", "instance", "corresponding", "to", "each", "entry", "in", "the", "directory", "." ]
python
train
rigetti/pyquil
pyquil/api/_devices.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_devices.py#L49-L99
def list_lattices(device_name: str = None, num_qubits: int = None, connection: ForestConnection = None): """ Query the Forest 2.0 server for its knowledge of lattices. Optionally filters by underlying device name and lattice qubit count. :return: A dictionary keyed on lattice names and valued in dictionaries of the form { "device_name": device_name, "qubits": num_qubits } """ if connection is None: connection = ForestConnection() session = connection.session url = connection.forest_cloud_endpoint + "/lattices" try: response = get_json(session, url, params={"device_name": device_name, "num_qubits": num_qubits}) return response["lattices"] except Exception as e: raise ValueError(""" list_lattices encountered an error when querying the Forest 2.0 endpoint. Some common causes for this error include: * You don't have valid user authentication information. Very likely this is because you haven't yet been invited to try QCS. We plan on making our device information publicly accessible soon, but in the meanwhile, you'll have to use default QVM configurations and to use `list_quantum_computers` with `qpus = False`. * You do have user authentication information, but it is missing or modified. You can find this either in the environment variables FOREST_API_KEY and FOREST_USER_ID or in the config file (stored by default at ~/.qcs_config, but with location settable through the environment variable QCS_CONFIG), which contains the subsection [Rigetti Forest] user_id = your_user_id key = your_api_key * You're missing an address for the Forest 2.0 server endpoint, or the address is invalid. This too can be set through the environment variable FOREST_URL or by changing the following lines in the QCS config file: [Rigetti Forest] url = https://forest-server.qcs.rigetti.com For the record, here's the original exception: {} """.format(repr(e)))
[ "def", "list_lattices", "(", "device_name", ":", "str", "=", "None", ",", "num_qubits", ":", "int", "=", "None", ",", "connection", ":", "ForestConnection", "=", "None", ")", ":", "if", "connection", "is", "None", ":", "connection", "=", "ForestConnection", ...
Query the Forest 2.0 server for its knowledge of lattices. Optionally filters by underlying device name and lattice qubit count. :return: A dictionary keyed on lattice names and valued in dictionaries of the form { "device_name": device_name, "qubits": num_qubits }
[ "Query", "the", "Forest", "2", ".", "0", "server", "for", "its", "knowledge", "of", "lattices", ".", "Optionally", "filters", "by", "underlying", "device", "name", "and", "lattice", "qubit", "count", "." ]
python
train
rocky/python3-trepan
trepan/bwprocessor/main.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/bwprocessor/main.py#L259-L279
def ok_for_running(self, cmd_obj, name, cmd_hash): '''We separate some of the common debugger command checks here: whether it makes sense to run the command in this execution state, if the command has the right number of arguments and so on. ''' if hasattr(cmd_obj, 'execution_set'): if not (self.core.execution_status in cmd_obj.execution_set): part1 = ("Command '%s' is not available for execution " "status:" % name) Mmsg.errmsg(self, Mmisc. wrapped_lines(part1, self.core.execution_status, self.debugger.settings['width'])) return False pass if self.frame is None and cmd_obj.need_stack: self.intf[-1].errmsg("Command '%s' needs an execution stack." % name) return False return True
[ "def", "ok_for_running", "(", "self", ",", "cmd_obj", ",", "name", ",", "cmd_hash", ")", ":", "if", "hasattr", "(", "cmd_obj", ",", "'execution_set'", ")", ":", "if", "not", "(", "self", ".", "core", ".", "execution_status", "in", "cmd_obj", ".", "execut...
We separate some of the common debugger command checks here: whether it makes sense to run the command in this execution state, if the command has the right number of arguments and so on.
[ "We", "separate", "some", "of", "the", "common", "debugger", "command", "checks", "here", ":", "whether", "it", "makes", "sense", "to", "run", "the", "command", "in", "this", "execution", "state", "if", "the", "command", "has", "the", "right", "number", "o...
python
test
smartfile/client-python
smartfile/__init__.py
https://github.com/smartfile/client-python/blob/f9ccc40a2870df447c65b53dc0747e37cab62d63/smartfile/__init__.py#L50-L73
def _do_request(self, request, url, **kwargs): "Actually makes the HTTP request." try: response = request(url, stream=True, **kwargs) except RequestException as e: raise RequestError(e) else: if response.status_code >= 400: raise ResponseError(response) # Try to return the response in the most useful fashion given it's # type. if response.headers.get('content-type') == 'application/json': try: # Try to decode as JSON return response.json() except (TypeError, ValueError): # If that fails, return the text. return response.text else: # This might be a file, so return it. if kwargs.get('params', {}).get('raw', True): return response.raw else: return response
[ "def", "_do_request", "(", "self", ",", "request", ",", "url", ",", "*", "*", "kwargs", ")", ":", "try", ":", "response", "=", "request", "(", "url", ",", "stream", "=", "True", ",", "*", "*", "kwargs", ")", "except", "RequestException", "as", "e", ...
Actually makes the HTTP request.
[ "Actually", "makes", "the", "HTTP", "request", "." ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L1003-L1015
def p_expr_assign_op(p): '''expr : variable PLUS_EQUAL expr | variable MINUS_EQUAL expr | variable MUL_EQUAL expr | variable DIV_EQUAL expr | variable CONCAT_EQUAL expr | variable MOD_EQUAL expr | variable AND_EQUAL expr | variable OR_EQUAL expr | variable XOR_EQUAL expr | variable SL_EQUAL expr | variable SR_EQUAL expr''' p[0] = ast.AssignOp(p[2], p[1], p[3], lineno=p.lineno(2))
[ "def", "p_expr_assign_op", "(", "p", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "AssignOp", "(", "p", "[", "2", "]", ",", "p", "[", "1", "]", ",", "p", "[", "3", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")" ]
expr : variable PLUS_EQUAL expr | variable MINUS_EQUAL expr | variable MUL_EQUAL expr | variable DIV_EQUAL expr | variable CONCAT_EQUAL expr | variable MOD_EQUAL expr | variable AND_EQUAL expr | variable OR_EQUAL expr | variable XOR_EQUAL expr | variable SL_EQUAL expr | variable SR_EQUAL expr
[ "expr", ":", "variable", "PLUS_EQUAL", "expr", "|", "variable", "MINUS_EQUAL", "expr", "|", "variable", "MUL_EQUAL", "expr", "|", "variable", "DIV_EQUAL", "expr", "|", "variable", "CONCAT_EQUAL", "expr", "|", "variable", "MOD_EQUAL", "expr", "|", "variable", "AN...
python
train
NLeSC/noodles
noodles/run/scheduler.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/run/scheduler.py#L70-L159
def run(self, connection: Connection, master: Workflow): """Run a workflow. :param connection: A connection giving a sink to the job-queue and a source yielding results. :type connection: Connection :param master: The workflow. :type master: Workflow """ # initiate worker slave army and take up reins ... source, sink = connection.setup() # schedule work self.add_workflow(master, master, master.root, sink) graceful_exit = False errors = [] # process results for job_key, status, result, err_msg in source: wf, n = self.jobs[job_key] if status == 'error': graceful_exit = True errors.append(err_msg) try: sink.send(FlushQueue) except StopIteration: pass print("Uncaught error running job: {}, {}".format(n, err_msg), file=sys.stderr) print("Flushing queue and waiting for threads to close.", file=sys.stderr, flush=True) if status == 'aborted': print("Job {} got aborted: {}".format(n, err_msg), file=sys.stderr) print("Flushing queue and waiting for threads to close.", file=sys.stderr, flush=True) graceful_exit = True errors.append(err_msg) try: sink.send(FlushQueue) except StopIteration: pass if self.verbose: print("sched result [{0}]: ".format(self.key_map[job_key]), result, file=sys.stderr, flush=True) del self.jobs[job_key] if len(self.jobs) == 0 and graceful_exit: for error in errors: print("Exception of type", type(error), ":") print(error) raise errors[0] # if this result is the root of a workflow, pop to parent # we do this before scheduling a child workflow, as to # achieve tail-call elimination. 
while n == wf.root and wf is not master: child = id(wf) _, wf, n = self.dynamic_links[child] del self.dynamic_links[child] # if we retrieve a workflow, push a child if is_workflow(result) and not graceful_exit: child_wf = get_workflow(result) self.add_workflow(child_wf, wf, n, sink) continue # insert the result in the nodes that need it wf.nodes[n].result = result for (tgt, address) in wf.links[n]: insert_result(wf.nodes[tgt], address, result) if is_node_ready(wf.nodes[tgt]) and not graceful_exit: self.schedule(Job(workflow=wf, node_id=tgt), sink) # see if we're done if wf == master and n == master.root: try: sink.send(EndOfQueue) except StopIteration: pass return result
[ "def", "run", "(", "self", ",", "connection", ":", "Connection", ",", "master", ":", "Workflow", ")", ":", "# initiate worker slave army and take up reins ...", "source", ",", "sink", "=", "connection", ".", "setup", "(", ")", "# schedule work", "self", ".", "ad...
Run a workflow. :param connection: A connection giving a sink to the job-queue and a source yielding results. :type connection: Connection :param master: The workflow. :type master: Workflow
[ "Run", "a", "workflow", "." ]
python
train
buildbot/buildbot
worker/buildbot_worker/runprocess.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/worker/buildbot_worker/runprocess.py#L671-L678
def _sendMessage(self, msg): """ Collapse and send msg to the master """ if not msg: return msg = self._collapseMsg(msg) self.sendStatus(msg)
[ "def", "_sendMessage", "(", "self", ",", "msg", ")", ":", "if", "not", "msg", ":", "return", "msg", "=", "self", ".", "_collapseMsg", "(", "msg", ")", "self", ".", "sendStatus", "(", "msg", ")" ]
Collapse and send msg to the master
[ "Collapse", "and", "send", "msg", "to", "the", "master" ]
python
train
elastic/elasticsearch-py
elasticsearch/client/xpack/ml.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/ml.py#L674-L688
def put_calendar_job(self, calendar_id, job_id, params=None): """ `<>`_ :arg calendar_id: The ID of the calendar to modify :arg job_id: The ID of the job to add to the calendar """ for param in (calendar_id, job_id): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request( "PUT", _make_path("_ml", "calendars", calendar_id, "jobs", job_id), params=params, )
[ "def", "put_calendar_job", "(", "self", ",", "calendar_id", ",", "job_id", ",", "params", "=", "None", ")", ":", "for", "param", "in", "(", "calendar_id", ",", "job_id", ")", ":", "if", "param", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", "(", "\"E...
`<>`_ :arg calendar_id: The ID of the calendar to modify :arg job_id: The ID of the job to add to the calendar
[ "<", ">", "_" ]
python
train
buildbot/buildbot_travis
buildbot_travis/api.py
https://github.com/buildbot/buildbot_travis/blob/350c657b7aabaf5bc6a9fdb55febdd9d8eabd60c/buildbot_travis/api.py#L96-L117
def saveConfig(self, request): """I save the config, and run check_config, potencially returning errors""" res = yield self.assertAllowed(request) if res: defer.returnValue(res) request.setHeader('Content-Type', 'application/json') if self._in_progress: defer.returnValue(json.dumps({'success': False, 'errors': ['reconfig already in progress']})) self._in_progress = True cfg = json.loads(request.content.read()) if cfg != self._cfg: try: err = yield self.saveCfg(cfg) except Exception as e: # noqa err = [repr(e)] if err is not None: self._in_progress = False yield self.saveCfg(self._cfg) defer.returnValue(json.dumps({'success': False, 'errors': err})) yield self.ep.master.reconfig() defer.returnValue(json.dumps({'success': True}))
[ "def", "saveConfig", "(", "self", ",", "request", ")", ":", "res", "=", "yield", "self", ".", "assertAllowed", "(", "request", ")", "if", "res", ":", "defer", ".", "returnValue", "(", "res", ")", "request", ".", "setHeader", "(", "'Content-Type'", ",", ...
I save the config, and run check_config, potencially returning errors
[ "I", "save", "the", "config", "and", "run", "check_config", "potencially", "returning", "errors" ]
python
train
rsheftel/raccoon
raccoon/dataframe.py
https://github.com/rsheftel/raccoon/blob/e5c4b5fb933b51f33aff11e8168c39790e9a7c75/raccoon/dataframe.py#L718-L751
def append_rows(self, indexes, values, new_cols=True): """ Appends rows of values to the end of the data. If there are new columns in the values and new_cols is True they will be added. Be very careful with this function as for sort DataFrames it will not enforce sort order. Use this only for speed when needed, be careful. :param indexes: list of indexes :param values: dictionary of values where the key is the column name and the value is a list :param new_cols: if True add new columns in values, if False ignore :return: nothing """ # check that the values data is less than or equal to the length of the indexes for column in values: if len(values[column]) > len(indexes): raise ValueError('length of %s column in values is longer than indexes' % column) # check the indexes are not duplicates combined_index = self._index + indexes if len(set(combined_index)) != len(combined_index): raise IndexError('duplicate indexes in DataFrames') if new_cols: for col in values: if col not in self._columns: self._add_column(col) # append index value self._index.extend(indexes) # add data values, if not in values then use None for c, col in enumerate(self._columns): self._data[c].extend(values.get(col, [None] * len(indexes))) self._pad_data()
[ "def", "append_rows", "(", "self", ",", "indexes", ",", "values", ",", "new_cols", "=", "True", ")", ":", "# check that the values data is less than or equal to the length of the indexes", "for", "column", "in", "values", ":", "if", "len", "(", "values", "[", "colum...
Appends rows of values to the end of the data. If there are new columns in the values and new_cols is True they will be added. Be very careful with this function as for sort DataFrames it will not enforce sort order. Use this only for speed when needed, be careful. :param indexes: list of indexes :param values: dictionary of values where the key is the column name and the value is a list :param new_cols: if True add new columns in values, if False ignore :return: nothing
[ "Appends", "rows", "of", "values", "to", "the", "end", "of", "the", "data", ".", "If", "there", "are", "new", "columns", "in", "the", "values", "and", "new_cols", "is", "True", "they", "will", "be", "added", ".", "Be", "very", "careful", "with", "this"...
python
train
radjkarl/imgProcessor
imgProcessor/filters/removeSinglePixels.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/filters/removeSinglePixels.py#L5-L33
def removeSinglePixels(img):
    '''
    img - boolean array
    Zero out, in place, every set pixel that has no set pixel anywhere
    in its 8-connected neighbourhood.
    '''
    n_rows = img.shape[0]
    n_cols = img.shape[1]
    for r in range(n_rows):
        for c in range(n_cols):
            if not img[r, c]:
                continue
            # scan the (clipped) 3x3 neighbourhood for any other set pixel
            has_neighbour = any(
                img[rr, cc]
                for rr in range(max(0, r - 1), min(n_rows, r + 2))
                for cc in range(max(0, c - 1), min(n_cols, c + 2))
                if (rr, cc) != (r, c)
            )
            if not has_neighbour:
                img[r, c] = 0
[ "def", "removeSinglePixels", "(", "img", ")", ":", "gx", "=", "img", ".", "shape", "[", "0", "]", "gy", "=", "img", ".", "shape", "[", "1", "]", "for", "i", "in", "range", "(", "gx", ")", ":", "for", "j", "in", "range", "(", "gy", ")", ":", ...
img - boolean array remove all pixels that have no neighbour
[ "img", "-", "boolean", "array", "remove", "all", "pixels", "that", "have", "no", "neighbour" ]
python
train
CivicSpleen/ambry
ambry/bundle/process.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/process.py#L135-L157
def update(self, *args, **kwargs):
    """Update the last section record.

    Falls back to creating a new record (via ``add``) when no current
    record exists. Returns the id of the updated/created record.
    """
    # merge positional args into kwargs according to the class's convention
    self.augment_args(args, kwargs)
    kwargs['log_action'] = kwargs.get('log_action', 'update')

    if not self.rec:
        # no current record to update -- delegate to add()
        return self.add(**kwargs)
    else:
        for k, v in kwargs.items():
            # Don't update object; use whatever was set in the original record
            if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'):
                setattr(self.rec, k, v)

    self._session.merge(self.rec)

    # logger is optional; only log when one was configured
    if self._logger:
        self._logger.info(self.rec.log_str)

    self._session.commit()
    # invalidate the cached auto-increment record id after committing
    self._ai_rec_id = None
    return self.rec.id
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "augment_args", "(", "args", ",", "kwargs", ")", "kwargs", "[", "'log_action'", "]", "=", "kwargs", ".", "get", "(", "'log_action'", ",", "'update'", ")",...
Update the last section record
[ "Update", "the", "last", "section", "record" ]
python
train
rackerlabs/fleece
fleece/requests.py
https://github.com/rackerlabs/fleece/blob/42d79dfa0777e99dbb09bc46105449a9be5dbaa9/fleece/requests.py#L39-L63
def set_default_retries(*args, **kwargs):
    """
    This function installs a default retry mechanism to be used in requests
    calls. The arguments are those of urllib3's `Retry` object (see
    http://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry).
    Examples:

        # use 3 retries, for default conditions only (network errors/timeouts)
        set_default_retries(total=3)
        set_default_retries(3)  # total can be given as a positional argument

        # use 5 retries, and add 429 and 503 responses to the list of
        # retryable conditions
        set_default_retries(5, status_forcelist=[429, 503])

        # use 5 retries with an exponential backoff factor of 1
        set_default_retries(5, backoff_factor=1)

    :raises ValueError: if more than one positional argument is given
    """
    # validate BEFORE touching the global, so a bad call does not clobber
    # the previously-installed defaults.
    # BUG FIX: the original built ValueError('too many arguments') but never
    # raised it, silently ignoring extra positional arguments.
    if len(args) > 1:
        raise ValueError('too many arguments')
    global DEFAULT_RETRY_ARGS
    DEFAULT_RETRY_ARGS = dict(kwargs)
    if args:
        # the single allowed positional argument is Retry's ``total``
        DEFAULT_RETRY_ARGS['total'] = args[0]
[ "def", "set_default_retries", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "global", "DEFAULT_RETRY_ARGS", "DEFAULT_RETRY_ARGS", "=", "kwargs", "if", "len", "(", "args", ")", ">", "1", ":", "ValueError", "(", "'too many arguments'", ")", "elif", "len...
This function installs a default retry mechanism to be used in requests calls. The arguments are those of urllib3's `Retry` object (see http://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry). Examples: # use 3 retries, for default conditions only (network errors/timeouts) set_default_retries(total=3) set_default_retries(3) # total can be given as a positional argument # use 5 retries, and add 429 and 503 responses to the list of # retryable conditions set_default_retries(5, status_forcelist=[429, 503]) # use 5 retries with an exponential backoff factor of 1 set_default_retries(5, backoff_factor=1)
[ "This", "function", "installs", "a", "default", "retry", "mechanism", "to", "be", "used", "in", "requests", "calls", ".", "The", "arguments", "are", "those", "of", "urllib3", "s", "Retry", "object", "(", "see", "http", ":", "//", "urllib3", ".", "readthedo...
python
train
duguyue100/minesweeper
minesweeper/msgame.py
https://github.com/duguyue100/minesweeper/blob/38b1910f4c34d0275ac10a300285aba6f1d91d61/minesweeper/msgame.py#L210-L229
def parse_move(self, move_msg):
    """Split a move message into its components.

    Parameters
    ----------
    move_msg : string
        a valid message should be in: "[move type]: [X], [Y]"

    Returns
    -------
    tuple of (move type string, X as int, Y as int)
    """
    # TODO: validate the message before parsing
    colon_at = move_msg.index(":")
    comma_at = move_msg.index(",")
    kind = move_msg[:colon_at]
    x = int(move_msg[colon_at + 1:comma_at])
    y = int(move_msg[comma_at + 1:])
    return kind, x, y
[ "def", "parse_move", "(", "self", ",", "move_msg", ")", ":", "# TODO: some condition check", "type_idx", "=", "move_msg", ".", "index", "(", "\":\"", ")", "move_type", "=", "move_msg", "[", ":", "type_idx", "]", "pos_idx", "=", "move_msg", ".", "index", "(",...
Parse a move from a string. Parameters ---------- move_msg : string a valid message should be in: "[move type]: [X], [Y]" Returns -------
[ "Parse", "a", "move", "from", "a", "string", "." ]
python
train
emilydolson/avida-spatial-tools
avidaspatial/utils.py
https://github.com/emilydolson/avida-spatial-tools/blob/7beb0166ccefad5fa722215b030ac2a53d62b59e/avidaspatial/utils.py#L217-L241
def phenotype_to_res_set(phenotype, resources):
    """
    Converts a binary string to the set of resources whose bits are set.

    Inputs: phenotype - a binary string (must start with "0b")
            resources - a list of strings naming the resource at each
                        bit position of the phenotype

    returns: A set of resource-name strings
    """
    assert(phenotype[0:2] == "0b")
    # strip the "0b" prefix and left-pad with zeroes to one bit per resource
    bits = phenotype[2:].zfill(len(resources))

    res_set = {resources[i] for i, bit in enumerate(bits) if bit == "1"}

    # sanity check: no resource may be counted twice
    assert(bits.count("1") == len(res_set))
    return res_set
[ "def", "phenotype_to_res_set", "(", "phenotype", ",", "resources", ")", ":", "assert", "(", "phenotype", "[", "0", ":", "2", "]", "==", "\"0b\"", ")", "phenotype", "=", "phenotype", "[", "2", ":", "]", "# Fill in leading zeroes", "while", "len", "(", "phen...
Converts a binary string to a set containing the resources indicated by the bits in the string. Inputs: phenotype - a binary string resources - a list of string indicating which resources correspond to which indices of the phenotype returns: A set of strings indicating resources
[ "Converts", "a", "binary", "string", "to", "a", "set", "containing", "the", "resources", "indicated", "by", "the", "bits", "in", "the", "string", "." ]
python
train
CZ-NIC/python-rt
rt.py
https://github.com/CZ-NIC/python-rt/blob/e7a9f555e136708aec3317f857045145a2271e16/rt.py#L1359-L1389
def edit_link(self, ticket_id, link_name, link_value, delete=False):
    """ Creates or deletes a link between the specified tickets (undocumented API feature).

    :param ticket_id: ID of ticket to edit
    :param link_name: Name of link to edit (DependsOn, DependedOnBy,
                      RefersTo, ReferredToBy, HasMember or MemberOf);
                      matched case-insensitively
    :param link_value: Either ticket ID or external link.
    :param delete: if True the link is deleted instead of created
    :returns: ``True``
                  Operation was successful
              ``False``
                  Ticket with given ID does not exist or link to delete is
                  not found
    :raises InvalidUse: When none or more then one links are specified. Also
                        when wrong link name is used.
    """
    valid_link_names = set(('dependson', 'dependedonby', 'refersto',
                            'referredtoby', 'hasmember', 'memberof'))
    if not link_name.lower() in valid_link_names:
        raise InvalidUse("Unsupported name of link.")
    # 'del' flag selects delete vs create on the same endpoint
    post_data = {'rel': link_name.lower(),
                 'to': link_value,
                 'id': ticket_id,
                 'del': 1 if delete else 0
                 }
    msg = self.__request('ticket/link', post_data=post_data)
    # the status message is carried on the third line of the response body
    state = msg.split('\n')[2]
    if delete:
        return self.RE_PATTERNS['deleted_link_pattern'].match(state) is not None
    else:
        return self.RE_PATTERNS['created_link_pattern'].match(state) is not None
[ "def", "edit_link", "(", "self", ",", "ticket_id", ",", "link_name", ",", "link_value", ",", "delete", "=", "False", ")", ":", "valid_link_names", "=", "set", "(", "(", "'dependson'", ",", "'dependedonby'", ",", "'refersto'", ",", "'referredtoby'", ",", "'ha...
Creates or deletes a link between the specified tickets (undocumented API feature). :param ticket_id: ID of ticket to edit :param link_name: Name of link to edit (DependsOn, DependedOnBy, RefersTo, ReferredToBy, HasMember or MemberOf) :param link_value: Either ticker ID or external link. :param delete: if True the link is deleted instead of created :returns: ``True`` Operation was successful ``False`` Ticket with given ID does not exist or link to delete is not found :raises InvalidUse: When none or more then one links are specified. Also when wrong link name is used.
[ "Creates", "or", "deletes", "a", "link", "between", "the", "specified", "tickets", "(", "undocumented", "API", "feature", ")", "." ]
python
train
obulpathi/cdn-fastly-python
fastly/__init__.py
https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L882-L885
def get_vcl(self, service_id, version_number, name, include_content=True):
    """Fetch the uploaded VCL with the given name for a service/version."""
    path = "/service/%s/version/%d/vcl/%s?include_content=%d" % (
        service_id, version_number, name, int(include_content))
    return FastlyVCL(self, self._fetch(path))
[ "def", "get_vcl", "(", "self", ",", "service_id", ",", "version_number", ",", "name", ",", "include_content", "=", "True", ")", ":", "content", "=", "self", ".", "_fetch", "(", "\"/service/%s/version/%d/vcl/%s?include_content=%d\"", "%", "(", "service_id", ",", ...
Get the uploaded VCL for a particular service and version.
[ "Get", "the", "uploaded", "VCL", "for", "a", "particular", "service", "and", "version", "." ]
python
train
LEMS/pylems
lems/parser/expr.py
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/expr.py#L263-L315
def tokenize(self):
    """
    Tokenizes the string stored in the parser object into a list of
    tokens, stored in ``self.token_list``.

    Token classes: identifiers (alnum + '_'), numbers (digits with '.',
    'e'/'E' exponents and signed exponents), dotted names, and single
    characters for everything else.  A '-' that appears where a unary
    minus is expected is rewritten to '~'.
    """
    self.token_list = []

    ps = self.parse_string.strip()

    i = 0
    last_token = None

    # skip leading whitespace
    while i < len(ps) and ps[i].isspace():
        i += 1

    while i < len(ps):
        token = ''

        if ps[i].isalpha():
            # identifier: letters, digits and underscores
            while i < len(ps) and (ps[i].isalnum() or ps[i] == '_'):
                token += ps[i]
                i += 1
        elif ps[i].isdigit():
            # number: digits, decimal point, exponent marker, and a sign
            # character only when it immediately follows 'e'/'E'
            while i < len(ps) and (ps[i].isdigit() or
                                   ps[i] == '.' or
                                   ps[i] == 'e' or
                                   ps[i] == 'E' or
                                   (ps[i] == '+' and (ps[i-1] == 'e' or ps[i-1] == 'E')) or
                                   (ps[i] == '-' and (ps[i-1] == 'e' or ps[i-1] == 'E'))):
                token += ps[i]
                i += 1
        elif ps[i] == '.':
            if ps[i+1].isdigit():
                # number written with a leading dot, e.g. ".5"
                while i < len(ps) and (ps[i].isdigit() or ps[i] == '.'):
                    token += ps[i]
                    i += 1
            else:
                # dotted name, e.g. ".name" attribute access
                while i < len(ps) and (ps[i].isalpha() or ps[i] == '.'):
                    token += ps[i]
                    i += 1
        else:
            # any other single character is its own token
            token += ps[i]
            i += 1

        # a '-' at the start, after '(' or after an operator is a unary
        # minus; encode it as '~' so later stages can tell the two apart
        if token == '-' and \
           (last_token == None or last_token == '(' or self.is_op(last_token)):
            token = '~'

        self.token_list += [token]
        last_token = token

        # skip whitespace between tokens
        while i < len(ps) and ps[i].isspace():
            i += 1
[ "def", "tokenize", "(", "self", ")", ":", "self", ".", "token_list", "=", "[", "]", "ps", "=", "self", ".", "parse_string", ".", "strip", "(", ")", "i", "=", "0", "last_token", "=", "None", "while", "i", "<", "len", "(", "ps", ")", "and", "ps", ...
Tokenizes the string stored in the parser object into a list of tokens.
[ "Tokenizes", "the", "string", "stored", "in", "the", "parser", "object", "into", "a", "list", "of", "tokens", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/bijectors/bijector.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/bijector.py#L122-L129
def _deep_tuple(self, x):
    """Converts nested `tuple`, `list`, or `dict` to nested `tuple`."""
    if isinstance(x, dict):
        # a dict becomes a tuple of sorted (key, value) pairs, recursively
        # converted, so equal dicts map to equal (hashable) tuples
        return self._deep_tuple(tuple(sorted(x.items())))
    if isinstance(x, (list, tuple)):
        return tuple(self._deep_tuple(item) for item in x)
    # scalars pass through unchanged
    return x
[ "def", "_deep_tuple", "(", "self", ",", "x", ")", ":", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "return", "self", ".", "_deep_tuple", "(", "tuple", "(", "sorted", "(", "x", ".", "items", "(", ")", ")", ")", ")", "elif", "isinstance", ...
Converts nested `tuple`, `list`, or `dict` to nested `tuple`.
[ "Converts", "nested", "tuple", "list", "or", "dict", "to", "nested", "tuple", "." ]
python
test
ApproxEng/approxeng.input
src/python/approxeng/input/__init__.py
https://github.com/ApproxEng/approxeng.input/blob/0cdf8eed6bfd26ca3b20b4478f352787f60ae8ef/src/python/approxeng/input/__init__.py#L40-L72
def map_single_axis(low, high, dead_zone, hot_zone, value):
    """
    Clip and scale a raw single-axis value, honouring dead and hot zones.

    ``dead_zone`` is the proportion of the travel next to ``low`` that is
    treated as no signal (result 0.0); ``hot_zone`` is the proportion next
    to ``high`` treated as a full-strength signal.  The remaining span is
    handed to ``map_into_range``, so the result runs from 0.0 to 1.0 when
    ``low < high`` and from 0.0 to -1.0 when ``high < low``.  ``low`` and
    ``high`` express "no signal" / "full signal", not numeric order.

    Behaviour is undefined when dead_zone + hot_zone == 1.0 -- avoid that.

    :param low: raw value corresponding to no signal
    :param high: raw value corresponding to a full signal
    :param dead_zone: proportion of travel near ``low`` collapsed to 0.0
    :param hot_zone: proportion of travel near ``high`` collapsed to full
        deflection
    :param value: the raw value to map
    :return: the scaled and clipped value
    """
    span = high - low
    # pull both endpoints inwards by the requested zone proportions
    effective_low = low + span * dead_zone
    effective_high = high - span * hot_zone
    return map_into_range(effective_low, effective_high, value)
[ "def", "map_single_axis", "(", "low", ",", "high", ",", "dead_zone", ",", "hot_zone", ",", "value", ")", ":", "input_range", "=", "high", "-", "low", "corrected_low", "=", "low", "+", "input_range", "*", "dead_zone", "corrected_high", "=", "high", "-", "in...
Apply dead and hot zones before mapping a value to a range. The dead and hot zones are both expressed as the proportion of the axis range which should be regarded as 0.0 or 1.0 (or -1.0 depending on cardinality) respectively, so for example setting dead zone to 0.2 means the first 20% of the range of the axis will be treated as if it's the low value, and setting the hot zone to 0.4 means the last 40% of the range will be treated as if it's the high value. Note that as with map_into_range, low is not necessarily numerically lower than high, it instead expresses a low value signal as opposed to a high value one (which could include a high negative value). Note that bad things happen if dead_zone + hot_zone == 1.0, so don't do that. This is used by the map_dual_axis call, but can also be used by itself to handle single axes like triggers where the overall range varies from 0.0 to 1.0 rather than -1.0 to 1.0 as a regular joystick axis would. :param low: The value corresponding to no signal :param high: The value corresponding to a full signal :param dead_zone: The proportion of the range of motion away from the no-signal end which should be treated as equivalent to no signal and return 0.0 :param hot_zone: The proportion of the range of motion away from the high signal end which should be treated as equivalent to a full strength input. :param value: The raw value to map :return: The scaled and clipped value, taking into account dead and hot zone boundaries, ranging from 0.0 to either 1.0 or -1.0 depending on whether low or high are numerically larger (low < high means max value is 1.0, high < low means it's -1.0).
[ "Apply", "dead", "and", "hot", "zones", "before", "mapping", "a", "value", "to", "a", "range", ".", "The", "dead", "and", "hot", "zones", "are", "both", "expressed", "as", "the", "proportion", "of", "the", "axis", "range", "which", "should", "be", "regar...
python
train
pri22296/botify
botify/botify.py
https://github.com/pri22296/botify/blob/c3ff022f4c7314e508ffaa3ce1da1ef1e784afb2/botify/botify.py#L89-L120
def add_modifier(self, modifier, keywords, relative_pos, action, parameter=None):
    """Register a modifier that rewrites existing tasks when triggered.

    Parameters
    ----------
    modifier : str
        A string value which would trigger the given Modifier.
    keywords : iterable of str
        Keywords of the tasks that should be modified when `modifier`
        is present.
    relative_pos : int
        Relative position (never 0) of the task to modify; data fields
        count towards the position.
    action : str
        Name of the action to perform on the task (an arbitrary function
        invoked to carry out the modification).
    parameter : object
        Value required by the `action`. (Default None)

    Raises
    ------
    ValueError
        If `relative_pos` is 0.
    """
    if relative_pos == 0:
        raise ValueError("relative_pos cannot be 0")
    entry = (action, parameter, relative_pos)
    keyword_map = self._modifiers.get(modifier, {})
    for word in keywords:
        # actions per keyword are stored as an immutable, append-only tuple
        keyword_map[word] = keyword_map.get(word, ()) + (entry,)
    self._modifiers[modifier] = keyword_map
[ "def", "add_modifier", "(", "self", ",", "modifier", ",", "keywords", ",", "relative_pos", ",", "action", ",", "parameter", "=", "None", ")", ":", "if", "relative_pos", "==", "0", ":", "raise", "ValueError", "(", "\"relative_pos cannot be 0\"", ")", "modifier_...
Modify existing tasks based on presence of a keyword. Parameters ---------- modifier : str A string value which would trigger the given Modifier. keywords : iterable of str sequence of strings which are keywords for some task, which has to be modified. relative_pos : int Relative position of the task which should be modified in the presence of `modifier`. It's value can never be 0. Data fields should also be considered when calculating the relative position. action : str String value representing the action which should be performed on the task. Action represents calling a arbitrary function to perform th emodification. parameter : object value required by the `action`.(Default None)
[ "Modify", "existing", "tasks", "based", "on", "presence", "of", "a", "keyword", "." ]
python
train
barrust/mediawiki
mediawiki/mediawikipage.py
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawikipage.py#L331-L348
def backlinks(self):
    """ list: Pages that link to this page, sorted alphabetically

    Note:
        Not settable
    """
    # NOTE(review): presumably decorated with @property in the full file
    # (the "Not settable" note suggests so) -- decorator not visible here.
    if self._backlinks is None:
        self._backlinks = list()
        # non-redirect backlinks from the main namespace only, all pages
        params = {
            "action": "query",
            "list": "backlinks",
            "bltitle": self.title,
            "bllimit": "max",
            "blfilterredir": "nonredirects",
            "blnamespace": 0,
        }
        # _continued_query follows API continuation to fetch every result
        tmp = [link["title"] for link in self._continued_query(params, "backlinks")]
        self._backlinks = sorted(tmp)
    return self._backlinks
[ "def", "backlinks", "(", "self", ")", ":", "if", "self", ".", "_backlinks", "is", "None", ":", "self", ".", "_backlinks", "=", "list", "(", ")", "params", "=", "{", "\"action\"", ":", "\"query\"", ",", "\"list\"", ":", "\"backlinks\"", ",", "\"bltitle\""...
list: Pages that link to this page Note: Not settable
[ "list", ":", "Pages", "that", "link", "to", "this", "page" ]
python
train
gamechanger/dusty
dusty/systems/docker/__init__.py
https://github.com/gamechanger/dusty/blob/dc12de90bb6945023d6f43a8071e984313a1d984/dusty/systems/docker/__init__.py#L61-L72
def get_dusty_containers(services, include_exited=False):
    """Get a list of containers associated with the list of services.
    If no services are provided, attempts to return all containers
    associated with Dusty.

    :param services: iterable of service/app names; falsy means "all Dusty containers"
    :param include_exited: when True, also include containers that are not running
    :return: list of container dicts (never None entries)
    """
    client = get_docker_client()
    if services:
        containers = [get_container_for_app_or_service(service, include_exited=include_exited)
                      for service in services]
        # drop services that have no matching container (lookup returned a falsy value)
        return [container for container in containers if container]
    else:
        # Dusty containers are identified by a '/dusty' name prefix
        return [container
                for container in client.containers(all=include_exited)
                if any(name.startswith('/dusty') for name in container.get('Names', []))]
[ "def", "get_dusty_containers", "(", "services", ",", "include_exited", "=", "False", ")", ":", "client", "=", "get_docker_client", "(", ")", "if", "services", ":", "containers", "=", "[", "get_container_for_app_or_service", "(", "service", ",", "include_exited", "...
Get a list of containers associated with the list of services. If no services are provided, attempts to return all containers associated with Dusty.
[ "Get", "a", "list", "of", "containers", "associated", "with", "the", "list", "of", "services", ".", "If", "no", "services", "are", "provided", "attempts", "to", "return", "all", "containers", "associated", "with", "Dusty", "." ]
python
valid
scikit-learn-contrib/categorical-encoding
category_encoders/utils.py
https://github.com/scikit-learn-contrib/categorical-encoding/blob/5e9e803c9131b377af305d5302723ba2415001da/category_encoders/utils.py#L27-L36
def get_obj_cols(df):
    """
    Returns names of 'object' (or categorical) columns in the DataFrame.
    """
    return [
        df.columns.values[idx]
        for idx, dtype in enumerate(df.dtypes)
        if dtype == 'object' or is_category(dtype)
    ]
[ "def", "get_obj_cols", "(", "df", ")", ":", "obj_cols", "=", "[", "]", "for", "idx", ",", "dt", "in", "enumerate", "(", "df", ".", "dtypes", ")", ":", "if", "dt", "==", "'object'", "or", "is_category", "(", "dt", ")", ":", "obj_cols", ".", "append"...
Returns names of 'object' columns in the DataFrame.
[ "Returns", "names", "of", "object", "columns", "in", "the", "DataFrame", "." ]
python
valid
django-admin-tools/django-admin-tools
admin_tools/utils.py
https://github.com/django-admin-tools/django-admin-tools/blob/ba6f46f51ebd84fcf84f2f79ec9487f45452d79b/admin_tools/utils.py#L20-L28
def uniquify(value, seen_values):
    """ Adds value to seen_values set and ensures it is unique

    If ``value`` is already in ``seen_values``, an increasing integer
    suffix is appended ("value1", "value2", ...) until the result is
    unique. The unique result is added to ``seen_values`` and returned.
    """
    # renamed the counter: the original called it ``id``, shadowing the builtin
    counter = 1
    new_value = value
    while new_value in seen_values:
        new_value = "%s%s" % (value, counter)
        counter += 1
    seen_values.add(new_value)
    return new_value
[ "def", "uniquify", "(", "value", ",", "seen_values", ")", ":", "id", "=", "1", "new_value", "=", "value", "while", "new_value", "in", "seen_values", ":", "new_value", "=", "\"%s%s\"", "%", "(", "value", ",", "id", ")", "id", "+=", "1", "seen_values", "...
Adds value to seen_values set and ensures it is unique
[ "Adds", "value", "to", "seen_values", "set", "and", "ensures", "it", "is", "unique" ]
python
train
bethgelab/foolbox
foolbox/criteria.py
https://github.com/bethgelab/foolbox/blob/8ab54248c70e45d8580a7d9ee44c9c0fb5755c4a/foolbox/criteria.py#L140-L157
def name(self):
    """Builds the combined criterion's name from its sub-criteria.

    Each sub-criterion contributes its own ``name()``; because a combined
    sub-criterion already reports the joined names of its children, the
    hierarchy flattens out recursively and neither nesting nor ordering
    of the criteria affects the result.

    Returns
    -------
    str
        The sub-criteria names, sorted alphabetically and joined with
        double underscores.
    """
    sub_names = [criterion.name() for criterion in self._criteria]
    sub_names.sort()
    return '__'.join(sub_names)
[ "def", "name", "(", "self", ")", ":", "names", "=", "(", "criterion", ".", "name", "(", ")", "for", "criterion", "in", "self", ".", "_criteria", ")", "return", "'__'", ".", "join", "(", "sorted", "(", "names", ")", ")" ]
Concatenates the names of the given criteria in alphabetical order. If a sub-criterion is itself a combined criterion, its name is first split into the individual names and the names of the sub-sub criteria is used instead of the name of the sub-criterion. This is done recursively to ensure that the order and the hierarchy of the criteria does not influence the name. Returns ------- str The alphabetically sorted names of the sub-criteria concatenated using double underscores between them.
[ "Concatenates", "the", "names", "of", "the", "given", "criteria", "in", "alphabetical", "order", "." ]
python
valid
numenta/htmresearch
projects/nik/nik_htm.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/nik/nik_htm.py#L200-L209
def encodeThetas(self, theta1, theta2):
    """Return the SDR (active bit indices) for theta1 and theta2"""
    encoded1 = self.theta1Encoder.encode(theta1)
    encoded2 = self.theta2Encoder.encode(theta2)
    # the outer product combines the two encodings into one 2D pattern;
    # the indices of its non-zero entries, flattened, form the SDR
    combined = numpy.outer(encoded2, encoded1)
    return combined.flatten().nonzero()[0]
[ "def", "encodeThetas", "(", "self", ",", "theta1", ",", "theta2", ")", ":", "# print >> sys.stderr, \"encoded theta1 value = \", theta1", "# print >> sys.stderr, \"encoded theta2 value = \", theta2", "t1e", "=", "self", ".", "theta1Encoder", ".", "encode", "(", "theta1", ")...
Return the SDR for theta1 and theta2
[ "Return", "the", "SDR", "for", "theta1", "and", "theta2" ]
python
train
CalebBell/fluids
fluids/safety_valve.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/safety_valve.py#L507-L582
def API520_A_g(m, T, Z, MW, k, P1, P2=101325, Kd=0.975, Kb=1, Kc=1):
    r'''Size an API 520 relief valve for gas or vapor service, covering
    both critical and sub-critical flow.

    Critical flow:

    .. math::
        A = \frac{m}{CK_dP_1K_bK_c}\sqrt{\frac{TZ}{M}}

    Sub-critical flow:

    .. math::
        A = \frac{17.9m}{F_2K_dK_c}\sqrt{\frac{TZ}{MP_1(P_1-P_2)}}

    Parameters
    ----------
    m : float
        Mass flow rate of vapor through the valve, [kg/s]
    T : float
        Temperature of vapor entering the valve, [K]
    Z : float
        Compressibility factor of the vapor, [-]
    MW : float
        Molecular weight of the vapor, [g/mol]
    k : float
        Isentropic coefficient or ideal gas heat capacity ratio [-]
    P1 : float
        Upstream relieving pressure (set pressure + allowable overpressure
        + atmospheric pressure), [Pa]
    P2 : float, optional
        Built-up backpressure at the device outlet after it opens, [Pa]
    Kd : float, optional
        Effective coefficient of discharge; 0.975 normally, 0.62 with an
        upstream rupture disc, [-]
    Kb : float, optional
        Correction due to vapor backpressure [-]
    Kc : float, optional
        Combination correction factor for installation with a rupture
        disk upstream of the PRV, [-]

    Returns
    -------
    A : float
        Minimum area for relief valve according to API 520, [m^2]

    Notes
    -----
    The calculation runs internally in kg/hr, kPa, and mm^2 to match the
    standard's correlations; inputs and output are SI.

    References
    ----------
    .. [1] API Standard 520, Part 1 - Sizing and Selection.
    '''
    # convert to the standard's working units: kPa and kg/hr
    P1_kPa = P1/1000.
    P2_kPa = P2/1000.
    m_hr = m*3600.
    if is_critical_flow(P1_kPa, P2_kPa, k):
        C = API520_C(k)
        A_mm2 = m_hr/(C*Kd*Kb*Kc*P1_kPa)*(T*Z/MW)**0.5
    else:
        F2 = API520_F2(k, P1_kPa, P2_kPa)
        A_mm2 = 17.9*m_hr/(F2*Kd*Kc)*(T*Z/(MW*P1_kPa*(P1_kPa-P2_kPa)))**0.5
    return A_mm2*0.001**2  # mm^2 to m^2
[ "def", "API520_A_g", "(", "m", ",", "T", ",", "Z", ",", "MW", ",", "k", ",", "P1", ",", "P2", "=", "101325", ",", "Kd", "=", "0.975", ",", "Kb", "=", "1", ",", "Kc", "=", "1", ")", ":", "P1", ",", "P2", "=", "P1", "/", "1000.", ",", "P2...
r'''Calculates required relief valve area for an API 520 valve passing a gas or a vapor, at either critical or sub-critical flow. For critical flow: .. math:: A = \frac{m}{CK_dP_1K_bK_c}\sqrt{\frac{TZ}{M}} For sub-critical flow: .. math:: A = \frac{17.9m}{F_2K_dK_c}\sqrt{\frac{TZ}{MP_1(P_1-P_2)}} Parameters ---------- m : float Mass flow rate of vapor through the valve, [kg/s] T : float Temperature of vapor entering the valve, [K] Z : float Compressibility factor of the vapor, [-] MW : float Molecular weight of the vapor, [g/mol] k : float Isentropic coefficient or ideal gas heat capacity ratio [-] P1 : float Upstream relieving pressure; the set pressure plus the allowable overpressure, plus atmospheric pressure, [Pa] P2 : float, optional Built-up backpressure; the increase in pressure during flow at the outlet of a pressure-relief device after it opens, [Pa] Kd : float, optional The effective coefficient of discharge, from the manufacturer or for preliminary sizing, using 0.975 normally or 0.62 when used with a rupture disc as described in [1]_, [] Kb : float, optional Correction due to vapor backpressure [-] Kc : float, optional Combination correction factor for installation with a ruture disk upstream of the PRV, [] Returns ------- A : float Minimum area for relief valve according to [1]_, [m^2] Notes ----- Units are interlally kg/hr, kPa, and mm^2 to match [1]_. Examples -------- Example 1 from [1]_ for critical flow, matches: >>> API520_A_g(m=24270/3600., T=348., Z=0.90, MW=51., k=1.11, P1=670E3, Kb=1, Kc=1) 0.0036990460646834414 Example 2 from [1]_ for sub-critical flow, matches: >>> API520_A_g(m=24270/3600., T=348., Z=0.90, MW=51., k=1.11, P1=670E3, P2=532E3, Kd=0.975, Kb=1, Kc=1) 0.004248358775943481 References ---------- .. [1] API Standard 520, Part 1 - Sizing and Selection.
[ "r", "Calculates", "required", "relief", "valve", "area", "for", "an", "API", "520", "valve", "passing", "a", "gas", "or", "a", "vapor", "at", "either", "critical", "or", "sub", "-", "critical", "flow", "." ]
python
train
fugue/credstash
credstash-migrate-autoversion.py
https://github.com/fugue/credstash/blob/56df8e051fc4c8d15d5e7e373e88bf5bc13f3346/credstash-migrate-autoversion.py#L16-L37
def updateVersions(region="us-east-1", table="credential-store"):
    '''
    do a full-table scan of the credential-store, and update the version
    format of every credential if it is an integer
    '''
    dynamodb = boto3.resource('dynamodb', region_name=region)
    secrets = dynamodb.Table(table)

    response = secrets.scan(ProjectionExpression="#N, version, #K, contents, hmac",
                            ExpressionAttributeNames={"#N": "name", "#K": "key"})
    items = response["Items"]
    # BUG FIX: DynamoDB scan returns at most 1 MB per page; follow
    # LastEvaluatedKey so the scan really covers the whole table.
    while 'LastEvaluatedKey' in response:
        response = secrets.scan(ProjectionExpression="#N, version, #K, contents, hmac",
                                ExpressionAttributeNames={"#N": "name", "#K": "key"},
                                ExclusiveStartKey=response['LastEvaluatedKey'])
        items.extend(response["Items"])

    for old_item in items:
        if isInt(old_item['version']):
            # rewrite integer versions as zero-padded strings; write the
            # new item before deleting the old one so no data is lost if
            # the process dies in between
            new_item = copy.copy(old_item)
            new_item['version'] = credstash.paddedInt(new_item['version'])
            if new_item['version'] != old_item['version']:
                secrets.put_item(Item=new_item)
                secrets.delete_item(Key={'name': old_item['name'],
                                         'version': old_item['version']})
        else:
            # BUG FIX: was a Python-2-only print statement; the function
            # form works on both Python 2 and 3
            print("Skipping item: %s, %s" % (old_item['name'], old_item['version']))
[ "def", "updateVersions", "(", "region", "=", "\"us-east-1\"", ",", "table", "=", "\"credential-store\"", ")", ":", "dynamodb", "=", "boto3", ".", "resource", "(", "'dynamodb'", ",", "region_name", "=", "region", ")", "secrets", "=", "dynamodb", ".", "Table", ...
do a full-table scan of the credential-store, and update the version format of every credential if it is an integer
[ "do", "a", "full", "-", "table", "scan", "of", "the", "credential", "-", "store", "and", "update", "the", "version", "format", "of", "every", "credential", "if", "it", "is", "an", "integer" ]
python
train
TDG-Platform/cloud-harness
gbdx_task_template/task.py
https://github.com/TDG-Platform/cloud-harness/blob/1d8f972f861816b90785a484e9bec5bd4bc2f569/gbdx_task_template/task.py#L101-L129
def is_valid(self, remote=False): """ Check cloud-harness code is valid. task schema validation is left to the API endpoint. :param remote: Flag indicating if the task is being ran on the platform or not. :return: is valid or not. """ if len(self.input_ports) < 1: return False if remote: # Ignore output ports as value will overriden. ports = [ port for port in self.input_ports if port.type == 'directory' ] for port in ports: # Will raise exception if the port is invalid. port.is_valid_s3_url(port.value) else: all_ports = self.ports[0] + self.ports[1] ports = [ port for port in all_ports if port.type == 'directory' and port.name != 'source_bundle' ] for port in ports: # Will raise exception if the port is invalid. port.is_valid_filesys(port.value) return True
[ "def", "is_valid", "(", "self", ",", "remote", "=", "False", ")", ":", "if", "len", "(", "self", ".", "input_ports", ")", "<", "1", ":", "return", "False", "if", "remote", ":", "# Ignore output ports as value will overriden.", "ports", "=", "[", "port", "f...
Check cloud-harness code is valid. task schema validation is left to the API endpoint. :param remote: Flag indicating if the task is being ran on the platform or not. :return: is valid or not.
[ "Check", "cloud", "-", "harness", "code", "is", "valid", ".", "task", "schema", "validation", "is", "left", "to", "the", "API", "endpoint", ".", ":", "param", "remote", ":", "Flag", "indicating", "if", "the", "task", "is", "being", "ran", "on", "the", ...
python
test
romanz/trezor-agent
libagent/gpg/decode.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L140-L187
def _parse_pubkey(stream, packet_type='pubkey'): """See https://tools.ietf.org/html/rfc4880#section-5.5 for details.""" p = {'type': packet_type} packet = io.BytesIO() with stream.capture(packet): p['version'] = stream.readfmt('B') p['created'] = stream.readfmt('>L') p['algo'] = stream.readfmt('B') if p['algo'] in ECDSA_ALGO_IDS: log.debug('parsing elliptic curve key') # https://tools.ietf.org/html/rfc6637#section-11 oid_size = stream.readfmt('B') oid = stream.read(oid_size) assert oid in SUPPORTED_CURVES, util.hexlify(oid) p['curve_oid'] = oid mpi = parse_mpi(stream) log.debug('mpi: %x (%d bits)', mpi, mpi.bit_length()) leftover = stream.read() if leftover: leftover = io.BytesIO(leftover) # https://tools.ietf.org/html/rfc6637#section-8 # should be b'\x03\x01\x08\x07': SHA256 + AES128 size, = util.readfmt(leftover, 'B') p['kdf'] = leftover.read(size) p['secret'] = leftover.read() parse_func, keygrip_func = SUPPORTED_CURVES[oid] keygrip = keygrip_func(parse_func(mpi)) log.debug('keygrip: %s', util.hexlify(keygrip)) p['keygrip'] = keygrip elif p['algo'] == DSA_ALGO_ID: parse_mpis(stream, n=4) # DSA keys are not supported elif p['algo'] == ELGAMAL_ALGO_ID: parse_mpis(stream, n=3) # ElGamal keys are not supported else: # assume RSA parse_mpis(stream, n=2) # RSA keys are not supported assert not stream.read() # https://tools.ietf.org/html/rfc4880#section-12.2 packet_data = packet.getvalue() data_to_hash = (b'\x99' + struct.pack('>H', len(packet_data)) + packet_data) p['key_id'] = hashlib.sha1(data_to_hash).digest()[-8:] p['_to_hash'] = data_to_hash log.debug('key ID: %s', util.hexlify(p['key_id'])) return p
[ "def", "_parse_pubkey", "(", "stream", ",", "packet_type", "=", "'pubkey'", ")", ":", "p", "=", "{", "'type'", ":", "packet_type", "}", "packet", "=", "io", ".", "BytesIO", "(", ")", "with", "stream", ".", "capture", "(", "packet", ")", ":", "p", "["...
See https://tools.ietf.org/html/rfc4880#section-5.5 for details.
[ "See", "https", ":", "//", "tools", ".", "ietf", ".", "org", "/", "html", "/", "rfc4880#section", "-", "5", ".", "5", "for", "details", "." ]
python
train
facelessuser/pyspelling
pyspelling/filters/stylesheets.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/stylesheets.py#L62-L71
def setup(self): """Setup.""" self.blocks = self.config['block_comments'] self.lines = self.config['line_comments'] self.group_comments = self.config['group_comments'] # If the style isn't found, just go with CSS, then use the appropriate prefix. self.stylesheets = STYLESHEET_TYPE.get(self.config['stylesheets'].lower(), CSS) self.prefix = [k for k, v in STYLESHEET_TYPE.items() if v == SASS][0] self.pattern = RE_CSS if self.stylesheets == CSS else RE_SCSS
[ "def", "setup", "(", "self", ")", ":", "self", ".", "blocks", "=", "self", ".", "config", "[", "'block_comments'", "]", "self", ".", "lines", "=", "self", ".", "config", "[", "'line_comments'", "]", "self", ".", "group_comments", "=", "self", ".", "con...
Setup.
[ "Setup", "." ]
python
train
ManiacalLabs/BiblioPixel
bibliopixel/layout/matrix_drawing.py
https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/layout/matrix_drawing.py#L273-L279
def fill_round_rect(setter, x, y, w, h, r, color=None, aa=False): """Draw solid rectangle with top-left corner at x,y, width w, height h, and corner radius r""" fill_rect(setter, x + r, y, w - 2 * r, h, color, aa) _fill_circle_helper(setter, x + w - r - 1, y + r, r, 1, h - 2 * r - 1, color, aa) _fill_circle_helper(setter, x + r, y + r, r, 2, h - 2 * r - 1, color, aa)
[ "def", "fill_round_rect", "(", "setter", ",", "x", ",", "y", ",", "w", ",", "h", ",", "r", ",", "color", "=", "None", ",", "aa", "=", "False", ")", ":", "fill_rect", "(", "setter", ",", "x", "+", "r", ",", "y", ",", "w", "-", "2", "*", "r",...
Draw solid rectangle with top-left corner at x,y, width w, height h, and corner radius r
[ "Draw", "solid", "rectangle", "with", "top", "-", "left", "corner", "at", "x", "y", "width", "w", "height", "h", "and", "corner", "radius", "r" ]
python
valid
mushkevych/scheduler
synergy/system/utils.py
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/utils.py#L91-L99
def remove_pid_file(process_name): """ removes pid file """ pid_filename = get_pid_filename(process_name) try: os.remove(pid_filename) print('Removed pid file at: {0}'.format(pid_filename), file=sys.stdout) except Exception as e: print('Unable to remove pid file at: {0}, because of: {1}'.format(pid_filename, e), file=sys.stderr)
[ "def", "remove_pid_file", "(", "process_name", ")", ":", "pid_filename", "=", "get_pid_filename", "(", "process_name", ")", "try", ":", "os", ".", "remove", "(", "pid_filename", ")", "print", "(", "'Removed pid file at: {0}'", ".", "format", "(", "pid_filename", ...
removes pid file
[ "removes", "pid", "file" ]
python
train
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L1436-L1508
def verify_unsigned_tx(unsigned_tx, outputs, inputs=None, sweep_funds=False, change_address=None, coin_symbol='btc'): ''' Takes an unsigned transaction and what was used to build it (in create_unsigned_tx) and verifies that tosign_tx matches what is being signed and what was requestsed to be signed. Returns if valid: (True, '') Returns if invalid: (False, 'err_msg') Specifically, this checks that the outputs match what we're expecting (bad inputs would fail signature anyway). Note: it was a mistake to include `inputs` in verify_unsigned_tx as it by definition is not used. It would be removed but that would break compatibility. ''' if not (change_address or sweep_funds): err_msg = 'Cannot Verify Without Developer Supplying Change Address (or Sweeping)' return False, err_msg if 'tosign_tx' not in unsigned_tx: err_msg = 'tosign_tx not in API response:\n%s' % unsigned_tx return False, err_msg output_addr_list = [x['address'] for x in outputs if x.get('address') != None] if change_address: output_addr_list.append(change_address) assert len(unsigned_tx['tosign_tx']) == len(unsigned_tx['tosign']), unsigned_tx for cnt, tosign_tx_toverify in enumerate(unsigned_tx['tosign_tx']): # Confirm tosign is the dsha256 of tosign_tx if double_sha256(tosign_tx_toverify) != unsigned_tx['tosign'][cnt]: err_msg = 'double_sha256(%s) =! 
%s' % (tosign_tx_toverify, unsigned_tx['tosign'][cnt]) print(unsigned_tx) return False, err_msg try: txn_outputs_response_dict = get_txn_outputs_dict(raw_tx_hex=tosign_tx_toverify, output_addr_list=output_addr_list, coin_symbol=coin_symbol) except Exception as inst: # Could be wrong output addresses, keep print statement for debug print(unsigned_tx) print(coin_symbol) return False, str(inst) if sweep_funds: # output adresses are already confirmed in `get_txn_outputs`, # which was called by `get_txn_outputs_dict` # no point in confirming values for a sweep continue else: # get rid of change address as tx fee (which affects value) # is determined by blockcypher and can't be known up front try: txn_outputs_response_dict.pop(change_address) except KeyError: # This is possible in the case of change address not needed pass user_outputs = compress_txn_outputs(outputs) if txn_outputs_response_dict != user_outputs: # TODO: more helpful error message err_msg = 'API Response Ouputs != Supplied Outputs\n\n%s\n\n%s' % ( txn_outputs_response_dict, user_outputs) return False, err_msg return True, ''
[ "def", "verify_unsigned_tx", "(", "unsigned_tx", ",", "outputs", ",", "inputs", "=", "None", ",", "sweep_funds", "=", "False", ",", "change_address", "=", "None", ",", "coin_symbol", "=", "'btc'", ")", ":", "if", "not", "(", "change_address", "or", "sweep_fu...
Takes an unsigned transaction and what was used to build it (in create_unsigned_tx) and verifies that tosign_tx matches what is being signed and what was requestsed to be signed. Returns if valid: (True, '') Returns if invalid: (False, 'err_msg') Specifically, this checks that the outputs match what we're expecting (bad inputs would fail signature anyway). Note: it was a mistake to include `inputs` in verify_unsigned_tx as it by definition is not used. It would be removed but that would break compatibility.
[ "Takes", "an", "unsigned", "transaction", "and", "what", "was", "used", "to", "build", "it", "(", "in", "create_unsigned_tx", ")", "and", "verifies", "that", "tosign_tx", "matches", "what", "is", "being", "signed", "and", "what", "was", "requestsed", "to", "...
python
train
bitesofcode/projexui
projexui/widgets/xgroupbox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xgroupbox.py#L99-L126
def matchCollapsedState( self ): """ Matches the collapsed state for this groupbox. """ collapsed = not self.isChecked() if self._inverted: collapsed = not collapsed if ( not self.isCollapsible() or not collapsed ): for child in self.children(): if ( not isinstance(child, QWidget) ): continue child.show() self.setMaximumHeight(MAX_INT) self.adjustSize() if ( self.parent() ): self.parent().adjustSize() else: self.setMaximumHeight(self.collapsedHeight()) for child in self.children(): if ( not isinstance(child, QWidget) ): continue child.hide()
[ "def", "matchCollapsedState", "(", "self", ")", ":", "collapsed", "=", "not", "self", ".", "isChecked", "(", ")", "if", "self", ".", "_inverted", ":", "collapsed", "=", "not", "collapsed", "if", "(", "not", "self", ".", "isCollapsible", "(", ")", "or", ...
Matches the collapsed state for this groupbox.
[ "Matches", "the", "collapsed", "state", "for", "this", "groupbox", "." ]
python
train
apache/incubator-superset
superset/forms.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/forms.py#L50-L57
def filter_not_empty_values(value): """Returns a list of non empty values or None""" if not value: return None data = [x for x in value if x] if not data: return None return data
[ "def", "filter_not_empty_values", "(", "value", ")", ":", "if", "not", "value", ":", "return", "None", "data", "=", "[", "x", "for", "x", "in", "value", "if", "x", "]", "if", "not", "data", ":", "return", "None", "return", "data" ]
Returns a list of non empty values or None
[ "Returns", "a", "list", "of", "non", "empty", "values", "or", "None" ]
python
train
HIPS/autograd
autograd/misc/optimizers.py
https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/autograd/misc/optimizers.py#L16-L30
def unflatten_optimizer(optimize): """Takes an optimizer that operates on flat 1D numpy arrays and returns a wrapped version that handles trees of nested containers (lists/tuples/dicts) with arrays/scalars at the leaves.""" @wraps(optimize) def _optimize(grad, x0, callback=None, *args, **kwargs): _x0, unflatten = flatten(x0) _grad = lambda x, i: flatten(grad(unflatten(x), i))[0] if callback: _callback = lambda x, i, g: callback(unflatten(x), i, unflatten(g)) else: _callback = None return unflatten(optimize(_grad, _x0, _callback, *args, **kwargs)) return _optimize
[ "def", "unflatten_optimizer", "(", "optimize", ")", ":", "@", "wraps", "(", "optimize", ")", "def", "_optimize", "(", "grad", ",", "x0", ",", "callback", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_x0", ",", "unflatten", "=", ...
Takes an optimizer that operates on flat 1D numpy arrays and returns a wrapped version that handles trees of nested containers (lists/tuples/dicts) with arrays/scalars at the leaves.
[ "Takes", "an", "optimizer", "that", "operates", "on", "flat", "1D", "numpy", "arrays", "and", "returns", "a", "wrapped", "version", "that", "handles", "trees", "of", "nested", "containers", "(", "lists", "/", "tuples", "/", "dicts", ")", "with", "arrays", ...
python
train
nuagenetworks/bambou
bambou/nurest_modelcontroller.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_modelcontroller.py#L121-L133
def get_first_model_with_resource_name(cls, resource_name): """ Get the first model corresponding to a resource_name Args: resource_name: the resource name """ models = cls.get_models_with_resource_name(resource_name) if len(models) > 0: return models[0] return None
[ "def", "get_first_model_with_resource_name", "(", "cls", ",", "resource_name", ")", ":", "models", "=", "cls", ".", "get_models_with_resource_name", "(", "resource_name", ")", "if", "len", "(", "models", ")", ">", "0", ":", "return", "models", "[", "0", "]", ...
Get the first model corresponding to a resource_name Args: resource_name: the resource name
[ "Get", "the", "first", "model", "corresponding", "to", "a", "resource_name" ]
python
train
roclark/sportsreference
sportsreference/ncaaf/rankings.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/rankings.py#L49-L75
def _get_team(self, team): """ Retrieve team's name and abbreviation. The team's name and abbreviation are embedded within the 'school_name' tag and, in the case of the abbreviation, require special parsing as it is located in the middle of a URI. The name and abbreviation are returned for the requested school. Parameters ---------- team : PyQuery object A PyQuery object representing a single row in a table on the rankings page. Returns ------- tuple (string, string) Returns a tuple of two strings where the first string is the team's abbreviation, such as 'PURDUE' and the second string is the team's name, such as 'Purdue'. """ name_tag = team('td[data-stat="school_name"]') abbreviation = re.sub(r'.*/cfb/schools/', '', str(name_tag('a'))) abbreviation = re.sub(r'/.*', '', abbreviation) name = team('td[data-stat="school_name"] a').text() return abbreviation, name
[ "def", "_get_team", "(", "self", ",", "team", ")", ":", "name_tag", "=", "team", "(", "'td[data-stat=\"school_name\"]'", ")", "abbreviation", "=", "re", ".", "sub", "(", "r'.*/cfb/schools/'", ",", "''", ",", "str", "(", "name_tag", "(", "'a'", ")", ")", ...
Retrieve team's name and abbreviation. The team's name and abbreviation are embedded within the 'school_name' tag and, in the case of the abbreviation, require special parsing as it is located in the middle of a URI. The name and abbreviation are returned for the requested school. Parameters ---------- team : PyQuery object A PyQuery object representing a single row in a table on the rankings page. Returns ------- tuple (string, string) Returns a tuple of two strings where the first string is the team's abbreviation, such as 'PURDUE' and the second string is the team's name, such as 'Purdue'.
[ "Retrieve", "team", "s", "name", "and", "abbreviation", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/threshold_monitor/interface/policy/area/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/threshold_monitor/interface/policy/area/__init__.py#L134-L160
def _set_area_value(self, v, load=False): """ Setter method for area_value, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/area_value (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_area_value is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_area_value() directly. """ parent = getattr(self, "_parent", None) if parent is not None and load is False: raise AttributeError("Cannot set keys directly when" + " within an instantiated list") if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'MissingTerminationCharacter': {'value': 0}, u'CRCAlignErrors': {'value': 1}, u'IFG': {'value': 3}, u'SymbolErrors': {'value': 2}},), is_leaf=True, yang_name="area_value", rest_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'area', u'cli-expose-key-name': None, u'cli-incomplete-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='enumeration', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """area_value must be of a type compatible with enumeration""", 'defined-type': "brocade-threshold-monitor:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'MissingTerminationCharacter': {'value': 0}, u'CRCAlignErrors': {'value': 1}, u'IFG': {'value': 3}, u'SymbolErrors': {'value': 2}},), is_leaf=True, yang_name="area_value", rest_name="area", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'area', u'cli-expose-key-name': None, 
u'cli-incomplete-command': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='enumeration', is_config=True)""", }) self.__area_value = t if hasattr(self, '_set'): self._set()
[ "def", "_set_area_value", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "parent", "=", "getattr", "(", "self", ",", "\"_parent\"", ",", "None", ")", "if", "parent", "is", "not", "None", "and", "load", "is", "False", ":", "raise", "Attri...
Setter method for area_value, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/area_value (enumeration) If this variable is read-only (config: false) in the source YANG file, then _set_area_value is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_area_value() directly.
[ "Setter", "method", "for", "area_value", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "threshold_monitor", "/", "interface", "/", "policy", "/", "area", "/", "area_value", "(", "enumeration", ")", "If", "this", "variable", "is", "read", "-", ...
python
train
ejhigson/nestcheck
nestcheck/diagnostics_tables.py
https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/diagnostics_tables.py#L172-L228
def error_values_summary(error_values, **summary_df_kwargs): """Get summary statistics about calculation errors, including estimated implementation errors. Parameters ---------- error_values: pandas DataFrame Of format output by run_list_error_values (look at it for more details). summary_df_kwargs: dict, optional See pandas_functions.summary_df docstring for more details. Returns ------- df: pandas DataFrame Table showing means and standard deviations of results and diagnostics for the different runs. Also contains estimated numerical uncertainties on results. """ df = pf.summary_df_from_multi(error_values, **summary_df_kwargs) # get implementation stds imp_std, imp_std_unc, imp_frac, imp_frac_unc = \ nestcheck.error_analysis.implementation_std( df.loc[('values std', 'value')], df.loc[('values std', 'uncertainty')], df.loc[('bootstrap std mean', 'value')], df.loc[('bootstrap std mean', 'uncertainty')]) df.loc[('implementation std', 'value'), df.columns] = imp_std df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc df.loc[('implementation std frac', 'value'), :] = imp_frac df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc # Get implementation RMSEs (calculated using the values RMSE instead of # values std) if 'values rmse' in set(df.index.get_level_values('calculation type')): imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \ nestcheck.error_analysis.implementation_std( df.loc[('values rmse', 'value')], df.loc[('values rmse', 'uncertainty')], df.loc[('bootstrap std mean', 'value')], df.loc[('bootstrap std mean', 'uncertainty')]) df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse df.loc[('implementation rmse', 'uncertainty'), df.columns] = \ imp_rmse_unc df.loc[('implementation rmse frac', 'value'), :] = imp_frac df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc # Return only the calculation types we are interested in, in order calcs_to_keep = ['true values', 'values mean', 'values std', 
'values rmse', 'bootstrap std mean', 'implementation std', 'implementation std frac', 'implementation rmse', 'implementation rmse frac', 'thread ks pvalue mean', 'bootstrap ks distance mean', 'bootstrap energy distance mean', 'bootstrap earth mover distance mean'] df = pd.concat([df.xs(calc, level='calculation type', drop_level=False) for calc in calcs_to_keep if calc in df.index.get_level_values('calculation type')]) return df
[ "def", "error_values_summary", "(", "error_values", ",", "*", "*", "summary_df_kwargs", ")", ":", "df", "=", "pf", ".", "summary_df_from_multi", "(", "error_values", ",", "*", "*", "summary_df_kwargs", ")", "# get implementation stds", "imp_std", ",", "imp_std_unc",...
Get summary statistics about calculation errors, including estimated implementation errors. Parameters ---------- error_values: pandas DataFrame Of format output by run_list_error_values (look at it for more details). summary_df_kwargs: dict, optional See pandas_functions.summary_df docstring for more details. Returns ------- df: pandas DataFrame Table showing means and standard deviations of results and diagnostics for the different runs. Also contains estimated numerical uncertainties on results.
[ "Get", "summary", "statistics", "about", "calculation", "errors", "including", "estimated", "implementation", "errors", "." ]
python
train
decryptus/sonicprobe
sonicprobe/libs/xys.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/xys.py#L668-L847
def validate(document, schema, log_qualifier = True): """ If the document is valid according to the schema, this function returns True. If the document is not valid according to the schema, errors are logged then False is returned. """ if isinstance(schema, ValidatorNode): if not validate(document, schema.content): return False if not schema.validator(document, schema.content): if log_qualifier: LOG.error("%r failed to validate with qualifier %s", document, schema.validator.__name__) return False return True elif isinstance(schema, dict): if not isinstance(document, dict): LOG.error("wanted a dictionary, got a %s", document.__class__.__name__) return False generic = [] optional = {} optionalnull = {} mandatory = [] for key, schema_val in schema.iteritems(): if isinstance(key, ValidatorNode): if key.mode == 'mandatory': mandatory.append((key, schema_val)) else: generic.append((key, schema_val)) elif isinstance(schema_val, Optional): optional[key] = schema_val elif isinstance(schema_val, OptionalNull): optional[key] = schema_val optionalnull[key] = True else: mandatory.append((key, schema_val)) doc_copy = document.copy() for key, schema_val in mandatory: if isinstance(key, ValidatorNode): nb = 0 rm = [] for doc_key, doc_val in doc_copy.iteritems(): if not validate(doc_key, key, False): continue nb += 1 if validate(doc_val, schema_val): rm.append(doc_key) else: return False if nb == 0: LOG.error("missing document %r for qualifier: %r", key.content, key.validator.__name__) return False elif key.min is not None and nb < key.min: LOG.error("no enough document %r for qualifier: %r (min: %r, found: %r)", key.content, key.validator.__name__, key.min, nb) return False elif key.max is not None and nb > key.max: LOG.error("too many document %r for qualifier: %r (max: %r, found: %r)", key.content, key.validator.__name__, key.max, nb) return False else: for x in rm: del doc_copy[x] continue doc_val = doc_copy.get(key, Nothing) if doc_val is Nothing: LOG.error("missing key %r in 
document", key) return False if helpers.is_scalar(schema_val): if not validate(doc_val, schema_val): return False del doc_copy[key] continue if schema_val.modifier: for modname in schema_val.modifier: if modname in _modifiers: document[key] = _modifiers[modname](document[key]) doc_val = _modifiers[modname](doc_val) elif hasattr(doc_val, modname): document[key] = getattr(document[key], modname)() doc_val = getattr(doc_val, modname)() if _valid_len(key, doc_val, schema_val.min_len, schema_val.max_len) is False: return False if not validate(doc_val, schema_val.content): return False del doc_copy[key] for key, schema_val in generic: nb = 0 rm = [] for doc_key, doc_val in doc_copy.iteritems(): if not validate(doc_key, key, False): continue nb += 1 if validate(doc_val, schema_val): rm.append(doc_key) else: return False if key.min is not None and nb < key.min: LOG.error("no enough document %r for qualifier: %r (min: %r, found: %r)", key.content, key.validator.__name__, key.min, nb) return False elif key.max is not None and nb > key.max: LOG.error("too many document %r for qualifier: %r (max: %r, found: %r)", key.content, key.validator.__name__, key.max, nb) return False else: for x in rm: del doc_copy[x] continue for key, doc_val in doc_copy.iteritems(): schema_val = optional.get(key, Nothing) if schema_val is Nothing: LOG.error("forbidden key %s in document", key) return False elif optionalnull.has_key(key) and doc_val is None: continue elif schema_val.min_len == 0 and doc_val is "": continue if schema_val.modifier: for modname in schema_val.modifier: if modname in _modifiers: document[key] = _modifiers[modname](document[key]) doc_val = _modifiers[modname](doc_val) elif hasattr(doc_val, modname): document[key] = getattr(document[key], modname)() doc_val = getattr(doc_val, modname)() if optionalnull.has_key(key) and doc_val is None: continue elif schema_val.min_len == 0 and doc_val is "": continue if _valid_len(key, doc_val, schema_val.min_len, schema_val.max_len) is 
False: return False if not validate(doc_val, schema_val.content): return False return True elif isinstance(schema, list): if not isinstance(document, list): LOG.error("wanted a list, got a %s", document.__class__.__name__) for elt in document: # XXX: give a meaning when there are multiple element in a sequence of a schema? if len(schema) < 2: if not validate(elt, schema[0]): return False elif isinstance(schema[0], dict): tmp = {} for x in schema: for key, val in x.iteritems(): tmp[key] = val if not validate(elt, tmp): return False else: if not validate(elt, schema[0]): return False return True elif isinstance(schema, Any): return True elif isinstance(schema, Scalar): return helpers.is_scalar(document) else: # scalar if isinstance(schema, str): schema = unicode(schema) if isinstance(document, str): document = unicode(document, 'utf8') if schema.__class__ != document.__class__: LOG.error("wanted a %s, got a %s", schema.__class__.__name__, document.__class__.__name__) return False return True
[ "def", "validate", "(", "document", ",", "schema", ",", "log_qualifier", "=", "True", ")", ":", "if", "isinstance", "(", "schema", ",", "ValidatorNode", ")", ":", "if", "not", "validate", "(", "document", ",", "schema", ".", "content", ")", ":", "return"...
If the document is valid according to the schema, this function returns True. If the document is not valid according to the schema, errors are logged then False is returned.
[ "If", "the", "document", "is", "valid", "according", "to", "the", "schema", "this", "function", "returns", "True", ".", "If", "the", "document", "is", "not", "valid", "according", "to", "the", "schema", "errors", "are", "logged", "then", "False", "is", "re...
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/capture_collector.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/capture_collector.py#L157-L175
def GetLoggingLocation(): """Search for and return the file and line number from the log collector. Returns: (pathname, lineno, func_name) The full path, line number, and function name for the logpoint location. """ frame = inspect.currentframe() this_file = frame.f_code.co_filename frame = frame.f_back while frame: if this_file == frame.f_code.co_filename: if 'cdbg_logging_location' in frame.f_locals: ret = frame.f_locals['cdbg_logging_location'] if len(ret) != 3: return (None, None, None) return ret frame = frame.f_back return (None, None, None)
[ "def", "GetLoggingLocation", "(", ")", ":", "frame", "=", "inspect", ".", "currentframe", "(", ")", "this_file", "=", "frame", ".", "f_code", ".", "co_filename", "frame", "=", "frame", ".", "f_back", "while", "frame", ":", "if", "this_file", "==", "frame",...
Search for and return the file and line number from the log collector. Returns: (pathname, lineno, func_name) The full path, line number, and function name for the logpoint location.
[ "Search", "for", "and", "return", "the", "file", "and", "line", "number", "from", "the", "log", "collector", "." ]
python
train
BrewBlox/brewblox-service
brewblox_service/scheduler.py
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/scheduler.py#L24-L58
async def create_task(app: web.Application, coro: Coroutine, *args, **kwargs ) -> asyncio.Task: """ Convenience function for calling `TaskScheduler.create(coro)` This will use the default `TaskScheduler` to create a new background task. Example: import asyncio from datetime import datetime from brewblox_service import scheduler, service async def current_time(interval): while True: await asyncio.sleep(interval) print(datetime.now()) async def start(app): await scheduler.create_task(app, current_time(interval=2)) app = service.create_app(default_name='example') scheduler.setup(app) app.on_startup.append(start) service.furnish(app) service.run(app) """ return await get_scheduler(app).create(coro, *args, **kwargs)
[ "async", "def", "create_task", "(", "app", ":", "web", ".", "Application", ",", "coro", ":", "Coroutine", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "asyncio", ".", "Task", ":", "return", "await", "get_scheduler", "(", "app", ")", ".", "cr...
Convenience function for calling `TaskScheduler.create(coro)` This will use the default `TaskScheduler` to create a new background task. Example: import asyncio from datetime import datetime from brewblox_service import scheduler, service async def current_time(interval): while True: await asyncio.sleep(interval) print(datetime.now()) async def start(app): await scheduler.create_task(app, current_time(interval=2)) app = service.create_app(default_name='example') scheduler.setup(app) app.on_startup.append(start) service.furnish(app) service.run(app)
[ "Convenience", "function", "for", "calling", "TaskScheduler", ".", "create", "(", "coro", ")" ]
python
train
kontron/python-aardvark
pyaardvark/aardvark.py
https://github.com/kontron/python-aardvark/blob/9827f669fbdc5bceb98e7d08a294b4e4e455d0d5/pyaardvark/aardvark.py#L620-L626
def spi_ss_polarity(self, polarity): """Change the ouput polarity on the SS line. Please note, that this only affects the master functions. """ ret = api.py_aa_spi_master_ss_polarity(self.handle, polarity) _raise_error_if_negative(ret)
[ "def", "spi_ss_polarity", "(", "self", ",", "polarity", ")", ":", "ret", "=", "api", ".", "py_aa_spi_master_ss_polarity", "(", "self", ".", "handle", ",", "polarity", ")", "_raise_error_if_negative", "(", "ret", ")" ]
Change the ouput polarity on the SS line. Please note, that this only affects the master functions.
[ "Change", "the", "ouput", "polarity", "on", "the", "SS", "line", "." ]
python
train
a1ezzz/wasp-general
wasp_general/task/thread_tracker.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/thread_tracker.py#L494-L508
def last_record(self, task_uid, *requested_events): """ Search over registered :class:`.WScheduleTask` instances and return the last record that matches search criteria. :param task_uid: uid of :class:`.WScheduleTask` instance :param requested_events: target events types :return: WSimpleTrackerStorage.Record or None """ for record in self: if isinstance(record.thread_task, WScheduleTask) is False: continue if record.thread_task.uid() == task_uid: if len(requested_events) == 0 or record.record_type in requested_events: return record
[ "def", "last_record", "(", "self", ",", "task_uid", ",", "*", "requested_events", ")", ":", "for", "record", "in", "self", ":", "if", "isinstance", "(", "record", ".", "thread_task", ",", "WScheduleTask", ")", "is", "False", ":", "continue", "if", "record"...
Search over registered :class:`.WScheduleTask` instances and return the last record that matches search criteria. :param task_uid: uid of :class:`.WScheduleTask` instance :param requested_events: target events types :return: WSimpleTrackerStorage.Record or None
[ "Search", "over", "registered", ":", "class", ":", ".", "WScheduleTask", "instances", "and", "return", "the", "last", "record", "that", "matches", "search", "criteria", "." ]
python
train
alephdata/memorious
memorious/ui/views.py
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/ui/views.py#L67-L77
def index(): """Generate a list of all crawlers, alphabetically, with op counts.""" crawlers = [] for crawler in manager: data = Event.get_counts(crawler) data['last_active'] = crawler.last_run data['total_ops'] = crawler.op_count data['running'] = crawler.is_running data['crawler'] = crawler crawlers.append(data) return render_template('index.html', crawlers=crawlers)
[ "def", "index", "(", ")", ":", "crawlers", "=", "[", "]", "for", "crawler", "in", "manager", ":", "data", "=", "Event", ".", "get_counts", "(", "crawler", ")", "data", "[", "'last_active'", "]", "=", "crawler", ".", "last_run", "data", "[", "'total_ops...
Generate a list of all crawlers, alphabetically, with op counts.
[ "Generate", "a", "list", "of", "all", "crawlers", "alphabetically", "with", "op", "counts", "." ]
python
train
CGATOxford/UMI-tools
umi_tools/sam_methods.py
https://github.com/CGATOxford/UMI-tools/blob/c4b5d84aac391d59916d294f8f4f8f5378abcfbe/umi_tools/sam_methods.py#L92-L103
def get_cell_umi_read_string(read_id, sep="_"): ''' extract the umi and cell barcode from the read id (input as a string) using the specified separator ''' try: return (read_id.split(sep)[-1].encode('utf-8'), read_id.split(sep)[-2].encode('utf-8')) except IndexError: raise ValueError( "Could not extract UMI or CB from the read ID, please" "check UMI and CB are encoded in the read name:" "%s" % read_id)
[ "def", "get_cell_umi_read_string", "(", "read_id", ",", "sep", "=", "\"_\"", ")", ":", "try", ":", "return", "(", "read_id", ".", "split", "(", "sep", ")", "[", "-", "1", "]", ".", "encode", "(", "'utf-8'", ")", ",", "read_id", ".", "split", "(", "...
extract the umi and cell barcode from the read id (input as a string) using the specified separator
[ "extract", "the", "umi", "and", "cell", "barcode", "from", "the", "read", "id", "(", "input", "as", "a", "string", ")", "using", "the", "specified", "separator" ]
python
train
twitterdev/tweet_parser
tweet_parser/getter_methods/tweet_user.py
https://github.com/twitterdev/tweet_parser/blob/3435de8367d36b483a6cfd8d46cc28694ee8a42e/tweet_parser/getter_methods/tweet_user.py#L114-L153
def get_bio(tweet): """ Get the bio text of the user who posted the Tweet Args: tweet (Tweet): A Tweet object (or a dictionary) Returns: str: the bio text of the user who posted the Tweet In a payload the abscence of a bio seems to be represented by an empty string or a None, this getter always returns a string (so, empty string if no bio is available). Example: >>> from tweet_parser.getter_methods.tweet_user import get_bio >>> original_format_dict = { ... "created_at": "Wed May 24 20:17:19 +0000 2017", ... "user": ... {"description": "Niche millenial content aggregator"} ... } >>> get_bio(original_format_dict) 'Niche millenial content aggregator' >>> activity_streams_format_dict = { ... "postedTime": "2017-05-24T20:17:19.000Z", ... "actor": ... {"summary": "Niche millenial content aggregator"} ... } >>> get_bio(activity_streams_format_dict) 'Niche millenial content aggregator' """ if is_original_format(tweet): bio_or_none = tweet["user"].get("description", "") else: bio_or_none = tweet["actor"].get("summary", "") if bio_or_none is None: return "" else: return bio_or_none
[ "def", "get_bio", "(", "tweet", ")", ":", "if", "is_original_format", "(", "tweet", ")", ":", "bio_or_none", "=", "tweet", "[", "\"user\"", "]", ".", "get", "(", "\"description\"", ",", "\"\"", ")", "else", ":", "bio_or_none", "=", "tweet", "[", "\"actor...
Get the bio text of the user who posted the Tweet Args: tweet (Tweet): A Tweet object (or a dictionary) Returns: str: the bio text of the user who posted the Tweet In a payload the abscence of a bio seems to be represented by an empty string or a None, this getter always returns a string (so, empty string if no bio is available). Example: >>> from tweet_parser.getter_methods.tweet_user import get_bio >>> original_format_dict = { ... "created_at": "Wed May 24 20:17:19 +0000 2017", ... "user": ... {"description": "Niche millenial content aggregator"} ... } >>> get_bio(original_format_dict) 'Niche millenial content aggregator' >>> activity_streams_format_dict = { ... "postedTime": "2017-05-24T20:17:19.000Z", ... "actor": ... {"summary": "Niche millenial content aggregator"} ... } >>> get_bio(activity_streams_format_dict) 'Niche millenial content aggregator'
[ "Get", "the", "bio", "text", "of", "the", "user", "who", "posted", "the", "Tweet" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/plugins/manager.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/plugins/manager.py#L146-L161
def generate(self, *arg, **kw): """Call all plugins, yielding each item in each non-None result. """ for p, meth in self.plugins: result = None try: result = meth(*arg, **kw) if result is not None: for r in result: yield r except (KeyboardInterrupt, SystemExit): raise except: exc = sys.exc_info() yield Failure(*exc) continue
[ "def", "generate", "(", "self", ",", "*", "arg", ",", "*", "*", "kw", ")", ":", "for", "p", ",", "meth", "in", "self", ".", "plugins", ":", "result", "=", "None", "try", ":", "result", "=", "meth", "(", "*", "arg", ",", "*", "*", "kw", ")", ...
Call all plugins, yielding each item in each non-None result.
[ "Call", "all", "plugins", "yielding", "each", "item", "in", "each", "non", "-", "None", "result", "." ]
python
test
Jammy2211/PyAutoLens
autolens/data/ccd.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/ccd.py#L416-L420
def estimated_noise_map_counts(self): """ The estimated noise_maps mappers of the image (using its background noise_maps mappers and image values in counts) in counts. """ return np.sqrt((np.abs(self.image_counts) + np.square(self.background_noise_map_counts)))
[ "def", "estimated_noise_map_counts", "(", "self", ")", ":", "return", "np", ".", "sqrt", "(", "(", "np", ".", "abs", "(", "self", ".", "image_counts", ")", "+", "np", ".", "square", "(", "self", ".", "background_noise_map_counts", ")", ")", ")" ]
The estimated noise_maps mappers of the image (using its background noise_maps mappers and image values in counts) in counts.
[ "The", "estimated", "noise_maps", "mappers", "of", "the", "image", "(", "using", "its", "background", "noise_maps", "mappers", "and", "image", "values", "in", "counts", ")", "in", "counts", "." ]
python
valid
drewsonne/aws-autodiscovery-templater
awsautodiscoverytemplater/command.py
https://github.com/drewsonne/aws-autodiscovery-templater/blob/9ef2edd6a373aeb5d343b841550c210966efe079/awsautodiscoverytemplater/command.py#L205-L244
def _process_reservations(self, reservations): """ Given a dict with the structure of a response from boto3.ec2.describe_instances(...), find the public/private ips. :param reservations: :return: """ reservations = reservations['Reservations'] private_ip_addresses = [] private_hostnames = [] public_ips = [] public_hostnames = [] for reservation in reservations: for instance in reservation['Instances']: private_ip_addresses.append(instance['PrivateIpAddress']) private_hostnames.append(instance['PrivateDnsName']) if 'PublicIpAddress' in instance: public_ips.append(instance['PublicIpAddress']) elif not self.remove_nones: public_ips.append(None) if ('PublicDnsName' in instance) & (not self.remove_nones): public_hostnames.append(instance['PublicDnsName']) elif not self.remove_nones: public_hostnames.append(None) return { 'private': { 'ips': private_ip_addresses, 'hostnames': private_hostnames }, 'public': { 'ips': public_ips, 'hostnames': public_hostnames }, 'reservations': reservations }
[ "def", "_process_reservations", "(", "self", ",", "reservations", ")", ":", "reservations", "=", "reservations", "[", "'Reservations'", "]", "private_ip_addresses", "=", "[", "]", "private_hostnames", "=", "[", "]", "public_ips", "=", "[", "]", "public_hostnames",...
Given a dict with the structure of a response from boto3.ec2.describe_instances(...), find the public/private ips. :param reservations: :return:
[ "Given", "a", "dict", "with", "the", "structure", "of", "a", "response", "from", "boto3", ".", "ec2", ".", "describe_instances", "(", "...", ")", "find", "the", "public", "/", "private", "ips", ".", ":", "param", "reservations", ":", ":", "return", ":" ]
python
train
maxfischer2781/chainlet
chainlet/genlink.py
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/genlink.py#L203-L239
def genlet(generator_function=None, prime=True): """ Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink` :param generator_function: the generator function to convert :type generator_function: generator :param prime: advance the generator to the next/first yield :type prime: bool When used as a decorator, this function can also be called with and without keywords. .. code:: python @genlet def pingpong(): "Chainlet that passes on its value" last = yield while True: last = yield last @genlet(prime=True) def produce(): "Chainlet that produces a value" while True: yield time.time() @genlet(True) def read(iterable): "Chainlet that reads from an iterable" for item in iterable: yield item """ if generator_function is None: return GeneratorLink.wraplet(prime=prime) elif not callable(generator_function): return GeneratorLink.wraplet(prime=generator_function) return GeneratorLink.wraplet(prime=prime)(generator_function)
[ "def", "genlet", "(", "generator_function", "=", "None", ",", "prime", "=", "True", ")", ":", "if", "generator_function", "is", "None", ":", "return", "GeneratorLink", ".", "wraplet", "(", "prime", "=", "prime", ")", "elif", "not", "callable", "(", "genera...
Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink` :param generator_function: the generator function to convert :type generator_function: generator :param prime: advance the generator to the next/first yield :type prime: bool When used as a decorator, this function can also be called with and without keywords. .. code:: python @genlet def pingpong(): "Chainlet that passes on its value" last = yield while True: last = yield last @genlet(prime=True) def produce(): "Chainlet that produces a value" while True: yield time.time() @genlet(True) def read(iterable): "Chainlet that reads from an iterable" for item in iterable: yield item
[ "Decorator", "to", "convert", "a", "generator", "function", "to", "a", ":", "py", ":", "class", ":", "~chainlink", ".", "ChainLink" ]
python
train
alexhayes/django-pdfkit
django_pdfkit/views.py
https://github.com/alexhayes/django-pdfkit/blob/02774ae2cb67d05dd5e4cb50661c56464ebb2413/django_pdfkit/views.py#L31-L53
def get(self, request, *args, **kwargs): """ Return a HTTPResponse either of a PDF file or HTML. :rtype: HttpResponse """ if 'html' in request.GET: # Output HTML content = self.render_html(*args, **kwargs) return HttpResponse(content) else: # Output PDF content = self.render_pdf(*args, **kwargs) response = HttpResponse(content, content_type='application/pdf') if (not self.inline or 'download' in request.GET) and 'inline' not in request.GET: response['Content-Disposition'] = 'attachment; filename=%s' % self.get_filename() response['Content-Length'] = len(content) return response
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'html'", "in", "request", ".", "GET", ":", "# Output HTML", "content", "=", "self", ".", "render_html", "(", "*", "args", ",", "*", "*", "kwargs"...
Return a HTTPResponse either of a PDF file or HTML. :rtype: HttpResponse
[ "Return", "a", "HTTPResponse", "either", "of", "a", "PDF", "file", "or", "HTML", "." ]
python
train
cloudendpoints/endpoints-python
endpoints/api_config.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/api_config.py#L1831-L1871
def __request_message_descriptor(self, request_kind, message_type, method_id, path): """Describes the parameters and body of the request. Args: request_kind: The type of request being made. message_type: messages.Message or ResourceContainer class. The message to describe. method_id: string, Unique method identifier (e.g. 'myapi.items.method') path: string, HTTP path to method. Returns: Dictionary describing the request. Raises: ValueError: if the method path and request required fields do not match """ descriptor = {} params, param_order = self.__params_descriptor(message_type, request_kind, path, method_id) if isinstance(message_type, resource_container.ResourceContainer): message_type = message_type.body_message_class() if (request_kind == self.__NO_BODY or message_type == message_types.VoidMessage()): descriptor['body'] = 'empty' else: descriptor['body'] = 'autoTemplate(backendRequest)' descriptor['bodyName'] = 'resource' self.__request_schema[method_id] = self.__parser.add_message( message_type.__class__) if params: descriptor['parameters'] = params if param_order: descriptor['parameterOrder'] = param_order return descriptor
[ "def", "__request_message_descriptor", "(", "self", ",", "request_kind", ",", "message_type", ",", "method_id", ",", "path", ")", ":", "descriptor", "=", "{", "}", "params", ",", "param_order", "=", "self", ".", "__params_descriptor", "(", "message_type", ",", ...
Describes the parameters and body of the request. Args: request_kind: The type of request being made. message_type: messages.Message or ResourceContainer class. The message to describe. method_id: string, Unique method identifier (e.g. 'myapi.items.method') path: string, HTTP path to method. Returns: Dictionary describing the request. Raises: ValueError: if the method path and request required fields do not match
[ "Describes", "the", "parameters", "and", "body", "of", "the", "request", "." ]
python
train
delicb/mvvm
mvvm.py
https://github.com/delicb/mvvm/blob/29bf0ab2cc0835b58bed75b2606a9b380c38a272/mvvm.py#L87-L101
def instance(cls, interval=5): ''' Returns existing instance of messenger. If one does not exist it will be created and returned. :param int interval: Number of miliseconds that represents interval when messages will be processed. Note that this parameter will be used only the first time when instance is requested, every other time it will be ignored because existing instance of :class:`._Messenger` is returned. ''' if not cls._instance: cls._instance = _Messenger(interval) return cls._instance
[ "def", "instance", "(", "cls", ",", "interval", "=", "5", ")", ":", "if", "not", "cls", ".", "_instance", ":", "cls", ".", "_instance", "=", "_Messenger", "(", "interval", ")", "return", "cls", ".", "_instance" ]
Returns existing instance of messenger. If one does not exist it will be created and returned. :param int interval: Number of miliseconds that represents interval when messages will be processed. Note that this parameter will be used only the first time when instance is requested, every other time it will be ignored because existing instance of :class:`._Messenger` is returned.
[ "Returns", "existing", "instance", "of", "messenger", ".", "If", "one", "does", "not", "exist", "it", "will", "be", "created", "and", "returned", "." ]
python
train
pandas-dev/pandas
pandas/io/sas/sas_xport.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sas/sas_xport.py#L401-L416
def get_chunk(self, size=None): """ Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame """ if size is None: size = self._chunksize return self.read(nrows=size)
[ "def", "get_chunk", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "self", ".", "_chunksize", "return", "self", ".", "read", "(", "nrows", "=", "size", ")" ]
Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame
[ "Reads", "lines", "from", "Xport", "file", "and", "returns", "as", "dataframe" ]
python
train
psss/did
did/plugins/trello.py
https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/trello.py#L88-L112
def get_actions(self, filters, since=None, before=None, limit=1000): """ Example of data structure: https://api.trello.com/1/members/ben/actions?limit=2 """ if limit > 1000: raise NotImplementedError( "Fetching more than 1000 items is not implemented") resp = self.stats.session.open( "{0}/members/{1}/actions?{2}".format( self.stats.url, self.username, urllib.urlencode({ "key": self.key, "token": self.token, "filter": filters, "limit": limit, "since": str(since), "before": str(before)}))) actions = json.loads(resp.read()) log.data(pretty(actions)) # print[act for act in actions if "shortLink" not in # act['data']['board'].keys()] actions = [act for act in actions if act['data'] ['board']['id'] in self.board_ids] return actions
[ "def", "get_actions", "(", "self", ",", "filters", ",", "since", "=", "None", ",", "before", "=", "None", ",", "limit", "=", "1000", ")", ":", "if", "limit", ">", "1000", ":", "raise", "NotImplementedError", "(", "\"Fetching more than 1000 items is not impleme...
Example of data structure: https://api.trello.com/1/members/ben/actions?limit=2
[ "Example", "of", "data", "structure", ":", "https", ":", "//", "api", ".", "trello", ".", "com", "/", "1", "/", "members", "/", "ben", "/", "actions?limit", "=", "2" ]
python
train
hmeine/qimage2ndarray
qimage2ndarray/__init__.py
https://github.com/hmeine/qimage2ndarray/blob/023f3c67f90e646ce2fd80418fed85b9c7660bfc/qimage2ndarray/__init__.py#L116-L134
def alpha_view(qimage): """Returns alpha view of a given 32-bit color QImage_'s memory. The result is a 2D numpy.uint8 array, equivalent to byte_view(qimage)[...,3]. The image must have 32 bit pixel size, i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is not enforced that the given qimage has a format that actually *uses* the alpha channel -- for Format_RGB32, the alpha channel usually contains 255 everywhere. For your convenience, `qimage` may also be a filename, see `Loading and Saving Images`_ in the documentation. :param qimage: image whose memory shall be accessed via NumPy :type qimage: QImage_ with 32-bit pixel type :rtype: numpy.ndarray_ with shape (height, width) and dtype uint8""" bytes = byte_view(qimage, byteorder = None) if bytes.shape[2] != 4: raise ValueError("For alpha_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)") return bytes[...,_bgra[3]]
[ "def", "alpha_view", "(", "qimage", ")", ":", "bytes", "=", "byte_view", "(", "qimage", ",", "byteorder", "=", "None", ")", "if", "bytes", ".", "shape", "[", "2", "]", "!=", "4", ":", "raise", "ValueError", "(", "\"For alpha_view, the image must have 32 bit ...
Returns alpha view of a given 32-bit color QImage_'s memory. The result is a 2D numpy.uint8 array, equivalent to byte_view(qimage)[...,3]. The image must have 32 bit pixel size, i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is not enforced that the given qimage has a format that actually *uses* the alpha channel -- for Format_RGB32, the alpha channel usually contains 255 everywhere. For your convenience, `qimage` may also be a filename, see `Loading and Saving Images`_ in the documentation. :param qimage: image whose memory shall be accessed via NumPy :type qimage: QImage_ with 32-bit pixel type :rtype: numpy.ndarray_ with shape (height, width) and dtype uint8
[ "Returns", "alpha", "view", "of", "a", "given", "32", "-", "bit", "color", "QImage_", "s", "memory", ".", "The", "result", "is", "a", "2D", "numpy", ".", "uint8", "array", "equivalent", "to", "byte_view", "(", "qimage", ")", "[", "...", "3", "]", "."...
python
train
apriha/lineage
src/lineage/individual.py
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L267-L298
def save_snps(self, filename=None): """ Save SNPs to file. Parameters ---------- filename : str filename for file to save Returns ------- str path to file in output directory if SNPs were saved, else empty str """ comment = ( "# Source(s): {}\n" "# Assembly: {}\n" "# SNPs: {}\n" "# Chromosomes: {}\n".format( self.source, self.assembly, self.snp_count, self.chromosomes_summary ) ) if filename is None: filename = self.get_var_name() + "_lineage_" + self.assembly + ".csv" return lineage.save_df_as_csv( self._snps, self._output_dir, filename, comment=comment, header=["chromosome", "position", "genotype"], )
[ "def", "save_snps", "(", "self", ",", "filename", "=", "None", ")", ":", "comment", "=", "(", "\"# Source(s): {}\\n\"", "\"# Assembly: {}\\n\"", "\"# SNPs: {}\\n\"", "\"# Chromosomes: {}\\n\"", ".", "format", "(", "self", ".", "source", ",", "self", ".", "assembly...
Save SNPs to file. Parameters ---------- filename : str filename for file to save Returns ------- str path to file in output directory if SNPs were saved, else empty str
[ "Save", "SNPs", "to", "file", "." ]
python
train
spookey/photon
photon/meta.py
https://github.com/spookey/photon/blob/57212a26ce713ab7723910ee49e3d0ba1697799f/photon/meta.py#L127-L142
def log(self, elem): ''' .. seealso:: :attr:`log` ''' if elem: self.__meta['log'].update({get_timestamp(precice=True): elem}) mfile = self.__meta['header']['stage'] self.__lock.acquire() try: j = read_json(mfile) if j != self.__meta: write_json(mfile, self.__meta) finally: self.__lock.release()
[ "def", "log", "(", "self", ",", "elem", ")", ":", "if", "elem", ":", "self", ".", "__meta", "[", "'log'", "]", ".", "update", "(", "{", "get_timestamp", "(", "precice", "=", "True", ")", ":", "elem", "}", ")", "mfile", "=", "self", ".", "__meta",...
.. seealso:: :attr:`log`
[ "..", "seealso", "::", ":", "attr", ":", "log" ]
python
train
noahbenson/neuropythy
neuropythy/freesurfer/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/freesurfer/core.py#L385-L432
def subject(name, meta_data=None, check_path=True): ''' subject(name) yields a freesurfer Subject object for the subject with the given name. Subjects are cached and not reloaded, so multiple calls to subject(name) will yield the same immutable subject object.. Note that subects returned by freesurfer_subject() are always persistent Immutable objects; this means that you must create a transient version of the subject to modify it via the member function sub.transient(). Better, you can make copies of the objects with desired modifications using the copy method--see the pimms library documentation regarding immutable classes and objects. The following options are accepted: * meta_data (default: None) may optionally be a map that contains meta-data to be passed along to the subject object (note that this meta-data will not be cached). * check_path (default: True) may optionally be set to False to ignore the requirement that a directory contain at least the mri/, label/, and surf/ directories to be considered a valid FreeSurfer subject directory. Subject objects returned using this argument are not cached. Additionally, check_path may be set to None instead of False, indicating that no checks or search should be performed; the string name should be trusted to be an exact relative or absolute path to a valid FreeSurfer subejct. ''' name = os.path.expanduser(os.path.expandvars(name)) if check_path is None: sub = Subject(name, check_path=False) if isinstance(sub, Subject): sub.persist() else: subpath = find_subject_path(name, check_path=check_path) if subpath is None and name == 'fsaverage': # we can use the benson and winawer 2018 dataset import neuropythy as ny try: return ny.data['benson_winawer_2018'].subjects['fsaverage'] except Exception: pass # error message below is more accurate... 
if subpath is None: raise ValueError('Could not locate subject with name \'%s\'' % name) elif check_path: fpath = '/' + os.path.relpath(subpath, '/') if fpath in subject._cache: sub = subject._cache[fpath] else: sub = Subject(subpath) if isinstance(sub, Subject): subject._cache[fpath] = sub.persist() else: sub = Subject(subpath, check_path=False) if isinstance(sub, Subject): sub.persist() return (None if sub is None else sub.with_meta(meta_data) if meta_data is not None else sub)
[ "def", "subject", "(", "name", ",", "meta_data", "=", "None", ",", "check_path", "=", "True", ")", ":", "name", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "expandvars", "(", "name", ")", ")", "if", "check_path", "is", "...
subject(name) yields a freesurfer Subject object for the subject with the given name. Subjects are cached and not reloaded, so multiple calls to subject(name) will yield the same immutable subject object.. Note that subects returned by freesurfer_subject() are always persistent Immutable objects; this means that you must create a transient version of the subject to modify it via the member function sub.transient(). Better, you can make copies of the objects with desired modifications using the copy method--see the pimms library documentation regarding immutable classes and objects. The following options are accepted: * meta_data (default: None) may optionally be a map that contains meta-data to be passed along to the subject object (note that this meta-data will not be cached). * check_path (default: True) may optionally be set to False to ignore the requirement that a directory contain at least the mri/, label/, and surf/ directories to be considered a valid FreeSurfer subject directory. Subject objects returned using this argument are not cached. Additionally, check_path may be set to None instead of False, indicating that no checks or search should be performed; the string name should be trusted to be an exact relative or absolute path to a valid FreeSurfer subejct.
[ "subject", "(", "name", ")", "yields", "a", "freesurfer", "Subject", "object", "for", "the", "subject", "with", "the", "given", "name", ".", "Subjects", "are", "cached", "and", "not", "reloaded", "so", "multiple", "calls", "to", "subject", "(", "name", ")"...
python
train
bosha/pypushalot
pushalot/apis.py
https://github.com/bosha/pypushalot/blob/c1b5c941210ae61716e1695816a40c1676733739/pushalot/apis.py#L97-L145
def _build_params_from_kwargs(self, **kwargs): """Builds parameters from passed arguments Search passed parameters in available methods, prepend specified API key, and return dictionary which can be sent directly to API server. :param kwargs: :type param: dict :raises ValueError: If type of specified parameter doesn't match the expected type. Also raised if some basic validation of passed parameter fails. :raises PushalotException: If required parameter not set. :return: Dictionary with params which can be sent to API server :rtype: dict """ api_methods = self.get_api_params() required_methods = self.get_api_required_params() ret_kwargs = {} for key, val in kwargs.items(): if key not in api_methods: warnings.warn( 'Passed uknown parameter [{}]'.format(key), Warning ) continue if key not in required_methods and val is None: continue if type(val) != api_methods[key]['type']: raise ValueError( "Invalid type specified to param: {}".format(key) ) if 'max_len' in api_methods[key]: if len(val) > api_methods[key]['max_len']: raise ValueError( "Lenght of parameter [{}] more than " "allowed length".format(key) ) ret_kwargs[api_methods[key]['param']] = val for item in required_methods: if item not in ret_kwargs: raise pushalot.exc.PushalotException( "Parameter [{}] required, but not set".format(item) ) return ret_kwargs
[ "def", "_build_params_from_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "api_methods", "=", "self", ".", "get_api_params", "(", ")", "required_methods", "=", "self", ".", "get_api_required_params", "(", ")", "ret_kwargs", "=", "{", "}", "for", "k...
Builds parameters from passed arguments Search passed parameters in available methods, prepend specified API key, and return dictionary which can be sent directly to API server. :param kwargs: :type param: dict :raises ValueError: If type of specified parameter doesn't match the expected type. Also raised if some basic validation of passed parameter fails. :raises PushalotException: If required parameter not set. :return: Dictionary with params which can be sent to API server :rtype: dict
[ "Builds", "parameters", "from", "passed", "arguments" ]
python
train
quantopian/zipline
zipline/finance/slippage.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/slippage.py#L399-L444
def _get_window_data(self, data, asset, window_length): """ Internal utility method to return the trailing mean volume over the past 'window_length' days, and volatility of close prices for a specific asset. Parameters ---------- data : The BarData from which to fetch the daily windows. asset : The Asset whose data we are fetching. window_length : Number of days of history used to calculate the mean volume and close price volatility. Returns ------- (mean volume, volatility) """ try: values = self._window_data_cache.get(asset, data.current_session) except KeyError: try: # Add a day because we want 'window_length' complete days, # excluding the current day. volume_history = data.history( asset, 'volume', window_length + 1, '1d', ) close_history = data.history( asset, 'close', window_length + 1, '1d', ) except HistoryWindowStartsBeforeData: # If there is not enough data to do a full history call, return # values as if there was no data. return 0, np.NaN # Exclude the first value of the percent change array because it is # always just NaN. close_volatility = close_history[:-1].pct_change()[1:].std( skipna=False, ) values = { 'volume': volume_history[:-1].mean(), 'close': close_volatility * SQRT_252, } self._window_data_cache.set(asset, values, data.current_session) return values['volume'], values['close']
[ "def", "_get_window_data", "(", "self", ",", "data", ",", "asset", ",", "window_length", ")", ":", "try", ":", "values", "=", "self", ".", "_window_data_cache", ".", "get", "(", "asset", ",", "data", ".", "current_session", ")", "except", "KeyError", ":", ...
Internal utility method to return the trailing mean volume over the past 'window_length' days, and volatility of close prices for a specific asset. Parameters ---------- data : The BarData from which to fetch the daily windows. asset : The Asset whose data we are fetching. window_length : Number of days of history used to calculate the mean volume and close price volatility. Returns ------- (mean volume, volatility)
[ "Internal", "utility", "method", "to", "return", "the", "trailing", "mean", "volume", "over", "the", "past", "window_length", "days", "and", "volatility", "of", "close", "prices", "for", "a", "specific", "asset", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/mcmc/random_walk_metropolis.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/random_walk_metropolis.py#L112-L170
def random_walk_uniform_fn(scale=1., name=None): """Returns a callable that adds a random uniform perturbation to the input. For more details on `random_walk_uniform_fn`, see `random_walk_normal_fn`. `scale` might be a `Tensor` or a list of `Tensor`s that should broadcast with state parts of the `current_state`. The generated uniform perturbation is sampled as a uniform point on the rectangle `[-scale, scale]`. Args: scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes` controlling the upper and lower bound of the uniform proposal distribution. name: Python `str` name prefixed to Ops created by this function. Default value: 'random_walk_uniform_fn'. Returns: random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s representing the state parts of the `current_state` and an `int` representing the random seed used to generate the proposal. The callable returns the same-type `list` of `Tensor`s as the input and represents the proposal for the RWM algorithm. """ def _fn(state_parts, seed): """Adds a uniform perturbation to the input state. Args: state_parts: A list of `Tensor`s of any shape and real dtype representing the state parts of the `current_state` of the Markov chain. seed: `int` or None. The random seed for this `Op`. If `None`, no seed is applied. Default value: `None`. Returns: perturbed_state_parts: A Python `list` of The `Tensor`s. Has the same shape and type as the `state_parts`. Raises: ValueError: if `scale` does not broadcast with `state_parts`. 
""" with tf.compat.v1.name_scope( name, 'random_walk_uniform_fn', values=[state_parts, scale, seed]): scales = scale if mcmc_util.is_list_like(scale) else [scale] if len(scales) == 1: scales *= len(state_parts) if len(state_parts) != len(scales): raise ValueError('`scale` must broadcast with `state_parts`.') seed_stream = distributions.SeedStream(seed, salt='RandomWalkUniformFn') next_state_parts = [ tf.random.uniform( minval=state_part - scale_part, maxval=state_part + scale_part, shape=tf.shape(input=state_part), dtype=state_part.dtype.base_dtype, seed=seed_stream()) for scale_part, state_part in zip(scales, state_parts) ] return next_state_parts return _fn
[ "def", "random_walk_uniform_fn", "(", "scale", "=", "1.", ",", "name", "=", "None", ")", ":", "def", "_fn", "(", "state_parts", ",", "seed", ")", ":", "\"\"\"Adds a uniform perturbation to the input state.\n\n Args:\n state_parts: A list of `Tensor`s of any shape and ...
Returns a callable that adds a random uniform perturbation to the input. For more details on `random_walk_uniform_fn`, see `random_walk_normal_fn`. `scale` might be a `Tensor` or a list of `Tensor`s that should broadcast with state parts of the `current_state`. The generated uniform perturbation is sampled as a uniform point on the rectangle `[-scale, scale]`. Args: scale: a `Tensor` or Python `list` of `Tensor`s of any shapes and `dtypes` controlling the upper and lower bound of the uniform proposal distribution. name: Python `str` name prefixed to Ops created by this function. Default value: 'random_walk_uniform_fn'. Returns: random_walk_uniform_fn: A callable accepting a Python `list` of `Tensor`s representing the state parts of the `current_state` and an `int` representing the random seed used to generate the proposal. The callable returns the same-type `list` of `Tensor`s as the input and represents the proposal for the RWM algorithm.
[ "Returns", "a", "callable", "that", "adds", "a", "random", "uniform", "perturbation", "to", "the", "input", "." ]
python
test
Koed00/django-q
django_q/cluster.py
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/cluster.py#L560-L594
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING): """ Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity """ # check if we have the psutil module if not psutil: logger.warning('Skipping cpu affinity because psutil was not found.') return # check if the platform supports cpu_affinity if actual and not hasattr(psutil.Process(process_ids[0]), 'cpu_affinity'): logger.warning('Faking cpu affinity because it is not supported on this platform') actual = False # get the available processors cpu_list = list(range(psutil.cpu_count())) # affinities of 0 or gte cpu_count, equals to no affinity if not n or n >= len(cpu_list): return # spread the workers over the available processors. index = 0 for pid in process_ids: affinity = [] for k in range(n): if index == len(cpu_list): index = 0 affinity.append(cpu_list[index]) index += 1 if psutil.pid_exists(pid): p = psutil.Process(pid) if actual: p.cpu_affinity(affinity) logger.info(_('{} will use cpu {}').format(pid, affinity))
[ "def", "set_cpu_affinity", "(", "n", ",", "process_ids", ",", "actual", "=", "not", "Conf", ".", "TESTING", ")", ":", "# check if we have the psutil module", "if", "not", "psutil", ":", "logger", ".", "warning", "(", "'Skipping cpu affinity because psutil was not foun...
Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity
[ "Sets", "the", "cpu", "affinity", "for", "the", "supplied", "processes", ".", "Requires", "the", "optional", "psutil", "module", ".", ":", "param", "int", "n", ":", "affinity", ":", "param", "list", "process_ids", ":", "a", "list", "of", "pids", ":", "pa...
python
train
inveniosoftware/invenio-records-rest
invenio_records_rest/serializers/base.py
https://github.com/inveniosoftware/invenio-records-rest/blob/e7b63c5f72cef03d06d3f1b4c12c0d37e3a628b9/invenio_records_rest/serializers/base.py#L140-L154
def preprocess_record(self, pid, record, links_factory=None, **kwargs): """Prepare a record and persistent identifier for serialization.""" links_factory = links_factory or (lambda x, record=None, **k: dict()) metadata = copy.deepcopy(record.replace_refs()) if self.replace_refs \ else record.dumps() return dict( pid=pid, metadata=metadata, links=links_factory(pid, record=record, **kwargs), revision=record.revision_id, created=(pytz.utc.localize(record.created).isoformat() if record.created else None), updated=(pytz.utc.localize(record.updated).isoformat() if record.updated else None), )
[ "def", "preprocess_record", "(", "self", ",", "pid", ",", "record", ",", "links_factory", "=", "None", ",", "*", "*", "kwargs", ")", ":", "links_factory", "=", "links_factory", "or", "(", "lambda", "x", ",", "record", "=", "None", ",", "*", "*", "k", ...
Prepare a record and persistent identifier for serialization.
[ "Prepare", "a", "record", "and", "persistent", "identifier", "for", "serialization", "." ]
python
train
twisted/axiom
axiom/userbase.py
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/userbase.py#L465-L533
def addAccount(self, username, domain, password, avatars=None, protocol=u'email', disabled=0, internal=False, verified=True): """ Create a user account, add it to this LoginBase, and return it. This method must be called within a transaction in my store. @param username: the user's name. @param domain: the domain part of the user's name [XXX TODO: this really ought to say something about whether it's a Q2Q domain, a SIP domain, an HTTP realm, or an email address domain - right now the assumption is generally that it's an email address domain, but not always] @param password: A shared secret. @param avatars: (Optional). A SubStore which, if passed, will be used by cred as the target of all adaptations for this user. By default, I will create a SubStore, and plugins can be installed on that substore using the powerUp method to provide implementations of cred client interfaces. @raise DuplicateUniqueItem: if the 'avatars' argument already contains a LoginAccount. @return: an instance of a LoginAccount, with all attributes filled out as they are passed in, stored in my store. """ # unicode(None) == u'None', kids. if username is not None: username = unicode(username) if domain is not None: domain = unicode(domain) if password is not None: password = unicode(password) if self.accountByAddress(username, domain) is not None: raise DuplicateUser(username, domain) if avatars is None: avatars = self.makeAvatars(domain, username) subStore = avatars.open() # create this unconditionally; as the docstring says, we must be run # within a transaction, so if something goes wrong in the substore # transaction this item's creation will be reverted... 
la = LoginAccount(store=self.store, password=password, avatars=avatars, disabled=disabled) def createSubStoreAccountObjects(): LoginAccount(store=subStore, password=password, disabled=disabled, avatars=subStore) la.addLoginMethod(localpart=username, domain=domain, protocol=protocol, internal=internal, verified=verified) subStore.transact(createSubStoreAccountObjects) return la
[ "def", "addAccount", "(", "self", ",", "username", ",", "domain", ",", "password", ",", "avatars", "=", "None", ",", "protocol", "=", "u'email'", ",", "disabled", "=", "0", ",", "internal", "=", "False", ",", "verified", "=", "True", ")", ":", "# unico...
Create a user account, add it to this LoginBase, and return it. This method must be called within a transaction in my store. @param username: the user's name. @param domain: the domain part of the user's name [XXX TODO: this really ought to say something about whether it's a Q2Q domain, a SIP domain, an HTTP realm, or an email address domain - right now the assumption is generally that it's an email address domain, but not always] @param password: A shared secret. @param avatars: (Optional). A SubStore which, if passed, will be used by cred as the target of all adaptations for this user. By default, I will create a SubStore, and plugins can be installed on that substore using the powerUp method to provide implementations of cred client interfaces. @raise DuplicateUniqueItem: if the 'avatars' argument already contains a LoginAccount. @return: an instance of a LoginAccount, with all attributes filled out as they are passed in, stored in my store.
[ "Create", "a", "user", "account", "add", "it", "to", "this", "LoginBase", "and", "return", "it", "." ]
python
train
mgoral/subconvert
src/subconvert/parsing/Offset.py
https://github.com/mgoral/subconvert/blob/59701e5e69ef1ca26ce7d1d766c936664aa2cb32/src/subconvert/parsing/Offset.py#L143-L148
def _getLowestSyncPoint(self, syncPointList, subs): """Get the lowest possible sync point. If it is not the first one on the **sorted** syncPointList, first subtitle will be converted to the one.""" if syncPointList[0].subNo == 0: return syncPointList[0] return SyncPoint(0, subs[0].start, subs[0].end)
[ "def", "_getLowestSyncPoint", "(", "self", ",", "syncPointList", ",", "subs", ")", ":", "if", "syncPointList", "[", "0", "]", ".", "subNo", "==", "0", ":", "return", "syncPointList", "[", "0", "]", "return", "SyncPoint", "(", "0", ",", "subs", "[", "0"...
Get the lowest possible sync point. If it is not the first one on the **sorted** syncPointList, first subtitle will be converted to the one.
[ "Get", "the", "lowest", "possible", "sync", "point", ".", "If", "it", "is", "not", "the", "first", "one", "on", "the", "**", "sorted", "**", "syncPointList", "first", "subtitle", "will", "be", "converted", "to", "the", "one", "." ]
python
train
Jammy2211/PyAutoLens
autolens/model/profiles/geometry_profiles.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/profiles/geometry_profiles.py#L170-L180
def grid_to_grid_radii(self, grid): """Convert a grid of (y, x) coordinates to a grid of their circular radii. If the coordinates have not been transformed to the profile's centre, this is performed automatically. Parameters ---------- grid : TransformedGrid(ndarray) The (y, x) coordinates in the reference frame of the profile. """ return np.sqrt(np.add(np.square(grid[:, 0]), np.square(grid[:, 1])))
[ "def", "grid_to_grid_radii", "(", "self", ",", "grid", ")", ":", "return", "np", ".", "sqrt", "(", "np", ".", "add", "(", "np", ".", "square", "(", "grid", "[", ":", ",", "0", "]", ")", ",", "np", ".", "square", "(", "grid", "[", ":", ",", "1...
Convert a grid of (y, x) coordinates to a grid of their circular radii. If the coordinates have not been transformed to the profile's centre, this is performed automatically. Parameters ---------- grid : TransformedGrid(ndarray) The (y, x) coordinates in the reference frame of the profile.
[ "Convert", "a", "grid", "of", "(", "y", "x", ")", "coordinates", "to", "a", "grid", "of", "their", "circular", "radii", "." ]
python
valid
agile-geoscience/welly
welly/well.py
https://github.com/agile-geoscience/welly/blob/ed4c991011d6290938fef365553041026ba29f42/welly/well.py#L933-L973
def qc_table_html(self, tests, alias=None): """ Makes a nice table out of ``qc_data()`` Returns: str. An HTML string. """ data = self.qc_data(tests, alias=alias) all_tests = [list(d.keys()) for d in data.values()] tests = list(set(utils.flatten_list(all_tests))) # Header row. r = '</th><th>'.join(['Curve', 'Passed', 'Score'] + tests) rows = '<tr><th>{}</th></tr>'.format(r) styles = { True: "#CCEECC", # Green False: "#FFCCCC", # Red } # Quality results. for curve, results in data.items(): if results: norm_score = sum(results.values()) / len(results) else: norm_score = -1 rows += '<tr><th>{}</th>'.format(curve) rows += '<td>{} / {}</td>'.format(sum(results.values()), len(results)) rows += '<td>{:.3f}</td>'.format(norm_score) for test in tests: result = results.get(test, '') style = styles.get(result, "#EEEEEE") rows += '<td style="background-color:{};">'.format(style) rows += '{}</td>'.format(result) rows += '</tr>' html = '<table>{}</table>'.format(rows) return html
[ "def", "qc_table_html", "(", "self", ",", "tests", ",", "alias", "=", "None", ")", ":", "data", "=", "self", ".", "qc_data", "(", "tests", ",", "alias", "=", "alias", ")", "all_tests", "=", "[", "list", "(", "d", ".", "keys", "(", ")", ")", "for"...
Makes a nice table out of ``qc_data()`` Returns: str. An HTML string.
[ "Makes", "a", "nice", "table", "out", "of", "qc_data", "()" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L317-L340
def audit_logs_list(self, filter_actor_id=None, filter_created_at=None, filter_ip_address=None, filter_source_type=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/audit_logs#listing-audit-logs" api_path = "/api/v2/audit_logs.json" api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if filter_actor_id: api_query.update({ "filter[actor_id]": filter_actor_id, }) if filter_created_at: api_query.update({ "filter[created_at]": filter_created_at, }) if filter_ip_address: api_query.update({ "filter[ip_address]": filter_ip_address, }) if filter_source_type: api_query.update({ "filter[source_type]": filter_source_type, }) return self.call(api_path, query=api_query, **kwargs)
[ "def", "audit_logs_list", "(", "self", ",", "filter_actor_id", "=", "None", ",", "filter_created_at", "=", "None", ",", "filter_ip_address", "=", "None", ",", "filter_source_type", "=", "None", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/aud...
https://developer.zendesk.com/rest_api/docs/core/audit_logs#listing-audit-logs
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "audit_logs#listing", "-", "audit", "-", "logs" ]
python
train
plone/plone.app.mosaic
src/plone/app/mosaic/transform.py
https://github.com/plone/plone.app.mosaic/blob/73b6acb18905025a76b239c86de9543ed9350991/src/plone/app/mosaic/transform.py#L26-L34
def getContext(context): """Return a safe context. In case a IBrowserView was passed (e.g. due to a 404 page), return the portal object. """ context = aq_parent(aq_base(context)) if not context or IBrowserView.providedBy(context): return getSite() return context
[ "def", "getContext", "(", "context", ")", ":", "context", "=", "aq_parent", "(", "aq_base", "(", "context", ")", ")", "if", "not", "context", "or", "IBrowserView", ".", "providedBy", "(", "context", ")", ":", "return", "getSite", "(", ")", "return", "con...
Return a safe context. In case a IBrowserView was passed (e.g. due to a 404 page), return the portal object.
[ "Return", "a", "safe", "context", ".", "In", "case", "a", "IBrowserView", "was", "passed", "(", "e", ".", "g", ".", "due", "to", "a", "404", "page", ")", "return", "the", "portal", "object", "." ]
python
train
jgorset/django-respite
respite/urls/routes.py
https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/urls/routes.py#L3-L18
def route(regex, view, method, name): """ Route the given view. :param regex: A string describing a regular expression to which the request path will be matched. :param view: A string describing the name of the view to delegate the request to. :param method: A string describing the HTTP method that this view accepts. :param name: A string describing the name of the URL pattern. ``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns a string describing a regular expression to which the request path will be matched. ``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns a string describing the name of the URL pattern. """ return _Route(regex, view, method, name)
[ "def", "route", "(", "regex", ",", "view", ",", "method", ",", "name", ")", ":", "return", "_Route", "(", "regex", ",", "view", ",", "method", ",", "name", ")" ]
Route the given view. :param regex: A string describing a regular expression to which the request path will be matched. :param view: A string describing the name of the view to delegate the request to. :param method: A string describing the HTTP method that this view accepts. :param name: A string describing the name of the URL pattern. ``regex`` may also be a lambda that accepts the parent resource's ``prefix`` argument and returns a string describing a regular expression to which the request path will be matched. ``name`` may also be a lambda that accepts the parent resource's ``views`` argument and returns a string describing the name of the URL pattern.
[ "Route", "the", "given", "view", ".", ":", "param", "regex", ":", "A", "string", "describing", "a", "regular", "expression", "to", "which", "the", "request", "path", "will", "be", "matched", ".", ":", "param", "view", ":", "A", "string", "describing", "t...
python
train
blubberdiblub/eztemplate
eztemplate/__main__.py
https://github.com/blubberdiblub/eztemplate/blob/ab5b2b4987c045116d130fd83e216704b8edfb5d/eztemplate/__main__.py#L230-L238
def check_engine(handle): """Check availability of requested template engine.""" if handle == 'help': dump_engines() sys.exit(0) if handle not in engines.engines: print('Engine "%s" is not available.' % (handle,), file=sys.stderr) sys.exit(1)
[ "def", "check_engine", "(", "handle", ")", ":", "if", "handle", "==", "'help'", ":", "dump_engines", "(", ")", "sys", ".", "exit", "(", "0", ")", "if", "handle", "not", "in", "engines", ".", "engines", ":", "print", "(", "'Engine \"%s\" is not available.'"...
Check availability of requested template engine.
[ "Check", "availability", "of", "requested", "template", "engine", "." ]
python
train
sassoftware/saspy
saspy/sasbase.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasbase.py#L540-L549
def exist(self, table: str, libref: str = "") -> bool: """ Does the SAS data set currently exist :param table: the name of the SAS Data Set :param libref: the libref for the Data Set, defaults to WORK, or USER if assigned :return: Boolean True it the Data Set exists and False if it does not :rtype: bool """ return self._io.exist(table, libref)
[ "def", "exist", "(", "self", ",", "table", ":", "str", ",", "libref", ":", "str", "=", "\"\"", ")", "->", "bool", ":", "return", "self", ".", "_io", ".", "exist", "(", "table", ",", "libref", ")" ]
Does the SAS data set currently exist :param table: the name of the SAS Data Set :param libref: the libref for the Data Set, defaults to WORK, or USER if assigned :return: Boolean True it the Data Set exists and False if it does not :rtype: bool
[ "Does", "the", "SAS", "data", "set", "currently", "exist" ]
python
train
google/grr
grr/server/grr_response_server/output_plugins/bigquery_plugin.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/output_plugins/bigquery_plugin.py#L215-L251
def RDFValueToBigQuerySchema(self, value): """Convert Exported* rdfvalue into a BigQuery schema.""" fields_array = [] for type_info in value.__class__.type_infos: # Nested structures are indicated by setting type "RECORD" if type_info.__class__.__name__ == "ProtoEmbedded": fields_array.append({ "name": type_info.name, "type": "RECORD", "description": type_info.description, "fields": self.RDFValueToBigQuerySchema(value.Get(type_info.name)) }) else: # If we don't have a specific map use string. bq_type = self.RDF_BIGQUERY_TYPE_MAP.get(type_info.proto_type_name, None) or "STRING" # For protos with RDF types we need to do some more checking to properly # covert types. if hasattr(type_info, "original_proto_type_name"): if type_info.original_proto_type_name in [ "RDFDatetime", "RDFDatetimeSeconds" ]: bq_type = "TIMESTAMP" elif type_info.proto_type_name == "uint64": # This is to catch fields like st_mode which are stored as ints but # exported as more useful strings. Things which are just plain ints # won't have an RDF type specified and so will be exported as # INTEGER bq_type = "STRING" fields_array.append({ "name": type_info.name, "type": bq_type, "description": type_info.description }) return fields_array
[ "def", "RDFValueToBigQuerySchema", "(", "self", ",", "value", ")", ":", "fields_array", "=", "[", "]", "for", "type_info", "in", "value", ".", "__class__", ".", "type_infos", ":", "# Nested structures are indicated by setting type \"RECORD\"", "if", "type_info", ".", ...
Convert Exported* rdfvalue into a BigQuery schema.
[ "Convert", "Exported", "*", "rdfvalue", "into", "a", "BigQuery", "schema", "." ]
python
train
mikedh/trimesh
trimesh/curvature.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/curvature.py#L121-L163
def line_ball_intersection(start_points, end_points, center, radius): """ Compute the length of the intersection of a line segment with a ball. Parameters ---------- start_points : (n,3) float, list of points in space end_points : (n,3) float, list of points in space center : (3,) float, the sphere center radius : float, the sphere radius Returns -------- lengths: (n,) float, the lengths. """ # We solve for the intersection of |x-c|**2 = r**2 and # x = o + dL. This yields # d = (-l.(o-c) +- sqrt[ l.(o-c)**2 - l.l((o-c).(o-c) - r^**2) ]) / l.l L = end_points - start_points oc = start_points - center # o-c r = radius ldotl = np.einsum('ij, ij->i', L, L) # l.l ldotoc = np.einsum('ij, ij->i', L, oc) # l.(o-c) ocdotoc = np.einsum('ij, ij->i', oc, oc) # (o-c).(o-c) discrims = ldotoc**2 - ldotl * (ocdotoc - r**2) # If discriminant is non-positive, then we have zero length lengths = np.zeros(len(start_points)) # Otherwise we solve for the solns with d2 > d1. m = discrims > 0 # mask d1 = (-ldotoc[m] - np.sqrt(discrims[m])) / ldotl[m] d2 = (-ldotoc[m] + np.sqrt(discrims[m])) / ldotl[m] # Line segment means we have 0 <= d <= 1 d1 = np.clip(d1, 0, 1) d2 = np.clip(d2, 0, 1) # Length is |o + d2 l - o + d1 l| = (d2 - d1) |l| lengths[m] = (d2 - d1) * np.sqrt(ldotl[m]) return lengths
[ "def", "line_ball_intersection", "(", "start_points", ",", "end_points", ",", "center", ",", "radius", ")", ":", "# We solve for the intersection of |x-c|**2 = r**2 and", "# x = o + dL. This yields", "# d = (-l.(o-c) +- sqrt[ l.(o-c)**2 - l.l((o-c).(o-c) - r^**2) ]) / l.l", "L", "=",...
Compute the length of the intersection of a line segment with a ball. Parameters ---------- start_points : (n,3) float, list of points in space end_points : (n,3) float, list of points in space center : (3,) float, the sphere center radius : float, the sphere radius Returns -------- lengths: (n,) float, the lengths.
[ "Compute", "the", "length", "of", "the", "intersection", "of", "a", "line", "segment", "with", "a", "ball", "." ]
python
train
gtaylor/python-colormath
colormath/color_objects.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_objects.py#L530-L542
def _clamp_rgb_coordinate(self, coord): """ Clamps an RGB coordinate, taking into account whether or not the color is upscaled or not. :param float coord: The coordinate value. :rtype: float :returns: The clamped value. """ if not self.is_upscaled: return min(max(coord, 0.0), 1.0) else: return min(max(coord, 1), 255)
[ "def", "_clamp_rgb_coordinate", "(", "self", ",", "coord", ")", ":", "if", "not", "self", ".", "is_upscaled", ":", "return", "min", "(", "max", "(", "coord", ",", "0.0", ")", ",", "1.0", ")", "else", ":", "return", "min", "(", "max", "(", "coord", ...
Clamps an RGB coordinate, taking into account whether or not the color is upscaled or not. :param float coord: The coordinate value. :rtype: float :returns: The clamped value.
[ "Clamps", "an", "RGB", "coordinate", "taking", "into", "account", "whether", "or", "not", "the", "color", "is", "upscaled", "or", "not", "." ]
python
train
limdauto/drf_openapi
drf_openapi/entities.py
https://github.com/limdauto/drf_openapi/blob/1673c6e039eec7f089336a83bdc31613f32f7e21/drf_openapi/entities.py#L294-L308
def fallback_schema_from_field(self, field): """ Fallback schema for field that isn't inspected properly by DRF and probably won't land in upstream canon due to its hacky nature only for doc purposes """ title = force_text(field.label) if field.label else '' description = force_text(field.help_text) if field.help_text else '' # since we can't really inspect dictfield and jsonfield, at least display object as type # instead of string if isinstance(field, (serializers.DictField, serializers.JSONField)): return coreschema.Object( properties={}, title=title, description=description )
[ "def", "fallback_schema_from_field", "(", "self", ",", "field", ")", ":", "title", "=", "force_text", "(", "field", ".", "label", ")", "if", "field", ".", "label", "else", "''", "description", "=", "force_text", "(", "field", ".", "help_text", ")", "if", ...
Fallback schema for field that isn't inspected properly by DRF and probably won't land in upstream canon due to its hacky nature only for doc purposes
[ "Fallback", "schema", "for", "field", "that", "isn", "t", "inspected", "properly", "by", "DRF", "and", "probably", "won", "t", "land", "in", "upstream", "canon", "due", "to", "its", "hacky", "nature", "only", "for", "doc", "purposes" ]
python
train
mitsei/dlkit
dlkit/json_/authorization/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/authorization/sessions.py#L2815-L2842
def delete_vault(self, vault_id): """Deletes a ``Vault``. arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault`` to remove raise: NotFound - ``vault_id`` not found raise: NullArgument - ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.delete_bin_template if self._catalog_session is not None: return self._catalog_session.delete_catalog(catalog_id=vault_id) collection = JSONClientValidated('authorization', collection='Vault', runtime=self._runtime) if not isinstance(vault_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') for object_catalog in ['Authorization', 'Function', 'Qualifier', 'Vault']: obj_collection = JSONClientValidated('authorization', collection=object_catalog, runtime=self._runtime) if obj_collection.find({'assignedVaultIds': {'$in': [str(vault_id)]}}).count() != 0: raise errors.IllegalState('catalog is not empty') collection.delete_one({'_id': ObjectId(vault_id.get_identifier())})
[ "def", "delete_vault", "(", "self", ",", "vault_id", ")", ":", "# Implemented from template for", "# osid.resource.BinAdminSession.delete_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "del...
Deletes a ``Vault``. arg: vault_id (osid.id.Id): the ``Id`` of the ``Vault`` to remove raise: NotFound - ``vault_id`` not found raise: NullArgument - ``vault_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Deletes", "a", "Vault", "." ]
python
train
pypa/bandersnatch
src/bandersnatch_filter_plugins/blacklist_name.py
https://github.com/pypa/bandersnatch/blob/8b702c3bc128c5a1cbdd18890adede2f7f17fad4/src/bandersnatch_filter_plugins/blacklist_name.py#L139-L174
def _check_match(self, name, version_string) -> bool: """ Check if the package name and version matches against a blacklisted package version specifier. Parameters ========== name: str Package name version: str Package version Returns ======= bool: True if it matches, False otherwise. """ if not name or not version_string: return False try: version = Version(version_string) except InvalidVersion: logger.debug(f"Package {name}=={version_string} has an invalid version") return False for requirement in self.blacklist_release_requirements: if name != requirement.name: continue if version in requirement.specifier: logger.debug( f"MATCH: Release {name}=={version} matches specifier " f"{requirement.specifier}" ) return True return False
[ "def", "_check_match", "(", "self", ",", "name", ",", "version_string", ")", "->", "bool", ":", "if", "not", "name", "or", "not", "version_string", ":", "return", "False", "try", ":", "version", "=", "Version", "(", "version_string", ")", "except", "Invali...
Check if the package name and version matches against a blacklisted package version specifier. Parameters ========== name: str Package name version: str Package version Returns ======= bool: True if it matches, False otherwise.
[ "Check", "if", "the", "package", "name", "and", "version", "matches", "against", "a", "blacklisted", "package", "version", "specifier", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/proxy/map.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/map.py#L316-L329
def force_unlock(self, key): """ Releases the lock for the specified key regardless of the lock owner. It always successfully unlocks the key, never blocks, and returns immediately. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), the key to lock. """ check_not_none(key, "key can't be None") key_data = self._to_data(key) return self._encode_invoke_on_key(map_force_unlock_codec, key_data, key=key_data, reference_id=self.reference_id_generator.get_and_increment())
[ "def", "force_unlock", "(", "self", ",", "key", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be None\"", ")", "key_data", "=", "self", ".", "_to_data", "(", "key", ")", "return", "self", ".", "_encode_invoke_on_key", "(", "map_force_unlock_codec", ...
Releases the lock for the specified key regardless of the lock owner. It always successfully unlocks the key, never blocks, and returns immediately. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), the key to lock.
[ "Releases", "the", "lock", "for", "the", "specified", "key", "regardless", "of", "the", "lock", "owner", ".", "It", "always", "successfully", "unlocks", "the", "key", "never", "blocks", "and", "returns", "immediately", "." ]
python
train
peakwinter/python-nginx
nginx.py
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L581-L591
def dumpf(obj, path): """ Write an nginx configuration to file. :param obj obj: nginx object (Conf, Server, Container) :param str path: path to nginx configuration on disk :returns: path the configuration was written to """ with open(path, 'w') as f: dump(obj, f) return path
[ "def", "dumpf", "(", "obj", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "dump", "(", "obj", ",", "f", ")", "return", "path" ]
Write an nginx configuration to file. :param obj obj: nginx object (Conf, Server, Container) :param str path: path to nginx configuration on disk :returns: path the configuration was written to
[ "Write", "an", "nginx", "configuration", "to", "file", "." ]
python
train
SheffieldML/GPy
GPy/likelihoods/poisson.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/likelihoods/poisson.py#L33-L49
def pdf_link(self, link_f, y, Y_metadata=None): """ Likelihood function given link(f) .. math:: p(y_{i}|\\lambda(f_{i})) = \\frac{\\lambda(f_{i})^{y_{i}}}{y_{i}!}e^{-\\lambda(f_{i})} :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in poisson distribution :returns: likelihood evaluated for this point :rtype: float """ assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape return np.exp(self.logpdf_link(link_f, y, Y_metadata))
[ "def", "pdf_link", "(", "self", ",", "link_f", ",", "y", ",", "Y_metadata", "=", "None", ")", ":", "assert", "np", ".", "atleast_1d", "(", "link_f", ")", ".", "shape", "==", "np", ".", "atleast_1d", "(", "y", ")", ".", "shape", "return", "np", ".",...
Likelihood function given link(f) .. math:: p(y_{i}|\\lambda(f_{i})) = \\frac{\\lambda(f_{i})^{y_{i}}}{y_{i}!}e^{-\\lambda(f_{i})} :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in poisson distribution :returns: likelihood evaluated for this point :rtype: float
[ "Likelihood", "function", "given", "link", "(", "f", ")" ]
python
train
azogue/dataweb
dataweb/classdataweb.py
https://github.com/azogue/dataweb/blob/085035855df7cef0fe7725bbe9a706832344d946/dataweb/classdataweb.py#L148-L157
def printif(self, obj_print, tipo_print=None): """Color output & logging.""" if self.verbose: print(obj_print) if tipo_print == 'ok': logging.info(obj_print) elif tipo_print == 'error': logging.error(obj_print) elif tipo_print == 'warning': logging.warning(obj_print)
[ "def", "printif", "(", "self", ",", "obj_print", ",", "tipo_print", "=", "None", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "obj_print", ")", "if", "tipo_print", "==", "'ok'", ":", "logging", ".", "info", "(", "obj_print", ")", "elif", ...
Color output & logging.
[ "Color", "output", "&", "logging", "." ]
python
train
msmbuilder/osprey
osprey/utils.py
https://github.com/msmbuilder/osprey/blob/ea09da24e45820e1300e24a52fefa6c849f7a986/osprey/utils.py#L58-L65
def in_directory(path): """Context manager (with statement) that changes the current directory during the context. """ curdir = os.path.abspath(os.curdir) os.chdir(path) yield os.chdir(curdir)
[ "def", "in_directory", "(", "path", ")", ":", "curdir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "curdir", ")", "os", ".", "chdir", "(", "path", ")", "yield", "os", ".", "chdir", "(", "curdir", ")" ]
Context manager (with statement) that changes the current directory during the context.
[ "Context", "manager", "(", "with", "statement", ")", "that", "changes", "the", "current", "directory", "during", "the", "context", "." ]
python
valid
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L628-L638
def list_courses(self): """ List enrolled courses. @return: List of enrolled courses. @rtype: [str] """ reply = get_page(self._session, OPENCOURSE_MEMBERSHIPS, json=True) course_list = reply['linked']['courses.v1'] slugs = [element['slug'] for element in course_list] return slugs
[ "def", "list_courses", "(", "self", ")", ":", "reply", "=", "get_page", "(", "self", ".", "_session", ",", "OPENCOURSE_MEMBERSHIPS", ",", "json", "=", "True", ")", "course_list", "=", "reply", "[", "'linked'", "]", "[", "'courses.v1'", "]", "slugs", "=", ...
List enrolled courses. @return: List of enrolled courses. @rtype: [str]
[ "List", "enrolled", "courses", "." ]
python
train
dylanaraps/pywal
pywal/__main__.py
https://github.com/dylanaraps/pywal/blob/c823e3c9dbd0100ca09caf824e77d296685a1c1e/pywal/__main__.py#L29-L102
def get_args(): """Get the script arguments.""" description = "wal - Generate colorschemes on the fly" arg = argparse.ArgumentParser(description=description) arg.add_argument("-a", metavar="\"alpha\"", help="Set terminal background transparency. \ *Only works in URxvt*") arg.add_argument("-b", metavar="background", help="Custom background color to use.") arg.add_argument("--backend", metavar="backend", help="Which color backend to use. \ Use 'wal --backend' to list backends.", const="list_backends", type=str, nargs="?") arg.add_argument("--theme", "-f", metavar="/path/to/file or theme_name", help="Which colorscheme file to use. \ Use 'wal --theme' to list builtin themes.", const="list_themes", nargs="?") arg.add_argument("--iterative", action="store_true", help="When pywal is given a directory as input and this " "flag is used: Go through the images in order " "instead of shuffled.") arg.add_argument("--saturate", metavar="0.0-1.0", help="Set the color saturation.") arg.add_argument("--preview", action="store_true", help="Print the current color palette.") arg.add_argument("--vte", action="store_true", help="Fix text-artifacts printed in VTE terminals.") arg.add_argument("-c", action="store_true", help="Delete all cached colorschemes.") arg.add_argument("-i", metavar="\"/path/to/img.jpg\"", help="Which image or directory to use.") arg.add_argument("-l", action="store_true", help="Generate a light colorscheme.") arg.add_argument("-n", action="store_true", help="Skip setting the wallpaper.") arg.add_argument("-o", metavar="\"script_name\"", action="append", help="External script to run after \"wal\".") arg.add_argument("-q", action="store_true", help="Quiet mode, don\'t print anything.") arg.add_argument("-r", action="store_true", help="'wal -r' is deprecated: Use \ (cat ~/.cache/wal/sequences &) instead.") arg.add_argument("-R", action="store_true", help="Restore previous colorscheme.") arg.add_argument("-s", action="store_true", help="Skip changing colors in 
terminals.") arg.add_argument("-t", action="store_true", help="Skip changing colors in tty.") arg.add_argument("-v", action="store_true", help="Print \"wal\" version.") arg.add_argument("-e", action="store_true", help="Skip reloading gtk/xrdb/i3/sway/polybar") return arg
[ "def", "get_args", "(", ")", ":", "description", "=", "\"wal - Generate colorschemes on the fly\"", "arg", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "description", ")", "arg", ".", "add_argument", "(", "\"-a\"", ",", "metavar", "=", "\"\\\"a...
Get the script arguments.
[ "Get", "the", "script", "arguments", "." ]
python
train