repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
stevearc/dql
dql/models.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/dql/models.py#L167-L180
def schema(self):
    """The DQL syntax for creating this item."""
    pieces = ["%s %s %s %s('%s'" % (self.name, self.data_type,
                                    self.index_type, self.key_type,
                                    self.index_name)]
    if self.includes is not None:
        # Optional projected attributes: , ['a', 'b']
        quoted = ", ".join("'%s'" % inc for inc in self.includes)
        pieces.append(", [%s]" % quoted)
    pieces.append(")")
    return "".join(pieces)
[ "def", "schema", "(", "self", ")", ":", "schema", "=", "\"%s %s %s %s('%s'\"", "%", "(", "self", ".", "name", ",", "self", ".", "data_type", ",", "self", ".", "index_type", ",", "self", ".", "key_type", ",", "self", ".", "index_name", ",", ")", "if", ...
The DQL syntax for creating this item
[ "The", "DQL", "syntax", "for", "creating", "this", "item" ]
python
train
jmcarp/betfair.py
betfair/betfair.py
https://github.com/jmcarp/betfair.py/blob/116df2fdc512575d1b4c4f1749d4a5bf98e519ff/betfair/betfair.py#L432-L444
def update_orders(self, market_id, instructions, customer_ref=None):
    """Update non-exposure changing fields.

    :param str market_id: The market id these orders are to be placed on
    :param list instructions: List of `UpdateInstruction` objects
    :param str customer_ref: Optional order identifier string
    """
    # get_kwargs(locals()) captures the call arguments as the request payload.
    params = utils.get_kwargs(locals())
    return self.make_api_request(
        'Sports',
        'updateOrders',
        params,
        model=models.UpdateExecutionReport,
    )
[ "def", "update_orders", "(", "self", ",", "market_id", ",", "instructions", ",", "customer_ref", "=", "None", ")", ":", "return", "self", ".", "make_api_request", "(", "'Sports'", ",", "'updateOrders'", ",", "utils", ".", "get_kwargs", "(", "locals", "(", ")...
Update non-exposure changing fields. :param str market_id: The market id these orders are to be placed on :param list instructions: List of `UpdateInstruction` objects :param str customer_ref: Optional order identifier string
[ "Update", "non", "-", "exposure", "changing", "fields", "." ]
python
train
openstack/pyghmi
pyghmi/ipmi/command.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L787-L825
def set_net_configuration(self, ipv4_address=None, ipv4_configuration=None,
                          ipv4_gateway=None, channel=None):
    """Set network configuration data.

    Apply desired network configuration data, leaving unspecified
    parameters alone.

    :param ipv4_address: CIDR notation for IP address and netmask
                         Example: '192.168.0.10/16'
    :param ipv4_configuration: Method to use to configure the network.
                               'DHCP' or 'Static'.
    :param ipv4_gateway: IP address of gateway to use.
    :param channel: LAN channel to configure, defaults to autodetect
    """
    if channel is None:
        channel = self.get_network_channel()
    if ipv4_configuration is not None:
        mode = ipv4_configuration.lower()
        if mode == 'dhcp':
            method = 2
        elif mode == 'static':
            method = 1
        else:
            raise Exception('Unrecognized ipv4cfg parameter {0}'.format(
                ipv4_configuration))
        # LAN config parameter 4: IP address source
        self.xraw_command(netfn=0xc, command=1, data=[channel, 4, method])
    if ipv4_address is not None:
        netmask = None
        if '/' in ipv4_address:
            ipv4_address, prefix = ipv4_address.split('/')
            netmask = _cidr_to_mask(int(prefix))
        # LAN config parameter 3: IP address
        self.xraw_command(
            netfn=0xc, command=1,
            data=bytearray((channel, 3)) + socket.inet_aton(ipv4_address))
        if netmask is not None:
            # LAN config parameter 6: subnet mask
            self.xraw_command(netfn=0xc, command=1,
                              data=bytearray((channel, 6)) + netmask)
    if ipv4_gateway is not None:
        # LAN config parameter 12: default gateway address
        self.xraw_command(
            netfn=0xc, command=1,
            data=bytearray((channel, 12)) + socket.inet_aton(ipv4_gateway))
[ "def", "set_net_configuration", "(", "self", ",", "ipv4_address", "=", "None", ",", "ipv4_configuration", "=", "None", ",", "ipv4_gateway", "=", "None", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".",...
Set network configuration data. Apply desired network configuration data, leaving unspecified parameters alone. :param ipv4_address: CIDR notation for IP address and netmask Example: '192.168.0.10/16' :param ipv4_configuration: Method to use to configure the network. 'DHCP' or 'Static'. :param ipv4_gateway: IP address of gateway to use. :param channel: LAN channel to configure, defaults to autodetect
[ "Set", "network", "configuration", "data", "." ]
python
train
thieman/dagobah
dagobah/core/core.py
https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/core/core.py#L364-L373
def delete_dependency(self, from_task_name, to_task_name):
    """Delete a dependency between two tasks."""
    logger.debug('Deleting dependency from {0} to {1}'.format(from_task_name,
                                                              to_task_name))
    # Graph edits are only allowed while the job is in a mutable state.
    if not self.state.allow_change_graph:
        message = ("job's graph is immutable in its current state: %s"
                   % self.state.status)
        raise DagobahError(message)
    self.delete_edge(from_task_name, to_task_name)
    self.commit()
[ "def", "delete_dependency", "(", "self", ",", "from_task_name", ",", "to_task_name", ")", ":", "logger", ".", "debug", "(", "'Deleting dependency from {0} to {1}'", ".", "format", "(", "from_task_name", ",", "to_task_name", ")", ")", "if", "not", "self", ".", "s...
Delete a dependency between two tasks.
[ "Delete", "a", "dependency", "between", "two", "tasks", "." ]
python
train
benley/butcher
butcher/buildfile.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/buildfile.py#L135-L177
def _parse(self, stream):
    """Parse a JSON BUILD file read from *stream* into graph nodes/edges."""
    builddata = json.load(stream)
    log.debug('This is a JSON build file.')
    if 'targets' not in builddata:
        log.warn('Warning: No targets defined here.')
        return
    for tdata in builddata['targets']:
        # TODO: validate name
        target = address.new(target=tdata.pop('name'),
                             repo=self.target.repo,
                             path=self.target.path)
        # Duplicate target definition? Uh oh.
        if target in self.node and 'target_obj' in self.node[target]:
            raise error.ButcherError(
                'Target is defined more than once: %s', target)
        rule_obj = targets.new(name=target, ruletype=tdata.pop('type'),
                               **tdata)
        log.debug('New target: %s', target)
        self.add_node(target, {'target_obj': rule_obj})
        # dep could be ":blabla" or "//foo:blabla" or "//foo/bar:blabla"
        for dep in rule_obj.composed_deps() or []:
            d_target = address.new(dep)
            if not d_target.repo:  # ":blabla"
                d_target.repo = self.target.repo
            if d_target.repo == self.target.repo and not d_target.path:
                d_target.path = self.target.path
            if d_target not in self.nodes():
                self.add_node(d_target)
            log.debug('New dep: %s -> %s', target, d_target)
            self.add_edge(target, d_target)
[ "def", "_parse", "(", "self", ",", "stream", ")", ":", "builddata", "=", "json", ".", "load", "(", "stream", ")", "log", ".", "debug", "(", "'This is a JSON build file.'", ")", "if", "'targets'", "not", "in", "builddata", ":", "log", ".", "warn", "(", ...
Parse a JSON BUILD file. Args: builddata: dictionary of buildfile data reponame: name of the repo that it came from path: directory path within the repo
[ "Parse", "a", "JSON", "BUILD", "file", "." ]
python
train
sendgrid/sendgrid-python
sendgrid/helpers/mail/mail.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L967-L990
def from_EmailMessage(cls, message):
    """Create a Mail object from an instance of email.message.EmailMessage.

    :type message: email.message.EmailMessage
    :rtype: Mail
    """
    mail = cls(
        from_email=Email(message.get('From')),
        subject=message.get('Subject'),
        to_emails=Email(message.get('To')),
    )
    try:
        body = message.get_content()
    except AttributeError:
        # Python2
        body = message.get_payload()
    content = Content(message.get_content_type(), body.strip())
    mail.add_content(content)
    for header_name, header_value in message.items():
        mail.add_header(Header(header_name, header_value))
    return mail
[ "def", "from_EmailMessage", "(", "cls", ",", "message", ")", ":", "mail", "=", "cls", "(", "from_email", "=", "Email", "(", "message", ".", "get", "(", "'From'", ")", ")", ",", "subject", "=", "message", ".", "get", "(", "'Subject'", ")", ",", "to_em...
Create a Mail object from an instance of email.message.EmailMessage. :type message: email.message.EmailMessage :rtype: Mail
[ "Create", "a", "Mail", "object", "from", "an", "instance", "of", "email", ".", "message", ".", "EmailMessage", "." ]
python
train
theosysbio/means
src/means/approximation/mea/mea_helpers.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/approximation/mea/mea_helpers.py#L80-L90
def make_k_chose_e(e_vec, k_vec):
    r"""
    Computes the product :math:`{\mathbf{n} \choose \mathbf{k}}`

    :param e_vec: the vector e
    :type e_vec: :class:`numpy.array`
    :param k_vec: the vector k
    :type k_vec: :class:`numpy.array`
    :return: a scalar
    """
    # Element-wise binomial coefficients k!/(e!(k-e)!), multiplied together.
    binomials = [sp.factorial(k) / (sp.factorial(e) * sp.factorial(k - e))
                 for e, k in zip(e_vec, k_vec)]
    return product(binomials)
[ "def", "make_k_chose_e", "(", "e_vec", ",", "k_vec", ")", ":", "return", "product", "(", "[", "sp", ".", "factorial", "(", "k", ")", "/", "(", "sp", ".", "factorial", "(", "e", ")", "*", "sp", ".", "factorial", "(", "k", "-", "e", ")", ")", "fo...
Computes the product :math:`{\mathbf{n} \choose \mathbf{k}}` :param e_vec: the vector e :type e_vec: :class:`numpy.array` :param k_vec: the vector k :type k_vec: :class:`numpy.array` :return: a scalar
[ "Computes", "the", "product", ":", "math", ":", "{", "\\", "mathbf", "{", "n", "}", "\\", "choose", "\\", "mathbf", "{", "k", "}}" ]
python
train
ybrs/pydisque
example/poor_consumer.py
https://github.com/ybrs/pydisque/blob/ea5ce1576b66398c1cce32cad0f15709b1ea8df8/example/poor_consumer.py#L27-L78
def main():
    """Start the poor_consumer.

    Parses command-line options, connects to the configured disque
    servers and loops forever, acking or nacking fetched jobs at the
    configured `--nack` probability.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:v",
                                   ["help", "nack=", "servers=", "queues="])
    except getopt.GetoptError as err:
        # py2-only `print str(err)` replaced with a py2/py3-compatible call.
        print(str(err))
        usage()
        sys.exit()

    # defaults
    nack = 0.0
    verbose = False
    servers = "localhost:7712,localhost:7711"
    queues = "test"

    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        # BUG FIX: the original used `o in ("--nack")` (and likewise for
        # --servers/--queues). Without a trailing comma the parentheses do
        # not make a tuple, so this was a substring test against a plain
        # string. Exact comparison is the intended behavior.
        elif o == "--nack":
            nack = float(a)
        elif o == "--servers":
            servers = a
        elif o == "--queues":
            queues = a
        else:
            assert False, "unhandled option"

    # prepare servers and queues for pydisque
    servers = servers.split(",")
    queues = queues.split(",")
    c = Client(servers)
    c.connect()

    while True:
        jobs = c.get_job(queues)
        for queue_name, job_id, job in jobs:
            rnd = random.random()
            # as this is a test processor, we don't do any validation on
            # the actual job body, so lets just pay attention to id's
            if rnd >= nack:
                print(">>> received job: %s" % job_id)
                c.ack_job(job_id)
            else:
                print(">>> bouncing job: %s" % job_id)
                c.nack_job(job_id)
[ "def", "main", "(", ")", ":", "try", ":", "opts", ",", "args", "=", "getopt", ".", "getopt", "(", "sys", ".", "argv", "[", "1", ":", "]", ",", "\"h:v\"", ",", "[", "\"help\"", ",", "\"nack=\"", ",", "\"servers=\"", ",", "\"queues=\"", "]", ")", "...
Start the poor_consumer.
[ "Start", "the", "poor_consumer", "." ]
python
train
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L823-L842
def delete_row(self):
    """Delete bookmarks or event from annotations, based on row."""
    annot_list = self.idx_annot_list
    for row in annot_list.selectionModel().selectedRows():
        idx = row.row()
        span = (annot_list.property('start')[idx],
                annot_list.property('end')[idx])
        name = annot_list.item(idx, 2).text()
        kind = annot_list.item(idx, 3).text()
        # Column 3 distinguishes bookmarks from events.
        remover = (self.annot.remove_bookmark if kind == 'bookmark'
                   else self.annot.remove_event)
        remover(name=name, time=span)

    # Clear any on-screen highlight of the deleted annotation.
    highlight = self.parent.traces.highlight
    if highlight:
        self.parent.traces.scene.removeItem(highlight)
        highlight = None
    self.parent.traces.event_sel = None
    self.update_annotations()
[ "def", "delete_row", "(", "self", ")", ":", "sel_model", "=", "self", ".", "idx_annot_list", ".", "selectionModel", "(", ")", "for", "row", "in", "sel_model", ".", "selectedRows", "(", ")", ":", "i", "=", "row", ".", "row", "(", ")", "start", "=", "s...
Delete bookmarks or event from annotations, based on row.
[ "Delete", "bookmarks", "or", "event", "from", "annotations", "based", "on", "row", "." ]
python
train
miyakogi/wdom
wdom/web_node.py
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/web_node.py#L259-L266
def removeChild(self, child: Node) -> Node:
    """Remove the child node from this node.

    If the node is not a child of this node, raise ValueError.
    """
    # Mirror the removal on the browser side only when connected.
    if self.connected:
        self._remove_child_web(child)
    return self._remove_child(child)
[ "def", "removeChild", "(", "self", ",", "child", ":", "Node", ")", "->", "Node", ":", "if", "self", ".", "connected", ":", "self", ".", "_remove_child_web", "(", "child", ")", "return", "self", ".", "_remove_child", "(", "child", ")" ]
Remove the child node from this node. If the node is not a child of this node, raise ValueError.
[ "Remove", "the", "child", "node", "from", "this", "node", "." ]
python
train
greyli/flask-avatars
flask_avatars/__init__.py
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L122-L134
def crop_box(endpoint=None, filename=None):
    """Create a crop box.

    :param endpoint: The endpoint of view function that serve avatar image
        file.
    :param filename: The filename of the image that need to be crop.
    """
    crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
    # Fall back to the bundled placeholder when no image is given.
    if endpoint is None or filename is None:
        url = url_for('avatars.static', filename='default/default_l.jpg')
    else:
        url = url_for(endpoint, filename=filename)
    img = ('<img src="%s" id="crop-box" style="max-width: %dpx;'
           ' display: block;">' % (url, crop_size))
    return Markup(img)
[ "def", "crop_box", "(", "endpoint", "=", "None", ",", "filename", "=", "None", ")", ":", "crop_size", "=", "current_app", ".", "config", "[", "'AVATARS_CROP_BASE_WIDTH'", "]", "if", "endpoint", "is", "None", "or", "filename", "is", "None", ":", "url", "=",...
Create a crop box. :param endpoint: The endpoint of view function that serve avatar image file. :param filename: The filename of the image that need to be crop.
[ "Create", "a", "crop", "box", "." ]
python
train
hotdoc/hotdoc
hotdoc/utils/setup_utils.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/setup_utils.py#L93-L113
def require_clean_submodules(repo_root, submodules):
    """Check on git submodules before distutils can do anything

    Since distutils cannot be trusted to update the tree after everything
    has been set in motion, this is not a distutils command.
    """
    # PACKAGERS: Add a return here to skip checks for git submodules
    # don't do anything if nothing is actually supposed to happen
    skip_flags = ('-h', '--help', '--help-commands', 'clean', 'submodule')
    if any(flag in sys.argv for flag in skip_flags):
        return

    status = _check_submodule_status(repo_root, submodules)
    if status == "missing":
        print("checking out submodules for the first time")
        _update_submodules(repo_root)
    elif status == "unclean":
        print(UNCLEAN_SUBMODULES_MSG)
[ "def", "require_clean_submodules", "(", "repo_root", ",", "submodules", ")", ":", "# PACKAGERS: Add a return here to skip checks for git submodules", "# don't do anything if nothing is actually supposed to happen", "for", "do_nothing", "in", "(", "'-h'", ",", "'--help'", ",", "'-...
Check on git submodules before distutils can do anything Since distutils cannot be trusted to update the tree after everything has been set in motion, this is not a distutils command.
[ "Check", "on", "git", "submodules", "before", "distutils", "can", "do", "anything", "Since", "distutils", "cannot", "be", "trusted", "to", "update", "the", "tree", "after", "everything", "has", "been", "set", "in", "motion", "this", "is", "not", "a", "distut...
python
train
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1708-L1771
def continue_abort(self, root_pipeline_key, cursor=None,
                   max_to_notify=_MAX_ABORTS_TO_BEGIN):
    """Sends the abort signal to all children for a root pipeline.

    Args:
      root_pipeline_key: db.Key of the root pipeline to abort.
      cursor: The query cursor for enumerating _PipelineRecords when
        inserting tasks to cause child pipelines to terminate.
      max_to_notify: Used for testing.
    """
    if not isinstance(root_pipeline_key, db.Key):
        root_pipeline_key = db.Key(root_pipeline_key)

    # NOTE: The results of this query may include _PipelineRecord instances
    # that are not actually "reachable", meaning you cannot get to them by
    # starting at the root pipeline and following "fanned_out" onward. This
    # is acceptable because even these defunct _PipelineRecords will properly
    # set their status to ABORTED when the signal comes, regardless of any
    # other status they may have had.
    #
    # The only gotcha here is if a Pipeline's finalize method somehow
    # modifies its inputs (like deleting an input file). In the case there
    # are unreachable child pipelines, it will appear as if two finalize
    # methods have been called instead of just one. The saving grace here is
    # that finalize must be idempotent, so this *should* be harmless.
    query = (_PipelineRecord.all(cursor=cursor)
             .filter('root_pipeline =', root_pipeline_key))
    results = query.fetch(max_to_notify)

    task_list = []
    for record in results:
        if record.status not in (_PipelineRecord.RUN,
                                 _PipelineRecord.WAITING):
            continue
        pipeline_key = record.key()
        task_list.append(taskqueue.Task(
            name='%s-%s-abort' % (self.task_name, pipeline_key.name()),
            url=self.abort_handler_path,
            params=dict(pipeline_key=pipeline_key,
                        purpose=_BarrierRecord.ABORT),
            headers={'X-Ae-Pipeline-Key': pipeline_key}))

    # Task continuation with sequence number to prevent fork-bombs.
    if len(results) == max_to_notify:
        name_match = re.match('(.*)-([0-9]+)', self.task_name)
        if name_match:
            prefix = name_match.group(1)
            end = int(name_match.group(2)) + 1
        else:
            prefix = self.task_name
            end = 0
        task_list.append(taskqueue.Task(
            name='%s-%d' % (prefix, end),
            url=self.fanout_abort_handler_path,
            params=dict(root_pipeline_key=root_pipeline_key,
                        cursor=query.cursor())))

    if task_list:
        try:
            taskqueue.Queue(self.queue_name).add(task_list)
        except (taskqueue.TombstonedTaskError,
                taskqueue.TaskAlreadyExistsError):
            # Duplicate continuation tasks are expected and harmless.
            pass
[ "def", "continue_abort", "(", "self", ",", "root_pipeline_key", ",", "cursor", "=", "None", ",", "max_to_notify", "=", "_MAX_ABORTS_TO_BEGIN", ")", ":", "if", "not", "isinstance", "(", "root_pipeline_key", ",", "db", ".", "Key", ")", ":", "root_pipeline_key", ...
Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing.
[ "Sends", "the", "abort", "signal", "to", "all", "children", "for", "a", "root", "pipeline", "." ]
python
train
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3793-L3829
def read_column(self, column, where=None, start=None, stop=None):
    """return a single column from the table, generally only indexables
    are interesting
    """
    # validate the version
    self.validate_version()

    # infer the data kind
    if not self.infer_axes():
        return False

    if where is not None:
        raise TypeError("read_column does not currently accept a where "
                        "clause")

    # find the axes
    for axis in self.axes:
        if column != axis.name:
            continue

        if not axis.is_data_indexable:
            raise ValueError(
                "column [{column}] can not be extracted individually; "
                "it is not data indexable".format(column=column))

        # column must be an indexable or a data column
        col = getattr(self.table.cols, column)
        axis.set_info(self.info)
        values = axis.convert(col[start:stop], nan_rep=self.nan_rep,
                              encoding=self.encoding,
                              errors=self.errors).take_data()
        return Series(_set_tz(values, axis.tz, True), name=column)

    raise KeyError(
        "column [{column}] not found in the table".format(column=column))
[ "def", "read_column", "(", "self", ",", "column", ",", "where", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "# validate the version", "self", ".", "validate_version", "(", ")", "# infer the data kind", "if", "not", "self", "...
return a single column from the table, generally only indexables are interesting
[ "return", "a", "single", "column", "from", "the", "table", "generally", "only", "indexables", "are", "interesting" ]
python
train
wummel/dosage
dosagelib/scraper.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/scraper.py#L528-L537
def check_scrapers():
    """Check for duplicate scraper class names."""
    seen = {}
    for scraperclass in _scraperclasses:
        key = scraperclass.getName().lower()
        if key in seen:
            # Names must be unique case-insensitively.
            raise ValueError('duplicate scrapers %s and %s found'
                             % (scraperclass.getName(),
                                seen[key].getName()))
        seen[key] = scraperclass
[ "def", "check_scrapers", "(", ")", ":", "d", "=", "{", "}", "for", "scraperclass", "in", "_scraperclasses", ":", "name", "=", "scraperclass", ".", "getName", "(", ")", ".", "lower", "(", ")", "if", "name", "in", "d", ":", "name1", "=", "scraperclass", ...
Check for duplicate scraper class names.
[ "Check", "for", "duplicate", "scraper", "class", "names", "." ]
python
train
contains-io/rcli
rcli/call.py
https://github.com/contains-io/rcli/blob/cdd6191a0e0a19bc767f84921650835d099349cf/rcli/call.py#L45-L78
def call(func, args):
    """Call the function with args normalized and cast to the correct types.

    Args:
        func: The function to call.
        args: The arguments parsed by docopt.

    Returns:
        The return value of func.
    """
    assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(
        func.__name__)
    # For callables that are not plain functions, inspect __call__ instead.
    raw_func = (
        func if isinstance(func, FunctionType) else func.__class__.__call__)
    hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))
    argspec = _getargspec(raw_func)
    named_args = {}
    varargs = ()
    for cli_name, arg_name, raw_value in _normalize(args):
        if arg_name == argspec.varargs:
            # *args receives a tuple of the hinted element type.
            hints[arg_name] = Tuple[hints[arg_name], ...]
        elif arg_name not in argspec.args and argspec.varkw in hints:
            # Unknown names fall under **kwargs' hint, when present.
            hints[arg_name] = hints[argspec.varkw]
        try:
            value = cast(hints[arg_name], raw_value)
        except TypeError as err:
            _LOGGER.exception(err)
            six.raise_from(exc.InvalidCliValueError(cli_name, raw_value), err)
        if arg_name == argspec.varargs:
            varargs = value
        elif (arg_name in argspec.args or argspec.varkw) and (
                arg_name not in named_args or named_args[arg_name] is None):
            named_args[arg_name] = value
    return func(*varargs, **named_args)
[ "def", "call", "(", "func", ",", "args", ")", ":", "assert", "hasattr", "(", "func", ",", "'__call__'", ")", ",", "'Cannot call func: {}'", ".", "format", "(", "func", ".", "__name__", ")", "raw_func", "=", "(", "func", "if", "isinstance", "(", "func", ...
Call the function with args normalized and cast to the correct types. Args: func: The function to call. args: The arguments parsed by docopt. Returns: The return value of func.
[ "Call", "the", "function", "with", "args", "normalized", "and", "cast", "to", "the", "correct", "types", "." ]
python
train
OpenTreeOfLife/peyotl
peyotl/utility/input_output.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/utility/input_output.py#L53-L58
def download(url, encoding='utf-8'):
    """Returns the text fetched via http GET from URL, read as `encoding`"""
    import requests
    response = requests.get(url)
    # Force the declared encoding before decoding the body text.
    response.encoding = encoding
    return response.text
[ "def", "download", "(", "url", ",", "encoding", "=", "'utf-8'", ")", ":", "import", "requests", "response", "=", "requests", ".", "get", "(", "url", ")", "response", ".", "encoding", "=", "encoding", "return", "response", ".", "text" ]
Returns the text fetched via http GET from URL, read as `encoding`
[ "Returns", "the", "text", "fetched", "via", "http", "GET", "from", "URL", "read", "as", "encoding" ]
python
train
IdentityPython/pysaml2
src/saml2/assertion.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/assertion.py#L736-L808
def construct(self, sp_entity_id, attrconvs, policy, issuer, farg,
              authn_class=None, authn_auth=None, authn_decl=None,
              encrypt=None, sec_context=None, authn_decl_ref=None,
              authn_instant="", subject_locality="", authn_statem=None,
              name_id=None, session_not_on_or_after=None):
    """ Construct the Assertion

    :param sp_entity_id: The entityid of the SP
    :param name_id: An NameID instance
    :param attrconvs: AttributeConverters
    :param policy: The policy that should be adhered to when replying
    :param issuer: Who is issuing the statement
    :param authn_class: The authentication class
    :param authn_auth: The authentication instance
    :param authn_decl: An Authentication Context declaration
    :param encrypt: Whether to encrypt parts or all of the Assertion
    :param sec_context: The security context used when encrypting
    :param authn_decl_ref: An Authentication Context declaration reference
    :param authn_instant: When the Authentication was performed
    :param subject_locality: Specifies the DNS domain name and IP address
        for the system from which the assertion subject was apparently
        authenticated.
    :param authn_statem: A AuthnStatement instance
    :return: An Assertion instance
    """
    _name_format = (policy.get_name_form(sp_entity_id) if policy
                    else NAME_FORMAT_URI)

    attr_statement = saml.AttributeStatement(
        attribute=from_local(attrconvs, self, _name_format))

    if encrypt == "attributes":
        # Replace each plain attribute with its encrypted counterpart.
        for attr in attr_statement.attribute:
            enc = sec_context.encrypt(text="%s" % attr)
            encd = xmlenc.encrypted_data_from_string(enc)
            encattr = saml.EncryptedAttribute(encrypted_data=encd)
            attr_statement.encrypted_attribute.append(encattr)
        attr_statement.attribute = []

    # start using now and for some time
    conds = policy.conditions(sp_entity_id)

    if authn_statem:
        _authn_statement = authn_statem
    elif authn_auth or authn_class or authn_decl or authn_decl_ref:
        _authn_statement = authn_statement(
            authn_class, authn_auth, authn_decl, authn_decl_ref,
            authn_instant, subject_locality,
            session_not_on_or_after=session_not_on_or_after)
    else:
        _authn_statement = None

    subject = do_subject(policy, sp_entity_id, name_id, **farg['subject'])
    _ass = assertion_factory(issuer=issuer, conditions=conds,
                             subject=subject)
    if _authn_statement:
        _ass.authn_statement = [_authn_statement]
    if not attr_statement.empty():
        _ass.attribute_statement = [attr_statement]
    return _ass
[ "def", "construct", "(", "self", ",", "sp_entity_id", ",", "attrconvs", ",", "policy", ",", "issuer", ",", "farg", ",", "authn_class", "=", "None", ",", "authn_auth", "=", "None", ",", "authn_decl", "=", "None", ",", "encrypt", "=", "None", ",", "sec_con...
Construct the Assertion :param sp_entity_id: The entityid of the SP :param in_response_to: An identifier of the message, this message is a response to :param name_id: An NameID instance :param attrconvs: AttributeConverters :param policy: The policy that should be adhered to when replying :param issuer: Who is issuing the statement :param authn_class: The authentication class :param authn_auth: The authentication instance :param authn_decl: An Authentication Context declaration :param encrypt: Whether to encrypt parts or all of the Assertion :param sec_context: The security context used when encrypting :param authn_decl_ref: An Authentication Context declaration reference :param authn_instant: When the Authentication was performed :param subject_locality: Specifies the DNS domain name and IP address for the system from which the assertion subject was apparently authenticated. :param authn_statem: A AuthnStatement instance :return: An Assertion instance
[ "Construct", "the", "Assertion" ]
python
train
pandas-dev/pandas
pandas/io/html.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L806-L846
def _parser_dispatch(flavor):
    """Choose the parser based on the input flavor.

    Parameters
    ----------
    flavor : str
        The type of parser to use. This must be a valid backend.

    Returns
    -------
    cls : _HtmlFrameParser subclass
        The parser class based on the requested input flavor.

    Raises
    ------
    ValueError
        * If `flavor` is not a valid backend.
    ImportError
        * If you do not have the requested `flavor`
    """
    known_flavors = list(_valid_parsers.keys())
    if flavor not in known_flavors:
        raise ValueError('{invalid!r} is not a valid flavor, valid flavors '
                         'are {valid}'
                         .format(invalid=flavor, valid=known_flavors))

    if flavor in ('bs4', 'html5lib'):
        if not _HAS_HTML5LIB:
            raise ImportError("html5lib not found, please install it")
        if not _HAS_BS4:
            raise ImportError(
                "BeautifulSoup4 (bs4) not found, please install it")
        import bs4
        # bs4 before 4.2.1 has known parsing problems.
        if LooseVersion(bs4.__version__) <= LooseVersion('4.2.0'):
            raise ValueError("A minimum version of BeautifulSoup 4.2.1 "
                             "is required")
    else:
        if not _HAS_LXML:
            raise ImportError("lxml not found, please install it")
    return _valid_parsers[flavor]
[ "def", "_parser_dispatch", "(", "flavor", ")", ":", "valid_parsers", "=", "list", "(", "_valid_parsers", ".", "keys", "(", ")", ")", "if", "flavor", "not", "in", "valid_parsers", ":", "raise", "ValueError", "(", "'{invalid!r} is not a valid flavor, valid flavors '",...
Choose the parser based on the input flavor. Parameters ---------- flavor : str The type of parser to use. This must be a valid backend. Returns ------- cls : _HtmlFrameParser subclass The parser class based on the requested input flavor. Raises ------ ValueError * If `flavor` is not a valid backend. ImportError * If you do not have the requested `flavor`
[ "Choose", "the", "parser", "based", "on", "the", "input", "flavor", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L1816-L1835
def __do_log(self, text):
    """
    Writes the given text verbatim into the log file (if any)
    and/or standard input (if the verbose flag is turned on).

    Used internally.

    @type  text: str
    @param text: Text to print.
    """
    # Normalize unicode to a byte string before writing.
    if isinstance(text, compat.unicode):
        text = text.encode('cp1252')
    if self.verbose:
        print(text)
    if self.logfile:
        try:
            self.fd.writelines('%s\n' % text)
        except IOError:
            io_err = sys.exc_info()[1]
            self.__logfile_error(io_err)
[ "def", "__do_log", "(", "self", ",", "text", ")", ":", "if", "isinstance", "(", "text", ",", "compat", ".", "unicode", ")", ":", "text", "=", "text", ".", "encode", "(", "'cp1252'", ")", "if", "self", ".", "verbose", ":", "print", "(", "text", ")",...
Writes the given text verbatim into the log file (if any) and/or standard input (if the verbose flag is turned on). Used internally. @type text: str @param text: Text to print.
[ "Writes", "the", "given", "text", "verbatim", "into", "the", "log", "file", "(", "if", "any", ")", "and", "/", "or", "standard", "input", "(", "if", "the", "verbose", "flag", "is", "turned", "on", ")", "." ]
python
train
SMTG-UCL/sumo
sumo/plotting/phonon_bs_plotter.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/plotting/phonon_bs_plotter.py#L169-L208
def _maketicks(self, ax, units='THz'): """Utility method to add tick marks to a band structure.""" # set y-ticks ax.yaxis.set_major_locator(MaxNLocator(6)) ax.yaxis.set_minor_locator(AutoMinorLocator(2)) ax.xaxis.set_minor_locator(AutoMinorLocator(2)) # set x-ticks; only plot the unique tick labels ticks = self.get_ticks() unique_d = [] unique_l = [] if ticks['distance']: temp_ticks = list(zip(ticks['distance'], ticks['label'])) unique_d.append(temp_ticks[0][0]) unique_l.append(temp_ticks[0][1]) for i in range(1, len(temp_ticks)): if unique_l[-1] != temp_ticks[i][1]: unique_d.append(temp_ticks[i][0]) unique_l.append(temp_ticks[i][1]) logging.info('\nLabel positions:') for dist, label in list(zip(unique_d, unique_l)): logging.info('\t{:.4f}: {}'.format(dist, label)) ax.set_xticks(unique_d) ax.set_xticklabels(unique_l) ax.xaxis.grid(True, ls='-') trans_xdata_yaxes = blended_transform_factory(ax.transData, ax.transAxes) ax.vlines(unique_d, 0, 1, transform=trans_xdata_yaxes, colors=rcParams['grid.color'], linewidth=rcParams['grid.linewidth']) # Use a text hyphen instead of a minus sign because some nice fonts # like Whitney don't come with a real minus labels = {'thz': 'THz', 'cm-1': r'cm$^{\mathrm{-}\mathregular{1}}$', 'ev': 'eV', 'mev': 'meV'} ax.set_ylabel('Frequency ({0})'.format(labels[units.lower()]))
[ "def", "_maketicks", "(", "self", ",", "ax", ",", "units", "=", "'THz'", ")", ":", "# set y-ticks", "ax", ".", "yaxis", ".", "set_major_locator", "(", "MaxNLocator", "(", "6", ")", ")", "ax", ".", "yaxis", ".", "set_minor_locator", "(", "AutoMinorLocator",...
Utility method to add tick marks to a band structure.
[ "Utility", "method", "to", "add", "tick", "marks", "to", "a", "band", "structure", "." ]
python
train
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1465-L1477
def com_google_fonts_check_metadata_menu_and_latin(family_metadata): """METADATA.pb should contain at least "menu" and "latin" subsets.""" missing = [] for s in ["menu", "latin"]: if s not in list(family_metadata.subsets): missing.append(s) if missing != []: yield FAIL, ("Subsets \"menu\" and \"latin\" are mandatory," " but METADATA.pb is missing" " \"{}\"").format(" and ".join(missing)) else: yield PASS, "METADATA.pb contains \"menu\" and \"latin\" subsets."
[ "def", "com_google_fonts_check_metadata_menu_and_latin", "(", "family_metadata", ")", ":", "missing", "=", "[", "]", "for", "s", "in", "[", "\"menu\"", ",", "\"latin\"", "]", ":", "if", "s", "not", "in", "list", "(", "family_metadata", ".", "subsets", ")", "...
METADATA.pb should contain at least "menu" and "latin" subsets.
[ "METADATA", ".", "pb", "should", "contain", "at", "least", "menu", "and", "latin", "subsets", "." ]
python
train
ming060/robotframework-uiautomatorlibrary
uiautomatorlibrary/Mobile.py
https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L255-L263
def swipe_by_coordinates(self, sx, sy, ex, ey, steps=10): """ Swipe from (sx, sy) to (ex, ey) with *steps* . Example: | Swipe By Coordinates | 540 | 1340 | 940 | 1340 | | # Swipe from (540, 1340) to (940, 100) with default steps 10 | | Swipe By Coordinates | 540 | 1340 | 940 | 1340 | 100 | # Swipe from (540, 1340) to (940, 100) with steps 100 | """ self.device.swipe(sx, sy, ex, ey, steps)
[ "def", "swipe_by_coordinates", "(", "self", ",", "sx", ",", "sy", ",", "ex", ",", "ey", ",", "steps", "=", "10", ")", ":", "self", ".", "device", ".", "swipe", "(", "sx", ",", "sy", ",", "ex", ",", "ey", ",", "steps", ")" ]
Swipe from (sx, sy) to (ex, ey) with *steps* . Example: | Swipe By Coordinates | 540 | 1340 | 940 | 1340 | | # Swipe from (540, 1340) to (940, 100) with default steps 10 | | Swipe By Coordinates | 540 | 1340 | 940 | 1340 | 100 | # Swipe from (540, 1340) to (940, 100) with steps 100 |
[ "Swipe", "from", "(", "sx", "sy", ")", "to", "(", "ex", "ey", ")", "with", "*", "steps", "*", "." ]
python
train
yoavaviram/python-amazon-simple-product-api
amazon/api.py
https://github.com/yoavaviram/python-amazon-simple-product-api/blob/f1cb0e209145fcfac9444e4c733dd19deb59d31a/amazon/api.py#L321-L356
def cart_add(self, items, CartId=None, HMAC=None, **kwargs): """CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`. """ if not CartId or not HMAC: raise CartException('CartId and HMAC required for CartAdd call') if isinstance(items, dict): items = [items] if len(items) > 10: raise CartException("You can't add more than 10 items at once") offer_id_key_template = 'Item.{0}.OfferListingId' quantity_key_template = 'Item.{0}.Quantity' for i, item in enumerate(items): kwargs[offer_id_key_template.format(i)] = item['offer_id'] kwargs[quantity_key_template.format(i)] = item['quantity'] response = self.api.CartAdd(CartId=CartId, HMAC=HMAC, **kwargs) root = objectify.fromstring(response) new_cart = AmazonCart(root) self._check_for_cart_error(new_cart) return new_cart
[ "def", "cart_add", "(", "self", ",", "items", ",", "CartId", "=", "None", ",", "HMAC", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "CartId", "or", "not", "HMAC", ":", "raise", "CartException", "(", "'CartId and HMAC required for CartAdd ca...
CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. It is not possible to create an empty cart! example: [{'offer_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`.
[ "CartAdd", ".", ":", "param", "items", ":", "A", "dictionary", "containing", "the", "items", "to", "be", "added", "to", "the", "cart", ".", "Or", "a", "list", "containing", "these", "dictionaries", ".", "It", "is", "not", "possible", "to", "create", "an"...
python
train
The-Politico/politico-civic-election-night
electionnight/serializers/office.py
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/office.py#L59-L63
def get_content(self, obj): """All content for office's page on an election day.""" election_day = ElectionDay.objects.get( date=self.context['election_date']) return PageContent.objects.office_content(election_day, obj)
[ "def", "get_content", "(", "self", ",", "obj", ")", ":", "election_day", "=", "ElectionDay", ".", "objects", ".", "get", "(", "date", "=", "self", ".", "context", "[", "'election_date'", "]", ")", "return", "PageContent", ".", "objects", ".", "office_conte...
All content for office's page on an election day.
[ "All", "content", "for", "office", "s", "page", "on", "an", "election", "day", "." ]
python
train
AirtestProject/Poco
poco/utils/simplerpc/transport/tcp/protocol.py
https://github.com/AirtestProject/Poco/blob/2c559a586adf3fd11ee81cabc446d4d3f6f2d119/poco/utils/simplerpc/transport/tcp/protocol.py#L25-L37
def input(self, data): """ 小数据片段拼接成完整数据包 如果内容足够则yield数据包 """ self.buf += data while len(self.buf) > HEADER_SIZE: data_len = struct.unpack('i', self.buf[0:HEADER_SIZE])[0] if len(self.buf) >= data_len + HEADER_SIZE: content = self.buf[HEADER_SIZE:data_len + HEADER_SIZE] self.buf = self.buf[data_len + HEADER_SIZE:] yield content else: break
[ "def", "input", "(", "self", ",", "data", ")", ":", "self", ".", "buf", "+=", "data", "while", "len", "(", "self", ".", "buf", ")", ">", "HEADER_SIZE", ":", "data_len", "=", "struct", ".", "unpack", "(", "'i'", ",", "self", ".", "buf", "[", "0", ...
小数据片段拼接成完整数据包 如果内容足够则yield数据包
[ "小数据片段拼接成完整数据包", "如果内容足够则yield数据包" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/discovery_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L5016-L5027
def _from_dict(cls, _dict): """Initialize a DocumentAccepted object from a json dictionary.""" args = {} if 'document_id' in _dict: args['document_id'] = _dict.get('document_id') if 'status' in _dict: args['status'] = _dict.get('status') if 'notices' in _dict: args['notices'] = [ Notice._from_dict(x) for x in (_dict.get('notices')) ] return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'document_id'", "in", "_dict", ":", "args", "[", "'document_id'", "]", "=", "_dict", ".", "get", "(", "'document_id'", ")", "if", "'status'", "in", "_dict", ":", "...
Initialize a DocumentAccepted object from a json dictionary.
[ "Initialize", "a", "DocumentAccepted", "object", "from", "a", "json", "dictionary", "." ]
python
train
pyout/pyout
pyout/common.py
https://github.com/pyout/pyout/blob/d9ff954bdedb6fc70f21f4fe77ad4bf926b201b0/pyout/common.py#L283-L304
def _compose(self, name, attributes): """Construct a style taking `attributes` from the column styles. Parameters ---------- name : str Name of main style (e.g., "header_"). attributes : set of str Adopt these elements from the column styles. Returns ------- The composite style for `name`. """ name_style = _safe_get(self.init_style, name, elements.default(name)) if self.init_style is not None and name_style is not None: result = {} for col in self.columns: cstyle = {k: v for k, v in self.style[col].items() if k in attributes} result[col] = dict(cstyle, **name_style) return result
[ "def", "_compose", "(", "self", ",", "name", ",", "attributes", ")", ":", "name_style", "=", "_safe_get", "(", "self", ".", "init_style", ",", "name", ",", "elements", ".", "default", "(", "name", ")", ")", "if", "self", ".", "init_style", "is", "not",...
Construct a style taking `attributes` from the column styles. Parameters ---------- name : str Name of main style (e.g., "header_"). attributes : set of str Adopt these elements from the column styles. Returns ------- The composite style for `name`.
[ "Construct", "a", "style", "taking", "attributes", "from", "the", "column", "styles", "." ]
python
train
jaraco/irc
irc/client.py
https://github.com/jaraco/irc/blob/571c1f448d5d5bb92bbe2605c33148bf6e698413/irc/client.py#L454-L474
def disconnect(self, message=""): """Hang up the connection. Arguments: message -- Quit message. """ try: del self.connected except AttributeError: return self.quit(message) try: self.socket.shutdown(socket.SHUT_WR) self.socket.close() except socket.error: pass del self.socket self._handle_event(Event("disconnect", self.server, "", [message]))
[ "def", "disconnect", "(", "self", ",", "message", "=", "\"\"", ")", ":", "try", ":", "del", "self", ".", "connected", "except", "AttributeError", ":", "return", "self", ".", "quit", "(", "message", ")", "try", ":", "self", ".", "socket", ".", "shutdown...
Hang up the connection. Arguments: message -- Quit message.
[ "Hang", "up", "the", "connection", "." ]
python
train
joyent/python-manta
manta/cmdln.py
https://github.com/joyent/python-manta/blob/f68ef142bdbac058c981e3b28e18d77612f5b7c6/manta/cmdln.py#L1472-L1503
def argv2line(argv): r"""Put together the given argument vector into a command line. "argv" is the argument vector to process. >>> from cmdln import argv2line >>> argv2line(['foo']) 'foo' >>> argv2line(['foo', 'bar']) 'foo bar' >>> argv2line(['foo', 'bar baz']) 'foo "bar baz"' >>> argv2line(['foo"bar']) 'foo"bar' >>> print(argv2line(['foo" bar'])) 'foo" bar' >>> print(argv2line(["foo' bar"])) "foo' bar" >>> argv2line(["foo'bar"]) "foo'bar" """ escapedArgs = [] for arg in argv: if ' ' in arg and '"' not in arg: arg = '"' + arg + '"' elif ' ' in arg and "'" not in arg: arg = "'" + arg + "'" elif ' ' in arg: arg = arg.replace('"', r'\"') arg = '"' + arg + '"' escapedArgs.append(arg) return ' '.join(escapedArgs)
[ "def", "argv2line", "(", "argv", ")", ":", "escapedArgs", "=", "[", "]", "for", "arg", "in", "argv", ":", "if", "' '", "in", "arg", "and", "'\"'", "not", "in", "arg", ":", "arg", "=", "'\"'", "+", "arg", "+", "'\"'", "elif", "' '", "in", "arg", ...
r"""Put together the given argument vector into a command line. "argv" is the argument vector to process. >>> from cmdln import argv2line >>> argv2line(['foo']) 'foo' >>> argv2line(['foo', 'bar']) 'foo bar' >>> argv2line(['foo', 'bar baz']) 'foo "bar baz"' >>> argv2line(['foo"bar']) 'foo"bar' >>> print(argv2line(['foo" bar'])) 'foo" bar' >>> print(argv2line(["foo' bar"])) "foo' bar" >>> argv2line(["foo'bar"]) "foo'bar"
[ "r", "Put", "together", "the", "given", "argument", "vector", "into", "a", "command", "line", "." ]
python
train
Dapid/tmscoring
tmscoring/tmscore.py
https://github.com/Dapid/tmscoring/blob/353c567e201ee9835c8209f6130b80b1cfb5b10f/tmscoring/tmscore.py#L230-L259
def _load_data_alignment(self, chain1, chain2): """ Extract the sequences from the PDB file, perform the alignment, and load the coordinates of the CA of the common residues. """ parser = PDB.PDBParser(QUIET=True) ppb = PDB.PPBuilder() structure1 = parser.get_structure(chain1, self.pdb1) structure2 = parser.get_structure(chain2, self.pdb2) seq1 = str(ppb.build_peptides(structure1)[0].get_sequence()) seq2 = str(ppb.build_peptides(structure2)[0].get_sequence()) # Alignment parameters taken from PconsFold renumbering script. align = pairwise2.align.globalms(seq1, seq2, 2, -1, -0.5, -0.1)[0] indexes = set(i for i, (s1, s2) in enumerate(zip(align[0], align[1])) if s1 != '-' and s2 != '-') coord1 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None] for i, r in enumerate(structure1.get_residues()) if i in indexes and 'CA' in r]).astype(DTYPE, copy=False) coord2 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None] for i, r in enumerate(structure2.get_residues()) if i in indexes and 'CA' in r]).astype(DTYPE, copy=False) self.coord1 = coord1 self.coord2 = coord2 self.N = len(seq1)
[ "def", "_load_data_alignment", "(", "self", ",", "chain1", ",", "chain2", ")", ":", "parser", "=", "PDB", ".", "PDBParser", "(", "QUIET", "=", "True", ")", "ppb", "=", "PDB", ".", "PPBuilder", "(", ")", "structure1", "=", "parser", ".", "get_structure", ...
Extract the sequences from the PDB file, perform the alignment, and load the coordinates of the CA of the common residues.
[ "Extract", "the", "sequences", "from", "the", "PDB", "file", "perform", "the", "alignment", "and", "load", "the", "coordinates", "of", "the", "CA", "of", "the", "common", "residues", "." ]
python
train
robmarkcole/HASS-data-detective
detective/core.py
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/core.py#L106-L153
def fetch_data_by_list(self, entities: List[str], limit=50000): """ Basic query from list of entities. Must be from same domain. Attempts to unpack lists up to 2 deep. Parameters ---------- entities : a list of entities returns a df """ if not len(set([e.split(".")[0] for e in entities])) == 1: print("Error: entities must be from same domain.") return if len(entities) == 1: print("Must pass more than 1 entity.") return query = text( """ SELECT entity_id, state, last_changed FROM states WHERE entity_id in ({}) AND NOT state='unknown' ORDER BY last_changed DESC LIMIT :limit """.format( ",".join("'{}'".format(ent) for ent in entities) ) ) response = self.perform_query(query, limit=limit) df = pd.DataFrame(response.fetchall()) df.columns = ["entity", "state", "last_changed"] df = df.set_index("last_changed") # Set the index on datetime df.index = pd.to_datetime(df.index, errors="ignore", utc=True) try: df["state"] = ( df["state"].mask(df["state"].eq("None")).dropna().astype(float) ) df = df.pivot_table(index="last_changed", columns="entity", values="state") df = df.fillna(method="ffill") df = df.dropna() # Drop any remaining nan. return df except: print("Error: entities were not all numericals, unformatted df.") return df
[ "def", "fetch_data_by_list", "(", "self", ",", "entities", ":", "List", "[", "str", "]", ",", "limit", "=", "50000", ")", ":", "if", "not", "len", "(", "set", "(", "[", "e", ".", "split", "(", "\".\"", ")", "[", "0", "]", "for", "e", "in", "ent...
Basic query from list of entities. Must be from same domain. Attempts to unpack lists up to 2 deep. Parameters ---------- entities : a list of entities returns a df
[ "Basic", "query", "from", "list", "of", "entities", ".", "Must", "be", "from", "same", "domain", ".", "Attempts", "to", "unpack", "lists", "up", "to", "2", "deep", "." ]
python
train
ungarj/mapchete
mapchete/tile.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/tile.py#L260-L289
def get_neighbors(self, connectedness=8): """ Return tile neighbors. Tile neighbors are unique, i.e. in some edge cases, where both the left and right neighbor wrapped around the antimeridian is the same. Also, neighbors ouside the northern and southern TilePyramid boundaries are excluded, because they are invalid. ------------- | 8 | 1 | 5 | ------------- | 4 | x | 2 | ------------- | 7 | 3 | 6 | ------------- Parameters ---------- connectedness : int [4 or 8] return four direct neighbors or all eight. Returns ------- list of BufferedTiles """ return [ BufferedTile(t, self.pixelbuffer) for t in self._tile.get_neighbors(connectedness=connectedness) ]
[ "def", "get_neighbors", "(", "self", ",", "connectedness", "=", "8", ")", ":", "return", "[", "BufferedTile", "(", "t", ",", "self", ".", "pixelbuffer", ")", "for", "t", "in", "self", ".", "_tile", ".", "get_neighbors", "(", "connectedness", "=", "connec...
Return tile neighbors. Tile neighbors are unique, i.e. in some edge cases, where both the left and right neighbor wrapped around the antimeridian is the same. Also, neighbors ouside the northern and southern TilePyramid boundaries are excluded, because they are invalid. ------------- | 8 | 1 | 5 | ------------- | 4 | x | 2 | ------------- | 7 | 3 | 6 | ------------- Parameters ---------- connectedness : int [4 or 8] return four direct neighbors or all eight. Returns ------- list of BufferedTiles
[ "Return", "tile", "neighbors", "." ]
python
valid
pndurette/gTTS
gtts/tokenizer/pre_processors.py
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/pre_processors.py#L19-L28
def end_of_line(text): """Re-form words cut by end-of-line hyphens. Remove "<hyphen><newline>". """ return PreProcessorRegex( search_args=u'-', search_func=lambda x: u"{}\n".format(x), repl='').run(text)
[ "def", "end_of_line", "(", "text", ")", ":", "return", "PreProcessorRegex", "(", "search_args", "=", "u'-'", ",", "search_func", "=", "lambda", "x", ":", "u\"{}\\n\"", ".", "format", "(", "x", ")", ",", "repl", "=", "''", ")", ".", "run", "(", "text", ...
Re-form words cut by end-of-line hyphens. Remove "<hyphen><newline>".
[ "Re", "-", "form", "words", "cut", "by", "end", "-", "of", "-", "line", "hyphens", "." ]
python
train
xiyouMc/ncmbot
ncmbot/core.py
https://github.com/xiyouMc/ncmbot/blob/c4832f3ee7630ba104a89559f09c1fc366d1547b/ncmbot/core.py#L456-L471
def music_comment(id, offset=0, limit=20): """获取歌曲的评论列表 :param id: 歌曲 ID :param offset: (optional) 分段起始位置,默认 0 :param limit: (optional) 数据上限多少行,默认 20 """ if id is None: raise ParamsError() r = NCloudBot() r.method = 'MUSIC_COMMENT' r.params = {'id': id} r.data = {'offset': offset, 'limit': limit, 'rid': id, "csrf_token": ""} r.send() return r.response
[ "def", "music_comment", "(", "id", ",", "offset", "=", "0", ",", "limit", "=", "20", ")", ":", "if", "id", "is", "None", ":", "raise", "ParamsError", "(", ")", "r", "=", "NCloudBot", "(", ")", "r", ".", "method", "=", "'MUSIC_COMMENT'", "r", ".", ...
获取歌曲的评论列表 :param id: 歌曲 ID :param offset: (optional) 分段起始位置,默认 0 :param limit: (optional) 数据上限多少行,默认 20
[ "获取歌曲的评论列表" ]
python
train
SylvanasSun/python-common-cache
common_cache/__init__.py
https://github.com/SylvanasSun/python-common-cache/blob/f113eb3cd751eed5ab5373e8610a31a444220cf8/common_cache/__init__.py#L612-L720
def access_cache(self, key=None, key_location_on_param=0, expire=None, auto_update=True, cache_loader=None, cache_writer=None, timeout=1): """ The decorator for simplifying of use cache, it supports auto-update cache(if parameter auto_update is True), load cache from other level cache system or data source and writes back the update result to the other level cache system or data source if cache miss. The parameter key assigns a key for access cache or update cache and if it is None so select a parameter as a key from the decorated function by key_location_on_param, notice: key and key_location_on_param cannot all is None. For function cache_loader() must is a one-parameter function and the parameter represent a key of the cache, if this parameter is None so use self.cache_loader(), if they all are None so not load cache from other caches system. For function cache_writer() must is a two-parameter function and the first parameter representing a key of the cache and the second parameter representing a value of the cache, notice: if the parameter auto_update is False so it will not execute. >>> import time >>> cache = Cache(log_level=logging.WARNING) >>> @cache.access_cache(key='a') ... def a(): ... return 'a from data source' >>> a() 'a from data source' >>> cache.get('a') 'a from data source' >>> cache.put(key='b', value='b from cache') >>> @cache.access_cache(key='b') ... def b(): ... return 'b from data source' >>> b() 'b from cache' >>> c_key = 'c' >>> @cache.access_cache(key_location_on_param=0) ... def c(key): ... return 'c from data source' >>> c(c_key) 'c from data source' >>> cache.get(c_key) 'c from data source' >>> @cache.access_cache(key='d', auto_update=False) ... def d(): ... return 'd from data source' >>> d() 'd from data source' >>> cache.get('d') == None True >>> @cache.access_cache(key='e', cache_loader=lambda k: '%s from cache loader' % k) ... def e(): ... 
return 'e from data source' >>> e() 'e from cache loader' >>> out_dict = {} >>> def writer(k, v): ... out_dict[k] = v >>> @cache.access_cache(key='f', cache_writer=writer) ... def f(): ... return 'f from data source' >>> f() 'f from data source' >>> time.sleep(1) # wait to execute complete because it in the other thread >>> out_dict {'f': 'f from data source'} >>> cache.with_cache_loader(lambda k: '%s from cache loader(global)' % k) True >>> @cache.access_cache(key='g') ... def g(): ... return 'g from data source' >>> g() 'g from cache loader(global)' """ def decorate(func): @functools.wraps(func) def wrapper(*args, **kwargs): k = None if len(args) - 1 >= key_location_on_param: k = args[key_location_on_param] if key is not None: k = key cache_result = self.get(key=k, timeout=timeout) # if the cache is miss and cache loader is the existent # then query cache from cache loader if cache_result is None: if cache_loader is not None: cache_result = cache_loader(k) elif self.cache_loader is not None: cache_result = self.cache_loader(k) # if still miss then execute a function that is decorated # then update cache on the basis of parameter auto_update if cache_result is not None: return cache_result else: result = func(*args, **kwargs) if auto_update: self.put(key=k, value=result, expire=expire, timeout=timeout) if cache_writer is not None: self.thread_pool.submit(cache_writer, k, result) elif self.cache_writer is not None: self.thread_pool.submit(self.cache_writer, k, result) return result return wrapper return decorate
[ "def", "access_cache", "(", "self", ",", "key", "=", "None", ",", "key_location_on_param", "=", "0", ",", "expire", "=", "None", ",", "auto_update", "=", "True", ",", "cache_loader", "=", "None", ",", "cache_writer", "=", "None", ",", "timeout", "=", "1"...
The decorator for simplifying of use cache, it supports auto-update cache(if parameter auto_update is True), load cache from other level cache system or data source and writes back the update result to the other level cache system or data source if cache miss. The parameter key assigns a key for access cache or update cache and if it is None so select a parameter as a key from the decorated function by key_location_on_param, notice: key and key_location_on_param cannot all is None. For function cache_loader() must is a one-parameter function and the parameter represent a key of the cache, if this parameter is None so use self.cache_loader(), if they all are None so not load cache from other caches system. For function cache_writer() must is a two-parameter function and the first parameter representing a key of the cache and the second parameter representing a value of the cache, notice: if the parameter auto_update is False so it will not execute. >>> import time >>> cache = Cache(log_level=logging.WARNING) >>> @cache.access_cache(key='a') ... def a(): ... return 'a from data source' >>> a() 'a from data source' >>> cache.get('a') 'a from data source' >>> cache.put(key='b', value='b from cache') >>> @cache.access_cache(key='b') ... def b(): ... return 'b from data source' >>> b() 'b from cache' >>> c_key = 'c' >>> @cache.access_cache(key_location_on_param=0) ... def c(key): ... return 'c from data source' >>> c(c_key) 'c from data source' >>> cache.get(c_key) 'c from data source' >>> @cache.access_cache(key='d', auto_update=False) ... def d(): ... return 'd from data source' >>> d() 'd from data source' >>> cache.get('d') == None True >>> @cache.access_cache(key='e', cache_loader=lambda k: '%s from cache loader' % k) ... def e(): ... return 'e from data source' >>> e() 'e from cache loader' >>> out_dict = {} >>> def writer(k, v): ... out_dict[k] = v >>> @cache.access_cache(key='f', cache_writer=writer) ... def f(): ... 
return 'f from data source' >>> f() 'f from data source' >>> time.sleep(1) # wait to execute complete because it in the other thread >>> out_dict {'f': 'f from data source'} >>> cache.with_cache_loader(lambda k: '%s from cache loader(global)' % k) True >>> @cache.access_cache(key='g') ... def g(): ... return 'g from data source' >>> g() 'g from cache loader(global)'
[ "The", "decorator", "for", "simplifying", "of", "use", "cache", "it", "supports", "auto", "-", "update", "cache", "(", "if", "parameter", "auto_update", "is", "True", ")", "load", "cache", "from", "other", "level", "cache", "system", "or", "data", "source", ...
python
train
google/dotty
efilter/parsers/common/tokenizer.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/tokenizer.py#L237-L240
def skip(self, steps=1): """Skip ahead by 'steps' tokens.""" for _ in six.moves.range(steps): self.next_token()
[ "def", "skip", "(", "self", ",", "steps", "=", "1", ")", ":", "for", "_", "in", "six", ".", "moves", ".", "range", "(", "steps", ")", ":", "self", ".", "next_token", "(", ")" ]
Skip ahead by 'steps' tokens.
[ "Skip", "ahead", "by", "steps", "tokens", "." ]
python
train
allianceauth/allianceauth
allianceauth/eveonline/autogroups/signals.py
https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/eveonline/autogroups/signals.py#L53-L65
def autogroups_states_changed(sender, instance, action, reverse, model, pk_set, *args, **kwargs): """ Trigger group membership update when a state is added or removed from an autogroup config. """ if action.startswith('post_'): for pk in pk_set: try: state = State.objects.get(pk=pk) instance.update_group_membership_for_state(state) except State.DoesNotExist: # Deleted States handled by the profile state change pass
[ "def", "autogroups_states_changed", "(", "sender", ",", "instance", ",", "action", ",", "reverse", ",", "model", ",", "pk_set", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "action", ".", "startswith", "(", "'post_'", ")", ":", "for", "pk...
Trigger group membership update when a state is added or removed from an autogroup config.
[ "Trigger", "group", "membership", "update", "when", "a", "state", "is", "added", "or", "removed", "from", "an", "autogroup", "config", "." ]
python
train
Nachtfeuer/pipeline
spline/matrix.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/matrix.py#L115-L134
def can_process_matrix(entry, matrix_tags): """ Check given matrix tags to be in the given list of matric tags. Args: entry (dict): matrix item (in yaml). matrix_tags (list): represents --matrix-tags defined by user in command line. Returns: bool: True when matrix entry can be processed. """ if len(matrix_tags) == 0: return True count = 0 if 'tags' in entry: for tag in matrix_tags: if tag in entry['tags']: count += 1 return count > 0
[ "def", "can_process_matrix", "(", "entry", ",", "matrix_tags", ")", ":", "if", "len", "(", "matrix_tags", ")", "==", "0", ":", "return", "True", "count", "=", "0", "if", "'tags'", "in", "entry", ":", "for", "tag", "in", "matrix_tags", ":", "if", "tag",...
Check given matrix tags to be in the given list of matric tags. Args: entry (dict): matrix item (in yaml). matrix_tags (list): represents --matrix-tags defined by user in command line. Returns: bool: True when matrix entry can be processed.
[ "Check", "given", "matrix", "tags", "to", "be", "in", "the", "given", "list", "of", "matric", "tags", "." ]
python
train
lobocv/crashreporter
crashreporter/injector.py
https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/injector.py#L11-L33
def inject_path(path): """ Imports :func: from a python file at :path: and executes it with *args, **kwargs arguments. Everytime this function is called the module is reloaded so that you can alter your debug code while the application is running. The result of the function is returned, otherwise the exception is returned (if one is raised) """ try: dirname = os.path.dirname(path) if dirname not in sys.path: exists_in_sys = False sys.path.append(dirname) else: exists_in_sys = True module_name = os.path.splitext(os.path.split(path)[1])[0] if module_name in sys.modules: reload(sys.modules[module_name]) else: __import__(module_name) if not exists_in_sys: sys.path.remove(dirname) except Exception as e: return e
[ "def", "inject_path", "(", "path", ")", ":", "try", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "if", "dirname", "not", "in", "sys", ".", "path", ":", "exists_in_sys", "=", "False", "sys", ".", "path", ".", "append", "...
Imports :func: from a python file at :path: and executes it with *args, **kwargs arguments. Everytime this function is called the module is reloaded so that you can alter your debug code while the application is running. The result of the function is returned, otherwise the exception is returned (if one is raised)
[ "Imports", ":", "func", ":", "from", "a", "python", "file", "at", ":", "path", ":", "and", "executes", "it", "with", "*", "args", "**", "kwargs", "arguments", ".", "Everytime", "this", "function", "is", "called", "the", "module", "is", "reloaded", "so", ...
python
train
limodou/uliweb
uliweb/contrib/model_config/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/model_config/__init__.py#L69-L100
def get_model_fields(model, add_reserver_flag=True): """ Creating fields suit for model_config , id will be skipped. """ import uliweb.orm as orm fields = [] m = {'type':'type_name', 'hint':'hint', 'default':'default', 'required':'required'} m1 = {'index':'index', 'unique':'unique'} for name, prop in model.properties.items(): if name == 'id': continue d = {} for k, v in m.items(): d[k] = getattr(prop, v) for k, v in m1.items(): d[k] = bool(prop.kwargs.get(v)) d['name'] = prop.fieldname or name d['verbose_name'] = unicode(prop.verbose_name) d['nullable'] = bool(prop.kwargs.get('nullable', orm.__nullable__)) if d['type'] in ('VARCHAR', 'CHAR', 'BINARY', 'VARBINARY'): d['max_length'] = prop.max_length if d['type'] in ('Reference', 'OneToOne', 'ManyToMany'): d['reference_class'] = prop.reference_class #collection_name will be _collection_name, it the original value d['collection_name'] = prop._collection_name d['server_default'] = prop.kwargs.get('server_default') d['_reserved'] = True fields.append(d) return fields
[ "def", "get_model_fields", "(", "model", ",", "add_reserver_flag", "=", "True", ")", ":", "import", "uliweb", ".", "orm", "as", "orm", "fields", "=", "[", "]", "m", "=", "{", "'type'", ":", "'type_name'", ",", "'hint'", ":", "'hint'", ",", "'default'", ...
Creating fields suit for model_config , id will be skipped.
[ "Creating", "fields", "suit", "for", "model_config", "id", "will", "be", "skipped", "." ]
python
train
zkbt/the-friendly-stars
thefriendlystars/constellations/constellation.py
https://github.com/zkbt/the-friendly-stars/blob/50d3f979e79e63c66629065c75595696dc79802e/thefriendlystars/constellations/constellation.py#L281-L314
def plot(self, sizescale=10, color=None, alpha=0.5, label=None, edgecolor='none', **kw): ''' Plot the ra and dec of the coordinates, at a given epoch, scaled by their magnitude. (This does *not* create a new empty figure.) Parameters ---------- sizescale : (optional) float The marker size for scatter for a star at the magnitudelimit. color : (optional) any valid color The color to plot (but there is a default for this catalog.) **kw : dict Additional keywords will be passed on to plt.scatter. Returns ------- plotted : outputs from the plots ''' # calculate the sizes of the stars (logarithmic with brightness?) size = np.maximum(sizescale*(1 + self.magnitudelimit - self.magnitude), 1) # make a scatter plot of the RA + Dec scatter = plt.scatter(self.ra, self.dec, s=size, color=color or self.color, label=label or '{} ({:.1f})'.format(self.name, self.epoch), alpha=alpha, edgecolor=edgecolor, **kw) return scatter
[ "def", "plot", "(", "self", ",", "sizescale", "=", "10", ",", "color", "=", "None", ",", "alpha", "=", "0.5", ",", "label", "=", "None", ",", "edgecolor", "=", "'none'", ",", "*", "*", "kw", ")", ":", "# calculate the sizes of the stars (logarithmic with b...
Plot the ra and dec of the coordinates, at a given epoch, scaled by their magnitude. (This does *not* create a new empty figure.) Parameters ---------- sizescale : (optional) float The marker size for scatter for a star at the magnitudelimit. color : (optional) any valid color The color to plot (but there is a default for this catalog.) **kw : dict Additional keywords will be passed on to plt.scatter. Returns ------- plotted : outputs from the plots
[ "Plot", "the", "ra", "and", "dec", "of", "the", "coordinates", "at", "a", "given", "epoch", "scaled", "by", "their", "magnitude", "." ]
python
train
inspirehep/harvesting-kit
harvestingkit/inspire_cds_package/from_cds.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_cds.py#L111-L157
def determine_collections(self): """Try to determine which collections this record should belong to.""" for value in record_get_field_values(self.record, '980', code='a'): if 'NOTE' in value.upper(): self.collections.add('NOTE') if 'THESIS' in value.upper(): self.collections.add('THESIS') if 'CONFERENCEPAPER' in value.upper(): self.collections.add('ConferencePaper') if "HIDDEN" in value.upper(): self.hidden = True if self.is_published(): self.collections.add("PUBLISHED") self.collections.add("CITEABLE") if 'NOTE' not in self.collections: from itertools import product # TODO: Move this to a KB kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-', 'ALICE-INT-', 'LHCb-PUB-'] values = record_get_field_values(self.record, "088", code='a') for val, rep in product(values, kb): if val.startswith(rep): self.collections.add('NOTE') break # 980 Arxiv tag if record_get_field_values(self.record, '035', filter_subfield_code="a", filter_subfield_value="arXiv"): self.collections.add("arXiv") # 980 HEP && CORE self.collections.add('HEP') self.collections.add('CORE') # 980 Conference Note if 'ConferencePaper' not in self.collections: for value in record_get_field_values(self.record, tag='962', code='n'): if value[-2:].isdigit(): self.collections.add('ConferencePaper') break # Clear out any existing ones. record_delete_fields(self.record, "980")
[ "def", "determine_collections", "(", "self", ")", ":", "for", "value", "in", "record_get_field_values", "(", "self", ".", "record", ",", "'980'", ",", "code", "=", "'a'", ")", ":", "if", "'NOTE'", "in", "value", ".", "upper", "(", ")", ":", "self", "."...
Try to determine which collections this record should belong to.
[ "Try", "to", "determine", "which", "collections", "this", "record", "should", "belong", "to", "." ]
python
valid
CalebBell/thermo
thermo/volume.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/volume.py#L252-L294
def Townsend_Hales(T, Tc, Vc, omega): r'''Calculates saturation liquid density, using the Townsend and Hales CSP method as modified from the original Riedel equation. Uses chemical critical volume and temperature, as well as acentric factor The density of a liquid is given by: .. math:: Vs = V_c/\left(1+0.85(1-T_r)+(1.692+0.986\omega)(1-T_r)^{1/3}\right) Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Vc : float Critical volume of fluid [m^3/mol] omega : float Acentric factor for fluid, [-] Returns ------- Vs : float Saturation liquid volume, [m^3/mol] Notes ----- The requirement for critical volume and acentric factor requires all data. Examples -------- >>> Townsend_Hales(300, 647.14, 55.95E-6, 0.3449) 1.8007361992619923e-05 References ---------- .. [1] Hales, J. L, and R Townsend. "Liquid Densities from 293 to 490 K of Nine Aromatic Hydrocarbons." The Journal of Chemical Thermodynamics 4, no. 5 (1972): 763-72. doi:10.1016/0021-9614(72)90050-X ''' Tr = T/Tc return Vc/(1 + 0.85*(1-Tr) + (1.692 + 0.986*omega)*(1-Tr)**(1/3.))
[ "def", "Townsend_Hales", "(", "T", ",", "Tc", ",", "Vc", ",", "omega", ")", ":", "Tr", "=", "T", "/", "Tc", "return", "Vc", "/", "(", "1", "+", "0.85", "*", "(", "1", "-", "Tr", ")", "+", "(", "1.692", "+", "0.986", "*", "omega", ")", "*", ...
r'''Calculates saturation liquid density, using the Townsend and Hales CSP method as modified from the original Riedel equation. Uses chemical critical volume and temperature, as well as acentric factor The density of a liquid is given by: .. math:: Vs = V_c/\left(1+0.85(1-T_r)+(1.692+0.986\omega)(1-T_r)^{1/3}\right) Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Vc : float Critical volume of fluid [m^3/mol] omega : float Acentric factor for fluid, [-] Returns ------- Vs : float Saturation liquid volume, [m^3/mol] Notes ----- The requirement for critical volume and acentric factor requires all data. Examples -------- >>> Townsend_Hales(300, 647.14, 55.95E-6, 0.3449) 1.8007361992619923e-05 References ---------- .. [1] Hales, J. L, and R Townsend. "Liquid Densities from 293 to 490 K of Nine Aromatic Hydrocarbons." The Journal of Chemical Thermodynamics 4, no. 5 (1972): 763-72. doi:10.1016/0021-9614(72)90050-X
[ "r", "Calculates", "saturation", "liquid", "density", "using", "the", "Townsend", "and", "Hales", "CSP", "method", "as", "modified", "from", "the", "original", "Riedel", "equation", ".", "Uses", "chemical", "critical", "volume", "and", "temperature", "as", "well...
python
valid
RudolfCardinal/pythonlib
cardinal_pythonlib/source_reformatting.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/source_reformatting.py#L239-L243
def _debug_line(linenum: int, line: str, extramsg: str = "") -> None: """ Writes a debugging report on a line. """ log.critical("{}Line {}: {!r}", extramsg, linenum, line)
[ "def", "_debug_line", "(", "linenum", ":", "int", ",", "line", ":", "str", ",", "extramsg", ":", "str", "=", "\"\"", ")", "->", "None", ":", "log", ".", "critical", "(", "\"{}Line {}: {!r}\"", ",", "extramsg", ",", "linenum", ",", "line", ")" ]
Writes a debugging report on a line.
[ "Writes", "a", "debugging", "report", "on", "a", "line", "." ]
python
train
pypa/setuptools
setuptools/command/build_py.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/build_py.py#L99-L114
def find_data_files(self, package, src_dir): """Return filenames for package's data files in 'src_dir'""" patterns = self._get_platform_patterns( self.package_data, package, src_dir, ) globs_expanded = map(glob, patterns) # flatten the expanded globs into an iterable of matches globs_matches = itertools.chain.from_iterable(globs_expanded) glob_files = filter(os.path.isfile, globs_matches) files = itertools.chain( self.manifest_files.get(package, []), glob_files, ) return self.exclude_data_files(package, src_dir, files)
[ "def", "find_data_files", "(", "self", ",", "package", ",", "src_dir", ")", ":", "patterns", "=", "self", ".", "_get_platform_patterns", "(", "self", ".", "package_data", ",", "package", ",", "src_dir", ",", ")", "globs_expanded", "=", "map", "(", "glob", ...
Return filenames for package's data files in 'src_dir
[ "Return", "filenames", "for", "package", "s", "data", "files", "in", "src_dir" ]
python
train
SHTOOLS/SHTOOLS
pyshtools/shclasses/shtensor.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shtensor.py#L501-L553
def plot_vyz(self, colorbar=True, cb_orientation='vertical', cb_label=None, ax=None, show=True, fname=None, **kwargs): """ Plot the Vyz component of the tensor. Usage ----- x.plot_vyz([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{yz}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguements that will be sent to the SHGrid.plot() and plt.imshow() methods. """ if cb_label is None: cb_label = self._vyz_label if ax is None: fig, axes = self.vyz.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, show=False, **kwargs) if show: fig.show() if fname is not None: fig.savefig(fname) return fig, axes else: self.vyz.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, ax=ax, **kwargs)
[ "def", "plot_vyz", "(", "self", ",", "colorbar", "=", "True", ",", "cb_orientation", "=", "'vertical'", ",", "cb_label", "=", "None", ",", "ax", "=", "None", ",", "show", "=", "True", ",", "fname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "i...
Plot the Vyz component of the tensor. Usage ----- x.plot_vyz([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{yz}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguements that will be sent to the SHGrid.plot() and plt.imshow() methods.
[ "Plot", "the", "Vyz", "component", "of", "the", "tensor", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/learn/get_realtime_data.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/learn/get_realtime_data.py#L174-L183
def _example_plate_subplate(quote_ctx): """ 获取板块集合下的子板块列表,输出 市场,板块分类,板块代码,名称,ID """ ret_status, ret_data = quote_ctx.get_plate_list(ft.Market.SZ, ft.Plate.ALL) if ret_status != ft.RET_OK: print(ret_data) exit() print("plate_subplate") print(ret_data)
[ "def", "_example_plate_subplate", "(", "quote_ctx", ")", ":", "ret_status", ",", "ret_data", "=", "quote_ctx", ".", "get_plate_list", "(", "ft", ".", "Market", ".", "SZ", ",", "ft", ".", "Plate", ".", "ALL", ")", "if", "ret_status", "!=", "ft", ".", "RET...
获取板块集合下的子板块列表,输出 市场,板块分类,板块代码,名称,ID
[ "获取板块集合下的子板块列表,输出", "市场,板块分类", "板块代码,名称,ID" ]
python
train
Varkal/chuda
chuda/shell.py
https://github.com/Varkal/chuda/blob/0d93b716dede35231c21be97bcc19a656655983f/chuda/shell.py#L114-L129
def send(self, value): """ Send text to stdin. Can only be used on non blocking commands Args: value (str): the text to write on stdin Raises: TypeError: If command is blocking Returns: ShellCommand: return this ShellCommand instance for chaining """ if not self.block and self._stdin is not None: self.writer.write("{}\n".format(value)) return self else: raise TypeError(NON_BLOCKING_ERROR_MESSAGE)
[ "def", "send", "(", "self", ",", "value", ")", ":", "if", "not", "self", ".", "block", "and", "self", ".", "_stdin", "is", "not", "None", ":", "self", ".", "writer", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "value", ")", ")", "return", ...
Send text to stdin. Can only be used on non blocking commands Args: value (str): the text to write on stdin Raises: TypeError: If command is blocking Returns: ShellCommand: return this ShellCommand instance for chaining
[ "Send", "text", "to", "stdin", ".", "Can", "only", "be", "used", "on", "non", "blocking", "commands" ]
python
train
lreis2415/PyGeoC
pygeoc/postTauDEM.py
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/postTauDEM.py#L111-L132
def output_compressed_dinf(dinfflowang, compdinffile, weightfile): """Output compressed Dinf flow direction and weight to raster file Args: dinfflowang: Dinf flow direction raster file compdinffile: Compressed D8 flow code weightfile: The correspond weight """ dinf_r = RasterUtilClass.read_raster(dinfflowang) data = dinf_r.data xsize = dinf_r.nCols ysize = dinf_r.nRows nodata_value = dinf_r.noDataValue cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 2, 3) updated_angle, dir_code, weight = cal_dir_code(data, nodata_value) RasterUtilClass.write_gtiff_file(dinfflowang, ysize, xsize, updated_angle, dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32) RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code, dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Int16) RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight, dinf_r.geotrans, dinf_r.srs, DEFAULT_NODATA, GDT_Float32)
[ "def", "output_compressed_dinf", "(", "dinfflowang", ",", "compdinffile", ",", "weightfile", ")", ":", "dinf_r", "=", "RasterUtilClass", ".", "read_raster", "(", "dinfflowang", ")", "data", "=", "dinf_r", ".", "data", "xsize", "=", "dinf_r", ".", "nCols", "ysi...
Output compressed Dinf flow direction and weight to raster file Args: dinfflowang: Dinf flow direction raster file compdinffile: Compressed D8 flow code weightfile: The correspond weight
[ "Output", "compressed", "Dinf", "flow", "direction", "and", "weight", "to", "raster", "file", "Args", ":", "dinfflowang", ":", "Dinf", "flow", "direction", "raster", "file", "compdinffile", ":", "Compressed", "D8", "flow", "code", "weightfile", ":", "The", "co...
python
train
4Catalyzer/flask-resty
flask_resty/spec/declaration.py
https://github.com/4Catalyzer/flask-resty/blob/a8b6502a799c270ca9ce41c6d8b7297713942097/flask_resty/spec/declaration.py#L85-L94
def get_marshmallow_schema_name(self, plugin, schema): """Get the schema name. If the schema doesn't exist, create it. """ try: return plugin.openapi.refs[schema] except KeyError: plugin.spec.definition(schema.__name__, schema=schema) return schema.__name__
[ "def", "get_marshmallow_schema_name", "(", "self", ",", "plugin", ",", "schema", ")", ":", "try", ":", "return", "plugin", ".", "openapi", ".", "refs", "[", "schema", "]", "except", "KeyError", ":", "plugin", ".", "spec", ".", "definition", "(", "schema", ...
Get the schema name. If the schema doesn't exist, create it.
[ "Get", "the", "schema", "name", "." ]
python
train
ray-project/ray
python/ray/tune/trial.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/trial.py#L346-L365
def init_logger(self): """Init logger.""" if not self.result_logger: if not os.path.exists(self.local_dir): os.makedirs(self.local_dir) if not self.logdir: self.logdir = tempfile.mkdtemp( prefix="{}_{}".format( str(self)[:MAX_LEN_IDENTIFIER], date_str()), dir=self.local_dir) elif not os.path.exists(self.logdir): os.makedirs(self.logdir) self.result_logger = UnifiedLogger( self.config, self.logdir, upload_uri=self.upload_dir, loggers=self.loggers, sync_function=self.sync_function)
[ "def", "init_logger", "(", "self", ")", ":", "if", "not", "self", ".", "result_logger", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "local_dir", ")", ":", "os", ".", "makedirs", "(", "self", ".", "local_dir", ")", "if", "n...
Init logger.
[ "Init", "logger", "." ]
python
train
mrstephenneal/mysql-toolkit
mysql/toolkit/datatypes/text.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/datatypes/text.py#L10-L16
def is_varchar(self): """Determine if a data record is of the type VARCHAR.""" dt = DATA_TYPES['varchar'] if type(self.data) is dt['type'] and len(self.data) < dt['max']: self.type = 'VARCHAR' self.len = len(self.data) return True
[ "def", "is_varchar", "(", "self", ")", ":", "dt", "=", "DATA_TYPES", "[", "'varchar'", "]", "if", "type", "(", "self", ".", "data", ")", "is", "dt", "[", "'type'", "]", "and", "len", "(", "self", ".", "data", ")", "<", "dt", "[", "'max'", "]", ...
Determine if a data record is of the type VARCHAR.
[ "Determine", "if", "a", "data", "record", "is", "of", "the", "type", "VARCHAR", "." ]
python
train
har07/PySastrawi
src/Sastrawi/Stemmer/Context/Context.py
https://github.com/har07/PySastrawi/blob/01afc81c579bde14dcb41c33686b26af8afab121/src/Sastrawi/Stemmer/Context/Context.py#L33-L43
def execute(self): """Execute stemming process; the result can be retrieved with result""" #step 1 - 5 self.start_stemming_process() #step 6 if self.dictionary.contains(self.current_word): self.result = self.current_word else: self.result = self.original_word
[ "def", "execute", "(", "self", ")", ":", "#step 1 - 5", "self", ".", "start_stemming_process", "(", ")", "#step 6", "if", "self", ".", "dictionary", ".", "contains", "(", "self", ".", "current_word", ")", ":", "self", ".", "result", "=", "self", ".", "cu...
Execute stemming process; the result can be retrieved with result
[ "Execute", "stemming", "process", ";", "the", "result", "can", "be", "retrieved", "with", "result" ]
python
train
collectiveacuity/labPack
labpack/databases/sql.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/databases/sql.py#L268-L308
def _extract_columns(self, table_name): ''' a method to extract the column properties of an existing table ''' import re from sqlalchemy import MetaData, VARCHAR, INTEGER, BLOB, BOOLEAN, FLOAT from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, BIT, BYTEA # retrieve list of tables metadata_object = MetaData() table_list = self.engine.table_names() # determine columns prior_columns = {} if table_name in table_list: metadata_object.reflect(self.engine) existing_table = metadata_object.tables[table_name] for column in existing_table.columns: column_type = None column_length = None if column.type.__class__ == FLOAT().__class__: column_type = 'float' elif column.type.__class__ == DOUBLE_PRECISION().__class__: # Postgres column_type = 'float' elif column.type.__class__ == INTEGER().__class__: column_type = 'integer' elif column.type.__class__ == VARCHAR().__class__: column_length = getattr(column.type, 'length', None) if column_length == 1: if column.primary_key: column_length = None column_type = 'string' elif column.type.__class__ == BLOB().__class__: column_type = 'list' elif column.type.__class__ in (BIT().__class__, BYTEA().__class__): column_type = 'list' elif column.type.__class__ == BOOLEAN().__class__: column_type = 'boolean' prior_columns[column.key] = (column.key, column_type, '', column_length) return prior_columns
[ "def", "_extract_columns", "(", "self", ",", "table_name", ")", ":", "import", "re", "from", "sqlalchemy", "import", "MetaData", ",", "VARCHAR", ",", "INTEGER", ",", "BLOB", ",", "BOOLEAN", ",", "FLOAT", "from", "sqlalchemy", ".", "dialects", ".", "postgresq...
a method to extract the column properties of an existing table
[ "a", "method", "to", "extract", "the", "column", "properties", "of", "an", "existing", "table" ]
python
train
apache/spark
python/pyspark/conf.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/conf.py#L191-L196
def getAll(self): """Get all values as a list of key-value pairs.""" if self._jconf is not None: return [(elem._1(), elem._2()) for elem in self._jconf.getAll()] else: return self._conf.items()
[ "def", "getAll", "(", "self", ")", ":", "if", "self", ".", "_jconf", "is", "not", "None", ":", "return", "[", "(", "elem", ".", "_1", "(", ")", ",", "elem", ".", "_2", "(", ")", ")", "for", "elem", "in", "self", ".", "_jconf", ".", "getAll", ...
Get all values as a list of key-value pairs.
[ "Get", "all", "values", "as", "a", "list", "of", "key", "-", "value", "pairs", "." ]
python
train
tensorflow/tensorboard
tensorboard/plugins/debugger/debugger_plugin.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin.py#L449-L488
def _process_health_pill_value(self, wall_time, step, device_name, output_slot, node_name, tensor_proto, node_name_set=None): """Creates a HealthPillEvent containing various properties of a health pill. Args: wall_time: The wall time in seconds. step: The session run step of the event. device_name: The name of the node's device. output_slot: The numeric output slot. node_name: The name of the node (without the output slot). tensor_proto: A tensor proto of data. node_name_set: An optional set of node names that are relevant. If not provided, no filtering by relevance occurs. Returns: An event_accumulator.HealthPillEvent. Or None if one could not be created. """ if node_name_set and node_name not in node_name_set: # This event is not relevant. return None # Since we seek health pills for a specific step, this function # returns 1 health pill per node per step. The wall time is the # seconds since the epoch. elements = list(tensor_util.make_ndarray(tensor_proto)) return HealthPillEvent( wall_time=wall_time, step=step, device_name=device_name, output_slot=output_slot, node_name=node_name, dtype=repr(tf.as_dtype(elements[12])), shape=elements[14:], value=elements)
[ "def", "_process_health_pill_value", "(", "self", ",", "wall_time", ",", "step", ",", "device_name", ",", "output_slot", ",", "node_name", ",", "tensor_proto", ",", "node_name_set", "=", "None", ")", ":", "if", "node_name_set", "and", "node_name", "not", "in", ...
Creates a HealthPillEvent containing various properties of a health pill. Args: wall_time: The wall time in seconds. step: The session run step of the event. device_name: The name of the node's device. output_slot: The numeric output slot. node_name: The name of the node (without the output slot). tensor_proto: A tensor proto of data. node_name_set: An optional set of node names that are relevant. If not provided, no filtering by relevance occurs. Returns: An event_accumulator.HealthPillEvent. Or None if one could not be created.
[ "Creates", "a", "HealthPillEvent", "containing", "various", "properties", "of", "a", "health", "pill", "." ]
python
train
jobovy/galpy
galpy/orbit/OrbitTop.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L278-L298
def z(self,*args,**kwargs): """ NAME: z PURPOSE: return vertical height INPUT: t - (optional) time at which to get the vertical height ro= (Object-wide default) physical scale for distances to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: z(t) HISTORY: 2010-09-21 - Written - Bovy (NYU) """ if len(self.vxvv) < 5: raise AttributeError("linear and planar orbits do not have z()") thiso= self(*args,**kwargs) onet= (len(thiso.shape) == 1) if onet: return thiso[3] else: return thiso[3,:]
[ "def", "z", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "self", ".", "vxvv", ")", "<", "5", ":", "raise", "AttributeError", "(", "\"linear and planar orbits do not have z()\"", ")", "thiso", "=", "self", "(", "*"...
NAME: z PURPOSE: return vertical height INPUT: t - (optional) time at which to get the vertical height ro= (Object-wide default) physical scale for distances to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: z(t) HISTORY: 2010-09-21 - Written - Bovy (NYU)
[ "NAME", ":", "z", "PURPOSE", ":", "return", "vertical", "height", "INPUT", ":", "t", "-", "(", "optional", ")", "time", "at", "which", "to", "get", "the", "vertical", "height", "ro", "=", "(", "Object", "-", "wide", "default", ")", "physical", "scale",...
python
train
yyuu/botornado
boto/manage/server.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/manage/server.py#L270-L348
def create(cls, config_file=None, logical_volume = None, cfg = None, **params): """ Create a new instance based on the specified configuration file or the specified configuration and the passed in parameters. If the config_file argument is not None, the configuration is read from there. Otherwise, the cfg argument is used. The config file may include other config files with a #import reference. The included config files must reside in the same directory as the specified file. The logical_volume argument, if supplied, will be used to get the current physical volume ID and use that as an override of the value specified in the config file. This may be useful for debugging purposes when you want to debug with a production config file but a test Volume. The dictionary argument may be used to override any EC2 configuration values in the config file. """ if config_file: cfg = Config(path=config_file) if cfg.has_section('EC2'): # include any EC2 configuration values that aren't specified in params: for option in cfg.options('EC2'): if option not in params: params[option] = cfg.get('EC2', option) getter = CommandLineGetter() getter.get(cls, params) region = params.get('region') ec2 = region.connect() cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key) ami = params.get('ami') kp = params.get('keypair') group = params.get('group') zone = params.get('zone') # deal with possibly passed in logical volume: if logical_volume != None: cfg.set('EBS', 'logical_volume_name', logical_volume.name) cfg_fp = StringIO.StringIO() cfg.write(cfg_fp) # deal with the possibility that zone and/or keypair are strings read from the config file: if isinstance(zone, Zone): zone = zone.name if isinstance(kp, KeyPair): kp = kp.name reservation = ami.run(min_count=1, max_count=params.get('quantity', 1), key_name=kp, security_groups=[group], instance_type=params.get('instance_type'), placement = zone, user_data = cfg_fp.getvalue()) l = [] i = 0 elastic_ip = 
params.get('elastic_ip') instances = reservation.instances if elastic_ip != None and instances.__len__() > 0: instance = instances[0] print 'Waiting for instance to start so we can set its elastic IP address...' # Sometimes we get a message from ec2 that says that the instance does not exist. # Hopefully the following delay will giv eec2 enough time to get to a stable state: time.sleep(5) while instance.update() != 'running': time.sleep(1) instance.use_ip(elastic_ip) print 'set the elastic IP of the first instance to %s' % elastic_ip for instance in instances: s = cls() s.ec2 = ec2 s.name = params.get('name') + '' if i==0 else str(i) s.description = params.get('description') s.region_name = region.name s.instance_id = instance.id if elastic_ip and i == 0: s.elastic_ip = elastic_ip s.put() l.append(s) i += 1 return l
[ "def", "create", "(", "cls", ",", "config_file", "=", "None", ",", "logical_volume", "=", "None", ",", "cfg", "=", "None", ",", "*", "*", "params", ")", ":", "if", "config_file", ":", "cfg", "=", "Config", "(", "path", "=", "config_file", ")", "if", ...
Create a new instance based on the specified configuration file or the specified configuration and the passed in parameters. If the config_file argument is not None, the configuration is read from there. Otherwise, the cfg argument is used. The config file may include other config files with a #import reference. The included config files must reside in the same directory as the specified file. The logical_volume argument, if supplied, will be used to get the current physical volume ID and use that as an override of the value specified in the config file. This may be useful for debugging purposes when you want to debug with a production config file but a test Volume. The dictionary argument may be used to override any EC2 configuration values in the config file.
[ "Create", "a", "new", "instance", "based", "on", "the", "specified", "configuration", "file", "or", "the", "specified", "configuration", "and", "the", "passed", "in", "parameters", ".", "If", "the", "config_file", "argument", "is", "not", "None", "the", "confi...
python
train
mlperf/training
reinforcement/tensorflow/minigo/strategies.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/strategies.py#L175-L188
def pick_move(self): """Picks a move to play, based on MCTS readout statistics. Highest N is most robust indicator. In the early stage of the game, pick a move weighted by visit count; later on, pick the absolute max.""" if self.root.position.n >= self.temp_threshold: fcoord = self.root.best_child() else: cdf = self.root.children_as_pi(squash=True).cumsum() cdf /= cdf[-2] # Prevents passing via softpick. selection = random.random() fcoord = cdf.searchsorted(selection) assert self.root.child_N[fcoord] != 0 return coords.from_flat(fcoord)
[ "def", "pick_move", "(", "self", ")", ":", "if", "self", ".", "root", ".", "position", ".", "n", ">=", "self", ".", "temp_threshold", ":", "fcoord", "=", "self", ".", "root", ".", "best_child", "(", ")", "else", ":", "cdf", "=", "self", ".", "root"...
Picks a move to play, based on MCTS readout statistics. Highest N is most robust indicator. In the early stage of the game, pick a move weighted by visit count; later on, pick the absolute max.
[ "Picks", "a", "move", "to", "play", "based", "on", "MCTS", "readout", "statistics", "." ]
python
train
sckott/pygbif
pygbif/occurrences/get.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/get.py#L3-L20
def get(key, **kwargs): ''' Gets details for a single, interpreted occurrence :param key: [int] A GBIF occurrence key :return: A dictionary, of results Usage:: from pygbif import occurrences occurrences.get(key = 1258202889) occurrences.get(key = 1227768771) occurrences.get(key = 1227769518) ''' url = gbif_baseurl + 'occurrence/' + str(key) out = gbif_GET(url, {}, **kwargs) return out
[ "def", "get", "(", "key", ",", "*", "*", "kwargs", ")", ":", "url", "=", "gbif_baseurl", "+", "'occurrence/'", "+", "str", "(", "key", ")", "out", "=", "gbif_GET", "(", "url", ",", "{", "}", ",", "*", "*", "kwargs", ")", "return", "out" ]
Gets details for a single, interpreted occurrence :param key: [int] A GBIF occurrence key :return: A dictionary, of results Usage:: from pygbif import occurrences occurrences.get(key = 1258202889) occurrences.get(key = 1227768771) occurrences.get(key = 1227769518)
[ "Gets", "details", "for", "a", "single", "interpreted", "occurrence" ]
python
train
sublee/zeronimo
zeronimo/core.py
https://github.com/sublee/zeronimo/blob/b216638232932718d2cbc5eabd870c8f5b5e83fb/zeronimo/core.py#L675-L694
def dispatch_reply(self, reply, value): """Dispatches the reply to the proper queue.""" method = reply.method call_id = reply.call_id task_id = reply.task_id if method & ACK: try: result_queue = self.result_queues[call_id] except KeyError: raise KeyError('already established or unprepared call') if method == ACCEPT: worker_info = value result = RemoteResult(self, call_id, task_id, worker_info) self.results[call_id][task_id] = result result_queue.put_nowait(result) elif method == REJECT: result_queue.put_nowait(None) else: result = self.results[call_id][task_id] result.set_reply(reply.method, value)
[ "def", "dispatch_reply", "(", "self", ",", "reply", ",", "value", ")", ":", "method", "=", "reply", ".", "method", "call_id", "=", "reply", ".", "call_id", "task_id", "=", "reply", ".", "task_id", "if", "method", "&", "ACK", ":", "try", ":", "result_qu...
Dispatches the reply to the proper queue.
[ "Dispatches", "the", "reply", "to", "the", "proper", "queue", "." ]
python
test
klen/muffin-session
muffin_session.py
https://github.com/klen/muffin-session/blob/f1d14d12b7d09d8cc40be14b0dfa0b1e2f4ae8e9/muffin_session.py#L80-L85
async def save(self, request, response): """Save session to response cookies.""" if isinstance(response, Response) and SESSION_KEY in request and not response.prepared: session = request[SESSION_KEY] if session.save(response.set_cookie): self.app.logger.debug('Session saved: %s', session)
[ "async", "def", "save", "(", "self", ",", "request", ",", "response", ")", ":", "if", "isinstance", "(", "response", ",", "Response", ")", "and", "SESSION_KEY", "in", "request", "and", "not", "response", ".", "prepared", ":", "session", "=", "request", "...
Save session to response cookies.
[ "Save", "session", "to", "response", "cookies", "." ]
python
train
coursera-dl/coursera-dl
coursera/cookies.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/cookies.py#L111-L157
def login(session, username, password, class_name=None): """ Login on coursera.org with the given credentials. This adds the following cookies to the session: sessionid, maestro_login, maestro_login_flag """ logging.debug('Initiating login.') try: session.cookies.clear('.coursera.org') logging.debug('Cleared .coursera.org cookies.') except KeyError: logging.debug('There were no .coursera.org cookies to be cleared.') # Hit class url if class_name is not None: class_url = CLASS_URL.format(class_name=class_name) r = requests.get(class_url, allow_redirects=False) try: r.raise_for_status() except requests.exceptions.HTTPError as e: logging.error(e) raise ClassNotFound(class_name) headers = prepare_auth_headers(session, include_cauth=False) data = { 'email': username, 'password': password, 'webrequest': 'true' } # Auth API V3 r = session.post(AUTH_URL_V3, data=data, headers=headers, allow_redirects=False) try: r.raise_for_status() # Some how the order of cookies parameters are important # for coursera!!! v = session.cookies.pop('CAUTH') session.cookies.set('CAUTH', v) except requests.exceptions.HTTPError as e: raise AuthenticationFailed('Cannot login on coursera.org: %s' % e) logging.info('Logged in on coursera.org.')
[ "def", "login", "(", "session", ",", "username", ",", "password", ",", "class_name", "=", "None", ")", ":", "logging", ".", "debug", "(", "'Initiating login.'", ")", "try", ":", "session", ".", "cookies", ".", "clear", "(", "'.coursera.org'", ")", "logging...
Login on coursera.org with the given credentials. This adds the following cookies to the session: sessionid, maestro_login, maestro_login_flag
[ "Login", "on", "coursera", ".", "org", "with", "the", "given", "credentials", "." ]
python
train
ContextLab/quail
quail/distance.py
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/distance.py#L19-L21
def euclidean(a, b): "Returns euclidean distance between a and b" return np.linalg.norm(np.subtract(a, b))
[ "def", "euclidean", "(", "a", ",", "b", ")", ":", "return", "np", ".", "linalg", ".", "norm", "(", "np", ".", "subtract", "(", "a", ",", "b", ")", ")" ]
Returns euclidean distance between a and b
[ "Returns", "euclidean", "distance", "between", "a", "and", "b" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/area_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/area_attention.py#L47-L75
def _pool_one_shape(features_2d, area_width, area_height, batch_size, width, height, depth, fn=tf.reduce_max, name=None): """Pools for an area in features_2d. Args: features_2d: a Tensor in a shape of [batch_size, height, width, depth]. area_width: the max width allowed for an area. area_height: the max height allowed for an area. batch_size: the batch size. width: the width of the memory. height: the height of the memory. depth: the depth of the features. fn: the TF function for the pooling. name: the op name. Returns: pool_tensor: A Tensor of shape [batch_size, num_areas, depth] """ with tf.name_scope(name, default_name="pool_one_shape"): images = [] for y_shift in range(area_height): image_height = tf.maximum(height - area_height + 1 + y_shift, 0) for x_shift in range(area_width): image_width = tf.maximum(width - area_width + 1 + x_shift, 0) area = features_2d[:, y_shift:image_height, x_shift:image_width, :] flatten_area = tf.reshape(area, [batch_size, -1, depth, 1]) images.append(flatten_area) image_tensor = tf.concat(images, axis=3) max_tensor = fn(image_tensor, axis=3) return max_tensor
[ "def", "_pool_one_shape", "(", "features_2d", ",", "area_width", ",", "area_height", ",", "batch_size", ",", "width", ",", "height", ",", "depth", ",", "fn", "=", "tf", ".", "reduce_max", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope...
Pools for an area in features_2d. Args: features_2d: a Tensor in a shape of [batch_size, height, width, depth]. area_width: the max width allowed for an area. area_height: the max height allowed for an area. batch_size: the batch size. width: the width of the memory. height: the height of the memory. depth: the depth of the features. fn: the TF function for the pooling. name: the op name. Returns: pool_tensor: A Tensor of shape [batch_size, num_areas, depth]
[ "Pools", "for", "an", "area", "in", "features_2d", "." ]
python
train
petebachant/PXL
pxl/timeseries.py
https://github.com/petebachant/PXL/blob/d7d06cb74422e1ac0154741351fbecea080cfcc0/pxl/timeseries.py#L199-L202
def student_t(degrees_of_freedom, confidence=0.95): """Return Student-t statistic for given DOF and confidence interval.""" return scipy.stats.t.interval(alpha=confidence, df=degrees_of_freedom)[-1]
[ "def", "student_t", "(", "degrees_of_freedom", ",", "confidence", "=", "0.95", ")", ":", "return", "scipy", ".", "stats", ".", "t", ".", "interval", "(", "alpha", "=", "confidence", ",", "df", "=", "degrees_of_freedom", ")", "[", "-", "1", "]" ]
Return Student-t statistic for given DOF and confidence interval.
[ "Return", "Student", "-", "t", "statistic", "for", "given", "DOF", "and", "confidence", "interval", "." ]
python
train
markchil/gptools
gptools/gaussian_process.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/gaussian_process.py#L342-L348
def free_param_bounds(self): """Combined free hyperparameter bounds for the kernel, noise kernel and (if present) mean function. """ fpb = CombinedBounds(self.k.free_param_bounds, self.noise_k.free_param_bounds) if self.mu is not None: fpb = CombinedBounds(fpb, self.mu.free_param_bounds) return fpb
[ "def", "free_param_bounds", "(", "self", ")", ":", "fpb", "=", "CombinedBounds", "(", "self", ".", "k", ".", "free_param_bounds", ",", "self", ".", "noise_k", ".", "free_param_bounds", ")", "if", "self", ".", "mu", "is", "not", "None", ":", "fpb", "=", ...
Combined free hyperparameter bounds for the kernel, noise kernel and (if present) mean function.
[ "Combined", "free", "hyperparameter", "bounds", "for", "the", "kernel", "noise", "kernel", "and", "(", "if", "present", ")", "mean", "function", "." ]
python
train
saltstack/salt
salt/utils/docker/translate/helpers.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/helpers.py#L176-L190
def translate_command(val): ''' Input should either be a single string, or a list of strings. This is used for the two args that deal with commands ("command" and "entrypoint"). ''' if isinstance(val, six.string_types): return val elif isinstance(val, list): for idx in range(len(val)): if not isinstance(val[idx], six.string_types): val[idx] = six.text_type(val[idx]) else: # Make sure we have a string val = six.text_type(val) return val
[ "def", "translate_command", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "return", "val", "elif", "isinstance", "(", "val", ",", "list", ")", ":", "for", "idx", "in", "range", "(", "len", "(", "val"...
Input should either be a single string, or a list of strings. This is used for the two args that deal with commands ("command" and "entrypoint").
[ "Input", "should", "either", "be", "a", "single", "string", "or", "a", "list", "of", "strings", ".", "This", "is", "used", "for", "the", "two", "args", "that", "deal", "with", "commands", "(", "command", "and", "entrypoint", ")", "." ]
python
train
klen/muffin-peewee
muffin_peewee/plugin.py
https://github.com/klen/muffin-peewee/blob/8e893e3ea1dfc82fbcfc6efe784308c8d4e2852e/muffin_peewee/plugin.py#L43-L109
def setup(self, app): # noqa """Initialize the application.""" super().setup(app) # Setup Database self.database.initialize(connect(self.cfg.connection, **self.cfg.connection_params)) # Fix SQLite in-memory database if self.database.database == ':memory:': self.cfg.connection_manual = True if not self.cfg.migrations_enabled: return # Setup migration engine self.router = Router(self.database, migrate_dir=self.cfg.migrations_path) # Register migration commands def pw_migrate(name: str=None, fake: bool=False): """Run application's migrations. :param name: Choose a migration' name :param fake: Run as fake. Update migration history and don't touch the database """ self.router.run(name, fake=fake) self.app.manage.command(pw_migrate) def pw_rollback(name: str=None): """Rollback a migration. :param name: Migration name (actually it always should be a last one) """ if not name: name = self.router.done[-1] self.router.rollback(name) self.app.manage.command(pw_rollback) def pw_create(name: str='auto', auto: bool=False): """Create a migration. :param name: Set name of migration [auto] :param auto: Track changes and setup migrations automatically """ if auto: auto = list(self.models.values()) self.router.create(name, auto) self.app.manage.command(pw_create) def pw_list(): """List migrations.""" self.router.logger.info('Migrations are done:') self.router.logger.info('\n'.join(self.router.done)) self.router.logger.info('') self.router.logger.info('Migrations are undone:') self.router.logger.info('\n'.join(self.router.diff)) self.app.manage.command(pw_list) @self.app.manage.command def pw_merge(): """Merge migrations into one.""" self.router.merge() self.app.manage.command(pw_merge)
[ "def", "setup", "(", "self", ",", "app", ")", ":", "# noqa", "super", "(", ")", ".", "setup", "(", "app", ")", "# Setup Database", "self", ".", "database", ".", "initialize", "(", "connect", "(", "self", ".", "cfg", ".", "connection", ",", "*", "*", ...
Initialize the application.
[ "Initialize", "the", "application", "." ]
python
valid
Netflix-Skunkworks/cloudaux
cloudaux/orchestration/aws/image.py
https://github.com/Netflix-Skunkworks/cloudaux/blob/c4b0870c3ac68b1c69e71d33cf78b6a8bdf437ea/cloudaux/orchestration/aws/image.py#L50-L88
def get_image(image_id, flags=FLAGS.ALL, **conn): """ Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI) { "Architecture": "x86_64", "Arn": "arn:aws:ec2:us-east-1::image/ami-11111111", "BlockDeviceMappings": [], "CreationDate": "2013-07-11T16:04:06.000Z", "Description": "...", "Hypervisor": "xen", "ImageId": "ami-11111111", "ImageLocation": "111111111111/...", "ImageType": "machine", "KernelId": "aki-88888888", "LaunchPermissions": [], "Name": "...", "OwnerId": "111111111111", "ProductCodes": [], "Public": false, "RamdiskId": {}, "RootDeviceName": "/dev/sda1", "RootDeviceType": "ebs", "SriovNetSupport": "simple", "State": "available", "Tags": [], "VirtualizationType": "hvm", "_version": 1 } :param image_id: str ami id :param flags: By default, set to ALL fields :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out image. """ image = dict(ImageId=image_id) conn['region'] = conn.get('region', 'us-east-1') return registry.build_out(flags, image, **conn)
[ "def", "get_image", "(", "image_id", ",", "flags", "=", "FLAGS", ".", "ALL", ",", "*", "*", "conn", ")", ":", "image", "=", "dict", "(", "ImageId", "=", "image_id", ")", "conn", "[", "'region'", "]", "=", "conn", ".", "get", "(", "'region'", ",", ...
Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI) { "Architecture": "x86_64", "Arn": "arn:aws:ec2:us-east-1::image/ami-11111111", "BlockDeviceMappings": [], "CreationDate": "2013-07-11T16:04:06.000Z", "Description": "...", "Hypervisor": "xen", "ImageId": "ami-11111111", "ImageLocation": "111111111111/...", "ImageType": "machine", "KernelId": "aki-88888888", "LaunchPermissions": [], "Name": "...", "OwnerId": "111111111111", "ProductCodes": [], "Public": false, "RamdiskId": {}, "RootDeviceName": "/dev/sda1", "RootDeviceType": "ebs", "SriovNetSupport": "simple", "State": "available", "Tags": [], "VirtualizationType": "hvm", "_version": 1 } :param image_id: str ami id :param flags: By default, set to ALL fields :param conn: dict containing enough information to make a connection to the desired account. Must at least have 'assume_role' key. :return: dict containing a fully built out image.
[ "Orchestrates", "all", "the", "calls", "required", "to", "fully", "build", "out", "an", "EC2", "Image", "(", "AMI", "AKI", "ARI", ")" ]
python
valid
qacafe/cdrouter.py
cdrouter/configs.py
https://github.com/qacafe/cdrouter.py/blob/aacf2c6ab0b987250f7b1892f4bba14bb2b7dbe5/cdrouter/configs.py#L357-L364
def bulk_copy(self, ids): """Bulk copy a set of configs. :param ids: Int list of config IDs. :return: :class:`configs.Config <configs.Config>` list """ schema = self.GET_SCHEMA return self.service.bulk_copy(self.base, self.RESOURCE, ids, schema)
[ "def", "bulk_copy", "(", "self", ",", "ids", ")", ":", "schema", "=", "self", ".", "GET_SCHEMA", "return", "self", ".", "service", ".", "bulk_copy", "(", "self", ".", "base", ",", "self", ".", "RESOURCE", ",", "ids", ",", "schema", ")" ]
Bulk copy a set of configs. :param ids: Int list of config IDs. :return: :class:`configs.Config <configs.Config>` list
[ "Bulk", "copy", "a", "set", "of", "configs", "." ]
python
train
bitprophet/releases
releases/line_manager.py
https://github.com/bitprophet/releases/blob/97a763e41bbe7374106a1c648b89346a0d935429/releases/line_manager.py#L59-L75
def has_stable_releases(self): """ Returns whether stable (post-0.x) releases seem to exist. """ nonzeroes = self.stable_families # Nothing but 0.x releases -> yup we're prehistory if not nonzeroes: return False # Presumably, if there's >1 major family besides 0.x, we're at least # one release into the 1.0 (or w/e) line. if len(nonzeroes) > 1: return True # If there's only one, we may still be in the space before its N.0.0 as # well; we can check by testing for existence of bugfix buckets return any( x for x in self[nonzeroes[0]] if not x.startswith('unreleased') )
[ "def", "has_stable_releases", "(", "self", ")", ":", "nonzeroes", "=", "self", ".", "stable_families", "# Nothing but 0.x releases -> yup we're prehistory", "if", "not", "nonzeroes", ":", "return", "False", "# Presumably, if there's >1 major family besides 0.x, we're at least", ...
Returns whether stable (post-0.x) releases seem to exist.
[ "Returns", "whether", "stable", "(", "post", "-", "0", ".", "x", ")", "releases", "seem", "to", "exist", "." ]
python
train
reorx/torext
torext/handlers/oauth.py
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/oauth.py#L511-L569
def get_authenticated_user(self, redirect_uri, callback, scope=None, **args): """ class RenrenHandler(tornado.web.RequestHandler, RenrenGraphMixin): @tornado.web.asynchronous @gen.engine def get(self): self.get_authenticated_user( callback=(yield gen.Callback('key')), redirect_uri=url) user = yield gen.Wait('key') if not user: raise web.HTTPError(500, "Renren auth failed") # do something else self.finish() """ code = self.get_argument('code', None) if not code: self.authorize_redirect(redirect_uri, scope=scope, **args) return self.get_access_token( code, callback=(yield gen.Callback('_RenrenGraphMixin.get_authenticated_user')), redirect_uri=redirect_uri) response = yield gen.Wait('_RenrenGraphMixin.get_authenticated_user') if not response: callback(None) return try: user = json_decode(response.body) except: logging.warning("Error response %s fetching %s", response.body, response.request.url) callback(None) return if 'error' in user: logging.warning("Error response %s fetching %s", user['error_description'], response.request.url) callback(None) return #{{{ get session key self.renren_request('renren_api/session_key', user['access_token'], callback=(yield gen.Callback('_RenrenGraphMixin._session_key'))) response = yield gen.Wait('_RenrenGraphMixin._session_key') if response.error and not response.body: logging.warning("Error response %s fetching %s", response.error, response.request.url) elif response.error: logging.warning("Error response %s fetching %s: %s", response.error, response.request.url, response.body) else: try: user['session'] = json_decode(response.body) except: pass #}}} #TODO delete when renren graph api released callback(user) return
[ "def", "get_authenticated_user", "(", "self", ",", "redirect_uri", ",", "callback", ",", "scope", "=", "None", ",", "*", "*", "args", ")", ":", "code", "=", "self", ".", "get_argument", "(", "'code'", ",", "None", ")", "if", "not", "code", ":", "self",...
class RenrenHandler(tornado.web.RequestHandler, RenrenGraphMixin): @tornado.web.asynchronous @gen.engine def get(self): self.get_authenticated_user( callback=(yield gen.Callback('key')), redirect_uri=url) user = yield gen.Wait('key') if not user: raise web.HTTPError(500, "Renren auth failed") # do something else self.finish()
[ "class", "RenrenHandler", "(", "tornado", ".", "web", ".", "RequestHandler", "RenrenGraphMixin", ")", ":" ]
python
train
cackharot/suds-py3
suds/wsdl.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/wsdl.py#L849-L860
def setlocation(self, url, names=None): """ Override the invocation location (url) for service method. @param url: A url location. @type url: A url. @param names: A list of method names. None=ALL @type names: [str,..] """ for p in self.ports: for m in p.methods.values(): if names is None or m.name in names: m.location = url
[ "def", "setlocation", "(", "self", ",", "url", ",", "names", "=", "None", ")", ":", "for", "p", "in", "self", ".", "ports", ":", "for", "m", "in", "p", ".", "methods", ".", "values", "(", ")", ":", "if", "names", "is", "None", "or", "m", ".", ...
Override the invocation location (url) for service method. @param url: A url location. @type url: A url. @param names: A list of method names. None=ALL @type names: [str,..]
[ "Override", "the", "invocation", "location", "(", "url", ")", "for", "service", "method", "." ]
python
train
pyamg/pyamg
pyamg/multilevel.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/multilevel.py#L260-L269
def grid_complexity(self): """Grid complexity of this multigrid hierarchy. Defined as: Number of unknowns on all levels / Number of unknowns on the finest level """ return sum([level.A.shape[0] for level in self.levels]) /\ float(self.levels[0].A.shape[0])
[ "def", "grid_complexity", "(", "self", ")", ":", "return", "sum", "(", "[", "level", ".", "A", ".", "shape", "[", "0", "]", "for", "level", "in", "self", ".", "levels", "]", ")", "/", "float", "(", "self", ".", "levels", "[", "0", "]", ".", "A"...
Grid complexity of this multigrid hierarchy. Defined as: Number of unknowns on all levels / Number of unknowns on the finest level
[ "Grid", "complexity", "of", "this", "multigrid", "hierarchy", "." ]
python
train
TheEnigmaBlade/praw-script-oauth
praw_script_oauth/praw_script_oauth.py
https://github.com/TheEnigmaBlade/praw-script-oauth/blob/8ff53e226facecd49b6bd2a5b235eaba53dc76b6/praw_script_oauth/praw_script_oauth.py#L43-L66
def get_oauth_token(oauth_key, oauth_secret, username, password, useragent=_DEFAULT_USERAGENT, script_key=None): """ Gets an OAuth token from Reddit or returns a valid locally stored token. Because the retrieved token is stored on the file system (script_key is used to distinguish between files), this function is safe to call across multiple instances or runs. The token is renewed after one hour. This function can be used without PRAW. Note: Only script-based oauth is supported. :param oauth_key: Reddit oauth key :param oauth_secret: Reddit oauth secret :param username: Reddit username :param password: Reddit password :param useragent: Connection useragent (this should be changed, otherwise you'll be heavily rate limited) :param script_key: Key used to distinguish between local token files :return: An OAuth token if one could be retrieved, otherwise None. """ token = _get_local_token(script_key, username) if token is None: token_time = _time_ms() token = _request_oauth_token(oauth_key, oauth_secret, username, password, useragent=useragent) write_config(token, token_time, _get_config_file(script_key, username)) return token
[ "def", "get_oauth_token", "(", "oauth_key", ",", "oauth_secret", ",", "username", ",", "password", ",", "useragent", "=", "_DEFAULT_USERAGENT", ",", "script_key", "=", "None", ")", ":", "token", "=", "_get_local_token", "(", "script_key", ",", "username", ")", ...
Gets an OAuth token from Reddit or returns a valid locally stored token. Because the retrieved token is stored on the file system (script_key is used to distinguish between files), this function is safe to call across multiple instances or runs. The token is renewed after one hour. This function can be used without PRAW. Note: Only script-based oauth is supported. :param oauth_key: Reddit oauth key :param oauth_secret: Reddit oauth secret :param username: Reddit username :param password: Reddit password :param useragent: Connection useragent (this should be changed, otherwise you'll be heavily rate limited) :param script_key: Key used to distinguish between local token files :return: An OAuth token if one could be retrieved, otherwise None.
[ "Gets", "an", "OAuth", "token", "from", "Reddit", "or", "returns", "a", "valid", "locally", "stored", "token", ".", "Because", "the", "retrieved", "token", "is", "stored", "on", "the", "file", "system", "(", "script_key", "is", "used", "to", "distinguish", ...
python
train
tensorlayer/tensorlayer
tensorlayer/layers/recurrent.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/layers/recurrent.py#L596-L648
def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None): """convolution: Parameters ---------- args : tensor 4D Tensor or a list of 4D, batch x n, Tensors. filter_size : tuple of int Filter height and width. num_features : int Nnumber of features. bias_start : float Starting value to initialize the bias; 0 by default. scope : VariableScope For the created subgraph; defaults to "Linear". Returns -------- - A 4D Tensor with shape [batch h w num_features] Raises ------- - ValueError : if some of the arguments has unspecified or wrong shape. """ # Calculate the total size of arguments on dimension 1. total_arg_size_depth = 0 shapes = [a.get_shape().as_list() for a in args] for shape in shapes: if len(shape) != 4: raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes)) if not shape[3]: raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes)) else: total_arg_size_depth += shape[3] dtype = [a.dtype for a in args][0] # Now the computation. with tf.variable_scope(scope or "Conv"): matrix = tf.get_variable( "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype ) if len(args) == 1: res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME') else: res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME') if not bias: return res bias_term = tf.get_variable( "Bias", [num_features], dtype=dtype, initializer=tf.constant_initializer(bias_start, dtype=dtype) ) return res + bias_term
[ "def", "_conv_linear", "(", "args", ",", "filter_size", ",", "num_features", ",", "bias", ",", "bias_start", "=", "0.0", ",", "scope", "=", "None", ")", ":", "# Calculate the total size of arguments on dimension 1.", "total_arg_size_depth", "=", "0", "shapes", "=", ...
convolution: Parameters ---------- args : tensor 4D Tensor or a list of 4D, batch x n, Tensors. filter_size : tuple of int Filter height and width. num_features : int Nnumber of features. bias_start : float Starting value to initialize the bias; 0 by default. scope : VariableScope For the created subgraph; defaults to "Linear". Returns -------- - A 4D Tensor with shape [batch h w num_features] Raises ------- - ValueError : if some of the arguments has unspecified or wrong shape.
[ "convolution", ":" ]
python
valid
django-extensions/django-extensions
django_extensions/management/mysql.py
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/mysql.py#L5-L43
def parse_mysql_cnf(dbinfo): """ Attempt to parse mysql database config file for connection settings. Ideally we would hook into django's code to do this, but read_default_file is handled by the mysql C libs so we have to emulate the behaviour Settings that are missing will return '' returns (user, password, database_name, database_host, database_port) """ read_default_file = dbinfo.get('OPTIONS', {}).get('read_default_file') if read_default_file: config = configparser.RawConfigParser({ 'user': '', 'password': '', 'database': '', 'host': '', 'port': '', 'socket': '', }) import os config.read(os.path.expanduser(read_default_file)) try: user = config.get('client', 'user') password = config.get('client', 'password') database_name = config.get('client', 'database') database_host = config.get('client', 'host') database_port = config.get('client', 'port') socket = config.get('client', 'socket') if database_host == 'localhost' and socket: # mysql actually uses a socket if host is localhost database_host = socket return user, password, database_name, database_host, database_port except configparser.NoSectionError: pass return '', '', '', '', ''
[ "def", "parse_mysql_cnf", "(", "dbinfo", ")", ":", "read_default_file", "=", "dbinfo", ".", "get", "(", "'OPTIONS'", ",", "{", "}", ")", ".", "get", "(", "'read_default_file'", ")", "if", "read_default_file", ":", "config", "=", "configparser", ".", "RawConf...
Attempt to parse mysql database config file for connection settings. Ideally we would hook into django's code to do this, but read_default_file is handled by the mysql C libs so we have to emulate the behaviour Settings that are missing will return '' returns (user, password, database_name, database_host, database_port)
[ "Attempt", "to", "parse", "mysql", "database", "config", "file", "for", "connection", "settings", ".", "Ideally", "we", "would", "hook", "into", "django", "s", "code", "to", "do", "this", "but", "read_default_file", "is", "handled", "by", "the", "mysql", "C"...
python
train
skyfielders/python-skyfield
skyfield/relativity.py
https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/relativity.py#L23-L97
def add_deflection(position, observer, ephemeris, t, include_earth_deflection, count=3): """Update `position` for how solar system masses will deflect its light. Given the ICRS `position` [x,y,z] of an object (au) that is being viewed from the `observer` also expressed as [x,y,z], and given an ephemeris that can be used to determine solar system body positions, and given the time `t` and Boolean `apply_earth` indicating whether to worry about the effect of Earth's mass, and a `count` of how many major solar system bodies to worry about, this function updates `position` in-place to show how the masses in the solar system will deflect its image. """ # Compute light-time to observed object. tlt = length_of(position) / C_AUDAY # Cycle through gravitating bodies. jd_tdb = t.tdb ts = t.ts for name in deflectors[:count]: try: deflector = ephemeris[name] except KeyError: deflector = ephemeris[name + ' barycenter'] # Get position of gravitating body wrt ss barycenter at time 't_tdb'. bposition = deflector.at(ts.tdb(jd=jd_tdb)).position.au # TODO # Get position of gravitating body wrt observer at time 'jd_tdb'. gpv = bposition - observer # Compute light-time from point on incoming light ray that is closest # to gravitating body. dlt = light_time_difference(position, gpv) # Get position of gravitating body wrt ss barycenter at time when # incoming photons were closest to it. tclose = jd_tdb # if dlt > 0.0: # tclose = jd - dlt tclose = where(dlt > 0.0, jd_tdb - dlt, tclose) tclose = where(tlt < dlt, jd_tdb - tlt, tclose) # if tlt < dlt: # tclose = jd - tlt bposition = deflector.at(ts.tdb(jd=tclose)).position.au # TODO rmass = rmasses[name] _add_deflection(position, observer, bposition, rmass) # If observer is not at geocenter, add in deflection due to Earth. 
if include_earth_deflection.any(): deflector = ephemeris['earth'] bposition = deflector.at(ts.tdb(jd=tclose)).position.au # TODO rmass = rmasses['earth'] # TODO: Make the following code less messy, maybe by having # _add_deflection() return a new vector instead of modifying the # old one in-place. deflected_position = position.copy() _add_deflection(deflected_position, observer, bposition, rmass) if include_earth_deflection.shape: position[:,include_earth_deflection] = ( deflected_position[:,include_earth_deflection]) else: position[:] = deflected_position[:]
[ "def", "add_deflection", "(", "position", ",", "observer", ",", "ephemeris", ",", "t", ",", "include_earth_deflection", ",", "count", "=", "3", ")", ":", "# Compute light-time to observed object.", "tlt", "=", "length_of", "(", "position", ")", "/", "C_AUDAY", "...
Update `position` for how solar system masses will deflect its light. Given the ICRS `position` [x,y,z] of an object (au) that is being viewed from the `observer` also expressed as [x,y,z], and given an ephemeris that can be used to determine solar system body positions, and given the time `t` and Boolean `apply_earth` indicating whether to worry about the effect of Earth's mass, and a `count` of how many major solar system bodies to worry about, this function updates `position` in-place to show how the masses in the solar system will deflect its image.
[ "Update", "position", "for", "how", "solar", "system", "masses", "will", "deflect", "its", "light", "." ]
python
train
StorjOld/heartbeat
heartbeat/Merkle/Merkle.py
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/Merkle/Merkle.py#L481-L515
def get_chunk_hash(file, seed, filesz=None, chunksz=DEFAULT_CHUNK_SIZE, bufsz=DEFAULT_BUFFER_SIZE): """returns a hash of a chunk of the file provided. the position of the chunk is determined by the seed. additionally, the hmac of the chunk is calculated from the seed. :param file: a file like object to get the chunk hash from. should support `read()`, `seek()` and `tell()`. :param seed: the seed to use for calculating the chunk position and chunk hash :param chunksz: the size of the chunk to check :param bufsz: an optional buffer size to use for reading the file. """ if (filesz is None): file.seek(0, 2) filesz = file.tell() if (filesz < chunksz): chunksz = filesz prf = KeyedPRF(seed, filesz - chunksz + 1) i = prf.eval(0) file.seek(i) h = hmac.new(seed, None, hashlib.sha256) while (True): if (chunksz < bufsz): bufsz = chunksz buffer = file.read(bufsz) h.update(buffer) chunksz -= len(buffer) assert(chunksz >= 0) if (chunksz == 0): break return h.digest()
[ "def", "get_chunk_hash", "(", "file", ",", "seed", ",", "filesz", "=", "None", ",", "chunksz", "=", "DEFAULT_CHUNK_SIZE", ",", "bufsz", "=", "DEFAULT_BUFFER_SIZE", ")", ":", "if", "(", "filesz", "is", "None", ")", ":", "file", ".", "seek", "(", "0", ",...
returns a hash of a chunk of the file provided. the position of the chunk is determined by the seed. additionally, the hmac of the chunk is calculated from the seed. :param file: a file like object to get the chunk hash from. should support `read()`, `seek()` and `tell()`. :param seed: the seed to use for calculating the chunk position and chunk hash :param chunksz: the size of the chunk to check :param bufsz: an optional buffer size to use for reading the file.
[ "returns", "a", "hash", "of", "a", "chunk", "of", "the", "file", "provided", ".", "the", "position", "of", "the", "chunk", "is", "determined", "by", "the", "seed", ".", "additionally", "the", "hmac", "of", "the", "chunk", "is", "calculated", "from", "the...
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/cmdline.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/cmdline.py#L558-L585
def do_execute(self, options, args): """Implementation of 'coverage run'.""" # Set the first path element properly. old_path0 = sys.path[0] # Run the script. self.coverage.start() code_ran = True try: try: if options.module: sys.path[0] = '' self.run_python_module(args[0], args) else: filename = args[0] sys.path[0] = os.path.abspath(os.path.dirname(filename)) self.run_python_file(filename, args) except NoSource: code_ran = False raise finally: self.coverage.stop() if code_ran: self.coverage.save() # Restore the old path sys.path[0] = old_path0
[ "def", "do_execute", "(", "self", ",", "options", ",", "args", ")", ":", "# Set the first path element properly.", "old_path0", "=", "sys", ".", "path", "[", "0", "]", "# Run the script.", "self", ".", "coverage", ".", "start", "(", ")", "code_ran", "=", "Tr...
Implementation of 'coverage run'.
[ "Implementation", "of", "coverage", "run", "." ]
python
test
andycasey/ads
ads/search.py
https://github.com/andycasey/ads/blob/928415e202db80658cd8532fa4c3a00d0296b5c5/ads/search.py#L111-L132
def _get_field(self, field): """ Queries the api for a single field for the record by `id`. This method should only be called indirectly by cached properties. :param field: name of the record field to load """ if not hasattr(self, "id") or self.id is None: raise APIResponseError("Cannot query an article without an id") sq = next(SearchQuery(q="id:{}".format(self.id), fl=field)) # If the requested field is not present in the returning Solr doc, # return None instead of hitting _get_field again. if field not in sq._raw: # These fields will never be in the result solr document; # pass through to __getattribute__ to allow the relevant # secondary service queries if field in ["reference", "citation", "metrics", "bibtex"]: pass else: return None value = sq.__getattribute__(field) self._raw[field] = value return value
[ "def", "_get_field", "(", "self", ",", "field", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"id\"", ")", "or", "self", ".", "id", "is", "None", ":", "raise", "APIResponseError", "(", "\"Cannot query an article without an id\"", ")", "sq", "=", "n...
Queries the api for a single field for the record by `id`. This method should only be called indirectly by cached properties. :param field: name of the record field to load
[ "Queries", "the", "api", "for", "a", "single", "field", "for", "the", "record", "by", "id", ".", "This", "method", "should", "only", "be", "called", "indirectly", "by", "cached", "properties", ".", ":", "param", "field", ":", "name", "of", "the", "record...
python
train
instacart/lore
lore/env.py
https://github.com/instacart/lore/blob/0367bde9a52e69162832906acc61e8d65c5ec5d4/lore/env.py#L174-L191
def launch(): """Ensure that python is running from the Lore virtualenv past this point. """ if launched(): check_version() os.chdir(ROOT) return if not os.path.exists(BIN_LORE): missing = ' %s virtualenv is missing.' % APP if '--launched' in sys.argv: sys.exit(ansi.error() + missing + ' Please check for errors during:\n $ lore install\n') else: print(ansi.warning() + missing) import lore.__main__ lore.__main__.install(None, None) reboot('--env-launched')
[ "def", "launch", "(", ")", ":", "if", "launched", "(", ")", ":", "check_version", "(", ")", "os", ".", "chdir", "(", "ROOT", ")", "return", "if", "not", "os", ".", "path", ".", "exists", "(", "BIN_LORE", ")", ":", "missing", "=", "' %s virtualenv is ...
Ensure that python is running from the Lore virtualenv past this point.
[ "Ensure", "that", "python", "is", "running", "from", "the", "Lore", "virtualenv", "past", "this", "point", "." ]
python
train
gc3-uzh-ch/elasticluster
elasticluster/cluster.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/cluster.py#L372-L396
def add_nodes(self, kind, num, image_id, image_user, flavor, security_group, image_userdata='', **extra): """Helper method to add multiple nodes of the same kind to a cluster. :param str kind: kind of node to start. this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider` :param int num: number of nodes to add of this kind :param str image_id: image id to use for the cloud instance (e.g. ami on amazon) :param str image_user: user to login on given image :param str flavor: machine type to use for cloud instance :param str security_group: security group that defines firewall rules to the instance :param str image_userdata: commands to execute after instance starts """ for i in range(num): self.add_node(kind, image_id, image_user, flavor, security_group, image_userdata=image_userdata, **extra)
[ "def", "add_nodes", "(", "self", ",", "kind", ",", "num", ",", "image_id", ",", "image_user", ",", "flavor", ",", "security_group", ",", "image_userdata", "=", "''", ",", "*", "*", "extra", ")", ":", "for", "i", "in", "range", "(", "num", ")", ":", ...
Helper method to add multiple nodes of the same kind to a cluster. :param str kind: kind of node to start. this refers to the groups defined in the ansible setup provider :py:class:`elasticluster.providers.AnsibleSetupProvider` :param int num: number of nodes to add of this kind :param str image_id: image id to use for the cloud instance (e.g. ami on amazon) :param str image_user: user to login on given image :param str flavor: machine type to use for cloud instance :param str security_group: security group that defines firewall rules to the instance :param str image_userdata: commands to execute after instance starts
[ "Helper", "method", "to", "add", "multiple", "nodes", "of", "the", "same", "kind", "to", "a", "cluster", "." ]
python
train
Eyepea/systemDream
src/systemdream/journal/helpers.py
https://github.com/Eyepea/systemDream/blob/018fa5e9ff0f4fdc62fa85b235725d0f8b24f1a8/src/systemdream/journal/helpers.py#L13-L61
def send(MESSAGE, SOCKET, MESSAGE_ID=None, CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None, **kwargs): r"""Send a message to the journal. >>> journal.send('Hello world') >>> journal.send('Hello, again, world', FIELD2='Greetings!') >>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef') Value of the MESSAGE argument will be used for the MESSAGE= field. MESSAGE must be a string and will be sent as UTF-8 to the journal. MESSAGE_ID can be given to uniquely identify the type of message. It must be a string or a uuid.UUID object. CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to identify the caller. Unless at least on of the three is given, values are extracted from the stack frame of the caller of send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE must be an integer. Additional fields for the journal entry can only be specified as keyword arguments. The payload can be either a string or bytes. A string will be sent as UTF-8, and bytes will be sent as-is to the journal. Other useful fields include PRIORITY, SYSLOG_FACILITY, SYSLOG_IDENTIFIER, SYSLOG_PID. """ args = ['MESSAGE=' + MESSAGE] if MESSAGE_ID is not None: id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID) args.append('MESSAGE_ID=' + id) if CODE_LINE == CODE_FILE == CODE_FUNC == None: CODE_FILE, CODE_LINE, CODE_FUNC = \ _traceback.extract_stack(limit=2)[0][:3] if CODE_FILE is not None: args.append('CODE_FILE=' + CODE_FILE) if CODE_LINE is not None: args.append('CODE_LINE={:d}'.format(CODE_LINE)) if CODE_FUNC is not None: args.append('CODE_FUNC=' + CODE_FUNC) args.extend(_make_line(key.upper(), val) for key, val in kwargs.items()) return sendv(SOCKET, *args)
[ "def", "send", "(", "MESSAGE", ",", "SOCKET", ",", "MESSAGE_ID", "=", "None", ",", "CODE_FILE", "=", "None", ",", "CODE_LINE", "=", "None", ",", "CODE_FUNC", "=", "None", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "'MESSAGE='", "+", "MESSAG...
r"""Send a message to the journal. >>> journal.send('Hello world') >>> journal.send('Hello, again, world', FIELD2='Greetings!') >>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef') Value of the MESSAGE argument will be used for the MESSAGE= field. MESSAGE must be a string and will be sent as UTF-8 to the journal. MESSAGE_ID can be given to uniquely identify the type of message. It must be a string or a uuid.UUID object. CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to identify the caller. Unless at least on of the three is given, values are extracted from the stack frame of the caller of send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE must be an integer. Additional fields for the journal entry can only be specified as keyword arguments. The payload can be either a string or bytes. A string will be sent as UTF-8, and bytes will be sent as-is to the journal. Other useful fields include PRIORITY, SYSLOG_FACILITY, SYSLOG_IDENTIFIER, SYSLOG_PID.
[ "r", "Send", "a", "message", "to", "the", "journal", "." ]
python
train
infoxchange/supervisor-logging
supervisor_logging/__init__.py
https://github.com/infoxchange/supervisor-logging/blob/2d4411378fb52799bc506a68f1a914cbe671b13b/supervisor_logging/__init__.py#L74-L81
def eventdata(payload): """ Parse a Supervisor event. """ headerinfo, data = payload.split('\n', 1) headers = get_headers(headerinfo) return headers, data
[ "def", "eventdata", "(", "payload", ")", ":", "headerinfo", ",", "data", "=", "payload", ".", "split", "(", "'\\n'", ",", "1", ")", "headers", "=", "get_headers", "(", "headerinfo", ")", "return", "headers", ",", "data" ]
Parse a Supervisor event.
[ "Parse", "a", "Supervisor", "event", "." ]
python
train
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/_helpers.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/_helpers.py#L552-L580
def pbs_for_set_no_merge(document_path, document_data): """Make ``Write`` protobufs for ``set()`` methods. Args: document_path (str): A fully-qualified document path. document_data (dict): Property names and values to use for replacing a document. Returns: List[google.cloud.firestore_v1beta1.types.Write]: One or two ``Write`` protobuf instances for ``set()``. """ extractor = DocumentExtractor(document_data) if extractor.deleted_fields: raise ValueError( "Cannot apply DELETE_FIELD in a set request without " "specifying 'merge=True' or 'merge=[field_paths]'." ) # Conformance tests require send the 'update_pb' even if the document # contains only transforms. write_pbs = [extractor.get_update_pb(document_path)] if extractor.has_transforms: transform_pb = extractor.get_transform_pb(document_path) write_pbs.append(transform_pb) return write_pbs
[ "def", "pbs_for_set_no_merge", "(", "document_path", ",", "document_data", ")", ":", "extractor", "=", "DocumentExtractor", "(", "document_data", ")", "if", "extractor", ".", "deleted_fields", ":", "raise", "ValueError", "(", "\"Cannot apply DELETE_FIELD in a set request ...
Make ``Write`` protobufs for ``set()`` methods. Args: document_path (str): A fully-qualified document path. document_data (dict): Property names and values to use for replacing a document. Returns: List[google.cloud.firestore_v1beta1.types.Write]: One or two ``Write`` protobuf instances for ``set()``.
[ "Make", "Write", "protobufs", "for", "set", "()", "methods", "." ]
python
train
singularityhub/sregistry-cli
sregistry/main/globus/__init__.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/globus/__init__.py#L92-L113
def _update_tokens(self): '''Present the client with authentication flow to get tokens from code. This simply updates the client _response to be used to get tokens for auth and transfer (both use access_token as index). We call this not on client initialization, but when the client is actually needed. ''' self._client.oauth2_start_flow(refresh_tokens=True) authorize_url = self._client.oauth2_get_authorize_url() print('Please go to this URL and login: {0}'.format(authorize_url)) auth_code = raw_input( 'Please enter the code you get after login here: ').strip() # Save to client self._response = self._client.oauth2_exchange_code_for_tokens(auth_code) self.auth = self._response.by_resource_server['auth.globus.org'] self.transfer = self._response.by_resource_server['transfer.api.globus.org'] self._update_setting('GLOBUS_TRANSFER_RESPONSE', self.transfer) self._update_setting('GLOBUS_AUTH_RESPONSE', self.auth)
[ "def", "_update_tokens", "(", "self", ")", ":", "self", ".", "_client", ".", "oauth2_start_flow", "(", "refresh_tokens", "=", "True", ")", "authorize_url", "=", "self", ".", "_client", ".", "oauth2_get_authorize_url", "(", ")", "print", "(", "'Please go to this ...
Present the client with authentication flow to get tokens from code. This simply updates the client _response to be used to get tokens for auth and transfer (both use access_token as index). We call this not on client initialization, but when the client is actually needed.
[ "Present", "the", "client", "with", "authentication", "flow", "to", "get", "tokens", "from", "code", ".", "This", "simply", "updates", "the", "client", "_response", "to", "be", "used", "to", "get", "tokens", "for", "auth", "and", "transfer", "(", "both", "...
python
test
MeirKriheli/python-bidi
bidi/algorithm.py
https://github.com/MeirKriheli/python-bidi/blob/a0e265bb465c1b7ad628487991e33b5ebe364641/bidi/algorithm.py#L580-L596
def apply_mirroring(storage, debug): """Applies L4: mirroring See: http://unicode.org/reports/tr9/#L4 """ # L4. A character is depicted by a mirrored glyph if and only if (a) the # resolved directionality of that character is R, and (b) the # Bidi_Mirrored property value of that character is true. for _ch in storage['chars']: unichar = _ch['ch'] if mirrored(unichar) and \ _embedding_direction(_ch['level']) == 'R': _ch['ch'] = MIRRORED.get(unichar, unichar) if debug: debug_storage(storage)
[ "def", "apply_mirroring", "(", "storage", ",", "debug", ")", ":", "# L4. A character is depicted by a mirrored glyph if and only if (a) the", "# resolved directionality of that character is R, and (b) the", "# Bidi_Mirrored property value of that character is true.", "for", "_ch", "in", ...
Applies L4: mirroring See: http://unicode.org/reports/tr9/#L4
[ "Applies", "L4", ":", "mirroring" ]
python
test
MacHu-GWU/loggerFactory-project
loggerFactory/logger.py
https://github.com/MacHu-GWU/loggerFactory-project/blob/4de19e275e01dc583b1af9ceeacef0c6084cd6e0/loggerFactory/logger.py#L71-L73
def error(self, msg, indent=0, **kwargs): """invoke ``self.logger.error``""" return self.logger.error(self._indent(msg, indent), **kwargs)
[ "def", "error", "(", "self", ",", "msg", ",", "indent", "=", "0", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "logger", ".", "error", "(", "self", ".", "_indent", "(", "msg", ",", "indent", ")", ",", "*", "*", "kwargs", ")" ]
invoke ``self.logger.error``
[ "invoke", "self", ".", "logger", ".", "error" ]
python
train
rbit/pydtls
dtls/sslconnection.py
https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/sslconnection.py#L699-L715
def connect(self, peer_address): """Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer """ self._sock.connect(peer_address) peer_address = self._sock.getpeername() # substituted host addrinfo BIO_dgram_set_connected(self._wbio.value, peer_address) assert self._wbio is self._rbio if self._do_handshake_on_connect: self.do_handshake()
[ "def", "connect", "(", "self", ",", "peer_address", ")", ":", "self", ".", "_sock", ".", "connect", "(", "peer_address", ")", "peer_address", "=", "self", ".", "_sock", ".", "getpeername", "(", ")", "# substituted host addrinfo", "BIO_dgram_set_connected", "(", ...
Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer
[ "Client", "-", "side", "UDP", "connection", "establishment" ]
python
train
SpriteLink/NIPAP
utilities/export-template.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/utilities/export-template.py#L40-L54
def get_prefixes(self, query): """ Get prefix data from NIPAP """ try: res = Prefix.smart_search(query, {}) except socket.error: print >> sys.stderr, "Connection refused, please check hostname & port" sys.exit(1) except xmlrpclib.ProtocolError: print >> sys.stderr, "Authentication failed, please check your username / password" sys.exit(1) for p in res['result']: p.prefix_ipy = IPy.IP(p.prefix) self.prefixes.append(p)
[ "def", "get_prefixes", "(", "self", ",", "query", ")", ":", "try", ":", "res", "=", "Prefix", ".", "smart_search", "(", "query", ",", "{", "}", ")", "except", "socket", ".", "error", ":", "print", ">>", "sys", ".", "stderr", ",", "\"Connection refused,...
Get prefix data from NIPAP
[ "Get", "prefix", "data", "from", "NIPAP" ]
python
train
limodou/uliweb
uliweb/utils/common.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L612-L617
def application_path(path): """ Join application project_dir and path """ from uliweb import application return os.path.join(application.project_dir, path)
[ "def", "application_path", "(", "path", ")", ":", "from", "uliweb", "import", "application", "return", "os", ".", "path", ".", "join", "(", "application", ".", "project_dir", ",", "path", ")" ]
Join application project_dir and path
[ "Join", "application", "project_dir", "and", "path" ]
python
train
chapel-lang/sphinxcontrib-chapeldomain
sphinxcontrib/chapeldomain.py
https://github.com/chapel-lang/sphinxcontrib-chapeldomain/blob/00970fe1b3aed5deb1186bec19bf0912d2f92853/sphinxcontrib/chapeldomain.py#L1008-L1025
def get_objects(self): """Return iterable of "object descriptions", which are tuple with these items: * `name` * `dispname` * `type` * `docname` * `anchor` * `priority` For details on each item, see :py:meth:`~sphinx.domains.Domain.get_objects`. """ for modname, info in self.data['modules'].items(): yield (modname, modname, 'module', info[0], 'module-' + modname, 0) for refname, (docname, type_name) in self.data['objects'].items(): if type_name != 'module': # modules are already handled yield (refname, refname, type_name, docname, refname, 1)
[ "def", "get_objects", "(", "self", ")", ":", "for", "modname", ",", "info", "in", "self", ".", "data", "[", "'modules'", "]", ".", "items", "(", ")", ":", "yield", "(", "modname", ",", "modname", ",", "'module'", ",", "info", "[", "0", "]", ",", ...
Return iterable of "object descriptions", which are tuple with these items: * `name` * `dispname` * `type` * `docname` * `anchor` * `priority` For details on each item, see :py:meth:`~sphinx.domains.Domain.get_objects`.
[ "Return", "iterable", "of", "object", "descriptions", "which", "are", "tuple", "with", "these", "items", ":" ]
python
train
MillionIntegrals/vel
vel/api/model.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/api/model.py#L139-L142
def loss(self, x_data, y_true): """ Forward propagate network and return a value of loss function """ y_pred = self(x_data) return y_pred, self.loss_value(x_data, y_true, y_pred)
[ "def", "loss", "(", "self", ",", "x_data", ",", "y_true", ")", ":", "y_pred", "=", "self", "(", "x_data", ")", "return", "y_pred", ",", "self", ".", "loss_value", "(", "x_data", ",", "y_true", ",", "y_pred", ")" ]
Forward propagate network and return a value of loss function
[ "Forward", "propagate", "network", "and", "return", "a", "value", "of", "loss", "function" ]
python
train
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L2057-L2065
def MessageBox(content: str, title: str, flags: int = MB.Ok) -> int: """ MessageBox from Win32. content: str. title: str. flags: int, a value or some combined values in class `MB`. Return int, a value in MB whose name starts with Id, such as MB.IdOk """ return ctypes.windll.user32.MessageBoxW(ctypes.c_void_p(0), ctypes.c_wchar_p(content), ctypes.c_wchar_p(title), flags)
[ "def", "MessageBox", "(", "content", ":", "str", ",", "title", ":", "str", ",", "flags", ":", "int", "=", "MB", ".", "Ok", ")", "->", "int", ":", "return", "ctypes", ".", "windll", ".", "user32", ".", "MessageBoxW", "(", "ctypes", ".", "c_void_p", ...
MessageBox from Win32. content: str. title: str. flags: int, a value or some combined values in class `MB`. Return int, a value in MB whose name starts with Id, such as MB.IdOk
[ "MessageBox", "from", "Win32", ".", "content", ":", "str", ".", "title", ":", "str", ".", "flags", ":", "int", "a", "value", "or", "some", "combined", "values", "in", "class", "MB", ".", "Return", "int", "a", "value", "in", "MB", "whose", "name", "st...
python
valid
urinieto/msaf
msaf/algorithms/olda/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/olda/segmenter.py#L154-L166
def gaussian_cost(X): '''Return the average log-likelihood of data under a standard normal ''' d, n = X.shape if n < 2: return 0 sigma = np.var(X, axis=1, ddof=1) cost = -0.5 * d * n * np.log(2. * np.pi) - 0.5 * (n - 1.) * np.sum(sigma) return cost
[ "def", "gaussian_cost", "(", "X", ")", ":", "d", ",", "n", "=", "X", ".", "shape", "if", "n", "<", "2", ":", "return", "0", "sigma", "=", "np", ".", "var", "(", "X", ",", "axis", "=", "1", ",", "ddof", "=", "1", ")", "cost", "=", "-", "0....
Return the average log-likelihood of data under a standard normal
[ "Return", "the", "average", "log", "-", "likelihood", "of", "data", "under", "a", "standard", "normal" ]
python
test
uogbuji/versa
tools/py/writer/csv.py
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/writer/csv.py#L25-L33
def omap(m): ''' Create a nested mapping from origin to property to values/attributes covering an entire Versa model ''' om = {} for s, p, o, a in m.match(): om.setdefault(s, {}) om[s].setdefault(p, []).append((o, a)) return om
[ "def", "omap", "(", "m", ")", ":", "om", "=", "{", "}", "for", "s", ",", "p", ",", "o", ",", "a", "in", "m", ".", "match", "(", ")", ":", "om", ".", "setdefault", "(", "s", ",", "{", "}", ")", "om", "[", "s", "]", ".", "setdefault", "("...
Create a nested mapping from origin to property to values/attributes covering an entire Versa model
[ "Create", "a", "nested", "mapping", "from", "origin", "to", "property", "to", "values", "/", "attributes", "covering", "an", "entire", "Versa", "model" ]
python
train
edx/edx-django-release-util
release_util/management/commands/__init__.py
https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L159-L184
def _get_unapplied_migrations(self, loader): """ Output a list of unapplied migrations in the form [['migration1', migration2'], ...]. This implementation is mostly copied from the Django 'showmigrations' mgmt command. https://github.com/django/django/blob/stable/1.8.x/django/core/management/commands/showmigrations.py This should only be called from _get_current_migration_state(). """ unapplied = [] graph = loader.graph plan = [] seen = set() # Generate the plan, in the order that migrations have been/should be applied. for target in graph.leaf_nodes(): for migration in graph.forwards_plan(target): if migration not in seen: plan.append(graph.nodes[migration]) seen.add(migration) # Remove the migrations that have already been applied. for migration in plan: if not (migration.app_label, migration.name) in loader.applied_migrations: # NOTE: Unicode Django application names are unsupported. unapplied.append([migration.app_label, str(migration.name)]) return unapplied
[ "def", "_get_unapplied_migrations", "(", "self", ",", "loader", ")", ":", "unapplied", "=", "[", "]", "graph", "=", "loader", ".", "graph", "plan", "=", "[", "]", "seen", "=", "set", "(", ")", "# Generate the plan, in the order that migrations have been/should be ...
Output a list of unapplied migrations in the form [['migration1', migration2'], ...]. This implementation is mostly copied from the Django 'showmigrations' mgmt command. https://github.com/django/django/blob/stable/1.8.x/django/core/management/commands/showmigrations.py This should only be called from _get_current_migration_state().
[ "Output", "a", "list", "of", "unapplied", "migrations", "in", "the", "form", "[[", "migration1", "migration2", "]", "...", "]", ".", "This", "implementation", "is", "mostly", "copied", "from", "the", "Django", "showmigrations", "mgmt", "command", ".", "https",...
python
train