repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
objectrocket/python-client
scripts/check_docs.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/scripts/check_docs.py#L89-L97
def build_pyfile_path_from_docname(self, docfile): """Build the expected Python file name based on the given documentation file name. :param str docfile: The documentation file name from which to build the Python file name. :rtype: str """ name, ext = os.path.splitext(docfile) expected_py_name = name.replace('.', '/') + '.py' return expected_py_name
[ "def", "build_pyfile_path_from_docname", "(", "self", ",", "docfile", ")", ":", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "docfile", ")", "expected_py_name", "=", "name", ".", "replace", "(", "'.'", ",", "'/'", ")", "+", "'.py'", ...
Build the expected Python file name based on the given documentation file name. :param str docfile: The documentation file name from which to build the Python file name. :rtype: str
[ "Build", "the", "expected", "Python", "file", "name", "based", "on", "the", "given", "documentation", "file", "name", "." ]
python
train
fusionbox/django-backupdb
backupdb/utils/commands.py
https://github.com/fusionbox/django-backupdb/blob/db4aa73049303245ef0182cda5c76b1dd194cd00/backupdb/utils/commands.py#L77-L94
def get_postgresql_args(db_config, extra_args=None): """ Returns an array of argument values that will be passed to a `psql` or `pg_dump` process when it is started based on the given database configuration. """ db = db_config['NAME'] mapping = [('--username={0}', db_config.get('USER')), ('--host={0}', db_config.get('HOST')), ('--port={0}', db_config.get('PORT'))] args = apply_arg_values(mapping) if extra_args is not None: args.extend(shlex.split(extra_args)) args.append(db) return args
[ "def", "get_postgresql_args", "(", "db_config", ",", "extra_args", "=", "None", ")", ":", "db", "=", "db_config", "[", "'NAME'", "]", "mapping", "=", "[", "(", "'--username={0}'", ",", "db_config", ".", "get", "(", "'USER'", ")", ")", ",", "(", "'--host=...
Returns an array of argument values that will be passed to a `psql` or `pg_dump` process when it is started based on the given database configuration.
[ "Returns", "an", "array", "of", "argument", "values", "that", "will", "be", "passed", "to", "a", "psql", "or", "pg_dump", "process", "when", "it", "is", "started", "based", "on", "the", "given", "database", "configuration", "." ]
python
train
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L688-L703
def intersects(self, e): """ Check whether e intersects self. :return: true if this elements intersects the element e """ if self.chrom != e.chrom: return False if e.start >= self.start and e.start < self.end: return True if e.end > self.start and e.end <= self.end: return True if e.start < self.start and e.end > self.end: return True return False
[ "def", "intersects", "(", "self", ",", "e", ")", ":", "if", "self", ".", "chrom", "!=", "e", ".", "chrom", ":", "return", "False", "if", "e", ".", "start", ">=", "self", ".", "start", "and", "e", ".", "start", "<", "self", ".", "end", ":", "ret...
Check whether e intersects self. :return: true if this elements intersects the element e
[ "Check", "whether", "e", "intersects", "self", "." ]
python
train
cloudbase/python-hnvclient
hnv/client.py
https://github.com/cloudbase/python-hnvclient/blob/b019452af01db22629809b8930357a2ebf6494be/hnv/client.py#L542-L565
def process_raw_data(cls, raw_data): """Create a new model using raw API response.""" properties = raw_data["properties"] ip_pools = [] for raw_content in properties.get("ipPools", []): raw_content["parentResourceID"] = raw_data["resourceId"] raw_content["grandParentResourceID"] = raw_data["parentResourceID"] ip_pools.append(IPPools.from_raw_data(raw_content)) properties["ipPools"] = ip_pools ip_configurations = [] for raw_content in properties.get("ipConfigurations", []): resource = Resource.from_raw_data(raw_content) ip_configurations.append(resource) properties["ipConfigurations"] = ip_configurations network_interfaces = [] for raw_content in properties.get("networkInterfaces", []): resource = Resource.from_raw_data(raw_content) network_interfaces.append(resource) properties["networkInterfaces"] = network_interfaces return super(LogicalSubnetworks, cls).process_raw_data(raw_data)
[ "def", "process_raw_data", "(", "cls", ",", "raw_data", ")", ":", "properties", "=", "raw_data", "[", "\"properties\"", "]", "ip_pools", "=", "[", "]", "for", "raw_content", "in", "properties", ".", "get", "(", "\"ipPools\"", ",", "[", "]", ")", ":", "ra...
Create a new model using raw API response.
[ "Create", "a", "new", "model", "using", "raw", "API", "response", "." ]
python
train
aiortc/aioice
aioice/ice.py
https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/ice.py#L752-L774
async def query_consent(self): """ Periodically check consent (RFC 7675). """ failures = 0 while True: # randomize between 0.8 and 1.2 times CONSENT_INTERVAL await asyncio.sleep(CONSENT_INTERVAL * (0.8 + 0.4 * random.random())) for pair in self._nominated.values(): request = self.build_request(pair) try: await pair.protocol.request( request, pair.remote_addr, integrity_key=self.remote_password.encode('utf8'), retransmissions=0) failures = 0 except exceptions.TransactionError: failures += 1 if failures >= CONSENT_FAILURES: self.__log_info('Consent to send expired') self._query_consent_handle = None return await self.close()
[ "async", "def", "query_consent", "(", "self", ")", ":", "failures", "=", "0", "while", "True", ":", "# randomize between 0.8 and 1.2 times CONSENT_INTERVAL", "await", "asyncio", ".", "sleep", "(", "CONSENT_INTERVAL", "*", "(", "0.8", "+", "0.4", "*", "random", "...
Periodically check consent (RFC 7675).
[ "Periodically", "check", "consent", "(", "RFC", "7675", ")", "." ]
python
train
Jajcus/pyxmpp2
pyxmpp2/roster.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/roster.py#L756-L770
def request_roster(self, version = None): """Request roster from server. :Parameters: - `version`: if not `None` versioned roster will be requested for given local version. Use "" to request full roster. :Types: - `version`: `unicode` """ processor = self.stanza_processor request = Iq(stanza_type = "get") request.set_payload(RosterPayload(version = version)) processor.set_response_handlers(request, self._get_success, self._get_error) processor.send(request)
[ "def", "request_roster", "(", "self", ",", "version", "=", "None", ")", ":", "processor", "=", "self", ".", "stanza_processor", "request", "=", "Iq", "(", "stanza_type", "=", "\"get\"", ")", "request", ".", "set_payload", "(", "RosterPayload", "(", "version"...
Request roster from server. :Parameters: - `version`: if not `None` versioned roster will be requested for given local version. Use "" to request full roster. :Types: - `version`: `unicode`
[ "Request", "roster", "from", "server", "." ]
python
valid
llllllllll/codetransformer
codetransformer/instructions.py
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/instructions.py#L238-L259
def equiv(self, instr): """Check equivalence of instructions. This checks against the types and the arguments of the instructions Parameters ---------- instr : Instruction The instruction to check against. Returns ------- is_equiv : bool If the instructions are equivalent. Notes ----- This is a separate concept from instruction identity. Two separate instructions can be equivalent without being the same exact instance. This means that two equivalent instructions can be at different points in the bytecode or be targeted by different jumps. """ return type(self) == type(instr) and self.arg == instr.arg
[ "def", "equiv", "(", "self", ",", "instr", ")", ":", "return", "type", "(", "self", ")", "==", "type", "(", "instr", ")", "and", "self", ".", "arg", "==", "instr", ".", "arg" ]
Check equivalence of instructions. This checks against the types and the arguments of the instructions Parameters ---------- instr : Instruction The instruction to check against. Returns ------- is_equiv : bool If the instructions are equivalent. Notes ----- This is a separate concept from instruction identity. Two separate instructions can be equivalent without being the same exact instance. This means that two equivalent instructions can be at different points in the bytecode or be targeted by different jumps.
[ "Check", "equivalence", "of", "instructions", ".", "This", "checks", "against", "the", "types", "and", "the", "arguments", "of", "the", "instructions" ]
python
train
gwpy/gwpy
gwpy/io/datafind.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/datafind.py#L320-L337
def with_connection(func): """Decorate a function to open a new datafind connection if required This method will inspect the ``connection`` keyword, and if `None` (or missing), will use the ``host`` and ``port`` keywords to open a new connection and pass it as ``connection=<new>`` to ``func``. """ @wraps(func) def wrapped(*args, **kwargs): if kwargs.get('connection') is None: kwargs['connection'] = _choose_connection(host=kwargs.get('host'), port=kwargs.get('port')) try: return func(*args, **kwargs) except HTTPException: kwargs['connection'] = reconnect(kwargs['connection']) return func(*args, **kwargs) return wrapped
[ "def", "with_connection", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "'connection'", ")", "is", "None", ":", "kwargs", "[", "'connec...
Decorate a function to open a new datafind connection if required This method will inspect the ``connection`` keyword, and if `None` (or missing), will use the ``host`` and ``port`` keywords to open a new connection and pass it as ``connection=<new>`` to ``func``.
[ "Decorate", "a", "function", "to", "open", "a", "new", "datafind", "connection", "if", "required" ]
python
train
mrstephenneal/dirutility
dirutility/ftp.py
https://github.com/mrstephenneal/dirutility/blob/339378659e2d7e09c53acfc51c5df745bb0cd517/dirutility/ftp.py#L48-L79
def get(self, remote, local=None, keep_dir_structure=False): """ Download a remote file on the fto sever to a local directory. :param remote: File path of remote source file :param local: Directory of local destination directory :param keep_dir_structure: If True, replicates the remote files folder structure """ if local and os.path.isdir(local): os.chdir(local) elif keep_dir_structure: # Replicate the remote files folder structure for directory in remote.split(os.sep)[:-1]: if not os.path.isdir(directory): os.mkdir(directory) os.chdir(directory) # Change to the correct directory if remote is a path not just a name if os.sep in remote: directory, file_name = remote.rsplit(os.sep, 1) self.chdir(directory) else: file_name = remote # Download the file and get response response = self._retrieve_binary(file_name) # Rename downloaded files if local is a file_name string if local and isinstance(local, str): os.rename(file_name, local) return response
[ "def", "get", "(", "self", ",", "remote", ",", "local", "=", "None", ",", "keep_dir_structure", "=", "False", ")", ":", "if", "local", "and", "os", ".", "path", ".", "isdir", "(", "local", ")", ":", "os", ".", "chdir", "(", "local", ")", "elif", ...
Download a remote file on the fto sever to a local directory. :param remote: File path of remote source file :param local: Directory of local destination directory :param keep_dir_structure: If True, replicates the remote files folder structure
[ "Download", "a", "remote", "file", "on", "the", "fto", "sever", "to", "a", "local", "directory", "." ]
python
train
yangl1996/libpagure
libpagure/libpagure.py
https://github.com/yangl1996/libpagure/blob/dd96ed29142407463790c66ed321984a6ea7465a/libpagure/libpagure.py#L374-L384
def issue_info(self, issue_id): """ Get info about a single issue. :param issue_id: the id of the issue :return: """ request_url = "{}issue/{}".format(self.create_basic_url(), issue_id) return_value = self._call_api(request_url) return return_value
[ "def", "issue_info", "(", "self", ",", "issue_id", ")", ":", "request_url", "=", "\"{}issue/{}\"", ".", "format", "(", "self", ".", "create_basic_url", "(", ")", ",", "issue_id", ")", "return_value", "=", "self", ".", "_call_api", "(", "request_url", ")", ...
Get info about a single issue. :param issue_id: the id of the issue :return:
[ "Get", "info", "about", "a", "single", "issue", ".", ":", "param", "issue_id", ":", "the", "id", "of", "the", "issue", ":", "return", ":" ]
python
train
oscarbranson/latools
latools/helpers/config.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/helpers/config.py#L30-L39
def read_latoolscfg(): """ Reads configuration, returns a ConfigParser object. Distinct from read_configuration, which returns a dict. """ config_file = pkgrs.resource_filename('latools', 'latools.cfg') cf = configparser.ConfigParser() cf.read(config_file) return config_file, cf
[ "def", "read_latoolscfg", "(", ")", ":", "config_file", "=", "pkgrs", ".", "resource_filename", "(", "'latools'", ",", "'latools.cfg'", ")", "cf", "=", "configparser", ".", "ConfigParser", "(", ")", "cf", ".", "read", "(", "config_file", ")", "return", "conf...
Reads configuration, returns a ConfigParser object. Distinct from read_configuration, which returns a dict.
[ "Reads", "configuration", "returns", "a", "ConfigParser", "object", "." ]
python
test
lsbardel/python-stdnet
stdnet/odm/session.py
https://github.com/lsbardel/python-stdnet/blob/78db5320bdedc3f28c5e4f38cda13a4469e35db7/stdnet/odm/session.py#L957-L961
def query(self, session=None): '''Returns a new :class:`Query` for :attr:`Manager.model`.''' if session is None or session.router is not self.router: session = self.session() return session.query(self.model)
[ "def", "query", "(", "self", ",", "session", "=", "None", ")", ":", "if", "session", "is", "None", "or", "session", ".", "router", "is", "not", "self", ".", "router", ":", "session", "=", "self", ".", "session", "(", ")", "return", "session", ".", ...
Returns a new :class:`Query` for :attr:`Manager.model`.
[ "Returns", "a", "new", ":", "class", ":", "Query", "for", ":", "attr", ":", "Manager", ".", "model", "." ]
python
train
Gandi/gandi.cli
gandi/cli/modules/dns.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/dns.py#L107-L111
def keys(cls, fqdn, sort_by=None): """Display keys information about a domain.""" meta = cls.get_fqdn_info(fqdn) url = meta['domain_keys_href'] return cls.json_get(cls.get_sort_url(url, sort_by))
[ "def", "keys", "(", "cls", ",", "fqdn", ",", "sort_by", "=", "None", ")", ":", "meta", "=", "cls", ".", "get_fqdn_info", "(", "fqdn", ")", "url", "=", "meta", "[", "'domain_keys_href'", "]", "return", "cls", ".", "json_get", "(", "cls", ".", "get_sor...
Display keys information about a domain.
[ "Display", "keys", "information", "about", "a", "domain", "." ]
python
train
dossier/dossier.fc
python/dossier/fc/feature_collection.py
https://github.com/dossier/dossier.fc/blob/3e969d0cb2592fc06afc1c849d2b22283450b5e2/python/dossier/fc/feature_collection.py#L203-L209
def dumps(self): '''Create a CBOR byte string from a feature collection.''' metadata = {'v': 'fc01'} if self.read_only: metadata['ro'] = 1 rep = [metadata, self.to_dict()] return cbor.dumps(rep)
[ "def", "dumps", "(", "self", ")", ":", "metadata", "=", "{", "'v'", ":", "'fc01'", "}", "if", "self", ".", "read_only", ":", "metadata", "[", "'ro'", "]", "=", "1", "rep", "=", "[", "metadata", ",", "self", ".", "to_dict", "(", ")", "]", "return"...
Create a CBOR byte string from a feature collection.
[ "Create", "a", "CBOR", "byte", "string", "from", "a", "feature", "collection", "." ]
python
train
chaoss/grimoirelab-manuscripts
manuscripts/metrics/its.py
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts/metrics/its.py#L162-L176
def __get_metrics(self): """ Each metric must have its own filters copy to modify it freely""" esfilters_closed = None esfilters_opened = None if self.esfilters: esfilters_closed = self.esfilters.copy() esfilters_opened = self.esfilters.copy() closed = self.closed_class(self.es_url, self.es_index, start=self.start, end=self.end, esfilters=esfilters_closed, interval=self.interval) opened = self.opened_class(self.es_url, self.es_index, start=self.start, end=self.end, esfilters=esfilters_opened, interval=self.interval) return (closed, opened)
[ "def", "__get_metrics", "(", "self", ")", ":", "esfilters_closed", "=", "None", "esfilters_opened", "=", "None", "if", "self", ".", "esfilters", ":", "esfilters_closed", "=", "self", ".", "esfilters", ".", "copy", "(", ")", "esfilters_opened", "=", "self", "...
Each metric must have its own filters copy to modify it freely
[ "Each", "metric", "must", "have", "its", "own", "filters", "copy", "to", "modify", "it", "freely" ]
python
train
openstack/horizon
openstack_dashboard/api/neutron.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L580-L602
def list(self, all_tenants=False, **search_opts): """Fetches a list of all floating IPs. :returns: List of FloatingIp object """ if not all_tenants: tenant_id = self.request.user.tenant_id # In Neutron, list_floatingips returns Floating IPs from # all tenants when the API is called with admin role, so # we need to filter them with tenant_id. search_opts['tenant_id'] = tenant_id port_search_opts = {'tenant_id': tenant_id} else: port_search_opts = {} fips = self.client.list_floatingips(**search_opts) fips = fips.get('floatingips') # Get port list to add instance_id to floating IP list # instance_id is stored in device_id attribute ports = port_list(self.request, **port_search_opts) port_dict = collections.OrderedDict([(p['id'], p) for p in ports]) for fip in fips: self._set_instance_info(fip, port_dict.get(fip['port_id'])) return [FloatingIp(fip) for fip in fips]
[ "def", "list", "(", "self", ",", "all_tenants", "=", "False", ",", "*", "*", "search_opts", ")", ":", "if", "not", "all_tenants", ":", "tenant_id", "=", "self", ".", "request", ".", "user", ".", "tenant_id", "# In Neutron, list_floatingips returns Floating IPs f...
Fetches a list of all floating IPs. :returns: List of FloatingIp object
[ "Fetches", "a", "list", "of", "all", "floating", "IPs", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/diffusion_analyzer.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/diffusion_analyzer.py#L531-L553
def export_msdt(self, filename): """ Writes MSD data to a csv file that can be easily plotted in other software. Args: filename (str): Filename. Supported formats are csv and dat. If the extension is csv, a csv file is written. Otherwise, a dat format is assumed. """ fmt = "csv" if filename.lower().endswith(".csv") else "dat" delimiter = ", " if fmt == "csv" else " " with open(filename, "wt") as f: if fmt == "dat": f.write("# ") f.write(delimiter.join(["t", "MSD", "MSD_a", "MSD_b", "MSD_c", "MSCD"])) f.write("\n") for dt, msd, msdc, mscd in zip(self.dt, self.msd, self.msd_components, self.mscd): f.write(delimiter.join(["%s" % v for v in [dt, msd] + list( msdc) + [mscd]])) f.write("\n")
[ "def", "export_msdt", "(", "self", ",", "filename", ")", ":", "fmt", "=", "\"csv\"", "if", "filename", ".", "lower", "(", ")", ".", "endswith", "(", "\".csv\"", ")", "else", "\"dat\"", "delimiter", "=", "\", \"", "if", "fmt", "==", "\"csv\"", "else", "...
Writes MSD data to a csv file that can be easily plotted in other software. Args: filename (str): Filename. Supported formats are csv and dat. If the extension is csv, a csv file is written. Otherwise, a dat format is assumed.
[ "Writes", "MSD", "data", "to", "a", "csv", "file", "that", "can", "be", "easily", "plotted", "in", "other", "software", "." ]
python
train
yyuu/botornado
boto/sdb/domain.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/sdb/domain.py#L192-L207
def select(self, query='', next_token=None, consistent_read=False, max_items=None): """ Returns a set of Attributes for item names within domain_name that match the query. The query must be expressed in using the SELECT style syntax rather than the original SimpleDB query language. :type query: string :param query: The SimpleDB query to be performed. :rtype: iter :return: An iterator containing the results. This is actually a generator function that will iterate across all search results, not just the first page. """ return SelectResultSet(self, query, max_items=max_items, next_token=next_token, consistent_read=consistent_read)
[ "def", "select", "(", "self", ",", "query", "=", "''", ",", "next_token", "=", "None", ",", "consistent_read", "=", "False", ",", "max_items", "=", "None", ")", ":", "return", "SelectResultSet", "(", "self", ",", "query", ",", "max_items", "=", "max_item...
Returns a set of Attributes for item names within domain_name that match the query. The query must be expressed in using the SELECT style syntax rather than the original SimpleDB query language. :type query: string :param query: The SimpleDB query to be performed. :rtype: iter :return: An iterator containing the results. This is actually a generator function that will iterate across all search results, not just the first page.
[ "Returns", "a", "set", "of", "Attributes", "for", "item", "names", "within", "domain_name", "that", "match", "the", "query", ".", "The", "query", "must", "be", "expressed", "in", "using", "the", "SELECT", "style", "syntax", "rather", "than", "the", "original...
python
train
i3visio/osrframework
osrframework/utils/configuration.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/configuration.py#L33-L63
def changePermissionsRecursively(path, uid, gid): """ Function to recursively change the user id and group id. It sets 700 permissions. """ os.chown(path, uid, gid) for item in os.listdir(path): itempath = os.path.join(path, item) if os.path.isfile(itempath): # Setting owner try: os.chown(itempath, uid, gid) except Exception as e: # If this crashes it may be because we are running the # application in Windows systems, where os.chown does NOT work. pass # Setting permissions os.chmod(itempath, 600) elif os.path.isdir(itempath): # Setting owner try: os.chown(itempath, uid, gid) except Exception as e: # If this crashes it may be because we are running the # application in Windows systems, where os.chown does NOT work. pass # Setting permissions os.chmod(itempath, 6600) # Recursive function to iterate the files changePermissionsRecursively(itempath, uid, gid)
[ "def", "changePermissionsRecursively", "(", "path", ",", "uid", ",", "gid", ")", ":", "os", ".", "chown", "(", "path", ",", "uid", ",", "gid", ")", "for", "item", "in", "os", ".", "listdir", "(", "path", ")", ":", "itempath", "=", "os", ".", "path"...
Function to recursively change the user id and group id. It sets 700 permissions.
[ "Function", "to", "recursively", "change", "the", "user", "id", "and", "group", "id", "." ]
python
train
pyviz/geoviews
geoviews/operation/resample.py
https://github.com/pyviz/geoviews/blob/cc70ac2d5a96307769bc6192eaef8576c3d24b30/geoviews/operation/resample.py#L13-L20
def find_geom(geom, geoms): """ Returns the index of a geometry in a list of geometries avoiding expensive equality checks of `in` operator. """ for i, g in enumerate(geoms): if g is geom: return i
[ "def", "find_geom", "(", "geom", ",", "geoms", ")", ":", "for", "i", ",", "g", "in", "enumerate", "(", "geoms", ")", ":", "if", "g", "is", "geom", ":", "return", "i" ]
Returns the index of a geometry in a list of geometries avoiding expensive equality checks of `in` operator.
[ "Returns", "the", "index", "of", "a", "geometry", "in", "a", "list", "of", "geometries", "avoiding", "expensive", "equality", "checks", "of", "in", "operator", "." ]
python
train
rigetti/pyquil
pyquil/device.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/device.py#L368-L378
def specs_from_graph(graph: nx.Graph): """ Generate a Specs object from a NetworkX graph with placeholder values for the actual specs. :param graph: The graph """ qspecs = [QubitSpecs(id=q, fRO=0.90, f1QRB=0.99, T1=30e-6, T2=30e-6, fActiveReset=0.99) for q in graph.nodes] especs = [EdgeSpecs(targets=(q1, q2), fBellState=0.90, fCZ=0.90, fCZ_std_err=0.05, fCPHASE=0.80) for q1, q2 in graph.edges] return Specs(qspecs, especs)
[ "def", "specs_from_graph", "(", "graph", ":", "nx", ".", "Graph", ")", ":", "qspecs", "=", "[", "QubitSpecs", "(", "id", "=", "q", ",", "fRO", "=", "0.90", ",", "f1QRB", "=", "0.99", ",", "T1", "=", "30e-6", ",", "T2", "=", "30e-6", ",", "fActive...
Generate a Specs object from a NetworkX graph with placeholder values for the actual specs. :param graph: The graph
[ "Generate", "a", "Specs", "object", "from", "a", "NetworkX", "graph", "with", "placeholder", "values", "for", "the", "actual", "specs", "." ]
python
train
Crypto-toolbox/btfxwss
btfxwss/queue_processor.py
https://github.com/Crypto-toolbox/btfxwss/blob/16827fa6aacb2c0e289aa852bf61a18df6905835/btfxwss/queue_processor.py#L205-L215
def _handle_conf(self, dtype, data, ts): """Handles configuration messages. :param dtype: :param data: :param ts: :return: """ self.log.debug("_handle_conf: %s - %s - %s", dtype, data, ts) self.log.info("Configuration accepted: %s", dtype) return
[ "def", "_handle_conf", "(", "self", ",", "dtype", ",", "data", ",", "ts", ")", ":", "self", ".", "log", ".", "debug", "(", "\"_handle_conf: %s - %s - %s\"", ",", "dtype", ",", "data", ",", "ts", ")", "self", ".", "log", ".", "info", "(", "\"Configurati...
Handles configuration messages. :param dtype: :param data: :param ts: :return:
[ "Handles", "configuration", "messages", "." ]
python
test
BerkeleyAutomation/perception
perception/orthographic_intrinsics.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/orthographic_intrinsics.py#L82-L88
def t(self): """:obj:`numpy.ndarray` : The 3x1 translation matrix for this projection """ t = np.array([self._plane_width / 2, self._plane_height / 2, self._depth_scale / 2]) return t
[ "def", "t", "(", "self", ")", ":", "t", "=", "np", ".", "array", "(", "[", "self", ".", "_plane_width", "/", "2", ",", "self", ".", "_plane_height", "/", "2", ",", "self", ".", "_depth_scale", "/", "2", "]", ")", "return", "t" ]
:obj:`numpy.ndarray` : The 3x1 translation matrix for this projection
[ ":", "obj", ":", "numpy", ".", "ndarray", ":", "The", "3x1", "translation", "matrix", "for", "this", "projection" ]
python
train
tensorpack/tensorpack
tensorpack/dataflow/dataset/caltech101.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/dataflow/dataset/caltech101.py#L15-L22
def maybe_download(url, work_directory): """Download the data from Marlin's website, unless it's already here.""" filename = url.split("/")[-1] filepath = os.path.join(work_directory, filename) if not os.path.exists(filepath): logger.info("Downloading to {}...".format(filepath)) download(url, work_directory) return filepath
[ "def", "maybe_download", "(", "url", ",", "work_directory", ")", ":", "filename", "=", "url", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "filepath", "=", "os", ".", "path", ".", "join", "(", "work_directory", ",", "filename", ")", "if", "no...
Download the data from Marlin's website, unless it's already here.
[ "Download", "the", "data", "from", "Marlin", "s", "website", "unless", "it", "s", "already", "here", "." ]
python
train
fhcrc/seqmagick
seqmagick/fileformat.py
https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/fileformat.py#L45-L58
def from_extension(extension): """ Look up the BioPython file type corresponding with input extension. Look up is case insensitive. """ if not extension.startswith('.'): raise ValueError("Extensions must begin with a period.") try: return EXTENSION_TO_TYPE[extension.lower()] except KeyError: raise UnknownExtensionError( "seqmagick does not know how to handle " + "files with extensions like this: " + extension)
[ "def", "from_extension", "(", "extension", ")", ":", "if", "not", "extension", ".", "startswith", "(", "'.'", ")", ":", "raise", "ValueError", "(", "\"Extensions must begin with a period.\"", ")", "try", ":", "return", "EXTENSION_TO_TYPE", "[", "extension", ".", ...
Look up the BioPython file type corresponding with input extension. Look up is case insensitive.
[ "Look", "up", "the", "BioPython", "file", "type", "corresponding", "with", "input", "extension", "." ]
python
train
acorg/dark-matter
dark/btop.py
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/btop.py#L5-L53
def parseBtop(btopString): """ Parse a BTOP string. The format is described at https://www.ncbi.nlm.nih.gov/books/NBK279682/ @param btopString: A C{str} BTOP sequence. @raise ValueError: If C{btopString} is not valid BTOP. @return: A generator that yields a series of integers and 2-tuples of letters, as found in the BTOP string C{btopString}. """ isdigit = str.isdigit value = None queryLetter = None for offset, char in enumerate(btopString): if isdigit(char): if queryLetter is not None: raise ValueError( 'BTOP string %r has a query letter %r at offset %d with ' 'no corresponding subject letter' % (btopString, queryLetter, offset - 1)) value = int(char) if value is None else value * 10 + int(char) else: if value is not None: yield value value = None queryLetter = char else: if queryLetter is None: queryLetter = char else: if queryLetter == '-' and char == '-': raise ValueError( 'BTOP string %r has two consecutive gaps at ' 'offset %d' % (btopString, offset - 1)) elif queryLetter == char: raise ValueError( 'BTOP string %r has two consecutive identical %r ' 'letters at offset %d' % (btopString, char, offset - 1)) yield (queryLetter, char) queryLetter = None if value is not None: yield value elif queryLetter is not None: raise ValueError( 'BTOP string %r has a trailing query letter %r with ' 'no corresponding subject letter' % (btopString, queryLetter))
[ "def", "parseBtop", "(", "btopString", ")", ":", "isdigit", "=", "str", ".", "isdigit", "value", "=", "None", "queryLetter", "=", "None", "for", "offset", ",", "char", "in", "enumerate", "(", "btopString", ")", ":", "if", "isdigit", "(", "char", ")", "...
Parse a BTOP string. The format is described at https://www.ncbi.nlm.nih.gov/books/NBK279682/ @param btopString: A C{str} BTOP sequence. @raise ValueError: If C{btopString} is not valid BTOP. @return: A generator that yields a series of integers and 2-tuples of letters, as found in the BTOP string C{btopString}.
[ "Parse", "a", "BTOP", "string", "." ]
python
train
astorfi/speechpy
speechpy/processing.py
https://github.com/astorfi/speechpy/blob/9e99ae81398e7584e6234db371d6d7b5e8736192/speechpy/processing.py#L142-L159
def fft_spectrum(frames, fft_points=512):
    """Compute the magnitude spectrum of each frame via a real FFT.

    This is the one-dimensional n-point discrete Fourier Transform (DFT)
    of a real-valued array, computed with the Fast Fourier Transform (FFT).
    See https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html
    for further details.

    Args:
        frames (array): The frame array in which each row is a frame.
        fft_points (int): The length of FFT. If fft_length is greater than
            frame_len, the frames will be zero-padded.

    Returns:
        array: The fft spectrum. If frames is an num_frames x
        sample_per_frame matrix, output will be num_frames x FFT_LENGTH.
    """
    complex_spectrum = np.fft.rfft(frames, n=fft_points, axis=-1, norm=None)
    # Only the magnitude is returned; phase information is discarded.
    return np.abs(complex_spectrum)
[ "def", "fft_spectrum", "(", "frames", ",", "fft_points", "=", "512", ")", ":", "SPECTRUM_VECTOR", "=", "np", ".", "fft", ".", "rfft", "(", "frames", ",", "n", "=", "fft_points", ",", "axis", "=", "-", "1", ",", "norm", "=", "None", ")", "return", "...
This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). Please refer to https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.rfft.html for further details. Args: frames (array): The frame array in which each row is a frame. fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. Returns: array: The fft spectrum. If frames is an num_frames x sample_per_frame matrix, output will be num_frames x FFT_LENGTH.
[ "This", "function", "computes", "the", "one", "-", "dimensional", "n", "-", "point", "discrete", "Fourier", "Transform", "(", "DFT", ")", "of", "a", "real", "-", "valued", "array", "by", "means", "of", "an", "efficient", "algorithm", "called", "the", "Fast...
python
train
ray-project/ray
python/ray/experimental/features.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/features.py#L172-L186
def flush_evicted_objects_unsafe():
    """Remove the metadata for evicted objects from every Redis shard.

    In a multitenant environment, this will flush metadata for all jobs,
    which may be undesirable.

    This removes all of the metadata for objects that have been evicted. This
    can be used to try to address out-of-memory errors caused by the
    accumulation of metadata in Redis. However, after running this command,
    fault tolerance will most likely not work.
    """
    ray.worker.global_worker.check_connected()

    num_shards = len(ray.global_state.redis_clients)
    for shard_index in range(num_shards):
        _flush_evicted_objects_unsafe_shard(shard_index)
[ "def", "flush_evicted_objects_unsafe", "(", ")", ":", "ray", ".", "worker", ".", "global_worker", ".", "check_connected", "(", ")", "for", "shard_index", "in", "range", "(", "len", "(", "ray", ".", "global_state", ".", "redis_clients", ")", ")", ":", "_flush...
This removes some critical state from the Redis shards. In a multitenant environment, this will flush metadata for all jobs, which may be undesirable. This removes all of the metadata for objects that have been evicted. This can be used to try to address out-of-memory errors caused by the accumulation of metadata in Redis. However, after running this command, fault tolerance will most likely not work.
[ "This", "removes", "some", "critical", "state", "from", "the", "Redis", "shards", "." ]
python
train
AguaClara/aguaclara
aguaclara/core/physchem.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/physchem.py#L76-L84
def viscosity_kinematic(temp):
    """Return the kinematic viscosity of water at a given temperature.

    If given units, the function will automatically convert to Kelvin.
    If not given units, the function will assume Kelvin.
    """
    ut.check_range([temp, ">0", "Temperature in Kelvin"])
    # Kinematic viscosity is dynamic viscosity divided by density;
    # work on bare magnitudes so the result is unit-free here.
    dynamic = viscosity_dynamic(temp).magnitude
    density = density_water(temp).magnitude
    return dynamic / density
[ "def", "viscosity_kinematic", "(", "temp", ")", ":", "ut", ".", "check_range", "(", "[", "temp", ",", "\">0\"", ",", "\"Temperature in Kelvin\"", "]", ")", "return", "(", "viscosity_dynamic", "(", "temp", ")", ".", "magnitude", "/", "density_water", "(", "te...
Return the kinematic viscosity of water at a given temperature. If given units, the function will automatically convert to Kelvin. If not given units, the function will assume Kelvin.
[ "Return", "the", "kinematic", "viscosity", "of", "water", "at", "a", "given", "temperature", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/convert.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/convert.py#L76-L83
def convert_to_int(x: Any, default: Optional[int] = None) -> Optional[int]:
    """
    Transforms its input into an integer, or returns ``default``.

    Args:
        x: value to convert via :func:`int`
        default: value returned when conversion raises ``TypeError`` or
            ``ValueError``; since it defaults to ``None``, the return type
            is ``Optional[int]`` (the original ``default: int = None``
            annotation relied on the deprecated implicit-Optional rule)
    """
    try:
        return int(x)
    except (TypeError, ValueError):
        return default
[ "def", "convert_to_int", "(", "x", ":", "Any", ",", "default", ":", "int", "=", "None", ")", "->", "int", ":", "try", ":", "return", "int", "(", "x", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "default" ]
Transforms its input into an integer, or returns ``default``.
[ "Transforms", "its", "input", "into", "an", "integer", "or", "returns", "default", "." ]
python
train
senaite/senaite.core
bika/lims/browser/referencesample.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/referencesample.py#L468-L523
def folderitem(self, obj, item, index): """Applies new properties to the item (Client) that is currently being rendered as a row in the list :param obj: client to be rendered as a row in the list :param item: dict representation of the client, suitable for the list :param index: current position of the item within the list :type obj: ATContentType/DexterityContentType :type item: dict :type index: int :return: the dict representation of the item :rtype: dict """ # XXX Refactor expiration to a proper place # ---------------------------- 8< ------------------------------------- if item.get("review_state", "current") == "current": # Check expiry date exdate = obj.getExpiryDate() if exdate: expirydate = DT2dt(exdate).replace(tzinfo=None) if (datetime.today() > expirydate): # Trigger expiration self.workflow.doActionFor(obj, "expire") item["review_state"] = "expired" item["obj"] = obj if self.contentFilter.get('review_state', '') \ and item.get('review_state', '') == 'expired': # This item must be omitted from the list return None # ---------------------------- >8 ------------------------------------- url = api.get_url(obj) id = api.get_id(obj) item["ID"] = id item["replace"]["ID"] = get_link(url, value=id) item["DateSampled"] = self.ulocalized_time( obj.getDateSampled(), long_format=True) item["DateReceived"] = self.ulocalized_time(obj.getDateReceived()) item["DateOpened"] = self.ulocalized_time(obj.getDateOpened()) item["ExpiryDate"] = self.ulocalized_time(obj.getExpiryDate()) # Icons after_icons = '' if obj.getBlank(): after_icons += get_image( "blank.png", title=t(_("Blank"))) if obj.getHazardous(): after_icons += get_image( "hazardous.png", title=t(_("Hazardous"))) if after_icons: item["after"]["ID"] = after_icons return item
[ "def", "folderitem", "(", "self", ",", "obj", ",", "item", ",", "index", ")", ":", "# XXX Refactor expiration to a proper place", "# ---------------------------- 8< -------------------------------------", "if", "item", ".", "get", "(", "\"review_state\"", ",", "\"current\""...
Applies new properties to the item (Client) that is currently being rendered as a row in the list :param obj: client to be rendered as a row in the list :param item: dict representation of the client, suitable for the list :param index: current position of the item within the list :type obj: ATContentType/DexterityContentType :type item: dict :type index: int :return: the dict representation of the item :rtype: dict
[ "Applies", "new", "properties", "to", "the", "item", "(", "Client", ")", "that", "is", "currently", "being", "rendered", "as", "a", "row", "in", "the", "list" ]
python
train
tjcsl/ion
intranet/apps/lostfound/views.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/lostfound/views.py#L68-L92
def lostitem_modify_view(request, item_id=None):
    """Modify a lostitem.

    id: lostitem id

    On POST, validates and saves the submitted form (re-sanitizing the
    description with ``safe_html``); otherwise renders the form pre-filled
    with the item's current values.
    """
    # Fetch once for both branches (the original duplicated this lookup).
    lostitem = get_object_or_404(LostItem, id=item_id)
    if request.method == "POST":
        form = LostItemForm(request.POST, instance=lostitem)
        if form.is_valid():
            obj = form.save()
            logger.debug(form.cleaned_data)

            # SAFE HTML
            obj.description = safe_html(obj.description)

            obj.save()
            messages.success(request, "Successfully modified lost item.")
            return redirect("lostitem_view", obj.id)
        else:
            # Bug fix: this is the modify view, not the add view, so the
            # error message must not say "adding".
            messages.error(request, "Error modifying lost item.")
    else:
        form = LostItemForm(instance=lostitem)
    context = {"form": form, "action": "modify", "id": item_id, "lostitem": lostitem}
    return render(request, "lostfound/lostitem_form.html", context)
[ "def", "lostitem_modify_view", "(", "request", ",", "item_id", "=", "None", ")", ":", "if", "request", ".", "method", "==", "\"POST\"", ":", "lostitem", "=", "get_object_or_404", "(", "LostItem", ",", "id", "=", "item_id", ")", "form", "=", "LostItemForm", ...
Modify a lostitem. id: lostitem id
[ "Modify", "a", "lostitem", "." ]
python
train
Rapptz/discord.py
discord/channel.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/channel.py#L143-L145
def members(self):
    """Returns a :class:`list` of :class:`Member` that can see this channel."""
    visible = []
    for member in self.guild.members:
        if self.permissions_for(member).read_messages:
            visible.append(member)
    return visible
[ "def", "members", "(", "self", ")", ":", "return", "[", "m", "for", "m", "in", "self", ".", "guild", ".", "members", "if", "self", ".", "permissions_for", "(", "m", ")", ".", "read_messages", "]" ]
Returns a :class:`list` of :class:`Member` that can see this channel.
[ "Returns", "a", ":", "class", ":", "list", "of", ":", "class", ":", "Member", "that", "can", "see", "this", "channel", "." ]
python
train
fedora-python/pyp2rpm
pyp2rpm/virtualenv.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/virtualenv.py#L39-L45
def fill(self, path):
    '''
    Scans content of directories
    '''
    # `path` is expected to end with a slash (caller convention).
    bin_entries = os.listdir(path + 'bin/')
    self.bindir = set(bin_entries)
    # Resolve the versioned site-packages directory (e.g. python3.9).
    site_packages_dir = glob.glob(path + 'lib/python?.?/site-packages/')[0]
    self.lib_sitepackages = set(os.listdir(site_packages_dir))
[ "def", "fill", "(", "self", ",", "path", ")", ":", "self", ".", "bindir", "=", "set", "(", "os", ".", "listdir", "(", "path", "+", "'bin/'", ")", ")", "self", ".", "lib_sitepackages", "=", "set", "(", "os", ".", "listdir", "(", "glob", ".", "glob...
Scans content of directories
[ "Scans", "content", "of", "directories" ]
python
train
bokeh/bokeh
bokeh/core/property/bases.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/bases.py#L153-L161
def _copy_default(cls, default): ''' Return a copy of the default, or a new value if the default is specified by a function. ''' if not isinstance(default, types.FunctionType): return copy(default) else: return default()
[ "def", "_copy_default", "(", "cls", ",", "default", ")", ":", "if", "not", "isinstance", "(", "default", ",", "types", ".", "FunctionType", ")", ":", "return", "copy", "(", "default", ")", "else", ":", "return", "default", "(", ")" ]
Return a copy of the default, or a new value if the default is specified by a function.
[ "Return", "a", "copy", "of", "the", "default", "or", "a", "new", "value", "if", "the", "default", "is", "specified", "by", "a", "function", "." ]
python
train
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L426-L435
def isLoggedIn(self):
    """
    Sends a request to Facebook to check the login status

    :return: True if the client is still logged in
    :rtype: bool
    """
    # Hit the login URL without following redirects: a logged-in session
    # is redirected (Location header) to the home page.
    response = self._cleanGet(self.req_url.LOGIN, allow_redirects=False)
    if "Location" not in response.headers:
        return False
    return "home" in response.headers["Location"]
[ "def", "isLoggedIn", "(", "self", ")", ":", "# Send a request to the login url, to see if we're directed to the home page", "r", "=", "self", ".", "_cleanGet", "(", "self", ".", "req_url", ".", "LOGIN", ",", "allow_redirects", "=", "False", ")", "return", "\"Location\...
Sends a request to Facebook to check the login status :return: True if the client is still logged in :rtype: bool
[ "Sends", "a", "request", "to", "Facebook", "to", "check", "the", "login", "status" ]
python
train
klmitch/turnstile
turnstile/tools.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/tools.py#L473-L555
def setup_limits(conf_file, limits_file, do_reload=True,
                 dry_run=False, debug=False):
    """
    Set up or update limits in the Redis database.

    :param conf_file: Name of the configuration file, for connecting
                      to the Redis database.
    :param limits_file: Name of the XML file describing the limits to
                        configure.
    :param do_reload: Controls reloading behavior.  If True (the
                      default), a reload command is issued.  If False,
                      no reload command is issued.  String values
                      result in a reload command of the given load
                      type, and integer or float values result in a
                      reload command of type 'spread' with the given
                      spread interval.
    :param dry_run: If True, no changes are made to the database.
                    Implies debug=True.
    :param debug: If True, debugging messages are emitted while
                  loading the limits and updating the database.
    """

    # NOTE(review): this function targets Python 2 only
    # (`print >>sys.stderr`, `long`, `basestring`).

    # If dry_run is set, default debug to True
    if dry_run:
        debug = True

    # Connect to the database...
    conf = config.Config(conf_file=conf_file)
    db = conf.get_database()
    # Redis key holding the serialized limits, and the pub/sub channel
    # used to signal running daemons.
    limits_key = conf['control'].get('limits_key', 'limits')
    control_channel = conf['control'].get('channel', 'control')

    # Parse the limits file
    limits_tree = etree.parse(limits_file)

    # Now, we parse the limits XML file
    lims = []
    for idx, lim in enumerate(limits_tree.getroot()):
        # Skip tags we don't recognize
        if lim.tag != 'limit':
            warnings.warn("Unrecognized tag %r in limits file at index %d" %
                          (lim.tag, idx))
            continue

        # Construct the limit and add it to the list of limits
        try:
            lims.append(parse_limit_node(db, idx, lim))
        except Exception as exc:
            # A bad limit is skipped with a warning rather than aborting
            # the whole load.
            warnings.warn("Couldn't understand limit at index %d: %s" %
                          (idx, exc))
            continue

    # Now that we have the limits, let's install them
    if debug:
        print >>sys.stderr, "Installing the following limits:"
        for lim in lims:
            print >>sys.stderr, "  %r" % lim
    if not dry_run:
        database.limit_update(db, limits_key, lims)

    # Were we requested to reload the limits?
    if do_reload is False:
        return

    # OK, figure out what kind of reload to do
    params = []
    if do_reload is True:
        # Nothing to do; use default semantics
        pass
    elif (isinstance(do_reload, (int, long, float)) or
          (isinstance(do_reload, basestring) and do_reload.isdigit())):
        # Numeric values (or digit strings) request a 'spread' reload
        # over that interval.
        params = ['spread', do_reload]
    else:
        params = [str(do_reload)]

    # Issue the reload command
    if debug:
        cmd = ['reload']
        cmd.extend(params)
        print >>sys.stderr, ("Issuing command: %s" %
                             ' '.join(str(c) for c in cmd))
    if not dry_run:
        database.command(db, control_channel, 'reload', *params)
[ "def", "setup_limits", "(", "conf_file", ",", "limits_file", ",", "do_reload", "=", "True", ",", "dry_run", "=", "False", ",", "debug", "=", "False", ")", ":", "# If dry_run is set, default debug to True", "if", "dry_run", ":", "debug", "=", "True", "# Connect t...
Set up or update limits in the Redis database. :param conf_file: Name of the configuration file, for connecting to the Redis database. :param limits_file: Name of the XML file describing the limits to configure. :param do_reload: Controls reloading behavior. If True (the default), a reload command is issued. If False, no reload command is issued. String values result in a reload command of the given load type, and integer or float values result in a reload command of type 'spread' with the given spread interval. :param dry_run: If True, no changes are made to the database. Implies debug=True. :param debug: If True, debugging messages are emitted while loading the limits and updating the database.
[ "Set", "up", "or", "update", "limits", "in", "the", "Redis", "database", "." ]
python
train
gpennington/PyMarvel
marvel/creator.py
https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L151-L160
def get_events(self, *args, **kwargs):
    """
    Returns a full EventDataWrapper object for this creator.

    /creators/{creatorId}/events

    :returns:  EventDataWrapper -- A new request to API. Contains full results set.
    """
    # Imported lazily to avoid a circular import with the event module.
    from .event import Event, EventDataWrapper
    resource_cls, wrapper_cls = Event, EventDataWrapper
    return self.get_related_resource(resource_cls, wrapper_cls, args, kwargs)
[ "def", "get_events", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", ".", "event", "import", "Event", ",", "EventDataWrapper", "return", "self", ".", "get_related_resource", "(", "Event", ",", "EventDataWrapper", ",", "args", ",",...
Returns a full EventDataWrapper object for this creator. /creators/{creatorId}/events :returns: EventDataWrapper -- A new request to API. Contains full results set.
[ "Returns", "a", "full", "EventDataWrapper", "object", "for", "this", "creator", "." ]
python
train
onnx/onnx-mxnet
onnx_mxnet/import_onnx.py
https://github.com/onnx/onnx-mxnet/blob/b602d75c5a01f5ed8f68b11150a06374f058a86b/onnx_mxnet/import_onnx.py#L21-L55
def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):
    """Convert from onnx operator to mxnet operator.

    The converter must specify conversions explicitly for incompatible name,
    and apply handlers to operator attributes.

    Parameters
    ----------
    op_name : str
        Operator name, such as Convolution, FullyConnected
    attrs : dict
        Dict of operator attributes
    identity_list : list
        List of operators that don't require conversion
    convert_map : dict
        Dict of name : callable, where name is the op's name that
        require conversion to mxnet, callable are functions which
        take attrs and return (new_op_name, new_attrs)

    Returns
    -------
    (op_name, attrs)
        Converted (op_name, attrs) for mxnet.
    """
    # Truthiness fallback (matches original): empty containers also fall
    # back to the module-level defaults.
    identities = identity_list or _identity_list
    mapping = convert_map or _convert_map
    if op_name not in identities:
        if op_name in mapping:
            op_name, attrs = mapping[op_name](attrs)
        else:
            raise NotImplementedError("Operator {} not implemented.".format(op_name))
    op = getattr(mx.sym, op_name, None)
    if not op:
        raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
    return op, attrs
[ "def", "_convert_operator", "(", "op_name", ",", "attrs", ",", "identity_list", "=", "None", ",", "convert_map", "=", "None", ")", ":", "identity_list", "=", "identity_list", "if", "identity_list", "else", "_identity_list", "convert_map", "=", "convert_map", "if",...
Convert from onnx operator to mxnet operator. The converter must specify conversions explicitly for incompatible name, and apply handlers to operator attributes. Parameters ---------- op_name : str Operator name, such as Convolution, FullyConnected attrs : dict Dict of operator attributes identity_list : list List of operators that don't require conversion convert_map : dict Dict of name : callable, where name is the op's name that require conversion to mxnet, callable are functions which take attrs and return (new_op_name, new_attrs) Returns ------- (op_name, attrs) Converted (op_name, attrs) for mxnet.
[ "Convert", "from", "onnx", "operator", "to", "mxnet", "operator", ".", "The", "converter", "must", "specify", "conversions", "explicitly", "for", "incompatible", "name", "and", "apply", "handlers", "to", "operator", "attributes", "." ]
python
train
joowani/binarytree
binarytree/__init__.py
https://github.com/joowani/binarytree/blob/23cb6f1e60e66b96133259031e97ec03e932ba13/binarytree/__init__.py#L1388-L1441
def properties(self):
    """Return various properties of the binary tree.

    The returned dict contains the structural metrics from
    ``_get_tree_properties`` (``height``, ``size``, ``leaf_count``,
    ``min_leaf_depth``, ``max_leaf_depth``, ``min_node_value``,
    ``max_node_value``, ``is_complete``, ``is_max_heap``, ``is_min_heap``,
    ``is_perfect``, ``is_strict``) plus the ``is_bst`` and ``is_balanced``
    flags, each equivalent to the attribute of the same name on the root.

    :return: Binary tree properties.
    :rtype: dict
    """
    props = _get_tree_properties(self)
    props['is_bst'] = _is_bst(self)
    # _is_balanced returns a non-negative height when balanced, -1 otherwise.
    props['is_balanced'] = _is_balanced(self) >= 0
    return props
[ "def", "properties", "(", "self", ")", ":", "properties", "=", "_get_tree_properties", "(", "self", ")", "properties", ".", "update", "(", "{", "'is_bst'", ":", "_is_bst", "(", "self", ")", ",", "'is_balanced'", ":", "_is_balanced", "(", "self", ")", ">=",...
Return various properties of the binary tree. :return: Binary tree properties. :rtype: dict **Example**: .. doctest:: >>> from binarytree import Node >>> >>> root = Node(1) >>> root.left = Node(2) >>> root.right = Node(3) >>> root.left.left = Node(4) >>> root.left.right = Node(5) >>> props = root.properties >>> >>> props['height'] # equivalent to root.height 2 >>> props['size'] # equivalent to root.size 5 >>> props['max_leaf_depth'] # equivalent to root.max_leaf_depth 2 >>> props['min_leaf_depth'] # equivalent to root.min_leaf_depth 1 >>> props['max_node_value'] # equivalent to root.max_node_value 5 >>> props['min_node_value'] # equivalent to root.min_node_value 1 >>> props['leaf_count'] # equivalent to root.leaf_count 3 >>> props['is_balanced'] # equivalent to root.is_balanced True >>> props['is_bst'] # equivalent to root.is_bst False >>> props['is_complete'] # equivalent to root.is_complete True >>> props['is_max_heap'] # equivalent to root.is_max_heap False >>> props['is_min_heap'] # equivalent to root.is_min_heap True >>> props['is_perfect'] # equivalent to root.is_perfect False >>> props['is_strict'] # equivalent to root.is_strict True
[ "Return", "various", "properties", "of", "the", "binary", "tree", "." ]
python
train
redcap-tools/PyCap
redcap/project.py
https://github.com/redcap-tools/PyCap/blob/f44c9b62a4f62675aa609c06608663f37e12097e/redcap/project.py#L429-L501
def import_records(self, to_import, overwrite='normal', format='json',
                   return_format='json', return_content='count',
                   date_format='YMD', force_auto_number=False):
    """
    Import data into the RedCap Project

    Parameters
    ----------
    to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
        :note:
            If you pass a csv or xml string, you should use the
            ``format`` parameter appropriately.
        :note:
            Keys of the dictionaries should be subset of project's,
            fields, but this isn't a requirement. If you provide keys
            that aren't defined fields, the returned response will
            contain an ``'error'`` key.
    overwrite : ('normal'), 'overwrite'
        ``'overwrite'`` will erase values previously stored in the
        database if not specified in the to_import dictionaries.
    format : ('json'),  'xml', 'csv'
        Format of incoming data. By default, to_import will be json-encoded
    return_format : ('json'), 'csv', 'xml'
        Response format. By default, response will be json-decoded.
    return_content : ('count'), 'ids', 'nothing'
        By default, the response contains a 'count' key with the number of
        records just imported. By specifying 'ids', a list of ids
        imported will be returned. 'nothing' will only return
        the HTTP status code and no message.
    date_format : ('YMD'), 'DMY', 'MDY'
        Describes the formatting of dates. By default, date strings
        are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
        strings are formatted as 'MM/DD/YYYY' set this parameter as
        'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
        other formattings are allowed.
    force_auto_number : ('False') Enables automatic assignment of record IDs
        of imported records by REDCap. If this is set to true, and auto-numbering
        for records is enabled for the project, auto-numbering of imported records
        will be enabled.

    Returns
    -------
    response : dict, str
        response from REDCap API, json-decoded if ``return_format`` == ``'json'``
    """
    # Base payload for the 'record' API content type (name-mangled call to
    # _Project__basepl).
    pl = self.__basepl('record')
    if hasattr(to_import, 'to_csv'):
        # We'll assume it's a df
        # Serialize the DataFrame to CSV in memory; the index label(s)
        # become the record id (and event name for longitudinal projects).
        buf = StringIO()
        if self.is_longitudinal():
            csv_kwargs = {'index_label': [self.def_field, 'redcap_event_name']}
        else:
            csv_kwargs = {'index_label': self.def_field}
        to_import.to_csv(buf, **csv_kwargs)
        pl['data'] = buf.getvalue()
        buf.close()
        # Override the caller-supplied format: the payload is now CSV.
        format = 'csv'
    elif format == 'json':
        pl['data'] = json.dumps(to_import, separators=(',', ':'))
    else:
        # don't do anything to csv/xml
        pl['data'] = to_import
    pl['overwriteBehavior'] = overwrite
    pl['format'] = format
    pl['returnFormat'] = return_format
    pl['returnContent'] = return_content
    pl['dateFormat'] = date_format
    pl['forceAutoNumber'] = force_auto_number
    # _call_api returns a sequence; the first element is the decoded response.
    response = self._call_api(pl, 'imp_record')[0]
    if 'error' in response:
        raise RedcapError(str(response))
    return response
[ "def", "import_records", "(", "self", ",", "to_import", ",", "overwrite", "=", "'normal'", ",", "format", "=", "'json'", ",", "return_format", "=", "'json'", ",", "return_content", "=", "'count'", ",", "date_format", "=", "'YMD'", ",", "force_auto_number", "="...
Import data into the RedCap Project Parameters ---------- to_import : array of dicts, csv/xml string, ``pandas.DataFrame`` :note: If you pass a csv or xml string, you should use the ``format`` parameter appropriately. :note: Keys of the dictionaries should be subset of project's, fields, but this isn't a requirement. If you provide keys that aren't defined fields, the returned response will contain an ``'error'`` key. overwrite : ('normal'), 'overwrite' ``'overwrite'`` will erase values previously stored in the database if not specified in the to_import dictionaries. format : ('json'), 'xml', 'csv' Format of incoming data. By default, to_import will be json-encoded return_format : ('json'), 'csv', 'xml' Response format. By default, response will be json-decoded. return_content : ('count'), 'ids', 'nothing' By default, the response contains a 'count' key with the number of records just imported. By specifying 'ids', a list of ids imported will be returned. 'nothing' will only return the HTTP status code and no message. date_format : ('YMD'), 'DMY', 'MDY' Describes the formatting of dates. By default, date strings are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date strings are formatted as 'MM/DD/YYYY' set this parameter as 'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No other formattings are allowed. force_auto_number : ('False') Enables automatic assignment of record IDs of imported records by REDCap. If this is set to true, and auto-numbering for records is enabled for the project, auto-numbering of imported records will be enabled. Returns ------- response : dict, str response from REDCap API, json-decoded if ``return_format`` == ``'json'``
[ "Import", "data", "into", "the", "RedCap", "Project" ]
python
train
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/utils.py
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/utils.py#L18-L78
def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''):
    """
    Creates and returns a column.

    :param app: current sphinx app
    :param fromdocname: current document
    :param all_needs: Dictionary of all need objects
    :param need_info: need_info object, which stores all related need data
    :param need_key: The key to access the needed data from need_info
    :param make_ref: If true, creates a reference for the given data in need_key
    :param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
    :param prefix: string, which is used as prefix for the text output
    :return: column object (nodes.entry)
    """
    row_col = nodes.entry()
    para_col = nodes.paragraph()

    if need_key in need_info and need_info[need_key] is not None:
        # Normalize scalar values to a one-element list so the loop below
        # handles both single and multiple values.
        if not isinstance(need_info[need_key], (list, set)):
            data = [need_info[need_key]]
        else:
            data = need_info[need_key]

        for index, datum in enumerate(data):
            link_id = datum
            link_part = None
            if need_key in ['links', 'back_links']:
                # Link targets may have the form '<need_id>.<part_id>';
                # split so the need lookup uses only the need id.
                if '.' in datum:
                    link_id = datum.split('.')[0]
                    link_part = datum.split('.')[1]
            datum_text = prefix + datum
            text_col = nodes.Text(datum_text, datum_text)
            if make_ref or ref_lookup:
                try:
                    ref_col = nodes.reference("", "")
                    if not ref_lookup:
                        # Link directly to the need's own document/anchor.
                        ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname'])
                        ref_col['refuri'] += "#" + datum
                    else:
                        # Resolve the referenced need and link to its anchor
                        # (plus the part id, if one was given).
                        temp_need = all_needs[link_id]
                        ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname'])
                        ref_col['refuri'] += "#" + temp_need["id"]
                        if link_part is not None:
                            ref_col['refuri'] += '.' + link_part
                except KeyError:
                    # Unknown link target: fall back to plain text.
                    para_col += text_col
                else:
                    ref_col.append(text_col)
                    para_col += ref_col
            else:
                para_col += text_col
            # Separate multiple values with '; '.
            if index + 1 < len(data):
                para_col += nodes.emphasis("; ", "; ")
    row_col += para_col

    return row_col
[ "def", "row_col_maker", "(", "app", ",", "fromdocname", ",", "all_needs", ",", "need_info", ",", "need_key", ",", "make_ref", "=", "False", ",", "ref_lookup", "=", "False", ",", "prefix", "=", "''", ")", ":", "row_col", "=", "nodes", ".", "entry", "(", ...
Creates and returns a column. :param app: current sphinx app :param fromdocname: current document :param all_needs: Dictionary of all need objects :param need_info: need_info object, which stores all related need data :param need_key: The key to access the needed data from need_info :param make_ref: If true, creates a reference for the given data in need_key :param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference :param prefix: string, which is used as prefix for the text output :return: column object (nodes.entry)
[ "Creates", "and", "returns", "a", "column", "." ]
python
train
metagriffin/pysyncml
pysyncml/items/file.py
https://github.com/metagriffin/pysyncml/blob/a583fe0dbffa8b24e5a3e151524f84868b2382bb/pysyncml/items/file.py#L152-L195
def dump(self, stream, contentType=None, version=None): ''' Serializes this FileItem to a byte-stream and writes it to the file-like object `stream`. `contentType` and `version` must be one of the supported content-types, and if not specified, will default to ``application/vnd.omads-file``. ''' if contentType is None: contentType = constants.TYPE_OMADS_FILE if ctype.getBaseType(contentType) != constants.TYPE_OMADS_FILE: raise common.InvalidContentType('cannot serialize FileItem to "%s"' % (contentType,)) if version is None: version = '1.2' if version != '1.2': raise common.InvalidContentType('invalid file serialization version "%s"' % (version,)) root = ET.Element('File') if self.name is not None: ET.SubElement(root, 'name').text = self.name # todo: do anything with "parent"?... for attr in ('created', 'modified', 'accessed'): if getattr(self, attr) is None: continue ET.SubElement(root, attr).text = common.ts_iso(getattr(self, attr)) if self.contentType is not None: ET.SubElement(root, 'cttype').text = self.contentType attrs = [attr for attr in ('hidden', 'system', 'archived', 'delete', 'writable', 'readable', 'executable') if getattr(self, attr) is not None] if len(attrs) > 0: xa = ET.SubElement(root, 'attributes') for attr in attrs: ET.SubElement(xa, attr[0]).text = 'true' if getattr(self, attr) else 'false' if self.body is not None: ET.SubElement(root, 'body').text = self.body if self.body is None and self.size is not None: ET.SubElement(root, 'size').text = str(self.size) if len(self.extensions) > 0: xe = ET.SubElement(root, 'Ext') for name, values in self.extensions.items(): ET.SubElement(xe, 'XNam').text = name for value in values: ET.SubElement(xe, 'XVal').text = value ET.ElementTree(root).write(stream) return (constants.TYPE_OMADS_FILE + '+xml', '1.2')
[ "def", "dump", "(", "self", ",", "stream", ",", "contentType", "=", "None", ",", "version", "=", "None", ")", ":", "if", "contentType", "is", "None", ":", "contentType", "=", "constants", ".", "TYPE_OMADS_FILE", "if", "ctype", ".", "getBaseType", "(", "c...
Serializes this FileItem to a byte-stream and writes it to the file-like object `stream`. `contentType` and `version` must be one of the supported content-types, and if not specified, will default to ``application/vnd.omads-file``.
[ "Serializes", "this", "FileItem", "to", "a", "byte", "-", "stream", "and", "writes", "it", "to", "the", "file", "-", "like", "object", "stream", ".", "contentType", "and", "version", "must", "be", "one", "of", "the", "supported", "content", "-", "types", ...
python
valid
sqlalchemy-redshift/sqlalchemy-redshift
sqlalchemy_redshift/dialect.py
https://github.com/sqlalchemy-redshift/sqlalchemy-redshift/blob/b1a24872da0c8151aa60da4524605b6243d8d765/sqlalchemy_redshift/dialect.py#L496-L503
def get_table_names(self, connection, schema=None, **kw): """ Return a list of table names for `schema`. Overrides interface :meth:`~sqlalchemy.engine.interfaces.Dialect.get_table_names`. """ return self._get_table_or_view_names('r', connection, schema, **kw)
[ "def", "get_table_names", "(", "self", ",", "connection", ",", "schema", "=", "None", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "_get_table_or_view_names", "(", "'r'", ",", "connection", ",", "schema", ",", "*", "*", "kw", ")" ]
Return a list of table names for `schema`. Overrides interface :meth:`~sqlalchemy.engine.interfaces.Dialect.get_table_names`.
[ "Return", "a", "list", "of", "table", "names", "for", "schema", "." ]
python
train
uber/tchannel-python
tchannel/tornado/peer.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/peer.py#L631-L644
def get(self, hostport): """Get a Peer for the given destination. A new Peer is added to the peer heap and returned if one does not already exist for the given host-port. Otherwise, the existing Peer is returned. """ assert hostport, "hostport is required" assert isinstance(hostport, basestring), "hostport must be a string" if hostport not in self._peers: self._add(hostport) return self._peers[hostport]
[ "def", "get", "(", "self", ",", "hostport", ")", ":", "assert", "hostport", ",", "\"hostport is required\"", "assert", "isinstance", "(", "hostport", ",", "basestring", ")", ",", "\"hostport must be a string\"", "if", "hostport", "not", "in", "self", ".", "_peer...
Get a Peer for the given destination. A new Peer is added to the peer heap and returned if one does not already exist for the given host-port. Otherwise, the existing Peer is returned.
[ "Get", "a", "Peer", "for", "the", "given", "destination", "." ]
python
train
TheHive-Project/Cortex-Analyzers
analyzers/VirusTotal/virustotal_api.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/VirusTotal/virustotal_api.py#L743-L755
def get_all_file_report_pages(self, query): """ Get File Report (All Pages). :param query: a VirusTotal Intelligence search string in accordance with the file search documentation. :return: All JSON responses appended together. """ responses = [] next_page, response = self.get_hashes_from_search(self, query) responses.append(_return_response_and_status_code(response)) while next_page: next_page, response = self.get_hashes_from_search(query, next_page) responses.append(_return_response_and_status_code(response)) return dict(results=responses)
[ "def", "get_all_file_report_pages", "(", "self", ",", "query", ")", ":", "responses", "=", "[", "]", "next_page", ",", "response", "=", "self", ".", "get_hashes_from_search", "(", "self", ",", "query", ")", "responses", ".", "append", "(", "_return_response_an...
Get File Report (All Pages). :param query: a VirusTotal Intelligence search string in accordance with the file search documentation. :return: All JSON responses appended together.
[ "Get", "File", "Report", "(", "All", "Pages", ")", "." ]
python
train
rshk/python-libxdo
xdo/__init__.py
https://github.com/rshk/python-libxdo/blob/84cafa5943b005bc423edd28203a5266b3579ac3/xdo/__init__.py#L209-L217
def wait_for_mouse_move_to(self, dest_x, dest_y): """ Wait for the mouse to move to a location. This function will block until the condition has been satisified. :param dest_x: the X position you expect the mouse to move to :param dest_y: the Y position you expect the mouse to move to """ _libxdo.xdo_wait_for_mouse_move_from(self._xdo, dest_x, dest_y)
[ "def", "wait_for_mouse_move_to", "(", "self", ",", "dest_x", ",", "dest_y", ")", ":", "_libxdo", ".", "xdo_wait_for_mouse_move_from", "(", "self", ".", "_xdo", ",", "dest_x", ",", "dest_y", ")" ]
Wait for the mouse to move to a location. This function will block until the condition has been satisified. :param dest_x: the X position you expect the mouse to move to :param dest_y: the Y position you expect the mouse to move to
[ "Wait", "for", "the", "mouse", "to", "move", "to", "a", "location", ".", "This", "function", "will", "block", "until", "the", "condition", "has", "been", "satisified", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L272-L357
def embed(x, hidden_size, z_size, filter_size, bottleneck_kind="dvq", soft_em=False, num_blocks=2, num_residuals=1, block_v_size=None, means=None, name=None): """Embedding function that takes discrete latent and returns embedding. Args: x: Input to the discretization bottleneck. hidden_size: Dimension of the latent state. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. filter_size: Dimension to project embedding by. Used only if bottleneck_kind is semhash. bottleneck_kind: Kind of discretization bottleneck to use; one of dvq, semhash, gumbel-softmax (Default: dvq). soft_em: If True then it uses a multi-sample version of EM (Default: False). num_blocks: Number of blocks in DVQ (Default: 2). num_residuals: Number of residuals (Default: 1). block_v_size: Number of embedding entries per block (Default: None). means: The embedding table for dvq (Default: None). name: Name for the bottleneck scope. Returns: Continuous embedding to be passed on to the decoder. Raises: ValueError: For unknown or missing arguments. 
""" with tf.variable_scope(name, default_name="embed", reuse=tf.AUTO_REUSE): if bottleneck_kind == "semhash": c = int_to_bit(x, z_size) h1a = tf.layers.dense(c, filter_size, name="vch1a") h1b = tf.layers.dense(1.0 - c, filter_size, name="vch1b") h1 = h1a + h1b elif bottleneck_kind == "gumbel-softmax": hot = tf.one_hot(x, 2**z_size) h1 = tf.layers.dense(hot, hidden_size, name="dae_dense") elif bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]: if block_v_size is None: raise ValueError("Bottleneck kind is dvq but block_v_size is None.") if soft_em: assert num_residuals == 1 x_hot_flat = tf.reshape(x, shape=[-1, num_blocks, block_v_size]) h1 = tf.matmul(tf.transpose(x_hot_flat, perm=[1, 0, 2]), means[0]) h1 = tf.transpose(h1, perm=[1, 0, 2]) new_shape = common_layers.shape_list(x) new_shape[-1] = hidden_size h1 = tf.reshape(h1, shape=new_shape) else: shape_x = common_layers.shape_list(x) x_flat = tf.reshape(x, [-1, 1]) c = int_to_bit(x_flat, num_bits=z_size, base=2) shape = common_layers.shape_list(c) new_shape = shape new_shape[-1] = num_residuals new_shape.append(num_blocks) new_shape.append(int(z_size / (num_residuals * num_blocks))) c = tf.to_int32(tf.reshape(c, shape=new_shape)) h1_shape = shape_x h1_shape.append(hidden_size) h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) for i in range(num_residuals): c_residual = bit_to_int( c[:, :, i, :, :], num_bits=int(z_size / (num_residuals * num_blocks)), base=2) c_hot = tf.one_hot(c_residual, depth=block_v_size, axis=-1) c_hot_flat = tf.reshape(c_hot, shape=[-1, num_blocks, block_v_size]) h1_residual = tf.matmul( tf.transpose(c_hot_flat, perm=[1, 0, 2]), means[i]) h1_residual = tf.transpose(h1_residual, perm=[1, 0, 2]) h1_residual = tf.reshape(h1_residual, shape=h1_shape) h1 += h1_residual elif bottleneck_kind == "rounding": h1 = x else: raise ValueError("Unknown bottleneck kind.") return h1
[ "def", "embed", "(", "x", ",", "hidden_size", ",", "z_size", ",", "filter_size", ",", "bottleneck_kind", "=", "\"dvq\"", ",", "soft_em", "=", "False", ",", "num_blocks", "=", "2", ",", "num_residuals", "=", "1", ",", "block_v_size", "=", "None", ",", "me...
Embedding function that takes discrete latent and returns embedding. Args: x: Input to the discretization bottleneck. hidden_size: Dimension of the latent state. z_size: Number of bits, where discrete codes range from 1 to 2**z_size. filter_size: Dimension to project embedding by. Used only if bottleneck_kind is semhash. bottleneck_kind: Kind of discretization bottleneck to use; one of dvq, semhash, gumbel-softmax (Default: dvq). soft_em: If True then it uses a multi-sample version of EM (Default: False). num_blocks: Number of blocks in DVQ (Default: 2). num_residuals: Number of residuals (Default: 1). block_v_size: Number of embedding entries per block (Default: None). means: The embedding table for dvq (Default: None). name: Name for the bottleneck scope. Returns: Continuous embedding to be passed on to the decoder. Raises: ValueError: For unknown or missing arguments.
[ "Embedding", "function", "that", "takes", "discrete", "latent", "and", "returns", "embedding", "." ]
python
train
hfurubotten/enturclient
enturclient/api.py
https://github.com/hfurubotten/enturclient/blob/8230f9e9cf5b3a4911e860bc8cbe621231aa5ae4/enturclient/api.py#L158-L161
def _process_place(self, place: dict, is_platform: bool) -> None: """Extract information from place dictionary.""" place_id = place['id'] self.info[place_id] = Place(place, is_platform)
[ "def", "_process_place", "(", "self", ",", "place", ":", "dict", ",", "is_platform", ":", "bool", ")", "->", "None", ":", "place_id", "=", "place", "[", "'id'", "]", "self", ".", "info", "[", "place_id", "]", "=", "Place", "(", "place", ",", "is_plat...
Extract information from place dictionary.
[ "Extract", "information", "from", "place", "dictionary", "." ]
python
train
frascoweb/frasco
frasco/declarative/loaders.py
https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/declarative/loaders.py#L113-L122
def add_view_file_mapping(self, pattern, cls): """Adds a mapping between a file and a view class. Pattern can be an extension in the form .EXT or a filename. """ if isinstance(pattern, str): if not pattern.endswith("*"): _, ext = os.path.splitext(pattern) self.allowed_extensions.add(ext) pattern = re.compile("^" + re.escape(pattern).replace("\\*", ".+") + "$", re.I) self.view_class_files_map.append((pattern, cls))
[ "def", "add_view_file_mapping", "(", "self", ",", "pattern", ",", "cls", ")", ":", "if", "isinstance", "(", "pattern", ",", "str", ")", ":", "if", "not", "pattern", ".", "endswith", "(", "\"*\"", ")", ":", "_", ",", "ext", "=", "os", ".", "path", "...
Adds a mapping between a file and a view class. Pattern can be an extension in the form .EXT or a filename.
[ "Adds", "a", "mapping", "between", "a", "file", "and", "a", "view", "class", ".", "Pattern", "can", "be", "an", "extension", "in", "the", "form", ".", "EXT", "or", "a", "filename", "." ]
python
train
tcalmant/ipopo
pelix/services/configadmin.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/services/configadmin.py#L213-L255
def __properties_update(self, properties): """ Internal update of configuration properties. Does not notifies the ConfigurationAdmin of this modification. :param properties: the new set of properties for this configuration :return: True if the properties have been updated, else False """ if not properties: # Nothing to do return False with self.__lock: # Make a copy of the properties properties = properties.copy() # Override properties properties[services.CONFIG_PROP_PID] = self.__pid if self.__location: properties[ services.CONFIG_PROP_BUNDLE_LOCATION ] = self.__location if self.__factory_pid: properties[ services.CONFIG_PROP_FACTORY_PID ] = self.__factory_pid # See if new properties are different if properties == self.__properties: return False # Store the copy (before storing data) self.__properties = properties self.__updated = True # Store the data # it will cause FileInstall to update this configuration again, but # this will ignored because self.__properties has already been # saved self.__persistence.store(self.__pid, properties) return True
[ "def", "__properties_update", "(", "self", ",", "properties", ")", ":", "if", "not", "properties", ":", "# Nothing to do", "return", "False", "with", "self", ".", "__lock", ":", "# Make a copy of the properties", "properties", "=", "properties", ".", "copy", "(", ...
Internal update of configuration properties. Does not notifies the ConfigurationAdmin of this modification. :param properties: the new set of properties for this configuration :return: True if the properties have been updated, else False
[ "Internal", "update", "of", "configuration", "properties", ".", "Does", "not", "notifies", "the", "ConfigurationAdmin", "of", "this", "modification", "." ]
python
train
mwouts/jupytext
jupytext/cli.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cli.py#L411-L447
def load_paired_notebook(notebook, fmt, nb_file, log): """Update the notebook with the inputs and outputs of the most recent paired files""" formats = notebook.metadata.get('jupytext', {}).get('formats') if not formats: raise ValueError("'{}' is not a paired notebook".format(nb_file)) max_mtime_inputs = None max_mtime_outputs = None latest_inputs = None latest_outputs = None for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats): if not os.path.isfile(alt_path): continue info = os.lstat(alt_path) if not max_mtime_inputs or info.st_mtime > max_mtime_inputs: max_mtime_inputs = info.st_mtime latest_inputs, input_fmt = alt_path, alt_fmt if alt_path.endswith('.ipynb'): if not max_mtime_outputs or info.st_mtime > max_mtime_outputs: max_mtime_outputs = info.st_mtime latest_outputs = alt_path if latest_outputs and latest_outputs != latest_inputs: log("[jupytext] Loading input cells from '{}'".format(latest_inputs)) inputs = notebook if latest_inputs == nb_file else readf(latest_inputs, input_fmt) check_file_version(inputs, latest_inputs, latest_outputs) log("[jupytext] Loading output cells from '{}'".format(latest_outputs)) outputs = notebook if latest_outputs == nb_file else readf(latest_outputs) combine_inputs_with_outputs(inputs, outputs, input_fmt) return inputs, latest_inputs, latest_outputs log("[jupytext] Loading notebook from '{}'".format(latest_inputs)) if latest_inputs != nb_file: notebook = readf(latest_inputs, input_fmt) return notebook, latest_inputs, latest_outputs
[ "def", "load_paired_notebook", "(", "notebook", ",", "fmt", ",", "nb_file", ",", "log", ")", ":", "formats", "=", "notebook", ".", "metadata", ".", "get", "(", "'jupytext'", ",", "{", "}", ")", ".", "get", "(", "'formats'", ")", "if", "not", "formats",...
Update the notebook with the inputs and outputs of the most recent paired files
[ "Update", "the", "notebook", "with", "the", "inputs", "and", "outputs", "of", "the", "most", "recent", "paired", "files" ]
python
train
pazz/alot
alot/settings/manager.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/settings/manager.py#L64-L128
def read_config(self, path): """ parse alot's config file :param path: path to alot's config file :type path: str """ spec = os.path.join(DEFAULTSPATH, 'alot.rc.spec') newconfig = read_config(path, spec, report_extra=True, checks={ 'mail_container': checks.mail_container, 'force_list': checks.force_list, 'align': checks.align_mode, 'attrtriple': checks.attr_triple, 'gpg_key_hint': checks.gpg_key}) self._config.merge(newconfig) self._config.walk(self._expand_config_values) hooks_path = os.path.expanduser(self._config.get('hooksfile')) try: spec = importlib.util.spec_from_file_location('hooks', hooks_path) self.hooks = importlib.util.module_from_spec(spec) spec.loader.exec_module(self.hooks) except: logging.exception('unable to load hooks file:%s', hooks_path) if 'bindings' in newconfig: self._update_bindings(newconfig['bindings']) tempdir = self._config.get('template_dir') logging.debug('template directory: `%s`' % tempdir) # themes themestring = newconfig['theme'] themes_dir = self._config.get('themes_dir') logging.debug('themes directory: `%s`' % themes_dir) # if config contains theme string use that data_dirs = [os.path.join(d, 'alot/themes') for d in DATA_DIRS] if themestring: # This is a python for/else loop # https://docs.python.org/3/reference/compound_stmts.html#for # # tl/dr; If the loop loads a theme it breaks. If it doesn't break, # then it raises a ConfigError. 
for dir_ in itertools.chain([themes_dir], data_dirs): theme_path = os.path.join(dir_, themestring) if not os.path.exists(os.path.expanduser(theme_path)): logging.warning('Theme `%s` does not exist.', theme_path) else: try: self._theme = Theme(theme_path) except ConfigError as e: raise ConfigError('Theme file `%s` failed ' 'validation:\n%s' % (theme_path, e)) else: break else: raise ConfigError('Could not find theme {}, see log for more ' 'information'.format(themestring)) # if still no theme is set, resort to default if self._theme is None: theme_path = os.path.join(DEFAULTSPATH, 'default.theme') self._theme = Theme(theme_path) self._accounts = self._parse_accounts(self._config) self._accountmap = self._account_table(self._accounts)
[ "def", "read_config", "(", "self", ",", "path", ")", ":", "spec", "=", "os", ".", "path", ".", "join", "(", "DEFAULTSPATH", ",", "'alot.rc.spec'", ")", "newconfig", "=", "read_config", "(", "path", ",", "spec", ",", "report_extra", "=", "True", ",", "c...
parse alot's config file :param path: path to alot's config file :type path: str
[ "parse", "alot", "s", "config", "file", ":", "param", "path", ":", "path", "to", "alot", "s", "config", "file", ":", "type", "path", ":", "str" ]
python
train
lark-parser/lark
examples/standalone/json_parser.py
https://github.com/lark-parser/lark/blob/a798dec77907e74520dd7e90c7b6a4acc680633a/examples/standalone/json_parser.py#L539-L545
def v_args(inline=False, meta=False, tree=False): "A convenience decorator factory, for modifying the behavior of user-supplied visitor methods" if [tree, meta, inline].count(True) > 1: raise ValueError("Visitor functions can either accept tree, or meta, or be inlined. These cannot be combined.") def _visitor_args_dec(obj): return _apply_decorator(obj, _visitor_args_func_dec, inline=inline, meta=meta, whole_tree=tree) return _visitor_args_dec
[ "def", "v_args", "(", "inline", "=", "False", ",", "meta", "=", "False", ",", "tree", "=", "False", ")", ":", "if", "[", "tree", ",", "meta", ",", "inline", "]", ".", "count", "(", "True", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Visito...
A convenience decorator factory, for modifying the behavior of user-supplied visitor methods
[ "A", "convenience", "decorator", "factory", "for", "modifying", "the", "behavior", "of", "user", "-", "supplied", "visitor", "methods" ]
python
train
inasafe/inasafe
safe/definitions/earthquake.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/earthquake.py#L249-L272
def normal_cdf(x, mu=0, sigma=1): """Cumulative Normal Distribution Function. :param x: scalar or array of real numbers. :type x: numpy.ndarray, float :param mu: Mean value. Default 0. :type mu: float, numpy.ndarray :param sigma: Standard deviation. Default 1. :type sigma: float :returns: An approximation of the cdf of the normal. :rtype: numpy.ndarray Note: CDF of the normal distribution is defined as \frac12 [1 + erf(\frac{x - \mu}{\sigma \sqrt{2}})], x \in \R Source: http://en.wikipedia.org/wiki/Normal_distribution """ arg = (x - mu) / (sigma * numpy.sqrt(2)) res = (1 + erf(arg)) / 2 return res
[ "def", "normal_cdf", "(", "x", ",", "mu", "=", "0", ",", "sigma", "=", "1", ")", ":", "arg", "=", "(", "x", "-", "mu", ")", "/", "(", "sigma", "*", "numpy", ".", "sqrt", "(", "2", ")", ")", "res", "=", "(", "1", "+", "erf", "(", "arg", ...
Cumulative Normal Distribution Function. :param x: scalar or array of real numbers. :type x: numpy.ndarray, float :param mu: Mean value. Default 0. :type mu: float, numpy.ndarray :param sigma: Standard deviation. Default 1. :type sigma: float :returns: An approximation of the cdf of the normal. :rtype: numpy.ndarray Note: CDF of the normal distribution is defined as \frac12 [1 + erf(\frac{x - \mu}{\sigma \sqrt{2}})], x \in \R Source: http://en.wikipedia.org/wiki/Normal_distribution
[ "Cumulative", "Normal", "Distribution", "Function", "." ]
python
train
MisterY/gnucash-portfolio
gnucash_portfolio/securitiesaggregate.py
https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L428-L433
def get_aggregate(self, security: Commodity) -> SecurityAggregate: """ Returns the aggregate for the entity """ assert security is not None assert isinstance(security, Commodity) return SecurityAggregate(self.book, security)
[ "def", "get_aggregate", "(", "self", ",", "security", ":", "Commodity", ")", "->", "SecurityAggregate", ":", "assert", "security", "is", "not", "None", "assert", "isinstance", "(", "security", ",", "Commodity", ")", "return", "SecurityAggregate", "(", "self", ...
Returns the aggregate for the entity
[ "Returns", "the", "aggregate", "for", "the", "entity" ]
python
train
bitesofcode/projexui
projexui/widgets/xorbtreewidget/xorbtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbtreewidget/xorbtreewidget.py#L555-L570
def _updateColumnValues(self, index, hidden): """ Updates the column values for the inputed column. :param column | <int> state | <bool> """ if hidden or not self.isVisible(): return column = self.columnOf(index) if not column in self._loadedColumns: self._loadedColumns.add(column) records = self.collectRecords() self.loadColumnsRequested.emit(records, column)
[ "def", "_updateColumnValues", "(", "self", ",", "index", ",", "hidden", ")", ":", "if", "hidden", "or", "not", "self", ".", "isVisible", "(", ")", ":", "return", "column", "=", "self", ".", "columnOf", "(", "index", ")", "if", "not", "column", "in", ...
Updates the column values for the inputed column. :param column | <int> state | <bool>
[ "Updates", "the", "column", "values", "for", "the", "inputed", "column", ".", ":", "param", "column", "|", "<int", ">", "state", "|", "<bool", ">" ]
python
train
inonit/drf-haystack
drf_haystack/generics.py
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/generics.py#L58-L95
def get_object(self): """ Fetch a single document from the data store according to whatever unique identifier is available for that document in the SearchIndex. In cases where the view has multiple ``index_models``, add a ``model`` query parameter containing a single `app_label.model` name to the request in order to override which model to include in the SearchQuerySet. Example: /api/v1/search/42/?model=myapp.person """ queryset = self.get_queryset() if "model" in self.request.query_params: try: app_label, model = map(six.text_type.lower, self.request.query_params["model"].split(".", 1)) ctype = ContentType.objects.get(app_label=app_label, model=model) queryset = self.get_queryset(index_models=[ctype.model_class()]) except (ValueError, ContentType.DoesNotExist): raise Http404("Could not find any models matching '%s'. Make sure to use a valid " "'app_label.model' name for the 'model' query parameter." % self.request.query_params["model"]) lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field if lookup_url_kwarg not in self.kwargs: raise AttributeError( "Expected view %s to be called with a URL keyword argument " "named '%s'. Fix your URL conf, or set the `.lookup_field` " "attribute on the view correctly." % (self.__class__.__name__, lookup_url_kwarg) ) queryset = queryset.filter(self.query_object((self.document_uid_field, self.kwargs[lookup_url_kwarg]))) count = queryset.count() if count == 1: return queryset[0] elif count > 1: raise Http404("Multiple results matches the given query. Expected a single result.") raise Http404("No result matches the given query.")
[ "def", "get_object", "(", "self", ")", ":", "queryset", "=", "self", ".", "get_queryset", "(", ")", "if", "\"model\"", "in", "self", ".", "request", ".", "query_params", ":", "try", ":", "app_label", ",", "model", "=", "map", "(", "six", ".", "text_typ...
Fetch a single document from the data store according to whatever unique identifier is available for that document in the SearchIndex. In cases where the view has multiple ``index_models``, add a ``model`` query parameter containing a single `app_label.model` name to the request in order to override which model to include in the SearchQuerySet. Example: /api/v1/search/42/?model=myapp.person
[ "Fetch", "a", "single", "document", "from", "the", "data", "store", "according", "to", "whatever", "unique", "identifier", "is", "available", "for", "that", "document", "in", "the", "SearchIndex", "." ]
python
train
ultrabug/py3status
py3status/modules/net_rate.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/modules/net_rate.py#L221-L226
def _format_value(self, value): """ Return formatted string """ value, unit = self.py3.format_units(value, unit=self.unit, si=self.si_units) return self.py3.safe_format(self.format_value, {"value": value, "unit": unit})
[ "def", "_format_value", "(", "self", ",", "value", ")", ":", "value", ",", "unit", "=", "self", ".", "py3", ".", "format_units", "(", "value", ",", "unit", "=", "self", ".", "unit", ",", "si", "=", "self", ".", "si_units", ")", "return", "self", "....
Return formatted string
[ "Return", "formatted", "string" ]
python
train
ajenhl/tacl
tacl/catalogue.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/catalogue.py#L60-L79
def load(self, path): """Loads the data from `path` into the catalogue. :param path: path to catalogue file :type path: `str` """ fieldnames = ['work', 'label'] with open(path, 'r', encoding='utf-8', newline='') as fh: reader = csv.DictReader(fh, delimiter=' ', fieldnames=fieldnames, skipinitialspace=True) for row in reader: work, label = row['work'], row['label'] if label: if label not in self._ordered_labels: self._ordered_labels.append(label) if work in self: raise MalformedCatalogueError( CATALOGUE_WORK_RELABELLED_ERROR.format(work)) self[work] = label
[ "def", "load", "(", "self", ",", "path", ")", ":", "fieldnames", "=", "[", "'work'", ",", "'label'", "]", "with", "open", "(", "path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ",", "newline", "=", "''", ")", "as", "fh", ":", "reader", "=", "c...
Loads the data from `path` into the catalogue. :param path: path to catalogue file :type path: `str`
[ "Loads", "the", "data", "from", "path", "into", "the", "catalogue", "." ]
python
train
treycucco/pyebnf
pyebnf/primitive.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/primitive.py#L466-L472
def _count_leading_whitespace(text): """Returns the number of characters at the beginning of text that are whitespace.""" idx = 0 for idx, char in enumerate(text): if not char.isspace(): return idx return idx + 1
[ "def", "_count_leading_whitespace", "(", "text", ")", ":", "idx", "=", "0", "for", "idx", ",", "char", "in", "enumerate", "(", "text", ")", ":", "if", "not", "char", ".", "isspace", "(", ")", ":", "return", "idx", "return", "idx", "+", "1" ]
Returns the number of characters at the beginning of text that are whitespace.
[ "Returns", "the", "number", "of", "characters", "at", "the", "beginning", "of", "text", "that", "are", "whitespace", "." ]
python
test
gamechanger/mongothon
mongothon/events.py
https://github.com/gamechanger/mongothon/blob/5305bdae8e38d09bfe7881f1edc99ac0a2e6b96b/mongothon/events.py#L41-L49
def deregister_all(self, *events): """ Deregisters all handler functions, or those registered against the given event(s). """ if events: for event in events: self._handler_dict[event] = [] else: self._handler_dict = {}
[ "def", "deregister_all", "(", "self", ",", "*", "events", ")", ":", "if", "events", ":", "for", "event", "in", "events", ":", "self", ".", "_handler_dict", "[", "event", "]", "=", "[", "]", "else", ":", "self", ".", "_handler_dict", "=", "{", "}" ]
Deregisters all handler functions, or those registered against the given event(s).
[ "Deregisters", "all", "handler", "functions", "or", "those", "registered", "against", "the", "given", "event", "(", "s", ")", "." ]
python
train
jpablo128/simplystatic
bin/s2.py
https://github.com/jpablo128/simplystatic/blob/91ac579c8f34fa240bef9b87adb0116c6b40b24d/bin/s2.py#L156-L166
def do_rename(argdict): '''Rename a page.''' site = make_site_obj(argdict) slug = argdict['slug'] newtitle = argdict['newtitle'] try: site.rename_page(slug, newtitle) print "Renamed page." except ValueError: # pragma: no cover print "Cannot rename. A page with the given slug does not exist." sys.exit()
[ "def", "do_rename", "(", "argdict", ")", ":", "site", "=", "make_site_obj", "(", "argdict", ")", "slug", "=", "argdict", "[", "'slug'", "]", "newtitle", "=", "argdict", "[", "'newtitle'", "]", "try", ":", "site", ".", "rename_page", "(", "slug", ",", "...
Rename a page.
[ "Rename", "a", "page", "." ]
python
train
IndicoDataSolutions/IndicoIo-python
indicoio/custom/custom.py
https://github.com/IndicoDataSolutions/IndicoIo-python/blob/6f262a23f09d76fede63d1ccb87f9f7cf2cfc8aa/indicoio/custom/custom.py#L433-L440
def vectorize(data, cloud=None, api_key=None, version=None, **kwargs): """ Support for raw features from the custom collections API """ batch = detect_batch(data) data = data_preprocess(data, batch=batch) url_params = {"batch": batch, "api_key": api_key, "version": version, "method": "vectorize"} return api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs)
[ "def", "vectorize", "(", "data", ",", "cloud", "=", "None", ",", "api_key", "=", "None", ",", "version", "=", "None", ",", "*", "*", "kwargs", ")", ":", "batch", "=", "detect_batch", "(", "data", ")", "data", "=", "data_preprocess", "(", "data", ",",...
Support for raw features from the custom collections API
[ "Support", "for", "raw", "features", "from", "the", "custom", "collections", "API" ]
python
train
Richienb/quilt
src/quilt_lang/__init__.py
https://github.com/Richienb/quilt/blob/4a659cac66f5286ad046d54a12fd850be5606643/src/quilt_lang/__init__.py#L1435-L1451
def factors(number): """ Find all of the factors of a number and return it as a list. :type number: integer :param number: The number to find the factors for. """ if not (isinstance(number, int)): raise TypeError( "Incorrect number type provided. Only integers are accepted.") factors = [] for i in range(1, number + 1): if number % i == 0: factors.append(i) return factors
[ "def", "factors", "(", "number", ")", ":", "if", "not", "(", "isinstance", "(", "number", ",", "int", ")", ")", ":", "raise", "TypeError", "(", "\"Incorrect number type provided. Only integers are accepted.\"", ")", "factors", "=", "[", "]", "for", "i", "in", ...
Find all of the factors of a number and return it as a list. :type number: integer :param number: The number to find the factors for.
[ "Find", "all", "of", "the", "factors", "of", "a", "number", "and", "return", "it", "as", "a", "list", "." ]
python
train
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L1137-L1154
def current_id(self): """ Determine the current revision identifier for the working directory of this Repository Returns: str: git HEAD revision identifier (``git rev-parse --short HEAD``) """ try: cmd = ['git', 'rev-parse', '--short', 'HEAD'] return self.sh(cmd, shell=False, ignore_error=True).rstrip() except subprocess.CalledProcessError as e: log.exception(e) #if e.returncode == 128 # bare repo return None
[ "def", "current_id", "(", "self", ")", ":", "try", ":", "cmd", "=", "[", "'git'", ",", "'rev-parse'", ",", "'--short'", ",", "'HEAD'", "]", "return", "self", ".", "sh", "(", "cmd", ",", "shell", "=", "False", ",", "ignore_error", "=", "True", ")", ...
Determine the current revision identifier for the working directory of this Repository Returns: str: git HEAD revision identifier (``git rev-parse --short HEAD``)
[ "Determine", "the", "current", "revision", "identifier", "for", "the", "working", "directory", "of", "this", "Repository" ]
python
train
fstab50/metal
metal/script_utils.py
https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/script_utils.py#L364-L388
def json_integrity_multilevel(d1, d2): """ still under development """ keys = [x for x in d2] for key in keys: d1_keys = set(d1.keys()) d2_keys = set(d2.keys()) intersect_keys = d1_keys.intersection(d2_keys) added = d1_keys - d2_keys removed = d2_keys - d1_keys modified = {o : (d1[o], d2[o]) for o in intersect_keys if d1[o] != d2[o]} same = set(o for o in intersect_keys if d1[o] == d2[o]) if added == removed == set(): d1_values = [x for x in d1.values()][0] print('d1_values: ' + str(d1_values)) d2_values = [x for x in d2.values()][0] print('d2_values: ' + str(d2_values)) length = len(d2_values) print('length = %d' % length) pdb.set_trace() if length > 1: d1 = d1_values.items() d2 = d2_values.items() else: return False return True
[ "def", "json_integrity_multilevel", "(", "d1", ",", "d2", ")", ":", "keys", "=", "[", "x", "for", "x", "in", "d2", "]", "for", "key", "in", "keys", ":", "d1_keys", "=", "set", "(", "d1", ".", "keys", "(", ")", ")", "d2_keys", "=", "set", "(", "...
still under development
[ "still", "under", "development" ]
python
train
CamDavidsonPilon/lifelines
lifelines/utils/btree.py
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/utils/btree.py#L66-L79
def insert(self, value): """Insert an occurrence of `value` into the btree.""" i = 0 n = len(self._tree) while i < n: cur = self._tree[i] self._counts[i] += 1 if value < cur: i = 2 * i + 1 elif value > cur: i = 2 * i + 2 else: return raise ValueError("Value %s not contained in tree." "Also, the counts are now messed up." % value)
[ "def", "insert", "(", "self", ",", "value", ")", ":", "i", "=", "0", "n", "=", "len", "(", "self", ".", "_tree", ")", "while", "i", "<", "n", ":", "cur", "=", "self", ".", "_tree", "[", "i", "]", "self", ".", "_counts", "[", "i", "]", "+=",...
Insert an occurrence of `value` into the btree.
[ "Insert", "an", "occurrence", "of", "value", "into", "the", "btree", "." ]
python
train
ShenggaoZhu/midict
midict/__init__.py
https://github.com/ShenggaoZhu/midict/blob/2fad2edcfb753035b443a70fe15852affae1b5bb/midict/__init__.py#L152-L185
def _key_to_index(keys, key, single_only=False): 'convert ``key`` of various types to int or list of int' if isinstance(key, int): # validate the int index try: keys[key] except IndexError: raise KeyError('Index out of range of keys: %s' % (key,)) if key < 0: key += len(keys) # always positive index return key # keys = d.keys() if not single_only: if isinstance(key, (tuple, list)): return [_key_to_index_single(keys, k) for k in key] if isinstance(key, slice): start, stop, step = key.start, key.stop, key.step try: MI_check_index_name(start) start = keys.index(start) except TypeError: pass try: MI_check_index_name(stop) stop = keys.index(stop) except TypeError: pass # return slice(start, stop, step) args = slice(start, stop, step).indices(len(keys)) return force_list(range(*args)) # list of indices try: return keys.index(key) except ValueError: # not IndexError raise KeyError('Key not found: %s' % (key,))
[ "def", "_key_to_index", "(", "keys", ",", "key", ",", "single_only", "=", "False", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "# validate the int index", "try", ":", "keys", "[", "key", "]", "except", "IndexError", ":", "raise", "Key...
convert ``key`` of various types to int or list of int
[ "convert", "key", "of", "various", "types", "to", "int", "or", "list", "of", "int" ]
python
train
saltstack/salt
salt/modules/boto_iam.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L86-L105
def instance_profile_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an instance profile exists. CLI Example: .. code-block:: bash salt myminion boto_iam.instance_profile_exists myiprofile ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: # Boto weirdly returns an exception here if an instance profile doesn't # exist. conn.get_instance_profile(name) return True except boto.exception.BotoServerError: return False
[ "def", "instance_profile_exists", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",",...
Check to see if an instance profile exists. CLI Example: .. code-block:: bash salt myminion boto_iam.instance_profile_exists myiprofile
[ "Check", "to", "see", "if", "an", "instance", "profile", "exists", "." ]
python
train
CalebBell/ht
ht/insulation.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/insulation.py#L413-L445
def ASHRAE_k(ID): r'''Returns thermal conductivity of a building or insulating material from a table in [1]_. Thermal conductivity is independent of temperature here. Many entries in the table are listed for varying densities, but the appropriate ID from the table must be selected to account for that. Parameters ---------- ID : str ID corresponding to a material in the dictionary `ASHRAE` Returns ------- k : float Thermal conductivity of the material, [W/m/K] Examples -------- >>> ASHRAE_k(ID='Mineral fiber') 0.036 References ---------- .. [1] ASHRAE Handbook: Fundamentals. American Society of Heating, Refrigerating and Air-Conditioning Engineers, Incorporated, 2013. ''' values = ASHRAE[ID] if values[2]: return values[2] else: R = values[3] t = values[4]/1000. # mm to m return R_to_k(R, t)
[ "def", "ASHRAE_k", "(", "ID", ")", ":", "values", "=", "ASHRAE", "[", "ID", "]", "if", "values", "[", "2", "]", ":", "return", "values", "[", "2", "]", "else", ":", "R", "=", "values", "[", "3", "]", "t", "=", "values", "[", "4", "]", "/", ...
r'''Returns thermal conductivity of a building or insulating material from a table in [1]_. Thermal conductivity is independent of temperature here. Many entries in the table are listed for varying densities, but the appropriate ID from the table must be selected to account for that. Parameters ---------- ID : str ID corresponding to a material in the dictionary `ASHRAE` Returns ------- k : float Thermal conductivity of the material, [W/m/K] Examples -------- >>> ASHRAE_k(ID='Mineral fiber') 0.036 References ---------- .. [1] ASHRAE Handbook: Fundamentals. American Society of Heating, Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
[ "r", "Returns", "thermal", "conductivity", "of", "a", "building", "or", "insulating", "material", "from", "a", "table", "in", "[", "1", "]", "_", ".", "Thermal", "conductivity", "is", "independent", "of", "temperature", "here", ".", "Many", "entries", "in", ...
python
train
hyperledger/indy-sdk
wrappers/python/indy/crypto.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/crypto.py#L355-L400
async def anon_decrypt(wallet_handle: int, recipient_vk: str, encrypted_msg: bytes) -> bytes: """ Decrypts a message by anonymous-encryption scheme. Sealed boxes are designed to anonymously send messages to a Recipient given its public key. Only the Recipient can decrypt these messages, using its private key. While the Recipient can verify the integrity of the message, it cannot verify the identity of the Sender. Note to use DID keys with this function you can call key_for_did to get key id (verkey) for specific DID. Note: use unpack_message function for A2A goals. :param wallet_handle: wallet handler (created by open_wallet). :param recipient_vk: id (verkey) of my key. The key must be created by calling indy_create_key or create_and_store_my_did :param encrypted_msg: encrypted message :return: decrypted message as an array of bytes """ logger = logging.getLogger(__name__) logger.debug("anon_decrypt: >>> wallet_handle: %r, recipient_vk: %r, encrypted_msg: %r", wallet_handle, recipient_vk, encrypted_msg) def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32): return bytes(arr_ptr[:arr_len]), if not hasattr(anon_decrypt, "cb"): logger.debug("anon_decrypt: Creating callback") anon_decrypt.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb) c_wallet_handle = c_int32(wallet_handle) c_recipient_vk = c_char_p(recipient_vk.encode('utf-8')) c_encrypted_msg_len = c_uint32(len(encrypted_msg)) decrypted_message = await do_call('indy_crypto_anon_decrypt', c_wallet_handle, c_recipient_vk, encrypted_msg, c_encrypted_msg_len, anon_decrypt.cb) logger.debug("crypto_box_seal_open: <<< res: %r", decrypted_message) return decrypted_message
[ "async", "def", "anon_decrypt", "(", "wallet_handle", ":", "int", ",", "recipient_vk", ":", "str", ",", "encrypted_msg", ":", "bytes", ")", "->", "bytes", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", ...
Decrypts a message by anonymous-encryption scheme. Sealed boxes are designed to anonymously send messages to a Recipient given its public key. Only the Recipient can decrypt these messages, using its private key. While the Recipient can verify the integrity of the message, it cannot verify the identity of the Sender. Note to use DID keys with this function you can call key_for_did to get key id (verkey) for specific DID. Note: use unpack_message function for A2A goals. :param wallet_handle: wallet handler (created by open_wallet). :param recipient_vk: id (verkey) of my key. The key must be created by calling indy_create_key or create_and_store_my_did :param encrypted_msg: encrypted message :return: decrypted message as an array of bytes
[ "Decrypts", "a", "message", "by", "anonymous", "-", "encryption", "scheme", "." ]
python
train
bwohlberg/sporco
sporco/admm/tvl2.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/tvl2.py#L240-L247
def ystep(self): r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`. """ self.Y = np.asarray(sp.prox_l2( self.AX + self.U, (self.lmbda/self.rho)*self.Wtvna, axis=self.saxes), dtype=self.dtype)
[ "def", "ystep", "(", "self", ")", ":", "self", ".", "Y", "=", "np", ".", "asarray", "(", "sp", ".", "prox_l2", "(", "self", ".", "AX", "+", "self", ".", "U", ",", "(", "self", ".", "lmbda", "/", "self", ".", "rho", ")", "*", "self", ".", "W...
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
[ "r", "Minimise", "Augmented", "Lagrangian", "with", "respect", "to", ":", "math", ":", "\\", "mathbf", "{", "y", "}", "." ]
python
train
mitsei/dlkit
dlkit/json_/resource/simple_agent.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/simple_agent.py#L61-L66
def get_display_name(self): """Creates a display name""" return DisplayText(text=self.id_.get_identifier(), language_type=DEFAULT_LANGUAGE_TYPE, script_type=DEFAULT_SCRIPT_TYPE, format_type=DEFAULT_FORMAT_TYPE,)
[ "def", "get_display_name", "(", "self", ")", ":", "return", "DisplayText", "(", "text", "=", "self", ".", "id_", ".", "get_identifier", "(", ")", ",", "language_type", "=", "DEFAULT_LANGUAGE_TYPE", ",", "script_type", "=", "DEFAULT_SCRIPT_TYPE", ",", "format_typ...
Creates a display name
[ "Creates", "a", "display", "name" ]
python
train
shoebot/shoebot
lib/web/BeautifulSoup.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/BeautifulSoup.py#L1524-L1569
def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if (self.declaredHTMLEncoding is not None or self.originalEncoding == self.fromEncoding): # An HTML encoding was sniffed while converting # the document to Unicode, or an HTML encoding was # sniffed during a previous pass through the # document, or an encoding was specified # explicitly and it worked. Rewrite the meta tag. def rewrite(match): return match.group(1) + "%SOUP-ENCODING%" newAttr = self.CHARSET_RE.sub(rewrite, contentType) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the encoding information. newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing pass tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True
[ "def", "start_meta", "(", "self", ",", "attrs", ")", ":", "httpEquiv", "=", "None", "contentType", "=", "None", "contentTypeIndex", "=", "None", "tagNeedsEncodingSubstitution", "=", "False", "for", "i", "in", "range", "(", "0", ",", "len", "(", "attrs", ")...
Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.
[ "Beautiful", "Soup", "can", "detect", "a", "charset", "included", "in", "a", "META", "tag", "try", "to", "convert", "the", "document", "to", "that", "charset", "and", "re", "-", "parse", "the", "document", "from", "the", "beginning", "." ]
python
valid
google/transitfeed
transitfeed/shapelib.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/shapelib.py#L392-L408
def AddPoly(self, poly, smart_duplicate_handling=True): """ Adds a new polyline to the collection. """ inserted_name = poly.GetName() if poly.GetName() in self._name_to_shape: if not smart_duplicate_handling: raise ShapeError("Duplicate shape found: " + poly.GetName()) print ("Warning: duplicate shape id being added to collection: " + poly.GetName()) if poly.GreedyPolyMatchDist(self._name_to_shape[poly.GetName()]) < 10: print(" (Skipping as it apears to be an exact duplicate)") else: print(" (Adding new shape variant with uniquified name)") inserted_name = "%s-%d" % (inserted_name, len(self._name_to_shape)) self._name_to_shape[inserted_name] = poly
[ "def", "AddPoly", "(", "self", ",", "poly", ",", "smart_duplicate_handling", "=", "True", ")", ":", "inserted_name", "=", "poly", ".", "GetName", "(", ")", "if", "poly", ".", "GetName", "(", ")", "in", "self", ".", "_name_to_shape", ":", "if", "not", "...
Adds a new polyline to the collection.
[ "Adds", "a", "new", "polyline", "to", "the", "collection", "." ]
python
train
Fizzadar/pyinfra
pyinfra/modules/ssh.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/ssh.py#L17-L43
def keyscan(state, host, hostname, force=False): ''' Check/add hosts to the ``~/.ssh/known_hosts`` file. + hostname: hostname that should have a key in ``known_hosts`` + force: if the key already exists, remove and rescan ''' yield files.directory( state, host, '~/.ssh', mode=700, ) hostname_present = host.fact.find_in_file( '~/.ssh/known_hosts', hostname, ) keyscan_command = 'ssh-keyscan {0} >> ~/.ssh/known_hosts'.format(hostname) if not hostname_present: yield keyscan_command elif force: yield 'ssh-keygen -R {0}'.format(hostname) yield keyscan_command
[ "def", "keyscan", "(", "state", ",", "host", ",", "hostname", ",", "force", "=", "False", ")", ":", "yield", "files", ".", "directory", "(", "state", ",", "host", ",", "'~/.ssh'", ",", "mode", "=", "700", ",", ")", "hostname_present", "=", "host", "....
Check/add hosts to the ``~/.ssh/known_hosts`` file. + hostname: hostname that should have a key in ``known_hosts`` + force: if the key already exists, remove and rescan
[ "Check", "/", "add", "hosts", "to", "the", "~", "/", ".", "ssh", "/", "known_hosts", "file", "." ]
python
train
pyusb/pyusb
usb/libloader.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/libloader.py#L54-L86
def locate_library (candidates, find_library=ctypes.util.find_library): """Tries to locate a library listed in candidates using the given find_library() function (or ctypes.util.find_library). Returns the first library found, which can be the library's name or the path to the library file, depending on find_library(). Returns None if no library is found. arguments: * candidates -- iterable with library names * find_library -- function that takes one positional arg (candidate) and returns a non-empty str if a library has been found. Any "false" value (None,False,empty str) is interpreted as "library not found". Defaults to ctypes.util.find_library if not given or None. """ if find_library is None: find_library = ctypes.util.find_library use_dll_workaround = ( sys.platform == 'win32' and find_library is ctypes.util.find_library ) for candidate in candidates: # Workaround for CPython 3.3 issue#16283 / pyusb #14 if use_dll_workaround: candidate += '.dll' libname = find_library(candidate) if libname: return libname # -- end for return None
[ "def", "locate_library", "(", "candidates", ",", "find_library", "=", "ctypes", ".", "util", ".", "find_library", ")", ":", "if", "find_library", "is", "None", ":", "find_library", "=", "ctypes", ".", "util", ".", "find_library", "use_dll_workaround", "=", "("...
Tries to locate a library listed in candidates using the given find_library() function (or ctypes.util.find_library). Returns the first library found, which can be the library's name or the path to the library file, depending on find_library(). Returns None if no library is found. arguments: * candidates -- iterable with library names * find_library -- function that takes one positional arg (candidate) and returns a non-empty str if a library has been found. Any "false" value (None,False,empty str) is interpreted as "library not found". Defaults to ctypes.util.find_library if not given or None.
[ "Tries", "to", "locate", "a", "library", "listed", "in", "candidates", "using", "the", "given", "find_library", "()", "function", "(", "or", "ctypes", ".", "util", ".", "find_library", ")", ".", "Returns", "the", "first", "library", "found", "which", "can", ...
python
train
signetlabdei/sem
sem/manager.py
https://github.com/signetlabdei/sem/blob/5077dd7a6d15644a18790bb6fde320e905f0fef0/sem/manager.py#L480-L516
def get_results_as_xarray(self, parameter_space, result_parsing_function, output_labels, runs): """ Return the results relative to the desired parameter space in the form of an xarray data structure. Args: parameter_space (dict): The space of parameters to export. result_parsing_function (function): user-defined function, taking a result dictionary as argument, that can be used to parse the result files and return a list of values. output_labels (list): a list of labels to apply to the results dimensions, output by the result_parsing_function. runs (int): the number of runs to export for each parameter combination. """ np_array = np.array( self.get_space( self.db.get_complete_results(), {}, collections.OrderedDict([(k, v) for k, v in parameter_space.items()]), runs, result_parsing_function)) # Create a parameter space only containing the variable parameters clean_parameter_space = collections.OrderedDict( [(k, v) for k, v in parameter_space.items()]) clean_parameter_space['runs'] = range(runs) if isinstance(output_labels, list): clean_parameter_space['metrics'] = output_labels xr_array = xr.DataArray(np_array, coords=clean_parameter_space, dims=list(clean_parameter_space.keys())) return xr_array
[ "def", "get_results_as_xarray", "(", "self", ",", "parameter_space", ",", "result_parsing_function", ",", "output_labels", ",", "runs", ")", ":", "np_array", "=", "np", ".", "array", "(", "self", ".", "get_space", "(", "self", ".", "db", ".", "get_complete_res...
Return the results relative to the desired parameter space in the form of an xarray data structure. Args: parameter_space (dict): The space of parameters to export. result_parsing_function (function): user-defined function, taking a result dictionary as argument, that can be used to parse the result files and return a list of values. output_labels (list): a list of labels to apply to the results dimensions, output by the result_parsing_function. runs (int): the number of runs to export for each parameter combination.
[ "Return", "the", "results", "relative", "to", "the", "desired", "parameter", "space", "in", "the", "form", "of", "an", "xarray", "data", "structure", "." ]
python
train
bitesofcode/projex
projex/enum.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/enum.py#L139-L160
def displayText(self, value, blank='', joiner=', '): """ Returns the display text for the value associated with the inputted text. This will result in a comma separated list of labels for the value, or the blank text provided if no text is found. :param value | <variant> blank | <str> joiner | <str> :return <str> """ if value is None: return '' labels = [] for key, my_value in sorted(self.items(), key=lambda x: x[1]): if value & my_value: labels.append(self._labels.get(my_value, text.pretty(key))) return joiner.join(labels) or blank
[ "def", "displayText", "(", "self", ",", "value", ",", "blank", "=", "''", ",", "joiner", "=", "', '", ")", ":", "if", "value", "is", "None", ":", "return", "''", "labels", "=", "[", "]", "for", "key", ",", "my_value", "in", "sorted", "(", "self", ...
Returns the display text for the value associated with the inputted text. This will result in a comma separated list of labels for the value, or the blank text provided if no text is found. :param value | <variant> blank | <str> joiner | <str> :return <str>
[ "Returns", "the", "display", "text", "for", "the", "value", "associated", "with", "the", "inputted", "text", ".", "This", "will", "result", "in", "a", "comma", "separated", "list", "of", "labels", "for", "the", "value", "or", "the", "blank", "text", "provi...
python
train
quantmind/pulsar
pulsar/utils/internet.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/internet.py#L54-L91
def parse_connection_string(connection_string, default_port=8000): """Converts the ``connection_string`` into a three elements tuple ``(scheme, host, params)`` where ``scheme`` is a string, ``host`` could be a string or a two elements tuple (for a tcp address) and ``params`` a dictionary of parameters. The ``default_port`` parameter can be used to set the port if a port is not available in the ``connection_string``. For example:: >>> parse_connection_string('http://127.0.0.1:9080') ('http', ('127.0.0.1', 9080), {}) and this example:: >>> parse_connection_string('redis://127.0.0.1:6379?db=3&password=bla') ('redis', ('127.0.0.1', 6379), {'db': '3', 'password': 'bla'}) """ if '://' not in connection_string: connection_string = 'dummy://%s' % connection_string scheme, host, path, query, fragment = urlsplit(connection_string) if not scheme and not host: host, path = path, '' elif path and not query: query, path = path, '' if query: if query.find('?'): path = query else: query = query[1:] if path: raise ValueError("Address must not have a path. Found '%s'" % path) if query: params = dict(parse_qsl(query)) else: params = {} if scheme == 'dummy': scheme = '' return scheme, parse_address(host, default_port), params
[ "def", "parse_connection_string", "(", "connection_string", ",", "default_port", "=", "8000", ")", ":", "if", "'://'", "not", "in", "connection_string", ":", "connection_string", "=", "'dummy://%s'", "%", "connection_string", "scheme", ",", "host", ",", "path", ",...
Converts the ``connection_string`` into a three elements tuple ``(scheme, host, params)`` where ``scheme`` is a string, ``host`` could be a string or a two elements tuple (for a tcp address) and ``params`` a dictionary of parameters. The ``default_port`` parameter can be used to set the port if a port is not available in the ``connection_string``. For example:: >>> parse_connection_string('http://127.0.0.1:9080') ('http', ('127.0.0.1', 9080), {}) and this example:: >>> parse_connection_string('redis://127.0.0.1:6379?db=3&password=bla') ('redis', ('127.0.0.1', 6379), {'db': '3', 'password': 'bla'})
[ "Converts", "the", "connection_string", "into", "a", "three", "elements", "tuple", "(", "scheme", "host", "params", ")", "where", "scheme", "is", "a", "string", "host", "could", "be", "a", "string", "or", "a", "two", "elements", "tuple", "(", "for", "a", ...
python
train
relekang/python-semantic-release
semantic_release/history/__init__.py
https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/history/__init__.py#L34-L55
def get_current_version_by_config_file() -> str: """ Get current version from the version variable defined in the configuration :return: A string with the current version number :raises ImproperConfigurationError: if version variable cannot be parsed """ debug('get_current_version_by_config_file') filename, variable = config.get('semantic_release', 'version_variable').split(':') variable = variable.strip() debug(filename, variable) with open(filename, 'r') as fd: parts = re.search( r'^{0}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(variable), fd.read(), re.MULTILINE ) if not parts: raise ImproperConfigurationError debug(parts) return parts.group(1)
[ "def", "get_current_version_by_config_file", "(", ")", "->", "str", ":", "debug", "(", "'get_current_version_by_config_file'", ")", "filename", ",", "variable", "=", "config", ".", "get", "(", "'semantic_release'", ",", "'version_variable'", ")", ".", "split", "(", ...
Get current version from the version variable defined in the configuration :return: A string with the current version number :raises ImproperConfigurationError: if version variable cannot be parsed
[ "Get", "current", "version", "from", "the", "version", "variable", "defined", "in", "the", "configuration" ]
python
train
skoczen/will
will/plugins/chat_room/rooms.py
https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/chat_room/rooms.py#L19-L23
def participants_in_room(self, message): """who is in this room?: List all the participants of this room.""" room = self.get_room_from_message(message) context = {"participants": room.participants, } self.say(rendered_template("participants.html", context), message=message, html=True)
[ "def", "participants_in_room", "(", "self", ",", "message", ")", ":", "room", "=", "self", ".", "get_room_from_message", "(", "message", ")", "context", "=", "{", "\"participants\"", ":", "room", ".", "participants", ",", "}", "self", ".", "say", "(", "ren...
who is in this room?: List all the participants of this room.
[ "who", "is", "in", "this", "room?", ":", "List", "all", "the", "participants", "of", "this", "room", "." ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/natural_language_classifier_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_classifier_v1.py#L753-L762
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'top_class') and self.top_class is not None: _dict['top_class'] = self.top_class if hasattr(self, 'classes') and self.classes is not None: _dict['classes'] = [x._to_dict() for x in self.classes] return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'text'", ")", "and", "self", ".", "text", "is", "not", "None", ":", "_dict", "[", "'text'", "]", "=", "self", ".", "text", "if", "hasattr", "(",...
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewwidget.py#L179-L202
def exportProfile(self, filename=''): """ Exports the current profile to a file. :param filename | <str> """ if not (filename and isinstance(filename, basestring)): filename = QtGui.QFileDialog.getSaveFileName(self, 'Export Layout as...', QtCore.QDir.currentPath(), 'XView (*.xview)') if type(filename) == tuple: filename = filename[0] filename = nativestring(filename) if not filename: return if not filename.endswith('.xview'): filename += '.xview' profile = self.saveProfile() profile.save(filename)
[ "def", "exportProfile", "(", "self", ",", "filename", "=", "''", ")", ":", "if", "not", "(", "filename", "and", "isinstance", "(", "filename", ",", "basestring", ")", ")", ":", "filename", "=", "QtGui", ".", "QFileDialog", ".", "getSaveFileName", "(", "s...
Exports the current profile to a file. :param filename | <str>
[ "Exports", "the", "current", "profile", "to", "a", "file", ".", ":", "param", "filename", "|", "<str", ">" ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/system.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/system.py#L1151-L1175
def get_service(name): """ Get the service descriptor for the given service name. @see: L{start_service}, L{stop_service}, L{pause_service}, L{resume_service} @type name: str @param name: Service unique name. You can get this value from the C{ServiceName} member of the service descriptors returned by L{get_services} or L{get_active_services}. @rtype: L{win32.ServiceStatusProcess} @return: Service status descriptor. """ with win32.OpenSCManager( dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE ) as hSCManager: with win32.OpenService(hSCManager, name, dwDesiredAccess = win32.SERVICE_QUERY_STATUS ) as hService: try: return win32.QueryServiceStatusEx(hService) except AttributeError: return win32.QueryServiceStatus(hService)
[ "def", "get_service", "(", "name", ")", ":", "with", "win32", ".", "OpenSCManager", "(", "dwDesiredAccess", "=", "win32", ".", "SC_MANAGER_ENUMERATE_SERVICE", ")", "as", "hSCManager", ":", "with", "win32", ".", "OpenService", "(", "hSCManager", ",", "name", ",...
Get the service descriptor for the given service name. @see: L{start_service}, L{stop_service}, L{pause_service}, L{resume_service} @type name: str @param name: Service unique name. You can get this value from the C{ServiceName} member of the service descriptors returned by L{get_services} or L{get_active_services}. @rtype: L{win32.ServiceStatusProcess} @return: Service status descriptor.
[ "Get", "the", "service", "descriptor", "for", "the", "given", "service", "name", "." ]
python
train
senaite/senaite.core
bika/lims/upgrade/v01_02_009.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/upgrade/v01_02_009.py#L250-L265
def update_rejection_permissions(portal): """Adds the permission 'Reject Analysis Request' and update the permission mappings accordingly """ updated = update_rejection_permissions_for(portal, "bika_ar_workflow", "Reject Analysis Request") if updated: brains = api.search(dict(portal_type="AnalysisRequest"), CATALOG_ANALYSIS_REQUEST_LISTING) update_rolemappings_for(brains, "bika_ar_workflow") updated = update_rejection_permissions_for(portal, "bika_sample_workflow", "Reject Sample") if updated: brains = api.search(dict(portal_type="Sample"), "bika_catalog") update_rolemappings_for(brains, "bika_sample_workflow")
[ "def", "update_rejection_permissions", "(", "portal", ")", ":", "updated", "=", "update_rejection_permissions_for", "(", "portal", ",", "\"bika_ar_workflow\"", ",", "\"Reject Analysis Request\"", ")", "if", "updated", ":", "brains", "=", "api", ".", "search", "(", "...
Adds the permission 'Reject Analysis Request' and update the permission mappings accordingly
[ "Adds", "the", "permission", "Reject", "Analysis", "Request", "and", "update", "the", "permission", "mappings", "accordingly" ]
python
train
eonpatapon/contrail-api-cli
contrail_api_cli/client.py
https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/client.py#L181-L204
def id_to_fqname(self, uuid, type=None): """ Return fq_name and type for uuid If `type` is provided check that uuid is actually a resource of type `type`. Raise HttpError if it's not the case. :param uuid: resource uuid :type uuid: UUIDv4 str :param type: resource type :type type: str :rtype: dict {'type': str, 'fq_name': FQName} :raises HttpError: uuid not found """ data = { "uuid": uuid } result = self.post_json(self.make_url("/id-to-fqname"), data) result['fq_name'] = FQName(result['fq_name']) if type is not None and not result['type'].replace('_', '-') == type: raise HttpError('uuid %s not found for type %s' % (uuid, type), http_status=404) return result
[ "def", "id_to_fqname", "(", "self", ",", "uuid", ",", "type", "=", "None", ")", ":", "data", "=", "{", "\"uuid\"", ":", "uuid", "}", "result", "=", "self", ".", "post_json", "(", "self", ".", "make_url", "(", "\"/id-to-fqname\"", ")", ",", "data", ")...
Return fq_name and type for uuid If `type` is provided check that uuid is actually a resource of type `type`. Raise HttpError if it's not the case. :param uuid: resource uuid :type uuid: UUIDv4 str :param type: resource type :type type: str :rtype: dict {'type': str, 'fq_name': FQName} :raises HttpError: uuid not found
[ "Return", "fq_name", "and", "type", "for", "uuid" ]
python
train
smarie/python-parsyfiles
parsyfiles/filesystem_mapping.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/filesystem_mapping.py#L505-L547
def find_multifile_object_children(self, parent_location, no_errors: bool = False) -> Dict[str, str]: """ Implementation of the parent abstract method. In this mode, root_path should be a valid folder, and each item is a subfolder (multifile) or a file (singlefile): location/ |-singlefile_sub_item1.<ext> |-singlefile_sub_item2.<ext> |-multifile_sub_item3/ |- ... :param parent_location: the absolute file prefix of the parent item. it may be a folder (non-flat mode) or a folder + a file name prefix (flat mode) :param no_errors: a boolean used in internal recursive calls in order to catch errors. Should not be changed by users. :return: a dictionary of {item_name : item_prefix} """ # (1) Assert that folder_path is a folder if not isdir(parent_location): if no_errors: return dict() else: raise ValueError('Cannot find a multifileobject at location \'' + parent_location + '\' : location is ' 'not a valid folder') else: # (2) List folders (multifile objects or collections) all_subfolders = [dir_ for dir_ in listdir(parent_location) if isdir(join(parent_location, dir_))] items = {item_name: join(parent_location, item_name) for item_name in all_subfolders} # (3) List singlefiles *without* their extension items.update({ item_name: join(parent_location, item_name) for item_name in [file_name[0:file_name.rindex(EXT_SEPARATOR)] for file_name in listdir(parent_location) if isfile(join(parent_location, file_name)) and EXT_SEPARATOR in file_name] }) # (4) return all return items
[ "def", "find_multifile_object_children", "(", "self", ",", "parent_location", ",", "no_errors", ":", "bool", "=", "False", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "# (1) Assert that folder_path is a folder", "if", "not", "isdir", "(", "parent_location...
Implementation of the parent abstract method. In this mode, root_path should be a valid folder, and each item is a subfolder (multifile) or a file (singlefile): location/ |-singlefile_sub_item1.<ext> |-singlefile_sub_item2.<ext> |-multifile_sub_item3/ |- ... :param parent_location: the absolute file prefix of the parent item. it may be a folder (non-flat mode) or a folder + a file name prefix (flat mode) :param no_errors: a boolean used in internal recursive calls in order to catch errors. Should not be changed by users. :return: a dictionary of {item_name : item_prefix}
[ "Implementation", "of", "the", "parent", "abstract", "method", "." ]
python
train
streamlink/streamlink
src/streamlink/plugins/viasat.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugins/viasat.py#L121-L125
def _get_stream_id(self, text): """Try to find a stream_id""" m = self._image_re.search(text) if m: return m.group("stream_id")
[ "def", "_get_stream_id", "(", "self", ",", "text", ")", ":", "m", "=", "self", ".", "_image_re", ".", "search", "(", "text", ")", "if", "m", ":", "return", "m", ".", "group", "(", "\"stream_id\"", ")" ]
Try to find a stream_id
[ "Try", "to", "find", "a", "stream_id" ]
python
test
Titan-C/slaveparticles
slaveparticles/spins.py
https://github.com/Titan-C/slaveparticles/blob/e4c2f5afb1a7b195517ef2f1b5cc758965036aab/slaveparticles/spins.py#L25-L32
def fermion_avg(efermi, norm_hopping, func): """calcules for every slave it's average over the desired observable""" if func == 'ekin': func = bethe_ekin_zeroT elif func == 'ocupation': func = bethe_filling_zeroT return np.asarray([func(ef, tz) for ef, tz in zip(efermi, norm_hopping)])
[ "def", "fermion_avg", "(", "efermi", ",", "norm_hopping", ",", "func", ")", ":", "if", "func", "==", "'ekin'", ":", "func", "=", "bethe_ekin_zeroT", "elif", "func", "==", "'ocupation'", ":", "func", "=", "bethe_filling_zeroT", "return", "np", ".", "asarray",...
calcules for every slave it's average over the desired observable
[ "calcules", "for", "every", "slave", "it", "s", "average", "over", "the", "desired", "observable" ]
python
train
marrow/web.db
web/db/sa.py
https://github.com/marrow/web.db/blob/c755fbff7028a5edc223d6a631b8421858274fc4/web/db/sa.py#L36-L56
def start(self, context): """Construct the SQLAlchemy engine and session factory.""" if __debug__: log.info("Connecting SQLAlchemy database layer.", extra=dict( uri = redact_uri(self.uri), config = self.config, alias = self.alias, )) # Construct the engine. engine = self.engine = create_engine(self.uri, **self.config) # Construct the session factory. self.Session = scoped_session(sessionmaker(bind=engine)) # Test the connection. engine.connect().close() # Assign the engine to our database alias. context.db[self.alias] = engine
[ "def", "start", "(", "self", ",", "context", ")", ":", "if", "__debug__", ":", "log", ".", "info", "(", "\"Connecting SQLAlchemy database layer.\"", ",", "extra", "=", "dict", "(", "uri", "=", "redact_uri", "(", "self", ".", "uri", ")", ",", "config", "=...
Construct the SQLAlchemy engine and session factory.
[ "Construct", "the", "SQLAlchemy", "engine", "and", "session", "factory", "." ]
python
test
chigby/mtg
mtglib/card_extractor.py
https://github.com/chigby/mtg/blob/c12976d6c7e13d6859a0fbac4c057dc9529de473/mtglib/card_extractor.py#L38-L49
def _flatten(self, element): """Recursively enter and extract text from all child elements.""" result = [(element.text or '')] if element.attrib.get('alt'): result.append(Symbol(element.attrib.get('alt')).textbox) for sel in element: result.append(self._flatten(sel)) result.append(sel.tail or '') # prevent reminder text from getting too close to mana symbols return ''.join(result).replace('}(', '} (')
[ "def", "_flatten", "(", "self", ",", "element", ")", ":", "result", "=", "[", "(", "element", ".", "text", "or", "''", ")", "]", "if", "element", ".", "attrib", ".", "get", "(", "'alt'", ")", ":", "result", ".", "append", "(", "Symbol", "(", "ele...
Recursively enter and extract text from all child elements.
[ "Recursively", "enter", "and", "extract", "text", "from", "all", "child", "elements", "." ]
python
train
apache/airflow
airflow/contrib/hooks/aws_athena_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_athena_hook.py#L43-L51
def get_conn(self): """ check if aws conn exists already or create one and return it :return: boto3 session """ if not self.conn: self.conn = self.get_client_type('athena') return self.conn
[ "def", "get_conn", "(", "self", ")", ":", "if", "not", "self", ".", "conn", ":", "self", ".", "conn", "=", "self", ".", "get_client_type", "(", "'athena'", ")", "return", "self", ".", "conn" ]
check if aws conn exists already or create one and return it :return: boto3 session
[ "check", "if", "aws", "conn", "exists", "already", "or", "create", "one", "and", "return", "it" ]
python
test
idlesign/uwsgiconf
uwsgiconf/options/routing_routers.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/routing_routers.py#L399-L434
def set_manage_params( self, chunked_input=None, chunked_output=None, gzip=None, websockets=None, source_method=None, rtsp=None, proxy_protocol=None): """Allows enabling various automatic management mechanics. * http://uwsgi.readthedocs.io/en/latest/Changelog-1.9.html#http-router-keepalive-auto-chunking-auto-gzip-and-transparent-websockets :param bool chunked_input: Automatically detect chunked input requests and put the session in raw mode. :param bool chunked_output: Automatically transform output to chunked encoding during HTTP 1.1 keepalive (if needed). :param bool gzip: Automatically gzip content if uWSGI-Encoding header is set to gzip, but content size (Content-Length/Transfer-Encoding) and Content-Encoding are not specified. :param bool websockets: Automatically detect websockets connections and put the session in raw mode. :param bool source_method: Automatically put the session in raw mode for `SOURCE` HTTP method. * http://uwsgi.readthedocs.io/en/latest/Changelog-2.0.5.html#icecast2-protocol-helpers :param bool rtsp: Allow the HTTP router to detect RTSP and chunked requests automatically. :param bool proxy_protocol: Allows the HTTP router to manage PROXY1 protocol requests, such as those made by Haproxy or Amazon Elastic Load Balancer (ELB). """ self._set_aliased('chunked-input', chunked_input, cast=bool) self._set_aliased('auto-chunked', chunked_output, cast=bool) self._set_aliased('auto-gzip', gzip, cast=bool) self._set_aliased('websockets', websockets, cast=bool) self._set_aliased('manage-source', source_method, cast=bool) self._set_aliased('manage-rtsp', rtsp, cast=bool) self._set_aliased('enable-proxy-protocol', proxy_protocol, cast=bool) return self
[ "def", "set_manage_params", "(", "self", ",", "chunked_input", "=", "None", ",", "chunked_output", "=", "None", ",", "gzip", "=", "None", ",", "websockets", "=", "None", ",", "source_method", "=", "None", ",", "rtsp", "=", "None", ",", "proxy_protocol", "=...
Allows enabling various automatic management mechanics. * http://uwsgi.readthedocs.io/en/latest/Changelog-1.9.html#http-router-keepalive-auto-chunking-auto-gzip-and-transparent-websockets :param bool chunked_input: Automatically detect chunked input requests and put the session in raw mode. :param bool chunked_output: Automatically transform output to chunked encoding during HTTP 1.1 keepalive (if needed). :param bool gzip: Automatically gzip content if uWSGI-Encoding header is set to gzip, but content size (Content-Length/Transfer-Encoding) and Content-Encoding are not specified. :param bool websockets: Automatically detect websockets connections and put the session in raw mode. :param bool source_method: Automatically put the session in raw mode for `SOURCE` HTTP method. * http://uwsgi.readthedocs.io/en/latest/Changelog-2.0.5.html#icecast2-protocol-helpers :param bool rtsp: Allow the HTTP router to detect RTSP and chunked requests automatically. :param bool proxy_protocol: Allows the HTTP router to manage PROXY1 protocol requests, such as those made by Haproxy or Amazon Elastic Load Balancer (ELB).
[ "Allows", "enabling", "various", "automatic", "management", "mechanics", "." ]
python
train
databio/pypiper
pypiper/pipeline.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/pipeline.py#L355-L378
def _stop_index(self, stop_point, inclusive): """ Determine index of stage of stopping point for run(). :param str | pypiper.Stage | function stop_point: Stopping point itself or name of it. :param bool inclusive: Whether the stopping point is to be regarded as inclusive (i.e., whether it's the final stage to run, or the one just beyond) :return int: Index into sequence of Pipeline's stages that indicates where to stop; critically, the value of the inclusive parameter here is used to contextualize this index such that it's always returned as an exclusive stopping index (i.e., execute up to the stage indexed by the value returned from this function.) """ if not stop_point: # Null case, no stopping point return len(self._stages) stop_name = parse_stage_name(stop_point) try: stop_index = self.stage_names.index(stop_name) except ValueError: raise UnknownPipelineStageError(stop_name, self) return stop_index + 1 if inclusive else stop_index
[ "def", "_stop_index", "(", "self", ",", "stop_point", ",", "inclusive", ")", ":", "if", "not", "stop_point", ":", "# Null case, no stopping point", "return", "len", "(", "self", ".", "_stages", ")", "stop_name", "=", "parse_stage_name", "(", "stop_point", ")", ...
Determine index of stage of stopping point for run(). :param str | pypiper.Stage | function stop_point: Stopping point itself or name of it. :param bool inclusive: Whether the stopping point is to be regarded as inclusive (i.e., whether it's the final stage to run, or the one just beyond) :return int: Index into sequence of Pipeline's stages that indicates where to stop; critically, the value of the inclusive parameter here is used to contextualize this index such that it's always returned as an exclusive stopping index (i.e., execute up to the stage indexed by the value returned from this function.)
[ "Determine", "index", "of", "stage", "of", "stopping", "point", "for", "run", "()", "." ]
python
train
jtwhite79/pyemu
pyemu/pst/pst_handler.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L1149-L1177
def rectify_pgroups(self): """ private method to synchronize parameter groups section with the parameter data section """ # add any parameters groups pdata_groups = list(self.parameter_data.loc[:,"pargp"].\ value_counts().keys()) #print(pdata_groups) need_groups = [] existing_groups = list(self.parameter_groups.pargpnme) for pg in pdata_groups: if pg not in existing_groups: need_groups.append(pg) if len(need_groups) > 0: #print(need_groups) defaults = copy.copy(pst_utils.pst_config["pargp_defaults"]) for grp in need_groups: defaults["pargpnme"] = grp self.parameter_groups = \ self.parameter_groups.append(defaults,ignore_index=True) # now drop any left over groups that aren't needed for gp in self.parameter_groups.loc[:,"pargpnme"]: if gp in pdata_groups and gp not in need_groups: need_groups.append(gp) self.parameter_groups.index = self.parameter_groups.pargpnme self.parameter_groups = self.parameter_groups.loc[need_groups,:]
[ "def", "rectify_pgroups", "(", "self", ")", ":", "# add any parameters groups", "pdata_groups", "=", "list", "(", "self", ".", "parameter_data", ".", "loc", "[", ":", ",", "\"pargp\"", "]", ".", "value_counts", "(", ")", ".", "keys", "(", ")", ")", "#print...
private method to synchronize parameter groups section with the parameter data section
[ "private", "method", "to", "synchronize", "parameter", "groups", "section", "with", "the", "parameter", "data", "section" ]
python
train
mrcagney/gtfstk
gtfstk/validators.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/validators.py#L1363-L1402
def check_transfers( feed: "Feed", *, as_df: bool = False, include_warnings: bool = False ) -> List: """ Analog of :func:`check_agency` for ``feed.transfers``. """ table = "transfers" problems = [] # Preliminary checks if feed.transfers is None: return problems f = feed.transfers.copy() problems = check_for_required_columns(problems, table, f) if problems: return format_problems(problems, as_df=as_df) if include_warnings: problems = check_for_invalid_columns(problems, table, f) # Check from_stop_id and to_stop_id for col in ["from_stop_id", "to_stop_id"]: problems = check_column_linked_id( problems, table, f, col, feed.stops, "stop_id" ) # Check transfer_type v = lambda x: pd.isnull(x) or x in range(5) problems = check_column( problems, table, f, "transfer_type", v, column_required=False ) # Check min_transfer_time v = lambda x: x >= 0 problems = check_column( problems, table, f, "min_transfer_time", v, column_required=False ) return format_problems(problems, as_df=as_df)
[ "def", "check_transfers", "(", "feed", ":", "\"Feed\"", ",", "*", ",", "as_df", ":", "bool", "=", "False", ",", "include_warnings", ":", "bool", "=", "False", ")", "->", "List", ":", "table", "=", "\"transfers\"", "problems", "=", "[", "]", "# Preliminar...
Analog of :func:`check_agency` for ``feed.transfers``.
[ "Analog", "of", ":", "func", ":", "check_agency", "for", "feed", ".", "transfers", "." ]
python
train
pbrisk/businessdate
businessdate/basedate.py
https://github.com/pbrisk/businessdate/blob/79a0c5a4e557cbacca82a430403b18413404a9bc/businessdate/basedate.py#L408-L418
def diff_in_days(start, end): """ calculate difference between given dates in days :param BaseDateTuple start: state date :param BaseDateTuple end: end date :return float: difference between end date and start date in days """ diff = from_ymd_to_excel(*end.date)-from_ymd_to_excel(*start.date) return float(diff)
[ "def", "diff_in_days", "(", "start", ",", "end", ")", ":", "diff", "=", "from_ymd_to_excel", "(", "*", "end", ".", "date", ")", "-", "from_ymd_to_excel", "(", "*", "start", ".", "date", ")", "return", "float", "(", "diff", ")" ]
calculate difference between given dates in days :param BaseDateTuple start: state date :param BaseDateTuple end: end date :return float: difference between end date and start date in days
[ "calculate", "difference", "between", "given", "dates", "in", "days" ]
python
valid
trustar/trustar-python
trustar/models/page.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/models/page.py#L67-L89
def from_dict(page, content_type=None): """ Create a |Page| object from a dictionary. This method is intended for internal use, to construct a |Page| object from the body of a response json from a paginated endpoint. :param page: The dictionary. :param content_type: The class that the contents should be deserialized into. :return: The resulting |Page| object. """ result = Page(items=page.get('items'), page_number=page.get('pageNumber'), page_size=page.get('pageSize'), total_elements=page.get('totalElements'), has_next=page.get('hasNext')) if content_type is not None: if not issubclass(content_type, ModelBase): raise ValueError("'content_type' must be a subclass of ModelBase.") result.items = [content_type.from_dict(item) for item in result.items] return result
[ "def", "from_dict", "(", "page", ",", "content_type", "=", "None", ")", ":", "result", "=", "Page", "(", "items", "=", "page", ".", "get", "(", "'items'", ")", ",", "page_number", "=", "page", ".", "get", "(", "'pageNumber'", ")", ",", "page_size", "...
Create a |Page| object from a dictionary. This method is intended for internal use, to construct a |Page| object from the body of a response json from a paginated endpoint. :param page: The dictionary. :param content_type: The class that the contents should be deserialized into. :return: The resulting |Page| object.
[ "Create", "a", "|Page|", "object", "from", "a", "dictionary", ".", "This", "method", "is", "intended", "for", "internal", "use", "to", "construct", "a", "|Page|", "object", "from", "the", "body", "of", "a", "response", "json", "from", "a", "paginated", "en...
python
train