repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
bitcraze/crazyflie-lib-python
cflib/crazyflie/param.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/param.py#L250-L255
def request_param_update(self, complete_name): """ Request an update of the value for the supplied parameter. """ self.param_updater.request_param_update( self.toc.get_element_id(complete_name))
[ "def", "request_param_update", "(", "self", ",", "complete_name", ")", ":", "self", ".", "param_updater", ".", "request_param_update", "(", "self", ".", "toc", ".", "get_element_id", "(", "complete_name", ")", ")" ]
Request an update of the value for the supplied parameter.
[ "Request", "an", "update", "of", "the", "value", "for", "the", "supplied", "parameter", "." ]
python
train
38.833333
mcs07/PubChemPy
pubchempy.py
https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L296-L302
def get_json(identifier, namespace='cid', domain='compound', operation=None, searchtype=None, **kwargs): """Request wrapper that automatically parses JSON response and supresses NotFoundError.""" try: return json.loads(get(identifier, namespace, domain, operation, 'JSON', searchtype, **kwargs).decode()) except NotFoundError as e: log.info(e) return None
[ "def", "get_json", "(", "identifier", ",", "namespace", "=", "'cid'", ",", "domain", "=", "'compound'", ",", "operation", "=", "None", ",", "searchtype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "json", ".", "loads", "(", ...
Request wrapper that automatically parses JSON response and supresses NotFoundError.
[ "Request", "wrapper", "that", "automatically", "parses", "JSON", "response", "and", "supresses", "NotFoundError", "." ]
python
train
55
bstriner/keras-tqdm
keras_tqdm/tqdm_callback.py
https://github.com/bstriner/keras-tqdm/blob/9696a8d6602d098364314f33b746fb868e225c95/keras_tqdm/tqdm_callback.py#L64-L71
def build_tqdm_outer(self, desc, total): """ Extension point. Override to provide custom options to outer progress bars (Epoch loop) :param desc: Description :param total: Number of epochs :return: new progress bar """ return self.tqdm(desc=desc, total=total, leave=self.leave_outer, initial=self.initial)
[ "def", "build_tqdm_outer", "(", "self", ",", "desc", ",", "total", ")", ":", "return", "self", ".", "tqdm", "(", "desc", "=", "desc", ",", "total", "=", "total", ",", "leave", "=", "self", ".", "leave_outer", ",", "initial", "=", "self", ".", "initia...
Extension point. Override to provide custom options to outer progress bars (Epoch loop) :param desc: Description :param total: Number of epochs :return: new progress bar
[ "Extension", "point", ".", "Override", "to", "provide", "custom", "options", "to", "outer", "progress", "bars", "(", "Epoch", "loop", ")", ":", "param", "desc", ":", "Description", ":", "param", "total", ":", "Number", "of", "epochs", ":", "return", ":", ...
python
train
44.25
gnullByte/dotcolors
dotcolors/utils.py
https://github.com/gnullByte/dotcolors/blob/4b09ff9862b88b3125fe9cd86aa054694ed3e46e/dotcolors/utils.py#L81-L90
def decimal_to_alpha(dec): """ expects: decimal between 0 and 100 returns: alpha value for rgba """ dec /= 100.0 alpha = hex(int(dec*65535))[2:] while len(alpha) < 4: alpha = '0' + alpha return alpha
[ "def", "decimal_to_alpha", "(", "dec", ")", ":", "dec", "/=", "100.0", "alpha", "=", "hex", "(", "int", "(", "dec", "*", "65535", ")", ")", "[", "2", ":", "]", "while", "len", "(", "alpha", ")", "<", "4", ":", "alpha", "=", "'0'", "+", "alpha",...
expects: decimal between 0 and 100 returns: alpha value for rgba
[ "expects", ":", "decimal", "between", "0", "and", "100", "returns", ":", "alpha", "value", "for", "rgba" ]
python
train
23.1
sdispater/eloquent
eloquent/orm/collection.py
https://github.com/sdispater/eloquent/blob/0638b688d5fd0c1a46b7471dd465eeb4c2f84666/eloquent/orm/collection.py#L8-L17
def load(self, *relations): """ Load a set of relationships onto the collection. """ if len(self._items) > 0: query = self.first().new_query().with_(*relations) self._items = query.eager_load_relations(self._items) return self
[ "def", "load", "(", "self", ",", "*", "relations", ")", ":", "if", "len", "(", "self", ".", "_items", ")", ">", "0", ":", "query", "=", "self", ".", "first", "(", ")", ".", "new_query", "(", ")", ".", "with_", "(", "*", "relations", ")", "self"...
Load a set of relationships onto the collection.
[ "Load", "a", "set", "of", "relationships", "onto", "the", "collection", "." ]
python
train
28.3
miguelgrinberg/Flask-Migrate
flask_migrate/templates/flask-multidb/env.py
https://github.com/miguelgrinberg/Flask-Migrate/blob/65fbd978681bdf2eddf8940edd04ed7272a94480/flask_migrate/templates/flask-multidb/env.py#L94-L169
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ # this callback is used to prevent an auto-migration from being generated # when there are no changes to the schema # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html def process_revision_directives(context, revision, directives): if getattr(config.cmd_opts, 'autogenerate', False): script = directives[0] if len(script.upgrade_ops_list) >= len(bind_names) + 1: empty = True for upgrade_ops in script.upgrade_ops_list: if not upgrade_ops.is_empty(): empty = False if empty: directives[:] = [] logger.info('No changes in schema detected.') # for the direct-to-DB use case, start a transaction on all # engines, then run all migrations, then commit all transactions. engines = { '': { 'engine': engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool, ) } } for name in bind_names: engines[name] = rec = {} rec['engine'] = engine_from_config( context.config.get_section(name), prefix='sqlalchemy.', poolclass=pool.NullPool) for name, rec in engines.items(): engine = rec['engine'] rec['connection'] = conn = engine.connect() if USE_TWOPHASE: rec['transaction'] = conn.begin_twophase() else: rec['transaction'] = conn.begin() try: for name, rec in engines.items(): logger.info("Migrating database %s" % (name or '<default>')) context.configure( connection=rec['connection'], upgrade_token="%s_upgrades" % name, downgrade_token="%s_downgrades" % name, target_metadata=get_metadata(name), process_revision_directives=process_revision_directives, **current_app.extensions['migrate'].configure_args ) context.run_migrations(engine_name=name) if USE_TWOPHASE: for rec in engines.values(): rec['transaction'].prepare() for rec in engines.values(): rec['transaction'].commit() except: for rec in engines.values(): rec['transaction'].rollback() raise finally: for rec in 
engines.values(): rec['connection'].close()
[ "def", "run_migrations_online", "(", ")", ":", "# this callback is used to prevent an auto-migration from being generated", "# when there are no changes to the schema", "# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html", "def", "process_revision_directives", "(", "context",...
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
[ "Run", "migrations", "in", "online", "mode", "." ]
python
train
34.934211
abseil/abseil-py
absl/flags/_flagvalues.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_flagvalues.py#L171-L182
def register_flag_by_module(self, module_name, flag): """Records the module that defines a specific flag. We keep track of which flag is defined by which module so that we can later sort the flags by module. Args: module_name: str, the name of a Python module. flag: Flag, the Flag instance that is key to the module. """ flags_by_module = self.flags_by_module_dict() flags_by_module.setdefault(module_name, []).append(flag)
[ "def", "register_flag_by_module", "(", "self", ",", "module_name", ",", "flag", ")", ":", "flags_by_module", "=", "self", ".", "flags_by_module_dict", "(", ")", "flags_by_module", ".", "setdefault", "(", "module_name", ",", "[", "]", ")", ".", "append", "(", ...
Records the module that defines a specific flag. We keep track of which flag is defined by which module so that we can later sort the flags by module. Args: module_name: str, the name of a Python module. flag: Flag, the Flag instance that is key to the module.
[ "Records", "the", "module", "that", "defines", "a", "specific", "flag", "." ]
python
train
37.916667
jmoiron/johnny-cache
johnny/cache.py
https://github.com/jmoiron/johnny-cache/blob/d96ea94c5dfcde517ff8f65d6ba4e435d8a0168c/johnny/cache.py#L79-L116
def get_tables_for_query(query): """ Takes a Django 'query' object and returns all tables that will be used in that query as a list. Note that where clauses can have their own querysets with their own dependent queries, etc. """ from django.db.models.sql.where import WhereNode, SubqueryConstraint from django.db.models.query import QuerySet tables = set([v[0] for v in getattr(query,'alias_map',{}).values()]) def get_sub_query_tables(node): query = node.query_object if not hasattr(query, 'field_names'): query = query.values(*node.targets) else: query = query._clone() query = query.query return set(v[0] for v in getattr(query, 'alias_map',{}).values()) def get_tables(node, tables): if isinstance(node, SubqueryConstraint): return get_sub_query_tables(node) for child in node.children: if isinstance(child, WhereNode): # and child.children: tables |= set(get_tables(child, tables)) elif not hasattr(child, '__iter__'): continue else: for item in (c for c in child if isinstance(c, QuerySet)): tables |= get_tables_for_query(item.query) return tables if query.where and query.where.children: where_nodes = [c for c in query.where.children if isinstance(c, (WhereNode, SubqueryConstraint))] for node in where_nodes: tables |= get_tables(node, tables) return list(tables)
[ "def", "get_tables_for_query", "(", "query", ")", ":", "from", "django", ".", "db", ".", "models", ".", "sql", ".", "where", "import", "WhereNode", ",", "SubqueryConstraint", "from", "django", ".", "db", ".", "models", ".", "query", "import", "QuerySet", "...
Takes a Django 'query' object and returns all tables that will be used in that query as a list. Note that where clauses can have their own querysets with their own dependent queries, etc.
[ "Takes", "a", "Django", "query", "object", "and", "returns", "all", "tables", "that", "will", "be", "used", "in", "that", "query", "as", "a", "list", ".", "Note", "that", "where", "clauses", "can", "have", "their", "own", "querysets", "with", "their", "o...
python
train
40.052632
droope/droopescan
dscan/plugins/internal/base_plugin_internal.py
https://github.com/droope/droopescan/blob/424c48a0f9d12b4536dbef5a786f0fbd4ce9519a/dscan/plugins/internal/base_plugin_internal.py#L237-L286
def _general_init(self, opts, out=None): """ Initializes a variety of variables depending on user input. @return: a tuple containing a boolean value indicating whether progressbars should be hidden, functionality and enabled functionality. """ self.session = Session() if out: self.out = out else: self.out = self._output(opts) is_cms_plugin = self._meta.label != "scan" if is_cms_plugin: self.vf = VersionsFile(self.versions_file) # http://stackoverflow.com/questions/23632794/in-requests-library-how-can-i-avoid-httpconnectionpool-is-full-discarding-con try: a = requests.adapters.HTTPAdapter(pool_maxsize=5000) self.session.mount('http://', a) self.session.mount('https://', a) self.session.cookies.set_policy(BlockAll()) except AttributeError: old_req = """Running a very old version of requests! Please `pip install -U requests`.""" self.out.warn(old_req) self.session.verify = False self.session.headers['User-Agent'] = self.DEFAULT_UA debug_requests = opts['debug_requests'] if debug_requests: hide_progressbar = True opts['threads_identify'] = 1 opts['threads_scan'] = 1 opts['threads_enumerate'] = 1 self.session = RequestsLogger(self.session) else: if opts['hide_progressbar']: hide_progressbar = True else: hide_progressbar = False functionality = self._functionality(opts) enabled_functionality = self._enabled_functionality(functionality, opts) return (hide_progressbar, functionality, enabled_functionality)
[ "def", "_general_init", "(", "self", ",", "opts", ",", "out", "=", "None", ")", ":", "self", ".", "session", "=", "Session", "(", ")", "if", "out", ":", "self", ".", "out", "=", "out", "else", ":", "self", ".", "out", "=", "self", ".", "_output",...
Initializes a variety of variables depending on user input. @return: a tuple containing a boolean value indicating whether progressbars should be hidden, functionality and enabled functionality.
[ "Initializes", "a", "variety", "of", "variables", "depending", "on", "user", "input", "." ]
python
train
36.24
OCHA-DAP/hdx-python-api
src/hdx/data/showcase.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/showcase.py#L179-L193
def get_datasets(self): # type: () -> List[hdx.data.dataset.Dataset] """Get any datasets in the showcase Returns: List[Dataset]: List of datasets """ assoc_result, datasets_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='showcase_id', action=self.actions()['list_datasets']) datasets = list() if assoc_result: for dataset_dict in datasets_dicts: dataset = hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration) datasets.append(dataset) return datasets
[ "def", "get_datasets", "(", "self", ")", ":", "# type: () -> List[hdx.data.dataset.Dataset]", "assoc_result", ",", "datasets_dicts", "=", "self", ".", "_read_from_hdx", "(", "'showcase'", ",", "self", ".", "data", "[", "'id'", "]", ",", "fieldname", "=", "'showcas...
Get any datasets in the showcase Returns: List[Dataset]: List of datasets
[ "Get", "any", "datasets", "in", "the", "showcase" ]
python
train
43.666667
lltk/lltk
lltk/nl/scrapers/uitmuntend.py
https://github.com/lltk/lltk/blob/d171de55c1b97695fddedf4b02401ae27bf1d634/lltk/nl/scrapers/uitmuntend.py#L60-L79
def articles(self): ''' Tries to scrape the correct articles for singular and plural from uitmuntend.nl. ''' result = [None, None] element = self._first('NN') if element: element = element.split('\r\n')[0] if ' | ' in element: # This means there is a plural singular, plural = element.split(' | ') singular, plural = singular.strip(), plural.strip() else: # This means there is no plural singular, plural = element.strip(), '' result[1] = '' if singular: result[0] = singular.split(' ')[0].split('/') if plural: result[1] = plural.split(' ')[0].split('/') return result
[ "def", "articles", "(", "self", ")", ":", "result", "=", "[", "None", ",", "None", "]", "element", "=", "self", ".", "_first", "(", "'NN'", ")", "if", "element", ":", "element", "=", "element", ".", "split", "(", "'\\r\\n'", ")", "[", "0", "]", "...
Tries to scrape the correct articles for singular and plural from uitmuntend.nl.
[ "Tries", "to", "scrape", "the", "correct", "articles", "for", "singular", "and", "plural", "from", "uitmuntend", ".", "nl", "." ]
python
train
30.4
python-openxml/python-docx
docx/opc/oxml.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/oxml.py#L212-L219
def add_rel(self, rId, reltype, target, is_external=False): """ Add a child ``<Relationship>`` element with attributes set according to parameter values. """ target_mode = RTM.EXTERNAL if is_external else RTM.INTERNAL relationship = CT_Relationship.new(rId, reltype, target, target_mode) self.append(relationship)
[ "def", "add_rel", "(", "self", ",", "rId", ",", "reltype", ",", "target", ",", "is_external", "=", "False", ")", ":", "target_mode", "=", "RTM", ".", "EXTERNAL", "if", "is_external", "else", "RTM", ".", "INTERNAL", "relationship", "=", "CT_Relationship", "...
Add a child ``<Relationship>`` element with attributes set according to parameter values.
[ "Add", "a", "child", "<Relationship", ">", "element", "with", "attributes", "set", "according", "to", "parameter", "values", "." ]
python
train
45.25
dcos/shakedown
shakedown/dcos/agent.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/agent.py#L11-L19
def get_public_agents_public_ip(): """Provides a list public IPs for public agents in the cluster""" public_ip_list = [] agents = get_public_agents() for agent in agents: status, public_ip = shakedown.run_command_on_agent(agent, "/opt/mesosphere/bin/detect_ip_public") public_ip_list.append(public_ip) return public_ip_list
[ "def", "get_public_agents_public_ip", "(", ")", ":", "public_ip_list", "=", "[", "]", "agents", "=", "get_public_agents", "(", ")", "for", "agent", "in", "agents", ":", "status", ",", "public_ip", "=", "shakedown", ".", "run_command_on_agent", "(", "agent", ",...
Provides a list public IPs for public agents in the cluster
[ "Provides", "a", "list", "public", "IPs", "for", "public", "agents", "in", "the", "cluster" ]
python
train
39.111111
shakefu/pyconfig
pyconfig/__init__.py
https://github.com/shakefu/pyconfig/blob/000cb127db51e03cb4070aae6943e956193cbad5/pyconfig/__init__.py#L483-L491
def start_watching(self): """ Begins watching etcd for changes. """ # Don't create a new watcher thread if we already have one running if self.watcher and self.watcher.is_alive(): return # Create a new watcher thread and start it self.watcher = Watcher() self.watcher.start()
[ "def", "start_watching", "(", "self", ")", ":", "# Don't create a new watcher thread if we already have one running", "if", "self", ".", "watcher", "and", "self", ".", "watcher", ".", "is_alive", "(", ")", ":", "return", "# Create a new watcher thread and start it", "self...
Begins watching etcd for changes.
[ "Begins", "watching", "etcd", "for", "changes", "." ]
python
valid
36.444444
pandas-dev/pandas
pandas/io/sql.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L1438-L1452
def _query_iterator(cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None): """Return generator through chunked result set""" while True: data = cursor.fetchmany(chunksize) if type(data) == tuple: data = list(data) if not data: cursor.close() break else: yield _wrap_result(data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates)
[ "def", "_query_iterator", "(", "cursor", ",", "chunksize", ",", "columns", ",", "index_col", "=", "None", ",", "coerce_float", "=", "True", ",", "parse_dates", "=", "None", ")", ":", "while", "True", ":", "data", "=", "cursor", ".", "fetchmany", "(", "ch...
Return generator through chunked result set
[ "Return", "generator", "through", "chunked", "result", "set" ]
python
train
39.733333
frictionlessdata/tableschema-pandas-py
tableschema_pandas/storage.py
https://github.com/frictionlessdata/tableschema-pandas-py/blob/ef941dbc12f5d346e9612f8fec1b4b356b8493ca/tableschema_pandas/storage.py#L47-L71
def create(self, bucket, descriptor, force=False): """https://github.com/frictionlessdata/tableschema-pandas-py#storage """ # Make lists buckets = bucket if isinstance(bucket, six.string_types): buckets = [bucket] descriptors = descriptor if isinstance(descriptor, dict): descriptors = [descriptor] # Check buckets for existence for bucket in buckets: if bucket in self.buckets: if not force: message = 'Bucket "%s" already exists' % bucket raise tableschema.exceptions.StorageError(message) self.delete(bucket) # Define dataframes for bucket, descriptor in zip(buckets, descriptors): tableschema.validate(descriptor) self.__descriptors[bucket] = descriptor self.__dataframes[bucket] = pd.DataFrame()
[ "def", "create", "(", "self", ",", "bucket", ",", "descriptor", ",", "force", "=", "False", ")", ":", "# Make lists", "buckets", "=", "bucket", "if", "isinstance", "(", "bucket", ",", "six", ".", "string_types", ")", ":", "buckets", "=", "[", "bucket", ...
https://github.com/frictionlessdata/tableschema-pandas-py#storage
[ "https", ":", "//", "github", ".", "com", "/", "frictionlessdata", "/", "tableschema", "-", "pandas", "-", "py#storage" ]
python
train
36.44
lingthio/Flask-User
flask_user/db_adapters/pynamo_db_adapter.py
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/db_adapters/pynamo_db_adapter.py#L48-L57
def find_objects(self, ObjectClass, **kwargs): """ Retrieve all objects of type ``ObjectClass``, matching the specified filters in ``**kwargs`` -- case sensitive. """ filter = None for k, v in kwargs.items(): cond = ObjectClass.getattr(k) == v filter = cond if filter is None else filter & cond return ObjectClass.scan(filter)
[ "def", "find_objects", "(", "self", ",", "ObjectClass", ",", "*", "*", "kwargs", ")", ":", "filter", "=", "None", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "cond", "=", "ObjectClass", ".", "getattr", "(", "k", ")", "==", ...
Retrieve all objects of type ``ObjectClass``, matching the specified filters in ``**kwargs`` -- case sensitive.
[ "Retrieve", "all", "objects", "of", "type", "ObjectClass", "matching", "the", "specified", "filters", "in", "**", "kwargs", "--", "case", "sensitive", "." ]
python
train
39
aparo/pyes
pyes/managers.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/managers.py#L146-L154
def field_stats(self, indices=''): """ Retrieve the field data stats for one or more indices (See :ref:'es-guide-reference-api-admin-cluster-nodes-stats') :keyword indices: an index or a list of indices """ path = self.conn._make_path(indices, (), '_stats','fielddata') return self.conn._send_request('GET', path)
[ "def", "field_stats", "(", "self", ",", "indices", "=", "''", ")", ":", "path", "=", "self", ".", "conn", ".", "_make_path", "(", "indices", ",", "(", ")", ",", "'_stats'", ",", "'fielddata'", ")", "return", "self", ".", "conn", ".", "_send_request", ...
Retrieve the field data stats for one or more indices (See :ref:'es-guide-reference-api-admin-cluster-nodes-stats') :keyword indices: an index or a list of indices
[ "Retrieve", "the", "field", "data", "stats", "for", "one", "or", "more", "indices", "(", "See", ":", "ref", ":", "es", "-", "guide", "-", "reference", "-", "api", "-", "admin", "-", "cluster", "-", "nodes", "-", "stats", ")" ]
python
train
40.222222
Azure/azure-cosmos-python
azure/cosmos/cosmos_client.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L1329-L1353
def UpsertUserDefinedFunction(self, collection_link, udf, options=None): """Upserts a user defined function in a collection. :param str collection_link: The link to the collection. :param str udf: :param dict options: The request options for the request. :return: The upserted UDF. :rtype: dict """ if options is None: options = {} collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf) return self.Upsert(udf, path, 'udfs', collection_id, None, options)
[ "def", "UpsertUserDefinedFunction", "(", "self", ",", "collection_link", ",", "udf", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "collection_id", ",", "path", ",", "udf", "=", "self", ".", "_GetCon...
Upserts a user defined function in a collection. :param str collection_link: The link to the collection. :param str udf: :param dict options: The request options for the request. :return: The upserted UDF. :rtype: dict
[ "Upserts", "a", "user", "defined", "function", "in", "a", "collection", "." ]
python
train
29.52
LudovicRousseau/pyscard
smartcard/pyro/server/PyroNameServer.py
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/pyro/server/PyroNameServer.py#L60-L74
def getShutdownArgs(self): """return command line arguments for shutting down the server; this command line is built from the name server startup arguments.""" shutdownArgs = [] if self.host: shutdownArgs += ['-h', self.host] if self.bcport: shutdownArgs += ['-p', self.bcport] if self.bcaddr: shutdownArgs += ['-c', self.bcaddr] if self.identification: shutdownArgs += ['-i', self.identification] return shutdownArgs
[ "def", "getShutdownArgs", "(", "self", ")", ":", "shutdownArgs", "=", "[", "]", "if", "self", ".", "host", ":", "shutdownArgs", "+=", "[", "'-h'", ",", "self", ".", "host", "]", "if", "self", ".", "bcport", ":", "shutdownArgs", "+=", "[", "'-p'", ","...
return command line arguments for shutting down the server; this command line is built from the name server startup arguments.
[ "return", "command", "line", "arguments", "for", "shutting", "down", "the", "server", ";", "this", "command", "line", "is", "built", "from", "the", "name", "server", "startup", "arguments", "." ]
python
train
34.933333
toomore/goristock
grs/goristock.py
https://github.com/toomore/goristock/blob/e61f57f11a626cfbc4afbf66337fd9d1c51e3e71/grs/goristock.py#L135-L145
def ckinv(self,oo): """ check the value is date or not 檢查是否為日期格式 """ pattern = re.compile(r"[0-9]{2}/[0-9]{2}/[0-9]{2}") b = re.search(pattern, oo[0]) try: b.group() return True except: return False
[ "def", "ckinv", "(", "self", ",", "oo", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r\"[0-9]{2}/[0-9]{2}/[0-9]{2}\"", ")", "b", "=", "re", ".", "search", "(", "pattern", ",", "oo", "[", "0", "]", ")", "try", ":", "b", ".", "group", "(", ...
check the value is date or not 檢查是否為日期格式
[ "check", "the", "value", "is", "date", "or", "not", "檢查是否為日期格式" ]
python
train
21.636364
treycucco/bidon
bidon/data_table.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/data_table.py#L118-L125
def rows_to_dicts(self, serialize_cell=None): """Generates a sequence of dictionaries of {header[i] => row[i]} for each row.""" if serialize_cell is None: serialize_cell = self.get_cell_value # keys = [serialize_cell(cell) for cell in self.rows[0]] keys = self.headers(serialize_cell) for row in self.rows[1:]: yield dict(zip(keys, [serialize_cell(cell) for cell in row]))
[ "def", "rows_to_dicts", "(", "self", ",", "serialize_cell", "=", "None", ")", ":", "if", "serialize_cell", "is", "None", ":", "serialize_cell", "=", "self", ".", "get_cell_value", "# keys = [serialize_cell(cell) for cell in self.rows[0]]", "keys", "=", "self", ".", ...
Generates a sequence of dictionaries of {header[i] => row[i]} for each row.
[ "Generates", "a", "sequence", "of", "dictionaries", "of", "{", "header", "[", "i", "]", "=", ">", "row", "[", "i", "]", "}", "for", "each", "row", "." ]
python
train
49.625
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L2891-L2893
def get_datatype(self, table: str, column: str) -> str: """Returns database SQL datatype for a column: e.g. VARCHAR.""" return self.flavour.get_datatype(self, table, column).upper()
[ "def", "get_datatype", "(", "self", ",", "table", ":", "str", ",", "column", ":", "str", ")", "->", "str", ":", "return", "self", ".", "flavour", ".", "get_datatype", "(", "self", ",", "table", ",", "column", ")", ".", "upper", "(", ")" ]
Returns database SQL datatype for a column: e.g. VARCHAR.
[ "Returns", "database", "SQL", "datatype", "for", "a", "column", ":", "e", ".", "g", ".", "VARCHAR", "." ]
python
train
65
saltstack/salt
salt/utils/virtualbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/virtualbox.py#L250-L266
def vb_wait_for_session_state(xp_session, state='Unlocked', timeout=10, step=None): ''' Waits until a session state has been reached, checking at regular intervals. @param xp_session: @type xp_session: ISession from the Virtualbox API @param state: The constant descriptor according to the docs @type state: str @param timeout: in seconds @type timeout: int | float @param step: Intervals at which the value is checked @type step: int | float @return: Did we reach the state? @rtype: bool ''' args = (xp_session, state) wait_for(_check_session_state, timeout=timeout, step=step, default=False, func_args=args)
[ "def", "vb_wait_for_session_state", "(", "xp_session", ",", "state", "=", "'Unlocked'", ",", "timeout", "=", "10", ",", "step", "=", "None", ")", ":", "args", "=", "(", "xp_session", ",", "state", ")", "wait_for", "(", "_check_session_state", ",", "timeout",...
Waits until a session state has been reached, checking at regular intervals. @param xp_session: @type xp_session: ISession from the Virtualbox API @param state: The constant descriptor according to the docs @type state: str @param timeout: in seconds @type timeout: int | float @param step: Intervals at which the value is checked @type step: int | float @return: Did we reach the state? @rtype: bool
[ "Waits", "until", "a", "session", "state", "has", "been", "reached", "checking", "at", "regular", "intervals", "." ]
python
train
38.470588
CalebBell/fluids
fluids/safety_valve.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/safety_valve.py#L319-L361
def API520_SH(T1, P1): r'''Calculates correction due to steam superheat for steam flow for use in API 520 relief valve sizing. 2D interpolation among a table with 28 pressures and 10 temperatures is performed. Parameters ---------- T1 : float Temperature of the fluid entering the valve [K] P1 : float Upstream relieving pressure; the set pressure plus the allowable overpressure, plus atmospheric pressure, [Pa] Returns ------- KSH : float Correction due to steam superheat [-] Notes ----- For P above 20679 kPag, use the critical flow model. Superheat cannot be above 649 degrees Celsius. If T1 is above 149 degrees Celsius, returns 1. Examples -------- Custom example from table 9: >>> API520_SH(593+273.15, 1066.325E3) 0.7201800000000002 References ---------- .. [1] API Standard 520, Part 1 - Sizing and Selection. ''' if P1 > 20780325.0: # 20679E3+atm raise Exception('For P above 20679 kPag, use the critical flow model') if T1 > 922.15: raise Exception('Superheat cannot be above 649 degrees Celcius') if T1 < 422.15: return 1. # No superheat under 15 psig return float(bisplev(T1, P1, API520_KSH_tck))
[ "def", "API520_SH", "(", "T1", ",", "P1", ")", ":", "if", "P1", ">", "20780325.0", ":", "# 20679E3+atm", "raise", "Exception", "(", "'For P above 20679 kPag, use the critical flow model'", ")", "if", "T1", ">", "922.15", ":", "raise", "Exception", "(", "'Superhe...
r'''Calculates correction due to steam superheat for steam flow for use in API 520 relief valve sizing. 2D interpolation among a table with 28 pressures and 10 temperatures is performed. Parameters ---------- T1 : float Temperature of the fluid entering the valve [K] P1 : float Upstream relieving pressure; the set pressure plus the allowable overpressure, plus atmospheric pressure, [Pa] Returns ------- KSH : float Correction due to steam superheat [-] Notes ----- For P above 20679 kPag, use the critical flow model. Superheat cannot be above 649 degrees Celsius. If T1 is above 149 degrees Celsius, returns 1. Examples -------- Custom example from table 9: >>> API520_SH(593+273.15, 1066.325E3) 0.7201800000000002 References ---------- .. [1] API Standard 520, Part 1 - Sizing and Selection.
[ "r", "Calculates", "correction", "due", "to", "steam", "superheat", "for", "steam", "flow", "for", "use", "in", "API", "520", "relief", "valve", "sizing", ".", "2D", "interpolation", "among", "a", "table", "with", "28", "pressures", "and", "10", "temperature...
python
train
28.906977
Falkonry/falkonry-python-client
falkonryclient/service/falkonry.py
https://github.com/Falkonry/falkonry-python-client/blob/0aeb2b00293ee94944f1634e9667401b03da29c1/falkonryclient/service/falkonry.py#L350-L361
def get_entity_meta(self, datastream): """ To add entity meta data to a datastream :param datastream: string :param options: dict """ url = '/datastream/' + str(datastream) + '/entityMeta' response = self.http.get(url) entityMetaList = [] for entityMeta in response: entityMetaList.append(Schemas.EntityMeta(entityMeta=entityMeta)) return entityMetaList
[ "def", "get_entity_meta", "(", "self", ",", "datastream", ")", ":", "url", "=", "'/datastream/'", "+", "str", "(", "datastream", ")", "+", "'/entityMeta'", "response", "=", "self", ".", "http", ".", "get", "(", "url", ")", "entityMetaList", "=", "[", "]"...
To add entity meta data to a datastream :param datastream: string :param options: dict
[ "To", "add", "entity", "meta", "data", "to", "a", "datastream", ":", "param", "datastream", ":", "string", ":", "param", "options", ":", "dict" ]
python
train
36.166667
thunlp/THULAC-Python
thulac/__init__.py
https://github.com/thunlp/THULAC-Python/blob/3f1f126cd92c3d2aebdf4ab4850de3c9428a3b66/thulac/__init__.py#L112-L158
def __cutline(self, oiraw): '''对单行进行分词,这段函数包含前处理preprogress.py以及一系列后处理,将分词结果返回为一个map''' oiraw = decode(oiraw) vec = [] if(len(oiraw) < self.__maxLength): vec.append(oiraw) else: vec = self.__cutRaw(oiraw, self.__maxLength) ans = [] for oiraw in vec: if(self.__useT2S): traw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw) raw = self.__preprocesserpreprocesser.T2S(traw) else: raw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw) # raw = oiraw if(len(raw) > 0): if(self.__seg_only): tmp, tagged = self.__cws_tagging_decoder.segmentTag(raw, __poc_cands) segged = self.__cws_tagging_decoder.get_seg_result() if(self.__userDict is not None): self.__userDict.adjustSeg(segged) if(self.__use_filter): self.__myfilter.adjustSeg(segged) self.__nsDict.adjustSeg(segged) self.__idiomDict.adjustSeg(segged) self.__timeword.adjustSeg(segged) self.__punctuation.adjustSeg(segged) ans.extend(segged) # return list(map(lambda x: encode(x), segged)) else: tmp, tagged = self.__tagging_decoder.segmentTag(raw, __poc_cands) if(self.__userDict is not None): self.__userDict.adjustTag(tagged) if(self.__use_filter): self.__myfilter.adjustTag(tagged) self.__nsDict.adjustTag(tagged) self.__idiomDict.adjustTag(tagged) self.__timeword.adjustTag(tagged) self.__punctuation.adjustTag(tagged) ans.extend(tagged) if(self.__seg_only): return map(lambda x: encode(x), ans) else: return map(lambda x: (encode(x[0]), encode(x[1]), encode(x[2])), ans)
[ "def", "__cutline", "(", "self", ",", "oiraw", ")", ":", "oiraw", "=", "decode", "(", "oiraw", ")", "vec", "=", "[", "]", "if", "(", "len", "(", "oiraw", ")", "<", "self", ".", "__maxLength", ")", ":", "vec", ".", "append", "(", "oiraw", ")", "...
对单行进行分词,这段函数包含前处理preprogress.py以及一系列后处理,将分词结果返回为一个map
[ "对单行进行分词,这段函数包含前处理preprogress", ".", "py以及一系列后处理,将分词结果返回为一个map" ]
python
train
45.255319
hydpy-dev/hydpy
hydpy/core/modeltools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/modeltools.py#L710-L726
def addup_fluxes(self): """Add up the sum of the fluxes calculated so far. >>> from hydpy.models.test_v1 import * >>> parameterstep() >>> fluxes.fastaccess._q_sum = 1.0 >>> fluxes.q(2.0) >>> model.addup_fluxes() >>> fluxes.fastaccess._q_sum 3.0 """ fluxes = self.sequences.fluxes for flux in fluxes.numerics: sum_ = getattr(fluxes.fastaccess, '_%s_sum' % flux.name) sum_ += flux if flux.NDIM == 0: setattr(fluxes.fastaccess, '_%s_sum' % flux.name, sum_)
[ "def", "addup_fluxes", "(", "self", ")", ":", "fluxes", "=", "self", ".", "sequences", ".", "fluxes", "for", "flux", "in", "fluxes", ".", "numerics", ":", "sum_", "=", "getattr", "(", "fluxes", ".", "fastaccess", ",", "'_%s_sum'", "%", "flux", ".", "na...
Add up the sum of the fluxes calculated so far. >>> from hydpy.models.test_v1 import * >>> parameterstep() >>> fluxes.fastaccess._q_sum = 1.0 >>> fluxes.q(2.0) >>> model.addup_fluxes() >>> fluxes.fastaccess._q_sum 3.0
[ "Add", "up", "the", "sum", "of", "the", "fluxes", "calculated", "so", "far", "." ]
python
train
34
mobinrg/rpi_spark_drives
JMRPiSpark/Drives/Screen/SScreen.py
https://github.com/mobinrg/rpi_spark_drives/blob/e1602d8268a5ef48e9e0a8b37de89e0233f946ea/JMRPiSpark/Drives/Screen/SScreen.py#L100-L112
def moveTo(self, newX=0, newY=0): """! \~english Move vertex of rectangles to new point (x,y) @param newX: Coordinated X value @param newY: Coordinated Y value \~chinese 移动矩形到新坐标点 (x,y) @param newX: 坐标 X @param newY: 坐标 Y """ self.x = newX self.y = newY
[ "def", "moveTo", "(", "self", ",", "newX", "=", "0", ",", "newY", "=", "0", ")", ":", "self", ".", "x", "=", "newX", "self", ".", "y", "=", "newY" ]
! \~english Move vertex of rectangles to new point (x,y) @param newX: Coordinated X value @param newY: Coordinated Y value \~chinese 移动矩形到新坐标点 (x,y) @param newX: 坐标 X @param newY: 坐标 Y
[ "!", "\\", "~english", "Move", "vertex", "of", "rectangles", "to", "new", "point", "(", "x", "y", ")" ]
python
train
26
deshima-dev/decode
decode/core/__init__.py
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/__init__.py#L22-L24
def scalarcoords(self): """A dictionary of values that don't label any axes (point-like).""" return {k: v.values for k, v in self.coords.items() if v.dims==()}
[ "def", "scalarcoords", "(", "self", ")", ":", "return", "{", "k", ":", "v", ".", "values", "for", "k", ",", "v", "in", "self", ".", "coords", ".", "items", "(", ")", "if", "v", ".", "dims", "==", "(", ")", "}" ]
A dictionary of values that don't label any axes (point-like).
[ "A", "dictionary", "of", "values", "that", "don", "t", "label", "any", "axes", "(", "point", "-", "like", ")", "." ]
python
train
57.666667
tensorforce/tensorforce
tensorforce/execution/threaded_runner.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/execution/threaded_runner.py#L188-L277
def _run_single(self, thread_id, agent, environment, deterministic=False, max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None): """ The target function for a thread, runs an agent and environment until signaled to stop. Adds rewards to shared episode rewards list. Args: thread_id (int): The ID of the thread that's running this target function. agent (Agent): The Agent object that this particular thread uses. environment (Environment): The Environment object that this particular thread uses. max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes. episode_finished (callable): Function called after each episode that takes an episode summary spec and returns False, if this single run should terminate after this episode. Can be used e.g. to set a particular mean reward threshold. """ # figure out whether we are using the deprecated way of "episode_finished" reporting old_episode_finished = False if episode_finished is not None and len(getargspec(episode_finished).args) == 1: old_episode_finished = True episode = 0 # Run this single worker (episode loop) as long as global count thresholds have not been reached. while not self.should_stop: state = environment.reset() agent.reset() self.global_timestep, self.global_episode = agent.timestep, agent.episode episode_reward = 0 # Time step (within episode) loop time_step = 0 time_start = time.time() while True: action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False) reward = 0 for repeat in xrange(self.repeat_actions): state, terminal, step_reward = environment.execute(action=action) reward += step_reward if terminal: break if not testing: # agent.observe(reward=reward, terminal=terminal) # Insert everything at once. 
agent.atomic_observe( states=state, actions=action, internals=internals, reward=reward, terminal=terminal ) if sleep is not None: time.sleep(sleep) time_step += 1 episode_reward += reward if terminal or time_step == max_episode_timesteps: break # Abort the episode (discard its results) when global says so. if self.should_stop: return self.global_timestep += time_step # Avoid race condition where order in episode_rewards won't match order in episode_timesteps. self.episode_list_lock.acquire() self.episode_rewards.append(episode_reward) self.episode_timesteps.append(time_step) self.episode_times.append(time.time() - time_start) self.episode_list_lock.release() if episode_finished is not None: # old way of calling episode_finished if old_episode_finished: summary_data = { "thread_id": thread_id, "episode": episode, "timestep": time_step, "episode_reward": episode_reward } if not episode_finished(summary_data): return # New way with BasicRunner (self) and thread-id. elif not episode_finished(self, thread_id): return episode += 1
[ "def", "_run_single", "(", "self", ",", "thread_id", ",", "agent", ",", "environment", ",", "deterministic", "=", "False", ",", "max_episode_timesteps", "=", "-", "1", ",", "episode_finished", "=", "None", ",", "testing", "=", "False", ",", "sleep", "=", "...
The target function for a thread, runs an agent and environment until signaled to stop. Adds rewards to shared episode rewards list. Args: thread_id (int): The ID of the thread that's running this target function. agent (Agent): The Agent object that this particular thread uses. environment (Environment): The Environment object that this particular thread uses. max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes. episode_finished (callable): Function called after each episode that takes an episode summary spec and returns False, if this single run should terminate after this episode. Can be used e.g. to set a particular mean reward threshold.
[ "The", "target", "function", "for", "a", "thread", "runs", "an", "agent", "and", "environment", "until", "signaled", "to", "stop", ".", "Adds", "rewards", "to", "shared", "episode", "rewards", "list", "." ]
python
valid
43.822222
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L676-L686
def bsp_traverse_in_order( node: tcod.bsp.BSP, callback: Callable[[tcod.bsp.BSP, Any], None], userData: Any = 0, ) -> None: """Traverse this nodes hierarchy with a callback. .. deprecated:: 2.0 Use :any:`BSP.in_order` instead. """ _bsp_traverse(node.in_order(), callback, userData)
[ "def", "bsp_traverse_in_order", "(", "node", ":", "tcod", ".", "bsp", ".", "BSP", ",", "callback", ":", "Callable", "[", "[", "tcod", ".", "bsp", ".", "BSP", ",", "Any", "]", ",", "None", "]", ",", "userData", ":", "Any", "=", "0", ",", ")", "->"...
Traverse this nodes hierarchy with a callback. .. deprecated:: 2.0 Use :any:`BSP.in_order` instead.
[ "Traverse", "this", "nodes", "hierarchy", "with", "a", "callback", "." ]
python
train
27.909091
pgmpy/pgmpy
pgmpy/models/DynamicBayesianNetwork.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/DynamicBayesianNetwork.py#L226-L251
def get_intra_edges(self, time_slice=0): """ Returns the intra slice edges present in the 2-TBN. Parameter --------- time_slice: int (whole number) The time slice for which to get intra edges. The timeslice should be a positive value or zero. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L']) >>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)), ... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)), ... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)), ... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))]) >>> dbn.get_intra_edges() [(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0)) """ if not isinstance(time_slice, int) or time_slice < 0: raise ValueError("The timeslice should be a positive value greater than or equal to zero") return [tuple((x[0], time_slice) for x in edge) for edge in self.edges() if edge[0][1] == edge[1][1] == 0]
[ "def", "get_intra_edges", "(", "self", ",", "time_slice", "=", "0", ")", ":", "if", "not", "isinstance", "(", "time_slice", ",", "int", ")", "or", "time_slice", "<", "0", ":", "raise", "ValueError", "(", "\"The timeslice should be a positive value greater than or ...
Returns the intra slice edges present in the 2-TBN. Parameter --------- time_slice: int (whole number) The time slice for which to get intra edges. The timeslice should be a positive value or zero. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L']) >>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)), ... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)), ... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)), ... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))]) >>> dbn.get_intra_edges() [(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0))
[ "Returns", "the", "intra", "slice", "edges", "present", "in", "the", "2", "-", "TBN", "." ]
python
train
45.384615
dpgaspar/Flask-AppBuilder
flask_appbuilder/security/sqla/manager.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/security/sqla/manager.py#L122-L135
def del_register_user(self, register_user): """ Deletes registration object from database :param register_user: RegisterUser object to delete """ try: self.get_session.delete(register_user) self.get_session.commit() return True except Exception as e: log.error(c.LOGMSG_ERR_SEC_DEL_REGISTER_USER.format(str(e))) self.get_session.rollback() return False
[ "def", "del_register_user", "(", "self", ",", "register_user", ")", ":", "try", ":", "self", ".", "get_session", ".", "delete", "(", "register_user", ")", "self", ".", "get_session", ".", "commit", "(", ")", "return", "True", "except", "Exception", "as", "...
Deletes registration object from database :param register_user: RegisterUser object to delete
[ "Deletes", "registration", "object", "from", "database" ]
python
train
33.428571
pantsbuild/pants
src/python/pants/java/nailgun_client.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/nailgun_client.py#L94-L105
def _write_flush(self, fd, payload=None): """Write a payload to a given fd (if provided) and flush the fd.""" try: if payload: fd.write(ensure_binary(payload)) fd.flush() except (IOError, OSError) as e: # If a `Broken Pipe` is encountered during a stdio fd write, we're headless - bail. if e.errno == errno.EPIPE and self._exit_on_broken_pipe: sys.exit() # Otherwise, re-raise. raise
[ "def", "_write_flush", "(", "self", ",", "fd", ",", "payload", "=", "None", ")", ":", "try", ":", "if", "payload", ":", "fd", ".", "write", "(", "ensure_binary", "(", "payload", ")", ")", "fd", ".", "flush", "(", ")", "except", "(", "IOError", ",",...
Write a payload to a given fd (if provided) and flush the fd.
[ "Write", "a", "payload", "to", "a", "given", "fd", "(", "if", "provided", ")", "and", "flush", "the", "fd", "." ]
python
train
36.333333
klen/muffin-rest
muffin_rest/filters.py
https://github.com/klen/muffin-rest/blob/1d85bdd3b72a89eaeab8c4086926260a960408aa/muffin_rest/filters.py#L78-L94
def convert(self, args, handler=None): """Prepare filters.""" name = args field = attr = None opts = () if isinstance(args, (list, tuple)): name, *opts = args if opts: attr = opts.pop() if opts: field = opts.pop() if not field and handler and handler.Schema: field = handler.Schema._declared_fields.get(attr or name) or \ self.FILTER_CLASS.field_cls() field.attribute = field.attribute or attr or name return self.FILTER_CLASS(name, attr=attr, field=field, *opts)
[ "def", "convert", "(", "self", ",", "args", ",", "handler", "=", "None", ")", ":", "name", "=", "args", "field", "=", "attr", "=", "None", "opts", "=", "(", ")", "if", "isinstance", "(", "args", ",", "(", "list", ",", "tuple", ")", ")", ":", "n...
Prepare filters.
[ "Prepare", "filters", "." ]
python
train
36
globocom/GloboNetworkAPI-client-python
networkapiclient/GenericClient.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/GenericClient.py#L47-L68
def submit(self, map, method, postfix): '''Realiza um requisição HTTP para a networkAPI. :param map: Dicionário com os dados para gerar o XML enviado no corpo da requisição HTTP. :param method: Método da requisição HTTP ('GET', 'POST', 'PUT' ou 'DELETE'). :param postfix: Posfixo a ser colocado na URL básica de acesso à networkAPI. Ex: /ambiente :return: Tupla com o código e o corpo da resposta HTTP: (< codigo>, < descricao>) :raise NetworkAPIClientError: Erro durante a chamada HTTP para acesso à networkAPI. ''' try: rest_request = RestRequest( self.get_url(postfix), method, self.user, self.password, self.user_ldap) return rest_request.submit(map) except RestError as e: raise ErrorHandler.handle(None, str(e))
[ "def", "submit", "(", "self", ",", "map", ",", "method", ",", "postfix", ")", ":", "try", ":", "rest_request", "=", "RestRequest", "(", "self", ".", "get_url", "(", "postfix", ")", ",", "method", ",", "self", ".", "user", ",", "self", ".", "password"...
Realiza um requisição HTTP para a networkAPI. :param map: Dicionário com os dados para gerar o XML enviado no corpo da requisição HTTP. :param method: Método da requisição HTTP ('GET', 'POST', 'PUT' ou 'DELETE'). :param postfix: Posfixo a ser colocado na URL básica de acesso à networkAPI. Ex: /ambiente :return: Tupla com o código e o corpo da resposta HTTP: (< codigo>, < descricao>) :raise NetworkAPIClientError: Erro durante a chamada HTTP para acesso à networkAPI.
[ "Realiza", "um", "requisição", "HTTP", "para", "a", "networkAPI", "." ]
python
train
40.863636
LuminosoInsight/wordfreq
wordfreq/chinese.py
https://github.com/LuminosoInsight/wordfreq/blob/170e3c6536854b06dc63da8d873e8cc4f9ef6180/wordfreq/chinese.py#L28-L59
def jieba_tokenize(text, external_wordlist=False): """ Tokenize the given text into tokens whose word frequencies can probably be looked up. This uses Jieba, a word-frequency-based tokenizer. If `external_wordlist` is False, we tell Jieba to default to using wordfreq's own Chinese wordlist, and not to infer unknown words using a hidden Markov model. This ensures that the multi-character tokens that it outputs will be ones whose word frequencies we can look up. If `external_wordlist` is True, this will use the largest version of Jieba's original dictionary, with HMM enabled, so its results will be independent of the data in wordfreq. These results will be better optimized for purposes that aren't looking up word frequencies, such as general- purpose tokenization, or collecting word frequencies in the first place. """ global jieba_tokenizer, jieba_orig_tokenizer if external_wordlist: if jieba_orig_tokenizer is None: jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME) return jieba_orig_tokenizer.lcut(text) else: if jieba_tokenizer is None: jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME) # Tokenize the Simplified Chinese version of the text, but return # those spans from the original text, even if it's in Traditional # Chinese tokens = [] for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False): tokens.append(text[start:end]) return tokens
[ "def", "jieba_tokenize", "(", "text", ",", "external_wordlist", "=", "False", ")", ":", "global", "jieba_tokenizer", ",", "jieba_orig_tokenizer", "if", "external_wordlist", ":", "if", "jieba_orig_tokenizer", "is", "None", ":", "jieba_orig_tokenizer", "=", "jieba", "...
Tokenize the given text into tokens whose word frequencies can probably be looked up. This uses Jieba, a word-frequency-based tokenizer. If `external_wordlist` is False, we tell Jieba to default to using wordfreq's own Chinese wordlist, and not to infer unknown words using a hidden Markov model. This ensures that the multi-character tokens that it outputs will be ones whose word frequencies we can look up. If `external_wordlist` is True, this will use the largest version of Jieba's original dictionary, with HMM enabled, so its results will be independent of the data in wordfreq. These results will be better optimized for purposes that aren't looking up word frequencies, such as general- purpose tokenization, or collecting word frequencies in the first place.
[ "Tokenize", "the", "given", "text", "into", "tokens", "whose", "word", "frequencies", "can", "probably", "be", "looked", "up", ".", "This", "uses", "Jieba", "a", "word", "-", "frequency", "-", "based", "tokenizer", "." ]
python
train
48.75
kytos/python-openflow
pyof/foundation/network_types.py
https://github.com/kytos/python-openflow/blob/4f2d0d08ab28e102ed88fe57a4ee17729f1e1bb7/pyof/foundation/network_types.py#L441-L451
def get_size(self, value=None): """Return struct size. Returns: int: Returns the struct size based on inner attributes. """ if isinstance(value, type(self)): return value.get_size() return 2 + self.length
[ "def", "get_size", "(", "self", ",", "value", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "type", "(", "self", ")", ")", ":", "return", "value", ".", "get_size", "(", ")", "return", "2", "+", "self", ".", "length" ]
Return struct size. Returns: int: Returns the struct size based on inner attributes.
[ "Return", "struct", "size", "." ]
python
train
23.727273
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L2245-L2269
def new(self): # type: () -> None ''' A method to create a new UDF Logical Volume Implementation Use. Parameters: None. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Implementation Use already initialized') self.impl_id = UDFEntityID() self.impl_id.new(0, b'*pycdlib') self.num_files = 0 self.num_dirs = 1 self.min_udf_read_revision = 258 self.min_udf_write_revision = 258 self.max_udf_write_revision = 258 self.impl_use = b'\x00' * 378 # FIXME: let the user set this self._initialized = True
[ "def", "new", "(", "self", ")", ":", "# type: () -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'UDF Logical Volume Implementation Use already initialized'", ")", "self", ".", "impl_id", "=", "UDFEntityI...
A method to create a new UDF Logical Volume Implementation Use. Parameters: None. Returns: Nothing.
[ "A", "method", "to", "create", "a", "new", "UDF", "Logical", "Volume", "Implementation", "Use", "." ]
python
train
27.76
pyusb/pyusb
usb/core.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/core.py#L879-L904
def set_interface_altsetting(self, interface = None, alternate_setting = None): r"""Set the alternate setting for an interface. When you want to use an interface and it has more than one alternate setting, you should call this method to select the appropriate alternate setting. If you call the method without one or the two parameters, it will be selected the first one found in the Device in the same way of the set_configuration method. Commonly, an interface has only one alternate setting and this call is not necessary. For most devices, either it has more than one alternate setting or not, it is not harmful to make a call to this method with no arguments, as devices will silently ignore the request when there is only one alternate setting, though the USB Spec allows devices with no additional alternate setting return an error to the Host in response to a SET_INTERFACE request. If you are in doubt, you may want to call it with no arguments wrapped by a try/except clause: >>> try: >>> dev.set_interface_altsetting() >>> except usb.core.USBError: >>> pass """ self._ctx.managed_set_interface(self, interface, alternate_setting)
[ "def", "set_interface_altsetting", "(", "self", ",", "interface", "=", "None", ",", "alternate_setting", "=", "None", ")", ":", "self", ".", "_ctx", ".", "managed_set_interface", "(", "self", ",", "interface", ",", "alternate_setting", ")" ]
r"""Set the alternate setting for an interface. When you want to use an interface and it has more than one alternate setting, you should call this method to select the appropriate alternate setting. If you call the method without one or the two parameters, it will be selected the first one found in the Device in the same way of the set_configuration method. Commonly, an interface has only one alternate setting and this call is not necessary. For most devices, either it has more than one alternate setting or not, it is not harmful to make a call to this method with no arguments, as devices will silently ignore the request when there is only one alternate setting, though the USB Spec allows devices with no additional alternate setting return an error to the Host in response to a SET_INTERFACE request. If you are in doubt, you may want to call it with no arguments wrapped by a try/except clause: >>> try: >>> dev.set_interface_altsetting() >>> except usb.core.USBError: >>> pass
[ "r", "Set", "the", "alternate", "setting", "for", "an", "interface", "." ]
python
train
49.807692
gnarlychicken/aiohttp_auth
aiohttp_auth/auth/session_ticket_auth.py
https://github.com/gnarlychicken/aiohttp_auth/blob/3d55236889fb14b662279b050de18d43842bb886/aiohttp_auth/auth/session_ticket_auth.py#L22-L29
async def forget_ticket(self, request): """Called to forget the ticket data a request Args: request: aiohttp Request object. """ session = await get_session(request) session.pop(self.cookie_name, '')
[ "async", "def", "forget_ticket", "(", "self", ",", "request", ")", ":", "session", "=", "await", "get_session", "(", "request", ")", "session", ".", "pop", "(", "self", ".", "cookie_name", ",", "''", ")" ]
Called to forget the ticket data a request Args: request: aiohttp Request object.
[ "Called", "to", "forget", "the", "ticket", "data", "a", "request" ]
python
train
30.625
Kentzo/git-archive-all
git_archive_all.py
https://github.com/Kentzo/git-archive-all/blob/fed1f48f1287c84220be08d63181a2816bde7a64/git_archive_all.py#L312-L414
def check_attr(self, repo_abspath, attrs): """ Generator that returns attributes for given paths relative to repo_abspath. >>> g = GitArchiver.check_attr('repo_path', ['export-ignore']) >>> next(g) >>> attrs = g.send('relative_path') >>> print(attrs['export-ignore']) @param repo_abspath: Absolute path to a git repository. @type repo_abspath: str @param attrs: Attributes to check. @type attrs: [str] @rtype: generator """ def make_process(): env = dict(environ, GIT_FLUSH='1') cmd = 'git check-attr --stdin -z {0}'.format(' '.join(attrs)) return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, cwd=repo_abspath, env=env) def read_attrs(process, repo_file_path): process.stdin.write(repo_file_path.encode('utf-8') + b'\0') process.stdin.flush() # For every attribute check-attr will output: <path> NUL <attribute> NUL <info> NUL path, attr, info = b'', b'', b'' nuls_count = 0 nuls_expected = 3 * len(attrs) while nuls_count != nuls_expected: b = process.stdout.read(1) if b == b'' and process.poll() is not None: raise RuntimeError("check-attr exited prematurely") elif b == b'\0': nuls_count += 1 if nuls_count % 3 == 0: yield map(self.decode_git_output, (path, attr, info)) path, attr, info = b'', b'', b'' elif nuls_count % 3 == 0: path += b elif nuls_count % 3 == 1: attr += b elif nuls_count % 3 == 2: info += b def read_attrs_old(process, repo_file_path): """ Compatibility with versions 1.8.5 and below that do not recognize -z for output. 
""" process.stdin.write(repo_file_path.encode('utf-8') + b'\0') process.stdin.flush() # For every attribute check-attr will output: <path>: <attribute>: <info>\n # where <path> is c-quoted path, attr, info = b'', b'', b'' lines_count = 0 lines_expected = len(attrs) while lines_count != lines_expected: line = process.stdout.readline() info_start = line.rfind(b': ') if info_start == -1: raise RuntimeError("unexpected output of check-attr: {0}".format(line)) attr_start = line.rfind(b': ', 0, info_start) if attr_start == -1: raise RuntimeError("unexpected output of check-attr: {0}".format(line)) info = line[info_start + 2:len(line) - 1] # trim leading ": " and trailing \n attr = line[attr_start + 2:info_start] # trim leading ": " path = line[:attr_start] yield map(self.decode_git_output, (path, attr, info)) lines_count += 1 if not attrs: return process = make_process() try: while True: repo_file_path = yield repo_file_attrs = {} if self.git_version is None or self.git_version > (1, 8, 5): reader = read_attrs else: reader = read_attrs_old for path, attr, value in reader(process, repo_file_path): repo_file_attrs[attr] = value yield repo_file_attrs finally: process.stdin.close() process.wait()
[ "def", "check_attr", "(", "self", ",", "repo_abspath", ",", "attrs", ")", ":", "def", "make_process", "(", ")", ":", "env", "=", "dict", "(", "environ", ",", "GIT_FLUSH", "=", "'1'", ")", "cmd", "=", "'git check-attr --stdin -z {0}'", ".", "format", "(", ...
Generator that returns attributes for given paths relative to repo_abspath. >>> g = GitArchiver.check_attr('repo_path', ['export-ignore']) >>> next(g) >>> attrs = g.send('relative_path') >>> print(attrs['export-ignore']) @param repo_abspath: Absolute path to a git repository. @type repo_abspath: str @param attrs: Attributes to check. @type attrs: [str] @rtype: generator
[ "Generator", "that", "returns", "attributes", "for", "given", "paths", "relative", "to", "repo_abspath", "." ]
python
train
35.427184
IrvKalb/pygwidgets
pygwidgets/pygwidgets.py
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L2242-L2255
def scale(self, percent, scaleFromCenter=True): """scales an Image object Parameters: | percent - a percent of the original size | numbers bigger than 100 scale up | numbers less than 100 scale down | 100 scales to the original size Optional keyword parameters: | scaleFromCenter - should the image scale from the center or from the upper left hand corner | (default is True, scale from the center) """ self._transmogrophy(self.angle, percent, scaleFromCenter, self.flipH, self.flipV)
[ "def", "scale", "(", "self", ",", "percent", ",", "scaleFromCenter", "=", "True", ")", ":", "self", ".", "_transmogrophy", "(", "self", ".", "angle", ",", "percent", ",", "scaleFromCenter", ",", "self", ".", "flipH", ",", "self", ".", "flipV", ")" ]
scales an Image object Parameters: | percent - a percent of the original size | numbers bigger than 100 scale up | numbers less than 100 scale down | 100 scales to the original size Optional keyword parameters: | scaleFromCenter - should the image scale from the center or from the upper left hand corner | (default is True, scale from the center)
[ "scales", "an", "Image", "object", "Parameters", ":", "|", "percent", "-", "a", "percent", "of", "the", "original", "size", "|", "numbers", "bigger", "than", "100", "scale", "up", "|", "numbers", "less", "than", "100", "scale", "down", "|", "100", "scale...
python
train
45.571429
O365/python-o365
O365/drive.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/drive.py#L1030-L1056
def create_child_folder(self, name, description=None): """ Creates a Child Folder :param str name: the name of the new child folder :param str description: the description of the new child folder :return: newly created folder :rtype: drive.Folder """ if not self.object_id: return None url = self.build_url( self._endpoints.get('list_items').format(id=self.object_id)) data = {'name': name, 'folder': {}} if description: data['description'] = description response = self.con.post(url, data=data) if not response: return None folder = response.json() return self._classifier(folder)(parent=self, **{self._cloud_data_key: folder})
[ "def", "create_child_folder", "(", "self", ",", "name", ",", "description", "=", "None", ")", ":", "if", "not", "self", ".", "object_id", ":", "return", "None", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'li...
Creates a Child Folder :param str name: the name of the new child folder :param str description: the description of the new child folder :return: newly created folder :rtype: drive.Folder
[ "Creates", "a", "Child", "Folder" ]
python
train
29.962963
antocuni/pdb
pdb.py
https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L1232-L1255
def error(self, msg): """Override/enhance default error method to display tracebacks.""" print("***", msg, file=self.stdout) if not self.config.show_traceback_on_error: return etype, evalue, tb = sys.exc_info() if tb and tb.tb_frame.f_code.co_name == "default": tb = tb.tb_next if tb and tb.tb_frame.f_code.co_filename == "<stdin>": tb = tb.tb_next if tb: # only display with actual traceback. self._remove_bdb_context(evalue) tb_limit = self.config.show_traceback_on_error_limit fmt_exc = traceback.format_exception( etype, evalue, tb, limit=tb_limit ) # Remove last line (exception string again). if len(fmt_exc) > 1 and fmt_exc[-1][0] != " ": fmt_exc.pop() print("".join(fmt_exc).rstrip(), file=self.stdout)
[ "def", "error", "(", "self", ",", "msg", ")", ":", "print", "(", "\"***\"", ",", "msg", ",", "file", "=", "self", ".", "stdout", ")", "if", "not", "self", ".", "config", ".", "show_traceback_on_error", ":", "return", "etype", ",", "evalue", ",", "tb"...
Override/enhance default error method to display tracebacks.
[ "Override", "/", "enhance", "default", "error", "method", "to", "display", "tracebacks", "." ]
python
train
41.166667
QuantEcon/QuantEcon.py
quantecon/quadsums.py
https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/quadsums.py#L14-L62
def var_quadratic_sum(A, C, H, beta, x0): r""" Computes the expected discounted quadratic sum .. math:: q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big] Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t` with :math:`{x_t}` standard normal and :math:`x_0` the initial condition. Parameters ---------- A : array_like(float, ndim=2) The matrix described above in description. Should be n x n C : array_like(float, ndim=2) The matrix described above in description. Should be n x n H : array_like(float, ndim=2) The matrix described above in description. Should be n x n beta: scalar(float) Should take a value in (0, 1) x_0: array_like(float, ndim=1) The initial condtion. A conformable array (of length n, or with n rows) Returns ------- q0: scalar(float) Represents the value :math:`q(x_0)` Remarks: The formula for computing :math:`q(x_0)` is :math:`q(x_0) = x_0' Q x_0 + v` where * :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and * :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}` """ # == Make sure that A, C, H and x0 are array_like == # A, C, H = list(map(np.atleast_2d, (A, C, H))) x0 = np.atleast_1d(x0) # == Start computations == # Q = scipy.linalg.solve_discrete_lyapunov(sqrt(beta) * A.T, H) cq = dot(dot(C.T, Q), C) v = np.trace(cq) * beta / (1 - beta) q0 = dot(dot(x0.T, Q), x0) + v return q0
[ "def", "var_quadratic_sum", "(", "A", ",", "C", ",", "H", ",", "beta", ",", "x0", ")", ":", "# == Make sure that A, C, H and x0 are array_like == #", "A", ",", "C", ",", "H", "=", "list", "(", "map", "(", "np", ".", "atleast_2d", ",", "(", "A", ",", "C...
r""" Computes the expected discounted quadratic sum .. math:: q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big] Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t` with :math:`{x_t}` standard normal and :math:`x_0` the initial condition. Parameters ---------- A : array_like(float, ndim=2) The matrix described above in description. Should be n x n C : array_like(float, ndim=2) The matrix described above in description. Should be n x n H : array_like(float, ndim=2) The matrix described above in description. Should be n x n beta: scalar(float) Should take a value in (0, 1) x_0: array_like(float, ndim=1) The initial condtion. A conformable array (of length n, or with n rows) Returns ------- q0: scalar(float) Represents the value :math:`q(x_0)` Remarks: The formula for computing :math:`q(x_0)` is :math:`q(x_0) = x_0' Q x_0 + v` where * :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and * :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}`
[ "r", "Computes", "the", "expected", "discounted", "quadratic", "sum" ]
python
train
31
tamasgal/km3pipe
km3pipe/hardware.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/hardware.py#L151-L227
def _parse_doms(self): """Extract dom information from detector file""" self.print("Reading PMT information...") self._det_file.seek(0, 0) self._readline() pmts = defaultdict(list) pmt_index = 0 while True: line = self._readline() if line == '': self.print("Done.") break try: dom_id, du, floor, n_pmts = split(line, int) except ValueError: continue if du != self._current_du: log.debug("Next DU, resetting floor to 1.") self._current_du = du self.dus.append(du) self._current_floor = 1 if du == 1 and floor == -1: log.warning( "Floor ID is -1 (Jpp conversion bug), " "using our own floor ID!" ) else: self._current_floor += 1 if floor == -1: log.debug("Setting floor ID to our own ID") floor = self._current_floor self.doms[dom_id] = (du, floor, n_pmts) if self.n_pmts_per_dom is None: self.n_pmts_per_dom = n_pmts if self.n_pmts_per_dom != n_pmts: log.warning( "DOMs with different number of PMTs are " "detected, this can cause some unexpected " "behaviour." ) for i in range(n_pmts): raw_pmt_info = self._readline() pmt_info = raw_pmt_info.split() pmt_id, x, y, z, rest = unpack_nfirst(pmt_info, 4) dx, dy, dz, t0, rest = unpack_nfirst(rest, 4) pmt_id = int(pmt_id) omkey = (du, floor, i) pmts['pmt_id'].append(int(pmt_id)) pmts['pos_x'].append(float(x)) pmts['pos_y'].append(float(y)) pmts['pos_z'].append(float(z)) pmts['dir_x'].append(float(dx)) pmts['dir_y'].append(float(dy)) pmts['dir_z'].append(float(dz)) pmts['t0'].append(float(t0)) pmts['du'].append(int(du)) pmts['floor'].append(int(floor)) pmts['channel_id'].append(int(i)) pmts['dom_id'].append(int(dom_id)) if self.version == 'v3' and rest: status, rest = unpack_nfirst(rest, 1) pmts['status'].append(int(status)) if rest: log.warning("Unexpected PMT values: {0}".format(rest)) self._pmt_index_by_omkey[omkey] = pmt_index self._pmt_index_by_pmt_id[pmt_id] = pmt_index pmt_index += 1 self.pmts = Table(pmts, name='PMT')
[ "def", "_parse_doms", "(", "self", ")", ":", "self", ".", "print", "(", "\"Reading PMT information...\"", ")", "self", ".", "_det_file", ".", "seek", "(", "0", ",", "0", ")", "self", ".", "_readline", "(", ")", "pmts", "=", "defaultdict", "(", "list", ...
Extract dom information from detector file
[ "Extract", "dom", "information", "from", "detector", "file" ]
python
train
36.766234
pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L1365-L1371
def update_ptr_record(self, device, record, domain_name, data=None, ttl=None, comment=None): """ Updates a PTR record with the supplied values. """ return self._manager.update_ptr_record(device, record, domain_name, data=data, ttl=ttl, comment=comment)
[ "def", "update_ptr_record", "(", "self", ",", "device", ",", "record", ",", "domain_name", ",", "data", "=", "None", ",", "ttl", "=", "None", ",", "comment", "=", "None", ")", ":", "return", "self", ".", "_manager", ".", "update_ptr_record", "(", "device...
Updates a PTR record with the supplied values.
[ "Updates", "a", "PTR", "record", "with", "the", "supplied", "values", "." ]
python
train
43.714286
ambitioninc/django-entity-event
entity_event/context_loader.py
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L141-L155
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source): """ Given the fetched model data and the context hints for each source, go through each event and populate the contexts with the loaded information. """ for event in events: context_hints = context_hints_per_source.get(event.source, {}) for context_key, hints in context_hints.items(): model = get_model(hints['app_name'], hints['model_name']) for d, value in dict_find(event.context, context_key): if isinstance(value, list): for i, model_id in enumerate(d[context_key]): d[context_key][i] = model_data[model].get(model_id) else: d[context_key] = model_data[model].get(value)
[ "def", "load_fetched_objects_into_contexts", "(", "events", ",", "model_data", ",", "context_hints_per_source", ")", ":", "for", "event", "in", "events", ":", "context_hints", "=", "context_hints_per_source", ".", "get", "(", "event", ".", "source", ",", "{", "}",...
Given the fetched model data and the context hints for each source, go through each event and populate the contexts with the loaded information.
[ "Given", "the", "fetched", "model", "data", "and", "the", "context", "hints", "for", "each", "source", "go", "through", "each", "event", "and", "populate", "the", "contexts", "with", "the", "loaded", "information", "." ]
python
train
53.6
saltstack/salt
salt/modules/snapper.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L513-L519
def _get_num_interval(config, num_pre, num_post): ''' Returns numerical interval based on optionals num_pre, num_post values ''' post = int(num_post) if num_post else 0 pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id'] return pre, post
[ "def", "_get_num_interval", "(", "config", ",", "num_pre", ",", "num_post", ")", ":", "post", "=", "int", "(", "num_post", ")", "if", "num_post", "else", "0", "pre", "=", "int", "(", "num_pre", ")", "if", "num_pre", "is", "not", "None", "else", "_get_l...
Returns numerical interval based on optionals num_pre, num_post values
[ "Returns", "numerical", "interval", "based", "on", "optionals", "num_pre", "num_post", "values" ]
python
train
40.428571
camsci/meteor-pi
src/pythonModules/meteorpi_db/meteorpi_db/sql_builder.py
https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_db/meteorpi_db/sql_builder.py#L289-L319
def get_select_sql(self, columns, order=None, limit=0, skip=0): """ Build a SELECT query based on the current state of the builder. :param columns: SQL fragment describing which columns to select i.e. 'e.obstoryID, s.statusID' :param order: Optional ordering constraint, i.e. 'e.eventTime DESC' :param limit: Optional, used to build the 'LIMIT n' clause. If not specified no limit is imposed. :param skip: Optional, used to build the 'OFFSET n' clause. If not specified results are returned from the first item available. Note that this parameter must be combined with 'order', otherwise there's no ordering imposed on the results and subsequent queries may return overlapping data randomly. It's unlikely that this will actually happen as almost all databases do in fact create an internal ordering, but there's no guarantee of this (and some operations such as indexing will definitely break this property unless explicitly set). :returns: A SQL SELECT query, which will make use of self.sql_args when executed. """ sql = 'SELECT ' sql += '{0} FROM {1} '.format(columns, self.tables) if len(self.where_clauses) > 0: sql += ' WHERE ' sql += ' AND '.join(self.where_clauses) if order is not None: sql += ' ORDER BY {0}'.format(order) if limit > 0: sql += ' LIMIT {0} '.format(limit) if skip > 0: sql += ' OFFSET {0} '.format(skip) return sql
[ "def", "get_select_sql", "(", "self", ",", "columns", ",", "order", "=", "None", ",", "limit", "=", "0", ",", "skip", "=", "0", ")", ":", "sql", "=", "'SELECT '", "sql", "+=", "'{0} FROM {1} '", ".", "format", "(", "columns", ",", "self", ".", "table...
Build a SELECT query based on the current state of the builder. :param columns: SQL fragment describing which columns to select i.e. 'e.obstoryID, s.statusID' :param order: Optional ordering constraint, i.e. 'e.eventTime DESC' :param limit: Optional, used to build the 'LIMIT n' clause. If not specified no limit is imposed. :param skip: Optional, used to build the 'OFFSET n' clause. If not specified results are returned from the first item available. Note that this parameter must be combined with 'order', otherwise there's no ordering imposed on the results and subsequent queries may return overlapping data randomly. It's unlikely that this will actually happen as almost all databases do in fact create an internal ordering, but there's no guarantee of this (and some operations such as indexing will definitely break this property unless explicitly set). :returns: A SQL SELECT query, which will make use of self.sql_args when executed.
[ "Build", "a", "SELECT", "query", "based", "on", "the", "current", "state", "of", "the", "builder", "." ]
python
train
51.612903
ray-project/ray
python/ray/node.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/node.py#L465-L477
def start_head_processes(self): """Start head processes on the node.""" logger.info( "Process STDOUT and STDERR is being redirected to {}.".format( self._logs_dir)) assert self._redis_address is None # If this is the head node, start the relevant head node processes. self.start_redis() self.start_monitor() self.start_raylet_monitor() # The dashboard is Python3.x only. if PY3 and self._ray_params.include_webui: self.start_dashboard()
[ "def", "start_head_processes", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Process STDOUT and STDERR is being redirected to {}.\"", ".", "format", "(", "self", ".", "_logs_dir", ")", ")", "assert", "self", ".", "_redis_address", "is", "None", "# If this is...
Start head processes on the node.
[ "Start", "head", "processes", "on", "the", "node", "." ]
python
train
41.230769
Genida/dependenpy
src/dependenpy/cli.py
https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/cli.py#L82-L151
def main(args=None): """ Main function. This function is the command line entry point. Args: args (list of str): the arguments passed to the program. Returns: int: return code being 0 (OK), 1 (dsm empty) or 2 (error). """ parser = get_parser() args = parser.parse_args(args=args) if not (args.matrix or args.dependencies or args.treemap or args.graph): args.matrix = True # split comma-separated args packages = [] for arg in args.packages: if ',' in arg: for package in arg.split(','): if package not in packages: packages.append(package) elif arg not in packages: packages.append(arg) # guess convenient depth depth = args.depth if depth is None: depth = guess_depth(packages) # open file if not stdout output = args.output if isinstance(output, str): output = open(output, 'w') dsm = DSM(*packages, build_tree=True, build_dependencies=True, enforce_init=not args.greedy) if dsm.empty: return 1 indent = args.indent if indent is None: if args.format == CSV: indent = 0 else: indent = 2 elif indent < 0 and args.format == JSON: # special case for json.dumps indent argument indent = None try: if args.dependencies: dsm.print(format=args.format, output=output, indent=indent) elif args.matrix: dsm.print_matrix(format=args.format, output=output, depth=depth, indent=indent) elif args.treemap: dsm.print_treemap(format=args.format, output=output) elif args.graph: dsm.print_graph(format=args.format, output=output, depth=depth, indent=indent) except BrokenPipeError: # avoid traceback return 2 return 0
[ "def", "main", "(", "args", "=", "None", ")", ":", "parser", "=", "get_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", "args", "=", "args", ")", "if", "not", "(", "args", ".", "matrix", "or", "args", ".", "dependencies", "or", "ar...
Main function. This function is the command line entry point. Args: args (list of str): the arguments passed to the program. Returns: int: return code being 0 (OK), 1 (dsm empty) or 2 (error).
[ "Main", "function", "." ]
python
train
27.071429
jterrace/pyssim
ssim/ssimlib.py
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/ssimlib.py#L109-L145
def ssim_value(self, target): """Compute the SSIM value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). Returns: Computed SSIM float value. """ # Performance boost if handed a compatible SSIMImage object. if not isinstance(target, SSIMImage) \ or not np.array_equal(self.gaussian_kernel_1d, target.gaussian_kernel_1d): target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size) img_mat_12 = self.img.img_gray * target.img_gray img_mat_sigma_12 = convolve_gaussian_2d( img_mat_12, self.gaussian_kernel_1d) img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12 # Numerator of SSIM num_ssim = ((2 * img_mat_mu_12 + self.c_1) * (2 * img_mat_sigma_12 + self.c_2)) # Denominator of SSIM den_ssim = ( (self.img.img_gray_mu_squared + target.img_gray_mu_squared + self.c_1) * (self.img.img_gray_sigma_squared + target.img_gray_sigma_squared + self.c_2)) ssim_map = num_ssim / den_ssim index = np.average(ssim_map) return index
[ "def", "ssim_value", "(", "self", ",", "target", ")", ":", "# Performance boost if handed a compatible SSIMImage object.", "if", "not", "isinstance", "(", "target", ",", "SSIMImage", ")", "or", "not", "np", ".", "array_equal", "(", "self", ".", "gaussian_kernel_1d",...
Compute the SSIM value from the reference image to the target image. Args: target (str or PIL.Image): Input image to compare the reference image to. This may be a PIL Image object or, to save time, an SSIMImage object (e.g. the img member of another SSIM object). Returns: Computed SSIM float value.
[ "Compute", "the", "SSIM", "value", "from", "the", "reference", "image", "to", "the", "target", "image", "." ]
python
test
39.351351
Duke-GCB/DukeDSClient
ddsc/core/download.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L343-L350
def create_context(self, message_queue, task_id): """ Create data needed by upload_project_run(DukeDS connection info). :param message_queue: Queue: queue background process can send messages to us on :param task_id: int: id of this command's task so message will be routed correctly """ params = (self.settings.dest_directory, self.file_url.json_data, self.seek_amt, self.bytes_to_read) return DownloadContext(self.settings, params, message_queue, task_id)
[ "def", "create_context", "(", "self", ",", "message_queue", ",", "task_id", ")", ":", "params", "=", "(", "self", ".", "settings", ".", "dest_directory", ",", "self", ".", "file_url", ".", "json_data", ",", "self", ".", "seek_amt", ",", "self", ".", "byt...
Create data needed by upload_project_run(DukeDS connection info). :param message_queue: Queue: queue background process can send messages to us on :param task_id: int: id of this command's task so message will be routed correctly
[ "Create", "data", "needed", "by", "upload_project_run", "(", "DukeDS", "connection", "info", ")", ".", ":", "param", "message_queue", ":", "Queue", ":", "queue", "background", "process", "can", "send", "messages", "to", "us", "on", ":", "param", "task_id", "...
python
train
63.25
daviddrysdale/python-phonenumbers
python/phonenumbers/phonenumberutil.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L2133-L2165
def ndd_prefix_for_region(region_code, strip_non_digits): """Returns the national dialling prefix for a specific region. For example, this would be 1 for the United States, and 0 for New Zealand. Set strip_non_digits to True to strip symbols like "~" (which indicates a wait for a dialling tone) from the prefix returned. If no national prefix is present, we return None. Warning: Do not use this method for do-your-own formatting - for some regions, the national dialling prefix is used only for certain types of numbers. Use the library's formatting functions to prefix the national prefix when required. Arguments: region_code -- The region that we want to get the dialling prefix for. strip_non_digits -- whether to strip non-digits from the national dialling prefix. Returns the dialling prefix for the region denoted by region_code. """ if region_code is None: return None metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None) if metadata is None: return None national_prefix = metadata.national_prefix if national_prefix is None or len(national_prefix) == 0: return None if strip_non_digits: # Note: if any other non-numeric symbols are ever used in national # prefixes, these would have to be removed here as well. national_prefix = re.sub(U_TILDE, U_EMPTY_STRING, national_prefix) return national_prefix
[ "def", "ndd_prefix_for_region", "(", "region_code", ",", "strip_non_digits", ")", ":", "if", "region_code", "is", "None", ":", "return", "None", "metadata", "=", "PhoneMetadata", ".", "metadata_for_region", "(", "region_code", ".", "upper", "(", ")", ",", "None"...
Returns the national dialling prefix for a specific region. For example, this would be 1 for the United States, and 0 for New Zealand. Set strip_non_digits to True to strip symbols like "~" (which indicates a wait for a dialling tone) from the prefix returned. If no national prefix is present, we return None. Warning: Do not use this method for do-your-own formatting - for some regions, the national dialling prefix is used only for certain types of numbers. Use the library's formatting functions to prefix the national prefix when required. Arguments: region_code -- The region that we want to get the dialling prefix for. strip_non_digits -- whether to strip non-digits from the national dialling prefix. Returns the dialling prefix for the region denoted by region_code.
[ "Returns", "the", "national", "dialling", "prefix", "for", "a", "specific", "region", "." ]
python
train
43.909091
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_fc_auth.py#L54-L66
def fcsp_sa_fcsp_auth_policy_switch(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth") fcsp = ET.SubElement(fcsp_sa, "fcsp") auth = ET.SubElement(fcsp, "auth") policy = ET.SubElement(auth, "policy") switch = ET.SubElement(policy, "switch") switch.text = kwargs.pop('switch') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcsp_sa_fcsp_auth_policy_switch", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcsp_sa", "=", "ET", ".", "SubElement", "(", "config", ",", "\"fcsp-sa\"", ",", "xmlns", "=", "\"urn:bro...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
41.307692
openstack/quark
quark/plugin_modules/subnets.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L97-L231
def create_subnet(context, subnet): """Create a subnet. Create a subnet which represents a range of IP addresses that can be allocated to devices : param context: neutron api request context : param subnet: dictionary describing the subnet, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_subnet for tenant %s" % context.tenant_id) net_id = subnet["subnet"]["network_id"] with context.session.begin(): net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, fields=None, id=net_id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=net_id) sub_attrs = subnet["subnet"] always_pop = ["enable_dhcp", "ip_version", "first_ip", "last_ip", "_cidr"] admin_only = ["segment_id", "do_not_use", "created_at", "next_auto_assign_ip"] utils.filter_body(context, sub_attrs, admin_only, always_pop) _validate_subnet_cidr(context, net_id, sub_attrs["cidr"]) cidr = netaddr.IPNetwork(sub_attrs["cidr"]) err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id} err = _("Requested subnet with cidr: %(cidr)s for " "network: %(network_id)s. Prefix is too small, must be a " "larger subnet. 
A prefix less than /%(prefix)s is required.") if cidr.version == 6 and cidr.prefixlen > 64: err_vals["prefix"] = 65 err_msg = err % err_vals raise n_exc.InvalidInput(error_message=err_msg) elif cidr.version == 4 and cidr.prefixlen > 30: err_vals["prefix"] = 31 err_msg = err % err_vals raise n_exc.InvalidInput(error_message=err_msg) # Enforce subnet quotas net_subnets = get_subnets(context, filters=dict(network_id=net_id)) if not context.is_admin: v4_count, v6_count = 0, 0 for subnet in net_subnets: if netaddr.IPNetwork(subnet['cidr']).version == 6: v6_count += 1 else: v4_count += 1 if cidr.version == 6: tenant_quota_v6 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v6_subnets_per_network').first() if tenant_quota_v6 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v6_subnets_per_network=v6_count + 1) else: tenant_quota_v4 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v4_subnets_per_network').first() if tenant_quota_v4 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v4_subnets_per_network=v4_count + 1) # See RM981. The default behavior of setting a gateway unless # explicitly asked to not is no longer desirable. 
gateway_ip = utils.pop_param(sub_attrs, "gateway_ip") dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", []) host_routes = utils.pop_param(sub_attrs, "host_routes", []) allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None) sub_attrs["network"] = net new_subnet = db_api.subnet_create(context, **sub_attrs) cidrs = [] alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"], allocation_pools) if isinstance(allocation_pools, list): cidrs = alloc_pools.get_policy_cidrs() quota.QUOTAS.limit_check( context, context.tenant_id, alloc_pools_per_subnet=len(alloc_pools)) ip_policies.ensure_default_policy(cidrs, [new_subnet]) new_subnet["ip_policy"] = db_api.ip_policy_create(context, exclude=cidrs) quota.QUOTAS.limit_check(context, context.tenant_id, routes_per_subnet=len(host_routes)) default_route = None for route in host_routes: netaddr_route = netaddr.IPNetwork(route["destination"]) if netaddr_route.value == routes.DEFAULT_ROUTE.value: if default_route: raise q_exc.DuplicateRouteConflict( subnet_id=new_subnet["id"]) default_route = route gateway_ip = default_route["nexthop"] alloc_pools.validate_gateway_excluded(gateway_ip) new_subnet["routes"].append(db_api.route_create( context, cidr=route["destination"], gateway=route["nexthop"])) quota.QUOTAS.limit_check(context, context.tenant_id, dns_nameservers_per_subnet=len(dns_ips)) for dns_ip in dns_ips: new_subnet["dns_nameservers"].append(db_api.dns_create( context, ip=netaddr.IPAddress(dns_ip))) # if the gateway_ip is IN the cidr for the subnet and NOT excluded by # policies, we should raise a 409 conflict if gateway_ip and default_route is None: alloc_pools.validate_gateway_excluded(gateway_ip) new_subnet["routes"].append(db_api.route_create( context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip)) subnet_dict = v._make_subnet_dict(new_subnet) subnet_dict["gateway_ip"] = gateway_ip return subnet_dict
[ "def", "create_subnet", "(", "context", ",", "subnet", ")", ":", "LOG", ".", "info", "(", "\"create_subnet for tenant %s\"", "%", "context", ".", "tenant_id", ")", "net_id", "=", "subnet", "[", "\"subnet\"", "]", "[", "\"network_id\"", "]", "with", "context", ...
Create a subnet. Create a subnet which represents a range of IP addresses that can be allocated to devices : param context: neutron api request context : param subnet: dictionary describing the subnet, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated.
[ "Create", "a", "subnet", "." ]
python
valid
42.451852
crunchyroll/ef-open
efopen/ef_utils.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_utils.py#L419-L449
def get_autoscaling_group_properties(asg_client, env, service): """ Gets the autoscaling group properties based on the service name that is provided. This function will attempt the find the autoscaling group base on the following logic: 1. If the service name provided matches the autoscaling group name 2. If the service name provided matches the Name tag of the autoscaling group 3. If the service name provided does not match the above, return None Args: clients: Instantiated boto3 autoscaling client env: Name of the environment to search for the autoscaling group service: Name of the service Returns: JSON object of the autoscaling group properties if it exists """ try: # See if {{ENV}}-{{SERVICE}} matches ASG name response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=["{}-{}".format(env, service)]) if len(response["AutoScalingGroups"]) == 0: # See if {{ENV}}-{{SERVICE}} matches ASG tag name response = asg_client.describe_tags(Filters=[{ "Name": "Key", "Values": ["Name"] }, { "Name": "Value", "Values": ["{}-{}".format(env, service)]}]) if len(response["Tags"]) == 0: # Query does not match either of the above, return None return None else: asg_name = response["Tags"][0]["ResourceId"] response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]) return response["AutoScalingGroups"] else: return response["AutoScalingGroups"] except ClientError as error: raise RuntimeError("Error in finding autoscaling group {} {}".format(env, service), error)
[ "def", "get_autoscaling_group_properties", "(", "asg_client", ",", "env", ",", "service", ")", ":", "try", ":", "# See if {{ENV}}-{{SERVICE}} matches ASG name", "response", "=", "asg_client", ".", "describe_auto_scaling_groups", "(", "AutoScalingGroupNames", "=", "[", "\"...
Gets the autoscaling group properties based on the service name that is provided. This function will attempt the find the autoscaling group base on the following logic: 1. If the service name provided matches the autoscaling group name 2. If the service name provided matches the Name tag of the autoscaling group 3. If the service name provided does not match the above, return None Args: clients: Instantiated boto3 autoscaling client env: Name of the environment to search for the autoscaling group service: Name of the service Returns: JSON object of the autoscaling group properties if it exists
[ "Gets", "the", "autoscaling", "group", "properties", "based", "on", "the", "service", "name", "that", "is", "provided", ".", "This", "function", "will", "attempt", "the", "find", "the", "autoscaling", "group", "base", "on", "the", "following", "logic", ":", ...
python
train
51.967742
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L201-L216
def register_bjam_action (self, action_name, function=None): """Informs self that 'action_name' is declared in bjam. From this point, 'action_name' is a valid argument to the set_update_action method. The action_name should be callable in the global module of bjam. """ # We allow duplicate calls to this rule for the same # action name. This way, jamfile rules that take action names # can just register them without specially checking if # action is already registered. assert isinstance(action_name, basestring) assert function is None or callable(function) if action_name not in self.actions: self.actions[action_name] = BjamNativeAction(action_name, function)
[ "def", "register_bjam_action", "(", "self", ",", "action_name", ",", "function", "=", "None", ")", ":", "# We allow duplicate calls to this rule for the same", "# action name. This way, jamfile rules that take action names", "# can just register them without specially checking if", "#...
Informs self that 'action_name' is declared in bjam. From this point, 'action_name' is a valid argument to the set_update_action method. The action_name should be callable in the global module of bjam.
[ "Informs", "self", "that", "action_name", "is", "declared", "in", "bjam", "." ]
python
train
47.5
obriencj/python-javatools
javatools/__init__.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L1777-L1783
def info(self): """ tuple of the start_pc, end_pc, handler_pc and catch_type_ref """ return (self.start_pc, self.end_pc, self.handler_pc, self.get_catch_type())
[ "def", "info", "(", "self", ")", ":", "return", "(", "self", ".", "start_pc", ",", "self", ".", "end_pc", ",", "self", ".", "handler_pc", ",", "self", ".", "get_catch_type", "(", ")", ")" ]
tuple of the start_pc, end_pc, handler_pc and catch_type_ref
[ "tuple", "of", "the", "start_pc", "end_pc", "handler_pc", "and", "catch_type_ref" ]
python
train
29
Unidata/siphon
siphon/ncss.py
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss.py#L408-L425
def parse_csv_header(line): """Parse the CSV header returned by TDS.""" units = {} names = [] for var in line.split(','): start = var.find('[') if start < 0: names.append(str(var)) continue else: names.append(str(var[:start])) end = var.find(']', start) unitstr = var[start + 1:end] eq = unitstr.find('=') if eq >= 0: # go past = and ", skip final " units[names[-1]] = unitstr[eq + 2:-1] return names, units
[ "def", "parse_csv_header", "(", "line", ")", ":", "units", "=", "{", "}", "names", "=", "[", "]", "for", "var", "in", "line", ".", "split", "(", "','", ")", ":", "start", "=", "var", ".", "find", "(", "'['", ")", "if", "start", "<", "0", ":", ...
Parse the CSV header returned by TDS.
[ "Parse", "the", "CSV", "header", "returned", "by", "TDS", "." ]
python
train
29.222222
caktus/django-timepiece
timepiece/entries/models.py
https://github.com/caktus/django-timepiece/blob/52515dec027664890efbc535429e1ba1ee152f40/timepiece/entries/models.py#L89-L108
def timespan(self, from_date, to_date=None, span=None, current=False): """ Takes a beginning date a filters entries. An optional to_date can be specified, or a span, which is one of ('month', 'week', 'day'). N.B. - If given a to_date, it does not include that date, only before. """ if span and not to_date: diff = None if span == 'month': diff = relativedelta(months=1) elif span == 'week': diff = relativedelta(days=7) elif span == 'day': diff = relativedelta(days=1) if diff is not None: to_date = from_date + diff datesQ = Q(end_time__gte=from_date) datesQ &= Q(end_time__lt=to_date) if to_date else Q() datesQ |= Q(end_time__isnull=True) if current else Q() return self.filter(datesQ)
[ "def", "timespan", "(", "self", ",", "from_date", ",", "to_date", "=", "None", ",", "span", "=", "None", ",", "current", "=", "False", ")", ":", "if", "span", "and", "not", "to_date", ":", "diff", "=", "None", "if", "span", "==", "'month'", ":", "d...
Takes a beginning date a filters entries. An optional to_date can be specified, or a span, which is one of ('month', 'week', 'day'). N.B. - If given a to_date, it does not include that date, only before.
[ "Takes", "a", "beginning", "date", "a", "filters", "entries", ".", "An", "optional", "to_date", "can", "be", "specified", "or", "a", "span", "which", "is", "one", "of", "(", "month", "week", "day", ")", ".", "N", ".", "B", ".", "-", "If", "given", ...
python
train
43.7
moderngl/moderngl
moderngl/buffer.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/buffer.py#L72-L85
def write_chunks(self, data, start, step, count) -> None: ''' Split data to count equal parts. Write the chunks using offsets calculated from start, step and stop. Args: data (bytes): The data. start (int): First offset. step (int): Offset increment. count (int): The number of offsets. ''' self.mglo.write_chunks(data, start, step, count)
[ "def", "write_chunks", "(", "self", ",", "data", ",", "start", ",", "step", ",", "count", ")", "->", "None", ":", "self", ".", "mglo", ".", "write_chunks", "(", "data", ",", "start", ",", "step", ",", "count", ")" ]
Split data to count equal parts. Write the chunks using offsets calculated from start, step and stop. Args: data (bytes): The data. start (int): First offset. step (int): Offset increment. count (int): The number of offsets.
[ "Split", "data", "to", "count", "equal", "parts", "." ]
python
train
32.357143
ingolemo/python-lenses
examples/robots.py
https://github.com/ingolemo/python-lenses/blob/a3a6ed0a31f6674451e542e7380a8aa16e6f8edf/examples/robots.py#L113-L125
def advance_robots(self): '''Produces a new game state in which the robots have advanced towards the player by one step. Handles the robots crashing into one another too.''' # move the robots towards the player self = lens.robots.Each().call_step_towards(self.player)(self) # robots in the same place are crashes self = lens.crashes.call_union(duplicates(self.robots))(self) # remove crashed robots self = lens.robots.modify(lambda r: list(set(r) - self.crashes))(self) return self
[ "def", "advance_robots", "(", "self", ")", ":", "# move the robots towards the player", "self", "=", "lens", ".", "robots", ".", "Each", "(", ")", ".", "call_step_towards", "(", "self", ".", "player", ")", "(", "self", ")", "# robots in the same place are crashes"...
Produces a new game state in which the robots have advanced towards the player by one step. Handles the robots crashing into one another too.
[ "Produces", "a", "new", "game", "state", "in", "which", "the", "robots", "have", "advanced", "towards", "the", "player", "by", "one", "step", ".", "Handles", "the", "robots", "crashing", "into", "one", "another", "too", "." ]
python
test
42.384615
CivicSpleen/ambry
ambry/orm/file.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/file.py#L180-L193
def dict(self): """A dict that holds key/values for all of the properties in the object. :return: """ d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('contents', 'dataset')} d['modified_datetime'] = self.modified_datetime d['modified_ago'] = self.modified_ago return d
[ "def", "dict", "(", "self", ")", ":", "d", "=", "{", "p", ".", "key", ":", "getattr", "(", "self", ",", "p", ".", "key", ")", "for", "p", "in", "self", ".", "__mapper__", ".", "attrs", "if", "p", ".", "key", "not", "in", "(", "'contents'", ",...
A dict that holds key/values for all of the properties in the object. :return:
[ "A", "dict", "that", "holds", "key", "/", "values", "for", "all", "of", "the", "properties", "in", "the", "object", "." ]
python
train
26.357143
marcomusy/vtkplotter
vtkplotter/analysis.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/analysis.py#L488-L504
def extractLargestRegion(actor): """Keep only the largest connected part of a mesh and discard all the smaller pieces. .. hint:: |largestregion.py|_ """ conn = vtk.vtkConnectivityFilter() conn.SetExtractionModeToLargestRegion() conn.ScalarConnectivityOff() poly = actor.GetMapper().GetInput() conn.SetInputData(poly) conn.Update() epoly = conn.GetOutput() eact = Actor(epoly) pr = vtk.vtkProperty() pr.DeepCopy(actor.GetProperty()) eact.SetProperty(pr) return eact
[ "def", "extractLargestRegion", "(", "actor", ")", ":", "conn", "=", "vtk", ".", "vtkConnectivityFilter", "(", ")", "conn", ".", "SetExtractionModeToLargestRegion", "(", ")", "conn", ".", "ScalarConnectivityOff", "(", ")", "poly", "=", "actor", ".", "GetMapper", ...
Keep only the largest connected part of a mesh and discard all the smaller pieces. .. hint:: |largestregion.py|_
[ "Keep", "only", "the", "largest", "connected", "part", "of", "a", "mesh", "and", "discard", "all", "the", "smaller", "pieces", "." ]
python
train
29.941176
vatlab/SoS
src/sos/targets_python.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/targets_python.py#L38-L118
def _install(self, name, autoinstall): '''Check existence of Python module and install it using command pip install if necessary.''' import importlib import pkg_resources spam_spec = importlib.util.find_spec(name) reinstall = False if spam_spec is not None: if self._version: mod = importlib.__import__(name) if hasattr(mod, '__version__'): ver = mod.__version__ else: try: ver = pkg_resources.get_distribution(name).version except Exception as e: env.logger.debug( f'Failed to get version of {name}: {e}') env.logger.debug( f'Comparing exiting version {ver} against requested version {self._version}' ) if self._version.startswith( '==') and pkg_resources.parse_version( ver) == pkg_resources.parse_version( self._version[2:]): pass elif self._version.startswith( '<=') and pkg_resources.parse_version( ver) <= pkg_resources.parse_version( self._version[2:]): pass elif self._version.startswith( '<') and not self._version.startswith( '<=') and pkg_resources.parse_version( ver) < pkg_resources.parse_version( self._version[1:]): pass elif self._version.startswith( '>=') and pkg_resources.parse_version( ver) >= pkg_resources.parse_version( self._version[2:]): pass # the case of > elif self._version.startswith( '>') and not self._version.startswith( '>=') and pkg_resources.parse_version( ver) > pkg_resources.parse_version( self._version[1:]): pass elif self._version.startswith( '!=') and pkg_resources.parse_version( ver) != pkg_resources.parse_version( self._version[2:]): pass elif self._version[0] not in ( '=', '>', '<', '!') and pkg_resources.parse_version( ver) == pkg_resources.parse_version(self._version): pass else: env.logger.warning( f'Version {ver} of installed {name} does not match specified version {self._version}.' ) reinstall = True if spam_spec and not reinstall: return True if not autoinstall: return False # try to install it? 
import subprocess cmd = ['pip', 'install'] + ([] if self._version else ['-U']) + [ self._module + (self._version if self._version else '') if self._autoinstall is True else self._autoinstall ] env.logger.info( f'Installing python module {name} with command {" ".join(cmd)}') ret = subprocess.call(cmd) if reinstall: import sys importlib.reload(sys.modules[name]) # try to check version return ret == 0 and self._install(name, False)
[ "def", "_install", "(", "self", ",", "name", ",", "autoinstall", ")", ":", "import", "importlib", "import", "pkg_resources", "spam_spec", "=", "importlib", ".", "util", ".", "find_spec", "(", "name", ")", "reinstall", "=", "False", "if", "spam_spec", "is", ...
Check existence of Python module and install it using command pip install if necessary.
[ "Check", "existence", "of", "Python", "module", "and", "install", "it", "using", "command", "pip", "install", "if", "necessary", "." ]
python
train
45.444444
gamechanger/dusty
dusty/compiler/spec_assembler.py
https://github.com/gamechanger/dusty/blob/dc12de90bb6945023d6f43a8071e984313a1d984/dusty/compiler/spec_assembler.py#L29-L40
def _get_referenced_apps(specs): """ Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY] """ activated_bundles = specs[constants.CONFIG_BUNDLES_KEY].keys() all_active_apps = set() for active_bundle in activated_bundles: bundle_spec = specs[constants.CONFIG_BUNDLES_KEY].get(active_bundle) for app_name in bundle_spec['apps']: all_active_apps.add(app_name) all_active_apps |= _get_dependent('apps', app_name, specs, 'apps') return all_active_apps
[ "def", "_get_referenced_apps", "(", "specs", ")", ":", "activated_bundles", "=", "specs", "[", "constants", ".", "CONFIG_BUNDLES_KEY", "]", ".", "keys", "(", ")", "all_active_apps", "=", "set", "(", ")", "for", "active_bundle", "in", "activated_bundles", ":", ...
Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY]
[ "Returns", "a", "set", "of", "all", "apps", "that", "are", "required", "to", "run", "any", "bundle", "in", "specs", "[", "constants", ".", "CONFIG_BUNDLES_KEY", "]" ]
python
valid
45.916667
bjodah/pyodesys
pyodesys/symbolic.py
https://github.com/bjodah/pyodesys/blob/0034a6165b550d8d9808baef58678dca5a493ab7/pyodesys/symbolic.py#L711-L726
def get_j_ty_callback(self): """ Generates a callback for evaluating the jacobian. """ j_exprs = self.get_jac() if j_exprs is False: return None cb = self._callback_factory(j_exprs) if self.sparse: from scipy.sparse import csc_matrix def sparse_cb(x, y, p=()): data = cb(x, y, p).flatten() return csc_matrix((data, self._rowvals, self._colptrs)) return sparse_cb else: return cb
[ "def", "get_j_ty_callback", "(", "self", ")", ":", "j_exprs", "=", "self", ".", "get_jac", "(", ")", "if", "j_exprs", "is", "False", ":", "return", "None", "cb", "=", "self", ".", "_callback_factory", "(", "j_exprs", ")", "if", "self", ".", "sparse", "...
Generates a callback for evaluating the jacobian.
[ "Generates", "a", "callback", "for", "evaluating", "the", "jacobian", "." ]
python
train
31.5625
coldfix/udiskie
udiskie/udisks2.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/udisks2.py#L286-L300
def device_id(self): """ Return a unique and persistent identifier for the device. This is the basename (last path component) of the symlink in `/dev/disk/by-id/`. """ if self.is_block: for filename in self._P.Block.Symlinks: parts = decode_ay(filename).split('/') if parts[-2] == 'by-id': return parts[-1] elif self.is_drive: return self._assocdrive._P.Drive.Id return ''
[ "def", "device_id", "(", "self", ")", ":", "if", "self", ".", "is_block", ":", "for", "filename", "in", "self", ".", "_P", ".", "Block", ".", "Symlinks", ":", "parts", "=", "decode_ay", "(", "filename", ")", ".", "split", "(", "'/'", ")", "if", "pa...
Return a unique and persistent identifier for the device. This is the basename (last path component) of the symlink in `/dev/disk/by-id/`.
[ "Return", "a", "unique", "and", "persistent", "identifier", "for", "the", "device", "." ]
python
train
33.266667
meejah/txtorcon
txtorcon/util.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/util.py#L314-L343
def unescape_quoted_string(string): r''' This function implementes the recommended functionality described in the tor control-spec to be compatible with older tor versions: * Read \\n \\t \\r and \\0 ... \\377 as C escapes. * Treat a backslash followed by any other character as that character. Except the legacy support for the escape sequences above this function implements parsing of QuotedString using qcontent from QuotedString = DQUOTE *qcontent DQUOTE :param string: The escaped quoted string. :returns: The unescaped string. :raises ValueError: If the string is in a invalid form (e.g. a single backslash) ''' match = re.match(r'''^"((?:[^"\\]|\\.)*)"$''', string) if not match: raise ValueError("Invalid quoted string", string) string = match.group(1) # remove backslash before all characters which should not be # handeled as escape codes by string.decode('string-escape'). # This is needed so e.g. '\x00' is not unescaped as '\0' string = re.sub(r'((?:^|[^\\])(?:\\\\)*)\\([^ntr0-7\\])', r'\1\2', string) if six.PY3: # XXX hmmm? return bytes(string, 'ascii').decode('unicode-escape') return string.decode('string-escape')
[ "def", "unescape_quoted_string", "(", "string", ")", ":", "match", "=", "re", ".", "match", "(", "r'''^\"((?:[^\"\\\\]|\\\\.)*)\"$'''", ",", "string", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Invalid quoted string\"", ",", "string", ")", "s...
r''' This function implementes the recommended functionality described in the tor control-spec to be compatible with older tor versions: * Read \\n \\t \\r and \\0 ... \\377 as C escapes. * Treat a backslash followed by any other character as that character. Except the legacy support for the escape sequences above this function implements parsing of QuotedString using qcontent from QuotedString = DQUOTE *qcontent DQUOTE :param string: The escaped quoted string. :returns: The unescaped string. :raises ValueError: If the string is in a invalid form (e.g. a single backslash)
[ "r", "This", "function", "implementes", "the", "recommended", "functionality", "described", "in", "the", "tor", "control", "-", "spec", "to", "be", "compatible", "with", "older", "tor", "versions", ":" ]
python
train
41.466667
pmacosta/pexdoc
pexdoc/exh.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/exh.py#L804-L814
def _flatten_ex_dict(self): """Flatten structure of exceptions dictionary.""" odict = {} for _, fdict in self._ex_dict.items(): for (extype, exmsg), value in fdict.items(): key = value["name"] odict[key] = copy.deepcopy(value) del odict[key]["name"] odict[key]["type"] = extype odict[key]["msg"] = exmsg return odict
[ "def", "_flatten_ex_dict", "(", "self", ")", ":", "odict", "=", "{", "}", "for", "_", ",", "fdict", "in", "self", ".", "_ex_dict", ".", "items", "(", ")", ":", "for", "(", "extype", ",", "exmsg", ")", ",", "value", "in", "fdict", ".", "items", "(...
Flatten structure of exceptions dictionary.
[ "Flatten", "structure", "of", "exceptions", "dictionary", "." ]
python
train
39.090909
tensorflow/datasets
tensorflow_datasets/structured/titanic.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/structured/titanic.py#L130-L150
def _generate_examples(self, file_path): """Generate features and target given the directory path. Args: file_path: path where the csv file is stored Yields: The features and the target """ with tf.io.gfile.GFile(file_path) as f: raw_data = csv.DictReader(f) for row in raw_data: survive_val = row.pop("survived") yield { "survived": convert_to_label(survive_val, _SURVIVED_DICT), "features": { name: FEATURE_DICT[name][1](value) for name, value in row.items() } }
[ "def", "_generate_examples", "(", "self", ",", "file_path", ")", ":", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "file_path", ")", "as", "f", ":", "raw_data", "=", "csv", ".", "DictReader", "(", "f", ")", "for", "row", "in", "raw_data"...
Generate features and target given the directory path. Args: file_path: path where the csv file is stored Yields: The features and the target
[ "Generate", "features", "and", "target", "given", "the", "directory", "path", "." ]
python
train
27.761905
joesecurity/jbxapi
jbxapi.py
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L279-L285
def server_online(self): """ Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode. """ response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey}) return self._raise_or_extract(response)
[ "def", "server_online", "(", "self", ")", ":", "response", "=", "self", ".", "_post", "(", "self", ".", "apiurl", "+", "'/v2/server/online'", ",", "data", "=", "{", "'apikey'", ":", "self", ".", "apikey", "}", ")", "return", "self", ".", "_raise_or_extra...
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
[ "Returns", "True", "if", "the", "Joe", "Sandbox", "servers", "are", "running", "or", "False", "if", "they", "are", "in", "maintenance", "mode", "." ]
python
train
41.142857
spyder-ide/spyder
spyder/widgets/findreplace.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/findreplace.py#L328-L349
def set_editor(self, editor, refresh=True): """ Set associated editor/web page: codeeditor.base.TextEditBaseWidget browser.WebView """ self.editor = editor # Note: This is necessary to test widgets/editor.py # in Qt builds that don't have web widgets try: from qtpy.QtWebEngineWidgets import QWebEngineView except ImportError: QWebEngineView = type(None) self.words_button.setVisible(not isinstance(editor, QWebEngineView)) self.re_button.setVisible(not isinstance(editor, QWebEngineView)) from spyder.plugins.editor.widgets.codeeditor import CodeEditor self.is_code_editor = isinstance(editor, CodeEditor) self.highlight_button.setVisible(self.is_code_editor) if refresh: self.refresh() if self.isHidden() and editor is not None: self.clear_matches()
[ "def", "set_editor", "(", "self", ",", "editor", ",", "refresh", "=", "True", ")", ":", "self", ".", "editor", "=", "editor", "# Note: This is necessary to test widgets/editor.py\r", "# in Qt builds that don't have web widgets\r", "try", ":", "from", "qtpy", ".", "QtW...
Set associated editor/web page: codeeditor.base.TextEditBaseWidget browser.WebView
[ "Set", "associated", "editor", "/", "web", "page", ":", "codeeditor", ".", "base", ".", "TextEditBaseWidget", "browser", ".", "WebView" ]
python
train
42.863636
PyCQA/astroid
astroid/builder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/builder.py#L163-L191
def _data_build(self, data, modname, path): """Build tree node from data and add some informations""" try: node = _parse(data + "\n") except (TypeError, ValueError, SyntaxError) as exc: raise exceptions.AstroidSyntaxError( "Parsing Python code failed:\n{error}", source=data, modname=modname, path=path, error=exc, ) from exc if path is not None: node_file = os.path.abspath(path) else: node_file = "<?>" if modname.endswith(".__init__"): modname = modname[:-9] package = True else: package = ( path is not None and os.path.splitext(os.path.basename(path))[0] == "__init__" ) builder = rebuilder.TreeRebuilder(self._manager) module = builder.visit_module(node, modname, node_file, package) module._import_from_nodes = builder._import_from_nodes module._delayed_assattr = builder._delayed_assattr return module
[ "def", "_data_build", "(", "self", ",", "data", ",", "modname", ",", "path", ")", ":", "try", ":", "node", "=", "_parse", "(", "data", "+", "\"\\n\"", ")", "except", "(", "TypeError", ",", "ValueError", ",", "SyntaxError", ")", "as", "exc", ":", "rai...
Build tree node from data and add some informations
[ "Build", "tree", "node", "from", "data", "and", "add", "some", "informations" ]
python
train
37.827586
niemasd/TreeSwift
treeswift/Tree.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L890-L900
def rename_nodes(self, renaming_map): '''Rename nodes in this ``Tree`` Args: ``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values) ''' if not isinstance(renaming_map, dict): raise TypeError("renaming_map must be a dict") for node in self.traverse_preorder(): if node.label in renaming_map: node.label = renaming_map[node.label]
[ "def", "rename_nodes", "(", "self", ",", "renaming_map", ")", ":", "if", "not", "isinstance", "(", "renaming_map", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"renaming_map must be a dict\"", ")", "for", "node", "in", "self", ".", "traverse_preorder", ...
Rename nodes in this ``Tree`` Args: ``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values)
[ "Rename", "nodes", "in", "this", "Tree" ]
python
train
40.636364
Qiskit/qiskit-terra
qiskit/qasm/qasmparser.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qasm/qasmparser.py#L75-L89
def verify_declared_bit(self, obj): """Verify a qubit id against the gate prototype.""" # We are verifying gate args against the formal parameters of a # gate prototype. if obj.name not in self.current_symtab: raise QasmError("Cannot find symbol '" + obj.name + "' in argument list for gate, line", str(obj.line), 'file', obj.file) # This insures the thing is from the bitlist and not from the # argument list. sym = self.current_symtab[obj.name] if not (sym.type == 'id' and sym.is_bit): raise QasmError("Bit", obj.name, 'is not declared as a bit in the gate.')
[ "def", "verify_declared_bit", "(", "self", ",", "obj", ")", ":", "# We are verifying gate args against the formal parameters of a", "# gate prototype.", "if", "obj", ".", "name", "not", "in", "self", ".", "current_symtab", ":", "raise", "QasmError", "(", "\"Cannot find ...
Verify a qubit id against the gate prototype.
[ "Verify", "a", "qubit", "id", "against", "the", "gate", "prototype", "." ]
python
test
48.066667
ssalentin/plip
plip/modules/detection.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/detection.py#L52-L72
def hydrophobic_interactions(atom_set_a, atom_set_b): """Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand). Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX """ data = namedtuple('hydroph_interaction', 'bsatom bsatom_orig_idx ligatom ligatom_orig_idx ' 'distance restype resnr reschain restype_l, resnr_l, reschain_l') pairings = [] for a, b in itertools.product(atom_set_a, atom_set_b): if a.orig_idx == b.orig_idx: continue e = euclidean3d(a.atom.coords, b.atom.coords) if not config.MIN_DIST < e < config.HYDROPH_DIST_MAX: continue restype, resnr, reschain = whichrestype(a.atom), whichresnumber(a.atom), whichchain(a.atom) restype_l, resnr_l, reschain_l = whichrestype(b.orig_atom), whichresnumber(b.orig_atom), whichchain(b.orig_atom) contact = data(bsatom=a.atom, bsatom_orig_idx=a.orig_idx, ligatom=b.atom, ligatom_orig_idx=b.orig_idx, distance=e, restype=restype, resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l) pairings.append(contact) return filter_contacts(pairings)
[ "def", "hydrophobic_interactions", "(", "atom_set_a", ",", "atom_set_b", ")", ":", "data", "=", "namedtuple", "(", "'hydroph_interaction'", ",", "'bsatom bsatom_orig_idx ligatom ligatom_orig_idx '", "'distance restype resnr reschain restype_l, resnr_l, reschain_l'", ")", "pairings"...
Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand). Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX
[ "Detection", "of", "hydrophobic", "pliprofiler", "between", "atom_set_a", "(", "binding", "site", ")", "and", "atom_set_b", "(", "ligand", ")", ".", "Definition", ":", "All", "pairs", "of", "qualified", "carbon", "atoms", "within", "a", "distance", "of", "HYDR...
python
train
62.190476
nir0s/serv
serv/serv.py
https://github.com/nir0s/serv/blob/7af724ed49c0eb766c37c4b5287b043a8cf99e9c/serv/serv.py#L260-L284
def _lookup_by_mapping():
    """Return the init system from the static distro+version mapping.

    See constants.py for the mapping.  Rolling distros (anything Arch-like)
    are looked up under the pseudo-version 'any', because e.g. Arch Linux
    reports 'rolling' rather than a number.  Besides the distro's own ID,
    the 'ID_LIKE' field from /etc/os-release is consulted, which catches
    Arch derivatives (Manjaro, Antergos, ...) whose ID differs but whose
    ID_LIKE is always (?) 'arch'.
    """
    id_like = distro.like().lower()
    dist_id = distro.id().lower()
    version = distro.major_version()
    if 'arch' in (dist_id, id_like):
        # Rolling release: version string is useless for the lookup.
        version = 'any'
    version_map = constants.DIST_TO_INITSYS.get(
        dist_id, constants.DIST_TO_INITSYS.get(id_like))
    if version_map:
        system = version_map.get(version)
        return [system] if system else []
[ "def", "_lookup_by_mapping", "(", ")", ":", "like", "=", "distro", ".", "like", "(", ")", ".", "lower", "(", ")", "distribution_id", "=", "distro", ".", "id", "(", ")", ".", "lower", "(", ")", "version", "=", "distro", ".", "major_version", "(", ")",...
Return a the init system based on a constant mapping of distribution+version to init system.. See constants.py for the mapping. A failover of the version is proposed for when no version is supplied. For instance, Arch Linux's version will most probably be "rolling" at any given time, which means that the init system cannot be idenfied by the version of the distro. On top of trying to identify by the distro's ID, if /etc/os-release contains an "ID_LIKE" field, it will be tried. That, again is true for Arch where the distro's ID changes (Manjaro, Antergos, etc...) But the "ID_LIKE" field is always (?) `arch`.
[ "Return", "a", "the", "init", "system", "based", "on", "a", "constant", "mapping", "of", "distribution", "+", "version", "to", "init", "system", ".." ]
python
train
45.8
rigetti/quantumflow
quantumflow/ops.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/ops.py#L354-L363
def evolve(self, rho: Density) -> Density:
    """Apply the action of this channel upon a density."""
    qubit_count = rho.qubit_nb
    density_qubits = rho.qubits
    # Channel acts on both the ket and bra indices of the density tensor;
    # the bra indices are offset by the total qubit count.
    positions = [density_qubits.index(q) for q in self.qubits]
    indices = positions + [pos + qubit_count for pos in positions]
    tensor = bk.tensormul(self.tensor, rho.tensor, indices)
    return Density(tensor, density_qubits, rho.memory)
[ "def", "evolve", "(", "self", ",", "rho", ":", "Density", ")", "->", "Density", ":", "N", "=", "rho", ".", "qubit_nb", "qubits", "=", "rho", ".", "qubits", "indices", "=", "list", "(", "[", "qubits", ".", "index", "(", "q", ")", "for", "q", "in",...
Apply the action of this channel upon a density
[ "Apply", "the", "action", "of", "this", "channel", "upon", "a", "density" ]
python
train
39.3
adamzap/landslide
landslide/generator.py
https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L542-L552
def write(self):
    """Write the generated presentation code into the destination file."""
    rendered = self.render()
    if self.file_type == 'pdf':
        self.write_pdf(rendered)
        return
    # Always write HTML output as UTF-8, regardless of locale defaults.
    with codecs.open(self.destination_file, 'w',
                     encoding='utf_8') as outfile:
        outfile.write(rendered)
[ "def", "write", "(", "self", ")", ":", "html", "=", "self", ".", "render", "(", ")", "if", "self", ".", "file_type", "==", "'pdf'", ":", "self", ".", "write_pdf", "(", "html", ")", "else", ":", "with", "codecs", ".", "open", "(", "self", ".", "de...
Writes generated presentation code into the destination file.
[ "Writes", "generated", "presentation", "code", "into", "the", "destination", "file", "." ]
python
train
32.454545
pallets/werkzeug
src/werkzeug/serving.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/serving.py#L532-L549
def generate_adhoc_ssl_context():
    """Generates an adhoc SSL context for the development server."""
    crypto = _get_openssl_crypto_module()
    import atexit
    import tempfile

    cert, pkey = generate_adhoc_ssl_pair()
    cert_handle, cert_file = tempfile.mkstemp()
    pkey_handle, pkey_file = tempfile.mkstemp()
    # Remove the temporary key material when the interpreter exits.
    atexit.register(os.remove, pkey_file)
    atexit.register(os.remove, cert_file)

    for handle, payload in (
        (cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert)),
        (pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)),
    ):
        os.write(handle, payload)
        os.close(handle)

    return load_ssl_context(cert_file, pkey_file)
[ "def", "generate_adhoc_ssl_context", "(", ")", ":", "crypto", "=", "_get_openssl_crypto_module", "(", ")", "import", "tempfile", "import", "atexit", "cert", ",", "pkey", "=", "generate_adhoc_ssl_pair", "(", ")", "cert_handle", ",", "cert_file", "=", "tempfile", "....
Generates an adhoc SSL context for the development server.
[ "Generates", "an", "adhoc", "SSL", "context", "for", "the", "development", "server", "." ]
python
train
36.722222
h2oai/h2o-3
py2/h2o_util.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/py2/h2o_util.py#L152-L199
def approxEqual(x, y, *args, **kwargs):
    """approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
    approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False

    Return True if x and y are approximately equal, otherwise False.

    If x and y are floats, return True if y is within either absolute error
    tol or relative error rel of x. You can disable either the absolute or
    relative check by passing None as tol or rel (but not both).

    For any other objects, x and y are checked in that order for a method
    __approxEqual__, and the result of that is returned as a bool. Any
    optional arguments are passed to the __approxEqual__ method.

    __approxEqual__ can return NotImplemented to signal that it doesn't know
    how to perform that specific comparison, in which case the other object is
    checked instead. If neither object have the method, or both defer by
    returning NotImplemented, approxEqual falls back on the same numeric
    comparison used for floats.

    >>> approxEqual(1.2345678, 1.2345677)
    True
    >>> approxEqual(1.234, 1.235)
    False

    """
    if not (type(x) is type(y) is float):
        # Skip checking for __approxEqual__ in the common case of two floats.
        methodname = '__approxEqual__'
        # Allow the objects to specify what they consider "approximately
        # equal", giving precedence to x. If either object has the
        # appropriate method, we pass on any optional arguments untouched.
        for a, b in ((x, y), (y, x)):
            try:
                method = getattr(a, methodname)
            except AttributeError:
                continue
            result = method(b, *args, **kwargs)
            if result is NotImplemented:
                # Fixed: the original used the Python-2-only print statement,
                # which is a syntax error on Python 3.
                print("WARNING: NotImplemented approxEqual for types")
                continue
            return bool(result)
    # If we get here without returning, then neither x nor y knows how to do
    # an approximate equal comparison (or are both floats). Fall back to a
    # numeric comparison.
    return _float_approxEqual(x, y, *args, **kwargs)
[ "def", "approxEqual", "(", "x", ",", "y", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "(", "type", "(", "x", ")", "is", "type", "(", "y", ")", "is", "float", ")", ":", "# Skip checking for __approxEqual__ in the common case of two f...
approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False Return True if x and y are approximately equal, otherwise False. If x and y are floats, return True if y is within either absolute error tol or relative error rel of x. You can disable either the absolute or relative check by passing None as tol or rel (but not both). For any other objects, x and y are checked in that order for a method __approxEqual__, and the result of that is returned as a bool. Any optional arguments are passed to the __approxEqual__ method. __approxEqual__ can return NotImplemented to signal that it doesn't know how to perform that specific comparison, in which case the other object is checked instead. If neither object have the method, or both defer by returning NotImplemented, approxEqual falls back on the same numeric comparison used for floats. >>> almost_equal(1.2345678, 1.2345677) True >>> almost_equal(1.234, 1.235) False
[ "approxEqual", "(", "float1", "float2", "[", "tol", "=", "1e", "-", "18", "rel", "=", "1e", "-", "7", "]", ")", "-", ">", "True|False", "approxEqual", "(", "obj1", "obj2", "[", "*", "args", "**", "kwargs", "]", ")", "-", ">", "True|False" ]
python
test
43.479167
mar10/wsgidav
wsgidav/server/server_cli.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/server/server_cli.py#L520-L603
def _run__cherrypy(app, config, mode):
    """Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.

    ``app`` is the WSGI application, ``config`` the WsgiDAV configuration
    dict (host/port, SSL options, optional ``server_args`` and
    ``startup_event``), and ``mode`` must be "cherrypy-wsgiserver".
    Blocks in ``server.start()`` until interrupted; always stops the
    server on the way out.
    """
    assert mode == "cherrypy-wsgiserver"
    try:
        from cherrypy import wsgiserver
        from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter

        _logger.warning("WARNING: cherrypy.wsgiserver is deprecated.")
        _logger.warning(
            "    Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver"
        )
        _logger.warning("    was moved to the cheroot project.")
        _logger.warning("    Consider using --server=cheroot.")
    except ImportError:
        # Import failed: print install hints, then re-raise so the caller
        # can report the failure.
        _logger.error("*" * 78)
        _logger.error("ERROR: Could not import cherrypy.wsgiserver.")
        _logger.error(
            "Try `pip install cherrypy` or specify another server using the --server option."
        )
        _logger.error("Note that starting with CherryPy 9.0, the server was moved to")
        _logger.error(
            "the cheroot project, so it is recommended to use `-server=cheroot`"
        )
        _logger.error("and run `pip install cheroot` instead.")
        _logger.error("*" * 78)
        raise

    server_name = "WsgiDAV/{} {} Python/{}".format(
        __version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION
    )
    # Advertise WsgiDAV (not bare CherryPy) in the Server response header.
    wsgiserver.CherryPyWSGIServer.version = server_name

    # Support SSL
    ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
    ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
    ssl_certificate_chain = _get_checked_path(
        config.get("ssl_certificate_chain"), config
    )
    protocol = "http"
    if ssl_certificate:
        # A certificate without a private key is a configuration error.
        assert ssl_private_key
        wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(
            ssl_certificate, ssl_private_key, ssl_certificate_chain
        )
        protocol = "https"
        _logger.info("SSL / HTTPS enabled.")

    _logger.info("Running {}".format(server_name))
    _logger.info(
        "Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
    )

    server_args = {
        "bind_addr": (config["host"], config["port"]),
        "wsgi_app": app,
        "server_name": server_name,
    }
    # Override or add custom args
    server_args.update(config.get("server_args", {}))

    server = wsgiserver.CherryPyWSGIServer(**server_args)

    # If the caller passed a startup event, monkey patch the server to set it
    # when the request handler loop is entered
    startup_event = config.get("startup_event")
    if startup_event:

        def _patched_tick():
            server.tick = org_tick  # undo the monkey patch
            org_tick()
            _logger.info("CherryPyWSGIServer is ready")
            # Signal the caller (e.g. a test harness) that requests can be
            # sent now.
            startup_event.set()

        org_tick = server.tick
        server.tick = _patched_tick

    try:
        server.start()
    except KeyboardInterrupt:
        _logger.warning("Caught Ctrl-C, shutting down...")
    finally:
        server.stop()
    return
[ "def", "_run__cherrypy", "(", "app", ",", "config", ",", "mode", ")", ":", "assert", "mode", "==", "\"cherrypy-wsgiserver\"", "try", ":", "from", "cherrypy", "import", "wsgiserver", "from", "cherrypy", ".", "wsgiserver", ".", "ssl_builtin", "import", "BuiltinSSL...
Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.
[ "Run", "WsgiDAV", "using", "cherrypy", ".", "wsgiserver", "if", "CherryPy", "is", "installed", "." ]
python
valid
35.142857
glue-viz/echo
echo/callback_container.py
https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/callback_container.py#L20-L43
def _wrap(self, value, priority=0): """ Given a function/method, this will automatically wrap a method using weakref to avoid circular references. """ if not callable(value): raise TypeError("Only callable values can be stored in CallbackContainer") elif self.is_bound_method(value): # We are dealing with a bound method. Method references aren't # persistent, so instead we store a reference to the function # and instance. value = (weakref.ref(value.__func__), weakref.ref(value.__self__, self._auto_remove), priority) else: value = (value, priority) return value
[ "def", "_wrap", "(", "self", ",", "value", ",", "priority", "=", "0", ")", ":", "if", "not", "callable", "(", "value", ")", ":", "raise", "TypeError", "(", "\"Only callable values can be stored in CallbackContainer\"", ")", "elif", "self", ".", "is_bound_method"...
Given a function/method, this will automatically wrap a method using weakref to avoid circular references.
[ "Given", "a", "function", "/", "method", "this", "will", "automatically", "wrap", "a", "method", "using", "weakref", "to", "avoid", "circular", "references", "." ]
python
train
30.291667
numenta/htmresearch
projects/sequence_learning/sequence_simulations.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_learning/sequence_simulations.py#L41-L51
def letterSequence(letters, w=40):
    """
    Return a list of input vectors corresponding to sequence of letters.

    The vector for each letter has w contiguous bits ON, represented as a
    set of the non-zero indices ('A' -> 0..w-1, 'B' -> w..2w-1, ...).
    """
    offsets = [ord(ch) - ord('A') for ch in letters]
    return [set(range(idx * w, (idx + 1) * w)) for idx in offsets]
[ "def", "letterSequence", "(", "letters", ",", "w", "=", "40", ")", ":", "sequence", "=", "[", "]", "for", "letter", "in", "letters", ":", "i", "=", "ord", "(", "letter", ")", "-", "ord", "(", "'A'", ")", "sequence", ".", "append", "(", "set", "("...
Return a list of input vectors corresponding to sequence of letters. The vector for each letter has w contiguous bits ON and represented as a sequence of non-zero indices.
[ "Return", "a", "list", "of", "input", "vectors", "corresponding", "to", "sequence", "of", "letters", ".", "The", "vector", "for", "each", "letter", "has", "w", "contiguous", "bits", "ON", "and", "represented", "as", "a", "sequence", "of", "non", "-", "zero...
python
train
31.727273
materialsproject/pymatgen
pymatgen/io/abinit/pseudos.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/pseudos.py#L1020-L1059
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
    """
    Analyze the files contained in directory dirname.

    Args:
        dirname: directory path
        exclude_exts: list of file extensions that should be skipped.
        exclude_fnames: list of file names that should be skipped.

    Returns:
        List of pseudopotential objects.
    """
    # Normalize extensions into a NEW list so the caller's sequence (or the
    # immutable `()` default) is never mutated.  The original assigned into
    # exclude_exts in place, which raises TypeError on the tuple default.
    exclude_exts = [ext if ext.strip().startswith(".") else "." + ext.strip()
                    for ext in exclude_exts]

    # Exclude files depending on the extension, name, or type.
    paths = []
    for fname in os.listdir(dirname):
        root, ext = os.path.splitext(fname)
        path = os.path.join(dirname, fname)
        if (ext in exclude_exts or fname in exclude_fnames or
                fname.startswith(".") or not os.path.isfile(path)):
            continue
        paths.append(path)

    pseudos = []
    for path in paths:
        # Parse the file and generate the pseudo; one unparsable file must
        # not abort the whole scan (narrowed from the original bare except).
        try:
            pseudo = self.parse(path)
        except Exception:
            pseudo = None

        if pseudo is not None:
            pseudos.append(pseudo)
            # append, not extend: extend() on a string path would add it one
            # character at a time (bug in the original).
            self._parsed_paths.append(path)
        else:
            self._wrong_paths.append(path)

    return pseudos
[ "def", "scan_directory", "(", "self", ",", "dirname", ",", "exclude_exts", "=", "(", ")", ",", "exclude_fnames", "=", "(", ")", ")", ":", "for", "i", ",", "ext", "in", "enumerate", "(", "exclude_exts", ")", ":", "if", "not", "ext", ".", "strip", "(",...
Analyze the files contained in directory dirname. Args: dirname: directory path exclude_exts: list of file extensions that should be skipped. exclude_fnames: list of file names that should be skipped. Returns: List of pseudopotential objects.
[ "Analyze", "the", "files", "contained", "in", "directory", "dirname", "." ]
python
train
33.2
brechtm/rinohtype
src/rinoh/text.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/text.py#L460-L468
def append(self, item):
    """Append `item` (:class:`StyledText` or :class:`str`) to the end of
    this mixed-styled text; plain strings are wrapped in a
    :class:`SingleStyledText` first.

    The parent of `item` is set to this mixed-styled text."""
    styled = SingleStyledText(item) if isinstance(item, str) else item
    styled.parent = self
    list.append(self, styled)
[ "def", "append", "(", "self", ",", "item", ")", ":", "if", "isinstance", "(", "item", ",", "str", ")", ":", "item", "=", "SingleStyledText", "(", "item", ")", "item", ".", "parent", "=", "self", "list", ".", "append", "(", "self", ",", "item", ")" ...
Append `item` (:class:`StyledText` or :class:`str`) to the end of this mixed-styled text. The parent of `item` is set to this mixed-styled text.
[ "Append", "item", "(", ":", "class", ":", "StyledText", "or", ":", "class", ":", "str", ")", "to", "the", "end", "of", "this", "mixed", "-", "styled", "text", "." ]
python
train
36.222222
ThreatResponse/margaritashotgun
margaritashotgun/remote_host.py
https://github.com/ThreatResponse/margaritashotgun/blob/6dee53ef267959b214953439968244cc46a19690/margaritashotgun/remote_host.py#L239-L250
def upload_module(self, local_path=None, remote_path="/tmp/lime.ko"):
    """
    Upload LiME kernel module to remote host

    :type local_path: str
    :param local_path: local path to lime kernel module
    :type remote_path: str
    :param remote_path: remote path to upload lime kernel module
    :raises FileNotFoundError: if no local_path is supplied
    """
    if local_path is None:
        # The original raised the misspelled, undefined name
        # ``FileNotFoundFoundError`` (itself a NameError at runtime);
        # raise the builtin FileNotFoundError instead.
        raise FileNotFoundError(local_path)
    self.shell.upload_file(local_path, remote_path)
[ "def", "upload_module", "(", "self", ",", "local_path", "=", "None", ",", "remote_path", "=", "\"/tmp/lime.ko\"", ")", ":", "if", "local_path", "is", "None", ":", "raise", "FileNotFoundFoundError", "(", "local_path", ")", "self", ".", "shell", ".", "upload_fil...
Upload LiME kernel module to remote host :type local_path: str :param local_path: local path to lime kernel module :type remote_path: str :param remote_path: remote path to upload lime kernel module
[ "Upload", "LiME", "kernel", "module", "to", "remote", "host" ]
python
train
38.5
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_properties.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_properties.py#L96-L100
def validate_supported_property_type_id(property_name, property_type_id):
    """Ensure that the given property type_id is supported by the graph."""
    if property_type_id in PROPERTY_TYPE_ID_TO_NAME:
        return
    raise AssertionError(u'Property "{}" has unsupported property type id: '
                         u'{}'.format(property_name, property_type_id))
[ "def", "validate_supported_property_type_id", "(", "property_name", ",", "property_type_id", ")", ":", "if", "property_type_id", "not", "in", "PROPERTY_TYPE_ID_TO_NAME", ":", "raise", "AssertionError", "(", "u'Property \"{}\" has unsupported property type id: '", "u'{}'", ".", ...
Ensure that the given property type_id is supported by the graph.
[ "Ensure", "that", "the", "given", "property", "type_id", "is", "supported", "by", "the", "graph", "." ]
python
train
71.8
yoavaviram/python-amazon-simple-product-api
amazon/api.py
https://github.com/yoavaviram/python-amazon-simple-product-api/blob/f1cb0e209145fcfac9444e4c733dd19deb59d31a/amazon/api.py#L390-L424
def cart_modify(self, items, CartId=None, HMAC=None, **kwargs):
    """CartModify.

    :param items:
        A dictionary with the cart item id and quantity to change, or a
        list of such dictionaries, e.g.
        [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}]
    :param CartId: Id of Cart
    :param HMAC: HMAC of Cart, see CartCreate for more info
    :return: An :class:`~.AmazonCart`.
    """
    if not CartId or not HMAC:
        raise CartException('CartId required for CartModify call')

    if isinstance(items, dict):
        items = [items]
    if len(items) > 10:
        raise CartException("You can't add more than 10 items at once")

    # Flatten the items into the Item.<n>.* request parameters.
    for index, item in enumerate(items):
        kwargs['Item.{0}.CartItemId'.format(index)] = item['cart_item_id']
        kwargs['Item.{0}.Quantity'.format(index)] = item['quantity']

    response = self.api.CartModify(CartId=CartId, HMAC=HMAC, **kwargs)
    cart = AmazonCart(objectify.fromstring(response))
    self._check_for_cart_error(cart)
    return cart
[ "def", "cart_modify", "(", "self", ",", "items", ",", "CartId", "=", "None", ",", "HMAC", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "CartId", "or", "not", "HMAC", ":", "raise", "CartException", "(", "'CartId required for CartModify call'...
CartAdd. :param items: A dictionary containing the items to be added to the cart. Or a list containing these dictionaries. example: [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h', 'quantity': 1}] :param CartId: Id of Cart :param HMAC: HMAC of Cart, see CartCreate for more info :return: An :class:`~.AmazonCart`.
[ "CartAdd", ".", ":", "param", "items", ":", "A", "dictionary", "containing", "the", "items", "to", "be", "added", "to", "the", "cart", ".", "Or", "a", "list", "containing", "these", "dictionaries", ".", "example", ":", "[", "{", "cart_item_id", ":", "rt2...
python
train
36.257143
7sDream/zhihu-py3
zhihu/me.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/me.py#L180-L202
def send_message(self, author, content):
    """Send a private message to a user.

    :param Author author: the recipient of the private message
    :param string content: the message body to send
    :return: True on success, False on failure
    :rtype: bool
    """
    if not isinstance(author, Author):
        raise ValueError('argument answer need to be Zhihu.Author object.')
    if not content:
        raise ValueError('answer content cannot be empty')
    # Sending a message to oneself is silently refused.
    if author.url == self.url:
        return False
    payload = {
        'member_id': author.hash_id,
        'content': content,
        'token': '',
        '_xsrf': author.xsrf
    }
    response = self._session.post(Send_Message_Url, data=payload)
    return response.json()['r'] == 0
[ "def", "send_message", "(", "self", ",", "author", ",", "content", ")", ":", "if", "isinstance", "(", "author", ",", "Author", ")", "is", "False", ":", "raise", "ValueError", "(", "'argument answer need to be Zhihu.Author object.'", ")", "if", "not", "content", ...
发送私信给一个用户 :param Author author: 接收私信用户对象 :param string content: 发送给用户的私信内容 :return: 成功返回 True,失败返回 False :rtype: bool
[ "发送私信给一个用户" ]
python
train
33.043478
jobovy/galpy
galpy/util/bovy_coords.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_coords.py#L2137-L2179
def lambdanu_to_Rz(l, n, ac=5., Delta=1.):
    """
    NAME:

       lambdanu_to_Rz

    PURPOSE:

       calculate galactocentric cylindrical coordinates (R,z) from
       prolate spheroidal coordinates (lambda,nu),
       cf. eq. (2.2) in Dejonghe & de Zeeuw (1988a)

    INPUT:

        l     - prolate spheroidal coordinate lambda

        n     - prolate spheroidal coordinate nu

        ac    - axis ratio of the coordinate surfaces (a/c) = sqrt(-a) / sqrt(-g) (default: 5.)

        Delta - focal distance that defines the spheroidal coordinate system (default: 1.)
                Delta=sqrt(g-a)

    OUTPUT:

       (R,z)

    HISTORY:

       2015-02-13 - Written - Trick (MPIA)
    """
    g = Delta**2 / (1. - ac**2)
    a = g - Delta**2
    r2 = (l + a) * (n + a) / (a - g)
    z2 = (l + g) * (n + g) / (g - a)

    # Floating-point round-off can push r2/z2 marginally below zero when nu
    # sits exactly on a coordinate-surface boundary; clip those to zero.
    bad_r = (r2 < 0.) * ((n + a) > 0.) * ((n + a) < 1e-10)
    if nu.any(bad_r):
        if isinstance(r2, nu.ndarray):
            r2[bad_r] = 0.
        else:
            r2 = 0.
    bad_z = (z2 < 0.) * ((n + g) < 0.) * ((n + g) > -1e-10)
    if nu.any(bad_z):
        if isinstance(z2, nu.ndarray):
            z2[bad_z] = 0.
        else:
            z2 = 0.
    return (nu.sqrt(r2), nu.sqrt(z2))
[ "def", "lambdanu_to_Rz", "(", "l", ",", "n", ",", "ac", "=", "5.", ",", "Delta", "=", "1.", ")", ":", "g", "=", "Delta", "**", "2", "/", "(", "1.", "-", "ac", "**", "2", ")", "a", "=", "g", "-", "Delta", "**", "2", "r2", "=", "(", "l", ...
NAME: lambdanu_to_Rz PURPOSE: calculate galactocentric cylindrical coordinates (R,z) from prolate spheroidal coordinates (lambda,nu), cf. eq. (2.2) in Dejonghe & de Zeeuw (1988a) INPUT: l - prolate spheroidal coordinate lambda n - prolate spheroidal coordinate nu ac - axis ratio of the coordinate surfaces (a/c) = sqrt(-a) / sqrt(-g) (default: 5.) Delta - focal distance that defines the spheroidal coordinate system (default: 1.) Delta=sqrt(g-a) OUTPUT: (R,z) HISTORY: 2015-02-13 - Written - Trick (MPIA)
[ "NAME", ":" ]
python
train
27.976744
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1699-L1707
def TENSES(self):
    """ Yields a list of tenses for this language, excluding negations.
        Each tense is a (tense, person, number, mood, aspect)-tuple.
    """
    # Collect every tense id used by the format spec and the defaults.
    ids = set(self._format)
    ids.update(self._default.keys())
    ids.update(self._default.values())
    tenses = set(TENSES[tense_id] for tense_id in ids)
    # Drop the negation flag (and trailing alias slot) and exclude negations.
    return sorted(t[:-2] for t in tenses if t[-2] is False)
[ "def", "TENSES", "(", "self", ")", ":", "a", "=", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", "_format", ")", "a", "=", "a", ".", "union", "(", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", ...
Yields a list of tenses for this language, excluding negations. Each tense is a (tense, person, number, mood, aspect)-tuple.
[ "Yields", "a", "list", "of", "tenses", "for", "this", "language", "excluding", "negations", ".", "Each", "tense", "is", "a", "(", "tense", "person", "number", "mood", "aspect", ")", "-", "tuple", "." ]
python
train
50.222222
mitsei/dlkit
dlkit/services/repository.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/repository.py#L1653-L1660
def save_asset(self, asset_form, *args, **kwargs):
    """Pass through to provider AssetAdminSession.update_asset"""
    # Implemented from kitosid template for -
    # osid.resource.ResourceAdminSession.update_resource
    # Dispatch on whether the form was issued for an update or a create.
    handler = (self.update_asset if asset_form.is_for_update()
               else self.create_asset)
    return handler(asset_form, *args, **kwargs)
[ "def", "save_asset", "(", "self", ",", "asset_form", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.ResourceAdminSession.update_resource", "if", "asset_form", ".", "is_for_update", "(", ")", ":", "ret...
Pass through to provider AssetAdminSession.update_asset
[ "Pass", "through", "to", "provider", "AssetAdminSession", ".", "update_asset" ]
python
train
51.125
blackecho/Deep-Learning-TensorFlow
yadlt/models/boltzmann/rbm.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/boltzmann/rbm.py#L114-L153
def build_model(self, n_features, regtype='none'):
    """Build the Restricted Boltzmann Machine model in TensorFlow.

    Sets up the full contrastive-divergence (CD-k) training graph:
    ``self.encode`` / ``self.reconstruction`` ops, the parameter update
    ops ``w_upd8`` / ``bh_upd8`` / ``bv_upd8``, and ``self.cost``.

    :param n_features: number of features
    :param regtype: regularization type
        NOTE(review): this parameter is not used below — the regularization
        term is built from ``self.regtype`` instead; confirm intent.
    :return: self
    """
    self._create_placeholders(n_features)
    self._create_variables(n_features)

    # Encoder: hidden probabilities for the input; decoder: reconstruction.
    self.encode = self.sample_hidden_from_visible(self.input_data)[0]
    self.reconstruction = self.sample_visible_from_hidden(
        self.encode, n_features)

    # First Gibbs step from the data; hprob0/hstate0 drive the positive
    # phase of contrastive divergence.
    hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
        self.input_data, n_features)
    positive = self.compute_positive_association(self.input_data,
                                                 hprob0, hstate0)

    nn_input = vprob

    # Remaining k-1 Gibbs steps, each fed with the previous reconstruction.
    for step in range(self.gibbs_sampling_steps - 1):
        hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            nn_input, n_features)
        nn_input = vprob

    # Negative phase association from the final Gibbs sample.
    negative = tf.matmul(tf.transpose(vprob), hprob1)

    # CD weight update, averaged over the batch.
    self.w_upd8 = self.W.assign_add(
        self.learning_rate * (positive - negative) / self.batch_size)

    # Hidden-bias update: mean difference between first and last hidden probs.
    self.bh_upd8 = self.bh_.assign_add(tf.multiply(self.learning_rate,
                                       tf.reduce_mean(
                                           tf.subtract(hprob0, hprob1), 0)))

    # Visible-bias update: mean difference between data and reconstruction.
    self.bv_upd8 = self.bv_.assign_add(tf.multiply(self.learning_rate,
                                       tf.reduce_mean(
                                           tf.subtract(self.input_data,
                                                       vprob), 0)))

    variables = [self.W, self.bh_, self.bv_]
    regterm = Layers.regularization(variables, self.regtype, self.regcoef)

    # Reconstruction loss (plus regularization) between the final Gibbs
    # visible probabilities and the input.
    self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
[ "def", "build_model", "(", "self", ",", "n_features", ",", "regtype", "=", "'none'", ")", ":", "self", ".", "_create_placeholders", "(", "n_features", ")", "self", ".", "_create_variables", "(", "n_features", ")", "self", ".", "encode", "=", "self", ".", "...
Build the Restricted Boltzmann Machine model in TensorFlow. :param n_features: number of features :param regtype: regularization type :return: self
[ "Build", "the", "Restricted", "Boltzmann", "Machine", "model", "in", "TensorFlow", "." ]
python
train
40.65
Nic30/hwt
hwt/code.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/code.py#L106-L114
def addCases(self, tupesValStmnts):
    """
    Add multiple case statements from an iterable of tuples
    (caseVal, statements)
    """
    selector = self
    for case_value, case_statements in tupesValStmnts:
        selector = selector.Case(case_value, case_statements)
    return selector
[ "def", "addCases", "(", "self", ",", "tupesValStmnts", ")", ":", "s", "=", "self", "for", "val", ",", "statements", "in", "tupesValStmnts", ":", "s", "=", "s", ".", "Case", "(", "val", ",", "statements", ")", "return", "s" ]
Add multiple case statements from iterable of tuleles (caseVal, statements)
[ "Add", "multiple", "case", "statements", "from", "iterable", "of", "tuleles", "(", "caseVal", "statements", ")" ]
python
test
29.333333