repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
SylvanasSun/FishFishJump
fish_core/search_engine.py
ElasticsearchClient.transfer_data_from_mongo
def transfer_data_from_mongo(self,
                             index,
                             doc_type,
                             use_mongo_id=False,
                             indexed_flag_field_name='',
                             mongo_query_params=None,
                             mongo_host=default.MONGO_HOST,
                             mongo_port=default.MONGO_PORT,
                             mongo_db=default.MONGO_DB,
                             mongo_collection=default.MONGO_COLLECTION):
    """
    Transfer data from MongoDB into Elasticsearch. The hostname, port,
    database and collection name of MongoDB are loaded from default.py
    by default.

    :param index: the name of the Elasticsearch index
    :param doc_type: the type of the document
    :param use_mongo_id: reuse the MongoDB ``_id`` as the Elasticsearch id
                         if True, otherwise let Elasticsearch generate one
    :param indexed_flag_field_name: the name of a field of the document;
                                    documents whose value for it is False
                                    are synchronized
    :param mongo_query_params: an optional dictionary of MongoDB query
                               params (never mutated)
    :param mongo_host: the hostname of MongoDB
    :param mongo_port: the port of MongoDB
    :param mongo_db: the database name of MongoDB
    :param mongo_collection: the collection name of MongoDB
    :return: a ``(success, failed)`` tuple as reported by the bulk helper
    """
    # Copy the query dict: a mutable ``{}`` default would be shared across
    # calls, and the original mutated the caller's dictionary in place.
    query = dict(mongo_query_params) if mongo_query_params else {}
    if indexed_flag_field_name != '':
        query[indexed_flag_field_name] = False

    actions = []
    id_array = []
    mongo_client = MongoClient(host=mongo_host, port=int(mongo_port))
    try:
        collection = mongo_client[mongo_db][mongo_collection]
        # Consume the cursor BEFORE closing the client: a cursor is lazy
        # and cannot be iterated once its client has been closed.
        for doc in collection.find(query):
            action = {
                '_op_type': 'index',
                '_index': index,
                '_type': doc_type
            }
            id_array.append(doc['_id'])
            if use_mongo_id:
                doc['id'] = str(doc['_id'])
            doc.pop('_id')
            action['_source'] = doc
            actions.append(action)
    finally:
        mongo_client.close()

    success, failed = es_helpers.bulk(self.client, actions, request_timeout=60 * 60)
    logger.info(
        'Transfer data from MongoDB(%s:%s) into the Elasticsearch(%s) success: %s, failed: %s' % (
            mongo_host, mongo_port, self.client, success, failed))

    # Flag the transferred documents as indexed back in MongoDB on a
    # background thread so the caller is not blocked on the update.
    if indexed_flag_field_name != '':
        t = threading.Thread(target=ElasticsearchClient._back_update_mongo,
                             args=(self, mongo_host, mongo_port, mongo_db,
                                   mongo_collection, id_array,
                                   {indexed_flag_field_name: True}),
                             name='mongodb_back_update')
        t.start()
    return success, failed
python
def transfer_data_from_mongo(self, index, doc_type, use_mongo_id=False, indexed_flag_field_name='', mongo_query_params={}, mongo_host=default.MONGO_HOST, mongo_port=default.MONGO_PORT, mongo_db=default.MONGO_DB, mongo_collection=default.MONGO_COLLECTION): """ Transfer data from MongoDB into the Elasticsearch, the hostname, port, database and collection name in MongoDB default from load in default.py :param index: The name of the index :param doc_type: The type of the document :param use_mongo_id: Use id of MongoDB in the Elasticsearch if is true otherwise automatic generation :param indexed_flag_field_name: the name of the field of the document, if associated value is False will synchronize data for it :param mongo_client_params: The dictionary for client params of MongoDB :param mongo_query_params: The dictionary for query params of MongoDB :param mongo_host: The name of the hostname from MongoDB :param mongo_port: The number of the port from MongoDB :param mongo_db: The name of the database from MongoDB :param mongo_collection: The name of the collection from MongoDB :return: void """ mongo_client = MongoClient(host=mongo_host, port=int(mongo_port)) try: collection = mongo_client[mongo_db][mongo_collection] if indexed_flag_field_name != '': mongo_query_params.update({indexed_flag_field_name: False}) mongo_docs = collection.find(mongo_query_params) finally: mongo_client.close() # Joint actions of Elasticsearch for execute bulk api actions = [] id_array = [] for doc in mongo_docs: action = { '_op_type': 'index', '_index': index, '_type': doc_type } id_array.append(doc['_id']) if not use_mongo_id: doc.pop('_id') else: doc['id'] = str(doc['_id']) doc.pop('_id') action['_source'] = doc actions.append(action) success, failed = es_helpers.bulk(self.client, actions, request_timeout=60 * 60) logger.info( 'Transfer data from MongoDB(%s:%s) into the Elasticsearch(%s) success: %s, failed: %s' % ( mongo_host, mongo_port, self.client, success, failed)) # Back update flag if 
indexed_flag_field_name != '': t = threading.Thread(target=ElasticsearchClient._back_update_mongo, args=(self, mongo_host, mongo_port, mongo_db, mongo_collection, id_array, {indexed_flag_field_name: True}), name='mongodb_back_update') t.start() return success, failed
[ "def", "transfer_data_from_mongo", "(", "self", ",", "index", ",", "doc_type", ",", "use_mongo_id", "=", "False", ",", "indexed_flag_field_name", "=", "''", ",", "mongo_query_params", "=", "{", "}", ",", "mongo_host", "=", "default", ".", "MONGO_HOST", ",", "m...
Transfer data from MongoDB into the Elasticsearch, the hostname, port, database and collection name in MongoDB default from load in default.py :param index: The name of the index :param doc_type: The type of the document :param use_mongo_id: Use id of MongoDB in the Elasticsearch if is true otherwise automatic generation :param indexed_flag_field_name: the name of the field of the document, if associated value is False will synchronize data for it :param mongo_client_params: The dictionary for client params of MongoDB :param mongo_query_params: The dictionary for query params of MongoDB :param mongo_host: The name of the hostname from MongoDB :param mongo_port: The number of the port from MongoDB :param mongo_db: The name of the database from MongoDB :param mongo_collection: The name of the collection from MongoDB :return: void
[ "Transfer", "data", "from", "MongoDB", "into", "the", "Elasticsearch", "the", "hostname", "port", "database", "and", "collection", "name", "in", "MongoDB", "default", "from", "load", "in", "default", ".", "py" ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/search_engine.py#L99-L163
SylvanasSun/FishFishJump
fish_core/search_engine.py
ElasticsearchClient.bulk
def bulk(self, actions, stats_only=False, **kwargs): """ Executes bulk api by elasticsearch.helpers.bulk. :param actions: iterator containing the actions :param stats_only:if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters. """ success, failed = es_helpers.bulk(self.client, actions, stats_only, **kwargs) logger.info('Bulk is done success %s failed %s actions: \n %s' % (success, failed, actions))
python
def bulk(self, actions, stats_only=False, **kwargs): """ Executes bulk api by elasticsearch.helpers.bulk. :param actions: iterator containing the actions :param stats_only:if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters. """ success, failed = es_helpers.bulk(self.client, actions, stats_only, **kwargs) logger.info('Bulk is done success %s failed %s actions: \n %s' % (success, failed, actions))
[ "def", "bulk", "(", "self", ",", "actions", ",", "stats_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "success", ",", "failed", "=", "es_helpers", ".", "bulk", "(", "self", ".", "client", ",", "actions", ",", "stats_only", ",", "*", "*", ...
Executes bulk api by elasticsearch.helpers.bulk. :param actions: iterator containing the actions :param stats_only:if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters.
[ "Executes", "bulk", "api", "by", "elasticsearch", ".", "helpers", ".", "bulk", "." ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/search_engine.py#L235-L248
SylvanasSun/FishFishJump
fish_core/search_engine.py
ElasticsearchClient.automatic_syn_data_from_mongo
def automatic_syn_data_from_mongo(self, index, doc_type, indexed_flag_field_name, thread_name='automatic_syn_data_thread', interval=60, use_mongo_id=False, mongo_query_params={}, mongo_host=default.MONGO_HOST, mongo_port=default.MONGO_PORT, mongo_db=default.MONGO_DB, mongo_collection=default.MONGO_COLLECTION): """ Automatic synchronize data that from MongoDB into the Elasticsearch by schedule task, it will synchronize this data if the indexed_flag_field_name of the field of the document is False. Noteworthy that the function may be no good please you caution use it. :param indexed_flag_field_name: the name of the field of the document, if associated value is False will synchronize data for it :param thread_name: the name of the schedule task thread :param interval: the time that executes interval of the scheduled task every time (unit second) :return: the thread id, you can use this id to cancel associated task """ thread_id = self._generate_thread_id(thread_name) if thread_id in ElasticsearchClient.automatic_syn_data_flag: lock.acquire() try: thread_name = thread_name + '-%s' % ElasticsearchClient.automatic_thread_name_counter ElasticsearchClient.automatic_thread_name_counter += 1 thread_id = self._generate_thread_id(thread_name) finally: lock.release() ElasticsearchClient.automatic_syn_data_flag[thread_id] = True t = threading.Thread(target=ElasticsearchClient._automatic_syn_data_from_mongo_worker, args=(self, thread_id, index, doc_type, indexed_flag_field_name, interval, use_mongo_id, mongo_query_params, mongo_host, mongo_port, mongo_db, mongo_collection), name=thread_name) t.start() return thread_id
python
def automatic_syn_data_from_mongo(self, index, doc_type, indexed_flag_field_name, thread_name='automatic_syn_data_thread', interval=60, use_mongo_id=False, mongo_query_params={}, mongo_host=default.MONGO_HOST, mongo_port=default.MONGO_PORT, mongo_db=default.MONGO_DB, mongo_collection=default.MONGO_COLLECTION): """ Automatic synchronize data that from MongoDB into the Elasticsearch by schedule task, it will synchronize this data if the indexed_flag_field_name of the field of the document is False. Noteworthy that the function may be no good please you caution use it. :param indexed_flag_field_name: the name of the field of the document, if associated value is False will synchronize data for it :param thread_name: the name of the schedule task thread :param interval: the time that executes interval of the scheduled task every time (unit second) :return: the thread id, you can use this id to cancel associated task """ thread_id = self._generate_thread_id(thread_name) if thread_id in ElasticsearchClient.automatic_syn_data_flag: lock.acquire() try: thread_name = thread_name + '-%s' % ElasticsearchClient.automatic_thread_name_counter ElasticsearchClient.automatic_thread_name_counter += 1 thread_id = self._generate_thread_id(thread_name) finally: lock.release() ElasticsearchClient.automatic_syn_data_flag[thread_id] = True t = threading.Thread(target=ElasticsearchClient._automatic_syn_data_from_mongo_worker, args=(self, thread_id, index, doc_type, indexed_flag_field_name, interval, use_mongo_id, mongo_query_params, mongo_host, mongo_port, mongo_db, mongo_collection), name=thread_name) t.start() return thread_id
[ "def", "automatic_syn_data_from_mongo", "(", "self", ",", "index", ",", "doc_type", ",", "indexed_flag_field_name", ",", "thread_name", "=", "'automatic_syn_data_thread'", ",", "interval", "=", "60", ",", "use_mongo_id", "=", "False", ",", "mongo_query_params", "=", ...
Automatic synchronize data that from MongoDB into the Elasticsearch by schedule task, it will synchronize this data if the indexed_flag_field_name of the field of the document is False. Noteworthy that the function may be no good please you caution use it. :param indexed_flag_field_name: the name of the field of the document, if associated value is False will synchronize data for it :param thread_name: the name of the schedule task thread :param interval: the time that executes interval of the scheduled task every time (unit second) :return: the thread id, you can use this id to cancel associated task
[ "Automatic", "synchronize", "data", "that", "from", "MongoDB", "into", "the", "Elasticsearch", "by", "schedule", "task", "it", "will", "synchronize", "this", "data", "if", "the", "indexed_flag_field_name", "of", "the", "field", "of", "the", "document", "is", "Fa...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/search_engine.py#L261-L304
SylvanasSun/FishFishJump
fish_core/search_engine.py
ElasticsearchClient.get_simple_info_for_index
def get_simple_info_for_index(self, index=None, params={}, **kwargs): """ Return a list of simple info by specified index (default all), each elements is a dictionary such as { 'health' : 'green', 'status' : 'open', 'index' : 'xxxx', 'uuid' : 'xxxx', 'pri' : 1, 'rep' : 1, `docs_count` : 4, 'docs_deleted' : 0, 'store_size' : 10kb, 'pri_store_size' : 10kb } """ raw = self.client.cat.indices(index, params=params, **kwargs).split('\n') list = [] for r in raw: alter = r.split(' ') if len(alter) < 10: continue dict = { 'health': alter[0], 'status': alter[1], 'index': alter[2], } if len(alter) == 11: # May appear split fail (alter[3] is a empty string) dict['uuid'] = alter[4] i = 5 else: dict['uuid'] = alter[3] i = 4 dict['pri'] = alter[i] i += 1 dict['rep'] = alter[i] i += 1 dict['docs_count'] = alter[i] i += 1 dict['docs_deleted'] = alter[i] i += 1 dict['store_size'] = alter[i] i += 1 dict['pri_store_size'] = alter[i] list.append(dict) logger.info('Acquire simple information of the index is done succeeded: %s' % len(list)) return list
python
def get_simple_info_for_index(self, index=None, params={}, **kwargs): """ Return a list of simple info by specified index (default all), each elements is a dictionary such as { 'health' : 'green', 'status' : 'open', 'index' : 'xxxx', 'uuid' : 'xxxx', 'pri' : 1, 'rep' : 1, `docs_count` : 4, 'docs_deleted' : 0, 'store_size' : 10kb, 'pri_store_size' : 10kb } """ raw = self.client.cat.indices(index, params=params, **kwargs).split('\n') list = [] for r in raw: alter = r.split(' ') if len(alter) < 10: continue dict = { 'health': alter[0], 'status': alter[1], 'index': alter[2], } if len(alter) == 11: # May appear split fail (alter[3] is a empty string) dict['uuid'] = alter[4] i = 5 else: dict['uuid'] = alter[3] i = 4 dict['pri'] = alter[i] i += 1 dict['rep'] = alter[i] i += 1 dict['docs_count'] = alter[i] i += 1 dict['docs_deleted'] = alter[i] i += 1 dict['store_size'] = alter[i] i += 1 dict['pri_store_size'] = alter[i] list.append(dict) logger.info('Acquire simple information of the index is done succeeded: %s' % len(list)) return list
[ "def", "get_simple_info_for_index", "(", "self", ",", "index", "=", "None", ",", "params", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "raw", "=", "self", ".", "client", ".", "cat", ".", "indices", "(", "index", ",", "params", "=", "params", ...
Return a list of simple info by specified index (default all), each elements is a dictionary such as { 'health' : 'green', 'status' : 'open', 'index' : 'xxxx', 'uuid' : 'xxxx', 'pri' : 1, 'rep' : 1, `docs_count` : 4, 'docs_deleted' : 0, 'store_size' : 10kb, 'pri_store_size' : 10kb }
[ "Return", "a", "list", "of", "simple", "info", "by", "specified", "index", "(", "default", "all", ")", "each", "elements", "is", "a", "dictionary", "such", "as", "{", "health", ":", "green", "status", ":", "open", "index", ":", "xxxx", "uuid", ":", "xx...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/search_engine.py#L358-L400
SylvanasSun/FishFishJump
fish_core/search_engine.py
ElasticsearchClient.cluster_health_for_indices
def cluster_health_for_indices(self, index=None, params={}, **kwargs): """ Return a list of cluster health of specified indices(default all), the first element is a dictionary represent a global information of the cluster such as "cluster_name", "number_of_nodes"... the second element represent a indices information list that each element is a dictionary for one index such as [{'index' : 'a', 'status' : 'yellow', ...} , {'index' : 'b', 'status' : 'yellow', ...}, ....] """ params['level'] = 'indices' result = self.cluster_health(index, params, **kwargs) return self._process_cluster_health_info(result)
python
def cluster_health_for_indices(self, index=None, params={}, **kwargs): """ Return a list of cluster health of specified indices(default all), the first element is a dictionary represent a global information of the cluster such as "cluster_name", "number_of_nodes"... the second element represent a indices information list that each element is a dictionary for one index such as [{'index' : 'a', 'status' : 'yellow', ...} , {'index' : 'b', 'status' : 'yellow', ...}, ....] """ params['level'] = 'indices' result = self.cluster_health(index, params, **kwargs) return self._process_cluster_health_info(result)
[ "def", "cluster_health_for_indices", "(", "self", ",", "index", "=", "None", ",", "params", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "params", "[", "'level'", "]", "=", "'indices'", "result", "=", "self", ".", "cluster_health", "(", "index", "...
Return a list of cluster health of specified indices(default all), the first element is a dictionary represent a global information of the cluster such as "cluster_name", "number_of_nodes"... the second element represent a indices information list that each element is a dictionary for one index such as [{'index' : 'a', 'status' : 'yellow', ...} , {'index' : 'b', 'status' : 'yellow', ...}, ....]
[ "Return", "a", "list", "of", "cluster", "health", "of", "specified", "indices", "(", "default", "all", ")", "the", "first", "element", "is", "a", "dictionary", "represent", "a", "global", "information", "of", "the", "cluster", "such", "as", "cluster_name", "...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/search_engine.py#L412-L422
SylvanasSun/FishFishJump
fish_core/search_engine.py
ElasticsearchClient.cluster_health_for_shards
def cluster_health_for_shards(self, index=None, params={}, **kwargs): """ Return a list of cluster health of specified indices(default all) and append shards information of each index the first element is a dictionary represent a global information of the cluster the second element represent a information of indices and its shards and each element is a dictionary such as [{'index' : 'a', 'status' : 'yellow', ..., 'shards' : {'0' : {...}, '1' : {...}, ...}, ...] """ params['level'] = 'shards' result = self.cluster_health(index, params, **kwargs) return self._process_cluster_health_info(result)
python
def cluster_health_for_shards(self, index=None, params={}, **kwargs): """ Return a list of cluster health of specified indices(default all) and append shards information of each index the first element is a dictionary represent a global information of the cluster the second element represent a information of indices and its shards and each element is a dictionary such as [{'index' : 'a', 'status' : 'yellow', ..., 'shards' : {'0' : {...}, '1' : {...}, ...}, ...] """ params['level'] = 'shards' result = self.cluster_health(index, params, **kwargs) return self._process_cluster_health_info(result)
[ "def", "cluster_health_for_shards", "(", "self", ",", "index", "=", "None", ",", "params", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "params", "[", "'level'", "]", "=", "'shards'", "result", "=", "self", ".", "cluster_health", "(", "index", ","...
Return a list of cluster health of specified indices(default all) and append shards information of each index the first element is a dictionary represent a global information of the cluster the second element represent a information of indices and its shards and each element is a dictionary such as [{'index' : 'a', 'status' : 'yellow', ..., 'shards' : {'0' : {...}, '1' : {...}, ...}, ...]
[ "Return", "a", "list", "of", "cluster", "health", "of", "specified", "indices", "(", "default", "all", ")", "and", "append", "shards", "information", "of", "each", "index", "the", "first", "element", "is", "a", "dictionary", "represent", "a", "global", "info...
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/search_engine.py#L424-L434
SylvanasSun/FishFishJump
fish_core/search_engine.py
ElasticsearchClient.nodes_simple_info
def nodes_simple_info(self, params={}, **kwargs): """ Return a dictionary of the nodes simple info that key is a column name, such as [{"http_address": "192.111.111.111", "name" : "test", ...}, ...] """ h = ['name', 'pid', 'http_address', 'version', 'jdk', 'disk.total', 'disk.used_percent', 'heap.current', 'heap.percent', 'ram.current', 'ram.percent', 'uptime', 'node.role'] result = self.client.cat.nodes(v=True, h=h, **kwargs, params=params) result = [x.strip().split(' ') for x in result.split('\n')] # Clean up the space result.remove(result[-1]) for i in range(len(result)): result[i] = list(filter(lambda x: x != '', result[i])) # Packing into the dictionary dicts = [] for i in range(len(result) - 1): dict = {} for k, v in zip(result[0], result[i + 1]): dict[k] = v dicts.append(dict) logger.info('Acquire simple information of the nodes is done succeeded: %s' % len(dicts)) return dicts
python
def nodes_simple_info(self, params={}, **kwargs): """ Return a dictionary of the nodes simple info that key is a column name, such as [{"http_address": "192.111.111.111", "name" : "test", ...}, ...] """ h = ['name', 'pid', 'http_address', 'version', 'jdk', 'disk.total', 'disk.used_percent', 'heap.current', 'heap.percent', 'ram.current', 'ram.percent', 'uptime', 'node.role'] result = self.client.cat.nodes(v=True, h=h, **kwargs, params=params) result = [x.strip().split(' ') for x in result.split('\n')] # Clean up the space result.remove(result[-1]) for i in range(len(result)): result[i] = list(filter(lambda x: x != '', result[i])) # Packing into the dictionary dicts = [] for i in range(len(result) - 1): dict = {} for k, v in zip(result[0], result[i + 1]): dict[k] = v dicts.append(dict) logger.info('Acquire simple information of the nodes is done succeeded: %s' % len(dicts)) return dicts
[ "def", "nodes_simple_info", "(", "self", ",", "params", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "h", "=", "[", "'name'", ",", "'pid'", ",", "'http_address'", ",", "'version'", ",", "'jdk'", ",", "'disk.total'", ",", "'disk.used_percent'", ",", ...
Return a dictionary of the nodes simple info that key is a column name, such as [{"http_address": "192.111.111.111", "name" : "test", ...}, ...]
[ "Return", "a", "dictionary", "of", "the", "nodes", "simple", "info", "that", "key", "is", "a", "column", "name", "such", "as", "[", "{", "http_address", ":", "192", ".", "111", ".", "111", ".", "111", "name", ":", "test", "...", "}", "...", "]" ]
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/search_engine.py#L469-L491
pgriess/pyblast
pyblast.py
__read_single_fasta_query_lines
def __read_single_fasta_query_lines(f): ''' Read and return sequence of lines (including newlines) that represent a single FASTA query record. The provided file is expected to be blocking. Returns None if there are no more query sequences in the file. ''' def readline(): l = f.readline() if l == '': raise EOFError() return l rec = None try: l = readline() assert l.startswith('>') rec = [l] while True: pos = f.tell() l = readline() if l.startswith('>'): f.seek(pos, 0) break rec += [l] except EOFError: pass return rec
python
def __read_single_fasta_query_lines(f): ''' Read and return sequence of lines (including newlines) that represent a single FASTA query record. The provided file is expected to be blocking. Returns None if there are no more query sequences in the file. ''' def readline(): l = f.readline() if l == '': raise EOFError() return l rec = None try: l = readline() assert l.startswith('>') rec = [l] while True: pos = f.tell() l = readline() if l.startswith('>'): f.seek(pos, 0) break rec += [l] except EOFError: pass return rec
[ "def", "__read_single_fasta_query_lines", "(", "f", ")", ":", "def", "readline", "(", ")", ":", "l", "=", "f", ".", "readline", "(", ")", "if", "l", "==", "''", ":", "raise", "EOFError", "(", ")", "return", "l", "rec", "=", "None", "try", ":", "l",...
Read and return sequence of lines (including newlines) that represent a single FASTA query record. The provided file is expected to be blocking. Returns None if there are no more query sequences in the file.
[ "Read", "and", "return", "sequence", "of", "lines", "(", "including", "newlines", ")", "that", "represent", "a", "single", "FASTA", "query", "record", ".", "The", "provided", "file", "is", "expected", "to", "be", "blocking", "." ]
train
https://github.com/pgriess/pyblast/blob/129dbd411418d88af903bebc6b4d97085098ce26/pyblast.py#L87-L118
pgriess/pyblast
pyblast.py
__read_single_query_result
def __read_single_query_result(rs, field_names): ''' Read the result of a single query from the given string, returning a tuple of (record, remaining-string). If no complete record could be read, the first element of the tuple is None and the second element is the original imput string. ''' rf = StringIO.StringIO(rs) def readline(): l = rf.readline() if not l.endswith('\n'): raise EOFError() return l.strip() result = Result() try: l = readline() assert l.startswith('# BLAST') l = readline() assert l.startswith('# Query: ') query_str = l[len('# Query: '):].strip() if query_str: if ' ' in query_str: result.id, result.description = [ s.strip() for s in query_str.split(' ', 1)] else: result.id = query_str l = readline() assert l.startswith('# Database: ') l = readline() if l.startswith('# Fields: '): fns = l[len('# Fields: '):].split(', ') assert len(field_names) == len(fns) l = readline() assert l.endswith(' hits found') nhits = int(l[len('# '):-1 * len(' hits found')]) while nhits > 0: l = readline() field_vals = l.split('\t') assert len(field_vals) == len(field_names) fields = dict(zip(field_names, field_vals)) result.hits.append(Hit(fields)) nhits -= 1 return result, rf.read() except EOFError: return None, rs
python
def __read_single_query_result(rs, field_names): ''' Read the result of a single query from the given string, returning a tuple of (record, remaining-string). If no complete record could be read, the first element of the tuple is None and the second element is the original imput string. ''' rf = StringIO.StringIO(rs) def readline(): l = rf.readline() if not l.endswith('\n'): raise EOFError() return l.strip() result = Result() try: l = readline() assert l.startswith('# BLAST') l = readline() assert l.startswith('# Query: ') query_str = l[len('# Query: '):].strip() if query_str: if ' ' in query_str: result.id, result.description = [ s.strip() for s in query_str.split(' ', 1)] else: result.id = query_str l = readline() assert l.startswith('# Database: ') l = readline() if l.startswith('# Fields: '): fns = l[len('# Fields: '):].split(', ') assert len(field_names) == len(fns) l = readline() assert l.endswith(' hits found') nhits = int(l[len('# '):-1 * len(' hits found')]) while nhits > 0: l = readline() field_vals = l.split('\t') assert len(field_vals) == len(field_names) fields = dict(zip(field_names, field_vals)) result.hits.append(Hit(fields)) nhits -= 1 return result, rf.read() except EOFError: return None, rs
[ "def", "__read_single_query_result", "(", "rs", ",", "field_names", ")", ":", "rf", "=", "StringIO", ".", "StringIO", "(", "rs", ")", "def", "readline", "(", ")", ":", "l", "=", "rf", ".", "readline", "(", ")", "if", "not", "l", ".", "endswith", "(",...
Read the result of a single query from the given string, returning a tuple of (record, remaining-string). If no complete record could be read, the first element of the tuple is None and the second element is the original input string.
[ "Read", "the", "result", "of", "a", "single", "query", "from", "the", "given", "string", "returning", "a", "tuple", "of", "(", "record", "remaining", "-", "string", ")", ".", "If", "no", "complete", "record", "could", "be", "read", "the", "first", "eleme...
train
https://github.com/pgriess/pyblast/blob/129dbd411418d88af903bebc6b4d97085098ce26/pyblast.py#L121-L176
pgriess/pyblast
pyblast.py
__run_blast_select_loop
def __run_blast_select_loop(input_file, popens, fields):
    '''
    Run the select(2) loop to handle blast I/O to the given set of Popen
    objects. Yields records back that have been read from blast processes.
    '''

    # Switch a file descriptor to non-blocking mode so reads/writes never
    # stall the loop.
    def make_nonblocking(f):
        fl = fcntl.fcntl(f.fileno(), fcntl.F_GETFL)
        fl |= os.O_NONBLOCK
        fcntl.fcntl(f.fileno(), fcntl.F_SETFL, fl)

    rfds = set()
    wfds = set()
    # Maps both the stdin and stdout fd of each process to one shared
    # per-process state dict (popen handle + pending I/O buffers).
    fd_map = {}
    for p in popens:
        make_nonblocking(p.stdout)
        rfds.add(p.stdout.fileno())
        fd_map[p.stdout.fileno()] = {
            'popen': p,
            'query_buffer': '',
            'result_buffer': ''}

        make_nonblocking(p.stdin)
        wfds.add(p.stdin.fileno())
        fd_map[p.stdin.fileno()] = fd_map[p.stdout.fileno()]

    while len(rfds) + len(wfds) > 0:
        # XXX: Should we be tracking excepted file descriptors as well?
        rl, wl, _ = select.select(rfds, wfds, [])

        # For each of our readable blast processes, read response
        # records and emit them
        for fd in rl:
            rs = fd_map[fd]['result_buffer']
            rbuf = os.read(fd, select.PIPE_BUF)

            # The blast process has finished emitting records. Stop
            # attempting to read from or write to it. If we have
            # excess data in our result_buffer, c'est la vie.
            if rbuf == '':
                p = fd_map[fd]['popen']
                rfds.remove(p.stdout.fileno())
                p.stdout.close()
                if not p.stdin.closed:
                    wfds.remove(p.stdin.fileno())
                    p.stdin.close()
                continue

            # Parse and yield every complete record accumulated so far;
            # any trailing partial record stays buffered for the next read.
            rs += rbuf
            while True:
                rec, rs = __read_single_query_result(rs, fields)
                if rec is None:
                    break
                yield rec
            fd_map[fd]['result_buffer'] = rs

        # For each of our writable blast processes, grab a new query
        # sequence and send it off to them.
        for fd in wl:
            qs = fd_map[fd]['query_buffer']
            if not qs:
                ql = __read_single_fasta_query_lines(input_file)

                # No more input records available. Close the pipe to
                # signal this to the blast process.
                if ql is None:
                    p = fd_map[fd]['popen']
                    wfds.remove(p.stdin.fileno())
                    p.stdin.close()
                    continue
                qs = ''.join(ql)

            # XXX: For some reason, despite select(2) indicating that
            # this file descriptor is writable, writes can fail
            # with EWOULDBLOCK. Handle this gracefully.
            try:
                # os.write may accept fewer bytes than offered; keep the
                # unwritten tail buffered for the next writable event.
                written = os.write(fd, qs)
                qs = qs[written:]
            except OSError, e:
                assert e.errno == errno.EWOULDBLOCK

            fd_map[fd]['query_buffer'] = qs
python
def __run_blast_select_loop(input_file, popens, fields): ''' Run the select(2) loop to handle blast I/O to the given set of Popen objects. Yields records back that have been read from blast processes. ''' def make_nonblocking(f): fl = fcntl.fcntl(f.fileno(), fcntl.F_GETFL) fl |= os.O_NONBLOCK fcntl.fcntl(f.fileno(), fcntl.F_SETFL, fl) rfds = set() wfds = set() fd_map = {} for p in popens: make_nonblocking(p.stdout) rfds.add(p.stdout.fileno()) fd_map[p.stdout.fileno()] = { 'popen': p, 'query_buffer': '', 'result_buffer': ''} make_nonblocking(p.stdin) wfds.add(p.stdin.fileno()) fd_map[p.stdin.fileno()] = fd_map[p.stdout.fileno()] while len(rfds) + len(wfds) > 0: # XXX: Should we be tracking excepted file descriptors as well? rl, wl, _ = select.select(rfds, wfds, []) # For each of our readable blast processes, read response # records and emit them for fd in rl: rs = fd_map[fd]['result_buffer'] rbuf = os.read(fd, select.PIPE_BUF) # The blast process has finished emitting records. Stop # attempting to read from or write to it. If we have # excess data in our result_buffer, c'est la vie. if rbuf == '': p = fd_map[fd]['popen'] rfds.remove(p.stdout.fileno()) p.stdout.close() if not p.stdin.closed: wfds.remove(p.stdin.fileno()) p.stdin.close() continue rs += rbuf while True: rec, rs = __read_single_query_result(rs, fields) if rec is None: break yield rec fd_map[fd]['result_buffer'] = rs # For each of our writable blast processes, grab a new query # sequence and send it off to them. for fd in wl: qs = fd_map[fd]['query_buffer'] if not qs: ql = __read_single_fasta_query_lines(input_file) # No more input records available. Close the pipe to # signal this to the blast process. if ql is None: p = fd_map[fd]['popen'] wfds.remove(p.stdin.fileno()) p.stdin.close() continue qs = ''.join(ql) # XXX: For some reason, despite select(2) indicating that # this file descriptor is writable, writes can fail # with EWOULDBLOCK. Handle this gracefully. 
try: written = os.write(fd, qs) qs = qs[written:] except OSError, e: assert e.errno == errno.EWOULDBLOCK fd_map[fd]['query_buffer'] = qs
[ "def", "__run_blast_select_loop", "(", "input_file", ",", "popens", ",", "fields", ")", ":", "def", "make_nonblocking", "(", "f", ")", ":", "fl", "=", "fcntl", ".", "fcntl", "(", "f", ".", "fileno", "(", ")", ",", "fcntl", ".", "F_GETFL", ")", "fl", ...
Run the select(2) loop to handle blast I/O to the given set of Popen objects. Yields records back that have been read from blast processes.
[ "Run", "the", "select", "(", "2", ")", "loop", "to", "handle", "blast", "I", "/", "O", "to", "the", "given", "set", "of", "Popen", "objects", "." ]
train
https://github.com/pgriess/pyblast/blob/129dbd411418d88af903bebc6b4d97085098ce26/pyblast.py#L179-L270
pgriess/pyblast
pyblast.py
__run_blast
def __run_blast(blast_command, input_file, *args, **kwargs): ''' Run a blast variant on the given input file. ''' # XXX: Eventually, translate results on the fly as requested? Or # just always use our parsed object? if 'outfmt' in kwargs: raise Exception('Use of the -outfmt option is not supported') num_processes = kwargs.get( 'pb_num_processes', os.sysconf('SC_NPROCESSORS_ONLN')) fields = kwargs.get('pb_fields', DEFAULT_HIT_FIELDS) blast_args = [blast_command] blast_args += ['-outfmt', '7 {}'.format(' '.join(fields))] for a in args: blast_args += ['-' + a] for k, v in kwargs.iteritems(): if not k.startswith('pb_'): blast_args += ['-' + k, str(v)] popens = [] for _ in range(num_processes): popens.append( subprocess.Popen( args=blast_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=True)) try: for r in __run_blast_select_loop(input_file, popens, fields): yield r finally: for p in popens: if p.poll() is None: p.terminate() p.wait()
python
def __run_blast(blast_command, input_file, *args, **kwargs): ''' Run a blast variant on the given input file. ''' # XXX: Eventually, translate results on the fly as requested? Or # just always use our parsed object? if 'outfmt' in kwargs: raise Exception('Use of the -outfmt option is not supported') num_processes = kwargs.get( 'pb_num_processes', os.sysconf('SC_NPROCESSORS_ONLN')) fields = kwargs.get('pb_fields', DEFAULT_HIT_FIELDS) blast_args = [blast_command] blast_args += ['-outfmt', '7 {}'.format(' '.join(fields))] for a in args: blast_args += ['-' + a] for k, v in kwargs.iteritems(): if not k.startswith('pb_'): blast_args += ['-' + k, str(v)] popens = [] for _ in range(num_processes): popens.append( subprocess.Popen( args=blast_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=True)) try: for r in __run_blast_select_loop(input_file, popens, fields): yield r finally: for p in popens: if p.poll() is None: p.terminate() p.wait()
[ "def", "__run_blast", "(", "blast_command", ",", "input_file", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# XXX: Eventually, translate results on the fly as requested? Or", "# just always use our parsed object?", "if", "'outfmt'", "in", "kwargs", ":", "rai...
Run a blast variant on the given input file.
[ "Run", "a", "blast", "variant", "on", "the", "given", "input", "file", "." ]
train
https://github.com/pgriess/pyblast/blob/129dbd411418d88af903bebc6b4d97085098ce26/pyblast.py#L273-L309
ucbvislab/radiotool
radiotool/composer/segment.py
Segment.get_frames
def get_frames(self, channels=2): """Get numpy array of frames corresponding to the segment. :param integer channels: Number of channels in output array :returns: Array of frames in the segment :rtype: numpy array """ tmp_frame = self.track.current_frame self.track.current_frame = self.start frames = self.track.read_frames(self.duration, channels=channels) self.track.current_frame = tmp_frame for effect in self.effects: frames = effect.apply_to(frames, self.samplerate) return frames.copy()
python
def get_frames(self, channels=2): """Get numpy array of frames corresponding to the segment. :param integer channels: Number of channels in output array :returns: Array of frames in the segment :rtype: numpy array """ tmp_frame = self.track.current_frame self.track.current_frame = self.start frames = self.track.read_frames(self.duration, channels=channels) self.track.current_frame = tmp_frame for effect in self.effects: frames = effect.apply_to(frames, self.samplerate) return frames.copy()
[ "def", "get_frames", "(", "self", ",", "channels", "=", "2", ")", ":", "tmp_frame", "=", "self", ".", "track", ".", "current_frame", "self", ".", "track", ".", "current_frame", "=", "self", ".", "start", "frames", "=", "self", ".", "track", ".", "read_...
Get numpy array of frames corresponding to the segment. :param integer channels: Number of channels in output array :returns: Array of frames in the segment :rtype: numpy array
[ "Get", "numpy", "array", "of", "frames", "corresponding", "to", "the", "segment", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/segment.py#L59-L75
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.duration
def duration(self): """Get duration of composition """ return max([x.comp_location + x.duration for x in self.segments])
python
def duration(self): """Get duration of composition """ return max([x.comp_location + x.duration for x in self.segments])
[ "def", "duration", "(", "self", ")", ":", "return", "max", "(", "[", "x", ".", "comp_location", "+", "x", ".", "duration", "for", "x", "in", "self", ".", "segments", "]", ")" ]
Get duration of composition
[ "Get", "duration", "of", "composition" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L66-L70
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.add_segment
def add_segment(self, segment): """Add a segment to the composition :param segment: Segment to add to composition :type segment: :py:class:`radiotool.composer.Segment` """ self.tracks.add(segment.track) self.segments.append(segment)
python
def add_segment(self, segment): """Add a segment to the composition :param segment: Segment to add to composition :type segment: :py:class:`radiotool.composer.Segment` """ self.tracks.add(segment.track) self.segments.append(segment)
[ "def", "add_segment", "(", "self", ",", "segment", ")", ":", "self", ".", "tracks", ".", "add", "(", "segment", ".", "track", ")", "self", ".", "segments", ".", "append", "(", "segment", ")" ]
Add a segment to the composition :param segment: Segment to add to composition :type segment: :py:class:`radiotool.composer.Segment`
[ "Add", "a", "segment", "to", "the", "composition" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L89-L96
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.add_segments
def add_segments(self, segments): """Add a list of segments to the composition :param segments: Segments to add to composition :type segments: list of :py:class:`radiotool.composer.Segment` """ self.tracks.update([seg.track for seg in segments]) self.segments.extend(segments)
python
def add_segments(self, segments): """Add a list of segments to the composition :param segments: Segments to add to composition :type segments: list of :py:class:`radiotool.composer.Segment` """ self.tracks.update([seg.track for seg in segments]) self.segments.extend(segments)
[ "def", "add_segments", "(", "self", ",", "segments", ")", ":", "self", ".", "tracks", ".", "update", "(", "[", "seg", ".", "track", "for", "seg", "in", "segments", "]", ")", "self", ".", "segments", ".", "extend", "(", "segments", ")" ]
Add a list of segments to the composition :param segments: Segments to add to composition :type segments: list of :py:class:`radiotool.composer.Segment`
[ "Add", "a", "list", "of", "segments", "to", "the", "composition" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L98-L105
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.fade_in
def fade_in(self, segment, duration, fade_type="linear"): """Adds a fade in to a segment in the composition :param segment: Segment to fade in to :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-in (in seconds) :type duration: float :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ f = Fade(segment.track, segment.comp_location_in_seconds, duration, 0.0, 1.0, fade_type=fade_type) self.add_dynamic(f) return f
python
def fade_in(self, segment, duration, fade_type="linear"): """Adds a fade in to a segment in the composition :param segment: Segment to fade in to :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-in (in seconds) :type duration: float :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ f = Fade(segment.track, segment.comp_location_in_seconds, duration, 0.0, 1.0, fade_type=fade_type) self.add_dynamic(f) return f
[ "def", "fade_in", "(", "self", ",", "segment", ",", "duration", ",", "fade_type", "=", "\"linear\"", ")", ":", "f", "=", "Fade", "(", "segment", ".", "track", ",", "segment", ".", "comp_location_in_seconds", ",", "duration", ",", "0.0", ",", "1.0", ",", ...
Adds a fade in to a segment in the composition :param segment: Segment to fade in to :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-in (in seconds) :type duration: float :returns: The fade that has been added to the composition :rtype: :py:class:`Fade`
[ "Adds", "a", "fade", "in", "to", "a", "segment", "in", "the", "composition" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L139-L152
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.fade_out
def fade_out(self, segment, duration, fade_type="linear"): """Adds a fade out to a segment in the composition :param segment: Segment to fade out :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-out (in seconds) :type duration: float :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ score_loc_in_seconds = segment.comp_location_in_seconds +\ segment.duration_in_seconds - duration f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0, fade_type=fade_type) # bug fixing... perhaps f.comp_location = segment.comp_location + segment.duration -\ int(duration * segment.track.samplerate) self.add_dynamic(f) return f
python
def fade_out(self, segment, duration, fade_type="linear"): """Adds a fade out to a segment in the composition :param segment: Segment to fade out :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-out (in seconds) :type duration: float :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ score_loc_in_seconds = segment.comp_location_in_seconds +\ segment.duration_in_seconds - duration f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0, fade_type=fade_type) # bug fixing... perhaps f.comp_location = segment.comp_location + segment.duration -\ int(duration * segment.track.samplerate) self.add_dynamic(f) return f
[ "def", "fade_out", "(", "self", ",", "segment", ",", "duration", ",", "fade_type", "=", "\"linear\"", ")", ":", "score_loc_in_seconds", "=", "segment", ".", "comp_location_in_seconds", "+", "segment", ".", "duration_in_seconds", "-", "duration", "f", "=", "Fade"...
Adds a fade out to a segment in the composition :param segment: Segment to fade out :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-out (in seconds) :type duration: float :returns: The fade that has been added to the composition :rtype: :py:class:`Fade`
[ "Adds", "a", "fade", "out", "to", "a", "segment", "in", "the", "composition" ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L154-L173
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.extended_fade_in
def extended_fade_in(self, segment, duration): """Add a fade-in to a segment that extends the beginning of the segment. :param segment: Segment to fade in :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-in (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ dur = int(duration * segment.track.samplerate) if segment.start - dur >= 0: segment.start -= dur else: raise Exception( "Cannot create fade-in that extends " "past the track's beginning") if segment.comp_location - dur >= 0: segment.comp_location -= dur else: raise Exception( "Cannot create fade-in the extends past the score's beginning") segment.duration += dur f = Fade(segment.track, segment.comp_location_in_seconds, duration, 0.0, 1.0) self.add_dynamic(f) return f
python
def extended_fade_in(self, segment, duration): """Add a fade-in to a segment that extends the beginning of the segment. :param segment: Segment to fade in :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-in (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ dur = int(duration * segment.track.samplerate) if segment.start - dur >= 0: segment.start -= dur else: raise Exception( "Cannot create fade-in that extends " "past the track's beginning") if segment.comp_location - dur >= 0: segment.comp_location -= dur else: raise Exception( "Cannot create fade-in the extends past the score's beginning") segment.duration += dur f = Fade(segment.track, segment.comp_location_in_seconds, duration, 0.0, 1.0) self.add_dynamic(f) return f
[ "def", "extended_fade_in", "(", "self", ",", "segment", ",", "duration", ")", ":", "dur", "=", "int", "(", "duration", "*", "segment", ".", "track", ".", "samplerate", ")", "if", "segment", ".", "start", "-", "dur", ">=", "0", ":", "segment", ".", "s...
Add a fade-in to a segment that extends the beginning of the segment. :param segment: Segment to fade in :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-in (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade`
[ "Add", "a", "fade", "-", "in", "to", "a", "segment", "that", "extends", "the", "beginning", "of", "the", "segment", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L175-L203
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.extended_fade_out
def extended_fade_out(self, segment, duration): """Add a fade-out to a segment that extends the beginning of the segment. :param segment: Segment to fade out :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-out (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ dur = int(duration * segment.track.samplerate) if segment.start + segment.duration + dur <\ segment.track.duration: segment.duration += dur else: raise Exception( "Cannot create fade-out that extends past the track's end") score_loc_in_seconds = segment.comp_location_in_seconds +\ segment.duration_in_seconds - duration f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0) self.add_dynamic(f) return f
python
def extended_fade_out(self, segment, duration): """Add a fade-out to a segment that extends the beginning of the segment. :param segment: Segment to fade out :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-out (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade` """ dur = int(duration * segment.track.samplerate) if segment.start + segment.duration + dur <\ segment.track.duration: segment.duration += dur else: raise Exception( "Cannot create fade-out that extends past the track's end") score_loc_in_seconds = segment.comp_location_in_seconds +\ segment.duration_in_seconds - duration f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0) self.add_dynamic(f) return f
[ "def", "extended_fade_out", "(", "self", ",", "segment", ",", "duration", ")", ":", "dur", "=", "int", "(", "duration", "*", "segment", ".", "track", ".", "samplerate", ")", "if", "segment", ".", "start", "+", "segment", ".", "duration", "+", "dur", "<...
Add a fade-out to a segment that extends the beginning of the segment. :param segment: Segment to fade out :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-out (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade`
[ "Add", "a", "fade", "-", "out", "to", "a", "segment", "that", "extends", "the", "beginning", "of", "the", "segment", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L205-L226
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.cross_fade
def cross_fade(self, seg1, seg2, duration): """Add a linear crossfade to the composition between two segments. :param seg1: First segment (fading out) :type seg1: :py:class:`radiotool.composer.Segment` :param seg2: Second segment (fading in) :type seg2: :py:class:`radiotool.composer.Segment` :param duration: Duration of crossfade (in seconds) """ if seg1.comp_location + seg1.duration - seg2.comp_location < 2: dur = int(duration * seg1.track.samplerate) if dur % 2 == 1: dur -= 1 if dur / 2 > seg1.duration: dur = seg1.duration * 2 if dur / 2 > seg2.duration: dur = seg2.duration * 2 # we're going to compute the crossfade and then create a RawTrack # for the resulting frames if seg2.start - (dur / 2) < 0: diff = seg2.start seg2.start = 0 seg2.duration -= diff seg2.comp_location -= diff dur = 2 * diff else: seg2.start -= (dur / 2) seg2.duration += (dur / 2) seg2.comp_location -= (dur / 2) seg1.duration += (dur / 2) out_frames = seg1.get_frames(channels=self.channels)[-dur:] seg1.duration -= dur in_frames = seg2.get_frames(channels=self.channels)[:dur] seg2.start += dur seg2.duration -= dur seg2.comp_location += dur # compute the crossfade in_frames = in_frames[:min(map(len, [in_frames, out_frames]))] out_frames = out_frames[:min(map(len, [in_frames, out_frames]))] cf_frames = radiotool.utils.linear(out_frames, in_frames) # cf_frames = equal_power(out_frames, in_frames) raw_track = RawTrack(cf_frames, name="crossfade", samplerate=seg1.track.samplerate) rs_comp_location = (seg1.comp_location + seg1.duration) /\ float(seg1.track.samplerate) rs_duration = raw_track.duration / float(raw_track.samplerate) raw_seg = Segment(raw_track, rs_comp_location, 0.0, rs_duration) # will this fix a bug? 
raw_seg.duration = raw_track.duration raw_seg.comp_location = seg1.comp_location + seg1.duration self.add_track(raw_track) self.add_segment(raw_seg) return raw_seg else: print seg1.comp_location + seg1.duration, seg2.comp_location raise Exception("Segments must be adjacent" "to add a crossfade ({}, {})".format( seg1.comp_location + seg1.duration, seg2.comp_location))
python
def cross_fade(self, seg1, seg2, duration): """Add a linear crossfade to the composition between two segments. :param seg1: First segment (fading out) :type seg1: :py:class:`radiotool.composer.Segment` :param seg2: Second segment (fading in) :type seg2: :py:class:`radiotool.composer.Segment` :param duration: Duration of crossfade (in seconds) """ if seg1.comp_location + seg1.duration - seg2.comp_location < 2: dur = int(duration * seg1.track.samplerate) if dur % 2 == 1: dur -= 1 if dur / 2 > seg1.duration: dur = seg1.duration * 2 if dur / 2 > seg2.duration: dur = seg2.duration * 2 # we're going to compute the crossfade and then create a RawTrack # for the resulting frames if seg2.start - (dur / 2) < 0: diff = seg2.start seg2.start = 0 seg2.duration -= diff seg2.comp_location -= diff dur = 2 * diff else: seg2.start -= (dur / 2) seg2.duration += (dur / 2) seg2.comp_location -= (dur / 2) seg1.duration += (dur / 2) out_frames = seg1.get_frames(channels=self.channels)[-dur:] seg1.duration -= dur in_frames = seg2.get_frames(channels=self.channels)[:dur] seg2.start += dur seg2.duration -= dur seg2.comp_location += dur # compute the crossfade in_frames = in_frames[:min(map(len, [in_frames, out_frames]))] out_frames = out_frames[:min(map(len, [in_frames, out_frames]))] cf_frames = radiotool.utils.linear(out_frames, in_frames) # cf_frames = equal_power(out_frames, in_frames) raw_track = RawTrack(cf_frames, name="crossfade", samplerate=seg1.track.samplerate) rs_comp_location = (seg1.comp_location + seg1.duration) /\ float(seg1.track.samplerate) rs_duration = raw_track.duration / float(raw_track.samplerate) raw_seg = Segment(raw_track, rs_comp_location, 0.0, rs_duration) # will this fix a bug? 
raw_seg.duration = raw_track.duration raw_seg.comp_location = seg1.comp_location + seg1.duration self.add_track(raw_track) self.add_segment(raw_seg) return raw_seg else: print seg1.comp_location + seg1.duration, seg2.comp_location raise Exception("Segments must be adjacent" "to add a crossfade ({}, {})".format( seg1.comp_location + seg1.duration, seg2.comp_location))
[ "def", "cross_fade", "(", "self", ",", "seg1", ",", "seg2", ",", "duration", ")", ":", "if", "seg1", ".", "comp_location", "+", "seg1", ".", "duration", "-", "seg2", ".", "comp_location", "<", "2", ":", "dur", "=", "int", "(", "duration", "*", "seg1"...
Add a linear crossfade to the composition between two segments. :param seg1: First segment (fading out) :type seg1: :py:class:`radiotool.composer.Segment` :param seg2: Second segment (fading in) :type seg2: :py:class:`radiotool.composer.Segment` :param duration: Duration of crossfade (in seconds)
[ "Add", "a", "linear", "crossfade", "to", "the", "composition", "between", "two", "segments", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L228-L304
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.empty_over_span
def empty_over_span(self, time, duration): """Helper method that tests whether composition contains any segments at a given time for a given duration. :param time: Time (in seconds) to start span :param duration: Duration (in seconds) of span :returns: `True` if there are no segments in the composition that overlap the span starting at `time` and lasting for `duration` seconds. `False` otherwise. """ for seg in self.segments: # starts in range if seg.comp_location_in_seconds >= time and\ seg.comp_location_in_seconds < time + duration: return False # or, ends in range elif seg.comp_location_in_seconds + seg.duration_in_seconds >= time and\ seg.comp_location_in_seconds + seg.duration_in_seconds < time + duration: return False # or, spans entire range elif seg.comp_location_in_seconds < time and\ seg.comp_location_in_seconds + seg.duration_in_seconds >= time + duration: return False return True
python
def empty_over_span(self, time, duration): """Helper method that tests whether composition contains any segments at a given time for a given duration. :param time: Time (in seconds) to start span :param duration: Duration (in seconds) of span :returns: `True` if there are no segments in the composition that overlap the span starting at `time` and lasting for `duration` seconds. `False` otherwise. """ for seg in self.segments: # starts in range if seg.comp_location_in_seconds >= time and\ seg.comp_location_in_seconds < time + duration: return False # or, ends in range elif seg.comp_location_in_seconds + seg.duration_in_seconds >= time and\ seg.comp_location_in_seconds + seg.duration_in_seconds < time + duration: return False # or, spans entire range elif seg.comp_location_in_seconds < time and\ seg.comp_location_in_seconds + seg.duration_in_seconds >= time + duration: return False return True
[ "def", "empty_over_span", "(", "self", ",", "time", ",", "duration", ")", ":", "for", "seg", "in", "self", ".", "segments", ":", "# starts in range", "if", "seg", ".", "comp_location_in_seconds", ">=", "time", "and", "seg", ".", "comp_location_in_seconds", "<"...
Helper method that tests whether composition contains any segments at a given time for a given duration. :param time: Time (in seconds) to start span :param duration: Duration (in seconds) of span :returns: `True` if there are no segments in the composition that overlap the span starting at `time` and lasting for `duration` seconds. `False` otherwise.
[ "Helper", "method", "that", "tests", "whether", "composition", "contains", "any", "segments", "at", "a", "given", "time", "for", "a", "given", "duration", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L316-L337
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.contract
def contract(self, time, duration, min_contraction=0.0): """Remove empty gaps from the composition starting at a given time for a given duration. """ # remove audio from the composition starting at time # for duration contract_dur = 0.0 contract_start = time if self.empty_over_span(time, duration): contract_dur = duration contract_start = time else: starts = [s.comp_location_in_seconds for s in self.segments] ends = [s.comp_location_in_seconds + s.duration_in_seconds for s in self.segments] key_starts = [] key_ends = [] for start in starts: if start >= time and start < time + duration: # does a segment cover the location right before this start? is_key_start = True for seg in self.segments: if seg.comp_location_in_seconds < start and\ seg.comp_location_in_seconds + seg.duration_in_seconds >= start: is_key_start = False break if is_key_start: key_starts.append(start) for end in ends: if end >= time and end < time + duration: # does a segment cover the location right before this start? is_key_end = True for seg in self.segments: if seg.comp_location_in_seconds <= end and\ seg.comp_location_in_seconds + seg.duration_in_seconds > end: is_key_end = False break if is_key_end: key_ends.append(end) if len(key_starts) + len(key_ends) == 0: return 0, 0 # combine key starts and key ends key_both = [s for s in key_starts] key_both.extend([s for s in key_ends]) key_both = sorted(key_both) first_key = key_both[0] if first_key in key_starts: contract_start = time contract_dur = first_key - time else: contract_start = first_key if len(key_both) >= 2: contract_dur = key_both[1] - first_key else: contract_dur = time + duration - first_key if contract_dur > min_contraction: for seg in self.segments: if seg.comp_location_in_seconds > contract_start: dur_samples = int(seg.samplerate * contract_dur) seg.comp_location -= dur_samples for dyn in self.dynamics: if dyn.comp_location_in_seconds > contract_start: dur_samples = int(seg.samplerate * contract_dur) dyn.comp_location -= dur_samples 
return contract_start, contract_dur else: return 0.0, 0.0
python
def contract(self, time, duration, min_contraction=0.0): """Remove empty gaps from the composition starting at a given time for a given duration. """ # remove audio from the composition starting at time # for duration contract_dur = 0.0 contract_start = time if self.empty_over_span(time, duration): contract_dur = duration contract_start = time else: starts = [s.comp_location_in_seconds for s in self.segments] ends = [s.comp_location_in_seconds + s.duration_in_seconds for s in self.segments] key_starts = [] key_ends = [] for start in starts: if start >= time and start < time + duration: # does a segment cover the location right before this start? is_key_start = True for seg in self.segments: if seg.comp_location_in_seconds < start and\ seg.comp_location_in_seconds + seg.duration_in_seconds >= start: is_key_start = False break if is_key_start: key_starts.append(start) for end in ends: if end >= time and end < time + duration: # does a segment cover the location right before this start? is_key_end = True for seg in self.segments: if seg.comp_location_in_seconds <= end and\ seg.comp_location_in_seconds + seg.duration_in_seconds > end: is_key_end = False break if is_key_end: key_ends.append(end) if len(key_starts) + len(key_ends) == 0: return 0, 0 # combine key starts and key ends key_both = [s for s in key_starts] key_both.extend([s for s in key_ends]) key_both = sorted(key_both) first_key = key_both[0] if first_key in key_starts: contract_start = time contract_dur = first_key - time else: contract_start = first_key if len(key_both) >= 2: contract_dur = key_both[1] - first_key else: contract_dur = time + duration - first_key if contract_dur > min_contraction: for seg in self.segments: if seg.comp_location_in_seconds > contract_start: dur_samples = int(seg.samplerate * contract_dur) seg.comp_location -= dur_samples for dyn in self.dynamics: if dyn.comp_location_in_seconds > contract_start: dur_samples = int(seg.samplerate * contract_dur) dyn.comp_location -= dur_samples 
return contract_start, contract_dur else: return 0.0, 0.0
[ "def", "contract", "(", "self", ",", "time", ",", "duration", ",", "min_contraction", "=", "0.0", ")", ":", "# remove audio from the composition starting at time", "# for duration", "contract_dur", "=", "0.0", "contract_start", "=", "time", "if", "self", ".", "empty...
Remove empty gaps from the composition starting at a given time for a given duration.
[ "Remove", "empty", "gaps", "from", "the", "composition", "starting", "at", "a", "given", "time", "for", "a", "given", "duration", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L339-L417
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.add_music_cue
def add_music_cue(self, track, comp_cue, song_cue, duration=6.0, padding_before=12.0, padding_after=12.0): """Add a music cue to the composition. This doesn't do any audio analysis, it just aligns a specified point in the track (presumably music) with a location in the composition. See UnderScore_ for a visualization of what this is doing to the music track. .. _UnderScore: http://vis.berkeley.edu/papers/underscore/ :param track: Track to align in the composition :type track: :py:class:`radiotool.composer.Track` :param float comp_cue: Location in composition to align music cue (in seconds) :param float song_cue: Location in the music track to align with the composition cue (in seconds) :param float duration: Duration of music after the song cue before the music starts to fade out (in seconds) :param float padding_before: Duration of music playing softly before the music cue/composition cue (in seconds) :param float padding_after: Duration of music playing softly after the music cue/composition cue (in seconds) """ self.tracks.add(track) pre_fade = 3 post_fade = 3 if padding_before + pre_fade > song_cue: padding_before = song_cue - pre_fade if padding_before + pre_fade > score_cue: padding_before = score_cue - pre_fade s = Segment(track, score_cue - padding_before - pre_fade, song_cue - padding_before - pre_fade, pre_fade + padding_before + duration + padding_after + post_fade) self.add_segment(s) d = [] dyn_adj = 1 track.current_frame = 0 d.append(Fade(track, score_cue - padding_before - pre_fade, pre_fade, 0, .1*dyn_adj, fade_type="linear")) d.append(Fade(track, score_cue - padding_before, padding_before, .1*dyn_adj, .4*dyn_adj, fade_type="exponential")) d.append(Volume(track, score_cue, duration, .4*dyn_adj)) d.append(Fade(track, score_cue + duration, padding_after, .4*dyn_adj, 0, fade_type="exponential")) d.append(Fade(track, score_cue + duration + padding_after, post_fade, .1*dyn_adj, 0, fade_type="linear")) self.add_dynamics(d)
python
def add_music_cue(self, track, comp_cue, song_cue, duration=6.0, padding_before=12.0, padding_after=12.0): """Add a music cue to the composition. This doesn't do any audio analysis, it just aligns a specified point in the track (presumably music) with a location in the composition. See UnderScore_ for a visualization of what this is doing to the music track. .. _UnderScore: http://vis.berkeley.edu/papers/underscore/ :param track: Track to align in the composition :type track: :py:class:`radiotool.composer.Track` :param float comp_cue: Location in composition to align music cue (in seconds) :param float song_cue: Location in the music track to align with the composition cue (in seconds) :param float duration: Duration of music after the song cue before the music starts to fade out (in seconds) :param float padding_before: Duration of music playing softly before the music cue/composition cue (in seconds) :param float padding_after: Duration of music playing softly after the music cue/composition cue (in seconds) """ self.tracks.add(track) pre_fade = 3 post_fade = 3 if padding_before + pre_fade > song_cue: padding_before = song_cue - pre_fade if padding_before + pre_fade > score_cue: padding_before = score_cue - pre_fade s = Segment(track, score_cue - padding_before - pre_fade, song_cue - padding_before - pre_fade, pre_fade + padding_before + duration + padding_after + post_fade) self.add_segment(s) d = [] dyn_adj = 1 track.current_frame = 0 d.append(Fade(track, score_cue - padding_before - pre_fade, pre_fade, 0, .1*dyn_adj, fade_type="linear")) d.append(Fade(track, score_cue - padding_before, padding_before, .1*dyn_adj, .4*dyn_adj, fade_type="exponential")) d.append(Volume(track, score_cue, duration, .4*dyn_adj)) d.append(Fade(track, score_cue + duration, padding_after, .4*dyn_adj, 0, fade_type="exponential")) d.append(Fade(track, score_cue + duration + padding_after, post_fade, .1*dyn_adj, 0, fade_type="linear")) self.add_dynamics(d)
[ "def", "add_music_cue", "(", "self", ",", "track", ",", "comp_cue", ",", "song_cue", ",", "duration", "=", "6.0", ",", "padding_before", "=", "12.0", ",", "padding_after", "=", "12.0", ")", ":", "self", ".", "tracks", ".", "add", "(", "track", ")", "pr...
Add a music cue to the composition. This doesn't do any audio analysis, it just aligns a specified point in the track (presumably music) with a location in the composition. See UnderScore_ for a visualization of what this is doing to the music track. .. _UnderScore: http://vis.berkeley.edu/papers/underscore/ :param track: Track to align in the composition :type track: :py:class:`radiotool.composer.Track` :param float comp_cue: Location in composition to align music cue (in seconds) :param float song_cue: Location in the music track to align with the composition cue (in seconds) :param float duration: Duration of music after the song cue before the music starts to fade out (in seconds) :param float padding_before: Duration of music playing softly before the music cue/composition cue (in seconds) :param float padding_after: Duration of music playing softly after the music cue/composition cue (in seconds)
[ "Add", "a", "music", "cue", "to", "the", "composition", ".", "This", "doesn", "t", "do", "any", "audio", "analysis", "it", "just", "aligns", "a", "specified", "point", "in", "the", "track", "(", "presumably", "music", ")", "with", "a", "location", "in", ...
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L419-L475
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.build
def build(self, track_list=None, adjust_dynamics=False, min_length=None, channels=None): """ Create a numpy array from the composition. :param track_list: List of tracks to include in composition generation (``None`` means all tracks will be used) :type track_list: list of :py:class:`radiotool.composer.Track` :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics. Will document later. """ if track_list is None: track_list = self.tracks if channels is None: channels = self.channels parts = {} starts = {} # for universal volume adjustment all_frames = np.array([]) song_frames = np.array([]) speech_frames = np.array([]) longest_part = max([x.comp_location + x.duration for x in self.segments]) if len(self.dynamics) > 0: longest_part = max((longest_part, max([x.comp_location + x.duration for x in self.dynamics]))) for track_idx, track in enumerate(track_list): segments = sorted([v for v in self.segments if v.track == track], key=lambda k: k.comp_location + k.duration) dyns = sorted([d for d in self.dynamics if d.track == track], key=lambda k: k.comp_location) if len(segments) > 0: start_loc = min([x.comp_location for x in segments]) end_loc = max([x.comp_location + x.duration for x in segments]) if len(dyns) > 0: start_loc = min((start_loc, min([d.comp_location for d in dyns]))) end_loc = max((end_loc, max([d.comp_location + d.duration for d in dyns]))) starts[track] = start_loc parts[track] = np.zeros((end_loc - start_loc, channels)) for s in segments: frames = s.get_frames(channels=channels).\ reshape(-1, channels) # for universal volume adjustment if adjust_dynamics: all_frames = np.append(all_frames, self._remove_end_silence(frames.flatten())) if isinstance(track, Song): song_frames = np.append(song_frames, self._remove_end_silence(frames.flatten())) elif isinstance(track, Speech): speech_frames = np.append(speech_frames, self._remove_end_silence(frames.flatten())) 
parts[track][s.comp_location - start_loc: s.comp_location - start_loc + s.duration, :] = frames for d in dyns: vol_frames = d.to_array(channels) parts[track][d.comp_location - start_loc : d.comp_location - start_loc + d.duration, :] *= vol_frames if adjust_dynamics: total_energy = RMS_energy(all_frames) song_energy = RMS_energy(song_frames) speech_energy = RMS_energy(speech_frames) # dyn_adj = 0.10 / total_energy # dyn_adj = speech_energy / sqrt(song_energy) * 5 if adjust_dynamics: if not np.isnan(speech_energy) and not np.isnan(song_energy): dyn_adj = sqrt(speech_energy / song_energy) * 1.15 else: dyn_adj = 1 else: dyn_adj = 1 if longest_part < min_length: longest_part = min_length out = np.zeros((longest_part, channels)) for track, part in parts.iteritems(): out[starts[track]:starts[track] + len(part)] += part return out
python
def build(self, track_list=None, adjust_dynamics=False, min_length=None, channels=None): """ Create a numpy array from the composition. :param track_list: List of tracks to include in composition generation (``None`` means all tracks will be used) :type track_list: list of :py:class:`radiotool.composer.Track` :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics. Will document later. """ if track_list is None: track_list = self.tracks if channels is None: channels = self.channels parts = {} starts = {} # for universal volume adjustment all_frames = np.array([]) song_frames = np.array([]) speech_frames = np.array([]) longest_part = max([x.comp_location + x.duration for x in self.segments]) if len(self.dynamics) > 0: longest_part = max((longest_part, max([x.comp_location + x.duration for x in self.dynamics]))) for track_idx, track in enumerate(track_list): segments = sorted([v for v in self.segments if v.track == track], key=lambda k: k.comp_location + k.duration) dyns = sorted([d for d in self.dynamics if d.track == track], key=lambda k: k.comp_location) if len(segments) > 0: start_loc = min([x.comp_location for x in segments]) end_loc = max([x.comp_location + x.duration for x in segments]) if len(dyns) > 0: start_loc = min((start_loc, min([d.comp_location for d in dyns]))) end_loc = max((end_loc, max([d.comp_location + d.duration for d in dyns]))) starts[track] = start_loc parts[track] = np.zeros((end_loc - start_loc, channels)) for s in segments: frames = s.get_frames(channels=channels).\ reshape(-1, channels) # for universal volume adjustment if adjust_dynamics: all_frames = np.append(all_frames, self._remove_end_silence(frames.flatten())) if isinstance(track, Song): song_frames = np.append(song_frames, self._remove_end_silence(frames.flatten())) elif isinstance(track, Speech): speech_frames = np.append(speech_frames, self._remove_end_silence(frames.flatten())) 
parts[track][s.comp_location - start_loc: s.comp_location - start_loc + s.duration, :] = frames for d in dyns: vol_frames = d.to_array(channels) parts[track][d.comp_location - start_loc : d.comp_location - start_loc + d.duration, :] *= vol_frames if adjust_dynamics: total_energy = RMS_energy(all_frames) song_energy = RMS_energy(song_frames) speech_energy = RMS_energy(speech_frames) # dyn_adj = 0.10 / total_energy # dyn_adj = speech_energy / sqrt(song_energy) * 5 if adjust_dynamics: if not np.isnan(speech_energy) and not np.isnan(song_energy): dyn_adj = sqrt(speech_energy / song_energy) * 1.15 else: dyn_adj = 1 else: dyn_adj = 1 if longest_part < min_length: longest_part = min_length out = np.zeros((longest_part, channels)) for track, part in parts.iteritems(): out[starts[track]:starts[track] + len(part)] += part return out
[ "def", "build", "(", "self", ",", "track_list", "=", "None", ",", "adjust_dynamics", "=", "False", ",", "min_length", "=", "None", ",", "channels", "=", "None", ")", ":", "if", "track_list", "is", "None", ":", "track_list", "=", "self", ".", "tracks", ...
Create a numpy array from the composition. :param track_list: List of tracks to include in composition generation (``None`` means all tracks will be used) :type track_list: list of :py:class:`radiotool.composer.Track` :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics. Will document later.
[ "Create", "a", "numpy", "array", "from", "the", "composition", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L523-L622
ucbvislab/radiotool
radiotool/composer/composition.py
Composition.export
def export(self, **kwargs): """ Generate audio file from composition. :param str. filename: Output filename (no extension) :param str. filetype: Output file type (only .wav supported for now) :param integer samplerate: Sample rate of output audio :param integer channels: Channels in output audio, if different than originally specified :param bool. separate_tracks: Also generate audio file for each track in composition :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics (will document later) """ # get optional args filename = kwargs.pop('filename', 'out') filetype = kwargs.pop('filetype', 'wav') adjust_dynamics = kwargs.pop('adjust_dynamics', False) samplerate = kwargs.pop('samplerate', None) channels = kwargs.pop('channels', self.channels) separate_tracks = kwargs.pop('separate_tracks', False) min_length = kwargs.pop('min_length', None) if samplerate is None: samplerate = np.min([track.samplerate for track in self.tracks]) encoding = 'pcm16' to_mp3 = False if filetype == 'ogg': encoding = 'vorbis' elif filetype == 'mp3': filetype = 'wav' to_mp3 = True if separate_tracks: # build the separate parts of the composition if desired for track in self.tracks: out = self.build(track_list=[track], adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_file = Sndfile("%s-%s.%s" % (filename, track.name, filetype), 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() # always build the complete composition out = self.build(adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_filename = "%s.%s" % (filename, filetype) out_file = Sndfile(out_filename, 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() if LIBXMP and filetype == "wav": xmp = libxmp.XMPMeta() ns = libxmp.consts.XMP_NS_DM p = 
xmp.get_prefix_for_namespace(ns) xpath = p + 'Tracks' xmp.append_array_item(ns, xpath, None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xpath += '[1]/' + p xmp.set_property(ns, xpath + "trackName", "CuePoint Markers") xmp.set_property(ns, xpath + "trackType", "Cue") xmp.set_property(ns, xpath + "frameRate", "f%d" % samplerate) for i, lab in enumerate(self.labels): xmp.append_array_item(ns, xpath + "markers", None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xmp.set_property(ns, xpath + "markers[%d]/%sname" % (i + 1, p), lab.name) xmp.set_property(ns, xpath + "markers[%d]/%sstartTime" % (i + 1, p), str(lab.sample(samplerate))) xmpfile = libxmp.XMPFiles(file_path=out_filename, open_forupdate=True) if xmpfile.can_put_xmp(xmp): xmpfile.put_xmp(xmp) xmpfile.close_file() if to_mp3: wav_to_mp3(out_filename, delete_wav=True) return out
python
def export(self, **kwargs): """ Generate audio file from composition. :param str. filename: Output filename (no extension) :param str. filetype: Output file type (only .wav supported for now) :param integer samplerate: Sample rate of output audio :param integer channels: Channels in output audio, if different than originally specified :param bool. separate_tracks: Also generate audio file for each track in composition :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics (will document later) """ # get optional args filename = kwargs.pop('filename', 'out') filetype = kwargs.pop('filetype', 'wav') adjust_dynamics = kwargs.pop('adjust_dynamics', False) samplerate = kwargs.pop('samplerate', None) channels = kwargs.pop('channels', self.channels) separate_tracks = kwargs.pop('separate_tracks', False) min_length = kwargs.pop('min_length', None) if samplerate is None: samplerate = np.min([track.samplerate for track in self.tracks]) encoding = 'pcm16' to_mp3 = False if filetype == 'ogg': encoding = 'vorbis' elif filetype == 'mp3': filetype = 'wav' to_mp3 = True if separate_tracks: # build the separate parts of the composition if desired for track in self.tracks: out = self.build(track_list=[track], adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_file = Sndfile("%s-%s.%s" % (filename, track.name, filetype), 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() # always build the complete composition out = self.build(adjust_dynamics=adjust_dynamics, min_length=min_length, channels=channels) out_filename = "%s.%s" % (filename, filetype) out_file = Sndfile(out_filename, 'w', Format(filetype, encoding=encoding), channels, samplerate) out_file.write_frames(out) out_file.close() if LIBXMP and filetype == "wav": xmp = libxmp.XMPMeta() ns = libxmp.consts.XMP_NS_DM p = 
xmp.get_prefix_for_namespace(ns) xpath = p + 'Tracks' xmp.append_array_item(ns, xpath, None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xpath += '[1]/' + p xmp.set_property(ns, xpath + "trackName", "CuePoint Markers") xmp.set_property(ns, xpath + "trackType", "Cue") xmp.set_property(ns, xpath + "frameRate", "f%d" % samplerate) for i, lab in enumerate(self.labels): xmp.append_array_item(ns, xpath + "markers", None, array_options={"prop_value_is_array": True}, prop_value_is_struct=True) xmp.set_property(ns, xpath + "markers[%d]/%sname" % (i + 1, p), lab.name) xmp.set_property(ns, xpath + "markers[%d]/%sstartTime" % (i + 1, p), str(lab.sample(samplerate))) xmpfile = libxmp.XMPFiles(file_path=out_filename, open_forupdate=True) if xmpfile.can_put_xmp(xmp): xmpfile.put_xmp(xmp) xmpfile.close_file() if to_mp3: wav_to_mp3(out_filename, delete_wav=True) return out
[ "def", "export", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# get optional args", "filename", "=", "kwargs", ".", "pop", "(", "'filename'", ",", "'out'", ")", "filetype", "=", "kwargs", ".", "pop", "(", "'filetype'", ",", "'wav'", ")", "adjust_dyna...
Generate audio file from composition. :param str. filename: Output filename (no extension) :param str. filetype: Output file type (only .wav supported for now) :param integer samplerate: Sample rate of output audio :param integer channels: Channels in output audio, if different than originally specified :param bool. separate_tracks: Also generate audio file for each track in composition :param int min_length: Minimum length of output array (in frames). Will zero pad extra length. :param bool. adjust_dynamics: Automatically adjust dynamics (will document later)
[ "Generate", "audio", "file", "from", "composition", "." ]
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/composition.py#L624-L716
darkfeline/animanager
animanager/db/query/eptype.py
get_episode_types
def get_episode_types(db) -> Iterator[EpisodeType]: """Get all episode types.""" cur = db.cursor() cur.execute('SELECT id, name, prefix FROM episode_type') for type_id, name, prefix in cur: yield EpisodeType(type_id, name, prefix)
python
def get_episode_types(db) -> Iterator[EpisodeType]: """Get all episode types.""" cur = db.cursor() cur.execute('SELECT id, name, prefix FROM episode_type') for type_id, name, prefix in cur: yield EpisodeType(type_id, name, prefix)
[ "def", "get_episode_types", "(", "db", ")", "->", "Iterator", "[", "EpisodeType", "]", ":", "cur", "=", "db", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'SELECT id, name, prefix FROM episode_type'", ")", "for", "type_id", ",", "name", ",", "prefix...
Get all episode types.
[ "Get", "all", "episode", "types", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/eptype.py#L32-L37
darkfeline/animanager
animanager/db/query/eptype.py
EpisodeTypes.get_epno
def get_epno(self, episode: Episode): """Return epno for an Episode instance. epno is a string formatted with the episode number and type, e.g., S1, T2. >>> x = EpisodeTypes([EpisodeType(1, 'foo', 'F')]) >>> ep = Episode(type=1, number=2) >>> x.get_epno(ep) 'F2' """ return '{}{}'.format(self[episode.type].prefix, episode.number)
python
def get_epno(self, episode: Episode): """Return epno for an Episode instance. epno is a string formatted with the episode number and type, e.g., S1, T2. >>> x = EpisodeTypes([EpisodeType(1, 'foo', 'F')]) >>> ep = Episode(type=1, number=2) >>> x.get_epno(ep) 'F2' """ return '{}{}'.format(self[episode.type].prefix, episode.number)
[ "def", "get_epno", "(", "self", ",", "episode", ":", "Episode", ")", ":", "return", "'{}{}'", ".", "format", "(", "self", "[", "episode", ".", "type", "]", ".", "prefix", ",", "episode", ".", "number", ")" ]
Return epno for an Episode instance. epno is a string formatted with the episode number and type, e.g., S1, T2. >>> x = EpisodeTypes([EpisodeType(1, 'foo', 'F')]) >>> ep = Episode(type=1, number=2) >>> x.get_epno(ep) 'F2'
[ "Return", "epno", "for", "an", "Episode", "instance", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/eptype.py#L83-L95
wglass/lighthouse
lighthouse/reporter.py
Reporter.on_discovery_update
def on_discovery_update(self, name, new_config): """ Once a Discovery is updated we update each associated Service to reset its up/down status so that the next iteration of the `check_loop` loop does the proper reporting again. """ for service in self.configurables[Service].values(): if service.discovery == name: service.reset_status()
python
def on_discovery_update(self, name, new_config): """ Once a Discovery is updated we update each associated Service to reset its up/down status so that the next iteration of the `check_loop` loop does the proper reporting again. """ for service in self.configurables[Service].values(): if service.discovery == name: service.reset_status()
[ "def", "on_discovery_update", "(", "self", ",", "name", ",", "new_config", ")", ":", "for", "service", "in", "self", ".", "configurables", "[", "Service", "]", ".", "values", "(", ")", ":", "if", "service", ".", "discovery", "==", "name", ":", "service",...
Once a Discovery is updated we update each associated Service to reset its up/down status so that the next iteration of the `check_loop` loop does the proper reporting again.
[ "Once", "a", "Discovery", "is", "updated", "we", "update", "each", "associated", "Service", "to", "reset", "its", "up", "/", "down", "status", "so", "that", "the", "next", "iteration", "of", "the", "check_loop", "loop", "does", "the", "proper", "reporting", ...
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/reporter.py#L32-L40
wglass/lighthouse
lighthouse/reporter.py
Reporter.on_service_add
def on_service_add(self, service): """ When a new service is added, a worker thread is launched to periodically run the checks for that service. """ self.launch_thread(service.name, self.check_loop, service)
python
def on_service_add(self, service): """ When a new service is added, a worker thread is launched to periodically run the checks for that service. """ self.launch_thread(service.name, self.check_loop, service)
[ "def", "on_service_add", "(", "self", ",", "service", ")", ":", "self", ".", "launch_thread", "(", "service", ".", "name", ",", "self", ".", "check_loop", ",", "service", ")" ]
When a new service is added, a worker thread is launched to periodically run the checks for that service.
[ "When", "a", "new", "service", "is", "added", "a", "worker", "thread", "is", "launched", "to", "periodically", "run", "the", "checks", "for", "that", "service", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/reporter.py#L49-L54
wglass/lighthouse
lighthouse/reporter.py
Reporter.check_loop
def check_loop(self, service): """ While the reporter is not shutting down and the service being checked is present in the reporter's configuration, this method will launch a job to run all of the service's checks and then pause for the configured interval. """ logger.info("Starting check loop for service '%s'", service.name) def handle_checks_result(f): try: came_up, went_down = f.result() except Exception: logger.exception("Error checking service '%s'", service.name) return if not came_up and not went_down: return discovery = self.configurables[Discovery][service.discovery] for port in came_up: logger.debug("Reporting %s, port %d up", service.name, port) discovery.report_up(service, port) for port in went_down: logger.debug("Reporting %s, port %d down", service.name, port) discovery.report_down(service, port) while ( service in self.configurables[Service].values() and not self.shutdown.is_set() ): self.work_pool.submit( self.run_checks, service ).add_done_callback( handle_checks_result ) logger.debug("sleeping for %s seconds", service.check_interval) wait_on_event(self.shutdown, timeout=service.check_interval)
python
def check_loop(self, service): """ While the reporter is not shutting down and the service being checked is present in the reporter's configuration, this method will launch a job to run all of the service's checks and then pause for the configured interval. """ logger.info("Starting check loop for service '%s'", service.name) def handle_checks_result(f): try: came_up, went_down = f.result() except Exception: logger.exception("Error checking service '%s'", service.name) return if not came_up and not went_down: return discovery = self.configurables[Discovery][service.discovery] for port in came_up: logger.debug("Reporting %s, port %d up", service.name, port) discovery.report_up(service, port) for port in went_down: logger.debug("Reporting %s, port %d down", service.name, port) discovery.report_down(service, port) while ( service in self.configurables[Service].values() and not self.shutdown.is_set() ): self.work_pool.submit( self.run_checks, service ).add_done_callback( handle_checks_result ) logger.debug("sleeping for %s seconds", service.check_interval) wait_on_event(self.shutdown, timeout=service.check_interval)
[ "def", "check_loop", "(", "self", ",", "service", ")", ":", "logger", ".", "info", "(", "\"Starting check loop for service '%s'\"", ",", "service", ".", "name", ")", "def", "handle_checks_result", "(", "f", ")", ":", "try", ":", "came_up", ",", "went_down", ...
While the reporter is not shutting down and the service being checked is present in the reporter's configuration, this method will launch a job to run all of the service's checks and then pause for the configured interval.
[ "While", "the", "reporter", "is", "not", "shutting", "down", "and", "the", "service", "being", "checked", "is", "present", "in", "the", "reporter", "s", "configuration", "this", "method", "will", "launch", "a", "job", "to", "run", "all", "of", "the", "serv...
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/reporter.py#L62-L101
wglass/lighthouse
lighthouse/reporter.py
Reporter.run_checks
def run_checks(self, service): """ Runs each check for the service and reports to the service's discovery method based on the results. If all checks pass and the service's present node was previously reported as down, the present node is reported as up. Conversely, if any of the checks fail and the service's present node was previously reported as up, the present node will be reported as down. """ logger.debug("Running checks. (%s)", service.name) if service.discovery not in self.configurables[Discovery]: logger.warn( "Service %s is using Unknown/unavailable discovery '%s'.", service.name, service.discovery ) return set(), set() service.update_ports() came_up, went_down = service.run_checks() return came_up, went_down
python
def run_checks(self, service): """ Runs each check for the service and reports to the service's discovery method based on the results. If all checks pass and the service's present node was previously reported as down, the present node is reported as up. Conversely, if any of the checks fail and the service's present node was previously reported as up, the present node will be reported as down. """ logger.debug("Running checks. (%s)", service.name) if service.discovery not in self.configurables[Discovery]: logger.warn( "Service %s is using Unknown/unavailable discovery '%s'.", service.name, service.discovery ) return set(), set() service.update_ports() came_up, went_down = service.run_checks() return came_up, went_down
[ "def", "run_checks", "(", "self", ",", "service", ")", ":", "logger", ".", "debug", "(", "\"Running checks. (%s)\"", ",", "service", ".", "name", ")", "if", "service", ".", "discovery", "not", "in", "self", ".", "configurables", "[", "Discovery", "]", ":",...
Runs each check for the service and reports to the service's discovery method based on the results. If all checks pass and the service's present node was previously reported as down, the present node is reported as up. Conversely, if any of the checks fail and the service's present node was previously reported as up, the present node will be reported as down.
[ "Runs", "each", "check", "for", "the", "service", "and", "reports", "to", "the", "service", "s", "discovery", "method", "based", "on", "the", "results", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/reporter.py#L103-L126
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient._do_api_call
def _do_api_call(self, method, data): """ Convenience method to carry out a standard API call against the Petfinder API. :param basestring method: The API method name to call. :param dict data: Key/value parameters to send to the API method. This varies based on the method. :raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError`` sub-classes, depending on what went wrong. :rtype: lxml.etree._Element :returns: The parsed document. """ # Developer API keys, auth tokens, and other standard, required args. data.update({ "key": self.api_key, # No API methods currently use this, but we're ready for it, # should that change. "token": self.api_auth_token, }) # Ends up being a full URL+path. url = "%s%s" % (self.endpoint, method) # Bombs away! response = requests.get(url, params=data) # Parse and return an ElementTree instance containing the document. root = etree.fromstring(response.content) # If this is anything but '100', it's an error. status_code = root.find("header/status/code").text # If this comes back as non-None, we know we've got problems. exc_class = _get_exception_class_from_status_code(status_code) if exc_class: # Sheet, sheet, errar! Raise the appropriate error, and pass # the accompanying error message as the exception message. error_message = root.find("header/status/message").text #noinspection PyCallingNonCallable raise exc_class(error_message) return root
python
def _do_api_call(self, method, data): """ Convenience method to carry out a standard API call against the Petfinder API. :param basestring method: The API method name to call. :param dict data: Key/value parameters to send to the API method. This varies based on the method. :raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError`` sub-classes, depending on what went wrong. :rtype: lxml.etree._Element :returns: The parsed document. """ # Developer API keys, auth tokens, and other standard, required args. data.update({ "key": self.api_key, # No API methods currently use this, but we're ready for it, # should that change. "token": self.api_auth_token, }) # Ends up being a full URL+path. url = "%s%s" % (self.endpoint, method) # Bombs away! response = requests.get(url, params=data) # Parse and return an ElementTree instance containing the document. root = etree.fromstring(response.content) # If this is anything but '100', it's an error. status_code = root.find("header/status/code").text # If this comes back as non-None, we know we've got problems. exc_class = _get_exception_class_from_status_code(status_code) if exc_class: # Sheet, sheet, errar! Raise the appropriate error, and pass # the accompanying error message as the exception message. error_message = root.find("header/status/message").text #noinspection PyCallingNonCallable raise exc_class(error_message) return root
[ "def", "_do_api_call", "(", "self", ",", "method", ",", "data", ")", ":", "# Developer API keys, auth tokens, and other standard, required args.", "data", ".", "update", "(", "{", "\"key\"", ":", "self", ".", "api_key", ",", "# No API methods currently use this, but we're...
Convenience method to carry out a standard API call against the Petfinder API. :param basestring method: The API method name to call. :param dict data: Key/value parameters to send to the API method. This varies based on the method. :raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError`` sub-classes, depending on what went wrong. :rtype: lxml.etree._Element :returns: The parsed document.
[ "Convenience", "method", "to", "carry", "out", "a", "standard", "API", "call", "against", "the", "Petfinder", "API", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L48-L89
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient._do_autopaginating_api_call
def _do_autopaginating_api_call(self, method, kwargs, parser_func): """ Given an API method, the arguments passed to it, and a function to hand parsing off to, loop through the record sets in the API call until all records have been yielded. This is mostly done this way to reduce duplication through the various API methods. :param basestring method: The API method on the endpoint. :param dict kwargs: The kwargs from the top-level API method. :param callable parser_func: A callable that is used for parsing the output from the API call. :rtype: generator :returns: Returns a generator that may be returned by the top-level API method. """ # Used to determine whether to fail noisily if no results are returned. has_records = {"has_records": False} while True: try: root = self._do_api_call(method, kwargs) except RecordDoesNotExistError: if not has_records["has_records"]: # No records seen yet, this really is empty. raise # We've seen some records come through. We must have hit the # end of the result set. Finish up silently. return # This is used to track whether this go around the call->parse # loop yielded any records. records_returned_by_this_loop = False for record in parser_func(root, has_records): yield record # We saw a record, mark our tracker accordingly. records_returned_by_this_loop = True # There is a really fun bug in the Petfinder API with # shelter.getpets where an offset is returned with no pets, # causing an infinite loop. if not records_returned_by_this_loop: return # This will determine at what offset we start the next query. last_offset = root.find("lastOffset").text kwargs["offset"] = last_offset
python
def _do_autopaginating_api_call(self, method, kwargs, parser_func): """ Given an API method, the arguments passed to it, and a function to hand parsing off to, loop through the record sets in the API call until all records have been yielded. This is mostly done this way to reduce duplication through the various API methods. :param basestring method: The API method on the endpoint. :param dict kwargs: The kwargs from the top-level API method. :param callable parser_func: A callable that is used for parsing the output from the API call. :rtype: generator :returns: Returns a generator that may be returned by the top-level API method. """ # Used to determine whether to fail noisily if no results are returned. has_records = {"has_records": False} while True: try: root = self._do_api_call(method, kwargs) except RecordDoesNotExistError: if not has_records["has_records"]: # No records seen yet, this really is empty. raise # We've seen some records come through. We must have hit the # end of the result set. Finish up silently. return # This is used to track whether this go around the call->parse # loop yielded any records. records_returned_by_this_loop = False for record in parser_func(root, has_records): yield record # We saw a record, mark our tracker accordingly. records_returned_by_this_loop = True # There is a really fun bug in the Petfinder API with # shelter.getpets where an offset is returned with no pets, # causing an infinite loop. if not records_returned_by_this_loop: return # This will determine at what offset we start the next query. last_offset = root.find("lastOffset").text kwargs["offset"] = last_offset
[ "def", "_do_autopaginating_api_call", "(", "self", ",", "method", ",", "kwargs", ",", "parser_func", ")", ":", "# Used to determine whether to fail noisily if no results are returned.", "has_records", "=", "{", "\"has_records\"", ":", "False", "}", "while", "True", ":", ...
Given an API method, the arguments passed to it, and a function to hand parsing off to, loop through the record sets in the API call until all records have been yielded. This is mostly done this way to reduce duplication through the various API methods. :param basestring method: The API method on the endpoint. :param dict kwargs: The kwargs from the top-level API method. :param callable parser_func: A callable that is used for parsing the output from the API call. :rtype: generator :returns: Returns a generator that may be returned by the top-level API method.
[ "Given", "an", "API", "method", "the", "arguments", "passed", "to", "it", "and", "a", "function", "to", "hand", "parsing", "off", "to", "loop", "through", "the", "record", "sets", "in", "the", "API", "call", "until", "all", "records", "have", "been", "yi...
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L91-L137
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient._parse_datetime_str
def _parse_datetime_str(self, dtime_str): """ Given a standard datetime string (as seen throughout the Petfinder API), spit out the corresponding UTC datetime instance. :param str dtime_str: The datetime string to parse. :rtype: datetime.datetime :returns: The parsed datetime. """ return datetime.datetime.strptime( dtime_str, "%Y-%m-%dT%H:%M:%SZ" ).replace(tzinfo=pytz.utc)
python
def _parse_datetime_str(self, dtime_str): """ Given a standard datetime string (as seen throughout the Petfinder API), spit out the corresponding UTC datetime instance. :param str dtime_str: The datetime string to parse. :rtype: datetime.datetime :returns: The parsed datetime. """ return datetime.datetime.strptime( dtime_str, "%Y-%m-%dT%H:%M:%SZ" ).replace(tzinfo=pytz.utc)
[ "def", "_parse_datetime_str", "(", "self", ",", "dtime_str", ")", ":", "return", "datetime", ".", "datetime", ".", "strptime", "(", "dtime_str", ",", "\"%Y-%m-%dT%H:%M:%SZ\"", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")" ]
Given a standard datetime string (as seen throughout the Petfinder API), spit out the corresponding UTC datetime instance. :param str dtime_str: The datetime string to parse. :rtype: datetime.datetime :returns: The parsed datetime.
[ "Given", "a", "standard", "datetime", "string", "(", "as", "seen", "throughout", "the", "Petfinder", "API", ")", "spit", "out", "the", "corresponding", "UTC", "datetime", "instance", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L139-L152
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient._parse_pet_record
def _parse_pet_record(self, root): """ Given a <pet> Element from a pet.get or pet.getRandom response, pluck out the pet record. :param lxml.etree._Element root: A <pet> tag Element. :rtype: dict :returns: An assembled pet record. """ record = { "breeds": [], "photos": [], "options": [], "contact": {}, } # These fields can just have their keys and text values copied # straight over to the dict record. straight_copy_fields = [ "id", "shelterId", "shelterPetId", "name", "animal", "mix", "age", "sex", "size", "description", "status", "lastUpdate", ] for field in straight_copy_fields: # For each field, just take the tag name and the text value to # copy to the record as key/val. node = root.find(field) if node is None: print("SKIPPING %s" % field) continue record[field] = node.text # Pets can be of multiple breeds. Find all of the <breed> tags and # stuff their text (breed names) into the record. for breed in root.findall("breeds/breed"): record["breeds"].append(breed.text) # We'll deviate slightly from the XML format here, and simply append # each photo entry to the record's "photo" key. for photo in root.findall("media/photos/photo"): photo = { "id": photo.get("id"), "size": photo.get("size"), "url": photo.text, } record["photos"].append(photo) # Has shots, no cats, altered, etc. for option in root.findall("options/option"): record["options"].append(option.text) # <contact> tag has some sub-tags that can be straight copied over. contact = root.find("contact") if contact is not None: for field in contact: record["contact"][field.tag] = field.text # Parse lastUpdate so we have a useable datetime.datime object. record["lastUpdate"] = self._parse_datetime_str(record["lastUpdate"]) return record
python
def _parse_pet_record(self, root): """ Given a <pet> Element from a pet.get or pet.getRandom response, pluck out the pet record. :param lxml.etree._Element root: A <pet> tag Element. :rtype: dict :returns: An assembled pet record. """ record = { "breeds": [], "photos": [], "options": [], "contact": {}, } # These fields can just have their keys and text values copied # straight over to the dict record. straight_copy_fields = [ "id", "shelterId", "shelterPetId", "name", "animal", "mix", "age", "sex", "size", "description", "status", "lastUpdate", ] for field in straight_copy_fields: # For each field, just take the tag name and the text value to # copy to the record as key/val. node = root.find(field) if node is None: print("SKIPPING %s" % field) continue record[field] = node.text # Pets can be of multiple breeds. Find all of the <breed> tags and # stuff their text (breed names) into the record. for breed in root.findall("breeds/breed"): record["breeds"].append(breed.text) # We'll deviate slightly from the XML format here, and simply append # each photo entry to the record's "photo" key. for photo in root.findall("media/photos/photo"): photo = { "id": photo.get("id"), "size": photo.get("size"), "url": photo.text, } record["photos"].append(photo) # Has shots, no cats, altered, etc. for option in root.findall("options/option"): record["options"].append(option.text) # <contact> tag has some sub-tags that can be straight copied over. contact = root.find("contact") if contact is not None: for field in contact: record["contact"][field.tag] = field.text # Parse lastUpdate so we have a useable datetime.datime object. record["lastUpdate"] = self._parse_datetime_str(record["lastUpdate"]) return record
[ "def", "_parse_pet_record", "(", "self", ",", "root", ")", ":", "record", "=", "{", "\"breeds\"", ":", "[", "]", ",", "\"photos\"", ":", "[", "]", ",", "\"options\"", ":", "[", "]", ",", "\"contact\"", ":", "{", "}", ",", "}", "# These fields can just ...
Given a <pet> Element from a pet.get or pet.getRandom response, pluck out the pet record. :param lxml.etree._Element root: A <pet> tag Element. :rtype: dict :returns: An assembled pet record.
[ "Given", "a", "<pet", ">", "Element", "from", "a", "pet", ".", "get", "or", "pet", ".", "getRandom", "response", "pluck", "out", "the", "pet", "record", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L154-L214
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.breed_list
def breed_list(self, **kwargs): """ breed.list wrapper. Returns a list of breed name strings. :rtype: list :returns: A list of breed names. """ root = self._do_api_call("breed.list", kwargs) breeds = [] for breed in root.find("breeds"): breeds.append(breed.text) return breeds
python
def breed_list(self, **kwargs): """ breed.list wrapper. Returns a list of breed name strings. :rtype: list :returns: A list of breed names. """ root = self._do_api_call("breed.list", kwargs) breeds = [] for breed in root.find("breeds"): breeds.append(breed.text) return breeds
[ "def", "breed_list", "(", "self", ",", "*", "*", "kwargs", ")", ":", "root", "=", "self", ".", "_do_api_call", "(", "\"breed.list\"", ",", "kwargs", ")", "breeds", "=", "[", "]", "for", "breed", "in", "root", ".", "find", "(", "\"breeds\"", ")", ":",...
breed.list wrapper. Returns a list of breed name strings. :rtype: list :returns: A list of breed names.
[ "breed", ".", "list", "wrapper", ".", "Returns", "a", "list", "of", "breed", "name", "strings", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L216-L229
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.pet_get
def pet_get(self, **kwargs): """ pet.get wrapper. Returns a record dict for the requested pet. :rtype: dict :returns: The pet's record dict. """ root = self._do_api_call("pet.get", kwargs) return self._parse_pet_record(root.find("pet"))
python
def pet_get(self, **kwargs): """ pet.get wrapper. Returns a record dict for the requested pet. :rtype: dict :returns: The pet's record dict. """ root = self._do_api_call("pet.get", kwargs) return self._parse_pet_record(root.find("pet"))
[ "def", "pet_get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "root", "=", "self", ".", "_do_api_call", "(", "\"pet.get\"", ",", "kwargs", ")", "return", "self", ".", "_parse_pet_record", "(", "root", ".", "find", "(", "\"pet\"", ")", ")" ]
pet.get wrapper. Returns a record dict for the requested pet. :rtype: dict :returns: The pet's record dict.
[ "pet", ".", "get", "wrapper", ".", "Returns", "a", "record", "dict", "for", "the", "requested", "pet", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L231-L240
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.pet_getrandom
def pet_getrandom(self, **kwargs): """ pet.getRandom wrapper. Returns a record dict or Petfinder ID for a random pet. :rtype: dict or str :returns: A dict of pet data if ``output`` is ``'basic'`` or ``'full'``, and a string if ``output`` is ``'id'``. """ root = self._do_api_call("pet.getRandom", kwargs) output_brevity = kwargs.get("output", "id") if output_brevity == "id": return root.find("petIds/id").text else: return self._parse_pet_record(root.find("pet"))
python
def pet_getrandom(self, **kwargs): """ pet.getRandom wrapper. Returns a record dict or Petfinder ID for a random pet. :rtype: dict or str :returns: A dict of pet data if ``output`` is ``'basic'`` or ``'full'``, and a string if ``output`` is ``'id'``. """ root = self._do_api_call("pet.getRandom", kwargs) output_brevity = kwargs.get("output", "id") if output_brevity == "id": return root.find("petIds/id").text else: return self._parse_pet_record(root.find("pet"))
[ "def", "pet_getrandom", "(", "self", ",", "*", "*", "kwargs", ")", ":", "root", "=", "self", ".", "_do_api_call", "(", "\"pet.getRandom\"", ",", "kwargs", ")", "output_brevity", "=", "kwargs", ".", "get", "(", "\"output\"", ",", "\"id\"", ")", "if", "out...
pet.getRandom wrapper. Returns a record dict or Petfinder ID for a random pet. :rtype: dict or str :returns: A dict of pet data if ``output`` is ``'basic'`` or ``'full'``, and a string if ``output`` is ``'id'``.
[ "pet", ".", "getRandom", "wrapper", ".", "Returns", "a", "record", "dict", "or", "Petfinder", "ID", "for", "a", "random", "pet", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L242-L258
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.pet_find
def pet_find(self, **kwargs): """ pet.find wrapper. Returns a generator of pet record dicts matching your search criteria. :rtype: generator :returns: A generator of pet record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def pet_find_parser(root, has_records): """ The parser that is used with the ``_do_autopaginating_api_call`` method for auto-pagination. :param lxml.etree._Element root: The root Element in the response. :param dict has_records: A dict that we track the loop state in. dicts are passed by references, which is how this works. """ for pet in root.findall("pets/pet"): # This is changed in the original record, since it's passed # by reference. has_records["has_records"] = True yield self._parse_pet_record(pet) return self._do_autopaginating_api_call( "pet.find", kwargs, pet_find_parser )
python
def pet_find(self, **kwargs): """ pet.find wrapper. Returns a generator of pet record dicts matching your search criteria. :rtype: generator :returns: A generator of pet record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def pet_find_parser(root, has_records): """ The parser that is used with the ``_do_autopaginating_api_call`` method for auto-pagination. :param lxml.etree._Element root: The root Element in the response. :param dict has_records: A dict that we track the loop state in. dicts are passed by references, which is how this works. """ for pet in root.findall("pets/pet"): # This is changed in the original record, since it's passed # by reference. has_records["has_records"] = True yield self._parse_pet_record(pet) return self._do_autopaginating_api_call( "pet.find", kwargs, pet_find_parser )
[ "def", "pet_find", "(", "self", ",", "*", "*", "kwargs", ")", ":", "def", "pet_find_parser", "(", "root", ",", "has_records", ")", ":", "\"\"\"\n The parser that is used with the ``_do_autopaginating_api_call``\n method for auto-pagination.\n\n :p...
pet.find wrapper. Returns a generator of pet record dicts matching your search criteria. :rtype: generator :returns: A generator of pet record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive.
[ "pet", ".", "find", "wrapper", ".", "Returns", "a", "generator", "of", "pet", "record", "dicts", "matching", "your", "search", "criteria", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L260-L289
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.shelter_find
def shelter_find(self, **kwargs): """ shelter.find wrapper. Returns a generator of shelter record dicts matching your search criteria. :rtype: generator :returns: A generator of shelter record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def shelter_find_parser(root, has_records): """ The parser that is used with the ``_do_autopaginating_api_call`` method for auto-pagination. :param lxml.etree._Element root: The root Element in the response. :param dict has_records: A dict that we track the loop state in. dicts are passed by references, which is how this works. """ for shelter in root.find("shelters"): has_records["has_records"] = True record = {} for field in shelter: record[field.tag] = field.text yield record return self._do_autopaginating_api_call( "shelter.find", kwargs, shelter_find_parser )
python
def shelter_find(self, **kwargs): """ shelter.find wrapper. Returns a generator of shelter record dicts matching your search criteria. :rtype: generator :returns: A generator of shelter record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def shelter_find_parser(root, has_records): """ The parser that is used with the ``_do_autopaginating_api_call`` method for auto-pagination. :param lxml.etree._Element root: The root Element in the response. :param dict has_records: A dict that we track the loop state in. dicts are passed by references, which is how this works. """ for shelter in root.find("shelters"): has_records["has_records"] = True record = {} for field in shelter: record[field.tag] = field.text yield record return self._do_autopaginating_api_call( "shelter.find", kwargs, shelter_find_parser )
[ "def", "shelter_find", "(", "self", ",", "*", "*", "kwargs", ")", ":", "def", "shelter_find_parser", "(", "root", ",", "has_records", ")", ":", "\"\"\"\n The parser that is used with the ``_do_autopaginating_api_call``\n method for auto-pagination.\n\n ...
shelter.find wrapper. Returns a generator of shelter record dicts matching your search criteria. :rtype: generator :returns: A generator of shelter record dicts. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive.
[ "shelter", ".", "find", "wrapper", ".", "Returns", "a", "generator", "of", "shelter", "record", "dicts", "matching", "your", "search", "criteria", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L291-L321
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.shelter_get
def shelter_get(self, **kwargs): """ shelter.get wrapper. Given a shelter ID, retrieve its details in dict form. :rtype: dict :returns: The shelter's details. """ root = self._do_api_call("shelter.get", kwargs) shelter = root.find("shelter") for field in shelter: record = {} for field in shelter: record[field.tag] = field.text return record
python
def shelter_get(self, **kwargs): """ shelter.get wrapper. Given a shelter ID, retrieve its details in dict form. :rtype: dict :returns: The shelter's details. """ root = self._do_api_call("shelter.get", kwargs) shelter = root.find("shelter") for field in shelter: record = {} for field in shelter: record[field.tag] = field.text return record
[ "def", "shelter_get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "root", "=", "self", ".", "_do_api_call", "(", "\"shelter.get\"", ",", "kwargs", ")", "shelter", "=", "root", ".", "find", "(", "\"shelter\"", ")", "for", "field", "in", "shelter", "...
shelter.get wrapper. Given a shelter ID, retrieve its details in dict form. :rtype: dict :returns: The shelter's details.
[ "shelter", ".", "get", "wrapper", ".", "Given", "a", "shelter", "ID", "retrieve", "its", "details", "in", "dict", "form", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L323-L339
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.shelter_getpets
def shelter_getpets(self, **kwargs): """ shelter.getPets wrapper. Given a shelter ID, retrieve either a list of pet IDs (if ``output`` is ``'id'``), or a generator of pet record dicts (if ``output`` is ``'full'`` or ``'basic'``). :rtype: generator :returns: Either a generator of pet ID strings or pet record dicts, depending on the value of the ``output`` keyword. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def shelter_getpets_parser_ids(root, has_records): """ Parser for output=id. """ pet_ids = root.findall("petIds/id") for pet_id in pet_ids: yield pet_id.text def shelter_getpets_parser_records(root, has_records): """ Parser for output=full or output=basic. """ for pet in root.findall("pets/pet"): yield self._parse_pet_record(pet) # Depending on the output value, select the correct parser. if kwargs.get("output", "id") == "id": shelter_getpets_parser = shelter_getpets_parser_ids else: shelter_getpets_parser = shelter_getpets_parser_records return self._do_autopaginating_api_call( "shelter.getPets", kwargs, shelter_getpets_parser )
python
def shelter_getpets(self, **kwargs): """ shelter.getPets wrapper. Given a shelter ID, retrieve either a list of pet IDs (if ``output`` is ``'id'``), or a generator of pet record dicts (if ``output`` is ``'full'`` or ``'basic'``). :rtype: generator :returns: Either a generator of pet ID strings or pet record dicts, depending on the value of the ``output`` keyword. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive. """ def shelter_getpets_parser_ids(root, has_records): """ Parser for output=id. """ pet_ids = root.findall("petIds/id") for pet_id in pet_ids: yield pet_id.text def shelter_getpets_parser_records(root, has_records): """ Parser for output=full or output=basic. """ for pet in root.findall("pets/pet"): yield self._parse_pet_record(pet) # Depending on the output value, select the correct parser. if kwargs.get("output", "id") == "id": shelter_getpets_parser = shelter_getpets_parser_ids else: shelter_getpets_parser = shelter_getpets_parser_records return self._do_autopaginating_api_call( "shelter.getPets", kwargs, shelter_getpets_parser )
[ "def", "shelter_getpets", "(", "self", ",", "*", "*", "kwargs", ")", ":", "def", "shelter_getpets_parser_ids", "(", "root", ",", "has_records", ")", ":", "\"\"\"\n Parser for output=id.\n \"\"\"", "pet_ids", "=", "root", ".", "findall", "(", "\...
shelter.getPets wrapper. Given a shelter ID, retrieve either a list of pet IDs (if ``output`` is ``'id'``), or a generator of pet record dicts (if ``output`` is ``'full'`` or ``'basic'``). :rtype: generator :returns: Either a generator of pet ID strings or pet record dicts, depending on the value of the ``output`` keyword. :raises: :py:exc:`petfinder.exceptions.LimitExceeded` once you have reached the maximum number of records your credentials allow you to receive.
[ "shelter", ".", "getPets", "wrapper", ".", "Given", "a", "shelter", "ID", "retrieve", "either", "a", "list", "of", "pet", "IDs", "(", "if", "output", "is", "id", ")", "or", "a", "generator", "of", "pet", "record", "dicts", "(", "if", "output", "is", ...
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L341-L379
gtaylor/petfinder-api
petfinder/client.py
PetFinderClient.shelter_listbybreed
def shelter_listbybreed(self, **kwargs): """ shelter.listByBreed wrapper. Given a breed and an animal type, list the shelter IDs with pets of said breed. :rtype: generator :returns: A generator of shelter IDs that have breed matches. """ root = self._do_api_call("shelter.listByBreed", kwargs) shelter_ids = root.findall("shelterIds/id") for shelter_id in shelter_ids: yield shelter_id.text
python
def shelter_listbybreed(self, **kwargs): """ shelter.listByBreed wrapper. Given a breed and an animal type, list the shelter IDs with pets of said breed. :rtype: generator :returns: A generator of shelter IDs that have breed matches. """ root = self._do_api_call("shelter.listByBreed", kwargs) shelter_ids = root.findall("shelterIds/id") for shelter_id in shelter_ids: yield shelter_id.text
[ "def", "shelter_listbybreed", "(", "self", ",", "*", "*", "kwargs", ")", ":", "root", "=", "self", ".", "_do_api_call", "(", "\"shelter.listByBreed\"", ",", "kwargs", ")", "shelter_ids", "=", "root", ".", "findall", "(", "\"shelterIds/id\"", ")", "for", "she...
shelter.listByBreed wrapper. Given a breed and an animal type, list the shelter IDs with pets of said breed. :rtype: generator :returns: A generator of shelter IDs that have breed matches.
[ "shelter", ".", "listByBreed", "wrapper", ".", "Given", "a", "breed", "and", "an", "animal", "type", "list", "the", "shelter", "IDs", "with", "pets", "of", "said", "breed", "." ]
train
https://github.com/gtaylor/petfinder-api/blob/4f1bc76c276d537208c9b11f7c87282f6d2bb50d/petfinder/client.py#L381-L394
gbiggs/rtctree
rtctree/node.py
TreeNode.add_callback
def add_callback(self, event, cb, args=None): '''Add a callback to this node. Callbacks are called when the specified event occurs. The available events depends on the specific node type. Args should be a value to pass to the callback when it is called. The callback should be of the format: def callback(node, value, cb_args): where node will be the node that called the function, value is the relevant information for the event, and cb_args are the arguments you registered with the callback. ''' if event not in self._cbs: raise exceptions.NoSuchEventError self._cbs[event] = [(cb, args)]
python
def add_callback(self, event, cb, args=None): '''Add a callback to this node. Callbacks are called when the specified event occurs. The available events depends on the specific node type. Args should be a value to pass to the callback when it is called. The callback should be of the format: def callback(node, value, cb_args): where node will be the node that called the function, value is the relevant information for the event, and cb_args are the arguments you registered with the callback. ''' if event not in self._cbs: raise exceptions.NoSuchEventError self._cbs[event] = [(cb, args)]
[ "def", "add_callback", "(", "self", ",", "event", ",", "cb", ",", "args", "=", "None", ")", ":", "if", "event", "not", "in", "self", ".", "_cbs", ":", "raise", "exceptions", ".", "NoSuchEventError", "self", ".", "_cbs", "[", "event", "]", "=", "[", ...
Add a callback to this node. Callbacks are called when the specified event occurs. The available events depends on the specific node type. Args should be a value to pass to the callback when it is called. The callback should be of the format: def callback(node, value, cb_args): where node will be the node that called the function, value is the relevant information for the event, and cb_args are the arguments you registered with the callback.
[ "Add", "a", "callback", "to", "this", "node", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L78-L95
gbiggs/rtctree
rtctree/node.py
TreeNode.get_node
def get_node(self, path): '''Get a child node of this node, or this node, based on a path. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return The node pointed to by @ref path, or None if the path does not point to a node in the tree below this node. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.get_node(['p', 'c1']) == c1 True >>> p.get_node(['p', 'c2']) == c2 True ''' with self._mutex: if path[0] == self._name: if len(path) == 1: return self elif path[1] in self._children: return self._children[path[1]].get_node(path[1:]) else: return None else: return None
python
def get_node(self, path): '''Get a child node of this node, or this node, based on a path. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return The node pointed to by @ref path, or None if the path does not point to a node in the tree below this node. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.get_node(['p', 'c1']) == c1 True >>> p.get_node(['p', 'c2']) == c2 True ''' with self._mutex: if path[0] == self._name: if len(path) == 1: return self elif path[1] in self._children: return self._children[path[1]].get_node(path[1:]) else: return None else: return None
[ "def", "get_node", "(", "self", ",", "path", ")", ":", "with", "self", ".", "_mutex", ":", "if", "path", "[", "0", "]", "==", "self", ".", "_name", ":", "if", "len", "(", "path", ")", "==", "1", ":", "return", "self", "elif", "path", "[", "1", ...
Get a child node of this node, or this node, based on a path. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return The node pointed to by @ref path, or None if the path does not point to a node in the tree below this node. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.get_node(['p', 'c1']) == c1 True >>> p.get_node(['p', 'c2']) == c2 True
[ "Get", "a", "child", "node", "of", "this", "node", "or", "this", "node", "based", "on", "a", "path", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L97-L126
gbiggs/rtctree
rtctree/node.py
TreeNode.has_path
def has_path(self, path): '''Check if a path exists below this node. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return True if the path points to a node in the tree below this node, or this node itself (for paths one element long). False otherwise. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.has_path(['p', 'c1']) True >>> p.has_path(['p', 'c3']) False ''' with self._mutex: if path[0] == self._name: if len(path) == 1: return True elif path[1] in self._children: return self._children[path[1]].has_path(path[1:]) else: return False else: return False
python
def has_path(self, path): '''Check if a path exists below this node. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return True if the path points to a node in the tree below this node, or this node itself (for paths one element long). False otherwise. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.has_path(['p', 'c1']) True >>> p.has_path(['p', 'c3']) False ''' with self._mutex: if path[0] == self._name: if len(path) == 1: return True elif path[1] in self._children: return self._children[path[1]].has_path(path[1:]) else: return False else: return False
[ "def", "has_path", "(", "self", ",", "path", ")", ":", "with", "self", ".", "_mutex", ":", "if", "path", "[", "0", "]", "==", "self", ".", "_name", ":", "if", "len", "(", "path", ")", "==", "1", ":", "return", "True", "elif", "path", "[", "1", ...
Check if a path exists below this node. @param path A list of path elements pointing to a node in the tree. For example, ['/', 'localhost', 'dir.host']. The first element in this path should be this node's name. @return True if the path points to a node in the tree below this node, or this node itself (for paths one element long). False otherwise. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> p.has_path(['p', 'c1']) True >>> p.has_path(['p', 'c3']) False
[ "Check", "if", "a", "path", "exists", "below", "this", "node", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L128-L158
gbiggs/rtctree
rtctree/node.py
TreeNode.iterate
def iterate(self, func, args=None, filter=[]): '''Call a function on this node, and recursively all its children. This is a depth-first iteration. @param func The function to call. Its declaration must be 'def blag(node, args)', where 'node' is the current node in the iteration and args is the value of @ref args. @param args Extra arguments to pass to the function at each iteration. Pass multiple arguments in as a tuple. @param filter A list of filters to apply before calling func for each node in the iteration. If the filter is not True, @ref func will not be called for that node. Each filter entry should be a string, representing one of the is_* properties (is_component, etc), or a function object. @return The results of the calls to @ref func in a list. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> def hello(n, args): ... return args[0] + ' ' + n._name >>> p.iterate(hello, args=['hello']) ['hello p', 'hello c2', 'hello c1'] >>> p.iterate(hello, args=['hello'], filter=['_name=="c1"']) ['hello c1'] ''' with self._mutex: result = [] if filter: filters_passed = True for f in filter: if type(f) == str: if not eval('self.' + f): filters_passed = False break else: if not f(self): filters_passed = False break if filters_passed: result = [func(self, args)] else: result = [func(self, args)] for child in self._children: result += self._children[child].iterate(func, args, filter) return result
python
def iterate(self, func, args=None, filter=[]): '''Call a function on this node, and recursively all its children. This is a depth-first iteration. @param func The function to call. Its declaration must be 'def blag(node, args)', where 'node' is the current node in the iteration and args is the value of @ref args. @param args Extra arguments to pass to the function at each iteration. Pass multiple arguments in as a tuple. @param filter A list of filters to apply before calling func for each node in the iteration. If the filter is not True, @ref func will not be called for that node. Each filter entry should be a string, representing one of the is_* properties (is_component, etc), or a function object. @return The results of the calls to @ref func in a list. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> def hello(n, args): ... return args[0] + ' ' + n._name >>> p.iterate(hello, args=['hello']) ['hello p', 'hello c2', 'hello c1'] >>> p.iterate(hello, args=['hello'], filter=['_name=="c1"']) ['hello c1'] ''' with self._mutex: result = [] if filter: filters_passed = True for f in filter: if type(f) == str: if not eval('self.' + f): filters_passed = False break else: if not f(self): filters_passed = False break if filters_passed: result = [func(self, args)] else: result = [func(self, args)] for child in self._children: result += self._children[child].iterate(func, args, filter) return result
[ "def", "iterate", "(", "self", ",", "func", ",", "args", "=", "None", ",", "filter", "=", "[", "]", ")", ":", "with", "self", ".", "_mutex", ":", "result", "=", "[", "]", "if", "filter", ":", "filters_passed", "=", "True", "for", "f", "in", "filt...
Call a function on this node, and recursively all its children. This is a depth-first iteration. @param func The function to call. Its declaration must be 'def blag(node, args)', where 'node' is the current node in the iteration and args is the value of @ref args. @param args Extra arguments to pass to the function at each iteration. Pass multiple arguments in as a tuple. @param filter A list of filters to apply before calling func for each node in the iteration. If the filter is not True, @ref func will not be called for that node. Each filter entry should be a string, representing one of the is_* properties (is_component, etc), or a function object. @return The results of the calls to @ref func in a list. Example: >>> c1 = TreeNode(name='c1') >>> c2 = TreeNode(name='c2') >>> p = TreeNode(name='p', children={'c1':c1, 'c2':c2}) >>> c1._parent = p >>> c2._parent = p >>> def hello(n, args): ... return args[0] + ' ' + n._name >>> p.iterate(hello, args=['hello']) ['hello p', 'hello c2', 'hello c1'] >>> p.iterate(hello, args=['hello'], filter=['_name=="c1"']) ['hello c1']
[ "Call", "a", "function", "on", "this", "node", "and", "recursively", "all", "its", "children", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L169-L218
gbiggs/rtctree
rtctree/node.py
TreeNode.rem_callback
def rem_callback(self, event, cb): '''Remove a callback from this node. The callback is removed from the specified event. @param cb The callback function to remove. ''' if event not in self._cbs: raise exceptions.NoSuchEventError(self.name, event) c = [(x[0], x[1]) for x in self._cbs[event]] if not c: raise exceptions.NoCBError(self.name, event, cb) self._cbs[event].remove(c[0])
python
def rem_callback(self, event, cb): '''Remove a callback from this node. The callback is removed from the specified event. @param cb The callback function to remove. ''' if event not in self._cbs: raise exceptions.NoSuchEventError(self.name, event) c = [(x[0], x[1]) for x in self._cbs[event]] if not c: raise exceptions.NoCBError(self.name, event, cb) self._cbs[event].remove(c[0])
[ "def", "rem_callback", "(", "self", ",", "event", ",", "cb", ")", ":", "if", "event", "not", "in", "self", ".", "_cbs", ":", "raise", "exceptions", ".", "NoSuchEventError", "(", "self", ".", "name", ",", "event", ")", "c", "=", "[", "(", "x", "[", ...
Remove a callback from this node. The callback is removed from the specified event. @param cb The callback function to remove.
[ "Remove", "a", "callback", "from", "this", "node", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L220-L233
gbiggs/rtctree
rtctree/node.py
TreeNode.full_path
def full_path(self): '''The full path of this node.''' with self._mutex: if self._parent: return self._parent.full_path + [self._name] else: return [self._name]
python
def full_path(self): '''The full path of this node.''' with self._mutex: if self._parent: return self._parent.full_path + [self._name] else: return [self._name]
[ "def", "full_path", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_parent", ":", "return", "self", ".", "_parent", ".", "full_path", "+", "[", "self", ".", "_name", "]", "else", ":", "return", "[", "self", ".", "_na...
The full path of this node.
[ "The", "full", "path", "of", "this", "node", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L277-L283
gbiggs/rtctree
rtctree/node.py
TreeNode.full_path_str
def full_path_str(self): '''The full path of this node as a string.''' with self._mutex: if self._parent: if self._parent._name == '/': return self._parent.full_path_str + self._name else: return self._parent.full_path_str + '/' + self._name else: return self._name
python
def full_path_str(self): '''The full path of this node as a string.''' with self._mutex: if self._parent: if self._parent._name == '/': return self._parent.full_path_str + self._name else: return self._parent.full_path_str + '/' + self._name else: return self._name
[ "def", "full_path_str", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_parent", ":", "if", "self", ".", "_parent", ".", "_name", "==", "'/'", ":", "return", "self", ".", "_parent", ".", "full_path_str", "+", "self", "...
The full path of this node as a string.
[ "The", "full", "path", "of", "this", "node", "as", "a", "string", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L286-L295
gbiggs/rtctree
rtctree/node.py
TreeNode.nameserver
def nameserver(self): '''The name server of the node (i.e. its top-most parent below /).''' with self._mutex: if not self._parent: # The root node does not have a name server return None elif self._parent.name == '/': return self else: return self._parent.nameserver
python
def nameserver(self): '''The name server of the node (i.e. its top-most parent below /).''' with self._mutex: if not self._parent: # The root node does not have a name server return None elif self._parent.name == '/': return self else: return self._parent.nameserver
[ "def", "nameserver", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "not", "self", ".", "_parent", ":", "# The root node does not have a name server", "return", "None", "elif", "self", ".", "_parent", ".", "name", "==", "'/'", ":", "return"...
The name server of the node (i.e. its top-most parent below /).
[ "The", "name", "server", "of", "the", "node", "(", "i", ".", "e", ".", "its", "top", "-", "most", "parent", "below", "/", ")", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L337-L346
gbiggs/rtctree
rtctree/node.py
TreeNode.orb
def orb(self): '''The ORB used to access this object. This property's value will be None if no object above this object is a name server. ''' with self._mutex: if self._parent.name == '/': return None return self._parent.orb
python
def orb(self): '''The ORB used to access this object. This property's value will be None if no object above this object is a name server. ''' with self._mutex: if self._parent.name == '/': return None return self._parent.orb
[ "def", "orb", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_parent", ".", "name", "==", "'/'", ":", "return", "None", "return", "self", ".", "_parent", ".", "orb" ]
The ORB used to access this object. This property's value will be None if no object above this object is a name server.
[ "The", "ORB", "used", "to", "access", "this", "object", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L349-L359
gbiggs/rtctree
rtctree/node.py
TreeNode.root
def root(self): '''The root node of the tree this node is in.''' with self._mutex: if self._parent: return self._parent.root else: return self
python
def root(self): '''The root node of the tree this node is in.''' with self._mutex: if self._parent: return self._parent.root else: return self
[ "def", "root", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "self", ".", "_parent", ":", "return", "self", ".", "_parent", ".", "root", "else", ":", "return", "self" ]
The root node of the tree this node is in.
[ "The", "root", "node", "of", "the", "tree", "this", "node", "is", "in", "." ]
train
https://github.com/gbiggs/rtctree/blob/bd725a47ac87c259c8bce06156ccc9ab71111c26/rtctree/node.py#L392-L398
darkfeline/animanager
animanager/commands/deleterule.py
command
def command(state, args): """Delete priority rule.""" args = parser.parse_args(args[1:]) query.files.delete_priority_rule(state.db, args.id) del state.file_picker
python
def command(state, args): """Delete priority rule.""" args = parser.parse_args(args[1:]) query.files.delete_priority_rule(state.db, args.id) del state.file_picker
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "query", ".", "files", ".", "delete_priority_rule", "(", "state", ".", "db", ",", "args", ".", "id", ")", "del", ...
Delete priority rule.
[ "Delete", "priority", "rule", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/deleterule.py#L22-L26
darkfeline/animanager
animanager/sqlite/utils.py
upsert
def upsert(db, table, key_cols, update_dict): """Fabled upsert for SQLiteDB. Perform an upsert based on primary key. :param SQLiteDB db: database :param str table: table to upsert into :param str key_cols: name of key columns :param dict update_dict: key-value pairs to upsert """ with db: cur = db.cursor() cur.execute( 'UPDATE {} SET {} WHERE {}'.format( table, ','.join(_sqlpformat(col) for col in update_dict.keys()), ' AND '.join(_sqlpformat(col) for col in key_cols), ), update_dict, ) if db.changes() == 0: keys, values = zip(*update_dict.items()) cur.execute( 'INSERT INTO {} ({}) VALUES ({})'.format( table, ','.join(keys), ','.join('?' for _ in values)), values)
python
def upsert(db, table, key_cols, update_dict): """Fabled upsert for SQLiteDB. Perform an upsert based on primary key. :param SQLiteDB db: database :param str table: table to upsert into :param str key_cols: name of key columns :param dict update_dict: key-value pairs to upsert """ with db: cur = db.cursor() cur.execute( 'UPDATE {} SET {} WHERE {}'.format( table, ','.join(_sqlpformat(col) for col in update_dict.keys()), ' AND '.join(_sqlpformat(col) for col in key_cols), ), update_dict, ) if db.changes() == 0: keys, values = zip(*update_dict.items()) cur.execute( 'INSERT INTO {} ({}) VALUES ({})'.format( table, ','.join(keys), ','.join('?' for _ in values)), values)
[ "def", "upsert", "(", "db", ",", "table", ",", "key_cols", ",", "update_dict", ")", ":", "with", "db", ":", "cur", "=", "db", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "'UPDATE {} SET {} WHERE {}'", ".", "format", "(", "table", ",", "','", ...
Fabled upsert for SQLiteDB. Perform an upsert based on primary key. :param SQLiteDB db: database :param str table: table to upsert into :param str key_cols: name of key columns :param dict update_dict: key-value pairs to upsert
[ "Fabled", "upsert", "for", "SQLiteDB", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/sqlite/utils.py#L25-L53
darkfeline/animanager
animanager/commands/add.py
command
def command(state, args): """Add an anime from an AniDB search.""" if len(args) < 2: print(f'Usage: {args[0]} {{ID|aid:AID}}') return aid = state.results.parse_aid(args[1], default_key='anidb') anime = request_anime(aid) query.update.add(state.db, anime)
python
def command(state, args): """Add an anime from an AniDB search.""" if len(args) < 2: print(f'Usage: {args[0]} {{ID|aid:AID}}') return aid = state.results.parse_aid(args[1], default_key='anidb') anime = request_anime(aid) query.update.add(state.db, anime)
[ "def", "command", "(", "state", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "2", ":", "print", "(", "f'Usage: {args[0]} {{ID|aid:AID}}'", ")", "return", "aid", "=", "state", ".", "results", ".", "parse_aid", "(", "args", "[", "1", "]", ...
Add an anime from an AniDB search.
[ "Add", "an", "anime", "from", "an", "AniDB", "search", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/add.py#L22-L29
nickhand/classylss
classylss/astropy_compat.py
AstropyCompat.Ode
def Ode(self, z): """ Returns the sum of :func:`~classylss.binding.Background.Omega_lambda` and :func:`~classylss.binding.Background.Omega_fld`. """ return self.bg.Omega_lambda(z) + self.bg.Omega_fld(z)
python
def Ode(self, z): """ Returns the sum of :func:`~classylss.binding.Background.Omega_lambda` and :func:`~classylss.binding.Background.Omega_fld`. """ return self.bg.Omega_lambda(z) + self.bg.Omega_fld(z)
[ "def", "Ode", "(", "self", ",", "z", ")", ":", "return", "self", ".", "bg", ".", "Omega_lambda", "(", "z", ")", "+", "self", ".", "bg", ".", "Omega_fld", "(", "z", ")" ]
Returns the sum of :func:`~classylss.binding.Background.Omega_lambda` and :func:`~classylss.binding.Background.Omega_fld`.
[ "Returns", "the", "sum", "of", ":", "func", ":", "~classylss", ".", "binding", ".", "Background", ".", "Omega_lambda", "and", ":", "func", ":", "~classylss", ".", "binding", ".", "Background", ".", "Omega_fld", "." ]
train
https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/classylss/astropy_compat.py#L68-L73
nickhand/classylss
classylss/astropy_compat.py
AstropyCompat.Onu
def Onu(self, z): """ Returns the sum of :func:`~classylss.binding.Background.Omega_ncdm` and :func:`~classylss.binding.Background.Omega_ur`. """ return self.bg.Omega_ncdm(z) + self.bg.Omega_ur(z)
python
def Onu(self, z): """ Returns the sum of :func:`~classylss.binding.Background.Omega_ncdm` and :func:`~classylss.binding.Background.Omega_ur`. """ return self.bg.Omega_ncdm(z) + self.bg.Omega_ur(z)
[ "def", "Onu", "(", "self", ",", "z", ")", ":", "return", "self", ".", "bg", ".", "Omega_ncdm", "(", "z", ")", "+", "self", ".", "bg", ".", "Omega_ur", "(", "z", ")" ]
Returns the sum of :func:`~classylss.binding.Background.Omega_ncdm` and :func:`~classylss.binding.Background.Omega_ur`.
[ "Returns", "the", "sum", "of", ":", "func", ":", "~classylss", ".", "binding", ".", "Background", ".", "Omega_ncdm", "and", ":", "func", ":", "~classylss", ".", "binding", ".", "Background", ".", "Omega_ur", "." ]
train
https://github.com/nickhand/classylss/blob/b297cb25bc47ffed845470fe1c052346ea96cddd/classylss/astropy_compat.py#L82-L87
wglass/lighthouse
lighthouse/node.py
Node.current
def current(cls, service, port): """ Returns a Node instance representing the current service node. Collects the host and IP information for the current machine and the port information from the given service. """ host = socket.getfqdn() return cls( host=host, ip=socket.gethostbyname(host), port=port, metadata=service.metadata )
python
def current(cls, service, port): """ Returns a Node instance representing the current service node. Collects the host and IP information for the current machine and the port information from the given service. """ host = socket.getfqdn() return cls( host=host, ip=socket.gethostbyname(host), port=port, metadata=service.metadata )
[ "def", "current", "(", "cls", ",", "service", ",", "port", ")", ":", "host", "=", "socket", ".", "getfqdn", "(", ")", "return", "cls", "(", "host", "=", "host", ",", "ip", "=", "socket", ".", "gethostbyname", "(", "host", ")", ",", "port", "=", "...
Returns a Node instance representing the current service node. Collects the host and IP information for the current machine and the port information from the given service.
[ "Returns", "a", "Node", "instance", "representing", "the", "current", "service", "node", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/node.py#L35-L48
wglass/lighthouse
lighthouse/node.py
Node.serialize
def serialize(self): """ Serializes the node data as a JSON map string. """ return json.dumps({ "port": self.port, "ip": self.ip, "host": self.host, "peer": self.peer.serialize() if self.peer else None, "metadata": json.dumps(self.metadata or {}, sort_keys=True), }, sort_keys=True)
python
def serialize(self): """ Serializes the node data as a JSON map string. """ return json.dumps({ "port": self.port, "ip": self.ip, "host": self.host, "peer": self.peer.serialize() if self.peer else None, "metadata": json.dumps(self.metadata or {}, sort_keys=True), }, sort_keys=True)
[ "def", "serialize", "(", "self", ")", ":", "return", "json", ".", "dumps", "(", "{", "\"port\"", ":", "self", ".", "port", ",", "\"ip\"", ":", "self", ".", "ip", ",", "\"host\"", ":", "self", ".", "host", ",", "\"peer\"", ":", "self", ".", "peer", ...
Serializes the node data as a JSON map string.
[ "Serializes", "the", "node", "data", "as", "a", "JSON", "map", "string", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/node.py#L50-L60
wglass/lighthouse
lighthouse/node.py
Node.deserialize
def deserialize(cls, value): """ Creates a new Node instance via a JSON map string. Note that `port` and `ip` and are required keys for the JSON map, `peer` and `host` are optional. If `peer` is not present, the new Node instance will use the current peer. If `host` is not present, the hostname of the given `ip` is looked up. """ if getattr(value, "decode", None): value = value.decode() logger.debug("Deserializing node data: '%s'", value) parsed = json.loads(value) if "port" not in parsed: raise ValueError("No port defined for node.") if "ip" not in parsed: raise ValueError("No IP address defined for node.") if "host" not in parsed: host, aliases, ip_list = socket.gethostbyaddr(parsed["ip"]) parsed["host"] = socket.get_fqdn(host) if "peer" in parsed: peer = Peer.deserialize(parsed["peer"]) else: peer = None return cls( parsed["host"], parsed["ip"], parsed["port"], peer=peer, metadata=parsed.get("metadata") )
python
def deserialize(cls, value): """ Creates a new Node instance via a JSON map string. Note that `port` and `ip` and are required keys for the JSON map, `peer` and `host` are optional. If `peer` is not present, the new Node instance will use the current peer. If `host` is not present, the hostname of the given `ip` is looked up. """ if getattr(value, "decode", None): value = value.decode() logger.debug("Deserializing node data: '%s'", value) parsed = json.loads(value) if "port" not in parsed: raise ValueError("No port defined for node.") if "ip" not in parsed: raise ValueError("No IP address defined for node.") if "host" not in parsed: host, aliases, ip_list = socket.gethostbyaddr(parsed["ip"]) parsed["host"] = socket.get_fqdn(host) if "peer" in parsed: peer = Peer.deserialize(parsed["peer"]) else: peer = None return cls( parsed["host"], parsed["ip"], parsed["port"], peer=peer, metadata=parsed.get("metadata") )
[ "def", "deserialize", "(", "cls", ",", "value", ")", ":", "if", "getattr", "(", "value", ",", "\"decode\"", ",", "None", ")", ":", "value", "=", "value", ".", "decode", "(", ")", "logger", ".", "debug", "(", "\"Deserializing node data: '%s'\"", ",", "val...
Creates a new Node instance via a JSON map string. Note that `port` and `ip` and are required keys for the JSON map, `peer` and `host` are optional. If `peer` is not present, the new Node instance will use the current peer. If `host` is not present, the hostname of the given `ip` is looked up.
[ "Creates", "a", "new", "Node", "instance", "via", "a", "JSON", "map", "string", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/node.py#L63-L93
wglass/lighthouse
lighthouse/configs/watcher.py
ConfigWatcher.start
def start(self): """ Iterates over the `watched_configurabes` attribute and starts a config file monitor for each. The resulting observer threads are kept in an `observers` list attribute. """ for config_class in self.watched_configurables: monitor = ConfigFileMonitor(config_class, self.config_dir) self.observers.append( monitor.start( self.add_configurable, self.update_configurable, self.remove_configurable ) ) wait_on_event(self.shutdown)
python
def start(self): """ Iterates over the `watched_configurabes` attribute and starts a config file monitor for each. The resulting observer threads are kept in an `observers` list attribute. """ for config_class in self.watched_configurables: monitor = ConfigFileMonitor(config_class, self.config_dir) self.observers.append( monitor.start( self.add_configurable, self.update_configurable, self.remove_configurable ) ) wait_on_event(self.shutdown)
[ "def", "start", "(", "self", ")", ":", "for", "config_class", "in", "self", ".", "watched_configurables", ":", "monitor", "=", "ConfigFileMonitor", "(", "config_class", ",", "self", ".", "config_dir", ")", "self", ".", "observers", ".", "append", "(", "monit...
Iterates over the `watched_configurabes` attribute and starts a config file monitor for each. The resulting observer threads are kept in an `observers` list attribute.
[ "Iterates", "over", "the", "watched_configurabes", "attribute", "and", "starts", "a", "config", "file", "monitor", "for", "each", ".", "The", "resulting", "observer", "threads", "are", "kept", "in", "an", "observers", "list", "attribute", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/watcher.py#L50-L66
wglass/lighthouse
lighthouse/configs/watcher.py
ConfigWatcher.launch_thread
def launch_thread(self, name, fn, *args, **kwargs): """ Adds a named thread to the "thread pool" dictionary of Thread objects. A daemon thread that executes the passed-in function `fn` with the given args and keyword args is started and tracked in the `thread_pool` attribute with the given `name` as the key. """ logger.debug( "Launching thread '%s': %s(%s, %s)", name, fn, args, kwargs ) self.thread_pool[name] = threading.Thread( target=fn, args=args, kwargs=kwargs ) self.thread_pool[name].daemon = True self.thread_pool[name].start()
python
def launch_thread(self, name, fn, *args, **kwargs): """ Adds a named thread to the "thread pool" dictionary of Thread objects. A daemon thread that executes the passed-in function `fn` with the given args and keyword args is started and tracked in the `thread_pool` attribute with the given `name` as the key. """ logger.debug( "Launching thread '%s': %s(%s, %s)", name, fn, args, kwargs ) self.thread_pool[name] = threading.Thread( target=fn, args=args, kwargs=kwargs ) self.thread_pool[name].daemon = True self.thread_pool[name].start()
[ "def", "launch_thread", "(", "self", ",", "name", ",", "fn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "\"Launching thread '%s': %s(%s, %s)\"", ",", "name", ",", "fn", ",", "args", ",", "kwargs", ")", "self", "."...
Adds a named thread to the "thread pool" dictionary of Thread objects. A daemon thread that executes the passed-in function `fn` with the given args and keyword args is started and tracked in the `thread_pool` attribute with the given `name` as the key.
[ "Adds", "a", "named", "thread", "to", "the", "thread", "pool", "dictionary", "of", "Thread", "objects", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/watcher.py#L77-L93
wglass/lighthouse
lighthouse/configs/watcher.py
ConfigWatcher.kill_thread
def kill_thread(self, name): """ Joins the thread in the `thread_pool` dict with the given `name` key. """ if name not in self.thread_pool: return self.thread_pool[name].join() del self.thread_pool[name]
python
def kill_thread(self, name): """ Joins the thread in the `thread_pool` dict with the given `name` key. """ if name not in self.thread_pool: return self.thread_pool[name].join() del self.thread_pool[name]
[ "def", "kill_thread", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "thread_pool", ":", "return", "self", ".", "thread_pool", "[", "name", "]", ".", "join", "(", ")", "del", "self", ".", "thread_pool", "[", "name", "]" ]
Joins the thread in the `thread_pool` dict with the given `name` key.
[ "Joins", "the", "thread", "in", "the", "thread_pool", "dict", "with", "the", "given", "name", "key", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/watcher.py#L95-L103
wglass/lighthouse
lighthouse/configs/watcher.py
ConfigWatcher.update_configurable
def update_configurable(self, configurable_class, name, config): """ Callback fired when a configurable instance is updated. Looks up the existing configurable in the proper "registry" and `apply_config()` is called on it. If a method named "on_<configurable classname>_update" is defined it is called in the work pool and passed the configurable's name, the old config and the new config. If the updated configurable is not present, `add_configurable()` is called instead. """ configurable_class_name = configurable_class.__name__.lower() logger.info( "updating %s: '%s'", configurable_class_name, name ) registry = self.registry_for(configurable_class) if name not in registry: logger.warn( "Tried to update unknown %s: '%s'", configurable_class_name, name ) self.add_configurable( configurable_class, configurable_class.from_config(name, config) ) return registry[name].apply_config(config) hook = self.hook_for(configurable_class, "update") if not hook: return def done(f): try: f.result() except Exception: logger.exception("Error updating configurable '%s'", name) self.work_pool.submit(hook, name, config).add_done_callback(done)
python
def update_configurable(self, configurable_class, name, config): """ Callback fired when a configurable instance is updated. Looks up the existing configurable in the proper "registry" and `apply_config()` is called on it. If a method named "on_<configurable classname>_update" is defined it is called in the work pool and passed the configurable's name, the old config and the new config. If the updated configurable is not present, `add_configurable()` is called instead. """ configurable_class_name = configurable_class.__name__.lower() logger.info( "updating %s: '%s'", configurable_class_name, name ) registry = self.registry_for(configurable_class) if name not in registry: logger.warn( "Tried to update unknown %s: '%s'", configurable_class_name, name ) self.add_configurable( configurable_class, configurable_class.from_config(name, config) ) return registry[name].apply_config(config) hook = self.hook_for(configurable_class, "update") if not hook: return def done(f): try: f.result() except Exception: logger.exception("Error updating configurable '%s'", name) self.work_pool.submit(hook, name, config).add_done_callback(done)
[ "def", "update_configurable", "(", "self", ",", "configurable_class", ",", "name", ",", "config", ")", ":", "configurable_class_name", "=", "configurable_class", ".", "__name__", ".", "lower", "(", ")", "logger", ".", "info", "(", "\"updating %s: '%s'\"", ",", "...
Callback fired when a configurable instance is updated. Looks up the existing configurable in the proper "registry" and `apply_config()` is called on it. If a method named "on_<configurable classname>_update" is defined it is called in the work pool and passed the configurable's name, the old config and the new config. If the updated configurable is not present, `add_configurable()` is called instead.
[ "Callback", "fired", "when", "a", "configurable", "instance", "is", "updated", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/watcher.py#L142-L187
wglass/lighthouse
lighthouse/configs/watcher.py
ConfigWatcher.remove_configurable
def remove_configurable(self, configurable_class, name): """ Callback fired when a configurable instance is removed. Looks up the existing configurable in the proper "registry" and removes it. If a method named "on_<configurable classname>_remove" is defined it is called via the work pooland passed the configurable's name. If the removed configurable is not present, a warning is given and no further action is taken. """ configurable_class_name = configurable_class.__name__.lower() logger.info("Removing %s: '%s'", configurable_class_name, name) registry = self.registry_for(configurable_class) if name not in registry: logger.warn( "Tried to remove unknown active %s: '%s'", configurable_class_name, name ) return hook = self.hook_for(configurable_class, action="remove") if not hook: registry.pop(name) return def done(f): try: f.result() registry.pop(name) except Exception: logger.exception("Error removing configurable '%s'", name) self.work_pool.submit(hook, name).add_done_callback(done)
python
def remove_configurable(self, configurable_class, name): """ Callback fired when a configurable instance is removed. Looks up the existing configurable in the proper "registry" and removes it. If a method named "on_<configurable classname>_remove" is defined it is called via the work pooland passed the configurable's name. If the removed configurable is not present, a warning is given and no further action is taken. """ configurable_class_name = configurable_class.__name__.lower() logger.info("Removing %s: '%s'", configurable_class_name, name) registry = self.registry_for(configurable_class) if name not in registry: logger.warn( "Tried to remove unknown active %s: '%s'", configurable_class_name, name ) return hook = self.hook_for(configurable_class, action="remove") if not hook: registry.pop(name) return def done(f): try: f.result() registry.pop(name) except Exception: logger.exception("Error removing configurable '%s'", name) self.work_pool.submit(hook, name).add_done_callback(done)
[ "def", "remove_configurable", "(", "self", ",", "configurable_class", ",", "name", ")", ":", "configurable_class_name", "=", "configurable_class", ".", "__name__", ".", "lower", "(", ")", "logger", ".", "info", "(", "\"Removing %s: '%s'\"", ",", "configurable_class_...
Callback fired when a configurable instance is removed. Looks up the existing configurable in the proper "registry" and removes it. If a method named "on_<configurable classname>_remove" is defined it is called via the work pooland passed the configurable's name. If the removed configurable is not present, a warning is given and no further action is taken.
[ "Callback", "fired", "when", "a", "configurable", "instance", "is", "removed", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/watcher.py#L189-L227
wglass/lighthouse
lighthouse/configs/watcher.py
ConfigWatcher.hook_for
def hook_for(self, configurable_class, action): """ Helper method for determining if an on_<configurable class>_<action> method is present, to be used as a hook in the add/update/remove configurable methods. """ configurable_class_name = configurable_class.__name__.lower() return getattr( self, "on_" + configurable_class_name + "_" + action, None )
python
def hook_for(self, configurable_class, action): """ Helper method for determining if an on_<configurable class>_<action> method is present, to be used as a hook in the add/update/remove configurable methods. """ configurable_class_name = configurable_class.__name__.lower() return getattr( self, "on_" + configurable_class_name + "_" + action, None )
[ "def", "hook_for", "(", "self", ",", "configurable_class", ",", "action", ")", ":", "configurable_class_name", "=", "configurable_class", ".", "__name__", ".", "lower", "(", ")", "return", "getattr", "(", "self", ",", "\"on_\"", "+", "configurable_class_name", "...
Helper method for determining if an on_<configurable class>_<action> method is present, to be used as a hook in the add/update/remove configurable methods.
[ "Helper", "method", "for", "determining", "if", "an", "on_<configurable", "class", ">", "_<action", ">", "method", "is", "present", "to", "be", "used", "as", "a", "hook", "in", "the", "add", "/", "update", "/", "remove", "configurable", "methods", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/watcher.py#L239-L251
wglass/lighthouse
lighthouse/configs/watcher.py
ConfigWatcher.stop
def stop(self): """ Method for shutting down the watcher. All config file observers are stopped and their threads joined, along with the worker thread pool. """ self.shutdown.set() for monitor in self.observers: monitor.stop() self.wind_down() for monitor in self.observers: monitor.join() for thread in self.thread_pool.values(): thread.join() self.work_pool.shutdown()
python
def stop(self): """ Method for shutting down the watcher. All config file observers are stopped and their threads joined, along with the worker thread pool. """ self.shutdown.set() for monitor in self.observers: monitor.stop() self.wind_down() for monitor in self.observers: monitor.join() for thread in self.thread_pool.values(): thread.join() self.work_pool.shutdown()
[ "def", "stop", "(", "self", ")", ":", "self", ".", "shutdown", ".", "set", "(", ")", "for", "monitor", "in", "self", ".", "observers", ":", "monitor", ".", "stop", "(", ")", "self", ".", "wind_down", "(", ")", "for", "monitor", "in", "self", ".", ...
Method for shutting down the watcher. All config file observers are stopped and their threads joined, along with the worker thread pool.
[ "Method", "for", "shutting", "down", "the", "watcher", "." ]
train
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/configs/watcher.py#L253-L273
phalt/beckett
beckett/resources.py
BaseResource.set_attributes
def set_attributes(self, **kwargs): """ Set the resource attributes from the kwargs. Only sets items in the `self.Meta.attributes` white list. Subclass this method to customise attributes. Args: kwargs: Keyword arguements passed into the init of this class """ if self._subresource_map: self.set_subresources(**kwargs) for key in self._subresource_map.keys(): # Don't let these attributes be overridden later kwargs.pop(key, None) for field, value in kwargs.items(): if field in self.Meta.attributes: setattr(self, field, value)
python
def set_attributes(self, **kwargs): """ Set the resource attributes from the kwargs. Only sets items in the `self.Meta.attributes` white list. Subclass this method to customise attributes. Args: kwargs: Keyword arguements passed into the init of this class """ if self._subresource_map: self.set_subresources(**kwargs) for key in self._subresource_map.keys(): # Don't let these attributes be overridden later kwargs.pop(key, None) for field, value in kwargs.items(): if field in self.Meta.attributes: setattr(self, field, value)
[ "def", "set_attributes", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_subresource_map", ":", "self", ".", "set_subresources", "(", "*", "*", "kwargs", ")", "for", "key", "in", "self", ".", "_subresource_map", ".", "keys", "(", "...
Set the resource attributes from the kwargs. Only sets items in the `self.Meta.attributes` white list. Subclass this method to customise attributes. Args: kwargs: Keyword arguements passed into the init of this class
[ "Set", "the", "resource", "attributes", "from", "the", "kwargs", ".", "Only", "sets", "items", "in", "the", "self", ".", "Meta", ".", "attributes", "white", "list", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L99-L116
phalt/beckett
beckett/resources.py
BaseResource.get_resource_url
def get_resource_url(cls, resource, base_url): """ Construct the URL for talking to this resource. i.e.: http://myapi.com/api/resource Note that this is NOT the method for calling individual instances i.e. http://myapi.com/api/resource/1 Args: resource: The resource class instance base_url: The Base URL of this API service. returns: resource_url: The URL for this resource """ if resource.Meta.resource_name: url = '{}/{}'.format(base_url, resource.Meta.resource_name) else: p = inflect.engine() plural_name = p.plural(resource.Meta.name.lower()) url = '{}/{}'.format(base_url, plural_name) return cls._parse_url_and_validate(url)
python
def get_resource_url(cls, resource, base_url): """ Construct the URL for talking to this resource. i.e.: http://myapi.com/api/resource Note that this is NOT the method for calling individual instances i.e. http://myapi.com/api/resource/1 Args: resource: The resource class instance base_url: The Base URL of this API service. returns: resource_url: The URL for this resource """ if resource.Meta.resource_name: url = '{}/{}'.format(base_url, resource.Meta.resource_name) else: p = inflect.engine() plural_name = p.plural(resource.Meta.name.lower()) url = '{}/{}'.format(base_url, plural_name) return cls._parse_url_and_validate(url)
[ "def", "get_resource_url", "(", "cls", ",", "resource", ",", "base_url", ")", ":", "if", "resource", ".", "Meta", ".", "resource_name", ":", "url", "=", "'{}/{}'", ".", "format", "(", "base_url", ",", "resource", ".", "Meta", ".", "resource_name", ")", "...
Construct the URL for talking to this resource. i.e.: http://myapi.com/api/resource Note that this is NOT the method for calling individual instances i.e. http://myapi.com/api/resource/1 Args: resource: The resource class instance base_url: The Base URL of this API service. returns: resource_url: The URL for this resource
[ "Construct", "the", "URL", "for", "talking", "to", "this", "resource", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L119-L143
phalt/beckett
beckett/resources.py
BaseResource.get_url
def get_url(cls, url, uid, **kwargs): """ Construct the URL for talking to an individual resource. http://myapi.com/api/resource/1 Args: url: The url for this resource uid: The unique identifier for an individual resource kwargs: Additional keyword argueents returns: final_url: The URL for this individual resource """ if uid: url = '{}/{}'.format(url, uid) else: url = url return cls._parse_url_and_validate(url)
python
def get_url(cls, url, uid, **kwargs): """ Construct the URL for talking to an individual resource. http://myapi.com/api/resource/1 Args: url: The url for this resource uid: The unique identifier for an individual resource kwargs: Additional keyword argueents returns: final_url: The URL for this individual resource """ if uid: url = '{}/{}'.format(url, uid) else: url = url return cls._parse_url_and_validate(url)
[ "def", "get_url", "(", "cls", ",", "url", ",", "uid", ",", "*", "*", "kwargs", ")", ":", "if", "uid", ":", "url", "=", "'{}/{}'", ".", "format", "(", "url", ",", "uid", ")", "else", ":", "url", "=", "url", "return", "cls", ".", "_parse_url_and_va...
Construct the URL for talking to an individual resource. http://myapi.com/api/resource/1 Args: url: The url for this resource uid: The unique identifier for an individual resource kwargs: Additional keyword argueents returns: final_url: The URL for this individual resource
[ "Construct", "the", "URL", "for", "talking", "to", "an", "individual", "resource", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L146-L163
phalt/beckett
beckett/resources.py
BaseResource.get_method_name
def get_method_name(resource, method_type): """ Generate a method name for this resource based on the method type. """ return '{}_{}'.format(method_type.lower(), resource.Meta.name.lower())
python
def get_method_name(resource, method_type): """ Generate a method name for this resource based on the method type. """ return '{}_{}'.format(method_type.lower(), resource.Meta.name.lower())
[ "def", "get_method_name", "(", "resource", ",", "method_type", ")", ":", "return", "'{}_{}'", ".", "format", "(", "method_type", ".", "lower", "(", ")", ",", "resource", ".", "Meta", ".", "name", ".", "lower", "(", ")", ")" ]
Generate a method name for this resource based on the method type.
[ "Generate", "a", "method", "name", "for", "this", "resource", "based", "on", "the", "method", "type", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L166-L170
phalt/beckett
beckett/resources.py
BaseResource._parse_url_and_validate
def _parse_url_and_validate(cls, url): """ Recieves a URL string and validates it using urlparse. Args: url: A URL string Returns: parsed_url: A validated URL Raises: BadURLException """ parsed_url = urlparse(url) if parsed_url.scheme and parsed_url.netloc: final_url = parsed_url.geturl() else: raise BadURLException return final_url
python
def _parse_url_and_validate(cls, url): """ Recieves a URL string and validates it using urlparse. Args: url: A URL string Returns: parsed_url: A validated URL Raises: BadURLException """ parsed_url = urlparse(url) if parsed_url.scheme and parsed_url.netloc: final_url = parsed_url.geturl() else: raise BadURLException return final_url
[ "def", "_parse_url_and_validate", "(", "cls", ",", "url", ")", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "if", "parsed_url", ".", "scheme", "and", "parsed_url", ".", "netloc", ":", "final_url", "=", "parsed_url", ".", "geturl", "(", ")", "else",...
Recieves a URL string and validates it using urlparse. Args: url: A URL string Returns: parsed_url: A validated URL Raises: BadURLException
[ "Recieves", "a", "URL", "string", "and", "validates", "it", "using", "urlparse", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L173-L189
phalt/beckett
beckett/resources.py
HypermediaResource.set_related_method
def set_related_method(self, resource, full_resource_url): """ Using reflection, generate the related method and return it. """ method_name = self.get_method_name(resource, 'get') def get(self, **kwargs): return self._call_api_single_related_resource( resource, full_resource_url, method_name, **kwargs ) def get_list(self, **kwargs): return self._call_api_many_related_resources( resource, full_resource_url, method_name, **kwargs ) if isinstance(full_resource_url, list): setattr( self, method_name, types.MethodType(get_list, self) ) else: setattr( self, method_name, types.MethodType(get, self) )
python
def set_related_method(self, resource, full_resource_url): """ Using reflection, generate the related method and return it. """ method_name = self.get_method_name(resource, 'get') def get(self, **kwargs): return self._call_api_single_related_resource( resource, full_resource_url, method_name, **kwargs ) def get_list(self, **kwargs): return self._call_api_many_related_resources( resource, full_resource_url, method_name, **kwargs ) if isinstance(full_resource_url, list): setattr( self, method_name, types.MethodType(get_list, self) ) else: setattr( self, method_name, types.MethodType(get, self) )
[ "def", "set_related_method", "(", "self", ",", "resource", ",", "full_resource_url", ")", ":", "method_name", "=", "self", ".", "get_method_name", "(", "resource", ",", "'get'", ")", "def", "get", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", ...
Using reflection, generate the related method and return it.
[ "Using", "reflection", "generate", "the", "related", "method", "and", "return", "it", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L207-L232
phalt/beckett
beckett/resources.py
HypermediaResource.match_urls_to_resources
def match_urls_to_resources(self, url_values): """ For the list of valid URLs, try and match them up to resources in the related_resources attribute. Args: url_values: A dictionary of keys and URL strings that could be related resources. Returns: valid_values: The values that are valid """ valid_values = {} for resource in self.Meta.related_resources: for k, v in url_values.items(): resource_url = resource.get_resource_url( resource, resource.Meta.base_url) if isinstance(v, list): if all([resource_url in i for i in v]): self.set_related_method(resource, v) valid_values[k] = v elif resource_url in v: self.set_related_method(resource, v) valid_values[k] = v return valid_values
python
def match_urls_to_resources(self, url_values): """ For the list of valid URLs, try and match them up to resources in the related_resources attribute. Args: url_values: A dictionary of keys and URL strings that could be related resources. Returns: valid_values: The values that are valid """ valid_values = {} for resource in self.Meta.related_resources: for k, v in url_values.items(): resource_url = resource.get_resource_url( resource, resource.Meta.base_url) if isinstance(v, list): if all([resource_url in i for i in v]): self.set_related_method(resource, v) valid_values[k] = v elif resource_url in v: self.set_related_method(resource, v) valid_values[k] = v return valid_values
[ "def", "match_urls_to_resources", "(", "self", ",", "url_values", ")", ":", "valid_values", "=", "{", "}", "for", "resource", "in", "self", ".", "Meta", ".", "related_resources", ":", "for", "k", ",", "v", "in", "url_values", ".", "items", "(", ")", ":",...
For the list of valid URLs, try and match them up to resources in the related_resources attribute. Args: url_values: A dictionary of keys and URL strings that could be related resources. Returns: valid_values: The values that are valid
[ "For", "the", "list", "of", "valid", "URLs", "try", "and", "match", "them", "up", "to", "resources", "in", "the", "related_resources", "attribute", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L234-L257
phalt/beckett
beckett/resources.py
HypermediaResource.set_attributes
def set_attributes(self, **kwargs): """ Similar to BaseResource.set_attributes except it will attempt to match URL strings with registered related resources, and build their get_* method and attach it to this resource. """ if not self.Meta.related_resources: # Just do what the normal BaseResource does super(HypermediaResource, self).set_attributes(**kwargs) return # Extract all the values that are URLs url_values = {} for k, v in kwargs.items(): try: if isinstance(v, list): [self._parse_url_and_validate(i) for i in v] else: self._parse_url_and_validate(v) url_values[k] = v except BadURLException: # This is a badly formed URL or not a URL at all, so skip pass # Assign the valid method values and then remove them from the kwargs assigned_values = self.match_urls_to_resources(url_values) for k in assigned_values.keys(): kwargs.pop(k, None) # Assign the rest as attributes. for field, value in kwargs.items(): if field in self.Meta.attributes: setattr(self, field, value)
python
def set_attributes(self, **kwargs): """ Similar to BaseResource.set_attributes except it will attempt to match URL strings with registered related resources, and build their get_* method and attach it to this resource. """ if not self.Meta.related_resources: # Just do what the normal BaseResource does super(HypermediaResource, self).set_attributes(**kwargs) return # Extract all the values that are URLs url_values = {} for k, v in kwargs.items(): try: if isinstance(v, list): [self._parse_url_and_validate(i) for i in v] else: self._parse_url_and_validate(v) url_values[k] = v except BadURLException: # This is a badly formed URL or not a URL at all, so skip pass # Assign the valid method values and then remove them from the kwargs assigned_values = self.match_urls_to_resources(url_values) for k in assigned_values.keys(): kwargs.pop(k, None) # Assign the rest as attributes. for field, value in kwargs.items(): if field in self.Meta.attributes: setattr(self, field, value)
[ "def", "set_attributes", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "Meta", ".", "related_resources", ":", "# Just do what the normal BaseResource does", "super", "(", "HypermediaResource", ",", "self", ")", ".", "set_attributes", ...
Similar to BaseResource.set_attributes except it will attempt to match URL strings with registered related resources, and build their get_* method and attach it to this resource.
[ "Similar", "to", "BaseResource", ".", "set_attributes", "except", "it", "will", "attempt", "to", "match", "URL", "strings", "with", "registered", "related", "resources", "and", "build", "their", "get_", "*", "method", "and", "attach", "it", "to", "this", "reso...
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L259-L290
phalt/beckett
beckett/resources.py
SubResource.set_attributes
def set_attributes(self, **kwargs): """ Set the resource attributes from the kwargs. Only sets items in the `self.Meta.attributes` white list. Args: kwargs: Keyword arguements passed into the init of this class """ for field, value in kwargs.items(): if field in self.Meta.attributes: setattr(self, field, value)
python
def set_attributes(self, **kwargs): """ Set the resource attributes from the kwargs. Only sets items in the `self.Meta.attributes` white list. Args: kwargs: Keyword arguements passed into the init of this class """ for field, value in kwargs.items(): if field in self.Meta.attributes: setattr(self, field, value)
[ "def", "set_attributes", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "field", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "field", "in", "self", ".", "Meta", ".", "attributes", ":", "setattr", "(", "self", ",", "fiel...
Set the resource attributes from the kwargs. Only sets items in the `self.Meta.attributes` white list. Args: kwargs: Keyword arguements passed into the init of this class
[ "Set", "the", "resource", "attributes", "from", "the", "kwargs", ".", "Only", "sets", "items", "in", "the", "self", ".", "Meta", ".", "attributes", "white", "list", "." ]
train
https://github.com/phalt/beckett/blob/555a7b1744d0063023fecd70a81ae090096362f3/beckett/resources.py#L331-L341
darkfeline/animanager
animanager/db/query/update.py
add
def add(db, anime): """Add an anime (or update existing). anime is an AnimeTree instance. """ aid = anime.aid values = { 'aid': aid, 'title': anime.title, 'type': anime.type, 'episodecount': anime.episodecount, } if anime.startdate is not None: values['startdate'] = datets.to_ts(anime.startdate) if anime.enddate is not None: values['enddate'] = datets.to_ts(anime.enddate) with db: upsert(db, 'anime', ['aid'], values) our_anime = lookup(db, anime.aid, episode_fields=ALL) our_episodes = our_anime.episodes for episode in anime.episodes: add_episode(db, aid, episode) our_episodes = [ ep for ep in our_episodes if not (ep.type == episode.type and ep.number == episode.number) ] # Remove extra episodes that we have. for episode in our_episodes: delete_episode(db, aid, episode)
python
def add(db, anime): """Add an anime (or update existing). anime is an AnimeTree instance. """ aid = anime.aid values = { 'aid': aid, 'title': anime.title, 'type': anime.type, 'episodecount': anime.episodecount, } if anime.startdate is not None: values['startdate'] = datets.to_ts(anime.startdate) if anime.enddate is not None: values['enddate'] = datets.to_ts(anime.enddate) with db: upsert(db, 'anime', ['aid'], values) our_anime = lookup(db, anime.aid, episode_fields=ALL) our_episodes = our_anime.episodes for episode in anime.episodes: add_episode(db, aid, episode) our_episodes = [ ep for ep in our_episodes if not (ep.type == episode.type and ep.number == episode.number) ] # Remove extra episodes that we have. for episode in our_episodes: delete_episode(db, aid, episode)
[ "def", "add", "(", "db", ",", "anime", ")", ":", "aid", "=", "anime", ".", "aid", "values", "=", "{", "'aid'", ":", "aid", ",", "'title'", ":", "anime", ".", "title", ",", "'type'", ":", "anime", ".", "type", ",", "'episodecount'", ":", "anime", ...
Add an anime (or update existing). anime is an AnimeTree instance.
[ "Add", "an", "anime", "(", "or", "update", "existing", ")", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/update.py#L30-L59
darkfeline/animanager
animanager/db/query/update.py
add_episode
def add_episode(db, aid, episode): """Add an episode.""" values = { 'aid': aid, 'type': episode.type, 'number': episode.number, 'title': episode.title, 'length': episode.length, } upsert(db, 'episode', ['aid', 'type', 'number'], values)
python
def add_episode(db, aid, episode): """Add an episode.""" values = { 'aid': aid, 'type': episode.type, 'number': episode.number, 'title': episode.title, 'length': episode.length, } upsert(db, 'episode', ['aid', 'type', 'number'], values)
[ "def", "add_episode", "(", "db", ",", "aid", ",", "episode", ")", ":", "values", "=", "{", "'aid'", ":", "aid", ",", "'type'", ":", "episode", ".", "type", ",", "'number'", ":", "episode", ".", "number", ",", "'title'", ":", "episode", ".", "title", ...
Add an episode.
[ "Add", "an", "episode", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/update.py#L62-L71
darkfeline/animanager
animanager/db/query/update.py
delete_episode
def delete_episode(db, aid, episode): """Delete an episode.""" db.cursor().execute( 'DELETE FROM episode WHERE aid=:aid AND type=:type AND number=:number', { 'aid': aid, 'type': episode.type, 'number': episode.number, })
python
def delete_episode(db, aid, episode): """Delete an episode.""" db.cursor().execute( 'DELETE FROM episode WHERE aid=:aid AND type=:type AND number=:number', { 'aid': aid, 'type': episode.type, 'number': episode.number, })
[ "def", "delete_episode", "(", "db", ",", "aid", ",", "episode", ")", ":", "db", ".", "cursor", "(", ")", ".", "execute", "(", "'DELETE FROM episode WHERE aid=:aid AND type=:type AND number=:number'", ",", "{", "'aid'", ":", "aid", ",", "'type'", ":", "episode", ...
Delete an episode.
[ "Delete", "an", "episode", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/update.py#L74-L82
darkfeline/animanager
animanager/db/query/update.py
set_watched
def set_watched(db, aid, ep_type, number): """Set episode as watched.""" db.cursor().execute( """UPDATE episode SET user_watched=:watched WHERE aid=:aid AND type=:type AND number=:number""", { 'aid': aid, 'type': ep_type, 'number': number, 'watched': 1, })
python
def set_watched(db, aid, ep_type, number): """Set episode as watched.""" db.cursor().execute( """UPDATE episode SET user_watched=:watched WHERE aid=:aid AND type=:type AND number=:number""", { 'aid': aid, 'type': ep_type, 'number': number, 'watched': 1, })
[ "def", "set_watched", "(", "db", ",", "aid", ",", "ep_type", ",", "number", ")", ":", "db", ".", "cursor", "(", ")", ".", "execute", "(", "\"\"\"UPDATE episode SET user_watched=:watched\n WHERE aid=:aid AND type=:type AND number=:number\"\"\"", ",", "{", "'aid'",...
Set episode as watched.
[ "Set", "episode", "as", "watched", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/update.py#L85-L95
darkfeline/animanager
animanager/db/query/update.py
bump
def bump(db, aid): """Bump anime regular episode count.""" anime = lookup(db, aid) if anime.complete: return episode = anime.watched_episodes + 1 with db: set_watched(db, aid, get_eptype(db, 'regular').id, episode) set_status( db, aid, anime.enddate and episode >= anime.episodecount, episode)
python
def bump(db, aid): """Bump anime regular episode count.""" anime = lookup(db, aid) if anime.complete: return episode = anime.watched_episodes + 1 with db: set_watched(db, aid, get_eptype(db, 'regular').id, episode) set_status( db, aid, anime.enddate and episode >= anime.episodecount, episode)
[ "def", "bump", "(", "db", ",", "aid", ")", ":", "anime", "=", "lookup", "(", "db", ",", "aid", ")", "if", "anime", ".", "complete", ":", "return", "episode", "=", "anime", ".", "watched_episodes", "+", "1", "with", "db", ":", "set_watched", "(", "d...
Bump anime regular episode count.
[ "Bump", "anime", "regular", "episode", "count", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/update.py#L98-L109
darkfeline/animanager
animanager/db/query/update.py
reset
def reset(db, aid, episode): """Reset episode count for anime.""" params = { 'aid': aid, 'type': get_eptype(db, 'regular').id, 'watched': 1, 'number': episode, } with db: cur = db.cursor() cur.execute( """UPDATE episode SET user_watched=:watched WHERE aid=:aid AND type=:type AND number<=:number""", params) params['watched'] = 0 cur.execute( """UPDATE episode SET user_watched=:watched WHERE aid=:aid AND type=:type AND number>:number""", params) cache_status(db, aid, force=True)
python
def reset(db, aid, episode): """Reset episode count for anime.""" params = { 'aid': aid, 'type': get_eptype(db, 'regular').id, 'watched': 1, 'number': episode, } with db: cur = db.cursor() cur.execute( """UPDATE episode SET user_watched=:watched WHERE aid=:aid AND type=:type AND number<=:number""", params) params['watched'] = 0 cur.execute( """UPDATE episode SET user_watched=:watched WHERE aid=:aid AND type=:type AND number>:number""", params) cache_status(db, aid, force=True)
[ "def", "reset", "(", "db", ",", "aid", ",", "episode", ")", ":", "params", "=", "{", "'aid'", ":", "aid", ",", "'type'", ":", "get_eptype", "(", "db", ",", "'regular'", ")", ".", "id", ",", "'watched'", ":", "1", ",", "'number'", ":", "episode", ...
Reset episode count for anime.
[ "Reset", "episode", "count", "for", "anime", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/db/query/update.py#L112-L131
frnsys/broca
broca/knowledge/idf.py
train_idf
def train_idf(tokens_stream, out=None, **kwargs): """ Train a IDF model on a list of files (parallelized). """ idfs = parallel(count_idf, tokens_stream, n_jobs=-1) N = len(idfs) # n docs idf = merge(idfs) for k, v in idf.items(): idf[k] = math.log(N/v) # v ~= N/(math.e ** idf[k]) # Keep track of N to update IDFs idf['_n_docs'] = N if out is not None: with open(out, 'w') as f: json.dump(idf, f) return idf
python
def train_idf(tokens_stream, out=None, **kwargs): """ Train a IDF model on a list of files (parallelized). """ idfs = parallel(count_idf, tokens_stream, n_jobs=-1) N = len(idfs) # n docs idf = merge(idfs) for k, v in idf.items(): idf[k] = math.log(N/v) # v ~= N/(math.e ** idf[k]) # Keep track of N to update IDFs idf['_n_docs'] = N if out is not None: with open(out, 'w') as f: json.dump(idf, f) return idf
[ "def", "train_idf", "(", "tokens_stream", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":", "idfs", "=", "parallel", "(", "count_idf", ",", "tokens_stream", ",", "n_jobs", "=", "-", "1", ")", "N", "=", "len", "(", "idfs", ")", "# n docs", ...
Train a IDF model on a list of files (parallelized).
[ "Train", "a", "IDF", "model", "on", "a", "list", "of", "files", "(", "parallelized", ")", "." ]
train
https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/knowledge/idf.py#L8-L27
mistio/mist.client
src/mistclient/__init__.py
MistClient.__authenticate
def __authenticate(self): """ Sends a json payload with the email and password in order to get the authentication api_token to be used with the rest of the requests """ if self.api_token: # verify current API token check_auth_uri = self.uri.split('/api/v1')[0] + '/check_token' req = self.request(check_auth_uri) try: ping = req.get().json() except Exception as exc: if str(exc).startswith('User not authenticated'): self.api_token = None else: if self.email == ping['hello']: return print "Authentication failed" sys.exit(1) auth_uri = self.uri.split('/api/v1')[0] + '/auth' payload = { 'email': self.email, 'password': self.password, 'org_id': self.org_name } data = json.dumps(payload) req = self.request(auth_uri, data=data) response = req.post().json() token = response.get('mist_api_token', None) if token: # backwards compatibility with old Authentication system self.api_token = "mist_1 %s:%s" % (self.email, token) else: self.api_token = response.get('token', None)
python
def __authenticate(self): """ Sends a json payload with the email and password in order to get the authentication api_token to be used with the rest of the requests """ if self.api_token: # verify current API token check_auth_uri = self.uri.split('/api/v1')[0] + '/check_token' req = self.request(check_auth_uri) try: ping = req.get().json() except Exception as exc: if str(exc).startswith('User not authenticated'): self.api_token = None else: if self.email == ping['hello']: return print "Authentication failed" sys.exit(1) auth_uri = self.uri.split('/api/v1')[0] + '/auth' payload = { 'email': self.email, 'password': self.password, 'org_id': self.org_name } data = json.dumps(payload) req = self.request(auth_uri, data=data) response = req.post().json() token = response.get('mist_api_token', None) if token: # backwards compatibility with old Authentication system self.api_token = "mist_1 %s:%s" % (self.email, token) else: self.api_token = response.get('token', None)
[ "def", "__authenticate", "(", "self", ")", ":", "if", "self", ".", "api_token", ":", "# verify current API token", "check_auth_uri", "=", "self", ".", "uri", ".", "split", "(", "'/api/v1'", ")", "[", "0", "]", "+", "'/check_token'", "req", "=", "self", "."...
Sends a json payload with the email and password in order to get the authentication api_token to be used with the rest of the requests
[ "Sends", "a", "json", "payload", "with", "the", "email", "and", "password", "in", "order", "to", "get", "the", "authentication", "api_token", "to", "be", "used", "with", "the", "rest", "of", "the", "requests" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L50-L84
mistio/mist.client
src/mistclient/__init__.py
MistClient.user_info
def user_info(self): """ General user info information as returned in /account in our API """ account_uri = self.uri.split('/api/v1')[0] + '/account' req = self.request(account_uri) account_info = req.get() user_data = HTMLParser(account_info.content) return user_data
python
def user_info(self): """ General user info information as returned in /account in our API """ account_uri = self.uri.split('/api/v1')[0] + '/account' req = self.request(account_uri) account_info = req.get() user_data = HTMLParser(account_info.content) return user_data
[ "def", "user_info", "(", "self", ")", ":", "account_uri", "=", "self", ".", "uri", ".", "split", "(", "'/api/v1'", ")", "[", "0", "]", "+", "'/account'", "req", "=", "self", ".", "request", "(", "account_uri", ")", "account_info", "=", "req", ".", "g...
General user info information as returned in /account in our API
[ "General", "user", "info", "information", "as", "returned", "in", "/", "account", "in", "our", "API" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L86-L94
mistio/mist.client
src/mistclient/__init__.py
MistClient.supported_providers
def supported_providers(self): """ Request a list of all available providers :returns: A list of all available providers (e.g. {'provider': 'ec2_ap_northeast', 'title': 'EC2 AP NORTHEAST'}) """ req = self.request(self.uri + '/providers', api_version=2) providers = req.get().json() supported_providers = providers['supported_providers'] return supported_providers
python
def supported_providers(self): """ Request a list of all available providers :returns: A list of all available providers (e.g. {'provider': 'ec2_ap_northeast', 'title': 'EC2 AP NORTHEAST'}) """ req = self.request(self.uri + '/providers', api_version=2) providers = req.get().json() supported_providers = providers['supported_providers'] return supported_providers
[ "def", "supported_providers", "(", "self", ")", ":", "req", "=", "self", ".", "request", "(", "self", ".", "uri", "+", "'/providers'", ",", "api_version", "=", "2", ")", "providers", "=", "req", ".", "get", "(", ")", ".", "json", "(", ")", "supported...
Request a list of all available providers :returns: A list of all available providers (e.g. {'provider': 'ec2_ap_northeast', 'title': 'EC2 AP NORTHEAST'})
[ "Request", "a", "list", "of", "all", "available", "providers" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L107-L117
mistio/mist.client
src/mistclient/__init__.py
MistClient._list_clouds
def _list_clouds(self): """ Request a list of all added clouds. Populates self._clouds dict with mist.client.model.Cloud instances """ req = self.request(self.uri + '/clouds') clouds = req.get().json() if clouds: for cloud in clouds: self._clouds[cloud['id']] = Cloud(cloud, self) else: self._clouds = {}
python
def _list_clouds(self): """ Request a list of all added clouds. Populates self._clouds dict with mist.client.model.Cloud instances """ req = self.request(self.uri + '/clouds') clouds = req.get().json() if clouds: for cloud in clouds: self._clouds[cloud['id']] = Cloud(cloud, self) else: self._clouds = {}
[ "def", "_list_clouds", "(", "self", ")", ":", "req", "=", "self", ".", "request", "(", "self", ".", "uri", "+", "'/clouds'", ")", "clouds", "=", "req", ".", "get", "(", ")", ".", "json", "(", ")", "if", "clouds", ":", "for", "cloud", "in", "cloud...
Request a list of all added clouds. Populates self._clouds dict with mist.client.model.Cloud instances
[ "Request", "a", "list", "of", "all", "added", "clouds", "." ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L119-L131
mistio/mist.client
src/mistclient/__init__.py
MistClient.clouds
def clouds(self, id=None, name=None, provider=None, search=None): """ Property-like function to call the _list_clouds function in order to populate self._clouds dict :returns: A list of Cloud instances. """ if self._clouds is None: self._clouds = {} self._list_clouds() if id: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if id == self._clouds[cloud_id].id] elif name: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if name == self._clouds[cloud_id].title] elif provider: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if provider == self._clouds[cloud_id].provider] elif search: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if search in self._clouds[cloud_id].title or search in self._clouds[cloud_id].id or search in self._clouds[cloud_id].provider] else: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys()]
python
def clouds(self, id=None, name=None, provider=None, search=None): """ Property-like function to call the _list_clouds function in order to populate self._clouds dict :returns: A list of Cloud instances. """ if self._clouds is None: self._clouds = {} self._list_clouds() if id: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if id == self._clouds[cloud_id].id] elif name: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if name == self._clouds[cloud_id].title] elif provider: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if provider == self._clouds[cloud_id].provider] elif search: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys() if search in self._clouds[cloud_id].title or search in self._clouds[cloud_id].id or search in self._clouds[cloud_id].provider] else: return [self._clouds[cloud_id] for cloud_id in self._clouds.keys()]
[ "def", "clouds", "(", "self", ",", "id", "=", "None", ",", "name", "=", "None", ",", "provider", "=", "None", ",", "search", "=", "None", ")", ":", "if", "self", ".", "_clouds", "is", "None", ":", "self", ".", "_clouds", "=", "{", "}", "self", ...
Property-like function to call the _list_clouds function in order to populate self._clouds dict :returns: A list of Cloud instances.
[ "Property", "-", "like", "function", "to", "call", "the", "_list_clouds", "function", "in", "order", "to", "populate", "self", ".", "_clouds", "dict" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L145-L171
mistio/mist.client
src/mistclient/__init__.py
MistClient._list_keys
def _list_keys(self): """ Retrieves a list of all added Keys and populates the self._keys dict with Key instances :returns: A list of Keys instances """ req = self.request(self.uri + '/keys') keys = req.get().json() if keys: self._keys = {} for key in keys: self._keys[key['id']] = Key(key, self) else: self._keys = {}
python
def _list_keys(self): """ Retrieves a list of all added Keys and populates the self._keys dict with Key instances :returns: A list of Keys instances """ req = self.request(self.uri + '/keys') keys = req.get().json() if keys: self._keys = {} for key in keys: self._keys[key['id']] = Key(key, self) else: self._keys = {}
[ "def", "_list_keys", "(", "self", ")", ":", "req", "=", "self", ".", "request", "(", "self", ".", "uri", "+", "'/keys'", ")", "keys", "=", "req", ".", "get", "(", ")", ".", "json", "(", ")", "if", "keys", ":", "self", ".", "_keys", "=", "{", ...
Retrieves a list of all added Keys and populates the self._keys dict with Key instances :returns: A list of Keys instances
[ "Retrieves", "a", "list", "of", "all", "added", "Keys", "and", "populates", "the", "self", ".", "_keys", "dict", "with", "Key", "instances" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L378-L392
mistio/mist.client
src/mistclient/__init__.py
MistClient.keys
def keys(self, id=None, search=None): """ Property-like function to call the _list_keys function in order to populate self._keys dict :returns: A list of Key instances """ if self._keys is None: self._keys = {} self._list_keys() if id: return [self._keys[key_id] for key_id in self._keys.keys() if id == self._keys[key_id].id] elif search: return [self._keys[key_id] for key_id in self._keys.keys() if (search in self._keys[key_id].id) or (search in self._keys[key_id].name)] else: return [self._keys[key_id] for key_id in self._keys.keys()]
python
def keys(self, id=None, search=None): """ Property-like function to call the _list_keys function in order to populate self._keys dict :returns: A list of Key instances """ if self._keys is None: self._keys = {} self._list_keys() if id: return [self._keys[key_id] for key_id in self._keys.keys() if id == self._keys[key_id].id] elif search: return [self._keys[key_id] for key_id in self._keys.keys() if (search in self._keys[key_id].id) or (search in self._keys[key_id].name)] else: return [self._keys[key_id] for key_id in self._keys.keys()]
[ "def", "keys", "(", "self", ",", "id", "=", "None", ",", "search", "=", "None", ")", ":", "if", "self", ".", "_keys", "is", "None", ":", "self", ".", "_keys", "=", "{", "}", "self", ".", "_list_keys", "(", ")", "if", "id", ":", "return", "[", ...
Property-like function to call the _list_keys function in order to populate self._keys dict :returns: A list of Key instances
[ "Property", "-", "like", "function", "to", "call", "the", "_list_keys", "function", "in", "order", "to", "populate", "self", ".", "_keys", "dict" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L394-L412
mistio/mist.client
src/mistclient/__init__.py
MistClient.generate_key
def generate_key(self): """ Ask mist.io to randomly generate a private ssh-key to be used with the creation of a new Key :returns: A string of a randomly generated ssh private key """ req = self.request(self.uri + "/keys") private_key = req.post().json() return private_key['priv']
python
def generate_key(self): """ Ask mist.io to randomly generate a private ssh-key to be used with the creation of a new Key :returns: A string of a randomly generated ssh private key """ req = self.request(self.uri + "/keys") private_key = req.post().json() return private_key['priv']
[ "def", "generate_key", "(", "self", ")", ":", "req", "=", "self", ".", "request", "(", "self", ".", "uri", "+", "\"/keys\"", ")", "private_key", "=", "req", ".", "post", "(", ")", ".", "json", "(", ")", "return", "private_key", "[", "'priv'", "]" ]
Ask mist.io to randomly generate a private ssh-key to be used with the creation of a new Key :returns: A string of a randomly generated ssh private key
[ "Ask", "mist", ".", "io", "to", "randomly", "generate", "a", "private", "ssh", "-", "key", "to", "be", "used", "with", "the", "creation", "of", "a", "new", "Key" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L430-L439
mistio/mist.client
src/mistclient/__init__.py
MistClient.add_key
def add_key(self, key_name, private): """ Add a new key to mist.io :param key_name: Name of the new key (it will be used as the key's id as well). :param private: Private ssh-key in string format (see also generate_key() ). :returns: An updated list of added keys. """ payload = { 'name': key_name, 'priv': private } data = json.dumps(payload) req = self.request(self.uri + '/keys', data=data) response = req.put().json() self.update_keys() return response
python
def add_key(self, key_name, private): """ Add a new key to mist.io :param key_name: Name of the new key (it will be used as the key's id as well). :param private: Private ssh-key in string format (see also generate_key() ). :returns: An updated list of added keys. """ payload = { 'name': key_name, 'priv': private } data = json.dumps(payload) req = self.request(self.uri + '/keys', data=data) response = req.put().json() self.update_keys() return response
[ "def", "add_key", "(", "self", ",", "key_name", ",", "private", ")", ":", "payload", "=", "{", "'name'", ":", "key_name", ",", "'priv'", ":", "private", "}", "data", "=", "json", ".", "dumps", "(", "payload", ")", "req", "=", "self", ".", "request", ...
Add a new key to mist.io :param key_name: Name of the new key (it will be used as the key's id as well). :param private: Private ssh-key in string format (see also generate_key() ). :returns: An updated list of added keys.
[ "Add", "a", "new", "key", "to", "mist", ".", "io" ]
train
https://github.com/mistio/mist.client/blob/bc190af2cba358fa556a69b205c12a77a34eb2a8/src/mistclient/__init__.py#L441-L461
darkfeline/animanager
animanager/commands/addrule.py
command
def command(state, args): """Add a priority rule for files.""" args = parser.parse_args(args[1:]) row_id = query.files.add_priority_rule(state.db, args.regexp, args.priority) del state.file_picker print('Added rule {}'.format(row_id))
python
def command(state, args): """Add a priority rule for files.""" args = parser.parse_args(args[1:]) row_id = query.files.add_priority_rule(state.db, args.regexp, args.priority) del state.file_picker print('Added rule {}'.format(row_id))
[ "def", "command", "(", "state", ",", "args", ")", ":", "args", "=", "parser", ".", "parse_args", "(", "args", "[", "1", ":", "]", ")", "row_id", "=", "query", ".", "files", ".", "add_priority_rule", "(", "state", ".", "db", ",", "args", ".", "regex...
Add a priority rule for files.
[ "Add", "a", "priority", "rule", "for", "files", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/commands/addrule.py#L22-L27
darkfeline/animanager
animanager/anidb.py
request_anime
def request_anime(aid: int) -> 'Anime': """Make an anime API request.""" anime_info = alib.request_anime(_CLIENT, aid) return Anime._make(anime_info)
python
def request_anime(aid: int) -> 'Anime': """Make an anime API request.""" anime_info = alib.request_anime(_CLIENT, aid) return Anime._make(anime_info)
[ "def", "request_anime", "(", "aid", ":", "int", ")", "->", "'Anime'", ":", "anime_info", "=", "alib", ".", "request_anime", "(", "_CLIENT", ",", "aid", ")", "return", "Anime", ".", "_make", "(", "anime_info", ")" ]
Make an anime API request.
[ "Make", "an", "anime", "API", "request", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/anidb.py#L35-L38
darkfeline/animanager
animanager/anidb.py
Episode.number
def number(self) -> int: """Episode number. Unique for an anime and episode type, but not unique across episode types for the same anime. """ match = self._NUMBER_SUFFIX.search(self.epno) return int(match.group(1))
python
def number(self) -> int: """Episode number. Unique for an anime and episode type, but not unique across episode types for the same anime. """ match = self._NUMBER_SUFFIX.search(self.epno) return int(match.group(1))
[ "def", "number", "(", "self", ")", "->", "int", ":", "match", "=", "self", ".", "_NUMBER_SUFFIX", ".", "search", "(", "self", ".", "epno", ")", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")" ]
Episode number. Unique for an anime and episode type, but not unique across episode types for the same anime.
[ "Episode", "number", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/anidb.py#L58-L65
darkfeline/animanager
animanager/anidb.py
Episode.title
def title(self) -> str: """Episode title.""" for title in self.titles: if title.lang == 'ja': return title.title # In case there's no Japanese title. return self.titles[0].title
python
def title(self) -> str: """Episode title.""" for title in self.titles: if title.lang == 'ja': return title.title # In case there's no Japanese title. return self.titles[0].title
[ "def", "title", "(", "self", ")", "->", "str", ":", "for", "title", "in", "self", ".", "titles", ":", "if", "title", ".", "lang", "==", "'ja'", ":", "return", "title", ".", "title", "# In case there's no Japanese title.", "return", "self", ".", "titles", ...
Episode title.
[ "Episode", "title", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/anidb.py#L68-L74
darkfeline/animanager
animanager/anidb.py
TitleSearcher.search
def search(self, query: 're.Pattern') -> 'Iterable[_WorkTitles]': """Search titles using a compiled RE query.""" titles: 'Titles' for titles in self._titles_list: title: 'AnimeTitle' for title in titles.titles: if query.search(title.title): yield WorkTitles( aid=titles.aid, main_title=_get_main_title(titles.titles), titles=[t.title for t in titles.titles], ) continue
python
def search(self, query: 're.Pattern') -> 'Iterable[_WorkTitles]': """Search titles using a compiled RE query.""" titles: 'Titles' for titles in self._titles_list: title: 'AnimeTitle' for title in titles.titles: if query.search(title.title): yield WorkTitles( aid=titles.aid, main_title=_get_main_title(titles.titles), titles=[t.title for t in titles.titles], ) continue
[ "def", "search", "(", "self", ",", "query", ":", "'re.Pattern'", ")", "->", "'Iterable[_WorkTitles]'", ":", "titles", ":", "'Titles'", "for", "titles", "in", "self", ".", "_titles_list", ":", "title", ":", "'AnimeTitle'", "for", "title", "in", "titles", ".",...
Search titles using a compiled RE query.
[ "Search", "titles", "using", "a", "compiled", "RE", "query", "." ]
train
https://github.com/darkfeline/animanager/blob/55d92e4cbdc12aac8ebe302420d2cff3fa9fa148/animanager/anidb.py#L93-L105
elifesciences/proofreader-python
proofreader/license_checker/__init__.py
_get_packages
def _get_packages(): # type: () -> List[Package] """Convert `pkg_resources.working_set` into a list of `Package` objects. :return: list """ return [Package(pkg_obj=pkg) for pkg in sorted(pkg_resources.working_set, key=lambda x: str(x).lower())]
python
def _get_packages(): # type: () -> List[Package] """Convert `pkg_resources.working_set` into a list of `Package` objects. :return: list """ return [Package(pkg_obj=pkg) for pkg in sorted(pkg_resources.working_set, key=lambda x: str(x).lower())]
[ "def", "_get_packages", "(", ")", ":", "# type: () -> List[Package]", "return", "[", "Package", "(", "pkg_obj", "=", "pkg", ")", "for", "pkg", "in", "sorted", "(", "pkg_resources", ".", "working_set", ",", "key", "=", "lambda", "x", ":", "str", "(", "x", ...
Convert `pkg_resources.working_set` into a list of `Package` objects. :return: list
[ "Convert", "pkg_resources", ".", "working_set", "into", "a", "list", "of", "Package", "objects", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/license_checker/__init__.py#L12-L19
elifesciences/proofreader-python
proofreader/license_checker/__init__.py
_get_whitelist_licenses
def _get_whitelist_licenses(config_path): # type: (str) -> List[str] """Get whitelist license names from config file. :param config_path: str :return: list """ whitelist_licenses = [] try: print('config path', config_path) with open(config_path) as config: whitelist_licenses = [line.rstrip() for line in config] except IOError: # pragma: no cover print('Warning: No {} file was found.'.format(LICENSE_CHECKER_CONFIG_NAME)) return whitelist_licenses
python
def _get_whitelist_licenses(config_path): # type: (str) -> List[str] """Get whitelist license names from config file. :param config_path: str :return: list """ whitelist_licenses = [] try: print('config path', config_path) with open(config_path) as config: whitelist_licenses = [line.rstrip() for line in config] except IOError: # pragma: no cover print('Warning: No {} file was found.'.format(LICENSE_CHECKER_CONFIG_NAME)) return whitelist_licenses
[ "def", "_get_whitelist_licenses", "(", "config_path", ")", ":", "# type: (str) -> List[str]", "whitelist_licenses", "=", "[", "]", "try", ":", "print", "(", "'config path'", ",", "config_path", ")", "with", "open", "(", "config_path", ")", "as", "config", ":", "...
Get whitelist license names from config file. :param config_path: str :return: list
[ "Get", "whitelist", "license", "names", "from", "config", "file", "." ]
train
https://github.com/elifesciences/proofreader-python/blob/387b3c65ee7777e26b3a7340179dc4ed68f24f58/proofreader/license_checker/__init__.py#L22-L39