Dataset schema (field: type, observed range):

    repo              string, length 7-55
    path              string, length 4-127
    func_name         string, length 1-88
    original_string   string, length 75-19.8k
    language          string, 1 distinct value
    code              string, length 75-19.8k
    code_tokens       list, length 20-707
    docstring         string, length 3-17.3k
    docstring_tokens  list, length 3-222
    sha               string, length 40
    url               string, length 87-242
    partition         string, 1 distinct value
    idx               int64, 0-252k
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener.terminate
language: python
code:

    def terminate(self):
        """Stop the standalone manager."""
        logger.info(__(
            "Terminating Resolwe listener on channel '{}'.",
            state.MANAGER_EXECUTOR_CHANNELS.queue
        ))
        self._should_stop = True

docstring: Stop the standalone manager.
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L145-L151
partition: train
idx: 45200
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener._queue_response_channel
language: python
code:

    def _queue_response_channel(self, obj):
        """Generate the feedback channel name from the object's id.

        :param obj: The Channels message object.
        """
        return '{}.{}'.format(state.MANAGER_EXECUTOR_CHANNELS.queue_response,
                              obj[ExecutorProtocol.DATA_ID])

docstring: Generate the feedback channel name from the object's id. :param obj: The Channels message object.
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L153-L158
partition: train
idx: 45201
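Note on _queue_response_channel: the channel name is plain string concatenation of the response-queue prefix and the Data id. A minimal sketch with hypothetical values standing in for state.MANAGER_EXECUTOR_CHANNELS.queue_response and the ExecutorProtocol key:

    queue_response = 'flow.manager.queue_response'  # hypothetical prefix
    obj = {'data_id': 42}                           # stand-in message object
    print('{}.{}'.format(queue_response, obj['data_id']))
    # flow.manager.queue_response.42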
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener._send_reply
language: python
code:

    async def _send_reply(self, obj, reply):
        """Send a reply with added standard fields back to executor.

        :param obj: The original Channels message object to which we're
            replying.
        :param reply: The message contents dictionary. The data id is added
            automatically (``reply`` is modified in place).
        """
        reply.update({
            ExecutorProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID],
        })
        await self._call_redis(aioredis.Redis.rpush,
                               self._queue_response_channel(obj), json.dumps(reply))

docstring: Send a reply with added standard fields back to executor. :param obj: The original Channels message object to which we're replying. :param reply: The message contents dictionary. The data id is added automatically (``reply`` is modified in place).
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L165-L176
partition: train
idx: 45202
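Note on _send_reply: as the docstring says, the reply dictionary is enriched in place before serialization. A tiny sketch of that contract, with plain string keys standing in for the ExecutorProtocol constants:

    import json

    obj = {'data_id': 42}
    reply = {'result': 'OK'}
    reply.update({'data_id': obj['data_id']})
    print(json.dumps(reply))  # {"result": "OK", "data_id": 42}
    # the caller's reply dict now carries the data id as well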
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener.hydrate_spawned_files
language: python
code:

    def hydrate_spawned_files(self, exported_files_mapper, filename, data_id):
        """Pop the given file's map from the exported files mapping.

        :param exported_files_mapper: The dict of file mappings this
            process produced.
        :param filename: The filename to format and remove from the
            mapping.
        :param data_id: The id of the :meth:`~resolwe.flow.models.Data`
            object owning the mapping.
        :return: The formatted mapping between the filename and
            temporary file path.
        :rtype: dict
        """
        # JSON only has string dictionary keys, so the Data object id
        # needs to be stringified first.
        data_id = str(data_id)

        if filename not in exported_files_mapper[data_id]:
            raise KeyError("Use 're-export' to prepare the file for spawned process: {}".format(filename))

        export_fn = exported_files_mapper[data_id].pop(filename)

        if exported_files_mapper[data_id] == {}:
            exported_files_mapper.pop(data_id)

        return {'file_temp': export_fn, 'file': filename}

docstring: Pop the given file's map from the exported files mapping. :param exported_files_mapper: The dict of file mappings this process produced. :param filename: The filename to format and remove from the mapping. :param data_id: The id of the :meth:`~resolwe.flow.models.Data` object owning the mapping. :return: The formatted mapping between the filename and temporary file path. :rtype: dict
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L178-L203
partition: train
idx: 45203
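Note on hydrate_spawned_files: the method is pure dictionary bookkeeping, so its behavior is easy to demonstrate standalone. A sketch with a hypothetical mapping, showing the stringified id and the cleanup of the emptied per-object entry:

    def hydrate_spawned_files(exported_files_mapper, filename, data_id):
        # free-function copy of the logic above, for illustration only
        data_id = str(data_id)
        if filename not in exported_files_mapper[data_id]:
            raise KeyError("Use 're-export' to prepare the file for spawned process: {}".format(filename))
        export_fn = exported_files_mapper[data_id].pop(filename)
        if exported_files_mapper[data_id] == {}:
            exported_files_mapper.pop(data_id)
        return {'file_temp': export_fn, 'file': filename}

    mapper = {'17': {'out.txt': '/tmp/export_ab12'}}  # hypothetical export map
    print(hydrate_spawned_files(mapper, 'out.txt', 17))
    # {'file_temp': '/tmp/export_ab12', 'file': 'out.txt'}
    print(mapper)  # {} -- the emptied entry for Data id 17 was removed too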
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener.handle_abort
language: python
code:

    def handle_abort(self, obj):
        """Handle an incoming ``Data`` abort processing request.

        .. IMPORTANT::

            This only makes manager's state consistent and doesn't affect
            Data object in any way. Any changes to the Data must be
            applied over ``handle_update`` method.

        :param obj: The Channels message object. Command object format:

            .. code-block:: none

                {
                    'command': 'abort',
                    'data_id': [id of the :class:`~resolwe.flow.models.Data`
                                object this command was triggered by],
                }
        """
        async_to_sync(consumer.send_event)({
            WorkerProtocol.COMMAND: WorkerProtocol.ABORT,
            WorkerProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID],
            WorkerProtocol.FINISH_COMMUNICATE_EXTRA: {
                'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get(
                    'NAME', 'resolwe.flow.executors.local'),
            },
        })

docstring: Handle an incoming ``Data`` abort processing request. .. IMPORTANT:: This only makes manager's state consistent and doesn't affect Data object in any way. Any changes to the Data must be applied over ``handle_update`` method. :param obj: The Channels message object. Command object format: { 'command': 'abort', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command was triggered by], }
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L513-L538
partition: train
idx: 45204
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener.handle_log
language: python
code:

    def handle_log(self, obj):
        """Handle an incoming log processing request.

        :param obj: The Channels message object. Command object format:

            .. code-block:: none

                {
                    'command': 'log',
                    'message': [log message]
                }
        """
        record_dict = json.loads(obj[ExecutorProtocol.LOG_MESSAGE])
        record_dict['msg'] = record_dict['msg']

        executors_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'executors')
        record_dict['pathname'] = os.path.join(executors_dir, record_dict['pathname'])
        logger.handle(logging.makeLogRecord(record_dict))

docstring: Handle an incoming log processing request. :param obj: The Channels message object. Command object format: { 'command': 'log', 'message': [log message] }
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L540-L558
partition: train
idx: 45205
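Note on handle_log: logging.makeLogRecord rebuilds a LogRecord from a plain dict, which is what lets the listener replay records shipped as JSON from the executor. A self-contained sketch with a hypothetical payload (field names follow the standard LogRecord attributes):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('resolwe.flow.managers.listener')

    record_dict = {  # hypothetical payload of the kind an executor might send
        'name': 'resolwe.executor',
        'levelno': logging.INFO,
        'levelname': 'INFO',
        'msg': 'Processing finished',
        'pathname': 'run.py',
        'lineno': 10,
    }
    logger.handle(logging.makeLogRecord(record_dict))
    # INFO:resolwe.executor:Processing finished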
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener.push_stats
language: python
code:

    async def push_stats(self):
        """Push current stats to Redis."""
        snapshot = self._make_stats()
        try:
            serialized = json.dumps(snapshot)
            await self._call_redis(aioredis.Redis.set, state.MANAGER_LISTENER_STATS, serialized)
            await self._call_redis(aioredis.Redis.expire, state.MANAGER_LISTENER_STATS, 3600)
        except TypeError:
            logger.error(__(
                "Listener can't serialize statistics:\n\n{}",
                traceback.format_exc()
            ))
        except aioredis.RedisError:
            logger.error(__(
                "Listener can't store updated statistics:\n\n{}",
                traceback.format_exc()
            ))

docstring: Push current stats to Redis.
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L567-L583
partition: train
idx: 45206
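Note on push_stats: the TypeError branch exists because json.dumps rejects values that are not JSON-serializable. A quick sketch of the failure mode the handler guards against (the Redis calls are elided):

    import json

    good = {'queue_len': 3, 'load_1m': 0.4}
    print(json.dumps(good))  # serializes fine

    bad = {'started': object()}  # e.g. a non-serializable value in the snapshot
    try:
        json.dumps(bad)
    except TypeError as exc:
        print('cannot serialize statistics:', exc)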
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener.check_critical_load
language: python
code:

    def check_critical_load(self):
        """Check for critical load and log an error if necessary."""
        if self.load_avg.intervals['1m'].value > 1:
            if self.last_load_level == 1 and time.time() - self.last_load_log < 30:
                return
            self.last_load_log = time.time()
            self.last_load_level = 1
            logger.error(
                "Listener load limit exceeded, the system can't handle this!",
                extra=self._make_stats()
            )
        elif self.load_avg.intervals['1m'].value > 0.8:
            if self.last_load_level == 0.8 and time.time() - self.last_load_log < 30:
                return
            self.last_load_log = time.time()
            self.last_load_level = 0.8
            logger.warning(
                "Listener load approaching critical!",
                extra=self._make_stats()
            )
        else:
            self.last_load_log = -math.inf
            self.last_load_level = 0

docstring: Check for critical load and log an error if necessary.
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L585-L609
partition: train
idx: 45207
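Note on check_critical_load: the handler combines two thresholds with a 30-second log throttle per level. A minimal free-standing sketch of that pattern (names are illustrative, not the Resolwe API):

    import math
    import time

    class LoadWatcher:
        def __init__(self):
            self.last_load_log = -math.inf
            self.last_load_level = 0

        def check(self, load_1m):
            """Return 'error', 'warning' or None, at most one log per level per 30 s."""
            if load_1m > 1:
                level, verdict = 1, 'error'
            elif load_1m > 0.8:
                level, verdict = 0.8, 'warning'
            else:
                self.last_load_log = -math.inf  # reset once load recovers
                self.last_load_level = 0
                return None
            if self.last_load_level == level and time.time() - self.last_load_log < 30:
                return None  # suppressed: same level logged less than 30 s ago
            self.last_load_log = time.time()
            self.last_load_level = level
            return verdict

    w = LoadWatcher()
    print(w.check(1.2))  # error
    print(w.check(1.2))  # None (throttled)
    print(w.check(0.9))  # warning (level changed, so not throttled)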
repo: genialis/resolwe
path: resolwe/flow/managers/listener.py
func_name: ExecutorListener.run
language: python
code:

    async def run(self):
        """Run the main listener run loop.

        Doesn't return until :meth:`terminate` is called.
        """
        logger.info(__(
            "Starting Resolwe listener on channel '{}'.",
            state.MANAGER_EXECUTOR_CHANNELS.queue
        ))
        while not self._should_stop:
            await self.push_stats()
            ret = await self._call_redis(aioredis.Redis.blpop,
                                         state.MANAGER_EXECUTOR_CHANNELS.queue, timeout=1)
            if ret is None:
                self.load_avg.add(0)
                continue
            remaining = await self._call_redis(aioredis.Redis.llen,
                                               state.MANAGER_EXECUTOR_CHANNELS.queue)
            self.load_avg.add(remaining + 1)
            self.check_critical_load()
            _, item = ret
            try:
                item = item.decode('utf-8')
                logger.debug(__("Got command from executor: {}", item))
                obj = json.loads(item)
            except json.JSONDecodeError:
                logger.error(
                    __("Undecodable command packet:\n\n{}"),
                    traceback.format_exc()
                )
                continue

            command = obj.get(ExecutorProtocol.COMMAND, None)
            if command is None:
                continue

            service_start = time.perf_counter()

            handler = getattr(self, 'handle_' + command, None)
            if handler:
                try:
                    with PrioritizedBatcher.global_instance():
                        await database_sync_to_async(handler)(obj)
                except Exception:  # pylint: disable=broad-except
                    logger.error(__(
                        "Executor command handling error:\n\n{}",
                        traceback.format_exc()
                    ))
            else:
                logger.error(
                    __("Unknown executor command '{}'.", command),
                    extra={'decoded_packet': obj}
                )

            # We do want to measure wall-clock time elapsed, because
            # system load will impact event handling performance. On
            # a lagging system, good internal performance is meaningless.
            service_end = time.perf_counter()
            self.service_time.update(service_end - service_start)

        logger.info(__(
            "Stopping Resolwe listener on channel '{}'.",
            state.MANAGER_EXECUTOR_CHANNELS.queue
        ))

docstring: Run the main listener run loop. Doesn't return until :meth:`terminate` is called.
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L611-L671
partition: train
idx: 45208
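Note on run: the loop's dispatch step is plain getattr-based routing; a 'command' field selects a handle_<command> method. A stripped-down sketch of just that routing, with no Redis and no async:

    class MiniListener:
        def handle_update(self, obj):
            print('updating Data', obj['data_id'])

        def dispatch(self, obj):
            command = obj.get('command')
            handler = getattr(self, 'handle_' + command, None) if command else None
            if handler is None:
                print("Unknown executor command '{}'.".format(command))
                return
            handler(obj)

    listener = MiniListener()
    listener.dispatch({'command': 'update', 'data_id': 42})  # updating Data 42
    listener.dispatch({'command': 'bogus'})                  # Unknown executor command 'bogus'.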
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.dokanMain
language: python
code:

    def dokanMain(self, dokanOptions, dokanOperations):
        """Issue callback to start dokan drive.

        :param dokanOptions: drive options
        :type dokanOptions: DOKAN_OPTIONS
        :param dokanOperations: pointers to implemented file system calls
        :type dokanOperations: DokanOperations
        :return: error code
        :rtype: int
        """
        return int(
            self.dokanDLL.DokanMain(
                PDOKAN_OPTIONS(dokanOptions), PDOKAN_OPERATIONS(dokanOperations)
            )
        )

docstring: Issue callback to start dokan drive. :param dokanOptions: drive options :type dokanOptions: DOKAN_OPTIONS :param dokanOperations: pointers to implemented file system calls :type dokanOperations: DokanOperations :return: error code :rtype: int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L86-L101
partition: train
idx: 45209
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.findFilesWithPattern
language: python
code:

    def findFilesWithPattern(
            self, fileName, searchPattern, fillFindData, dokanFileInfo
    ):
        """Find files in a certain path that match the search pattern.

        :param fileName: path to search
        :type fileName: ctypes.c_wchar_p
        :param searchPattern: pattern to search for
        :type searchPattern: ctypes.c_wchar_p
        :param fillFindData: function pointer for populating search results
        :type fillFindData: PFillFindData
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        try:
            ret = self.operations('findFilesWithPattern', fileName, searchPattern)
            if ret is None:
                return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR
            for r in ret:
                create_ft = self.python_timestamp_to_win32_filetime(r['ctime'])
                last_access_ft = self.python_timestamp_to_win32_filetime(r['atime'])
                last_write_ft = self.python_timestamp_to_win32_filetime(r['wtime'])
                cft = ctypes.wintypes.FILETIME(create_ft[0], create_ft[1])
                laft = ctypes.wintypes.FILETIME(last_access_ft[0], last_access_ft[1])
                lwft = ctypes.wintypes.FILETIME(last_write_ft[0], last_write_ft[1])
                size = self.pyint_to_double_dwords(r['size'])
                File = ctypes.wintypes.WIN32_FIND_DATAW(
                    ctypes.c_ulong(r['attr']),  # attributes
                    cft,                        # creation time
                    laft,                       # last access time
                    lwft,                       # last write time
                    size[1],                    # upper bits of size
                    size[0],                    # lower bits of size
                    ctypes.c_ulong(0),          # reserved for FS
                    ctypes.c_ulong(0),          # reserved for FS
                    r['name'],                  # file name
                    '',                         # alternate name
                )
                pFile = ctypes.wintypes.PWIN32_FIND_DATAW(File)
                fillFindData(pFile, dokanFileInfo)
            return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS
        except Exception as e:
            logging.error('%s', e)
            return d1_onedrive.impl.drivers.dokan.const.DOKAN_ERROR

docstring: Find files in a certain path that match the search pattern. :param fileName: path to search :type fileName: ctypes.c_wchar_p :param searchPattern: pattern to search for :type searchPattern: ctypes.c_wchar_p :param fillFindData: function pointer for populating search results :type fillFindData: PFillFindData :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L367-L413
partition: train
idx: 45210
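Note on findFilesWithPattern: the size[0]/size[1] pair comes from splitting a Python int into two 32-bit halves, since WIN32_FIND_DATAW stores file size as high and low DWORDs. A sketch of the split (pyint_to_double_dwords itself is part of the Dokan driver, so this only illustrates the arithmetic):

    size = 5_000_000_000  # larger than 32 bits
    low = size & 0xFFFFFFFF
    high = size >> 32
    print(high, low)                   # 1 705032704
    print((high << 32) + low == size)  # True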
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.setFileTime
language: python
code:

    def setFileTime(
            self, fileName, creationTime, lastAccessTime, lastWriteTime, dokanFileInfo
    ):
        """Set time values for a file.

        :param fileName: name of file to set time values for
        :type fileName: ctypes.c_wchar_p
        :param creationTime: creation time of file
        :type creationTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
        :param lastAccessTime: last access time of file
        :type lastAccessTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
        :param lastWriteTime: last write time of file
        :type lastWriteTime: ctypes.POINTER(ctypes.wintypes.FILETIME)
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        return self.operations('setFileTime', fileName)

docstring: Set time values for a file. :param fileName: name of file to set time values for :type fileName: ctypes.c_wchar_p :param creationTime: creation time of file :type creationTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param lastAccessTime: last access time of file :type lastAccessTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param lastWriteTime: last write time of file :type lastWriteTime: ctypes.POINTER(ctypes.wintypes.FILETIME) :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L430-L449
partition: train
idx: 45211
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.lockFile
language: python
code:

    def lockFile(self, fileName, byteOffset, length, dokanFileInfo):
        """Lock a file.

        :param fileName: name of file to lock
        :type fileName: ctypes.c_wchar_p
        :param byteOffset: location to start lock
        :type byteOffset: ctypes.c_longlong
        :param length: number of bytes to lock
        :type length: ctypes.c_longlong
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        return self.operations('lockFile', fileName, byteOffset, length)

docstring: Lock a file. :param fileName: name of file to lock :type fileName: ctypes.c_wchar_p :param byteOffset: location to start lock :type byteOffset: ctypes.c_longlong :param length: number of bytes to lock :type length: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L524-L539
partition: train
idx: 45212
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.unlockFile
language: python
code:

    def unlockFile(self, fileName, byteOffset, length, dokanFileInfo):
        """Unlock a file.

        :param fileName: name of file to unlock
        :type fileName: ctypes.c_wchar_p
        :param byteOffset: location to start unlock
        :type byteOffset: ctypes.c_longlong
        :param length: number of bytes to unlock
        :type length: ctypes.c_longlong
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        return self.operations('unlockFile', fileName, byteOffset, length)

docstring: Unlock a file. :param fileName: name of file to unlock :type fileName: ctypes.c_wchar_p :param byteOffset: location to start unlock :type byteOffset: ctypes.c_longlong :param length: number of bytes to unlock :type length: ctypes.c_longlong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L541-L556
partition: train
idx: 45213
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.getDiskFreeSpace
language: python
code:

    def getDiskFreeSpace(
            self,
            freeBytesAvailable,
            totalNumberOfBytes,
            totalNumberOfFreeBytes,
            dokanFileInfo,
    ):
        """Get the amount of free space on this volume.

        :param freeBytesAvailable: pointer for free bytes available
        :type freeBytesAvailable: ctypes.c_void_p
        :param totalNumberOfBytes: pointer for total number of bytes
        :type totalNumberOfBytes: ctypes.c_void_p
        :param totalNumberOfFreeBytes: pointer for total number of free bytes
        :type totalNumberOfFreeBytes: ctypes.c_void_p
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        ret = self.operations('getDiskFreeSpace')
        ctypes.memmove(
            freeBytesAvailable,
            ctypes.byref(ctypes.c_longlong(ret['freeBytesAvailable'])),
            ctypes.sizeof(ctypes.c_longlong),
        )
        ctypes.memmove(
            totalNumberOfBytes,
            ctypes.byref(ctypes.c_longlong(ret['totalNumberOfBytes'])),
            ctypes.sizeof(ctypes.c_longlong),
        )
        ctypes.memmove(
            totalNumberOfFreeBytes,
            ctypes.byref(ctypes.c_longlong(ret['totalNumberOfFreeBytes'])),
            ctypes.sizeof(ctypes.c_longlong),
        )
        return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS

docstring: Get the amount of free space on this volume. :param freeBytesAvailable: pointer for free bytes available :type freeBytesAvailable: ctypes.c_void_p :param totalNumberOfBytes: pointer for total number of bytes :type totalNumberOfBytes: ctypes.c_void_p :param totalNumberOfFreeBytes: pointer for total number of free bytes :type totalNumberOfFreeBytes: ctypes.c_void_p :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L558-L595
partition: train
idx: 45214
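Note on getDiskFreeSpace: each argument is an out-parameter, and the handler writes a 64-bit integer through the pointer with ctypes.memmove. The pattern itself is not Windows-specific; a sketch simulating one out-parameter on any platform:

    import ctypes

    out = ctypes.c_longlong(0)            # pretend this is the caller's buffer
    value = ctypes.c_longlong(1024 ** 3)  # e.g. 1 GiB of free bytes
    ctypes.memmove(ctypes.byref(out), ctypes.byref(value),
                   ctypes.sizeof(ctypes.c_longlong))
    print(out.value)  # 1073741824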
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.getVolumeInformation
language: python
code:

    def getVolumeInformation(
            self,
            volumeNameBuffer,
            volumeNameSize,
            volumeSerialNumber,
            maximumComponentLength,
            fileSystemFlags,
            fileSystemNameBuffer,
            fileSystemNameSize,
            dokanFileInfo,
    ):
        """Get information about the volume.

        :param volumeNameBuffer: buffer for volume name
        :type volumeNameBuffer: ctypes.c_void_p
        :param volumeNameSize: volume name buffer size
        :type volumeNameSize: ctypes.c_ulong
        :param volumeSerialNumber: buffer for volume serial number
        :type volumeSerialNumber: ctypes.c_void_p
        :param maximumComponentLength: buffer for maximum component length
        :type maximumComponentLength: ctypes.c_void_p
        :param fileSystemFlags: buffer for file system flags
        :type fileSystemFlags: ctypes.c_void_p
        :param fileSystemNameBuffer: buffer for file system name
        :type fileSystemNameBuffer: ctypes.c_void_p
        :param fileSystemNameSize: file system name buffer size
        :type fileSystemNameSize: ctypes.c_ulong
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        ret = self.operations('getVolumeInformation')
        # populate volume name buffer
        ctypes.memmove(
            volumeNameBuffer,
            ret['volumeNameBuffer'],
            min(
                ctypes.sizeof(ctypes.c_wchar) * len(ret['volumeNameBuffer']),
                volumeNameSize,
            ),
        )
        # populate serial number buffer
        serialNum = ctypes.c_ulong(self.serialNumber)
        ctypes.memmove(
            volumeSerialNumber, ctypes.byref(serialNum), ctypes.sizeof(ctypes.c_ulong)
        )
        # populate max component length
        maxCompLen = ctypes.c_ulong(ret['maximumComponentLength'])
        ctypes.memmove(
            maximumComponentLength,
            ctypes.byref(maxCompLen),
            ctypes.sizeof(ctypes.c_ulong),
        )
        # populate filesystem flags buffer
        fsFlags = ctypes.c_ulong(ret['fileSystemFlags'])
        ctypes.memmove(
            fileSystemFlags, ctypes.byref(fsFlags), ctypes.sizeof(ctypes.c_ulong)
        )
        # populate filesystem name
        ctypes.memmove(
            fileSystemNameBuffer,
            ret['fileSystemNameBuffer'],
            min(
                ctypes.sizeof(ctypes.c_wchar) * len(ret['fileSystemNameBuffer']),
                fileSystemNameSize,
            ),
        )
        return d1_onedrive.impl.drivers.dokan.const.DOKAN_SUCCESS

docstring: Get information about the volume. :param volumeNameBuffer: buffer for volume name :type volumeNameBuffer: ctypes.c_void_p :param volumeNameSize: volume name buffer size :type volumeNameSize: ctypes.c_ulong :param volumeSerialNumber: buffer for volume serial number :type volumeSerialNumber: ctypes.c_void_p :param maximumComponentLength: buffer for maximum component length :type maximumComponentLength: ctypes.c_void_p :param fileSystemFlags: buffer for file system flags :type fileSystemFlags: ctypes.c_void_p :param fileSystemNameBuffer: buffer for file system name :type fileSystemNameBuffer: ctypes.c_void_p :param fileSystemNameSize: file system name buffer size :type fileSystemNameSize: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L597-L666
partition: train
idx: 45215
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.getFileSecurity
language: python
code:

    def getFileSecurity(
            self,
            fileName,
            securityInformation,
            securityDescriptor,
            lengthSecurityDescriptorBuffer,
            lengthNeeded,
            dokanFileInfo,
    ):
        """Get security attributes of a file.

        :param fileName: name of file to get security for
        :type fileName: ctypes.c_wchar_p
        :param securityInformation: buffer for security information
        :type securityInformation: PSECURITY_INFORMATION
        :param securityDescriptor: buffer for security descriptor
        :type securityDescriptor: PSECURITY_DESCRIPTOR
        :param lengthSecurityDescriptorBuffer: length of descriptor buffer
        :type lengthSecurityDescriptorBuffer: ctypes.c_ulong
        :param lengthNeeded: length needed for the buffer
        :type lengthNeeded: ctypes.POINTER(ctypes.c_ulong)
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        return self.operations('getFileSecurity', fileName)

docstring: Get security attributes of a file. :param fileName: name of file to get security for :type fileName: ctypes.c_wchar_p :param securityInformation: buffer for security information :type securityInformation: PSECURITY_INFORMATION :param securityDescriptor: buffer for security descriptor :type securityDescriptor: PSECURITY_DESCRIPTOR :param lengthSecurityDescriptorBuffer: length of descriptor buffer :type lengthSecurityDescriptorBuffer: ctypes.c_ulong :param lengthNeeded: length needed for the buffer :type lengthNeeded: ctypes.POINTER(ctypes.c_ulong) :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L679-L706
partition: train
idx: 45216
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py
func_name: Dokan.setFileSecurity
language: python
code:

    def setFileSecurity(
            self,
            fileName,
            securityInformation,
            securityDescriptor,
            lengthSecurityDescriptorBuffer,
            dokanFileInfo,
    ):
        """Set security attributes of a file.

        :param fileName: name of file to set security for
        :type fileName: ctypes.c_wchar_p
        :param securityInformation: new security information
        :type securityInformation: PSECURITY_INFORMATION
        :param securityDescriptor: new security descriptor
        :type securityDescriptor: PSECURITY_DESCRIPTOR
        :param lengthSecurityDescriptorBuffer: length of descriptor buffer
        :type lengthSecurityDescriptorBuffer: ctypes.c_ulong
        :param dokanFileInfo: used by Dokan
        :type dokanFileInfo: PDOKAN_FILE_INFO
        :return: error code
        :rtype: ctypes.c_int
        """
        return self.operations('setFileSecurity', fileName)

docstring: Set security attributes of a file. :param fileName: name of file to set security for :type fileName: ctypes.c_wchar_p :param securityInformation: new security information :type securityInformation: PSECURITY_INFORMATION :param securityDescriptor: new security descriptor :type securityDescriptor: PSECURITY_DESCRIPTOR :param lengthSecurityDescriptorBuffer: length of descriptor buffer :type lengthSecurityDescriptorBuffer: ctypes.c_ulong :param dokanFileInfo: used by Dokan :type dokanFileInfo: PDOKAN_FILE_INFO :return: error code :rtype: ctypes.c_int
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/dokan.py#L708-L732
partition: train
idx: 45217
repo: DataONEorg/d1_python
path: utilities/src/d1_util/generate_data_package_from_stream.py
func_name: createSimpleResourceMap
language: python
code:

    def createSimpleResourceMap(ore_pid, sci_meta_pid, data_pids):
        """Create a simple resource map with one metadata document and n data
        objects."""
        ore = ResourceMap()
        ore.initialize(ore_pid)
        ore.addMetadataDocument(sci_meta_pid)
        ore.addDataDocuments(data_pids, sci_meta_pid)
        return ore

docstring: Create a simple resource map with one metadata document and n data objects.
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/utilities/src/d1_util/generate_data_package_from_stream.py#L64-L70
partition: train
idx: 45218
repo: DataONEorg/d1_python
path: utilities/src/d1_util/generate_data_package_from_stream.py
func_name: pids2ore
language: python
code:

    def pids2ore(in_stream, fmt='xml', base_url='https://cn.dataone.org/cn'):
        """read pids from in_stream and generate a resource map.

        first pid is the ore_pid
        second is the sci meta pid
        remainder are data pids
        """
        pids = []
        for line in in_stream:
            pid = line.strip()
            if len(pid) > 0:
                if not pid.startswith("# "):
                    pids.append(pid)
        if (len(pids)) < 2:
            raise ValueError("Insufficient identifiers provided.")
        logging.info("Read %d identifiers", len(pids))
        ore = ResourceMap(base_url=base_url)
        logging.info("ORE PID = %s", pids[0])
        ore.initialize(pids[0])
        logging.info("Metadata PID = %s", pids[1])
        ore.addMetadataDocument(pids[1])
        ore.addDataDocuments(pids[2:], pids[1])
        return ore.serialize_to_display(doc_format=fmt)

docstring: read pids from in_stream and generate a resource map. first pid is the ore_pid second is the sci meta pid remainder are data pids
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/utilities/src/d1_util/generate_data_package_from_stream.py#L73-L99
partition: train
idx: 45219
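Note on pids2ore: the identifier-parsing step is independent of DataONE, so the ore/metadata/data split is easy to demonstrate with io.StringIO standing in for a file:

    import io

    in_stream = io.StringIO("# comment line\nore_pid\nmeta_pid\ndata_1\ndata_2\n")
    pids = [line.strip() for line in in_stream
            if line.strip() and not line.strip().startswith('# ')]
    ore_pid, sci_meta_pid, data_pids = pids[0], pids[1], pids[2:]
    print(ore_pid, sci_meta_pid, data_pids)
    # ore_pid meta_pid ['data_1', 'data_2']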
repo: genialis/resolwe
path: resolwe/flow/managers/workload_connectors/celery.py
func_name: Connector.submit
language: python
code:

    def submit(self, data, runtime_dir, argv):
        """Run process.

        For details, see
        :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
        """
        queue = 'ordinary'
        if data.process.scheduling_class == Process.SCHEDULING_CLASS_INTERACTIVE:
            queue = 'hipri'

        logger.debug(__(
            "Connector '{}' running for Data with id {} ({}) in celery queue {}, EAGER is {}.",
            self.__class__.__module__, data.id, repr(argv), queue,
            getattr(settings, 'CELERY_ALWAYS_EAGER', None)
        ))
        celery_run.apply_async((data.id, runtime_dir, argv), queue=queue)

docstring: Run process. For details, see :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
sha: f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
url: https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/workload_connectors/celery.py#L35-L53
partition: train
idx: 45220
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/object_tree.py
func_name: ObjectTree.refresh
language: python
code:

    def refresh(self):
        """Synchronize the local tree of Solr records for DataONE identifiers
        and queries with the reference tree."""
        if self._source_tree.cache_is_stale():
            self._source_tree.refresh()
            logging.info('Refreshing object tree')
            self._init_cache()
            self.sync_cache_with_source_tree()

docstring: Synchronize the local tree of Solr records for DataONE identifiers and queries with the reference tree.
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/object_tree.py#L56-L63
partition: train
idx: 45221
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/object_tree.py
func_name: ObjectTree.get_object_record
language: python
code:

    def get_object_record(self, pid):
        """Get an object that has already been cached in the object tree.

        Caching happens when the object tree is refreshed.
        """
        try:
            return self._cache['records'][pid]
        except KeyError:
            raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException('Unknown PID')

docstring: Get an object that has already been cached in the object tree. Caching happens when the object tree is refreshed.
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/object_tree.py#L72-L81
partition: train
idx: 45222
repo: DataONEorg/d1_python
path: client_onedrive/src/d1_onedrive/impl/object_tree.py
func_name: ObjectTree.get_object_record_with_sync
language: python
code:

    def get_object_record_with_sync(self, pid):
        """Get an object that may not currently be in the cache.

        If the object is not in the cache, an attempt is made to retrieve
        the record from a CN on the fly. If the object is found, it is
        cached before being returned to the user.

        This allows the object tree caching system to be used for objects
        that are not in the object tree. ONEDrive uses this functionality
        for the FlatSpace folder.
        """
        try:
            return self._cache['records'][pid]
        except KeyError:
            return self._get_uncached_object_record(pid)

docstring: Get an object that may not currently be in the cache. If the object is not in the cache, an attempt is made to retrieve the record from a CN on the fly. If the object is found, it is cached before being returned to the user. This allows the object tree caching system to be used for objects that are not in the object tree. ONEDrive uses this functionality for the FlatSpace folder.
sha: 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
url: https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/object_tree.py#L83-L96
partition: train
idx: 45223
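Note on get_object_record_with_sync: this is a cache-aside lookup; try the local cache first, then fall back to a remote fetch that populates the cache. A minimal sketch of the pattern, with a dict faking the CN lookup:

    REMOTE = {'pid_1': {'pid': 'pid_1', 'size': 123}}  # stand-in for the CN
    cache = {'records': {}}

    def _get_uncached_object_record(pid):
        record = REMOTE[pid]            # raises KeyError for truly unknown pids
        cache['records'][pid] = record  # cache before returning, as ONEDrive does
        return record

    def get_object_record_with_sync(pid):
        try:
            return cache['records'][pid]
        except KeyError:
            return _get_uncached_object_record(pid)

    print(get_object_record_with_sync('pid_1'))  # fetched remotely, now cached
    print('pid_1' in cache['records'])           # True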
genialis/resolwe
resolwe/flow/managers/dispatcher.py
dependency_status
def dependency_status(data): """Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other """ parents_statuses = set( DataDependency.objects.filter( child=data, kind=DataDependency.KIND_IO ).distinct('parent__status').values_list('parent__status', flat=True) ) if not parents_statuses: return Data.STATUS_DONE if None in parents_statuses: # Some parents have been deleted. return Data.STATUS_ERROR if Data.STATUS_ERROR in parents_statuses: return Data.STATUS_ERROR if len(parents_statuses) == 1 and Data.STATUS_DONE in parents_statuses: return Data.STATUS_DONE return None
python
def dependency_status(data): """Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other """ parents_statuses = set( DataDependency.objects.filter( child=data, kind=DataDependency.KIND_IO ).distinct('parent__status').values_list('parent__status', flat=True) ) if not parents_statuses: return Data.STATUS_DONE if None in parents_statuses: # Some parents have been deleted. return Data.STATUS_ERROR if Data.STATUS_ERROR in parents_statuses: return Data.STATUS_ERROR if len(parents_statuses) == 1 and Data.STATUS_DONE in parents_statuses: return Data.STATUS_DONE return None
[ "def", "dependency_status", "(", "data", ")", ":", "parents_statuses", "=", "set", "(", "DataDependency", ".", "objects", ".", "filter", "(", "child", "=", "data", ",", "kind", "=", "DataDependency", ".", "KIND_IO", ")", ".", "distinct", "(", "'parent__statu...
Return abstracted status of dependencies. - ``STATUS_ERROR`` .. one dependency has error status or was deleted - ``STATUS_DONE`` .. all dependencies have done status - ``None`` .. other
[ "Return", "abstracted", "status", "of", "dependencies", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L60-L87
train
45,224
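The status folding in `dependency_status` can be exercised without the ORM; a sketch over plain sets, with status strings that are assumptions mirroring the `Data.STATUS_*` constants:

    STATUS_ERROR = 'ER'   # assumed value, mirroring Data.STATUS_ERROR
    STATUS_DONE = 'OK'    # assumed value, mirroring Data.STATUS_DONE

    def fold_statuses(parent_statuses):
        """Collapse a set of parent statuses into one abstract status."""
        if not parent_statuses:
            return STATUS_DONE           # no dependencies at all
        if None in parent_statuses:
            return STATUS_ERROR          # some parent has been deleted
        if STATUS_ERROR in parent_statuses:
            return STATUS_ERROR
        if parent_statuses == {STATUS_DONE}:
            return STATUS_DONE
        return None                      # still waiting on something

    assert fold_statuses(set()) == STATUS_DONE
    assert fold_statuses({STATUS_DONE, 'PR'}) is None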
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.discover_engines
def discover_engines(self, executor=None): """Discover configured engines. :param executor: Optional executor module override """ if executor is None: executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local') self.executor = self.load_executor(executor) logger.info( __("Loaded '{}' executor.", str(self.executor.__class__.__module__).replace('.prepare', '')) ) expression_engines = getattr(settings, 'FLOW_EXPRESSION_ENGINES', ['resolwe.flow.expression_engines.jinja']) self.expression_engines = self.load_expression_engines(expression_engines) logger.info(__( "Found {} expression engines: {}", len(self.expression_engines), ', '.join(self.expression_engines.keys()) )) execution_engines = getattr(settings, 'FLOW_EXECUTION_ENGINES', ['resolwe.flow.execution_engines.bash']) self.execution_engines = self.load_execution_engines(execution_engines) logger.info(__( "Found {} execution engines: {}", len(self.execution_engines), ', '.join(self.execution_engines.keys()) ))
python
def discover_engines(self, executor=None): """Discover configured engines. :param executor: Optional executor module override """ if executor is None: executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local') self.executor = self.load_executor(executor) logger.info( __("Loaded '{}' executor.", str(self.executor.__class__.__module__).replace('.prepare', '')) ) expression_engines = getattr(settings, 'FLOW_EXPRESSION_ENGINES', ['resolwe.flow.expression_engines.jinja']) self.expression_engines = self.load_expression_engines(expression_engines) logger.info(__( "Found {} expression engines: {}", len(self.expression_engines), ', '.join(self.expression_engines.keys()) )) execution_engines = getattr(settings, 'FLOW_EXECUTION_ENGINES', ['resolwe.flow.execution_engines.bash']) self.execution_engines = self.load_execution_engines(execution_engines) logger.info(__( "Found {} execution engines: {}", len(self.execution_engines), ', '.join(self.execution_engines.keys()) ))
[ "def", "discover_engines", "(", "self", ",", "executor", "=", "None", ")", ":", "if", "executor", "is", "None", ":", "executor", "=", "getattr", "(", "settings", ",", "'FLOW_EXECUTOR'", ",", "{", "}", ")", ".", "get", "(", "'NAME'", ",", "'resolwe.flow.e...
Discover configured engines. :param executor: Optional executor module override
[ "Discover", "configured", "engines", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L230-L252
train
45,225
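Engine discovery reduces to reading dotted module paths from settings with sane defaults; a sketch of that lookup, using a plain dict as a stand-in for Django settings:

    DEFAULTS = {
        'FLOW_EXECUTOR': 'resolwe.flow.executors.local',
        'FLOW_EXPRESSION_ENGINES': ['resolwe.flow.expression_engines.jinja'],
        'FLOW_EXECUTION_ENGINES': ['resolwe.flow.execution_engines.bash'],
    }

    def engine_paths(settings):
        """Resolve configured engine module paths, falling back to defaults."""
        executor = settings.get('FLOW_EXECUTOR', {}).get(
            'NAME', DEFAULTS['FLOW_EXECUTOR'])
        expression = settings.get(
            'FLOW_EXPRESSION_ENGINES', DEFAULTS['FLOW_EXPRESSION_ENGINES'])
        execution = settings.get(
            'FLOW_EXECUTION_ENGINES', DEFAULTS['FLOW_EXECUTION_ENGINES'])
        return executor, expression, execution

    print(engine_paths({}))  # empty settings: every default is used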
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.reset
def reset(self, keep_state=False): """Reset the shared state and drain Django Channels. :param keep_state: If ``True``, do not reset the shared manager state (useful in tests, where the settings overrides need to be kept). Defaults to ``False``. """ if not keep_state: self.state = state.ManagerState(state.MANAGER_STATE_PREFIX) self.state.reset() async_to_sync(consumer.run_consumer)(timeout=1) async_to_sync(self.sync_counter.reset)()
python
def reset(self, keep_state=False): """Reset the shared state and drain Django Channels. :param keep_state: If ``True``, do not reset the shared manager state (useful in tests, where the settings overrides need to be kept). Defaults to ``False``. """ if not keep_state: self.state = state.ManagerState(state.MANAGER_STATE_PREFIX) self.state.reset() async_to_sync(consumer.run_consumer)(timeout=1) async_to_sync(self.sync_counter.reset)()
[ "def", "reset", "(", "self", ",", "keep_state", "=", "False", ")", ":", "if", "not", "keep_state", ":", "self", ".", "state", "=", "state", ".", "ManagerState", "(", "state", ".", "MANAGER_STATE_PREFIX", ")", "self", ".", "state", ".", "reset", "(", ")...
Reset the shared state and drain Django Channels. :param keep_state: If ``True``, do not reset the shared manager state (useful in tests, where the settings overrides need to be kept). Defaults to ``False``.
[ "Reset", "the", "shared", "state", "and", "drain", "Django", "Channels", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L254-L265
train
45,226
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._marshal_settings
def _marshal_settings(self): """Marshal Django settings into a serializable object. :return: The serialized settings. :rtype: dict """ result = {} for key in dir(settings): if any(map(key.startswith, ['FLOW_', 'RESOLWE_', 'CELERY_'])): result[key] = getattr(settings, key) return result
python
def _marshal_settings(self): """Marshal Django settings into a serializable object. :return: The serialized settings. :rtype: dict """ result = {} for key in dir(settings): if any(map(key.startswith, ['FLOW_', 'RESOLWE_', 'CELERY_'])): result[key] = getattr(settings, key) return result
[ "def", "_marshal_settings", "(", "self", ")", ":", "result", "=", "{", "}", "for", "key", "in", "dir", "(", "settings", ")", ":", "if", "any", "(", "map", "(", "key", ".", "startswith", ",", "[", "'FLOW_'", ",", "'RESOLWE_'", ",", "'CELERY_'", "]", ...
Marshal Django settings into a serializable object. :return: The serialized settings. :rtype: dict
[ "Marshal", "Django", "settings", "into", "a", "serializable", "object", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L343-L353
train
45,227
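The marshaling step is a prefix filter over attribute names; the same idea applied to a plain namespace object (a sketch, not the Django-backed original):

    from types import SimpleNamespace

    def marshal(settings, prefixes=('FLOW_', 'RESOLWE_', 'CELERY_')):
        """Collect settings whose names start with any watched prefix."""
        return {
            key: getattr(settings, key)
            for key in dir(settings)
            if any(key.startswith(prefix) for prefix in prefixes)
        }

    fake = SimpleNamespace(FLOW_EXECUTOR={'NAME': 'local'}, DEBUG=True)
    assert marshal(fake) == {'FLOW_EXECUTOR': {'NAME': 'local'}}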
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._include_environment_variables
def _include_environment_variables(self, program, executor_vars): """Define environment variables.""" env_vars = { 'RESOLWE_HOST_URL': self.settings_actual.get('RESOLWE_HOST_URL', 'localhost'), } set_env = self.settings_actual.get('FLOW_EXECUTOR', {}).get('SET_ENV', {}) env_vars.update(executor_vars) env_vars.update(set_env) export_commands = ['export {}={}'.format(key, shlex.quote(value)) for key, value in env_vars.items()] return os.linesep.join(export_commands) + os.linesep + program
python
def _include_environment_variables(self, program, executor_vars): """Define environment variables.""" env_vars = { 'RESOLWE_HOST_URL': self.settings_actual.get('RESOLWE_HOST_URL', 'localhost'), } set_env = self.settings_actual.get('FLOW_EXECUTOR', {}).get('SET_ENV', {}) env_vars.update(executor_vars) env_vars.update(set_env) export_commands = ['export {}={}'.format(key, shlex.quote(value)) for key, value in env_vars.items()] return os.linesep.join(export_commands) + os.linesep + program
[ "def", "_include_environment_variables", "(", "self", ",", "program", ",", "executor_vars", ")", ":", "env_vars", "=", "{", "'RESOLWE_HOST_URL'", ":", "self", ".", "settings_actual", ".", "get", "(", "'RESOLWE_HOST_URL'", ",", "'localhost'", ")", ",", "}", "set_...
Define environment variables.
[ "Define", "environment", "variables", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L355-L366
train
45,228
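Prepending export statements is plain string assembly; `shlex.quote` keeps values containing spaces or quotes shell-safe. A standalone sketch of the same construction:

    import os
    import shlex

    def with_env(program, env_vars):
        """Prefix a shell program with quoted export statements."""
        exports = [
            'export {}={}'.format(key, shlex.quote(value))
            for key, value in env_vars.items()
        ]
        return os.linesep.join(exports) + os.linesep + program

    print(with_env('echo "$GREETING"', {'GREETING': 'hello world'}))
    # export GREETING='hello world'
    # echo "$GREETING"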
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.run
def run(self, data, runtime_dir, argv): """Select a concrete connector and run the process through it. :param data: The :class:`~resolwe.flow.models.Data` object that is to be run. :param runtime_dir: The directory the executor is run from. :param argv: The argument vector used to spawn the executor. """ process_scheduling = self.scheduling_class_map[data.process.scheduling_class] if 'DISPATCHER_MAPPING' in getattr(settings, 'FLOW_MANAGER', {}): class_name = settings.FLOW_MANAGER['DISPATCHER_MAPPING'][process_scheduling] else: class_name = getattr(settings, 'FLOW_MANAGER', {}).get('NAME', DEFAULT_CONNECTOR) data.scheduled = now() data.save(update_fields=['scheduled']) async_to_sync(self.sync_counter.inc)('executor') return self.connectors[class_name].submit(data, runtime_dir, argv)
python
def run(self, data, runtime_dir, argv): """Select a concrete connector and run the process through it. :param data: The :class:`~resolwe.flow.models.Data` object that is to be run. :param runtime_dir: The directory the executor is run from. :param argv: The argument vector used to spawn the executor. """ process_scheduling = self.scheduling_class_map[data.process.scheduling_class] if 'DISPATCHER_MAPPING' in getattr(settings, 'FLOW_MANAGER', {}): class_name = settings.FLOW_MANAGER['DISPATCHER_MAPPING'][process_scheduling] else: class_name = getattr(settings, 'FLOW_MANAGER', {}).get('NAME', DEFAULT_CONNECTOR) data.scheduled = now() data.save(update_fields=['scheduled']) async_to_sync(self.sync_counter.inc)('executor') return self.connectors[class_name].submit(data, runtime_dir, argv)
[ "def", "run", "(", "self", ",", "data", ",", "runtime_dir", ",", "argv", ")", ":", "process_scheduling", "=", "self", ".", "scheduling_class_map", "[", "data", ".", "process", ".", "scheduling_class", "]", "if", "'DISPATCHER_MAPPING'", "in", "getattr", "(", ...
Select a concrete connector and run the process through it. :param data: The :class:`~resolwe.flow.models.Data` object that is to be run. :param runtime_dir: The directory the executor is run from. :param argv: The argument vector used to spawn the executor.
[ "Select", "a", "concrete", "connector", "and", "run", "the", "process", "through", "it", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L368-L386
train
45,229
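Connector selection in `run` is a two-level lookup: a per-scheduling-class mapping when `DISPATCHER_MAPPING` is configured, otherwise a single configured name. A sketch over plain dicts; the connector names and the default in the example are illustrative:

    DEFAULT_CONNECTOR = 'local'  # illustrative stand-in for the real default

    def pick_connector(flow_manager, scheduling_class):
        """Resolve the workload connector name for one scheduling class."""
        if 'DISPATCHER_MAPPING' in flow_manager:
            return flow_manager['DISPATCHER_MAPPING'][scheduling_class]
        return flow_manager.get('NAME', DEFAULT_CONNECTOR)

    cfg = {'DISPATCHER_MAPPING': {'interactive': 'local', 'batch': 'slurm'}}
    assert pick_connector(cfg, 'batch') == 'slurm'
    assert pick_connector({}, 'batch') == DEFAULT_CONNECTOR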
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._get_per_data_dir
def _get_per_data_dir(self, dir_base, subpath): """Extend the given base directory with a per-data component. The method creates a private path for the :class:`~resolwe.flow.models.Data` object, such as:: ./test_data/1/ if ``dir_base`` is ``'./test_data'`` and ``subpath`` is ``1``. :param dir_base: The base path to be extended. This will usually be one of the directories configured in the ``FLOW_EXECUTOR`` setting. :param subpath: Object's subpath used for extending. :return: The new path for the :class:`~resolwe.flow.models.Data` object. :rtype: str """ # Use Django settings here, because the state must be preserved # across events. This also implies the directory settings can't # be patched outside the manager and then just sent along in the # command packets. result = self.settings_actual.get('FLOW_EXECUTOR', {}).get(dir_base, '') return os.path.join(result, subpath)
python
def _get_per_data_dir(self, dir_base, subpath): """Extend the given base directory with a per-data component. The method creates a private path for the :class:`~resolwe.flow.models.Data` object, such as:: ./test_data/1/ if ``dir_base`` is ``'./test_data'`` and ``subpath`` is ``1``. :param dir_base: The base path to be extended. This will usually be one of the directories configured in the ``FLOW_EXECUTOR`` setting. :param subpath: Object's subpath used for extending. :return: The new path for the :class:`~resolwe.flow.models.Data` object. :rtype: str """ # Use Django settings here, because the state must be preserved # across events. This also implies the directory settings can't # be patched outside the manager and then just sent along in the # command packets. result = self.settings_actual.get('FLOW_EXECUTOR', {}).get(dir_base, '') return os.path.join(result, subpath)
[ "def", "_get_per_data_dir", "(", "self", ",", "dir_base", ",", "subpath", ")", ":", "# Use Django settings here, because the state must be preserved", "# across events. This also implies the directory settings can't", "# be patched outside the manager and then just sent along in the", "# c...
Extend the given base directory with a per-data component. The method creates a private path for the :class:`~resolwe.flow.models.Data` object, such as:: ./test_data/1/ if ``dir_base`` is ``'./test_data'`` and ``subpath`` is ``1``. :param dir_base: The base path to be extended. This will usually be one of the directories configured in the ``FLOW_EXECUTOR`` setting. :param subpath: Object's subpath used for extending. :return: The new path for the :class:`~resolwe.flow.models.Data` object. :rtype: str
[ "Extend", "the", "given", "base", "directory", "with", "a", "per", "-", "data", "component", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L388-L411
train
45,230
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._prepare_data_dir
def _prepare_data_dir(self, data): """Prepare destination directory where the data will live. :param data: The :class:`~resolwe.flow.models.Data` object for which to prepare the private execution directory. :return: The prepared data directory path. :rtype: str """ logger.debug(__("Preparing data directory for Data with id {}.", data.id)) with transaction.atomic(): # Create a temporary random location and then override it with data # location id since object has to be created first. # TODO Find a better solution, e.g. defer the database constraint. temporary_location_string = uuid.uuid4().hex[:10] data_location = DataLocation.objects.create(subpath=temporary_location_string) data_location.subpath = str(data_location.id) data_location.save() data_location.data.add(data) output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath) dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755) os.mkdir(output_path, mode=dir_mode) # os.mkdir is not guaranteed to set the given mode os.chmod(output_path, dir_mode) return output_path
python
def _prepare_data_dir(self, data): """Prepare destination directory where the data will live. :param data: The :class:`~resolwe.flow.models.Data` object for which to prepare the private execution directory. :return: The prepared data directory path. :rtype: str """ logger.debug(__("Preparing data directory for Data with id {}.", data.id)) with transaction.atomic(): # Create a temporary random location and then override it with data # location id since object has to be created first. # TODO Find a better solution, e.g. defer the database constraint. temporary_location_string = uuid.uuid4().hex[:10] data_location = DataLocation.objects.create(subpath=temporary_location_string) data_location.subpath = str(data_location.id) data_location.save() data_location.data.add(data) output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath) dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755) os.mkdir(output_path, mode=dir_mode) # os.mkdir is not guaranteed to set the given mode os.chmod(output_path, dir_mode) return output_path
[ "def", "_prepare_data_dir", "(", "self", ",", "data", ")", ":", "logger", ".", "debug", "(", "__", "(", "\"Preparing data directory for Data with id {}.\"", ",", "data", ".", "id", ")", ")", "with", "transaction", ".", "atomic", "(", ")", ":", "# Create a temp...
Prepare destination directory where the data will live. :param data: The :class:`~resolwe.flow.models.Data` object for which to prepare the private execution directory. :return: The prepared data directory path. :rtype: str
[ "Prepare", "destination", "directory", "where", "the", "data", "will", "live", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L413-L438
train
45,231
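The mkdir-then-chmod pair in `_prepare_data_dir` is deliberate: `os.mkdir` applies the process umask, so the requested mode is not guaranteed and the explicit `os.chmod` is the reliable step. A runnable sketch of the idiom (POSIX assumed):

    import os
    import tempfile

    def make_dir_with_mode(path, mode=0o755):
        """Create a directory and enforce its mode regardless of umask."""
        os.mkdir(path, mode=mode)
        os.chmod(path, mode)  # os.mkdir alone is masked by the umask

    target = os.path.join(tempfile.mkdtemp(), 'data')
    make_dir_with_mode(target)
    assert oct(os.stat(target).st_mode & 0o777) == '0o755'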
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._prepare_context
def _prepare_context(self, data_id, data_dir, runtime_dir, **kwargs): """Prepare settings and constants JSONs for the executor. Settings and constants provided by other ``resolwe`` modules and :class:`~django.conf.settings` are all inaccessible in the executor once it is deployed, so they need to be serialized into the runtime directory. :param data_id: The :class:`~resolwe.flow.models.Data` object id being prepared for. :param data_dir: The target execution directory for this :class:`~resolwe.flow.models.Data` object. :param runtime_dir: The target runtime support directory for this :class:`~resolwe.flow.models.Data` object; this is where the environment is serialized into. :param kwargs: Extra settings to include in the main settings file. """ files = {} secrets = {} settings_dict = {} settings_dict['DATA_DIR'] = data_dir settings_dict['REDIS_CHANNEL_PAIR'] = state.MANAGER_EXECUTOR_CHANNELS files[ExecutorFiles.EXECUTOR_SETTINGS] = settings_dict django_settings = {} django_settings.update(self.settings_actual) django_settings.update(kwargs) files[ExecutorFiles.DJANGO_SETTINGS] = django_settings # Add scheduling classes. files[ExecutorFiles.PROCESS_META] = { k: getattr(Process, k) for k in dir(Process) if k.startswith('SCHEDULING_CLASS_') and isinstance(getattr(Process, k), str) } # Add Data status constants. files[ExecutorFiles.DATA_META] = { k: getattr(Data, k) for k in dir(Data) if k.startswith('STATUS_') and isinstance(getattr(Data, k), str) } # Extend the settings with whatever the executor wants. self.executor.extend_settings(data_id, files, secrets) # Save the settings into the various files in the runtime dir. settings_dict[ExecutorFiles.FILE_LIST_KEY] = list(files.keys()) for file_name in files: file_path = os.path.join(runtime_dir, file_name) with open(file_path, 'wt') as json_file: json.dump(files[file_name], json_file, cls=SettingsJSONifier) # Save the secrets in the runtime dir, with permissions to prevent listing the given # directory. secrets_dir = os.path.join(runtime_dir, ExecutorFiles.SECRETS_DIR) os.makedirs(secrets_dir, mode=0o300) for file_name, value in secrets.items(): file_path = os.path.join(secrets_dir, file_name) # Set umask to 0 to ensure that we set the correct permissions. old_umask = os.umask(0) try: # We need to use os.open in order to correctly enforce file creation. Otherwise, # there is a race condition which can be used to create the file with different # ownership/permissions. file_descriptor = os.open(file_path, os.O_WRONLY | os.O_CREAT, mode=0o600) with os.fdopen(file_descriptor, 'w') as raw_file: raw_file.write(value) finally: os.umask(old_umask)
python
def _prepare_context(self, data_id, data_dir, runtime_dir, **kwargs): """Prepare settings and constants JSONs for the executor. Settings and constants provided by other ``resolwe`` modules and :class:`~django.conf.settings` are all inaccessible in the executor once it is deployed, so they need to be serialized into the runtime directory. :param data_id: The :class:`~resolwe.flow.models.Data` object id being prepared for. :param data_dir: The target execution directory for this :class:`~resolwe.flow.models.Data` object. :param runtime_dir: The target runtime support directory for this :class:`~resolwe.flow.models.Data` object; this is where the environment is serialized into. :param kwargs: Extra settings to include in the main settings file. """ files = {} secrets = {} settings_dict = {} settings_dict['DATA_DIR'] = data_dir settings_dict['REDIS_CHANNEL_PAIR'] = state.MANAGER_EXECUTOR_CHANNELS files[ExecutorFiles.EXECUTOR_SETTINGS] = settings_dict django_settings = {} django_settings.update(self.settings_actual) django_settings.update(kwargs) files[ExecutorFiles.DJANGO_SETTINGS] = django_settings # Add scheduling classes. files[ExecutorFiles.PROCESS_META] = { k: getattr(Process, k) for k in dir(Process) if k.startswith('SCHEDULING_CLASS_') and isinstance(getattr(Process, k), str) } # Add Data status constants. files[ExecutorFiles.DATA_META] = { k: getattr(Data, k) for k in dir(Data) if k.startswith('STATUS_') and isinstance(getattr(Data, k), str) } # Extend the settings with whatever the executor wants. self.executor.extend_settings(data_id, files, secrets) # Save the settings into the various files in the runtime dir. settings_dict[ExecutorFiles.FILE_LIST_KEY] = list(files.keys()) for file_name in files: file_path = os.path.join(runtime_dir, file_name) with open(file_path, 'wt') as json_file: json.dump(files[file_name], json_file, cls=SettingsJSONifier) # Save the secrets in the runtime dir, with permissions to prevent listing the given # directory. secrets_dir = os.path.join(runtime_dir, ExecutorFiles.SECRETS_DIR) os.makedirs(secrets_dir, mode=0o300) for file_name, value in secrets.items(): file_path = os.path.join(secrets_dir, file_name) # Set umask to 0 to ensure that we set the correct permissions. old_umask = os.umask(0) try: # We need to use os.open in order to correctly enforce file creation. Otherwise, # there is a race condition which can be used to create the file with different # ownership/permissions. file_descriptor = os.open(file_path, os.O_WRONLY | os.O_CREAT, mode=0o600) with os.fdopen(file_descriptor, 'w') as raw_file: raw_file.write(value) finally: os.umask(old_umask)
[ "def", "_prepare_context", "(", "self", ",", "data_id", ",", "data_dir", ",", "runtime_dir", ",", "*", "*", "kwargs", ")", ":", "files", "=", "{", "}", "secrets", "=", "{", "}", "settings_dict", "=", "{", "}", "settings_dict", "[", "'DATA_DIR'", "]", "...
Prepare settings and constants JSONs for the executor. Settings and constants provided by other ``resolwe`` modules and :class:`~django.conf.settings` are all inaccessible in the executor once it is deployed, so they need to be serialized into the runtime directory. :param data_id: The :class:`~resolwe.flow.models.Data` object id being prepared for. :param data_dir: The target execution directory for this :class:`~resolwe.flow.models.Data` object. :param runtime_dir: The target runtime support directory for this :class:`~resolwe.flow.models.Data` object; this is where the environment is serialized into. :param kwargs: Extra settings to include in the main settings file.
[ "Prepare", "settings", "and", "constants", "JSONs", "for", "the", "executor", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L440-L510
train
45,232
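The secrets handling is the security-sensitive part of `_prepare_context`: `os.open` with an explicit mode and a zeroed umask closes the window in which a plain `open` would create the file with default permissions. The idiom isolated into a standalone sketch:

    import os

    def write_secret(path, value):
        """Create a file with mode 0600 without a permissions race."""
        old_umask = os.umask(0)  # keep the umask from widening the mode
        try:
            # os.open enforces the mode at creation time; a plain open()
            # would briefly expose the file with default permissions.
            fd = os.open(path, os.O_WRONLY | os.O_CREAT, mode=0o600)
            with os.fdopen(fd, 'w') as handle:
                handle.write(value)
        finally:
            os.umask(old_umask)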
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._prepare_executor
def _prepare_executor(self, data, executor): """Copy executor sources into the destination directory. :param data: The :class:`~resolwe.flow.models.Data` object being prepared for. :param executor: The fully qualified name of the executor that is to be used for this data object. :return: Tuple containing the relative fully qualified name of the executor class ('relative' to how the executor will be run) and the path to the directory where the executor will be deployed. :rtype: (str, str) """ logger.debug(__("Preparing executor for Data with id {}", data.id)) # Both of these imports are here only to get the packages' paths. import resolwe.flow.executors as executor_package exec_dir = os.path.dirname(inspect.getsourcefile(executor_package)) dest_dir = self._get_per_data_dir('RUNTIME_DIR', data.location.subpath) dest_package_dir = os.path.join(dest_dir, 'executors') shutil.copytree(exec_dir, dest_package_dir) dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('RUNTIME_DIR_MODE', 0o755) os.chmod(dest_dir, dir_mode) class_name = executor.rpartition('.executors.')[-1] return '.{}'.format(class_name), dest_dir
python
def _prepare_executor(self, data, executor): """Copy executor sources into the destination directory. :param data: The :class:`~resolwe.flow.models.Data` object being prepared for. :param executor: The fully qualified name of the executor that is to be used for this data object. :return: Tuple containing the relative fully qualified name of the executor class ('relative' to how the executor will be run) and the path to the directory where the executor will be deployed. :rtype: (str, str) """ logger.debug(__("Preparing executor for Data with id {}", data.id)) # Both of these imports are here only to get the packages' paths. import resolwe.flow.executors as executor_package exec_dir = os.path.dirname(inspect.getsourcefile(executor_package)) dest_dir = self._get_per_data_dir('RUNTIME_DIR', data.location.subpath) dest_package_dir = os.path.join(dest_dir, 'executors') shutil.copytree(exec_dir, dest_package_dir) dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('RUNTIME_DIR_MODE', 0o755) os.chmod(dest_dir, dir_mode) class_name = executor.rpartition('.executors.')[-1] return '.{}'.format(class_name), dest_dir
[ "def", "_prepare_executor", "(", "self", ",", "data", ",", "executor", ")", ":", "logger", ".", "debug", "(", "__", "(", "\"Preparing executor for Data with id {}\"", ",", "data", ".", "id", ")", ")", "# Both of these imports are here only to get the packages' paths.", ...
Copy executor sources into the destination directory. :param data: The :class:`~resolwe.flow.models.Data` object being prepared for. :param executor: The fully qualified name of the executor that is to be used for this data object. :return: Tuple containing the relative fully qualified name of the executor class ('relative' to how the executor will be run) and the path to the directory where the executor will be deployed. :rtype: (str, str)
[ "Copy", "executor", "sources", "into", "the", "destination", "directory", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L512-L538
train
45,233
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._prepare_script
def _prepare_script(self, dest_dir, program): """Copy the script into the destination directory. :param dest_dir: The target directory where the script will be saved. :param program: The script text to be saved. :return: The name of the script file. :rtype: str """ script_name = ExecutorFiles.PROCESS_SCRIPT dest_file = os.path.join(dest_dir, script_name) with open(dest_file, 'wt') as dest_file_obj: dest_file_obj.write(program) os.chmod(dest_file, 0o700) return script_name
python
def _prepare_script(self, dest_dir, program): """Copy the script into the destination directory. :param dest_dir: The target directory where the script will be saved. :param program: The script text to be saved. :return: The name of the script file. :rtype: str """ script_name = ExecutorFiles.PROCESS_SCRIPT dest_file = os.path.join(dest_dir, script_name) with open(dest_file, 'wt') as dest_file_obj: dest_file_obj.write(program) os.chmod(dest_file, 0o700) return script_name
[ "def", "_prepare_script", "(", "self", ",", "dest_dir", ",", "program", ")", ":", "script_name", "=", "ExecutorFiles", ".", "PROCESS_SCRIPT", "dest_file", "=", "os", ".", "path", ".", "join", "(", "dest_dir", ",", "script_name", ")", "with", "open", "(", "...
Copy the script into the destination directory. :param dest_dir: The target directory where the script will be saved. :param program: The script text to be saved. :return: The name of the script file. :rtype: str
[ "Copy", "the", "script", "into", "the", "destination", "directory", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L540-L554
train
45,234
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.handle_control_event
async def handle_control_event(self, message): """Handle an event from the Channels layer. Channels layer callback, do not call directly. """ cmd = message[WorkerProtocol.COMMAND] logger.debug(__("Manager worker got channel command '{}'.", cmd)) # Prepare settings for use; Django overlaid by state overlaid by # anything immediate in the current packet. immediates = {} if cmd == WorkerProtocol.COMMUNICATE: immediates = message.get(WorkerProtocol.COMMUNICATE_SETTINGS, {}) or {} override = self.state.settings_override or {} override.update(immediates) self.settings_actual = self._marshal_settings() self.settings_actual.update(override) if cmd == WorkerProtocol.COMMUNICATE: try: await database_sync_to_async(self._data_scan)(**message[WorkerProtocol.COMMUNICATE_EXTRA]) except Exception: logger.exception("Unknown error occurred while processing communicate control command.") raise finally: await self.sync_counter.dec('communicate') elif cmd == WorkerProtocol.FINISH: try: data_id = message[WorkerProtocol.DATA_ID] data_location = DataLocation.objects.get(data__id=data_id) if not getattr(settings, 'FLOW_MANAGER_KEEP_DATA', False): try: def handle_error(func, path, exc_info): """Handle permission errors while removing data directories.""" if isinstance(exc_info[1], PermissionError): os.chmod(path, 0o700) shutil.rmtree(path) # Remove secrets directory, but leave the rest of the runtime directory # intact. Runtime directory will be removed during data purge, when the # data object is removed. secrets_dir = os.path.join( self._get_per_data_dir('RUNTIME_DIR', data_location.subpath), ExecutorFiles.SECRETS_DIR ) shutil.rmtree(secrets_dir, onerror=handle_error) except OSError: logger.exception("Manager exception while removing data runtime directory.") if message[WorkerProtocol.FINISH_SPAWNED]: await database_sync_to_async(self._data_scan)(**message[WorkerProtocol.FINISH_COMMUNICATE_EXTRA]) except Exception: logger.exception( "Unknown error occurred while processing finish control command.", extra={'data_id': data_id} ) raise finally: await self.sync_counter.dec('executor') elif cmd == WorkerProtocol.ABORT: await self.sync_counter.dec('executor') else: logger.error(__("Ignoring unknown manager control command '{}'.", cmd))
python
async def handle_control_event(self, message): """Handle an event from the Channels layer. Channels layer callback, do not call directly. """ cmd = message[WorkerProtocol.COMMAND] logger.debug(__("Manager worker got channel command '{}'.", cmd)) # Prepare settings for use; Django overlaid by state overlaid by # anything immediate in the current packet. immediates = {} if cmd == WorkerProtocol.COMMUNICATE: immediates = message.get(WorkerProtocol.COMMUNICATE_SETTINGS, {}) or {} override = self.state.settings_override or {} override.update(immediates) self.settings_actual = self._marshal_settings() self.settings_actual.update(override) if cmd == WorkerProtocol.COMMUNICATE: try: await database_sync_to_async(self._data_scan)(**message[WorkerProtocol.COMMUNICATE_EXTRA]) except Exception: logger.exception("Unknown error occurred while processing communicate control command.") raise finally: await self.sync_counter.dec('communicate') elif cmd == WorkerProtocol.FINISH: try: data_id = message[WorkerProtocol.DATA_ID] data_location = DataLocation.objects.get(data__id=data_id) if not getattr(settings, 'FLOW_MANAGER_KEEP_DATA', False): try: def handle_error(func, path, exc_info): """Handle permission errors while removing data directories.""" if isinstance(exc_info[1], PermissionError): os.chmod(path, 0o700) shutil.rmtree(path) # Remove secrets directory, but leave the rest of the runtime directory # intact. Runtime directory will be removed during data purge, when the # data object is removed. secrets_dir = os.path.join( self._get_per_data_dir('RUNTIME_DIR', data_location.subpath), ExecutorFiles.SECRETS_DIR ) shutil.rmtree(secrets_dir, onerror=handle_error) except OSError: logger.exception("Manager exception while removing data runtime directory.") if message[WorkerProtocol.FINISH_SPAWNED]: await database_sync_to_async(self._data_scan)(**message[WorkerProtocol.FINISH_COMMUNICATE_EXTRA]) except Exception: logger.exception( "Unknown error occurred while processing finish control command.", extra={'data_id': data_id} ) raise finally: await self.sync_counter.dec('executor') elif cmd == WorkerProtocol.ABORT: await self.sync_counter.dec('executor') else: logger.error(__("Ignoring unknown manager control command '{}'.", cmd))
[ "async", "def", "handle_control_event", "(", "self", ",", "message", ")", ":", "cmd", "=", "message", "[", "WorkerProtocol", ".", "COMMAND", "]", "logger", ".", "debug", "(", "__", "(", "\"Manager worker got channel command '{}'.\"", ",", "cmd", ")", ")", "# P...
Handle an event from the Channels layer. Channels layer callback, do not call directly.
[ "Handle", "an", "event", "from", "the", "Channels", "layer", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L564-L629
train
45,235
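The nested `handle_error` callback above is a reusable idiom: on a permission error, open up the offending path and retry the removal. Extracted into a standalone sketch:

    import os
    import shutil

    def rmtree_force(path):
        """Remove a tree, retrying entries that fail on permissions."""
        def handle_error(func, failed_path, exc_info):
            # Mirrors the callback above: widen permissions, then retry.
            if isinstance(exc_info[1], PermissionError):
                os.chmod(failed_path, 0o700)
                shutil.rmtree(failed_path)

        shutil.rmtree(path, onerror=handle_error)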
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._ensure_counter
def _ensure_counter(self): """Ensure the sync counter is a valid non-dummy object.""" if not isinstance(self.sync_counter, self._SynchronizationManager): self.sync_counter = self._SynchronizationManager()
python
def _ensure_counter(self): """Ensure the sync counter is a valid non-dummy object.""" if not isinstance(self.sync_counter, self._SynchronizationManager): self.sync_counter = self._SynchronizationManager()
[ "def", "_ensure_counter", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "sync_counter", ",", "self", ".", "_SynchronizationManager", ")", ":", "self", ".", "sync_counter", "=", "self", ".", "_SynchronizationManager", "(", ")" ]
Ensure the sync counter is a valid non-dummy object.
[ "Ensure", "the", "sync", "counter", "is", "a", "valid", "non", "-", "dummy", "object", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L631-L634
train
45,236
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.execution_barrier
async def execution_barrier(self): """Wait for executors to finish. At least one must finish after this point to avoid a deadlock. """ async def _barrier(): """Enter the sync block and exit the app afterwards.""" async with self.sync_counter: pass await consumer.exit_consumer() self._ensure_counter() await asyncio.wait([ _barrier(), consumer.run_consumer(), ]) self.sync_counter = self._SynchronizationManagerDummy()
python
async def execution_barrier(self): """Wait for executors to finish. At least one must finish after this point to avoid a deadlock. """ async def _barrier(): """Enter the sync block and exit the app afterwards.""" async with self.sync_counter: pass await consumer.exit_consumer() self._ensure_counter() await asyncio.wait([ _barrier(), consumer.run_consumer(), ]) self.sync_counter = self._SynchronizationManagerDummy()
[ "async", "def", "execution_barrier", "(", "self", ")", ":", "async", "def", "_barrier", "(", ")", ":", "\"\"\"Enter the sync block and exit the app afterwards.\"\"\"", "async", "with", "self", ".", "sync_counter", ":", "pass", "await", "consumer", ".", "exit_consumer"...
Wait for executors to finish. At least one must finish after this point to avoid a deadlock.
[ "Wait", "for", "executors", "to", "finish", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L636-L652
train
45,237
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.communicate
async def communicate(self, data_id=None, run_sync=False, save_settings=True): """Scan database for resolving Data objects and process them. This is submitted as a task to the manager's channel workers. :param data_id: Optional id of Data object which (+ its children) should be processed. If it is not given, all resolving objects are processed. :param run_sync: If ``True``, wait until all processes spawned from this point on have finished processing. If no processes are spawned, this results in a deadlock, since counts are handled on process finish. :param save_settings: If ``True``, save the current Django settings context to the global state. This should never be ``True`` for "automatic" calls, such as from Django signals, which can be invoked from inappropriate contexts (such as in the listener). For user code, it should be left at the default value. The saved settings are in effect until the next such call. """ executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local') logger.debug(__( "Manager sending communicate command on '{}' triggered by Data with id {}.", state.MANAGER_CONTROL_CHANNEL, data_id, )) saved_settings = self.state.settings_override if save_settings: saved_settings = self._marshal_settings() self.state.settings_override = saved_settings if run_sync: self._ensure_counter() await self.sync_counter.inc('communicate') try: await consumer.send_event({ WorkerProtocol.COMMAND: WorkerProtocol.COMMUNICATE, WorkerProtocol.COMMUNICATE_SETTINGS: saved_settings, WorkerProtocol.COMMUNICATE_EXTRA: { 'data_id': data_id, 'executor': executor, }, }) except ChannelFull: logger.exception("ChannelFull error occurred while sending communicate message.") await self.sync_counter.dec('communicate') if run_sync and not self.sync_counter.active: logger.debug(__( "Manager on channel '{}' entering synchronization block.", state.MANAGER_CONTROL_CHANNEL )) await self.execution_barrier() logger.debug(__( "Manager on channel '{}' exiting synchronization block.", state.MANAGER_CONTROL_CHANNEL ))
python
async def communicate(self, data_id=None, run_sync=False, save_settings=True): """Scan database for resolving Data objects and process them. This is submitted as a task to the manager's channel workers. :param data_id: Optional id of Data object which (+ its children) should be processed. If it is not given, all resolving objects are processed. :param run_sync: If ``True``, wait until all processes spawned from this point on have finished processing. If no processes are spawned, this results in a deadlock, since counts are handled on process finish. :param save_settings: If ``True``, save the current Django settings context to the global state. This should never be ``True`` for "automatic" calls, such as from Django signals, which can be invoked from inappropriate contexts (such as in the listener). For user code, it should be left at the default value. The saved settings are in effect until the next such call. """ executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local') logger.debug(__( "Manager sending communicate command on '{}' triggered by Data with id {}.", state.MANAGER_CONTROL_CHANNEL, data_id, )) saved_settings = self.state.settings_override if save_settings: saved_settings = self._marshal_settings() self.state.settings_override = saved_settings if run_sync: self._ensure_counter() await self.sync_counter.inc('communicate') try: await consumer.send_event({ WorkerProtocol.COMMAND: WorkerProtocol.COMMUNICATE, WorkerProtocol.COMMUNICATE_SETTINGS: saved_settings, WorkerProtocol.COMMUNICATE_EXTRA: { 'data_id': data_id, 'executor': executor, }, }) except ChannelFull: logger.exception("ChannelFull error occurred while sending communicate message.") await self.sync_counter.dec('communicate') if run_sync and not self.sync_counter.active: logger.debug(__( "Manager on channel '{}' entering synchronization block.", state.MANAGER_CONTROL_CHANNEL )) await self.execution_barrier() logger.debug(__( "Manager on channel '{}' exiting synchronization block.", state.MANAGER_CONTROL_CHANNEL ))
[ "async", "def", "communicate", "(", "self", ",", "data_id", "=", "None", ",", "run_sync", "=", "False", ",", "save_settings", "=", "True", ")", ":", "executor", "=", "getattr", "(", "settings", ",", "'FLOW_EXECUTOR'", ",", "{", "}", ")", ".", "get", "(...
Scan database for resolving Data objects and process them. This is submitted as a task to the manager's channel workers. :param data_id: Optional id of Data object which (+ its children) should be processed. If it is not given, all resolving objects are processed. :param run_sync: If ``True``, wait until all processes spawned from this point on have finished processing. If no processes are spawned, this results in a deadlock, since counts are handled on process finish. :param save_settings: If ``True``, save the current Django settings context to the global state. This should never be ``True`` for "automatic" calls, such as from Django signals, which can be invoked from inappropriate contexts (such as in the listener). For user code, it should be left at the default value. The saved settings are in effect until the next such call.
[ "Scan", "database", "for", "resolving", "Data", "objects", "and", "process", "them", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L654-L711
train
45,238
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager._data_execute
def _data_execute(self, data, program, executor): """Execute the Data object. The activities carried out here include target directory preparation, executor copying, setting serialization and actual execution of the object. :param data: The :class:`~resolwe.flow.models.Data` object to execute. :param program: The process text the manager got out of execution engine evaluation. :param executor: The executor to use for this object. """ if not program: return logger.debug(__("Manager preparing Data with id {} for processing.", data.id)) # Prepare the executor's environment. try: executor_env_vars = self.get_executor().get_environment_variables() program = self._include_environment_variables(program, executor_env_vars) data_dir = self._prepare_data_dir(data) executor_module, runtime_dir = self._prepare_executor(data, executor) # Execute execution engine specific runtime preparation. execution_engine = data.process.run.get('language', None) volume_maps = self.get_execution_engine(execution_engine).prepare_runtime(runtime_dir, data) self._prepare_context(data.id, data_dir, runtime_dir, RUNTIME_VOLUME_MAPS=volume_maps) self._prepare_script(runtime_dir, program) argv = [ '/bin/bash', '-c', self.settings_actual.get('FLOW_EXECUTOR', {}).get('PYTHON', '/usr/bin/env python') + ' -m executors ' + executor_module ] except PermissionDenied as error: data.status = Data.STATUS_ERROR data.process_error.append("Permission denied for process: {}".format(error)) data.save() return except OSError as err: logger.error(__( "OSError occurred while preparing data {} (will skip): {}", data.id, err )) return # Hand off to the run() method for execution. logger.info(__("Running {}", runtime_dir)) self.run(data, runtime_dir, argv)
python
def _data_execute(self, data, program, executor): """Execute the Data object. The activities carried out here include target directory preparation, executor copying, setting serialization and actual execution of the object. :param data: The :class:`~resolwe.flow.models.Data` object to execute. :param program: The process text the manager got out of execution engine evaluation. :param executor: The executor to use for this object. """ if not program: return logger.debug(__("Manager preparing Data with id {} for processing.", data.id)) # Prepare the executor's environment. try: executor_env_vars = self.get_executor().get_environment_variables() program = self._include_environment_variables(program, executor_env_vars) data_dir = self._prepare_data_dir(data) executor_module, runtime_dir = self._prepare_executor(data, executor) # Execute execution engine specific runtime preparation. execution_engine = data.process.run.get('language', None) volume_maps = self.get_execution_engine(execution_engine).prepare_runtime(runtime_dir, data) self._prepare_context(data.id, data_dir, runtime_dir, RUNTIME_VOLUME_MAPS=volume_maps) self._prepare_script(runtime_dir, program) argv = [ '/bin/bash', '-c', self.settings_actual.get('FLOW_EXECUTOR', {}).get('PYTHON', '/usr/bin/env python') + ' -m executors ' + executor_module ] except PermissionDenied as error: data.status = Data.STATUS_ERROR data.process_error.append("Permission denied for process: {}".format(error)) data.save() return except OSError as err: logger.error(__( "OSError occurred while preparing data {} (will skip): {}", data.id, err )) return # Hand off to the run() method for execution. logger.info(__("Running {}", runtime_dir)) self.run(data, runtime_dir, argv)
[ "def", "_data_execute", "(", "self", ",", "data", ",", "program", ",", "executor", ")", ":", "if", "not", "program", ":", "return", "logger", ".", "debug", "(", "__", "(", "\"Manager preparing Data with id {} for processing.\"", ",", "data", ".", "id", ")", ...
Execute the Data object. The activities carried out here include target directory preparation, executor copying, setting serialization and actual execution of the object. :param data: The :class:`~resolwe.flow.models.Data` object to execute. :param program: The process text the manager got out of execution engine evaluation. :param executor: The executor to use for this object.
[ "Execute", "the", "Data", "object", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L713-L765
train
45,239
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.get_expression_engine
def get_expression_engine(self, name): """Return an expression engine instance.""" try: return self.expression_engines[name] except KeyError: raise InvalidEngineError("Unsupported expression engine: {}".format(name))
python
def get_expression_engine(self, name): """Return an expression engine instance.""" try: return self.expression_engines[name] except KeyError: raise InvalidEngineError("Unsupported expression engine: {}".format(name))
[ "def", "get_expression_engine", "(", "self", ",", "name", ")", ":", "try", ":", "return", "self", ".", "expression_engines", "[", "name", "]", "except", "KeyError", ":", "raise", "InvalidEngineError", "(", "\"Unsupported expression engine: {}\"", ".", "format", "(...
Return an expression engine instance.
[ "Return", "an", "expression", "engine", "instance", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L916-L921
train
45,240
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.get_execution_engine
def get_execution_engine(self, name): """Return an execution engine instance.""" try: return self.execution_engines[name] except KeyError: raise InvalidEngineError("Unsupported execution engine: {}".format(name))
python
def get_execution_engine(self, name): """Return an execution engine instance.""" try: return self.execution_engines[name] except KeyError: raise InvalidEngineError("Unsupported execution engine: {}".format(name))
[ "def", "get_execution_engine", "(", "self", ",", "name", ")", ":", "try", ":", "return", "self", ".", "execution_engines", "[", "name", "]", "except", "KeyError", ":", "raise", "InvalidEngineError", "(", "\"Unsupported execution engine: {}\"", ".", "format", "(", ...
Return an execution engine instance.
[ "Return", "an", "execution", "engine", "instance", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L923-L928
train
45,241
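Both engine getters share the same translation of `KeyError` into a domain-specific error; a generic sketch (the exception class here is a local stand-in for resolwe's `InvalidEngineError`):

    class InvalidEngineError(KeyError):
        """Raised when a requested engine is not registered."""

    def get_engine(registry, name, kind='engine'):
        # Translate a missing registry key into a domain-level error.
        try:
            return registry[name]
        except KeyError:
            raise InvalidEngineError("Unsupported {}: {}".format(kind, name))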
genialis/resolwe
resolwe/flow/managers/dispatcher.py
Manager.load_executor
def load_executor(self, executor_name): """Load process executor.""" executor_name = executor_name + '.prepare' module = import_module(executor_name) return module.FlowExecutorPreparer()
python
def load_executor(self, executor_name): """Load process executor.""" executor_name = executor_name + '.prepare' module = import_module(executor_name) return module.FlowExecutorPreparer()
[ "def", "load_executor", "(", "self", ",", "executor_name", ")", ":", "executor_name", "=", "executor_name", "+", "'.prepare'", "module", "=", "import_module", "(", "executor_name", ")", "return", "module", ".", "FlowExecutorPreparer", "(", ")" ]
Load process executor.
[ "Load", "process", "executor", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/dispatcher.py#L930-L934
train
45,242
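Executor loading is an `importlib` pattern: import the configured dotted path (plus a `.prepare` suffix) and instantiate a well-known class from it. A sketch that stays runnable by pointing at a standard-library module instead:

    from importlib import import_module

    def load_class(module_path, class_name):
        """Import a dotted module path and instantiate a named class."""
        module = import_module(module_path)
        return getattr(module, class_name)()

    # Demonstration only: instantiates collections.OrderedDict.
    ordered = load_class('collections', 'OrderedDict')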
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet.get_queryset
def get_queryset(self): # pylint: disable=method-hidden """Return queryset.""" if self.request and self.request.query_params.get('hydrate_data', False): return self.queryset.prefetch_related('data__entity_set') return self.queryset
python
def get_queryset(self): # pylint: disable=method-hidden """Return queryset.""" if self.request and self.request.query_params.get('hydrate_data', False): return self.queryset.prefetch_related('data__entity_set') return self.queryset
[ "def", "get_queryset", "(", "self", ")", ":", "# pylint: disable=method-hidden", "if", "self", ".", "request", "and", "self", ".", "request", ".", "query_params", ".", "get", "(", "'hydrate_data'", ",", "False", ")", ":", "return", "self", ".", "queryset", "...
Return queryset.
[ "Return", "queryset", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L38-L43
train
45,243
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet._get_collection_for_user
def _get_collection_for_user(self, collection_id, user): """Check that collection exists and user has `add` permission.""" collection_query = Collection.objects.filter(pk=collection_id) if not collection_query.exists(): raise exceptions.ValidationError('Collection id does not exist') collection = collection_query.first() if not user.has_perm('add_collection', obj=collection): if user.is_authenticated: raise exceptions.PermissionDenied() else: raise exceptions.NotFound() return collection
python
def _get_collection_for_user(self, collection_id, user): """Check that collection exists and user has `add` permission.""" collection_query = Collection.objects.filter(pk=collection_id) if not collection_query.exists(): raise exceptions.ValidationError('Collection id does not exist') collection = collection_query.first() if not user.has_perm('add_collection', obj=collection): if user.is_authenticated: raise exceptions.PermissionDenied() else: raise exceptions.NotFound() return collection
[ "def", "_get_collection_for_user", "(", "self", ",", "collection_id", ",", "user", ")", ":", "collection_query", "=", "Collection", ".", "objects", ".", "filter", "(", "pk", "=", "collection_id", ")", "if", "not", "collection_query", ".", "exists", "(", ")", ...
Check that collection exists and user has `add` permission.
[ "Check", "that", "collection", "exists", "and", "user", "has", "add", "permission", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L45-L58
train
45,244
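The permission branch in `_get_collection_for_user` encodes a common API convention: authenticated users get an explicit denial, anonymous users a not-found, so the object's existence is not leaked. A framework-free sketch:

    class PermissionDenied(Exception):
        """Denial for users we can identify (maps to HTTP 403)."""

    class NotFound(Exception):
        """Hides the object's existence from anonymous users (HTTP 404)."""

    def check_access(has_permission, is_authenticated):
        # Deny loudly for known users, quietly for anonymous ones.
        if has_permission:
            return
        if is_authenticated:
            raise PermissionDenied()
        raise NotFound()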
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet._get_entities
def _get_entities(self, user, ids): """Return entities queryset based on provided entity ids.""" queryset = get_objects_for_user(user, 'view_entity', Entity.objects.filter(id__in=ids)) actual_ids = queryset.values_list('id', flat=True) missing_ids = list(set(ids) - set(actual_ids)) if missing_ids: raise exceptions.ParseError( "Entities with the following ids not found: {}" .format(', '.join(map(str, missing_ids))) ) return queryset
python
def _get_entities(self, user, ids): """Return entities queryset based on provided entity ids.""" queryset = get_objects_for_user(user, 'view_entity', Entity.objects.filter(id__in=ids)) actual_ids = queryset.values_list('id', flat=True) missing_ids = list(set(ids) - set(actual_ids)) if missing_ids: raise exceptions.ParseError( "Entities with the following ids not found: {}" .format(', '.join(map(str, missing_ids))) ) return queryset
[ "def", "_get_entities", "(", "self", ",", "user", ",", "ids", ")", ":", "queryset", "=", "get_objects_for_user", "(", "user", ",", "'view_entity'", ",", "Entity", ".", "objects", ".", "filter", "(", "id__in", "=", "ids", ")", ")", "actual_ids", "=", "que...
Return entities queryset based on provided entity ids.
[ "Return", "entities", "queryset", "based", "on", "provided", "entity", "ids", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L60-L70
train
45,245
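Validating the requested ids against the visible queryset is a set difference; with the ORM and permission filtering elided, the check looks like this sketch:

    def check_ids_visible(requested_ids, visible_ids):
        """Raise if any requested id is not in the visible set."""
        missing = sorted(set(requested_ids) - set(visible_ids))
        if missing:
            raise ValueError(
                "Entities with the following ids not found: {}".format(
                    ', '.join(map(str, missing))))

    check_ids_visible([1, 2], [1, 2, 3])    # passes silently
    # check_ids_visible([1, 4], [1, 2, 3])  # would raise ValueError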
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet.set_content_permissions
def set_content_permissions(self, user, obj, payload): """Apply permissions to data objects in ``Entity``.""" # Data doesn't have "ADD" permission, so it has to be removed payload = remove_permission(payload, 'add') for data in obj.data.all(): if user.has_perm('share_data', data): update_permission(data, payload)
python
def set_content_permissions(self, user, obj, payload): """Apply permissions to data objects in ``Entity``.""" # Data doesn't have "ADD" permission, so it has to be removed payload = remove_permission(payload, 'add') for data in obj.data.all(): if user.has_perm('share_data', data): update_permission(data, payload)
[ "def", "set_content_permissions", "(", "self", ",", "user", ",", "obj", ",", "payload", ")", ":", "# Data doesn't have \"ADD\" permission, so it has to be removed", "payload", "=", "remove_permission", "(", "payload", ",", "'add'", ")", "for", "data", "in", "obj", "...
Apply permissions to data objects in ``Entity``.
[ "Apply", "permissions", "to", "data", "objects", "in", "Entity", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L72-L79
train
45,246
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet.add_to_collection
def add_to_collection(self, request, pk=None): """Add Entity to a collection.""" entity = self.get_object() # TODO use `self.get_ids` (and elsewhere). Backwards # incompatible because raised error's response contains # ``detail`` instead of ``error``. if 'ids' not in request.data: return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST) for collection_id in request.data['ids']: self._get_collection_for_user(collection_id, request.user) for collection_id in request.data['ids']: entity.collections.add(collection_id) collection = Collection.objects.get(pk=collection_id) for data in entity.data.all(): collection.data.add(data) return Response()
python
def add_to_collection(self, request, pk=None): """Add Entity to a collection.""" entity = self.get_object() # TODO use `self.get_ids` (and elsewhere). Backwards # incompatible because raised error's response contains # ``detail`` instead of ``error``. if 'ids' not in request.data: return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST) for collection_id in request.data['ids']: self._get_collection_for_user(collection_id, request.user) for collection_id in request.data['ids']: entity.collections.add(collection_id) collection = Collection.objects.get(pk=collection_id) for data in entity.data.all(): collection.data.add(data) return Response()
[ "def", "add_to_collection", "(", "self", ",", "request", ",", "pk", "=", "None", ")", ":", "entity", "=", "self", ".", "get_object", "(", ")", "# TODO use `self.get_ids` (and elsewhere). Backwards", "# incompatible because raised error's response contains", "# ``detail`` in...
Add Entity to a collection.
[ "Add", "Entity", "to", "a", "collection", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L107-L127
train
45,247
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet.add_data
def add_data(self, request, pk=None): """Add data to Entity and it's collection.""" # add data to entity resp = super().add_data(request, pk) # add data to collections in which entity is entity = self.get_object() for collection in entity.collections.all(): collection.data.add(*request.data['ids']) return resp
python
def add_data(self, request, pk=None): """Add data to Entity and it's collection.""" # add data to entity resp = super().add_data(request, pk) # add data to collections in which entity is entity = self.get_object() for collection in entity.collections.all(): collection.data.add(*request.data['ids']) return resp
[ "def", "add_data", "(", "self", ",", "request", ",", "pk", "=", "None", ")", ":", "# add data to entity", "resp", "=", "super", "(", ")", ".", "add_data", "(", "request", ",", "pk", ")", "# add data to collections in which entity is", "entity", "=", "self", ...
Add data to Entity and its collection.
[ "Add", "data", "to", "Entity", "and", "it", "s", "collection", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L150-L160
train
45,248
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet.move_to_collection
def move_to_collection(self, request, *args, **kwargs): """Move samples from source to destination collection.""" ids = self.get_ids(request.data) src_collection_id = self.get_id(request.data, 'source_collection') dst_collection_id = self.get_id(request.data, 'destination_collection') src_collection = self._get_collection_for_user(src_collection_id, request.user) dst_collection = self._get_collection_for_user(dst_collection_id, request.user) entity_qs = self._get_entities(request.user, ids) entity_qs.move_to_collection(src_collection, dst_collection) return Response()
python
def move_to_collection(self, request, *args, **kwargs): """Move samples from source to destination collection.""" ids = self.get_ids(request.data) src_collection_id = self.get_id(request.data, 'source_collection') dst_collection_id = self.get_id(request.data, 'destination_collection') src_collection = self._get_collection_for_user(src_collection_id, request.user) dst_collection = self._get_collection_for_user(dst_collection_id, request.user) entity_qs = self._get_entities(request.user, ids) entity_qs.move_to_collection(src_collection, dst_collection) return Response()
[ "def", "move_to_collection", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ids", "=", "self", ".", "get_ids", "(", "request", ".", "data", ")", "src_collection_id", "=", "self", ".", "get_id", "(", "request", ".", ...
Move samples from source to destination collection.
[ "Move", "samples", "from", "source", "to", "destination", "collection", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L163-L175
train
45,249
genialis/resolwe
resolwe/flow/views/entity.py
EntityViewSet.update
def update(self, request, *args, **kwargs): """Update an entity. Original queryset produces a temporary database table whose rows cannot be selected for an update. As a workaround, we patch get_queryset function to return only Entity objects without additional data that is not needed for the update. """ orig_get_queryset = self.get_queryset def patched_get_queryset(): """Patched get_queryset method.""" entity_ids = orig_get_queryset().values_list('id', flat=True) return Entity.objects.filter(id__in=entity_ids) self.get_queryset = patched_get_queryset resp = super().update(request, *args, **kwargs) self.get_queryset = orig_get_queryset return resp
python
def update(self, request, *args, **kwargs): """Update an entity. Original queryset produces a temporary database table whose rows cannot be selected for an update. As a workaround, we patch get_queryset function to return only Entity objects without additional data that is not needed for the update. """ orig_get_queryset = self.get_queryset def patched_get_queryset(): """Patched get_queryset method.""" entity_ids = orig_get_queryset().values_list('id', flat=True) return Entity.objects.filter(id__in=entity_ids) self.get_queryset = patched_get_queryset resp = super().update(request, *args, **kwargs) self.get_queryset = orig_get_queryset return resp
[ "def", "update", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "orig_get_queryset", "=", "self", ".", "get_queryset", "def", "patched_get_queryset", "(", ")", ":", "\"\"\"Patched get_queryset method.\"\"\"", "entity_ids", "=",...
Update an entity. Original queryset produces a temporary database table whose rows cannot be selected for an update. As a workaround, we patch get_queryset function to return only Entity objects without additional data that is not needed for the update.
[ "Update", "an", "entity", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/entity.py#L180-L198
train
45,250
genialis/resolwe
resolwe/flow/executors/docker/run.py
FlowExecutor.end
async def end(self): """End process execution.""" try: await self.proc.wait() finally: # Cleanup temporary files. for temporary_file in self.temporary_files: temporary_file.close() self.temporary_files = [] return self.proc.returncode
python
async def end(self): """End process execution.""" try: await self.proc.wait() finally: # Cleanup temporary files. for temporary_file in self.temporary_files: temporary_file.close() self.temporary_files = [] return self.proc.returncode
[ "async", "def", "end", "(", "self", ")", ":", "try", ":", "await", "self", ".", "proc", ".", "wait", "(", ")", "finally", ":", "# Cleanup temporary files.", "for", "temporary_file", "in", "self", ".", "temporary_files", ":", "temporary_file", ".", "close", ...
End process execution.
[ "End", "process", "execution", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/docker/run.py#L318-L328
train
45,251
genialis/resolwe
resolwe/flow/executors/run.py
iterjson
def iterjson(text): """Decode JSON stream.""" decoder = json.JSONDecoder() while text: obj, ndx = decoder.raw_decode(text) if not isinstance(obj, dict): raise ValueError() text = text[ndx:].lstrip('\r\n') yield obj
python
def iterjson(text): """Decode JSON stream.""" decoder = json.JSONDecoder() while text: obj, ndx = decoder.raw_decode(text) if not isinstance(obj, dict): raise ValueError() text = text[ndx:].lstrip('\r\n') yield obj
[ "def", "iterjson", "(", "text", ")", ":", "decoder", "=", "json", ".", "JSONDecoder", "(", ")", "while", "text", ":", "obj", ",", "ndx", "=", "decoder", ".", "raw_decode", "(", "text", ")", "if", "not", "isinstance", "(", "obj", ",", "dict", ")", "...
Decode JSON stream.
[ "Decode", "JSON", "stream", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/run.py#L32-L42
train
45,252
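
The ``iterjson`` record above leans on ``json.JSONDecoder.raw_decode``, which parses a single value off the front of a string and returns the index where parsing stopped. A self-contained sketch of that streaming idiom follows; the function and variable names are illustrative, not taken from the record:

import json

def iter_json_objects(text):
    """Yield each top-level JSON object from a concatenated stream."""
    decoder = json.JSONDecoder()
    while text:
        obj, end = decoder.raw_decode(text)
        if not isinstance(obj, dict):
            raise ValueError('expected a JSON object, got {!r}'.format(obj))
        # raw_decode does not skip leading whitespace, so strip the
        # newline separators before the next pass, as the record does.
        text = text[end:].lstrip('\r\n')
        yield obj

stream = '{"a": 1}\n{"b": 2}'
assert list(iter_json_objects(stream)) == [{'a': 1}, {'b': 2}]
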
genialis/resolwe
resolwe/flow/executors/run.py
BaseFlowExecutor._send_manager_command
async def _send_manager_command(self, *args, **kwargs): """Send an update to manager and terminate the process if it fails.""" resp = await send_manager_command(*args, **kwargs) if resp is False: await self.terminate()
python
async def _send_manager_command(self, *args, **kwargs): """Send an update to manager and terminate the process if it fails.""" resp = await send_manager_command(*args, **kwargs) if resp is False: await self.terminate()
[ "async", "def", "_send_manager_command", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "resp", "=", "await", "send_manager_command", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "resp", "is", "False", ":", "await", "self", ...
Send an update to manager and terminate the process if it fails.
[ "Send", "an", "update", "to", "manager", "and", "terminate", "the", "process", "if", "it", "fails", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/run.py#L70-L75
train
45,253
genialis/resolwe
resolwe/flow/executors/run.py
BaseFlowExecutor._create_file
def _create_file(self, filename): """Ensure a new file is created and opened for writing.""" file_descriptor = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_EXCL) return os.fdopen(file_descriptor, 'w')
python
def _create_file(self, filename): """Ensure a new file is created and opened for writing.""" file_descriptor = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_EXCL) return os.fdopen(file_descriptor, 'w')
[ "def", "_create_file", "(", "self", ",", "filename", ")", ":", "file_descriptor", "=", "os", ".", "open", "(", "filename", ",", "os", ".", "O_WRONLY", "|", "os", ".", "O_CREAT", "|", "os", ".", "O_EXCL", ")", "return", "os", ".", "fdopen", "(", "file...
Ensure a new file is created and opened for writing.
[ "Ensure", "a", "new", "file", "is", "created", "and", "opened", "for", "writing", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/run.py#L129-L132
train
45,254
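
``_create_file`` combines ``os.O_CREAT | os.O_EXCL`` so creation is atomic: the open fails if the file already exists, which closes the race between checking for the file and creating it. A runnable sketch of the same pattern; the path handling is made up for the demo:

import os
import tempfile

def create_new_file(path):
    # O_EXCL turns "create" into "create only if absent"; a second
    # caller gets FileExistsError instead of silently truncating.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
    return os.fdopen(fd, 'w')

path = os.path.join(tempfile.mkdtemp(), 'out.txt')
with create_new_file(path) as f:
    f.write('first writer wins\n')
try:
    create_new_file(path)
except FileExistsError:
    print('refused to clobber', path)
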
DataONEorg/d1_python
gmn/src/d1_gmn/app/local_replica.py
get_total_size_of_queued_replicas
def get_total_size_of_queued_replicas(): """Return the total number of bytes of requested, unprocessed replicas.""" return ( d1_gmn.app.models.ReplicationQueue.objects.filter( local_replica__info__status__status='queued' ).aggregate(Sum('size'))['size__sum'] or 0 )
python
def get_total_size_of_queued_replicas(): """Return the total number of bytes of requested, unprocessed replicas.""" return ( d1_gmn.app.models.ReplicationQueue.objects.filter( local_replica__info__status__status='queued' ).aggregate(Sum('size'))['size__sum'] or 0 )
[ "def", "get_total_size_of_queued_replicas", "(", ")", ":", "return", "(", "d1_gmn", ".", "app", ".", "models", ".", "ReplicationQueue", ".", "objects", ".", "filter", "(", "local_replica__info__status__status", "=", "'queued'", ")", ".", "aggregate", "(", "Sum", ...
Return the total number of bytes of requested, unprocessed replicas.
[ "Return", "the", "total", "number", "of", "bytes", "of", "requested", "unprocessed", "replicas", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/local_replica.py#L48-L55
train
45,255
DataONEorg/d1_python
gmn/src/d1_gmn/app/local_replica.py
add_to_replication_queue
def add_to_replication_queue(source_node_urn, sysmeta_pyxb): """Add a replication request issued by a CN to a queue that is processed asynchronously. Preconditions: - sysmeta_pyxb.identifier is verified to be available for create. E.g., with d1_gmn.app.views.is_valid_pid_for_create(pid). Postconditions: - The database is set up to track a new replica, with initial status, "queued". - The PID provided in the sysmeta_pyxb is reserved for the replica. """ replica_info_model = d1_gmn.app.models.replica_info( status_str='queued', source_node_urn=source_node_urn ) local_replica_model = d1_gmn.app.models.local_replica( pid=d1_common.xml.get_req_val(sysmeta_pyxb.identifier), replica_info_model=replica_info_model, ) d1_gmn.app.models.replication_queue( local_replica_model=local_replica_model, size=sysmeta_pyxb.size )
python
def add_to_replication_queue(source_node_urn, sysmeta_pyxb): """Add a replication request issued by a CN to a queue that is processed asynchronously. Preconditions: - sysmeta_pyxb.identifier is verified to be available for create. E.g., with d1_gmn.app.views.is_valid_pid_for_create(pid). Postconditions: - The database is set up to track a new replica, with initial status, "queued". - The PID provided in the sysmeta_pyxb is reserved for the replica. """ replica_info_model = d1_gmn.app.models.replica_info( status_str='queued', source_node_urn=source_node_urn ) local_replica_model = d1_gmn.app.models.local_replica( pid=d1_common.xml.get_req_val(sysmeta_pyxb.identifier), replica_info_model=replica_info_model, ) d1_gmn.app.models.replication_queue( local_replica_model=local_replica_model, size=sysmeta_pyxb.size )
[ "def", "add_to_replication_queue", "(", "source_node_urn", ",", "sysmeta_pyxb", ")", ":", "replica_info_model", "=", "d1_gmn", ".", "app", ".", "models", ".", "replica_info", "(", "status_str", "=", "'queued'", ",", "source_node_urn", "=", "source_node_urn", ")", ...
Add a replication request issued by a CN to a queue that is processed asynchronously. Preconditions: - sysmeta_pyxb.identifier is verified to be available for create. E.g., with d1_gmn.app.views.is_valid_pid_for_create(pid). Postconditions: - The database is set up to track a new replica, with initial status, "queued". - The PID provided in the sysmeta_pyxb is reserved for the replica.
[ "Add", "a", "replication", "request", "issued", "by", "a", "CN", "to", "a", "queue", "that", "is", "processed", "asynchronously", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/local_replica.py#L58-L80
train
45,256
DataONEorg/d1_python
gmn/src/d1_gmn/app/management/commands/util/standard_args.py
add_arguments
def add_arguments(parser, doc_str, add_base_url=True): """Add standard arguments for DataONE utilities to a command line parser.""" parser.description = doc_str parser.formatter_class = argparse.RawDescriptionHelpFormatter parser.add_argument("--debug", action="store_true", help="Debug level logging") parser.add_argument( "--cert-pub", dest="cert_pem_path", action="store", default=django.conf.settings.CLIENT_CERT_PATH, help="Path to PEM formatted public key of certificate", ) parser.add_argument( "--cert-key", dest="cert_key_path", action="store", default=django.conf.settings.CLIENT_CERT_PRIVATE_KEY_PATH, help="Path to PEM formatted private key of certificate", ) parser.add_argument( "--public", action="store_true", help="Do not use certificate even if available" ) parser.add_argument( "--disable-server-cert-validation", action="store_true", help="Do not validate the TLS/SSL server side certificate of the source node (insecure)", ) parser.add_argument( "--timeout", type=float, action="store", default=DEFAULT_TIMEOUT_SEC, help="Timeout for DataONE API calls to the source MN", ) parser.add_argument( "--retries", type=int, action="store", default=DEFAULT_RETRY_COUNT, help="Retry DataONE API calls that raise HTTP level exceptions", ) parser.add_argument( "--page-size", type=int, action="store", default=DEFAULT_PAGE_SIZE, help="Number of objects to retrieve in each list method API call to source MN", ) parser.add_argument( "--major", type=int, action="store", help="Skip automatic detection of API major version and use the provided version", ) parser.add_argument( "--max-concurrent", type=int, action="store", default=DEFAULT_MAX_CONCURRENT_TASK_COUNT, help="Max number of concurrent DataONE API", ) if not add_base_url: parser.add_argument( "--baseurl", action="store", default=django.conf.settings.DATAONE_ROOT, help="Remote MN or CN BaseURL", ) else: parser.add_argument("baseurl", help="Remote MN or CN BaseURL")
python
def add_arguments(parser, doc_str, add_base_url=True): """Add standard arguments for DataONE utilities to a command line parser.""" parser.description = doc_str parser.formatter_class = argparse.RawDescriptionHelpFormatter parser.add_argument("--debug", action="store_true", help="Debug level logging") parser.add_argument( "--cert-pub", dest="cert_pem_path", action="store", default=django.conf.settings.CLIENT_CERT_PATH, help="Path to PEM formatted public key of certificate", ) parser.add_argument( "--cert-key", dest="cert_key_path", action="store", default=django.conf.settings.CLIENT_CERT_PRIVATE_KEY_PATH, help="Path to PEM formatted private key of certificate", ) parser.add_argument( "--public", action="store_true", help="Do not use certificate even if available" ) parser.add_argument( "--disable-server-cert-validation", action="store_true", help="Do not validate the TLS/SSL server side certificate of the source node (insecure)", ) parser.add_argument( "--timeout", type=float, action="store", default=DEFAULT_TIMEOUT_SEC, help="Timeout for DataONE API calls to the source MN", ) parser.add_argument( "--retries", type=int, action="store", default=DEFAULT_RETRY_COUNT, help="Retry DataONE API calls that raise HTTP level exceptions", ) parser.add_argument( "--page-size", type=int, action="store", default=DEFAULT_PAGE_SIZE, help="Number of objects to retrieve in each list method API call to source MN", ) parser.add_argument( "--major", type=int, action="store", help="Skip automatic detection of API major version and use the provided version", ) parser.add_argument( "--max-concurrent", type=int, action="store", default=DEFAULT_MAX_CONCURRENT_TASK_COUNT, help="Max number of concurrent DataONE API", ) if not add_base_url: parser.add_argument( "--baseurl", action="store", default=django.conf.settings.DATAONE_ROOT, help="Remote MN or CN BaseURL", ) else: parser.add_argument("baseurl", help="Remote MN or CN BaseURL")
[ "def", "add_arguments", "(", "parser", ",", "doc_str", ",", "add_base_url", "=", "True", ")", ":", "parser", ".", "description", "=", "doc_str", "parser", ".", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", "parser", ".", "add_argument", ...
Add standard arguments for DataONE utilities to a command line parser.
[ "Add", "standard", "arguments", "for", "DataONE", "utilities", "to", "a", "command", "line", "parser", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/management/commands/util/standard_args.py#L29-L98
train
45,257
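
The ``add_arguments`` record wires a standard flag set into a caller-supplied ``argparse`` parser, with defaults pulled from Django settings. A stripped-down, dependency-free sketch of the pattern is below; the default value is a placeholder, not the project's real constant. Note that the record adds a ``baseurl`` in both branches of ``add_base_url``: an optional ``--baseurl`` with a default when the flag is False, and a required positional otherwise.

import argparse

DEFAULT_TIMEOUT_SEC = 60.0  # placeholder; the real default lives in the utility

def build_parser(doc_str):
    parser = argparse.ArgumentParser(
        description=doc_str,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('--debug', action='store_true',
                        help='Debug level logging')
    parser.add_argument('--timeout', type=float, default=DEFAULT_TIMEOUT_SEC,
                        help='Timeout for DataONE API calls to the source MN')
    parser.add_argument('baseurl', help='Remote MN or CN BaseURL')
    return parser

args = build_parser('Example utility').parse_args(
    ['--timeout', '30', 'https://example.org/mn'])
assert args.timeout == 30.0 and not args.debug
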
DataONEorg/d1_python
gmn/src/d1_gmn/app/resource_map.py
get_resource_map_members
def get_resource_map_members(pid): """``pid`` is the PID of a Resource Map or the PID of a member of a Resource Map.""" if d1_gmn.app.did.is_resource_map_db(pid): return get_resource_map_members_by_map(pid) elif d1_gmn.app.did.is_resource_map_member(pid): return get_resource_map_members_by_member(pid) else: raise d1_common.types.exceptions.InvalidRequest( 0, 'Not a Resource Map or Resource Map member. pid="{}"'.format(pid) )
python
def get_resource_map_members(pid): """``pid`` is the PID of a Resource Map or the PID of a member of a Resource Map.""" if d1_gmn.app.did.is_resource_map_db(pid): return get_resource_map_members_by_map(pid) elif d1_gmn.app.did.is_resource_map_member(pid): return get_resource_map_members_by_member(pid) else: raise d1_common.types.exceptions.InvalidRequest( 0, 'Not a Resource Map or Resource Map member. pid="{}"'.format(pid) )
[ "def", "get_resource_map_members", "(", "pid", ")", ":", "if", "d1_gmn", ".", "app", ".", "did", ".", "is_resource_map_db", "(", "pid", ")", ":", "return", "get_resource_map_members_by_map", "(", "pid", ")", "elif", "d1_gmn", ".", "app", ".", "did", ".", "...
``pid`` is the PID of a Resource Map or the PID of a member of a Resource Map.
[ "pid", "is", "the", "PID", "of", "a", "Resource", "Map", "or", "the", "PID", "of", "a", "member", "of", "a", "Resource", "Map", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/resource_map.py#L97-L106
train
45,258
DataONEorg/d1_python
client_cli/src/d1_cli/impl/session.py
Session.set_with_conversion
def set_with_conversion(self, variable, value_string): """Convert user supplied string to Python type. Lets user use values such as True, False and integers. All variables can be set to None, regardless of type. Handle the case where a string is typed by the user and is not quoted, as a string literal. """ self._assert_valid_variable(variable) try: v = ast.literal_eval(value_string) except (ValueError, SyntaxError): v = value_string if v is None or v == "none": self._variables[variable] = None else: try: type_converter = variable_type_map[variable] value_string = self._validate_variable_type( value_string, type_converter ) value = type_converter(value_string) self._variables[variable] = value except ValueError: raise d1_cli.impl.exceptions.InvalidArguments( "Invalid value for {}: {}".format(variable, value_string) )
python
def set_with_conversion(self, variable, value_string): """Convert user supplied string to Python type. Lets user use values such as True, False and integers. All variables can be set to None, regardless of type. Handle the case where a string is typed by the user and is not quoted, as a string literal. """ self._assert_valid_variable(variable) try: v = ast.literal_eval(value_string) except (ValueError, SyntaxError): v = value_string if v is None or v == "none": self._variables[variable] = None else: try: type_converter = variable_type_map[variable] value_string = self._validate_variable_type( value_string, type_converter ) value = type_converter(value_string) self._variables[variable] = value except ValueError: raise d1_cli.impl.exceptions.InvalidArguments( "Invalid value for {}: {}".format(variable, value_string) )
[ "def", "set_with_conversion", "(", "self", ",", "variable", ",", "value_string", ")", ":", "self", ".", "_assert_valid_variable", "(", "variable", ")", "try", ":", "v", "=", "ast", ".", "literal_eval", "(", "value_string", ")", "except", "(", "ValueError", "...
Convert user supplied string to Python type. Lets user use values such as True, False and integers. All variables can be set to None, regardless of type. Handle the case where a string is typed by the user and is not quoted, as a string literal.
[ "Convert", "user", "supplied", "string", "to", "Python", "type", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/session.py#L160-L186
train
45,259
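
``set_with_conversion`` first runs the input through ``ast.literal_eval`` so typed values such as ``True`` or ``42`` are recognized, while unquoted words fall back to plain strings. The fallback in isolation, with hypothetical names:

import ast

def parse_value(value_string):
    """Interpret user input as a Python literal, falling back to str."""
    try:
        return ast.literal_eval(value_string)
    except (ValueError, SyntaxError):
        # Unquoted words such as `hello` are not valid literals;
        # treat them as strings, mirroring the record above.
        return value_string

assert parse_value('True') is True
assert parse_value('42') == 42
assert parse_value('hello') == 'hello'
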
DataONEorg/d1_python
lib_common/src/d1_common/util.py
log_setup
def log_setup(is_debug=False, is_multiprocess=False): """Set up a standardized log format for the DataONE Python stack. All Python components should use this function. If ``is_multiprocess`` is True, include process ID in the log so that logs can be separated for each process. Output only to stdout and stderr. """ format_str = ( '%(asctime)s %(name)s %(module)s:%(lineno)d %(process)4d %(levelname)-8s %(message)s' if is_multiprocess else '%(asctime)s %(name)s %(module)s:%(lineno)d %(levelname)-8s %(message)s' ) formatter = logging.Formatter(format_str, '%Y-%m-%d %H:%M:%S') console_logger = logging.StreamHandler(sys.stdout) console_logger.setFormatter(formatter) logging.getLogger('').addHandler(console_logger) if is_debug: logging.getLogger('').setLevel(logging.DEBUG) else: logging.getLogger('').setLevel(logging.INFO)
python
def log_setup(is_debug=False, is_multiprocess=False): """Set up a standardized log format for the DataONE Python stack. All Python components should use this function. If ``is_multiprocess`` is True, include process ID in the log so that logs can be separated for each process. Output only to stdout and stderr. """ format_str = ( '%(asctime)s %(name)s %(module)s:%(lineno)d %(process)4d %(levelname)-8s %(message)s' if is_multiprocess else '%(asctime)s %(name)s %(module)s:%(lineno)d %(levelname)-8s %(message)s' ) formatter = logging.Formatter(format_str, '%Y-%m-%d %H:%M:%S') console_logger = logging.StreamHandler(sys.stdout) console_logger.setFormatter(formatter) logging.getLogger('').addHandler(console_logger) if is_debug: logging.getLogger('').setLevel(logging.DEBUG) else: logging.getLogger('').setLevel(logging.INFO)
[ "def", "log_setup", "(", "is_debug", "=", "False", ",", "is_multiprocess", "=", "False", ")", ":", "format_str", "=", "(", "'%(asctime)s %(name)s %(module)s:%(lineno)d %(process)4d %(levelname)-8s %(message)s'", "if", "is_multiprocess", "else", "'%(asctime)s %(name)s %(module)s...
Set up a standardized log format for the DataONE Python stack. All Python components should use this function. If ``is_multiprocess`` is True, include process ID in the log so that logs can be separated for each process. Output only to stdout and stderr.
[ "Set", "up", "a", "standardized", "log", "format", "for", "the", "DataONE", "Python", "stack", ".", "All", "Python", "components", "should", "use", "this", "function", ".", "If", "is_multiprocess", "is", "True", "include", "process", "ID", "in", "the", "log"...
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L32-L52
train
45,260
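
``log_setup`` chooses between two format strings that differ only in the ``%(process)4d`` field, so multiprocess runs can separate interleaved logs by PID. A reduced sketch keeping just the varying part (the timestamp format mirrors the record):

import logging
import sys

def demo_log_setup(is_debug=False, is_multiprocess=False):
    if is_multiprocess:
        format_str = '%(asctime)s %(name)s %(process)4d %(levelname)-8s %(message)s'
    else:
        format_str = '%(asctime)s %(name)s %(levelname)-8s %(message)s'
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(format_str, '%Y-%m-%d %H:%M:%S'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.DEBUG if is_debug else logging.INFO)

demo_log_setup(is_multiprocess=True)
logging.info('each line now carries the worker PID')
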
DataONEorg/d1_python
lib_common/src/d1_common/util.py
get_content_type
def get_content_type(content_type): """Extract the MIME type value from a content type string. Removes any subtype and parameter values that may be present in the string. Args: content_type: str String with content type and optional subtype and parameter fields. Returns: str: String with only content type Example: :: Input: multipart/form-data; boundary=aBoundaryString Returns: multipart/form-data """ m = email.message.Message() m['Content-Type'] = content_type return m.get_content_type()
python
def get_content_type(content_type): """Extract the MIME type value from a content type string. Removes any subtype and parameter values that may be present in the string. Args: content_type: str String with content type and optional subtype and parameter fields. Returns: str: String with only content type Example: :: Input: multipart/form-data; boundary=aBoundaryString Returns: multipart/form-data """ m = email.message.Message() m['Content-Type'] = content_type return m.get_content_type()
[ "def", "get_content_type", "(", "content_type", ")", ":", "m", "=", "email", ".", "message", ".", "Message", "(", ")", "m", "[", "'Content-Type'", "]", "=", "content_type", "return", "m", ".", "get_content_type", "(", ")" ]
Extract the MIME type value from a content type string. Removes any subtype and parameter values that may be present in the string. Args: content_type: str String with content type and optional subtype and parameter fields. Returns: str: String with only content type Example: :: Input: multipart/form-data; boundary=aBoundaryString Returns: multipart/form-data
[ "Extract", "the", "MIME", "type", "value", "from", "a", "content", "type", "string", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L55-L77
train
45,261
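
``get_content_type`` delegates Content-Type parsing to ``email.message.Message``, which already implements the RFC 2045 grammar (parameters, quoting, case folding), instead of splitting the string by hand. The trick in isolation:

import email.message

def mime_type(content_type):
    # Assigning the header and reading it back drops parameters
    # such as boundary= and charset=.
    m = email.message.Message()
    m['Content-Type'] = content_type
    return m.get_content_type()

assert mime_type('multipart/form-data; boundary=aBoundaryString') == 'multipart/form-data'
assert mime_type('text/xml; charset=utf-8') == 'text/xml'
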
DataONEorg/d1_python
lib_common/src/d1_common/util.py
nested_update
def nested_update(d, u): """Merge two nested dicts. Nested dicts are sometimes used for representing various recursive structures. When updating such a structure, it may be convenient to present the updated data as a corresponding recursive structure. This function will then apply the update. Args: d: dict dict that will be updated in-place. May or may not contain nested dicts. u: dict dict with contents that will be merged into ``d``. May or may not contain nested dicts. """ for k, v in list(u.items()): if isinstance(v, collections.Mapping): r = nested_update(d.get(k, {}), v) d[k] = r else: d[k] = u[k] return d
python
def nested_update(d, u): """Merge two nested dicts. Nested dicts are sometimes used for representing various recursive structures. When updating such a structure, it may be convenient to present the updated data as a corresponding recursive structure. This function will then apply the update. Args: d: dict dict that will be updated in-place. May or may not contain nested dicts. u: dict dict with contents that will be merged into ``d``. May or may not contain nested dicts. """ for k, v in list(u.items()): if isinstance(v, collections.Mapping): r = nested_update(d.get(k, {}), v) d[k] = r else: d[k] = u[k] return d
[ "def", "nested_update", "(", "d", ",", "u", ")", ":", "for", "k", ",", "v", "in", "list", "(", "u", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "v", ",", "collections", ".", "Mapping", ")", ":", "r", "=", "nested_update", "(", "d...
Merge two nested dicts. Nested dicts are sometimes used for representing various recursive structures. When updating such a structure, it may be convenient to present the updated data as a corresponding recursive structure. This function will then apply the update. Args: d: dict dict that will be updated in-place. May or may not contain nested dicts. u: dict dict with contents that will be merged into ``d``. May or may not contain nested dicts.
[ "Merge", "two", "nested", "dicts", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L80-L102
train
45,262
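
``nested_update`` recurses whenever a value is itself a mapping, so sibling keys in the target dict survive a partial update. The record checks against ``collections.Mapping``, an alias that was removed in Python 3.10; the sketch below uses ``collections.abc.Mapping``, the modern spelling of the same test:

import collections.abc

def nested_update(d, u):
    """Recursively merge mapping ``u`` into dict ``d`` in place."""
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
            d[k] = nested_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d

cfg = {'db': {'host': 'localhost', 'port': 5432}}
nested_update(cfg, {'db': {'port': 5433}, 'debug': True})
assert cfg == {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
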
DataONEorg/d1_python
lib_common/src/d1_common/util.py
print_logging
def print_logging(): """Context manager to temporarily suppress additional information such as timestamps when writing to loggers. This makes logging look like ``print()``. The main use case is in scripts that mix logging and ``print()``, as Python uses separate streams for those, and output can and does end up getting shuffled if ``print()`` and logging is used interchangeably. When entering the context, the logging levels on the current handlers are saved then modified to WARNING levels. A new DEBUG level handler with a formatter that does not write timestamps, etc, is then created. When leaving the context, the DEBUG handler is removed and existing loggers are restored to their previous levels. By modifying the log levels to WARNING instead of completely disabling the loggers, it is ensured that potentially serious issues can still be logged while the context manager is in effect. """ root_logger = logging.getLogger() old_level_list = [h.level for h in root_logger.handlers] for h in root_logger.handlers: h.setLevel(logging.WARN) log_format = logging.Formatter('%(message)s') stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(log_format) stream_handler.setLevel(logging.DEBUG) root_logger.addHandler(stream_handler) yield root_logger.removeHandler(stream_handler) for h, level in zip(root_logger.handlers, old_level_list): h.setLevel(level)
python
def print_logging(): """Context manager to temporarily suppress additional information such as timestamps when writing to loggers. This makes logging look like ``print()``. The main use case is in scripts that mix logging and ``print()``, as Python uses separate streams for those, and output can and does end up getting shuffled if ``print()`` and logging is used interchangeably. When entering the context, the logging levels on the current handlers are saved then modified to WARNING levels. A new DEBUG level handler with a formatter that does not write timestamps, etc, is then created. When leaving the context, the DEBUG handler is removed and existing loggers are restored to their previous levels. By modifying the log levels to WARNING instead of completely disabling the loggers, it is ensured that potentially serious issues can still be logged while the context manager is in effect. """ root_logger = logging.getLogger() old_level_list = [h.level for h in root_logger.handlers] for h in root_logger.handlers: h.setLevel(logging.WARN) log_format = logging.Formatter('%(message)s') stream_handler = logging.StreamHandler(sys.stdout) stream_handler.setFormatter(log_format) stream_handler.setLevel(logging.DEBUG) root_logger.addHandler(stream_handler) yield root_logger.removeHandler(stream_handler) for h, level in zip(root_logger.handlers, old_level_list): h.setLevel(level)
[ "def", "print_logging", "(", ")", ":", "root_logger", "=", "logging", ".", "getLogger", "(", ")", "old_level_list", "=", "[", "h", ".", "level", "for", "h", "in", "root_logger", ".", "handlers", "]", "for", "h", "in", "root_logger", ".", "handlers", ":",...
Context manager to temporarily suppress additional information such as timestamps when writing to loggers. This makes logging look like ``print()``. The main use case is in scripts that mix logging and ``print()``, as Python uses separate streams for those, and output can and does end up getting shuffled if ``print()`` and logging is used interchangeably. When entering the context, the logging levels on the current handlers are saved then modified to WARNING levels. A new DEBUG level handler with a formatter that does not write timestamps, etc, is then created. When leaving the context, the DEBUG handler is removed and existing loggers are restored to their previous levels. By modifying the log levels to WARNING instead of completely disabling the loggers, it is ensured that potentially serious issues can still be logged while the context manager is in effect.
[ "Context", "manager", "to", "temporarily", "suppress", "additional", "information", "such", "as", "timestamps", "when", "writing", "to", "loggers", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L198-L230
train
45,263
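
``print_logging`` has the save/modify/yield/restore shape of a ``@contextlib.contextmanager`` generator. The record restores handler levels after a bare ``yield``; the sketch below wraps the restore in ``try``/``finally`` so it also runs if the body raises, a hardening choice of this sketch rather than something the record does:

import contextlib
import logging
import sys

@contextlib.contextmanager
def bare_logging():
    """Temporarily log bare messages, print()-style, to stdout."""
    root = logging.getLogger()
    saved_levels = [h.level for h in root.handlers]
    for h in root.handlers:
        h.setLevel(logging.WARNING)  # keep serious issues visible
    plain = logging.StreamHandler(sys.stdout)
    plain.setFormatter(logging.Formatter('%(message)s'))
    plain.setLevel(logging.DEBUG)
    root.addHandler(plain)
    try:
        yield
    finally:
        root.removeHandler(plain)
        for h, level in zip(root.handlers, saved_levels):
            h.setLevel(level)

logging.basicConfig(level=logging.INFO)
with bare_logging():
    logging.info('looks like print()')
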
DataONEorg/d1_python
lib_common/src/d1_common/util.py
save_json
def save_json(py_obj, json_path): """Serialize a native object to JSON and save it normalized, pretty printed to a file. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. json_path: str File path to which to write the JSON file. E.g.: The path must exist. The filename will normally end with ".json". See Also: ToJsonCompatibleTypes() """ with open(json_path, 'w', encoding='utf-8') as f: f.write(serialize_to_normalized_pretty_json(py_obj))
python
def save_json(py_obj, json_path): """Serialize a native object to JSON and save it normalized, pretty printed to a file. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. json_path: str File path to which to write the JSON file. E.g.: The path must exist. The filename will normally end with ".json". See Also: ToJsonCompatibleTypes() """ with open(json_path, 'w', encoding='utf-8') as f: f.write(serialize_to_normalized_pretty_json(py_obj))
[ "def", "save_json", "(", "py_obj", ",", "json_path", ")", ":", "with", "open", "(", "json_path", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "serialize_to_normalized_pretty_json", "(", "py_obj", ")", ")" ]
Serialize a native object to JSON and save it normalized, pretty printed to a file. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. json_path: str File path to which to write the JSON file. E.g.: The path must exist. The filename will normally end with ".json". See Also: ToJsonCompatibleTypes()
[ "Serialize", "a", "native", "object", "to", "JSON", "and", "save", "it", "normalized", "pretty", "printed", "to", "a", "file", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L233-L253
train
45,264
DataONEorg/d1_python
lib_common/src/d1_common/util.py
serialize_to_normalized_pretty_json
def serialize_to_normalized_pretty_json(py_obj): """Serialize a native object to normalized, pretty printed JSON. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, pretty printed JSON string. """ return json.dumps(py_obj, sort_keys=True, indent=2, cls=ToJsonCompatibleTypes)
python
def serialize_to_normalized_pretty_json(py_obj): """Serialize a native object to normalized, pretty printed JSON. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, pretty printed JSON string. """ return json.dumps(py_obj, sort_keys=True, indent=2, cls=ToJsonCompatibleTypes)
[ "def", "serialize_to_normalized_pretty_json", "(", "py_obj", ")", ":", "return", "json", ".", "dumps", "(", "py_obj", ",", "sort_keys", "=", "True", ",", "indent", "=", "2", ",", "cls", "=", "ToJsonCompatibleTypes", ")" ]
Serialize a native object to normalized, pretty printed JSON. The JSON string is normalized by sorting any dictionary keys. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, pretty printed JSON string.
[ "Serialize", "a", "native", "object", "to", "normalized", "pretty", "printed", "JSON", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L287-L301
train
45,265
DataONEorg/d1_python
lib_common/src/d1_common/util.py
serialize_to_normalized_compact_json
def serialize_to_normalized_compact_json(py_obj): """Serialize a native object to normalized, compact JSON. The JSON string is normalized by sorting any dictionary keys. It will be on a single line without whitespace between elements. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, compact JSON string. """ return json.dumps( py_obj, sort_keys=True, separators=(',', ':'), cls=ToJsonCompatibleTypes )
python
def serialize_to_normalized_compact_json(py_obj): """Serialize a native object to normalized, compact JSON. The JSON string is normalized by sorting any dictionary keys. It will be on a single line without whitespace between elements. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, compact JSON string. """ return json.dumps( py_obj, sort_keys=True, separators=(',', ':'), cls=ToJsonCompatibleTypes )
[ "def", "serialize_to_normalized_compact_json", "(", "py_obj", ")", ":", "return", "json", ".", "dumps", "(", "py_obj", ",", "sort_keys", "=", "True", ",", "separators", "=", "(", "','", ",", "':'", ")", ",", "cls", "=", "ToJsonCompatibleTypes", ")" ]
Serialize a native object to normalized, compact JSON. The JSON string is normalized by sorting any dictionary keys. It will be on a single line without whitespace between elements. Args: py_obj: object Any object that can be represented in JSON. Some types, such as datetimes are automatically converted to strings. Returns: str: normalized, compact JSON string.
[ "Serialize", "a", "native", "object", "to", "normalized", "compact", "JSON", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L304-L321
train
45,266
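
The two serializers above differ only in ``indent=2`` versus ``separators=(',', ':')``; both normalize by sorting keys and both route non-JSON types through a custom encoder class. ``ToJsonCompatible`` below is a guess at what the library's ``ToJsonCompatibleTypes`` does for datetimes, inferred from the docstrings rather than its actual code:

import datetime
import json

class ToJsonCompatible(json.JSONEncoder):
    # Hypothetical stand-in: render datetimes as ISO 8601 strings.
    def default(self, o):
        if isinstance(o, datetime.datetime):
            return o.isoformat()
        return super().default(o)

doc = {'b': 2, 'a': datetime.datetime(2019, 1, 1)}
compact = json.dumps(doc, sort_keys=True, separators=(',', ':'),
                     cls=ToJsonCompatible)
assert compact == '{"a":"2019-01-01T00:00:00","b":2}'
pretty = json.dumps(doc, sort_keys=True, indent=2, cls=ToJsonCompatible)
assert pretty.splitlines()[1] == '  "a": "2019-01-01T00:00:00",'
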
DataONEorg/d1_python
lib_common/src/d1_common/util.py
format_sec_to_dhm
def format_sec_to_dhm(sec): """Format seconds to days, hours, minutes. Args: sec: float or int Number of seconds in a period of time Returns: Period of time represented as a string of the form ``0d00h00m``. """ rem_int, s_int = divmod(int(sec), 60) rem_int, m_int, = divmod(rem_int, 60) d_int, h_int, = divmod(rem_int, 24) return '{}d{:02d}h{:02d}m'.format(d_int, h_int, m_int)
python
def format_sec_to_dhm(sec): """Format seconds to days, hours, minutes. Args: sec: float or int Number of seconds in a period of time Returns: Period of time represented as a string of the form ``0d00h00m``. """ rem_int, s_int = divmod(int(sec), 60) rem_int, m_int, = divmod(rem_int, 60) d_int, h_int, = divmod(rem_int, 24) return '{}d{:02d}h{:02d}m'.format(d_int, h_int, m_int)
[ "def", "format_sec_to_dhm", "(", "sec", ")", ":", "rem_int", ",", "s_int", "=", "divmod", "(", "int", "(", "sec", ")", ",", "60", ")", "rem_int", ",", "m_int", ",", "=", "divmod", "(", "rem_int", ",", "60", ")", "d_int", ",", "h_int", ",", "=", "...
Format seconds to days, hours, minutes. Args: sec: float or int Number of seconds in a period of time Returns: Period of time represented as a string of the form ``0d00h00m``.
[ "Format", "seconds", "to", "days", "hours", "minutes", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L347-L361
train
45,267
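
``format_sec_to_dhm`` is a chain of ``divmod`` calls: peel off the seconds, then the minutes, then split what remains into days and hours. The seconds remainder is computed but never used. A compact restatement with a worked value:

def format_sec_to_dhm(sec):
    rem, _unused_s = divmod(int(sec), 60)  # drop the seconds
    rem, m = divmod(rem, 60)
    d, h = divmod(rem, 24)
    return '{}d{:02d}h{:02d}m'.format(d, h, m)

# 90061 s = 1 day + 1 hour + 1 minute + 1 second; the second is dropped.
assert format_sec_to_dhm(90061) == '1d01h01m'
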
DataONEorg/d1_python
lib_common/src/d1_common/util.py
EventCounter.count
def count(self, event_str, inc_int=1): """Count an event. Args: event_str: The name of an event to count. Used as a key in the event dict. The same name will also be used in the summary. inc_int: int Optional argument to increase the count for the event by more than 1. """ self._event_dict.setdefault(event_str, 0) self._event_dict[event_str] += inc_int
python
def count(self, event_str, inc_int=1): """Count an event. Args: event_str: The name of an event to count. Used as a key in the event dict. The same name will also be used in the summary. inc_int: int Optional argument to increase the count for the event by more than 1. """ self._event_dict.setdefault(event_str, 0) self._event_dict[event_str] += inc_int
[ "def", "count", "(", "self", ",", "event_str", ",", "inc_int", "=", "1", ")", ":", "self", ".", "_event_dict", ".", "setdefault", "(", "event_str", ",", "0", ")", "self", ".", "_event_dict", "[", "event_str", "]", "+=", "inc_int" ]
Count an event. Args: event_str: The name of an event to count. Used as a key in the event dict. The same name will also be used in the summary. inc_int: int Optional argument to increase the count for the event by more than 1.
[ "Count", "an", "event", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L141-L154
train
45,268
DataONEorg/d1_python
lib_common/src/d1_common/util.py
EventCounter.log_and_count
def log_and_count(self, event_str, msg_str=None, inc_int=None): """Count an event and write a message to a logger. Args: event_str: str The name of an event to count. Used as a key in the event dict. The same name will be used in the summary. This also becomes a part of the message logged by this function. msg_str: str Optional message with details about the events. The message is only written to the log. While the ``event_str`` functions as a key and must remain the same for the same type of event, ``log_str`` may change between calls. inc_int: int Optional argument to increase the count for the event by more than 1. """ logger.info( ' - '.join(map(str, [v for v in (event_str, msg_str, inc_int) if v])) ) self.count(event_str, inc_int or 1)
python
def log_and_count(self, event_str, msg_str=None, inc_int=None): """Count an event and write a message to a logger. Args: event_str: str The name of an event to count. Used as a key in the event dict. The same name will be used in the summary. This also becomes a part of the message logged by this function. msg_str: str Optional message with details about the events. The message is only written to the log. While the ``event_str`` functions as a key and must remain the same for the same type of event, ``log_str`` may change between calls. inc_int: int Optional argument to increase the count for the event by more than 1. """ logger.info( ' - '.join(map(str, [v for v in (event_str, msg_str, inc_int) if v])) ) self.count(event_str, inc_int or 1)
[ "def", "log_and_count", "(", "self", ",", "event_str", ",", "msg_str", "=", "None", ",", "inc_int", "=", "None", ")", ":", "logger", ".", "info", "(", "' - '", ".", "join", "(", "map", "(", "str", ",", "[", "v", "for", "v", "in", "(", "event_str", ...
Count an event and write a message to a logger. Args: event_str: str The name of an event to count. Used as a key in the event dict. The same name will be used in the summary. This also becomes a part of the message logged by this function. msg_str: str Optional message with details about the events. The message is only written to the log. While the ``event_str`` functions as a key and must remain the same for the same type of event, ``log_str`` may change between calls. inc_int: int Optional argument to increase the count for the event by more than 1.
[ "Count", "an", "event", "and", "write", "a", "message", "to", "a", "logger", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L156-L177
train
45,269
DataONEorg/d1_python
lib_common/src/d1_common/util.py
EventCounter.dump_to_log
def dump_to_log(self): """Write summary to logger with the name and number of times each event has been counted. This function may be called at any point in the process. Counts are not zeroed. """ if self._event_dict: logger.info('Events:') for event_str, count_int in sorted(self._event_dict.items()): logger.info(' {}: {}'.format(event_str, count_int)) else: logger.info('No Events')
python
def dump_to_log(self): """Write summary to logger with the name and number of times each event has been counted. This function may be called at any point in the process. Counts are not zeroed. """ if self._event_dict: logger.info('Events:') for event_str, count_int in sorted(self._event_dict.items()): logger.info(' {}: {}'.format(event_str, count_int)) else: logger.info('No Events')
[ "def", "dump_to_log", "(", "self", ")", ":", "if", "self", ".", "_event_dict", ":", "logger", ".", "info", "(", "'Events:'", ")", "for", "event_str", ",", "count_int", "in", "sorted", "(", "self", ".", "_event_dict", ".", "items", "(", ")", ")", ":", ...
Write summary to logger with the name and number of times each event has been counted. This function may be called at any point in the process. Counts are not zeroed.
[ "Write", "summary", "to", "logger", "with", "the", "name", "and", "number", "of", "times", "each", "event", "has", "been", "counted", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/util.py#L179-L191
train
45,270
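
The three ``EventCounter`` records above form one small tally class: ``count`` accumulates, ``log_and_count`` additionally logs a per-event message, and ``dump_to_log`` prints a sorted summary without zeroing the counts. A condensed, runnable version of the first and last:

import logging

class EventCounter:
    """Minimal tally of named events, in the spirit of the records above."""
    def __init__(self):
        self._event_dict = {}
    def count(self, event_str, inc_int=1):
        self._event_dict[event_str] = self._event_dict.get(event_str, 0) + inc_int
    def dump_to_log(self):
        if not self._event_dict:
            logging.info('No Events')
            return
        logging.info('Events:')
        for event_str, count_int in sorted(self._event_dict.items()):
            logging.info('  %s: %s', event_str, count_int)

logging.basicConfig(level=logging.INFO)
counter = EventCounter()
counter.count('retrieved')
counter.count('retrieved')
counter.count('skipped', inc_int=3)
counter.dump_to_log()  # retrieved: 2, skipped: 3 (sorted by event name)
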
DataONEorg/d1_python
gmn/src/d1_gmn/app/delete.py
delete_all_from_db
def delete_all_from_db(): """Clear the database. Used for testing and debugging. """ # The models.CASCADE property is set on all ForeignKey fields, so tables can # be deleted in any order without breaking constraints. for model in django.apps.apps.get_models(): model.objects.all().delete()
python
def delete_all_from_db(): """Clear the database. Used for testing and debugging. """ # The models.CASCADE property is set on all ForeignKey fields, so tables can # be deleted in any order without breaking constraints. for model in django.apps.apps.get_models(): model.objects.all().delete()
[ "def", "delete_all_from_db", "(", ")", ":", "# The models.CASCADE property is set on all ForeignKey fields, so tables can", "# be deleted in any order without breaking constraints.", "for", "model", "in", "django", ".", "apps", ".", "apps", ".", "get_models", "(", ")", ":", "...
Clear the database. Used for testing and debugging.
[ "Clear", "the", "database", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/delete.py#L44-L53
train
45,271
genialis/resolwe
resolwe/elastic/pagination.py
LimitOffsetPostPagination.get_limit
def get_limit(self, request): """Return limit parameter.""" if self.limit_query_param: try: return _positive_int( get_query_param(request, self.limit_query_param), strict=True, cutoff=self.max_limit ) except (KeyError, ValueError): pass return self.default_limit
python
def get_limit(self, request): """Return limit parameter.""" if self.limit_query_param: try: return _positive_int( get_query_param(request, self.limit_query_param), strict=True, cutoff=self.max_limit ) except (KeyError, ValueError): pass return self.default_limit
[ "def", "get_limit", "(", "self", ",", "request", ")", ":", "if", "self", ".", "limit_query_param", ":", "try", ":", "return", "_positive_int", "(", "get_query_param", "(", "request", ",", "self", ".", "limit_query_param", ")", ",", "strict", "=", "True", "...
Return limit parameter.
[ "Return", "limit", "parameter", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/pagination.py#L33-L45
train
45,272
genialis/resolwe
resolwe/elastic/pagination.py
LimitOffsetPostPagination.get_offset
def get_offset(self, request): """Return offset parameter.""" try: return _positive_int( get_query_param(request, self.offset_query_param), ) except (KeyError, ValueError): return 0
python
def get_offset(self, request): """Return offset parameter.""" try: return _positive_int( get_query_param(request, self.offset_query_param), ) except (KeyError, ValueError): return 0
[ "def", "get_offset", "(", "self", ",", "request", ")", ":", "try", ":", "return", "_positive_int", "(", "get_query_param", "(", "request", ",", "self", ".", "offset_query_param", ")", ",", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "retur...
Return offset parameter.
[ "Return", "offset", "parameter", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/pagination.py#L47-L54
train
45,273
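
Both pagination records funnel raw query parameters through a ``_positive_int`` helper that is not shown; Django REST framework ships an internal helper of that name, and the sketch below reconstructs its likely behavior (``strict`` rejects zero, ``cutoff`` caps the value at ``max_limit``). Treat this as an assumption about the helper, not verified source:

def _positive_int(integer_string, strict=False, cutoff=None):
    """Parse a non-negative int; raise ValueError on bad input."""
    ret = int(integer_string)
    if ret < 0 or (ret == 0 and strict):
        raise ValueError()
    if cutoff is not None:
        return min(ret, cutoff)
    return ret

assert _positive_int('50', strict=True, cutoff=100) == 50
assert _positive_int('500', strict=True, cutoff=100) == 100  # capped
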
DataONEorg/d1_python
dev_tools/src/d1_dev/src-cleanup.py
futurize_module
def futurize_module(module_path, show_diff, write_update): """2to3 uses AST, not Baron.""" logging.info('Futurizing module... path="{}"'.format(module_path)) ast_tree = back_to_the_futurize(module_path) return d1_dev.util.update_module_file_ast( ast_tree, module_path, show_diff, write_update )
python
def futurize_module(module_path, show_diff, write_update): """2to3 uses AST, not Baron.""" logging.info('Futurizing module... path="{}"'.format(module_path)) ast_tree = back_to_the_futurize(module_path) return d1_dev.util.update_module_file_ast( ast_tree, module_path, show_diff, write_update )
[ "def", "futurize_module", "(", "module_path", ",", "show_diff", ",", "write_update", ")", ":", "logging", ".", "info", "(", "'Futurizing module... path=\"{}\"'", ".", "format", "(", "module_path", ")", ")", "ast_tree", "=", "back_to_the_futurize", "(", "module_path"...
2to3 uses AST, not Baron.
[ "2to3", "uses", "AST", "not", "Baron", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/dev_tools/src/d1_dev/src-cleanup.py#L159-L165
train
45,274
DataONEorg/d1_python
dev_tools/src/d1_dev/src-cleanup.py
_remove_single_line_import_comments
def _remove_single_line_import_comments(r): """We previously used more groups for the import statements and named each group.""" logging.info('Removing single line import comments') import_r, remaining_r = split_by_last_import(r) new_import_r = redbaron.NodeList() for i, v in enumerate(import_r): if 1 < i < len(import_r) - 2: if not ( import_r[i - 2].type != 'comment' and v.type == 'comment' and import_r[i + 2].type != 'comment' ) or _is_keep_comment(v): new_import_r.append(v) else: new_import_r.append(v) return new_import_r + remaining_r
python
def _remove_single_line_import_comments(r): """We previously used more groups for the import statements and named each group.""" logging.info('Removing single line import comments') import_r, remaining_r = split_by_last_import(r) new_import_r = redbaron.NodeList() for i, v in enumerate(import_r): if 1 < i < len(import_r) - 2: if not ( import_r[i - 2].type != 'comment' and v.type == 'comment' and import_r[i + 2].type != 'comment' ) or _is_keep_comment(v): new_import_r.append(v) else: new_import_r.append(v) return new_import_r + remaining_r
[ "def", "_remove_single_line_import_comments", "(", "r", ")", ":", "logging", ".", "info", "(", "'Removing single line import comments'", ")", "import_r", ",", "remaining_r", "=", "split_by_last_import", "(", "r", ")", "new_import_r", "=", "redbaron", ".", "NodeList", ...
We previously used more groups for the import statements and named each group.
[ "We", "previously", "used", "more", "groups", "for", "the", "import", "statements", "and", "named", "each", "group", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/dev_tools/src/d1_dev/src-cleanup.py#L171-L186
train
45,275
genialis/resolwe
resolwe/flow/executors/logger.py
configure_logging
def configure_logging(emit_list): """Configure logging to send log records to the master.""" if 'sphinx' in sys.modules: module_base = 'resolwe.flow.executors' else: module_base = 'executors' logging_config = dict( version=1, formatters={ 'json_formatter': { '()': JSONFormatter }, }, handlers={ 'redis': { 'class': module_base + '.logger.RedisHandler', 'formatter': 'json_formatter', 'level': logging.INFO, 'emit_list': emit_list }, 'console': { 'class': 'logging.StreamHandler', 'level': logging.WARNING }, }, root={ 'handlers': ['redis', 'console'], 'level': logging.DEBUG, }, loggers={ # Don't use redis logger to prevent circular dependency. module_base + '.manager_comm': { 'level': 'INFO', 'handlers': ['console'], 'propagate': False, }, }, ) dictConfig(logging_config)
python
def configure_logging(emit_list): """Configure logging to send log records to the master.""" if 'sphinx' in sys.modules: module_base = 'resolwe.flow.executors' else: module_base = 'executors' logging_config = dict( version=1, formatters={ 'json_formatter': { '()': JSONFormatter }, }, handlers={ 'redis': { 'class': module_base + '.logger.RedisHandler', 'formatter': 'json_formatter', 'level': logging.INFO, 'emit_list': emit_list }, 'console': { 'class': 'logging.StreamHandler', 'level': logging.WARNING }, }, root={ 'handlers': ['redis', 'console'], 'level': logging.DEBUG, }, loggers={ # Don't use redis logger to prevent circular dependency. module_base + '.manager_comm': { 'level': 'INFO', 'handlers': ['console'], 'propagate': False, }, }, ) dictConfig(logging_config)
[ "def", "configure_logging", "(", "emit_list", ")", ":", "if", "'sphinx'", "in", "sys", ".", "modules", ":", "module_base", "=", "'resolwe.flow.executors'", "else", ":", "module_base", "=", "'executors'", "logging_config", "=", "dict", "(", "version", "=", "1", ...
Configure logging to send log records to the master.
[ "Configure", "logging", "to", "send", "log", "records", "to", "the", "master", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/logger.py#L62-L102
train
45,276
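
``configure_logging`` builds a ``dictConfig`` with a custom Redis handler plus a console fallback, and gives the Redis communication module its own non-propagating logger to break the circular dependency it mentions. The sketch below keeps only stdlib pieces, substituting a plain console handler for the Redis one; the logger name matches the record's non-Sphinx branch:

import logging
from logging.config import dictConfig

dictConfig({
    'version': 1,
    'formatters': {
        'plain': {'format': '%(levelname)s %(name)s %(message)s'},
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'plain',
            'level': logging.WARNING,
        },
    },
    'root': {'handlers': ['console'], 'level': logging.DEBUG},
    'loggers': {
        # Same trick as the record: give one module its own pipeline
        # and stop propagation so its records are not handled twice.
        'executors.manager_comm': {
            'level': 'INFO',
            'handlers': ['console'],
            'propagate': False,
        },
    },
})
logging.getLogger('executors.manager_comm').warning('routed exactly once')
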
genialis/resolwe
resolwe/flow/executors/logger.py
JSONFormatter.format
def format(self, record): """Dump the record to JSON.""" data = record.__dict__.copy() data['data_id'] = DATA['id'] data['data_location_id'] = DATA_LOCATION['id'] data['hostname'] = socket.gethostname() # Get relative path, so listener can reconstruct the path to the actual code. data['pathname'] = os.path.relpath(data['pathname'], os.path.dirname(__file__)) # Exception and Traceback cannot be serialized. data['exc_info'] = None # Ensure logging message is instantiated to a string. data['msg'] = str(data['msg']) return json.dumps(data)
python
def format(self, record): """Dump the record to JSON.""" data = record.__dict__.copy() data['data_id'] = DATA['id'] data['data_location_id'] = DATA_LOCATION['id'] data['hostname'] = socket.gethostname() # Get relative path, so listener can reconstruct the path to the actual code. data['pathname'] = os.path.relpath(data['pathname'], os.path.dirname(__file__)) # Exception and Traceback cannot be serialized. data['exc_info'] = None # Ensure logging message is instantiated to a string. data['msg'] = str(data['msg']) return json.dumps(data)
[ "def", "format", "(", "self", ",", "record", ")", ":", "data", "=", "record", ".", "__dict__", ".", "copy", "(", ")", "data", "[", "'data_id'", "]", "=", "DATA", "[", "'id'", "]", "data", "[", "'data_location_id'", "]", "=", "DATA_LOCATION", "[", "'i...
Dump the record to JSON.
[ "Dump", "the", "record", "to", "JSON", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/logger.py#L18-L35
train
45,277
genialis/resolwe
resolwe/flow/executors/logger.py
RedisHandler.emit
def emit(self, record):
    """Send log message to the listener."""
    future = asyncio.ensure_future(send_manager_command(
        ExecutorProtocol.LOG,
        extra_fields={
            ExecutorProtocol.LOG_MESSAGE: self.format(record),
        },
        expect_reply=False
    ))
    self.emit_list.append(future)
python
def emit(self, record):
    """Send log message to the listener."""
    future = asyncio.ensure_future(send_manager_command(
        ExecutorProtocol.LOG,
        extra_fields={
            ExecutorProtocol.LOG_MESSAGE: self.format(record),
        },
        expect_reply=False
    ))
    self.emit_list.append(future)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "future", "=", "asyncio", ".", "ensure_future", "(", "send_manager_command", "(", "ExecutorProtocol", ".", "LOG", ",", "extra_fields", "=", "{", "ExecutorProtocol", ".", "LOG_MESSAGE", ":", "self", ".", "f...
Send log message to the listener.
[ "Send", "log", "message", "to", "the", "listener", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/logger.py#L50-L59
train
45,278
genialis/resolwe
resolwe/flow/models/secret.py
SecretManager.create_secret
def create_secret(self, value, contributor, metadata=None, expires=None):
    """Create a new secret, returning its handle.

    :param value: Secret value to store
    :param contributor: User owning the secret
    :param metadata: Optional metadata dictionary (must be JSON
        serializable)
    :param expires: Optional date/time of expiry (defaults to None,
        which means that the secret never expires)
    :return: Secret handle
    """
    if metadata is None:
        metadata = {}

    secret = self.create(
        value=value,
        contributor=contributor,
        metadata=metadata,
        expires=expires,
    )
    return str(secret.handle)
python
def create_secret(self, value, contributor, metadata=None, expires=None):
    """Create a new secret, returning its handle.

    :param value: Secret value to store
    :param contributor: User owning the secret
    :param metadata: Optional metadata dictionary (must be JSON
        serializable)
    :param expires: Optional date/time of expiry (defaults to None,
        which means that the secret never expires)
    :return: Secret handle
    """
    if metadata is None:
        metadata = {}

    secret = self.create(
        value=value,
        contributor=contributor,
        metadata=metadata,
        expires=expires,
    )
    return str(secret.handle)
[ "def", "create_secret", "(", "self", ",", "value", ",", "contributor", ",", "metadata", "=", "None", ",", "expires", "=", "None", ")", ":", "if", "metadata", "is", "None", ":", "metadata", "=", "{", "}", "secret", "=", "self", ".", "create", "(", "va...
Create a new secret, returning its handle.

:param value: Secret value to store
:param contributor: User owning the secret
:param metadata: Optional metadata dictionary (must be JSON serializable)
:param expires: Optional date/time of expiry (defaults to None, which means that the secret never expires)
:return: Secret handle
[ "Create", "a", "new", "secret", "returning", "its", "handle", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/secret.py#L14-L33
train
45,279
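A short usage sketch. It assumes the manager is installed as ``Secret.objects`` (a common Django pattern, not confirmed by this record) and that a ``request.user`` is available:

from datetime import timedelta
from django.utils import timezone

handle = Secret.objects.create_secret(
    value='my-api-token',
    contributor=request.user,                      # assumed request context
    metadata={'purpose': 'example'},
    expires=timezone.now() + timedelta(days=7),
)
# Only the opaque handle is passed around; the value stays server-side.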
genialis/resolwe
resolwe/flow/models/secret.py
SecretManager.get_secret
def get_secret(self, handle, contributor):
    """Retrieve an existing secret's value.

    :param handle: Secret handle
    :param contributor: User instance to perform contributor
        validation, which means that only secrets for the given
        contributor will be looked up.
    """
    queryset = self.all()
    if contributor is not None:
        queryset = queryset.filter(contributor=contributor)

    secret = queryset.get(handle=handle)
    return secret.value
python
def get_secret(self, handle, contributor):
    """Retrieve an existing secret's value.

    :param handle: Secret handle
    :param contributor: User instance to perform contributor
        validation, which means that only secrets for the given
        contributor will be looked up.
    """
    queryset = self.all()
    if contributor is not None:
        queryset = queryset.filter(contributor=contributor)

    secret = queryset.get(handle=handle)
    return secret.value
[ "def", "get_secret", "(", "self", ",", "handle", ",", "contributor", ")", ":", "queryset", "=", "self", ".", "all", "(", ")", "if", "contributor", "is", "not", "None", ":", "queryset", "=", "queryset", ".", "filter", "(", "contributor", "=", "contributor...
Retrieve an existing secret's value.

:param handle: Secret handle
:param contributor: User instance to perform contributor validation, which means that only secrets for the given contributor will be looked up.
[ "Retrieve", "an", "existing", "secret", "s", "value", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/secret.py#L35-L47
train
45,280
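Since the lookup goes through ``queryset.get``, an unknown handle (or one owned by a different contributor) surfaces as the model's standard ``DoesNotExist`` error. A hedged sketch, again assuming the manager lives at ``Secret.objects``:

try:
    value = Secret.objects.get_secret(handle, contributor=request.user)
except Secret.DoesNotExist:
    value = None  # unknown handle, or a secret owned by someone else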
genialis/resolwe
resolwe/flow/models/utils.py
validation_schema
def validation_schema(name):
    """Return json schema for json validation."""
    schemas = {
        'processor': 'processSchema.json',
        'descriptor': 'descriptorSchema.json',
        'field': 'fieldSchema.json',
        'type': 'typeSchema.json',
    }

    if name not in schemas:
        raise ValueError()

    field_schema_file = finders.find('flow/{}'.format(schemas['field']), all=True)[0]
    with open(field_schema_file, 'r') as fn:
        field_schema = fn.read()

    if name == 'field':
        return json.loads(field_schema.replace('{{PARENT}}', ''))

    schema_file = finders.find('flow/{}'.format(schemas[name]), all=True)[0]
    with open(schema_file, 'r') as fn:
        schema = fn.read()

    return json.loads(schema.replace('{{FIELD}}', field_schema).replace('{{PARENT}}', '/field'))
python
def validation_schema(name):
    """Return json schema for json validation."""
    schemas = {
        'processor': 'processSchema.json',
        'descriptor': 'descriptorSchema.json',
        'field': 'fieldSchema.json',
        'type': 'typeSchema.json',
    }

    if name not in schemas:
        raise ValueError()

    field_schema_file = finders.find('flow/{}'.format(schemas['field']), all=True)[0]
    with open(field_schema_file, 'r') as fn:
        field_schema = fn.read()

    if name == 'field':
        return json.loads(field_schema.replace('{{PARENT}}', ''))

    schema_file = finders.find('flow/{}'.format(schemas[name]), all=True)[0]
    with open(schema_file, 'r') as fn:
        schema = fn.read()

    return json.loads(schema.replace('{{FIELD}}', field_schema).replace('{{PARENT}}', '/field'))
[ "def", "validation_schema", "(", "name", ")", ":", "schemas", "=", "{", "'processor'", ":", "'processSchema.json'", ",", "'descriptor'", ":", "'descriptorSchema.json'", ",", "'field'", ":", "'fieldSchema.json'", ",", "'type'", ":", "'typeSchema.json'", ",", "}", "...
Return json schema for json validation.
[ "Return", "json", "schema", "for", "json", "validation", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L19-L42
train
45,281
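The return value is an ordinary JSON-schema dictionary, so it can be fed straight to a validator. A sketch under the assumption that the ``jsonschema`` package is used for validation (``process_definition`` is a hypothetical document to check):

import jsonschema

process_schema = validation_schema('processor')
jsonschema.validate(instance=process_definition, schema=process_schema)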
genialis/resolwe
resolwe/flow/models/utils.py
hydrate_input_references
def hydrate_input_references(input_, input_schema, hydrate_values=True):
    """Hydrate ``input_`` with linked data.

    Find fields with complex data:<...> types in ``input_``. Assign an
    output of corresponding data object to those fields.
    """
    from .data import Data  # prevent circular import

    for field_schema, fields in iterate_fields(input_, input_schema):
        name = field_schema['name']
        value = fields[name]
        if 'type' in field_schema:
            if field_schema['type'].startswith('data:'):
                if value is None:
                    continue

                try:
                    data = Data.objects.get(id=value)
                except Data.DoesNotExist:
                    fields[name] = {}
                    continue

                output = copy.deepcopy(data.output)
                if hydrate_values:
                    _hydrate_values(output, data.process.output_schema, data)

                output["__id"] = data.id
                output["__type"] = data.process.type
                output["__descriptor"] = data.descriptor
                output["__entity_name"] = None
                output["__output_schema"] = data.process.output_schema

                entity = data.entity_set.values('name').first()
                if entity:
                    output["__entity_name"] = entity['name']

                fields[name] = output

            elif field_schema['type'].startswith('list:data:'):
                outputs = []
                for val in value:
                    if val is None:
                        continue

                    try:
                        data = Data.objects.get(id=val)
                    except Data.DoesNotExist:
                        outputs.append({})
                        continue

                    output = copy.deepcopy(data.output)
                    if hydrate_values:
                        _hydrate_values(output, data.process.output_schema, data)

                    output["__id"] = data.id
                    output["__type"] = data.process.type
                    output["__descriptor"] = data.descriptor
                    output["__output_schema"] = data.process.output_schema

                    entity = data.entity_set.values('name').first()
                    if entity:
                        output["__entity_name"] = entity['name']

                    outputs.append(output)

                fields[name] = outputs
python
def hydrate_input_references(input_, input_schema, hydrate_values=True):
    """Hydrate ``input_`` with linked data.

    Find fields with complex data:<...> types in ``input_``. Assign an
    output of corresponding data object to those fields.
    """
    from .data import Data  # prevent circular import

    for field_schema, fields in iterate_fields(input_, input_schema):
        name = field_schema['name']
        value = fields[name]
        if 'type' in field_schema:
            if field_schema['type'].startswith('data:'):
                if value is None:
                    continue

                try:
                    data = Data.objects.get(id=value)
                except Data.DoesNotExist:
                    fields[name] = {}
                    continue

                output = copy.deepcopy(data.output)
                if hydrate_values:
                    _hydrate_values(output, data.process.output_schema, data)

                output["__id"] = data.id
                output["__type"] = data.process.type
                output["__descriptor"] = data.descriptor
                output["__entity_name"] = None
                output["__output_schema"] = data.process.output_schema

                entity = data.entity_set.values('name').first()
                if entity:
                    output["__entity_name"] = entity['name']

                fields[name] = output

            elif field_schema['type'].startswith('list:data:'):
                outputs = []
                for val in value:
                    if val is None:
                        continue

                    try:
                        data = Data.objects.get(id=val)
                    except Data.DoesNotExist:
                        outputs.append({})
                        continue

                    output = copy.deepcopy(data.output)
                    if hydrate_values:
                        _hydrate_values(output, data.process.output_schema, data)

                    output["__id"] = data.id
                    output["__type"] = data.process.type
                    output["__descriptor"] = data.descriptor
                    output["__output_schema"] = data.process.output_schema

                    entity = data.entity_set.values('name').first()
                    if entity:
                        output["__entity_name"] = entity['name']

                    outputs.append(output)

                fields[name] = outputs
[ "def", "hydrate_input_references", "(", "input_", ",", "input_schema", ",", "hydrate_values", "=", "True", ")", ":", "from", ".", "data", "import", "Data", "# prevent circular import", "for", "field_schema", ",", "fields", "in", "iterate_fields", "(", "input_", ",...
Hydrate ``input_`` with linked data.

Find fields with complex data:<...> types in ``input_``. Assign an output of corresponding data object to those fields.
[ "Hydrate", "input_", "with", "linked", "data", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L295-L360
train
45,282
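To illustrate the shape change only (field name, schema and values below are made up, not from the repository): a ``data:``-typed field holds a primary key before hydration and the referenced object's output dict, plus ``__``-prefixed metadata, afterwards.

input_ = {'reads': 42}                      # 42 is the pk of another Data object
hydrate_input_references(input_, process.input_schema)
# input_['reads'] is now roughly:
# {'bam': {...},                            # the referenced object's output
#  '__id': 42, '__type': 'data:alignment:bam:',
#  '__descriptor': {...}, '__entity_name': None, '__output_schema': {...}}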
genialis/resolwe
resolwe/flow/models/utils.py
hydrate_size
def hydrate_size(data, force=False):
    """Add file and dir sizes.

    Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:``
    and ``list:basic:dir:`` fields.

    ``force`` parameter is used to recompute file sizes also on objects
    that already have these values, e.g. in migrations.
    """
    from .data import Data  # prevent circular import

    def get_dir_size(path):
        """Get directory size."""
        total_size = 0
        for dirpath, _, filenames in os.walk(path):
            for file_name in filenames:
                file_path = os.path.join(dirpath, file_name)
                if not os.path.isfile(file_path):  # Skip all "not normal" files (links, ...)
                    continue
                total_size += os.path.getsize(file_path)
        return total_size

    def get_refs_size(obj, obj_path):
        """Calculate size of all references of ``obj``.

        :param dict obj: Data object's output field (of type file/dir).
        :param str obj_path: Path to ``obj``.
        """
        total_size = 0
        for ref in obj.get('refs', []):
            ref_path = data.location.get_path(filename=ref)
            if ref_path in obj_path:
                # It is a common case that ``obj['file']`` is also contained in
                # one of obj['ref']. In that case, we need to make sure that its
                # size is not counted twice:
                continue
            if os.path.isfile(ref_path):
                total_size += os.path.getsize(ref_path)
            elif os.path.isdir(ref_path):
                total_size += get_dir_size(ref_path)

        return total_size

    def add_file_size(obj):
        """Add file size to the basic:file field."""
        if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force:
            return

        path = data.location.get_path(filename=obj['file'])
        if not os.path.isfile(path):
            raise ValidationError("Referenced file does not exist ({})".format(path))

        obj['size'] = os.path.getsize(path)
        obj['total_size'] = obj['size'] + get_refs_size(obj, path)

    def add_dir_size(obj):
        """Add directory size to the basic:dir field."""
        if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force:
            return

        path = data.location.get_path(filename=obj['dir'])
        if not os.path.isdir(path):
            raise ValidationError("Referenced dir does not exist ({})".format(path))

        obj['size'] = get_dir_size(path)
        obj['total_size'] = obj['size'] + get_refs_size(obj, path)

    data_size = 0
    for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
        name = field_schema['name']
        value = fields[name]
        if 'type' in field_schema:
            if field_schema['type'].startswith('basic:file:'):
                add_file_size(value)
                data_size += value.get('total_size', 0)
            elif field_schema['type'].startswith('list:basic:file:'):
                for obj in value:
                    add_file_size(obj)
                    data_size += obj.get('total_size', 0)
            elif field_schema['type'].startswith('basic:dir:'):
                add_dir_size(value)
                data_size += value.get('total_size', 0)
            elif field_schema['type'].startswith('list:basic:dir:'):
                for obj in value:
                    add_dir_size(obj)
                    data_size += obj.get('total_size', 0)

    data.size = data_size
python
def hydrate_size(data, force=False):
    """Add file and dir sizes.

    Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:``
    and ``list:basic:dir:`` fields.

    ``force`` parameter is used to recompute file sizes also on objects
    that already have these values, e.g. in migrations.
    """
    from .data import Data  # prevent circular import

    def get_dir_size(path):
        """Get directory size."""
        total_size = 0
        for dirpath, _, filenames in os.walk(path):
            for file_name in filenames:
                file_path = os.path.join(dirpath, file_name)
                if not os.path.isfile(file_path):  # Skip all "not normal" files (links, ...)
                    continue
                total_size += os.path.getsize(file_path)
        return total_size

    def get_refs_size(obj, obj_path):
        """Calculate size of all references of ``obj``.

        :param dict obj: Data object's output field (of type file/dir).
        :param str obj_path: Path to ``obj``.
        """
        total_size = 0
        for ref in obj.get('refs', []):
            ref_path = data.location.get_path(filename=ref)
            if ref_path in obj_path:
                # It is a common case that ``obj['file']`` is also contained in
                # one of obj['ref']. In that case, we need to make sure that its
                # size is not counted twice:
                continue
            if os.path.isfile(ref_path):
                total_size += os.path.getsize(ref_path)
            elif os.path.isdir(ref_path):
                total_size += get_dir_size(ref_path)

        return total_size

    def add_file_size(obj):
        """Add file size to the basic:file field."""
        if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force:
            return

        path = data.location.get_path(filename=obj['file'])
        if not os.path.isfile(path):
            raise ValidationError("Referenced file does not exist ({})".format(path))

        obj['size'] = os.path.getsize(path)
        obj['total_size'] = obj['size'] + get_refs_size(obj, path)

    def add_dir_size(obj):
        """Add directory size to the basic:dir field."""
        if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force:
            return

        path = data.location.get_path(filename=obj['dir'])
        if not os.path.isdir(path):
            raise ValidationError("Referenced dir does not exist ({})".format(path))

        obj['size'] = get_dir_size(path)
        obj['total_size'] = obj['size'] + get_refs_size(obj, path)

    data_size = 0
    for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
        name = field_schema['name']
        value = fields[name]
        if 'type' in field_schema:
            if field_schema['type'].startswith('basic:file:'):
                add_file_size(value)
                data_size += value.get('total_size', 0)
            elif field_schema['type'].startswith('list:basic:file:'):
                for obj in value:
                    add_file_size(obj)
                    data_size += obj.get('total_size', 0)
            elif field_schema['type'].startswith('basic:dir:'):
                add_dir_size(value)
                data_size += value.get('total_size', 0)
            elif field_schema['type'].startswith('list:basic:dir:'):
                for obj in value:
                    add_dir_size(obj)
                    data_size += obj.get('total_size', 0)

    data.size = data_size
[ "def", "hydrate_size", "(", "data", ",", "force", "=", "False", ")", ":", "from", ".", "data", "import", "Data", "# prevent circular import", "def", "get_dir_size", "(", "path", ")", ":", "\"\"\"Get directory size.\"\"\"", "total_size", "=", "0", "for", "dirpath...
Add file and dir sizes.

Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:`` and ``list:basic:dir:`` fields.

``force`` parameter is used to recompute file sizes also on objects that already have these values, e.g. in migrations.
[ "Add", "file", "and", "dir", "sizes", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L395-L482
train
45,283
genialis/resolwe
resolwe/flow/models/utils.py
render_descriptor
def render_descriptor(data):
    """Render data descriptor.

    The rendering is based on descriptor schema and input context.

    :param data: data instance
    :type data: :class:`resolwe.flow.models.Data` or :class:`dict`

    """
    if not data.descriptor_schema:
        return

    # Set default values
    for field_schema, field, path in iterate_schema(data.descriptor, data.descriptor_schema.schema, 'descriptor'):
        if 'default' in field_schema and field_schema['name'] not in field:
            dict_dot(data, path, field_schema['default'])
python
def render_descriptor(data):
    """Render data descriptor.

    The rendering is based on descriptor schema and input context.

    :param data: data instance
    :type data: :class:`resolwe.flow.models.Data` or :class:`dict`

    """
    if not data.descriptor_schema:
        return

    # Set default values
    for field_schema, field, path in iterate_schema(data.descriptor, data.descriptor_schema.schema, 'descriptor'):
        if 'default' in field_schema and field_schema['name'] not in field:
            dict_dot(data, path, field_schema['default'])
[ "def", "render_descriptor", "(", "data", ")", ":", "if", "not", "data", ".", "descriptor_schema", ":", "return", "# Set default values", "for", "field_schema", ",", "field", ",", "path", "in", "iterate_schema", "(", "data", ".", "descriptor", ",", "data", ".",...
Render data descriptor.

The rendering is based on descriptor schema and input context.

:param data: data instance
:type data: :class:`resolwe.flow.models.Data` or :class:`dict`
[ "Render", "data", "descriptor", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L485-L500
train
45,284
genialis/resolwe
resolwe/flow/models/utils.py
render_template
def render_template(process, template_string, context):
    """Render template using the specified expression engine."""
    from resolwe.flow.managers import manager

    # Get the appropriate expression engine. If none is defined, do not evaluate
    # any expressions.
    expression_engine = process.requirements.get('expression-engine', None)
    if not expression_engine:
        return template_string

    return manager.get_expression_engine(expression_engine).evaluate_block(template_string, context)
python
def render_template(process, template_string, context):
    """Render template using the specified expression engine."""
    from resolwe.flow.managers import manager

    # Get the appropriate expression engine. If none is defined, do not evaluate
    # any expressions.
    expression_engine = process.requirements.get('expression-engine', None)
    if not expression_engine:
        return template_string

    return manager.get_expression_engine(expression_engine).evaluate_block(template_string, context)
[ "def", "render_template", "(", "process", ",", "template_string", ",", "context", ")", ":", "from", "resolwe", ".", "flow", ".", "managers", "import", "manager", "# Get the appropriate expression engine. If none is defined, do not evaluate", "# any expressions.", "expression_...
Render template using the specified expression engine.
[ "Render", "template", "using", "the", "specified", "expression", "engine", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L503-L513
train
45,285
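A usage sketch: a process that declares an expression engine in its requirements gets its template blocks evaluated against the context; without one, the string is returned verbatim. The engine name and template below are illustrative.

process.requirements = {'expression-engine': 'jinja'}   # assumed engine name
rendered = render_template(
    process,
    'genome size: {{ genome.size }}',
    {'genome': {'size': 3}},
)
# rendered == 'genome size: 3'; with no 'expression-engine' entry the
# template string would come back unevaluated.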
genialis/resolwe
resolwe/flow/models/utils.py
json_path_components
def json_path_components(path):
    """Convert JSON path to individual path components.

    :param path: JSON path, which can be either an iterable of path
        components or a dot-separated string
    :return: A list of path components
    """
    if isinstance(path, str):
        path = path.split('.')

    return list(path)
python
def json_path_components(path):
    """Convert JSON path to individual path components.

    :param path: JSON path, which can be either an iterable of path
        components or a dot-separated string
    :return: A list of path components
    """
    if isinstance(path, str):
        path = path.split('.')

    return list(path)
[ "def", "json_path_components", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "str", ")", ":", "path", "=", "path", ".", "split", "(", "'.'", ")", "return", "list", "(", "path", ")" ]
Convert JSON path to individual path components.

:param path: JSON path, which can be either an iterable of path components or a dot-separated string
:return: A list of path components
[ "Convert", "JSON", "path", "to", "individual", "path", "components", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L516-L526
train
45,286
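The function is pure, so its behaviour can be shown directly; both spellings of a path normalize to the same list (path values below are made up):

assert json_path_components('output.bam.file') == ['output', 'bam', 'file']
assert json_path_components(('output', 'bam', 'file')) == ['output', 'bam', 'file']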
genialis/resolwe
resolwe/flow/models/utils.py
validate_process_subtype
def validate_process_subtype(supertype_name, supertype, subtype_name, subtype):
    """Perform process subtype validation.

    :param supertype_name: Supertype name
    :param supertype: Supertype schema
    :param subtype_name: Subtype name
    :param subtype: Subtype schema
    :return: A list of validation error strings
    """
    errors = []
    for item in supertype:
        # Ensure that the item exists in subtype and has the same schema.
        for subitem in subtype:
            if item['name'] != subitem['name']:
                continue

            for key in set(item.keys()) | set(subitem.keys()):
                if key in ('label', 'description'):
                    # Label and description can differ.
                    continue
                elif key == 'required':
                    # A non-required item can be made required in subtype, but not the
                    # other way around.
                    item_required = item.get('required', True)
                    subitem_required = subitem.get('required', False)

                    if item_required and not subitem_required:
                        errors.append("Field '{}' is marked as required in '{}' and optional in '{}'.".format(
                            item['name'],
                            supertype_name,
                            subtype_name,
                        ))
                elif item.get(key, None) != subitem.get(key, None):
                    errors.append("Schema for field '{}' in type '{}' does not match supertype '{}'.".format(
                        item['name'],
                        subtype_name,
                        supertype_name
                    ))

            break
        else:
            errors.append("Schema for type '{}' is missing supertype '{}' field '{}'.".format(
                subtype_name,
                supertype_name,
                item['name']
            ))

    return errors
python
def validate_process_subtype(supertype_name, supertype, subtype_name, subtype):
    """Perform process subtype validation.

    :param supertype_name: Supertype name
    :param supertype: Supertype schema
    :param subtype_name: Subtype name
    :param subtype: Subtype schema
    :return: A list of validation error strings
    """
    errors = []
    for item in supertype:
        # Ensure that the item exists in subtype and has the same schema.
        for subitem in subtype:
            if item['name'] != subitem['name']:
                continue

            for key in set(item.keys()) | set(subitem.keys()):
                if key in ('label', 'description'):
                    # Label and description can differ.
                    continue
                elif key == 'required':
                    # A non-required item can be made required in subtype, but not the
                    # other way around.
                    item_required = item.get('required', True)
                    subitem_required = subitem.get('required', False)

                    if item_required and not subitem_required:
                        errors.append("Field '{}' is marked as required in '{}' and optional in '{}'.".format(
                            item['name'],
                            supertype_name,
                            subtype_name,
                        ))
                elif item.get(key, None) != subitem.get(key, None):
                    errors.append("Schema for field '{}' in type '{}' does not match supertype '{}'.".format(
                        item['name'],
                        subtype_name,
                        supertype_name
                    ))

            break
        else:
            errors.append("Schema for type '{}' is missing supertype '{}' field '{}'.".format(
                subtype_name,
                supertype_name,
                item['name']
            ))

    return errors
[ "def", "validate_process_subtype", "(", "supertype_name", ",", "supertype", ",", "subtype_name", ",", "subtype", ")", ":", "errors", "=", "[", "]", "for", "item", "in", "supertype", ":", "# Ensure that the item exists in subtype and has the same schema.", "for", "subite...
Perform process subtype validation.

:param supertype_name: Supertype name
:param supertype: Supertype schema
:param subtype_name: Subtype name
:param subtype: Subtype schema
:return: A list of validation error strings
[ "Perform", "process", "subtype", "validation", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L529-L576
train
45,287
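Because the function takes plain lists of field schemas, it is easy to exercise in isolation. In this made-up example the subtype drops a field the supertype declares, so exactly one error string comes back:

supertype = [{'name': 'src', 'type': 'basic:file:', 'required': True}]
subtype = [{'name': 'other', 'type': 'basic:string:'}]
errors = validate_process_subtype('data:file', supertype, 'data:file:text', subtype)
assert errors == [
    "Schema for type 'data:file:text' is missing supertype 'data:file' field 'src'."
]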
genialis/resolwe
resolwe/flow/models/utils.py
validate_process_types
def validate_process_types(queryset=None):
    """Perform process type validation.

    :param queryset: Optional process queryset to validate
    :return: A list of validation error strings
    """
    if not queryset:
        from .process import Process
        queryset = Process.objects.all()

    processes = {}
    for process in queryset:
        dict_dot(
            processes,
            process.type.replace(':', '.') + '__schema__',
            process.output_schema
        )

    errors = []
    for path, key, value in iterate_dict(processes, exclude=lambda key, value: key == '__schema__'):
        if '__schema__' not in value:
            continue

        # Validate with any parent types.
        for length in range(len(path), 0, -1):
            parent_type = '.'.join(path[:length] + ['__schema__'])
            try:
                parent_schema = dict_dot(processes, parent_type)
            except KeyError:
                continue

            errors += validate_process_subtype(
                supertype_name=':'.join(path[:length]),
                supertype=parent_schema,
                subtype_name=':'.join(path + [key]),
                subtype=value['__schema__']
            )

    return errors
python
def validate_process_types(queryset=None):
    """Perform process type validation.

    :param queryset: Optional process queryset to validate
    :return: A list of validation error strings
    """
    if not queryset:
        from .process import Process
        queryset = Process.objects.all()

    processes = {}
    for process in queryset:
        dict_dot(
            processes,
            process.type.replace(':', '.') + '__schema__',
            process.output_schema
        )

    errors = []
    for path, key, value in iterate_dict(processes, exclude=lambda key, value: key == '__schema__'):
        if '__schema__' not in value:
            continue

        # Validate with any parent types.
        for length in range(len(path), 0, -1):
            parent_type = '.'.join(path[:length] + ['__schema__'])
            try:
                parent_schema = dict_dot(processes, parent_type)
            except KeyError:
                continue

            errors += validate_process_subtype(
                supertype_name=':'.join(path[:length]),
                supertype=parent_schema,
                subtype_name=':'.join(path + [key]),
                subtype=value['__schema__']
            )

    return errors
[ "def", "validate_process_types", "(", "queryset", "=", "None", ")", ":", "if", "not", "queryset", ":", "from", ".", "process", "import", "Process", "queryset", "=", "Process", ".", "objects", ".", "all", "(", ")", "processes", "=", "{", "}", "for", "proc...
Perform process type validation.

:param queryset: Optional process queryset to validate
:return: A list of validation error strings
[ "Perform", "process", "type", "validation", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L579-L617
train
45,288
genialis/resolwe
resolwe/flow/models/utils.py
fill_with_defaults
def fill_with_defaults(process_input, input_schema):
    """Fill empty optional fields in input with default values."""
    for field_schema, fields, path in iterate_schema(process_input, input_schema):
        if 'default' in field_schema and field_schema['name'] not in fields:
            dict_dot(process_input, path, field_schema['default'])
python
def fill_with_defaults(process_input, input_schema):
    """Fill empty optional fields in input with default values."""
    for field_schema, fields, path in iterate_schema(process_input, input_schema):
        if 'default' in field_schema and field_schema['name'] not in fields:
            dict_dot(process_input, path, field_schema['default'])
[ "def", "fill_with_defaults", "(", "process_input", ",", "input_schema", ")", ":", "for", "field_schema", ",", "fields", ",", "path", "in", "iterate_schema", "(", "process_input", ",", "input_schema", ")", ":", "if", "'default'", "in", "field_schema", "and", "fie...
Fill empty optional fields in input with default values.
[ "Fill", "empty", "optional", "fields", "in", "input", "with", "default", "values", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/utils.py#L620-L624
train
45,289
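An illustrative sketch (the schema shape is simplified and the field names are made up): a missing optional field picks up its declared default in place, while an explicitly set field is left alone.

input_schema = [
    {'name': 'threads', 'type': 'basic:integer:', 'default': 1},
    {'name': 'genome', 'type': 'basic:string:', 'default': 'hg19'},
]
process_input = {'genome': 'mm10'}
fill_with_defaults(process_input, input_schema)
# process_input is now {'genome': 'mm10', 'threads': 1}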
genialis/resolwe
resolwe/flow/serializers/contributor.py
ContributorSerializer.to_internal_value
def to_internal_value(self, data):
    """Format the internal value."""
    # When setting the contributor, it may be passed as an integer.
    if isinstance(data, dict) and isinstance(data.get('id', None), int):
        data = data['id']
    elif isinstance(data, int):
        pass
    else:
        raise ValidationError("Contributor must be an integer or a dictionary with key 'id'")

    return self.Meta.model.objects.get(pk=data)
python
def to_internal_value(self, data):
    """Format the internal value."""
    # When setting the contributor, it may be passed as an integer.
    if isinstance(data, dict) and isinstance(data.get('id', None), int):
        data = data['id']
    elif isinstance(data, int):
        pass
    else:
        raise ValidationError("Contributor must be an integer or a dictionary with key 'id'")

    return self.Meta.model.objects.get(pk=data)
[ "def", "to_internal_value", "(", "self", ",", "data", ")", ":", "# When setting the contributor, it may be passed as an integer.", "if", "isinstance", "(", "data", ",", "dict", ")", "and", "isinstance", "(", "data", ".", "get", "(", "'id'", ",", "None", ")", ","...
Format the internal value.
[ "Format", "the", "internal", "value", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/contributor.py#L35-L45
train
45,290
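Given a ContributorSerializer instance, both accepted payload shapes resolve to the same user object; anything else is rejected:

serializer.to_internal_value(7)           # plain integer primary key
serializer.to_internal_value({'id': 7})   # dict carrying an integer 'id'
serializer.to_internal_value('7')         # raises ValidationError (string pk)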
genialis/resolwe
resolwe/flow/elastic_indexes/collection.py
CollectionIndexMixin.extract_descriptor
def extract_descriptor(self, obj):
    """Extract data from the descriptor."""
    descriptor = []

    def flatten(current):
        """Flatten descriptor."""
        if isinstance(current, dict):
            for key in current:
                flatten(current[key])
        elif isinstance(current, list):
            for val in current:
                flatten(val)
        elif isinstance(current, (int, bool, float, str)):
            descriptor.append(str(current))

    flatten(obj.descriptor)

    return descriptor
python
def extract_descriptor(self, obj):
    """Extract data from the descriptor."""
    descriptor = []

    def flatten(current):
        """Flatten descriptor."""
        if isinstance(current, dict):
            for key in current:
                flatten(current[key])
        elif isinstance(current, list):
            for val in current:
                flatten(val)
        elif isinstance(current, (int, bool, float, str)):
            descriptor.append(str(current))

    flatten(obj.descriptor)

    return descriptor
[ "def", "extract_descriptor", "(", "self", ",", "obj", ")", ":", "descriptor", "=", "[", "]", "def", "flatten", "(", "current", ")", ":", "\"\"\"Flatten descriptor.\"\"\"", "if", "isinstance", "(", "current", ",", "dict", ")", ":", "for", "key", "in", "curr...
Extract data from the descriptor.
[ "Extract", "data", "from", "the", "descriptor", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/elastic_indexes/collection.py#L26-L43
train
45,291
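The flattening itself is pure recursion, so a stand-in object is enough to show the behaviour (the real method receives a model instance whose ``descriptor`` holds the nested dict):

from types import SimpleNamespace

obj = SimpleNamespace(descriptor={'sample': {'organism': 'Homo sapiens', 'replicates': [1, 2]}})
# extract_descriptor(obj) returns the scalar leaves stringified for indexing:
# ['Homo sapiens', '1', '2']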
genialis/resolwe
resolwe/flow/serializers/data.py
DataSerializer._serialize_items
def _serialize_items(self, serializer, kind, items):
    """Return serialized items or list of ids, depending on `hydrate_XXX` query param."""
    if self.request and self.request.query_params.get('hydrate_{}'.format(kind), False):
        serializer = serializer(items, many=True, read_only=True)
        serializer.bind(kind, self)
        return serializer.data
    else:
        return [item.id for item in items]
python
def _serialize_items(self, serializer, kind, items):
    """Return serialized items or list of ids, depending on `hydrate_XXX` query param."""
    if self.request and self.request.query_params.get('hydrate_{}'.format(kind), False):
        serializer = serializer(items, many=True, read_only=True)
        serializer.bind(kind, self)
        return serializer.data
    else:
        return [item.id for item in items]
[ "def", "_serialize_items", "(", "self", ",", "serializer", ",", "kind", ",", "items", ")", ":", "if", "self", ".", "request", "and", "self", ".", "request", ".", "query_params", ".", "get", "(", "'hydrate_{}'", ".", "format", "(", "kind", ")", ",", "Fa...
Return serialized items or list of ids, depending on `hydrate_XXX` query param.
[ "Return", "serialized", "items", "or", "list", "of", "ids", "depending", "on", "hydrate_XXX", "query", "param", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/data.py#L91-L98
train
45,292
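At the API level this means the same field can come back as a list of ids or as embedded objects. A request-level sketch (host and endpoint path are assumptions, not taken from this record):

import requests

r = requests.get('https://example.org/api/data/1')
r.json()['collections']        # -> [4, 9] (ids only)

r = requests.get('https://example.org/api/data/1', params={'hydrate_collections': 1})
r.json()['collections']        # -> [{'id': 4, ...}, ...] (full objects)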
genialis/resolwe
resolwe/flow/serializers/data.py
DataSerializer.get_entity_names
def get_entity_names(self, data):
    """Return serialized list of entity names on data that user has `view` permission on."""
    entities = self._filter_queryset('view_entity', data.entity_set.all())
    return list(entities.values_list('name', flat=True))
python
def get_entity_names(self, data):
    """Return serialized list of entity names on data that user has `view` permission on."""
    entities = self._filter_queryset('view_entity', data.entity_set.all())
    return list(entities.values_list('name', flat=True))
[ "def", "get_entity_names", "(", "self", ",", "data", ")", ":", "entities", "=", "self", ".", "_filter_queryset", "(", "'view_entity'", ",", "data", ".", "entity_set", ".", "all", "(", ")", ")", "return", "list", "(", "entities", ".", "values_list", "(", ...
Return serialized list of entity names on data that user has `view` permission on.
[ "Return", "serialized", "list", "of", "entity", "names", "on", "data", "that", "user", "has", "view", "permission", "on", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/data.py#L105-L108
train
45,293
genialis/resolwe
resolwe/flow/serializers/data.py
DataSerializer.get_collections
def get_collections(self, data):
    """Return serialized list of collection objects on data that user has `view` permission on."""
    collections = self._filter_queryset('view_collection', data.collection_set.all())

    from .collection import CollectionSerializer

    class CollectionWithoutDataSerializer(WithoutDataSerializerMixin, CollectionSerializer):
        """Collection without data field serializer."""

    return self._serialize_items(CollectionWithoutDataSerializer, 'collections', collections)
python
def get_collections(self, data):
    """Return serialized list of collection objects on data that user has `view` permission on."""
    collections = self._filter_queryset('view_collection', data.collection_set.all())

    from .collection import CollectionSerializer

    class CollectionWithoutDataSerializer(WithoutDataSerializerMixin, CollectionSerializer):
        """Collection without data field serializer."""

    return self._serialize_items(CollectionWithoutDataSerializer, 'collections', collections)
[ "def", "get_collections", "(", "self", ",", "data", ")", ":", "collections", "=", "self", ".", "_filter_queryset", "(", "'view_collection'", ",", "data", ".", "collection_set", ".", "all", "(", ")", ")", "from", ".", "collection", "import", "CollectionSerializ...
Return serialized list of collection objects on data that user has `view` permission on.
[ "Return", "serialized", "list", "of", "collection", "objects", "on", "data", "that", "user", "has", "view", "permission", "on", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/data.py#L110-L119
train
45,294
genialis/resolwe
resolwe/flow/serializers/data.py
DataSerializer.get_entities
def get_entities(self, data):
    """Return serialized list of entity objects on data that user has `view` permission on."""
    entities = self._filter_queryset('view_entity', data.entity_set.all())

    from .entity import EntitySerializer

    class EntityWithoutDataSerializer(WithoutDataSerializerMixin, EntitySerializer):
        """Entity without data field serializer."""

    return self._serialize_items(EntityWithoutDataSerializer, 'entities', entities)
python
def get_entities(self, data):
    """Return serialized list of entity objects on data that user has `view` permission on."""
    entities = self._filter_queryset('view_entity', data.entity_set.all())

    from .entity import EntitySerializer

    class EntityWithoutDataSerializer(WithoutDataSerializerMixin, EntitySerializer):
        """Entity without data field serializer."""

    return self._serialize_items(EntityWithoutDataSerializer, 'entities', entities)
[ "def", "get_entities", "(", "self", ",", "data", ")", ":", "entities", "=", "self", ".", "_filter_queryset", "(", "'view_entity'", ",", "data", ".", "entity_set", ".", "all", "(", ")", ")", "from", ".", "entity", "import", "EntitySerializer", "class", "Ent...
Return serialized list of entity objects on data that user has `view` permission on.
[ "Return", "serialized", "list", "of", "entity", "objects", "on", "data", "that", "user", "has", "view", "permission", "on", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/data.py#L121-L130
train
45,295
genialis/resolwe
resolwe/flow/management/commands/list_docker_images.py
Command.handle
def handle(self, *args, **options):
    """Handle command list_docker_images."""
    verbosity = int(options.get('verbosity'))

    # Check that the specified output format is valid
    if options['format'] != 'plain' and options['format'] != 'yaml':
        raise CommandError("Unknown output format: %s" % options['format'])

    # Gather only unique latest custom Docker requirements that the processes are using
    # The 'image' field is optional, so be careful about that as well
    unique_docker_images = set(
        p.requirements['executor']['docker']['image']
        for p in Process.objects.filter(is_active=True).order_by(
            'slug', '-version'
        ).distinct(
            'slug'
        ).only(
            'requirements'
        ).filter(
            requirements__icontains='docker'
        )
        if 'image' in p.requirements.get('executor', {}).get('docker', {})
    )

    # Add the default image.
    unique_docker_images.add(DEFAULT_CONTAINER_IMAGE)

    # Pull images if requested or just output the list in specified format
    if options['pull']:
        # Remove set of already pulled images.
        with PULLED_IMAGES_LOCK:
            unique_docker_images.difference_update(PULLED_IMAGES)

        # Get the desired 'docker' command from settings or use the default
        docker = getattr(settings, 'FLOW_DOCKER_COMMAND', 'docker')

        # Pull each image
        for img in unique_docker_images:
            ret = subprocess.call(
                shlex.split('{} pull {}'.format(docker, img)),
                stdout=None if verbosity > 0 else subprocess.DEVNULL,
                stderr=None if verbosity > 0 else subprocess.DEVNULL,
            )

            # Update set of pulled images.
            with PULLED_IMAGES_LOCK:
                PULLED_IMAGES.add(img)

            if ret != 0:
                errmsg = "Failed to pull Docker image '{}'!".format(img)
                if not options['ignore_pull_errors']:
                    # Print error and stop execution
                    raise CommandError(errmsg)
                else:
                    # Print error, but keep going
                    logger.error(errmsg)
                    if verbosity > 0:
                        self.stderr.write(errmsg)
            else:
                msg = "Docker image '{}' pulled successfully!".format(img)
                logger.info(msg)
                if verbosity > 0:
                    self.stdout.write(msg)
    else:
        # Sort the set of unique Docker images for nicer output.
        unique_docker_images = sorted(unique_docker_images)

        # Convert the set of unique Docker images into a list of dicts for easier output
        imgs = [
            dict(name=s[0], tag=s[1] if len(s) == 2 else 'latest')
            for s in (img.split(':') for img in unique_docker_images)
        ]

        # Output in YAML or plaintext (one image per line), as requested
        if options['format'] == 'yaml':
            out = yaml.safe_dump(imgs, default_flow_style=True, default_style="'")
        else:
            out = functools.reduce(operator.add, ('{name}:{tag}\n'.format(**i) for i in imgs), '')

        self.stdout.write(out, ending='')
python
def handle(self, *args, **options):
    """Handle command list_docker_images."""
    verbosity = int(options.get('verbosity'))

    # Check that the specified output format is valid
    if options['format'] != 'plain' and options['format'] != 'yaml':
        raise CommandError("Unknown output format: %s" % options['format'])

    # Gather only unique latest custom Docker requirements that the processes are using
    # The 'image' field is optional, so be careful about that as well
    unique_docker_images = set(
        p.requirements['executor']['docker']['image']
        for p in Process.objects.filter(is_active=True).order_by(
            'slug', '-version'
        ).distinct(
            'slug'
        ).only(
            'requirements'
        ).filter(
            requirements__icontains='docker'
        )
        if 'image' in p.requirements.get('executor', {}).get('docker', {})
    )

    # Add the default image.
    unique_docker_images.add(DEFAULT_CONTAINER_IMAGE)

    # Pull images if requested or just output the list in specified format
    if options['pull']:
        # Remove set of already pulled images.
        with PULLED_IMAGES_LOCK:
            unique_docker_images.difference_update(PULLED_IMAGES)

        # Get the desired 'docker' command from settings or use the default
        docker = getattr(settings, 'FLOW_DOCKER_COMMAND', 'docker')

        # Pull each image
        for img in unique_docker_images:
            ret = subprocess.call(
                shlex.split('{} pull {}'.format(docker, img)),
                stdout=None if verbosity > 0 else subprocess.DEVNULL,
                stderr=None if verbosity > 0 else subprocess.DEVNULL,
            )

            # Update set of pulled images.
            with PULLED_IMAGES_LOCK:
                PULLED_IMAGES.add(img)

            if ret != 0:
                errmsg = "Failed to pull Docker image '{}'!".format(img)
                if not options['ignore_pull_errors']:
                    # Print error and stop execution
                    raise CommandError(errmsg)
                else:
                    # Print error, but keep going
                    logger.error(errmsg)
                    if verbosity > 0:
                        self.stderr.write(errmsg)
            else:
                msg = "Docker image '{}' pulled successfully!".format(img)
                logger.info(msg)
                if verbosity > 0:
                    self.stdout.write(msg)
    else:
        # Sort the set of unique Docker images for nicer output.
        unique_docker_images = sorted(unique_docker_images)

        # Convert the set of unique Docker images into a list of dicts for easier output
        imgs = [
            dict(name=s[0], tag=s[1] if len(s) == 2 else 'latest')
            for s in (img.split(':') for img in unique_docker_images)
        ]

        # Output in YAML or plaintext (one image per line), as requested
        if options['format'] == 'yaml':
            out = yaml.safe_dump(imgs, default_flow_style=True, default_style="'")
        else:
            out = functools.reduce(operator.add, ('{name}:{tag}\n'.format(**i) for i in imgs), '')

        self.stdout.write(out, ending='')
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "verbosity", "=", "int", "(", "options", ".", "get", "(", "'verbosity'", ")", ")", "# Check that the specified output format is valid", "if", "options", "[", "'format'", "]",...
Handle command list_docker_images.
[ "Handle", "command", "list_docker_images", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/list_docker_images.py#L48-L129
train
45,296
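The command can also be driven from Python through Django's ``call_command``; the option names below are assumed to mirror the keys the handler reads from ``options`` (the record does not show ``add_arguments``):

from django.core.management import call_command

call_command('list_docker_images', format='yaml')                        # just list images
call_command('list_docker_images', pull=True, ignore_pull_errors=True)   # pre-pull, tolerate failures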
genialis/resolwe
resolwe/flow/views/data.py
DataViewSet.get_or_create
def get_or_create(self, request, *args, **kwargs):
    """Get ``Data`` object if similar already exists, otherwise create it."""
    kwargs['get_or_create'] = True
    return self.create(request, *args, **kwargs)
python
def get_or_create(self, request, *args, **kwargs):
    """Get ``Data`` object if similar already exists, otherwise create it."""
    kwargs['get_or_create'] = True
    return self.create(request, *args, **kwargs)
[ "def", "get_or_create", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'get_or_create'", "]", "=", "True", "return", "self", ".", "create", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ...
Get ``Data`` object if similar already exists, otherwise create it.
[ "Get", "Data", "object", "if", "similar", "already", "exists", "otherwise", "create", "it", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/data.py#L162-L165
train
45,297
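A request-level sketch of what this buys a client; the endpoint path and payload shape here are assumptions, not taken from the record:

import requests

payload = {'process': 'upload-fasta', 'input': {'src': 'genome.fasta'}}
r = requests.post('https://example.org/api/data/get_or_create', json=payload)
# Returns the existing Data object when a checksum match with 'view'
# permission is found; otherwise falls through to the normal create path.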
genialis/resolwe
resolwe/flow/views/data.py
DataViewSet.perform_get_or_create
def perform_get_or_create(self, request, *args, **kwargs):
    """Perform "get_or_create" - return existing object if found."""
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)

    process = serializer.validated_data.get('process')
    process_input = request.data.get('input', {})

    fill_with_defaults(process_input, process.input_schema)

    checksum = get_data_checksum(process_input, process.slug, process.version)
    data_qs = Data.objects.filter(
        checksum=checksum,
        process__persistence__in=[Process.PERSISTENCE_CACHED, Process.PERSISTENCE_TEMP],
    )
    data_qs = get_objects_for_user(request.user, 'view_data', data_qs)
    if data_qs.exists():
        data = data_qs.order_by('created').last()
        serializer = self.get_serializer(data)
        return Response(serializer.data)
python
def perform_get_or_create(self, request, *args, **kwargs):
    """Perform "get_or_create" - return existing object if found."""
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)

    process = serializer.validated_data.get('process')
    process_input = request.data.get('input', {})

    fill_with_defaults(process_input, process.input_schema)

    checksum = get_data_checksum(process_input, process.slug, process.version)
    data_qs = Data.objects.filter(
        checksum=checksum,
        process__persistence__in=[Process.PERSISTENCE_CACHED, Process.PERSISTENCE_TEMP],
    )
    data_qs = get_objects_for_user(request.user, 'view_data', data_qs)
    if data_qs.exists():
        data = data_qs.order_by('created').last()
        serializer = self.get_serializer(data)
        return Response(serializer.data)
[ "def", "perform_get_or_create", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "serializer", "=", "self", ".", "get_serializer", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_...
Perform "get_or_create" - return existing object if found.
[ "Perform", "get_or_create", "-", "return", "existing", "object", "if", "found", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/data.py#L167-L185
train
45,298
DataONEorg/d1_python
gmn/src/d1_gmn/app/gmn.py
Startup.ready
def ready(self):
    """Called once per Django process instance.

    If the filesystem setup fails or if an error is found in
    settings.py, django.core.exceptions.ImproperlyConfigured is raised,
    causing Django not to launch the main GMN app.
    """
    # Stop the startup code from running automatically from pytest unit tests.
    # When running tests in parallel with xdist, an instance of GMN is launched
    # before thread specific settings have been applied.
    # if hasattr(sys, '_launched_by_pytest'):
    #   return
    self._assert_readable_file_if_set('CLIENT_CERT_PATH')
    self._assert_readable_file_if_set('CLIENT_CERT_PRIVATE_KEY_PATH')
    self._assert_dirs_exist('OBJECT_FORMAT_CACHE_PATH')
    self._assert_is_type('SCIMETA_VALIDATION_ENABLED', bool)
    self._assert_is_type('SCIMETA_VALIDATION_MAX_SIZE', int)
    self._assert_is_in('SCIMETA_VALIDATION_OVER_SIZE_ACTION', ('reject', 'accept'))
    self._warn_unsafe_for_prod()
    self._check_resource_map_create()
    if not d1_gmn.app.sciobj_store.is_existing_store():
        self._create_sciobj_store_root()
    self._add_xslt_mimetype()
python
def ready(self):
    """Called once per Django process instance.

    If the filesystem setup fails or if an error is found in
    settings.py, django.core.exceptions.ImproperlyConfigured is raised,
    causing Django not to launch the main GMN app.
    """
    # Stop the startup code from running automatically from pytest unit tests.
    # When running tests in parallel with xdist, an instance of GMN is launched
    # before thread specific settings have been applied.
    # if hasattr(sys, '_launched_by_pytest'):
    #   return
    self._assert_readable_file_if_set('CLIENT_CERT_PATH')
    self._assert_readable_file_if_set('CLIENT_CERT_PRIVATE_KEY_PATH')
    self._assert_dirs_exist('OBJECT_FORMAT_CACHE_PATH')
    self._assert_is_type('SCIMETA_VALIDATION_ENABLED', bool)
    self._assert_is_type('SCIMETA_VALIDATION_MAX_SIZE', int)
    self._assert_is_in('SCIMETA_VALIDATION_OVER_SIZE_ACTION', ('reject', 'accept'))
    self._warn_unsafe_for_prod()
    self._check_resource_map_create()
    if not d1_gmn.app.sciobj_store.is_existing_store():
        self._create_sciobj_store_root()
    self._add_xslt_mimetype()
[ "def", "ready", "(", "self", ")", ":", "# Stop the startup code from running automatically from pytest unit tests.", "# When running tests in parallel with xdist, an instance of GMN is launched", "# before thread specific settings have been applied.", "# if hasattr(sys, '_launched_by_pytest'):", ...
Called once per Django process instance.

If the filesystem setup fails or if an error is found in settings.py, django.core.exceptions.ImproperlyConfigured is raised, causing Django not to launch the main GMN app.
[ "Called", "once", "per", "Django", "process", "instance", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/gmn.py#L49-L75
train
45,299