repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
DataONEorg/d1_python | gmn/src/d1_gmn/app/gmn.py | Startup._assert_dirs_exist | def _assert_dirs_exist(self, setting_name):
"""Check that the dirs leading up to the given file path exist.
Does not check if the file exists.
"""
v = self._get_setting(setting_name)
if (not os.path.isdir(os.path.split(v)[0])) or os.path.isdir(v):
self.raise_config_error(
setting_name,
v,
str,
'a file path in an existing directory',
is_none_allowed=False,
) | python | def _assert_dirs_exist(self, setting_name):
"""Check that the dirs leading up to the given file path exist.
Does not check if the file exists.
"""
v = self._get_setting(setting_name)
if (not os.path.isdir(os.path.split(v)[0])) or os.path.isdir(v):
self.raise_config_error(
setting_name,
v,
str,
'a file path in an existing directory',
is_none_allowed=False,
) | [
"def",
"_assert_dirs_exist",
"(",
"self",
",",
"setting_name",
")",
":",
"v",
"=",
"self",
".",
"_get_setting",
"(",
"setting_name",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"split",
"(",
"v",
")",
"[",
"... | Check that the dirs leading up to the given file path exist.
Does not check if the file exists. | [
"Check",
"that",
"the",
"dirs",
"leading",
"up",
"to",
"the",
"given",
"file",
"path",
"exist",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/gmn.py#L109-L123 | train | 45,300 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/gmn.py | Startup._warn_unsafe_for_prod | def _warn_unsafe_for_prod(self):
"""Warn on settings that are not safe for production."""
safe_settings_list = [
('DEBUG', False),
('DEBUG_GMN', False),
('STAND_ALONE', False),
('DATABASES.default.ATOMIC_REQUESTS', True),
('SECRET_KEY', '<Do not modify this placeholder value>'),
('STATIC_SERVER', False),
]
for setting_str, setting_safe in safe_settings_list:
setting_current = self._get_setting(setting_str)
if setting_current != setting_safe:
logger.warning(
'Setting is unsafe for use in production. setting="{}" current="{}" '
'safe="{}"'.format(setting_str, setting_current, setting_safe)
) | python | def _warn_unsafe_for_prod(self):
"""Warn on settings that are not safe for production."""
safe_settings_list = [
('DEBUG', False),
('DEBUG_GMN', False),
('STAND_ALONE', False),
('DATABASES.default.ATOMIC_REQUESTS', True),
('SECRET_KEY', '<Do not modify this placeholder value>'),
('STATIC_SERVER', False),
]
for setting_str, setting_safe in safe_settings_list:
setting_current = self._get_setting(setting_str)
if setting_current != setting_safe:
logger.warning(
'Setting is unsafe for use in production. setting="{}" current="{}" '
'safe="{}"'.format(setting_str, setting_current, setting_safe)
) | [
"def",
"_warn_unsafe_for_prod",
"(",
"self",
")",
":",
"safe_settings_list",
"=",
"[",
"(",
"'DEBUG'",
",",
"False",
")",
",",
"(",
"'DEBUG_GMN'",
",",
"False",
")",
",",
"(",
"'STAND_ALONE'",
",",
"False",
")",
",",
"(",
"'DATABASES.default.ATOMIC_REQUESTS'",... | Warn on settings that are not safe for production. | [
"Warn",
"on",
"settings",
"that",
"are",
"not",
"safe",
"for",
"production",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/gmn.py#L153-L169 | train | 45,301 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/gmn.py | Startup._get_setting | def _get_setting(self, setting_dotted_name, default=None):
"""Return the value of a potentially nested dict setting.
E.g., 'DATABASES.default.NAME
"""
name_list = setting_dotted_name.split('.')
setting_obj = getattr(django.conf.settings, name_list[0], default)
# if len(name_list) == 1:
# return setting_obj
return functools.reduce(
lambda o, a: o.get(a, default), [setting_obj] + name_list[1:]
) | python | def _get_setting(self, setting_dotted_name, default=None):
"""Return the value of a potentially nested dict setting.
E.g., 'DATABASES.default.NAME
"""
name_list = setting_dotted_name.split('.')
setting_obj = getattr(django.conf.settings, name_list[0], default)
# if len(name_list) == 1:
# return setting_obj
return functools.reduce(
lambda o, a: o.get(a, default), [setting_obj] + name_list[1:]
) | [
"def",
"_get_setting",
"(",
"self",
",",
"setting_dotted_name",
",",
"default",
"=",
"None",
")",
":",
"name_list",
"=",
"setting_dotted_name",
".",
"split",
"(",
"'.'",
")",
"setting_obj",
"=",
"getattr",
"(",
"django",
".",
"conf",
".",
"settings",
",",
... | Return the value of a potentially nested dict setting.
E.g., 'DATABASES.default.NAME | [
"Return",
"the",
"value",
"of",
"a",
"potentially",
"nested",
"dict",
"setting",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/gmn.py#L244-L257 | train | 45,302 |
genialis/resolwe | resolwe/elastic/indices.py | BaseIndex._refresh_connection | def _refresh_connection(self):
"""Refresh connection to Elasticsearch when worker is started.
File descriptors (sockets) can be shared between multiple
threads. If same connection is used by multiple threads at the
same time, this can cause timeouts in some of the pushes. So
connection needs to be reestablished in each thread to make sure
that it is unique per thread.
"""
# Thread with same id can be created when one terminates, but it
# is ok, as we are only concerned about concurent pushes.
current_thread_id = threading.current_thread().ident
if current_thread_id != self.connection_thread_id:
prepare_connection()
self.connection_thread_id = current_thread_id | python | def _refresh_connection(self):
"""Refresh connection to Elasticsearch when worker is started.
File descriptors (sockets) can be shared between multiple
threads. If same connection is used by multiple threads at the
same time, this can cause timeouts in some of the pushes. So
connection needs to be reestablished in each thread to make sure
that it is unique per thread.
"""
# Thread with same id can be created when one terminates, but it
# is ok, as we are only concerned about concurent pushes.
current_thread_id = threading.current_thread().ident
if current_thread_id != self.connection_thread_id:
prepare_connection()
self.connection_thread_id = current_thread_id | [
"def",
"_refresh_connection",
"(",
"self",
")",
":",
"# Thread with same id can be created when one terminates, but it",
"# is ok, as we are only concerned about concurent pushes.",
"current_thread_id",
"=",
"threading",
".",
"current_thread",
"(",
")",
".",
"ident",
"if",
"curre... | Refresh connection to Elasticsearch when worker is started.
File descriptors (sockets) can be shared between multiple
threads. If same connection is used by multiple threads at the
same time, this can cause timeouts in some of the pushes. So
connection needs to be reestablished in each thread to make sure
that it is unique per thread. | [
"Refresh",
"connection",
"to",
"Elasticsearch",
"when",
"worker",
"is",
"started",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/indices.py#L138-L154 | train | 45,303 |
genialis/resolwe | resolwe/elastic/indices.py | BaseIndex.generate_id | def generate_id(self, obj):
"""Generate unique document id for ElasticSearch."""
object_type = type(obj).__name__.lower()
return '{}_{}'.format(object_type, self.get_object_id(obj)) | python | def generate_id(self, obj):
"""Generate unique document id for ElasticSearch."""
object_type = type(obj).__name__.lower()
return '{}_{}'.format(object_type, self.get_object_id(obj)) | [
"def",
"generate_id",
"(",
"self",
",",
"obj",
")",
":",
"object_type",
"=",
"type",
"(",
"obj",
")",
".",
"__name__",
".",
"lower",
"(",
")",
"return",
"'{}_{}'",
".",
"format",
"(",
"object_type",
",",
"self",
".",
"get_object_id",
"(",
"obj",
")",
... | Generate unique document id for ElasticSearch. | [
"Generate",
"unique",
"document",
"id",
"for",
"ElasticSearch",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/indices.py#L180-L183 | train | 45,304 |
genialis/resolwe | resolwe/elastic/indices.py | BaseIndex.process_object | def process_object(self, obj):
"""Process current object and push it to the ElasticSearch."""
document = self.document_class(meta={'id': self.generate_id(obj)})
for field in document._doc_type.mapping: # pylint: disable=protected-access
if field in ['users_with_permissions', 'groups_with_permissions', 'public_permission']:
continue # These fields are handled separately
try:
# use get_X_value function
get_value_function = getattr(self, 'get_{}_value'.format(field), None)
if get_value_function:
setattr(document, field, get_value_function(obj)) # pylint: disable=not-callable
continue
# use `mapping` dict
if field in self.mapping:
if callable(self.mapping[field]):
setattr(document, field, self.mapping[field](obj))
continue
try:
object_attr = dict_dot(obj, self.mapping[field])
except (KeyError, AttributeError):
object_attr = None
if callable(object_attr):
# use method on object
setattr(document, field, object_attr(obj))
else:
# use attribute on object
setattr(document, field, object_attr)
continue
# get value from the object
try:
object_value = dict_dot(obj, field)
setattr(document, field, object_value)
continue
except KeyError:
pass
raise AttributeError("Cannot determine mapping for field {}".format(field))
except Exception: # pylint: disable=broad-except
logger.exception(
"Error occurred while setting value of field '%s' in '%s' Elasticsearch index.",
field, self.__class__.__name__,
extra={'object_type': self.object_type, 'obj_id': obj.pk}
)
permissions = self.get_permissions(obj)
document.users_with_permissions = permissions['users']
document.groups_with_permissions = permissions['groups']
document.public_permission = permissions['public']
self.push_queue.append(document) | python | def process_object(self, obj):
"""Process current object and push it to the ElasticSearch."""
document = self.document_class(meta={'id': self.generate_id(obj)})
for field in document._doc_type.mapping: # pylint: disable=protected-access
if field in ['users_with_permissions', 'groups_with_permissions', 'public_permission']:
continue # These fields are handled separately
try:
# use get_X_value function
get_value_function = getattr(self, 'get_{}_value'.format(field), None)
if get_value_function:
setattr(document, field, get_value_function(obj)) # pylint: disable=not-callable
continue
# use `mapping` dict
if field in self.mapping:
if callable(self.mapping[field]):
setattr(document, field, self.mapping[field](obj))
continue
try:
object_attr = dict_dot(obj, self.mapping[field])
except (KeyError, AttributeError):
object_attr = None
if callable(object_attr):
# use method on object
setattr(document, field, object_attr(obj))
else:
# use attribute on object
setattr(document, field, object_attr)
continue
# get value from the object
try:
object_value = dict_dot(obj, field)
setattr(document, field, object_value)
continue
except KeyError:
pass
raise AttributeError("Cannot determine mapping for field {}".format(field))
except Exception: # pylint: disable=broad-except
logger.exception(
"Error occurred while setting value of field '%s' in '%s' Elasticsearch index.",
field, self.__class__.__name__,
extra={'object_type': self.object_type, 'obj_id': obj.pk}
)
permissions = self.get_permissions(obj)
document.users_with_permissions = permissions['users']
document.groups_with_permissions = permissions['groups']
document.public_permission = permissions['public']
self.push_queue.append(document) | [
"def",
"process_object",
"(",
"self",
",",
"obj",
")",
":",
"document",
"=",
"self",
".",
"document_class",
"(",
"meta",
"=",
"{",
"'id'",
":",
"self",
".",
"generate_id",
"(",
"obj",
")",
"}",
")",
"for",
"field",
"in",
"document",
".",
"_doc_type",
... | Process current object and push it to the ElasticSearch. | [
"Process",
"current",
"object",
"and",
"push",
"it",
"to",
"the",
"ElasticSearch",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/indices.py#L185-L241 | train | 45,305 |
genialis/resolwe | resolwe/elastic/indices.py | BaseIndex.create_mapping | def create_mapping(self):
"""Create the mappings in elasticsearch."""
try:
self.document_class.init()
self._mapping_created = True
except IllegalOperation as error:
if error.args[0].startswith('You cannot update analysis configuration'):
# Ignore mapping update errors, which are thrown even when the analysis
# configuration stays the same.
# TODO: Remove this when https://github.com/elastic/elasticsearch-dsl-py/pull/272 is merged.
return
raise | python | def create_mapping(self):
"""Create the mappings in elasticsearch."""
try:
self.document_class.init()
self._mapping_created = True
except IllegalOperation as error:
if error.args[0].startswith('You cannot update analysis configuration'):
# Ignore mapping update errors, which are thrown even when the analysis
# configuration stays the same.
# TODO: Remove this when https://github.com/elastic/elasticsearch-dsl-py/pull/272 is merged.
return
raise | [
"def",
"create_mapping",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"document_class",
".",
"init",
"(",
")",
"self",
".",
"_mapping_created",
"=",
"True",
"except",
"IllegalOperation",
"as",
"error",
":",
"if",
"error",
".",
"args",
"[",
"0",
"]",
... | Create the mappings in elasticsearch. | [
"Create",
"the",
"mappings",
"in",
"elasticsearch",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/indices.py#L243-L255 | train | 45,306 |
genialis/resolwe | resolwe/elastic/indices.py | BaseIndex.destroy | def destroy(self):
"""Destroy an index."""
self._refresh_connection()
self.push_queue = []
index_name = self.document_class()._get_index() # pylint: disable=protected-access
connections.get_connection().indices.delete(index_name, ignore=404)
self._mapping_created = False | python | def destroy(self):
"""Destroy an index."""
self._refresh_connection()
self.push_queue = []
index_name = self.document_class()._get_index() # pylint: disable=protected-access
connections.get_connection().indices.delete(index_name, ignore=404)
self._mapping_created = False | [
"def",
"destroy",
"(",
"self",
")",
":",
"self",
".",
"_refresh_connection",
"(",
")",
"self",
".",
"push_queue",
"=",
"[",
"]",
"index_name",
"=",
"self",
".",
"document_class",
"(",
")",
".",
"_get_index",
"(",
")",
"# pylint: disable=protected-access",
"c... | Destroy an index. | [
"Destroy",
"an",
"index",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/indices.py#L392-L400 | train | 45,307 |
genialis/resolwe | resolwe/elastic/indices.py | BaseIndex.get_permissions | def get_permissions(self, obj):
"""Return users and groups with ``view`` permission on the current object.
Return a dict with two keys - ``users`` and ``groups`` - which
contain list of ids of users/groups with ``view`` permission.
"""
# TODO: Optimize this for bulk running
filters = {
'object_pk': obj.id,
'content_type': ContentType.objects.get_for_model(obj),
'permission__codename__startswith': 'view',
}
return {
'users': list(
UserObjectPermission.objects.filter(**filters).distinct('user').values_list('user_id', flat=True)
),
'groups': list(
GroupObjectPermission.objects.filter(**filters).distinct('group').values_list('group', flat=True)
),
'public': UserObjectPermission.objects.filter(user__username=ANONYMOUS_USER_NAME, **filters).exists(),
} | python | def get_permissions(self, obj):
"""Return users and groups with ``view`` permission on the current object.
Return a dict with two keys - ``users`` and ``groups`` - which
contain list of ids of users/groups with ``view`` permission.
"""
# TODO: Optimize this for bulk running
filters = {
'object_pk': obj.id,
'content_type': ContentType.objects.get_for_model(obj),
'permission__codename__startswith': 'view',
}
return {
'users': list(
UserObjectPermission.objects.filter(**filters).distinct('user').values_list('user_id', flat=True)
),
'groups': list(
GroupObjectPermission.objects.filter(**filters).distinct('group').values_list('group', flat=True)
),
'public': UserObjectPermission.objects.filter(user__username=ANONYMOUS_USER_NAME, **filters).exists(),
} | [
"def",
"get_permissions",
"(",
"self",
",",
"obj",
")",
":",
"# TODO: Optimize this for bulk running",
"filters",
"=",
"{",
"'object_pk'",
":",
"obj",
".",
"id",
",",
"'content_type'",
":",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
")",
... | Return users and groups with ``view`` permission on the current object.
Return a dict with two keys - ``users`` and ``groups`` - which
contain list of ids of users/groups with ``view`` permission. | [
"Return",
"users",
"and",
"groups",
"with",
"view",
"permission",
"on",
"the",
"current",
"object",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/indices.py#L402-L422 | train | 45,308 |
genialis/resolwe | resolwe/elastic/indices.py | BaseIndex.remove_object | def remove_object(self, obj):
"""Remove current object from the ElasticSearch."""
obj_id = self.generate_id(obj)
es_obj = self.document_class.get(obj_id, ignore=[404])
# Object may not exist in this index.
if es_obj:
es_obj.delete(refresh=True) | python | def remove_object(self, obj):
"""Remove current object from the ElasticSearch."""
obj_id = self.generate_id(obj)
es_obj = self.document_class.get(obj_id, ignore=[404])
# Object may not exist in this index.
if es_obj:
es_obj.delete(refresh=True) | [
"def",
"remove_object",
"(",
"self",
",",
"obj",
")",
":",
"obj_id",
"=",
"self",
".",
"generate_id",
"(",
"obj",
")",
"es_obj",
"=",
"self",
".",
"document_class",
".",
"get",
"(",
"obj_id",
",",
"ignore",
"=",
"[",
"404",
"]",
")",
"# Object may not ... | Remove current object from the ElasticSearch. | [
"Remove",
"current",
"object",
"from",
"the",
"ElasticSearch",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/indices.py#L428-L434 | train | 45,309 |
DataONEorg/d1_python | lib_common/src/d1_common/types/scripts/pyxbgen_all.py | GenerateVersionFile.generate_version_file | def generate_version_file(self, schema_filename, binding_filename):
"""Given a DataONE schema, generates a file that contains version information
about the schema."""
version_filename = binding_filename + '_version.txt'
version_path = os.path.join(self.binding_dir, version_filename)
schema_path = os.path.join(self.schema_dir, schema_filename)
try:
tstamp, svnpath, svnrev, version = self.get_version_info_from_svn(
schema_path
)
except TypeError:
pass
else:
self.write_version_file(version_path, tstamp, svnpath, svnrev, version) | python | def generate_version_file(self, schema_filename, binding_filename):
"""Given a DataONE schema, generates a file that contains version information
about the schema."""
version_filename = binding_filename + '_version.txt'
version_path = os.path.join(self.binding_dir, version_filename)
schema_path = os.path.join(self.schema_dir, schema_filename)
try:
tstamp, svnpath, svnrev, version = self.get_version_info_from_svn(
schema_path
)
except TypeError:
pass
else:
self.write_version_file(version_path, tstamp, svnpath, svnrev, version) | [
"def",
"generate_version_file",
"(",
"self",
",",
"schema_filename",
",",
"binding_filename",
")",
":",
"version_filename",
"=",
"binding_filename",
"+",
"'_version.txt'",
"version_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"binding_dir",
",",
... | Given a DataONE schema, generates a file that contains version information
about the schema. | [
"Given",
"a",
"DataONE",
"schema",
"generates",
"a",
"file",
"that",
"contains",
"version",
"information",
"about",
"the",
"schema",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/scripts/pyxbgen_all.py#L170-L183 | train | 45,310 |
genialis/resolwe | resolwe/flow/migrations/0028_add_data_location.py | set_data_location | def set_data_location(apps, schema_editor):
"""Create DataLocation for each Data."""
Data = apps.get_model('flow', 'Data')
DataLocation = apps.get_model('flow', 'DataLocation')
for data in Data.objects.all():
if os.path.isdir(os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.id))):
with transaction.atomic():
# Manually set DataLocation id to preserve data directory.
data_location = DataLocation.objects.create(id=data.id, subpath=str(data.id))
data_location.data.add(data)
# Increment DataLocation id's sequence
if DataLocation.objects.exists():
max_id = DataLocation.objects.order_by('id').last().id
with connection.cursor() as cursor:
cursor.execute(
"ALTER SEQUENCE flow_datalocation_id_seq RESTART WITH {};".format(max_id + 1)
) | python | def set_data_location(apps, schema_editor):
"""Create DataLocation for each Data."""
Data = apps.get_model('flow', 'Data')
DataLocation = apps.get_model('flow', 'DataLocation')
for data in Data.objects.all():
if os.path.isdir(os.path.join(settings.FLOW_EXECUTOR['DATA_DIR'], str(data.id))):
with transaction.atomic():
# Manually set DataLocation id to preserve data directory.
data_location = DataLocation.objects.create(id=data.id, subpath=str(data.id))
data_location.data.add(data)
# Increment DataLocation id's sequence
if DataLocation.objects.exists():
max_id = DataLocation.objects.order_by('id').last().id
with connection.cursor() as cursor:
cursor.execute(
"ALTER SEQUENCE flow_datalocation_id_seq RESTART WITH {};".format(max_id + 1)
) | [
"def",
"set_data_location",
"(",
"apps",
",",
"schema_editor",
")",
":",
"Data",
"=",
"apps",
".",
"get_model",
"(",
"'flow'",
",",
"'Data'",
")",
"DataLocation",
"=",
"apps",
".",
"get_model",
"(",
"'flow'",
",",
"'DataLocation'",
")",
"for",
"data",
"in"... | Create DataLocation for each Data. | [
"Create",
"DataLocation",
"for",
"each",
"Data",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/migrations/0028_add_data_location.py#L12-L30 | train | 45,311 |
DataONEorg/d1_python | lib_client/src/d1_client/iter/objectlist.py | ObjectListIterator._loadMore | def _loadMore(self, start=0, trys=0, validation=True):
"""Retrieves the next page of results."""
self._log.debug("Loading page starting from %d" % start)
self._czero = start
self._pageoffs = 0
try:
pyxb.RequireValidWhenParsing(validation)
self._object_list = self._client.listObjects(
start=start,
count=self._pagesize,
fromDate=self._fromDate,
nodeId=self._nodeId,
)
except http.client.BadStatusLine as e:
self._log.warning("Server responded with Bad Status Line. Retrying in 5sec")
self._client.connection.close()
if trys > 3:
raise e
trys += 1
self._loadMore(start, trys)
except d1_common.types.exceptions.ServiceFailure as e:
self._log.error(e)
if trys > 3:
raise e
trys += 1
self._loadMore(start, trys, validation=False) | python | def _loadMore(self, start=0, trys=0, validation=True):
"""Retrieves the next page of results."""
self._log.debug("Loading page starting from %d" % start)
self._czero = start
self._pageoffs = 0
try:
pyxb.RequireValidWhenParsing(validation)
self._object_list = self._client.listObjects(
start=start,
count=self._pagesize,
fromDate=self._fromDate,
nodeId=self._nodeId,
)
except http.client.BadStatusLine as e:
self._log.warning("Server responded with Bad Status Line. Retrying in 5sec")
self._client.connection.close()
if trys > 3:
raise e
trys += 1
self._loadMore(start, trys)
except d1_common.types.exceptions.ServiceFailure as e:
self._log.error(e)
if trys > 3:
raise e
trys += 1
self._loadMore(start, trys, validation=False) | [
"def",
"_loadMore",
"(",
"self",
",",
"start",
"=",
"0",
",",
"trys",
"=",
"0",
",",
"validation",
"=",
"True",
")",
":",
"self",
".",
"_log",
".",
"debug",
"(",
"\"Loading page starting from %d\"",
"%",
"start",
")",
"self",
".",
"_czero",
"=",
"start... | Retrieves the next page of results. | [
"Retrieves",
"the",
"next",
"page",
"of",
"results",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/iter/objectlist.py#L188-L213 | train | 45,312 |
genialis/resolwe | resolwe/flow/models/data.py | DataQuerySet._delete_chunked | def _delete_chunked(queryset, chunk_size=500):
"""Chunked delete, which should be used if deleting many objects.
The reason why this method is needed is that deleting a lot of Data objects
requires Django to fetch all of them into memory (fast path is not used) and
this causes huge memory usage (and possibly OOM).
:param chunk_size: Optional chunk size
"""
while True:
# Discover primary key to limit the current chunk. This is required because delete
# cannot be called on a sliced queryset due to ordering requirement.
with transaction.atomic():
# Get offset of last item (needed because it may be less than the chunk size).
offset = queryset.order_by('pk')[:chunk_size].count()
if not offset:
break
# Fetch primary key of last item and use it to delete the chunk.
last_instance = queryset.order_by('pk')[offset - 1]
queryset.filter(pk__lte=last_instance.pk).delete() | python | def _delete_chunked(queryset, chunk_size=500):
"""Chunked delete, which should be used if deleting many objects.
The reason why this method is needed is that deleting a lot of Data objects
requires Django to fetch all of them into memory (fast path is not used) and
this causes huge memory usage (and possibly OOM).
:param chunk_size: Optional chunk size
"""
while True:
# Discover primary key to limit the current chunk. This is required because delete
# cannot be called on a sliced queryset due to ordering requirement.
with transaction.atomic():
# Get offset of last item (needed because it may be less than the chunk size).
offset = queryset.order_by('pk')[:chunk_size].count()
if not offset:
break
# Fetch primary key of last item and use it to delete the chunk.
last_instance = queryset.order_by('pk')[offset - 1]
queryset.filter(pk__lte=last_instance.pk).delete() | [
"def",
"_delete_chunked",
"(",
"queryset",
",",
"chunk_size",
"=",
"500",
")",
":",
"while",
"True",
":",
"# Discover primary key to limit the current chunk. This is required because delete",
"# cannot be called on a sliced queryset due to ordering requirement.",
"with",
"transaction... | Chunked delete, which should be used if deleting many objects.
The reason why this method is needed is that deleting a lot of Data objects
requires Django to fetch all of them into memory (fast path is not used) and
this causes huge memory usage (and possibly OOM).
:param chunk_size: Optional chunk size | [
"Chunked",
"delete",
"which",
"should",
"be",
"used",
"if",
"deleting",
"many",
"objects",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L40-L60 | train | 45,313 |
genialis/resolwe | resolwe/flow/models/data.py | Data.create_entity | def create_entity(self):
"""Create entity if `flow_collection` is defined in process.
Following rules applies for adding `Data` object to `Entity`:
* Only add `Data object` to `Entity` if process has defined
`flow_collection` field
* Add object to existing `Entity`, if all parents that are part
of it (but not necessary all parents), are part of the same
`Entity`
* If parents belong to different `Entities` or do not belong to
any `Entity`, create new `Entity`
"""
entity_type = self.process.entity_type # pylint: disable=no-member
entity_descriptor_schema = self.process.entity_descriptor_schema # pylint: disable=no-member
entity_input = self.process.entity_input # pylint: disable=no-member
if entity_type:
data_filter = {}
if entity_input:
input_id = dict_dot(self.input, entity_input, default=lambda: None)
if input_id is None:
logger.warning("Skipping creation of entity due to missing input.")
return
if isinstance(input_id, int):
data_filter['data__pk'] = input_id
elif isinstance(input_id, list):
data_filter['data__pk__in'] = input_id
else:
raise ValueError(
"Cannot create entity due to invalid value of field {}.".format(entity_input)
)
else:
data_filter['data__in'] = self.parents.all() # pylint: disable=no-member
entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct()
entity_count = entity_query.count()
if entity_count == 0:
descriptor_schema = DescriptorSchema.objects.filter(
slug=entity_descriptor_schema
).latest()
entity = Entity.objects.create(
contributor=self.contributor,
descriptor_schema=descriptor_schema,
type=entity_type,
name=self.name,
tags=self.tags,
)
assign_contributor_permissions(entity)
elif entity_count == 1:
entity = entity_query.first()
copy_permissions(entity, self)
else:
logger.info("Skipping creation of entity due to multiple entities found.")
entity = None
if entity:
entity.data.add(self)
# Inherit collections from entity.
for collection in entity.collections.all():
collection.data.add(self) | python | def create_entity(self):
"""Create entity if `flow_collection` is defined in process.
Following rules applies for adding `Data` object to `Entity`:
* Only add `Data object` to `Entity` if process has defined
`flow_collection` field
* Add object to existing `Entity`, if all parents that are part
of it (but not necessary all parents), are part of the same
`Entity`
* If parents belong to different `Entities` or do not belong to
any `Entity`, create new `Entity`
"""
entity_type = self.process.entity_type # pylint: disable=no-member
entity_descriptor_schema = self.process.entity_descriptor_schema # pylint: disable=no-member
entity_input = self.process.entity_input # pylint: disable=no-member
if entity_type:
data_filter = {}
if entity_input:
input_id = dict_dot(self.input, entity_input, default=lambda: None)
if input_id is None:
logger.warning("Skipping creation of entity due to missing input.")
return
if isinstance(input_id, int):
data_filter['data__pk'] = input_id
elif isinstance(input_id, list):
data_filter['data__pk__in'] = input_id
else:
raise ValueError(
"Cannot create entity due to invalid value of field {}.".format(entity_input)
)
else:
data_filter['data__in'] = self.parents.all() # pylint: disable=no-member
entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct()
entity_count = entity_query.count()
if entity_count == 0:
descriptor_schema = DescriptorSchema.objects.filter(
slug=entity_descriptor_schema
).latest()
entity = Entity.objects.create(
contributor=self.contributor,
descriptor_schema=descriptor_schema,
type=entity_type,
name=self.name,
tags=self.tags,
)
assign_contributor_permissions(entity)
elif entity_count == 1:
entity = entity_query.first()
copy_permissions(entity, self)
else:
logger.info("Skipping creation of entity due to multiple entities found.")
entity = None
if entity:
entity.data.add(self)
# Inherit collections from entity.
for collection in entity.collections.all():
collection.data.add(self) | [
"def",
"create_entity",
"(",
"self",
")",
":",
"entity_type",
"=",
"self",
".",
"process",
".",
"entity_type",
"# pylint: disable=no-member",
"entity_descriptor_schema",
"=",
"self",
".",
"process",
".",
"entity_descriptor_schema",
"# pylint: disable=no-member",
"entity_i... | Create entity if `flow_collection` is defined in process.
Following rules applies for adding `Data` object to `Entity`:
* Only add `Data object` to `Entity` if process has defined
`flow_collection` field
* Add object to existing `Entity`, if all parents that are part
of it (but not necessary all parents), are part of the same
`Entity`
* If parents belong to different `Entities` or do not belong to
any `Entity`, create new `Entity` | [
"Create",
"entity",
"if",
"flow_collection",
"is",
"defined",
"in",
"process",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L338-L401 | train | 45,314 |
genialis/resolwe | resolwe/flow/models/data.py | Data.save | def save(self, render_name=False, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Save the data model."""
if self.name != self._original_name:
self.named_by_user = True
create = self.pk is None
if create:
fill_with_defaults(self.input, self.process.input_schema) # pylint: disable=no-member
if not self.name:
self._render_name()
else:
self.named_by_user = True
self.checksum = get_data_checksum(
self.input, self.process.slug, self.process.version) # pylint: disable=no-member
elif render_name:
self._render_name()
self.save_storage(self.output, self.process.output_schema) # pylint: disable=no-member
if self.status != Data.STATUS_ERROR:
hydrate_size(self)
# If only specified fields are updated (e.g. in executor), size needs to be added
if 'update_fields' in kwargs:
kwargs['update_fields'].append('size')
# Input Data objects are validated only upon creation as they can be deleted later.
skip_missing_data = not create
validate_schema(
self.input, self.process.input_schema, skip_missing_data=skip_missing_data # pylint: disable=no-member
)
render_descriptor(self)
if self.descriptor_schema:
try:
validate_schema(self.descriptor, self.descriptor_schema.schema) # pylint: disable=no-member
self.descriptor_dirty = False
except DirtyError:
self.descriptor_dirty = True
elif self.descriptor and self.descriptor != {}:
raise ValueError("`descriptor_schema` must be defined if `descriptor` is given")
if self.status != Data.STATUS_ERROR:
output_schema = self.process.output_schema # pylint: disable=no-member
if self.status == Data.STATUS_DONE:
validate_schema(
self.output, output_schema, data_location=self.location, skip_missing_data=True
)
else:
validate_schema(
self.output, output_schema, data_location=self.location, test_required=False
)
with transaction.atomic():
self._perform_save(*args, **kwargs)
# We can only save dependencies after the data object has been saved. This
# is why a transaction block is needed and the save method must be called first.
if create:
self.save_dependencies(self.input, self.process.input_schema) # pylint: disable=no-member
self.create_entity() | python | def save(self, render_name=False, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Save the data model."""
if self.name != self._original_name:
self.named_by_user = True
create = self.pk is None
if create:
fill_with_defaults(self.input, self.process.input_schema) # pylint: disable=no-member
if not self.name:
self._render_name()
else:
self.named_by_user = True
self.checksum = get_data_checksum(
self.input, self.process.slug, self.process.version) # pylint: disable=no-member
elif render_name:
self._render_name()
self.save_storage(self.output, self.process.output_schema) # pylint: disable=no-member
if self.status != Data.STATUS_ERROR:
hydrate_size(self)
# If only specified fields are updated (e.g. in executor), size needs to be added
if 'update_fields' in kwargs:
kwargs['update_fields'].append('size')
# Input Data objects are validated only upon creation as they can be deleted later.
skip_missing_data = not create
validate_schema(
self.input, self.process.input_schema, skip_missing_data=skip_missing_data # pylint: disable=no-member
)
render_descriptor(self)
if self.descriptor_schema:
try:
validate_schema(self.descriptor, self.descriptor_schema.schema) # pylint: disable=no-member
self.descriptor_dirty = False
except DirtyError:
self.descriptor_dirty = True
elif self.descriptor and self.descriptor != {}:
raise ValueError("`descriptor_schema` must be defined if `descriptor` is given")
if self.status != Data.STATUS_ERROR:
output_schema = self.process.output_schema # pylint: disable=no-member
if self.status == Data.STATUS_DONE:
validate_schema(
self.output, output_schema, data_location=self.location, skip_missing_data=True
)
else:
validate_schema(
self.output, output_schema, data_location=self.location, test_required=False
)
with transaction.atomic():
self._perform_save(*args, **kwargs)
# We can only save dependencies after the data object has been saved. This
# is why a transaction block is needed and the save method must be called first.
if create:
self.save_dependencies(self.input, self.process.input_schema) # pylint: disable=no-member
self.create_entity() | [
"def",
"save",
"(",
"self",
",",
"render_name",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=keyword-arg-before-vararg",
"if",
"self",
".",
"name",
"!=",
"self",
".",
"_original_name",
":",
"self",
".",
"named_by_user... | Save the data model. | [
"Save",
"the",
"data",
"model",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L403-L466 | train | 45,315 |
genialis/resolwe | resolwe/flow/models/data.py | Data.delete | def delete(self, *args, **kwargs):
"""Delete the data model."""
# Store ids in memory as relations are also deleted with the Data object.
storage_ids = list(self.storages.values_list('pk', flat=True)) # pylint: disable=no-member
super().delete(*args, **kwargs)
Storage.objects.filter(pk__in=storage_ids, data=None).delete() | python | def delete(self, *args, **kwargs):
"""Delete the data model."""
# Store ids in memory as relations are also deleted with the Data object.
storage_ids = list(self.storages.values_list('pk', flat=True)) # pylint: disable=no-member
super().delete(*args, **kwargs)
Storage.objects.filter(pk__in=storage_ids, data=None).delete() | [
"def",
"delete",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Store ids in memory as relations are also deleted with the Data object.",
"storage_ids",
"=",
"list",
"(",
"self",
".",
"storages",
".",
"values_list",
"(",
"'pk'",
",",
"flat",
... | Delete the data model. | [
"Delete",
"the",
"data",
"model",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L472-L479 | train | 45,316 |
genialis/resolwe | resolwe/flow/models/data.py | Data._render_name | def _render_name(self):
"""Render data name.
The rendering is based on name template (`process.data_name`) and
input context.
"""
if not self.process.data_name or self.named_by_user: # pylint: disable=no-member
return
inputs = copy.deepcopy(self.input)
hydrate_input_references(inputs, self.process.input_schema, hydrate_values=False) # pylint: disable=no-member
template_context = inputs
try:
name = render_template(
self.process,
self.process.data_name, # pylint: disable=no-member
template_context
)
except EvaluationError:
name = '?'
self.name = name | python | def _render_name(self):
"""Render data name.
The rendering is based on name template (`process.data_name`) and
input context.
"""
if not self.process.data_name or self.named_by_user: # pylint: disable=no-member
return
inputs = copy.deepcopy(self.input)
hydrate_input_references(inputs, self.process.input_schema, hydrate_values=False) # pylint: disable=no-member
template_context = inputs
try:
name = render_template(
self.process,
self.process.data_name, # pylint: disable=no-member
template_context
)
except EvaluationError:
name = '?'
self.name = name | [
"def",
"_render_name",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"process",
".",
"data_name",
"or",
"self",
".",
"named_by_user",
":",
"# pylint: disable=no-member",
"return",
"inputs",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"input",
")",
"... | Render data name.
The rendering is based on name template (`process.data_name`) and
input context. | [
"Render",
"data",
"name",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L529-L552 | train | 45,317 |
genialis/resolwe | resolwe/flow/models/data.py | DataLocation.get_path | def get_path(self, prefix=None, filename=None):
"""Compose data location path."""
prefix = prefix or settings.FLOW_EXECUTOR['DATA_DIR']
path = os.path.join(prefix, self.subpath)
if filename:
path = os.path.join(path, filename)
return path | python | def get_path(self, prefix=None, filename=None):
"""Compose data location path."""
prefix = prefix or settings.FLOW_EXECUTOR['DATA_DIR']
path = os.path.join(prefix, self.subpath)
if filename:
path = os.path.join(path, filename)
return path | [
"def",
"get_path",
"(",
"self",
",",
"prefix",
"=",
"None",
",",
"filename",
"=",
"None",
")",
":",
"prefix",
"=",
"prefix",
"or",
"settings",
".",
"FLOW_EXECUTOR",
"[",
"'DATA_DIR'",
"]",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
"... | Compose data location path. | [
"Compose",
"data",
"location",
"path",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L584-L592 | train | 45,318 |
genialis/resolwe | resolwe/flow/models/data.py | DataLocation.get_runtime_path | def get_runtime_path(self, filename=None):
"""Compose data runtime location path."""
return self.get_path(prefix=settings.FLOW_EXECUTOR['RUNTIME_DIR'], filename=filename) | python | def get_runtime_path(self, filename=None):
"""Compose data runtime location path."""
return self.get_path(prefix=settings.FLOW_EXECUTOR['RUNTIME_DIR'], filename=filename) | [
"def",
"get_runtime_path",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"return",
"self",
".",
"get_path",
"(",
"prefix",
"=",
"settings",
".",
"FLOW_EXECUTOR",
"[",
"'RUNTIME_DIR'",
"]",
",",
"filename",
"=",
"filename",
")"
] | Compose data runtime location path. | [
"Compose",
"data",
"runtime",
"location",
"path",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L594-L596 | train | 45,319 |
genialis/resolwe | resolwe/flow/serializers/entity.py | EntitySerializer.get_data | def get_data(self, entity):
"""Return serialized list of data objects on entity that user has `view` permission on."""
data = self._filter_queryset('view_data', entity.data.all())
return self._serialize_data(data) | python | def get_data(self, entity):
"""Return serialized list of data objects on entity that user has `view` permission on."""
data = self._filter_queryset('view_data', entity.data.all())
return self._serialize_data(data) | [
"def",
"get_data",
"(",
"self",
",",
"entity",
")",
":",
"data",
"=",
"self",
".",
"_filter_queryset",
"(",
"'view_data'",
",",
"entity",
".",
"data",
".",
"all",
"(",
")",
")",
"return",
"self",
".",
"_serialize_data",
"(",
"data",
")"
] | Return serialized list of data objects on entity that user has `view` permission on. | [
"Return",
"serialized",
"list",
"of",
"data",
"objects",
"on",
"entity",
"that",
"user",
"has",
"view",
"permission",
"on",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/entity.py#L25-L29 | train | 45,320 |
genialis/resolwe | resolwe/process/descriptor.py | ProcessDescriptor.validate | def validate(self):
"""Validate process descriptor."""
required_fields = ('slug', 'name', 'process_type', 'version')
for field in required_fields:
if getattr(self.metadata, field, None) is None:
raise ValidationError("process '{}' is missing required meta attribute: {}".format(
self.metadata.slug or '<unknown>', field))
if not PROCESSOR_TYPE_RE.match(self.metadata.process_type):
raise ValidationError("process '{}' has invalid type: {}".format(
self.metadata.slug, self.metadata.process_type)) | python | def validate(self):
"""Validate process descriptor."""
required_fields = ('slug', 'name', 'process_type', 'version')
for field in required_fields:
if getattr(self.metadata, field, None) is None:
raise ValidationError("process '{}' is missing required meta attribute: {}".format(
self.metadata.slug or '<unknown>', field))
if not PROCESSOR_TYPE_RE.match(self.metadata.process_type):
raise ValidationError("process '{}' has invalid type: {}".format(
self.metadata.slug, self.metadata.process_type)) | [
"def",
"validate",
"(",
"self",
")",
":",
"required_fields",
"=",
"(",
"'slug'",
",",
"'name'",
",",
"'process_type'",
",",
"'version'",
")",
"for",
"field",
"in",
"required_fields",
":",
"if",
"getattr",
"(",
"self",
".",
"metadata",
",",
"field",
",",
... | Validate process descriptor. | [
"Validate",
"process",
"descriptor",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/descriptor.py#L61-L71 | train | 45,321 |
genialis/resolwe | resolwe/process/descriptor.py | ProcessDescriptor.to_schema | def to_schema(self):
"""Return process schema for this process."""
process_type = self.metadata.process_type
if not process_type.endswith(':'):
process_type = '{}:'.format(process_type)
schema = {
'slug': self.metadata.slug,
'name': self.metadata.name,
'type': process_type,
'version': self.metadata.version,
'data_name': '',
'requirements': {
'executor': {
'docker': {
'image': 'resolwe/base:ubuntu-18.04',
},
},
},
}
if self.metadata.description is not None:
schema['description'] = self.metadata.description
if self.metadata.category is not None:
schema['category'] = self.metadata.category
if self.metadata.scheduling_class is not None:
schema['scheduling_class'] = self.metadata.scheduling_class
if self.metadata.persistence is not None:
schema['persistence'] = self.metadata.persistence
if self.metadata.requirements is not None:
schema['requirements'] = self.metadata.requirements
if self.metadata.data_name is not None:
schema['data_name'] = self.metadata.data_name
if self.metadata.entity is not None:
schema['entity'] = self.metadata.entity
if self.inputs:
schema['input'] = []
for field in self.inputs.values():
schema['input'].append(field.to_schema())
if self.outputs:
schema['output'] = []
for field in self.outputs.values():
schema['output'].append(field.to_schema())
schema['run'] = {
'language': 'python',
'program': self.source or '',
}
return schema | python | def to_schema(self):
"""Return process schema for this process."""
process_type = self.metadata.process_type
if not process_type.endswith(':'):
process_type = '{}:'.format(process_type)
schema = {
'slug': self.metadata.slug,
'name': self.metadata.name,
'type': process_type,
'version': self.metadata.version,
'data_name': '',
'requirements': {
'executor': {
'docker': {
'image': 'resolwe/base:ubuntu-18.04',
},
},
},
}
if self.metadata.description is not None:
schema['description'] = self.metadata.description
if self.metadata.category is not None:
schema['category'] = self.metadata.category
if self.metadata.scheduling_class is not None:
schema['scheduling_class'] = self.metadata.scheduling_class
if self.metadata.persistence is not None:
schema['persistence'] = self.metadata.persistence
if self.metadata.requirements is not None:
schema['requirements'] = self.metadata.requirements
if self.metadata.data_name is not None:
schema['data_name'] = self.metadata.data_name
if self.metadata.entity is not None:
schema['entity'] = self.metadata.entity
if self.inputs:
schema['input'] = []
for field in self.inputs.values():
schema['input'].append(field.to_schema())
if self.outputs:
schema['output'] = []
for field in self.outputs.values():
schema['output'].append(field.to_schema())
schema['run'] = {
'language': 'python',
'program': self.source or '',
}
return schema | [
"def",
"to_schema",
"(",
"self",
")",
":",
"process_type",
"=",
"self",
".",
"metadata",
".",
"process_type",
"if",
"not",
"process_type",
".",
"endswith",
"(",
"':'",
")",
":",
"process_type",
"=",
"'{}:'",
".",
"format",
"(",
"process_type",
")",
"schema... | Return process schema for this process. | [
"Return",
"process",
"schema",
"for",
"this",
"process",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/descriptor.py#L73-L124 | train | 45,322 |
genialis/resolwe | resolwe/flow/executors/docker/prepare.py | FlowExecutorPreparer.post_register_hook | def post_register_hook(self, verbosity=1):
"""Pull Docker images needed by processes after registering."""
if not getattr(settings, 'FLOW_DOCKER_DONT_PULL', False):
call_command('list_docker_images', pull=True, verbosity=verbosity) | python | def post_register_hook(self, verbosity=1):
"""Pull Docker images needed by processes after registering."""
if not getattr(settings, 'FLOW_DOCKER_DONT_PULL', False):
call_command('list_docker_images', pull=True, verbosity=verbosity) | [
"def",
"post_register_hook",
"(",
"self",
",",
"verbosity",
"=",
"1",
")",
":",
"if",
"not",
"getattr",
"(",
"settings",
",",
"'FLOW_DOCKER_DONT_PULL'",
",",
"False",
")",
":",
"call_command",
"(",
"'list_docker_images'",
",",
"pull",
"=",
"True",
",",
"verb... | Pull Docker images needed by processes after registering. | [
"Pull",
"Docker",
"images",
"needed",
"by",
"processes",
"after",
"registering",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/executors/docker/prepare.py#L24-L27 | train | 45,323 |
DataONEorg/d1_python | lib_common/src/d1_common/date_time.py | are_equal | def are_equal(a_dt, b_dt, round_sec=1):
"""Determine if two datetimes are equal with fuzz factor.
A naive datetime (no timezone information) is assumed to be in in UTC.
Args:
a_dt: datetime
Timestamp to compare.
b_dt: datetime
Timestamp to compare.
round_sec: int or float
Round the timestamps to the closest second divisible by this value before
comparing them.
E.g.:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
Timestamps may lose resolution or otherwise change slightly as they go through
various transformations and storage systems. This again may cause timestamps
that
have been processed in different systems to fail an exact equality compare even
if
they were initially the same timestamp. This rounding avoids such problems as
long
as the error introduced to the original timestamp is not higher than the
rounding
value. Of course, the rounding also causes a loss in resolution in the values
compared, so should be kept as low as possible. The default value of 1 second
should
be a good tradeoff in most cases.
Returns:
bool
- **True**: If the two datetimes are equal after being rounded by
``round_sec``.
"""
ra_dt = round_to_nearest(a_dt, round_sec)
rb_dt = round_to_nearest(b_dt, round_sec)
logger.debug('Rounded:')
logger.debug('{} -> {}'.format(a_dt, ra_dt))
logger.debug('{} -> {}'.format(b_dt, rb_dt))
return normalize_datetime_to_utc(ra_dt) == normalize_datetime_to_utc(rb_dt) | python | def are_equal(a_dt, b_dt, round_sec=1):
"""Determine if two datetimes are equal with fuzz factor.
A naive datetime (no timezone information) is assumed to be in in UTC.
Args:
a_dt: datetime
Timestamp to compare.
b_dt: datetime
Timestamp to compare.
round_sec: int or float
Round the timestamps to the closest second divisible by this value before
comparing them.
E.g.:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
Timestamps may lose resolution or otherwise change slightly as they go through
various transformations and storage systems. This again may cause timestamps
that
have been processed in different systems to fail an exact equality compare even
if
they were initially the same timestamp. This rounding avoids such problems as
long
as the error introduced to the original timestamp is not higher than the
rounding
value. Of course, the rounding also causes a loss in resolution in the values
compared, so should be kept as low as possible. The default value of 1 second
should
be a good tradeoff in most cases.
Returns:
bool
- **True**: If the two datetimes are equal after being rounded by
``round_sec``.
"""
ra_dt = round_to_nearest(a_dt, round_sec)
rb_dt = round_to_nearest(b_dt, round_sec)
logger.debug('Rounded:')
logger.debug('{} -> {}'.format(a_dt, ra_dt))
logger.debug('{} -> {}'.format(b_dt, rb_dt))
return normalize_datetime_to_utc(ra_dt) == normalize_datetime_to_utc(rb_dt) | [
"def",
"are_equal",
"(",
"a_dt",
",",
"b_dt",
",",
"round_sec",
"=",
"1",
")",
":",
"ra_dt",
"=",
"round_to_nearest",
"(",
"a_dt",
",",
"round_sec",
")",
"rb_dt",
"=",
"round_to_nearest",
"(",
"b_dt",
",",
"round_sec",
")",
"logger",
".",
"debug",
"(",
... | Determine if two datetimes are equal with fuzz factor.
A naive datetime (no timezone information) is assumed to be in in UTC.
Args:
a_dt: datetime
Timestamp to compare.
b_dt: datetime
Timestamp to compare.
round_sec: int or float
Round the timestamps to the closest second divisible by this value before
comparing them.
E.g.:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
Timestamps may lose resolution or otherwise change slightly as they go through
various transformations and storage systems. This again may cause timestamps
that
have been processed in different systems to fail an exact equality compare even
if
they were initially the same timestamp. This rounding avoids such problems as
long
as the error introduced to the original timestamp is not higher than the
rounding
value. Of course, the rounding also causes a loss in resolution in the values
compared, so should be kept as low as possible. The default value of 1 second
should
be a good tradeoff in most cases.
Returns:
bool
- **True**: If the two datetimes are equal after being rounded by
``round_sec``. | [
"Determine",
"if",
"two",
"datetimes",
"are",
"equal",
"with",
"fuzz",
"factor",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/date_time.py#L191-L238 | train | 45,324 |
DataONEorg/d1_python | lib_common/src/d1_common/date_time.py | http_datetime_str_from_dt | def http_datetime_str_from_dt(dt):
"""Format datetime to HTTP Full Date format.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
str
The returned format is a is fixed-length subset of that defined by RFC 1123 and
is
the preferred format for use in the HTTP Date header. E.g.:
``Sat, 02 Jan 1999 03:04:05 GMT``
See Also:
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
"""
epoch_seconds = ts_from_dt(dt)
return email.utils.formatdate(epoch_seconds, localtime=False, usegmt=True) | python | def http_datetime_str_from_dt(dt):
"""Format datetime to HTTP Full Date format.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
str
The returned format is a is fixed-length subset of that defined by RFC 1123 and
is
the preferred format for use in the HTTP Date header. E.g.:
``Sat, 02 Jan 1999 03:04:05 GMT``
See Also:
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
"""
epoch_seconds = ts_from_dt(dt)
return email.utils.formatdate(epoch_seconds, localtime=False, usegmt=True) | [
"def",
"http_datetime_str_from_dt",
"(",
"dt",
")",
":",
"epoch_seconds",
"=",
"ts_from_dt",
"(",
"dt",
")",
"return",
"email",
".",
"utils",
".",
"formatdate",
"(",
"epoch_seconds",
",",
"localtime",
"=",
"False",
",",
"usegmt",
"=",
"True",
")"
] | Format datetime to HTTP Full Date format.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
str
The returned format is a is fixed-length subset of that defined by RFC 1123 and
is
the preferred format for use in the HTTP Date header. E.g.:
``Sat, 02 Jan 1999 03:04:05 GMT``
See Also:
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1 | [
"Format",
"datetime",
"to",
"HTTP",
"Full",
"Date",
"format",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/date_time.py#L295-L317 | train | 45,325 |
DataONEorg/d1_python | lib_common/src/d1_common/date_time.py | dt_from_http_datetime_str | def dt_from_http_datetime_str(http_full_datetime):
"""Parse HTTP Full Date formats and return as datetime.
Args:
http_full_datetime : str
Each of the allowed formats are supported:
- Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
- Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
- Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
HTTP Full Dates are always in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
See Also:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
"""
date_parts = list(email.utils.parsedate(http_full_datetime)[:6])
year = date_parts[0]
if year <= 99:
year = year + 2000 if year < 50 else year + 1900
return create_utc_datetime(year, *date_parts[1:]) | python | def dt_from_http_datetime_str(http_full_datetime):
"""Parse HTTP Full Date formats and return as datetime.
Args:
http_full_datetime : str
Each of the allowed formats are supported:
- Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
- Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
- Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
HTTP Full Dates are always in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
See Also:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
"""
date_parts = list(email.utils.parsedate(http_full_datetime)[:6])
year = date_parts[0]
if year <= 99:
year = year + 2000 if year < 50 else year + 1900
return create_utc_datetime(year, *date_parts[1:]) | [
"def",
"dt_from_http_datetime_str",
"(",
"http_full_datetime",
")",
":",
"date_parts",
"=",
"list",
"(",
"email",
".",
"utils",
".",
"parsedate",
"(",
"http_full_datetime",
")",
"[",
":",
"6",
"]",
")",
"year",
"=",
"date_parts",
"[",
"0",
"]",
"if",
"year... | Parse HTTP Full Date formats and return as datetime.
Args:
http_full_datetime : str
Each of the allowed formats are supported:
- Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
- Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
- Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
HTTP Full Dates are always in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
See Also:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1 | [
"Parse",
"HTTP",
"Full",
"Date",
"formats",
"and",
"return",
"as",
"datetime",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/date_time.py#L339-L364 | train | 45,326 |
DataONEorg/d1_python | lib_common/src/d1_common/date_time.py | normalize_datetime_to_utc | def normalize_datetime_to_utc(dt):
"""Adjust datetime to UTC.
Apply the timezone offset to the datetime and set the timezone to UTC.
This is a no-op if the datetime is already in UTC.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
Notes:
This forces a new object to be returned, which fixes an issue with
serialization to XML in PyXB. PyXB uses a mixin together with
datetime to handle the XML xs:dateTime. That type keeps track of
timezone information included in the original XML doc, which conflicts if we
return it here as part of a datetime mixin.
See Also:
``cast_naive_datetime_to_tz()``
"""
return datetime.datetime(
*dt.utctimetuple()[:6], microsecond=dt.microsecond, tzinfo=datetime.timezone.utc
) | python | def normalize_datetime_to_utc(dt):
"""Adjust datetime to UTC.
Apply the timezone offset to the datetime and set the timezone to UTC.
This is a no-op if the datetime is already in UTC.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
Notes:
This forces a new object to be returned, which fixes an issue with
serialization to XML in PyXB. PyXB uses a mixin together with
datetime to handle the XML xs:dateTime. That type keeps track of
timezone information included in the original XML doc, which conflicts if we
return it here as part of a datetime mixin.
See Also:
``cast_naive_datetime_to_tz()``
"""
return datetime.datetime(
*dt.utctimetuple()[:6], microsecond=dt.microsecond, tzinfo=datetime.timezone.utc
) | [
"def",
"normalize_datetime_to_utc",
"(",
"dt",
")",
":",
"return",
"datetime",
".",
"datetime",
"(",
"*",
"dt",
".",
"utctimetuple",
"(",
")",
"[",
":",
"6",
"]",
",",
"microsecond",
"=",
"dt",
".",
"microsecond",
",",
"tzinfo",
"=",
"datetime",
".",
"... | Adjust datetime to UTC.
Apply the timezone offset to the datetime and set the timezone to UTC.
This is a no-op if the datetime is already in UTC.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
datetime
The returned datetime is always timezone aware and in UTC.
Notes:
This forces a new object to be returned, which fixes an issue with
serialization to XML in PyXB. PyXB uses a mixin together with
datetime to handle the XML xs:dateTime. That type keeps track of
timezone information included in the original XML doc, which conflicts if we
return it here as part of a datetime mixin.
See Also:
``cast_naive_datetime_to_tz()`` | [
"Adjust",
"datetime",
"to",
"UTC",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/date_time.py#L399-L429 | train | 45,327 |
DataONEorg/d1_python | lib_common/src/d1_common/date_time.py | cast_naive_datetime_to_tz | def cast_naive_datetime_to_tz(dt, tz=UTC()):
"""If datetime is tz-naive, set it to ``tz``. If datetime is tz-aware, return it
unmodified.
Args:
dt : datetime
tz-naive or tz-aware datetime.
tz : datetime.tzinfo
The timezone to which to adjust tz-naive datetime.
Returns:
datetime
tz-aware datetime.
Warning:
This will change the actual moment in time that is represented if the datetime is
naive and represents a date and time not in ``tz``.
See Also:
``normalize_datetime_to_utc()``
"""
if has_tz(dt):
return dt
return dt.replace(tzinfo=tz) | python | def cast_naive_datetime_to_tz(dt, tz=UTC()):
"""If datetime is tz-naive, set it to ``tz``. If datetime is tz-aware, return it
unmodified.
Args:
dt : datetime
tz-naive or tz-aware datetime.
tz : datetime.tzinfo
The timezone to which to adjust tz-naive datetime.
Returns:
datetime
tz-aware datetime.
Warning:
This will change the actual moment in time that is represented if the datetime is
naive and represents a date and time not in ``tz``.
See Also:
``normalize_datetime_to_utc()``
"""
if has_tz(dt):
return dt
return dt.replace(tzinfo=tz) | [
"def",
"cast_naive_datetime_to_tz",
"(",
"dt",
",",
"tz",
"=",
"UTC",
"(",
")",
")",
":",
"if",
"has_tz",
"(",
"dt",
")",
":",
"return",
"dt",
"return",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"tz",
")"
] | If datetime is tz-naive, set it to ``tz``. If datetime is tz-aware, return it
unmodified.
Args:
dt : datetime
tz-naive or tz-aware datetime.
tz : datetime.tzinfo
The timezone to which to adjust tz-naive datetime.
Returns:
datetime
tz-aware datetime.
Warning:
This will change the actual moment in time that is represented if the datetime is
naive and represents a date and time not in ``tz``.
See Also:
``normalize_datetime_to_utc()`` | [
"If",
"datetime",
"is",
"tz",
"-",
"naive",
"set",
"it",
"to",
"tz",
".",
"If",
"datetime",
"is",
"tz",
"-",
"aware",
"return",
"it",
"unmodified",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/date_time.py#L432-L457 | train | 45,328 |
DataONEorg/d1_python | lib_common/src/d1_common/date_time.py | round_to_nearest | def round_to_nearest(dt, n_round_sec=1.0):
"""Round datetime up or down to nearest divisor.
Round datetime up or down to nearest number of seconds that divides evenly by
the divisor.
Any timezone is preserved but ignored in the rounding.
Args:
dt: datetime
n_round_sec : int or float
Divisor for rounding
Examples:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
"""
ts = ts_from_dt(strip_timezone(dt)) + n_round_sec / 2.0
res = dt_from_ts(ts - (ts % n_round_sec))
return res.replace(tzinfo=dt.tzinfo) | python | def round_to_nearest(dt, n_round_sec=1.0):
"""Round datetime up or down to nearest divisor.
Round datetime up or down to nearest number of seconds that divides evenly by
the divisor.
Any timezone is preserved but ignored in the rounding.
Args:
dt: datetime
n_round_sec : int or float
Divisor for rounding
Examples:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
"""
ts = ts_from_dt(strip_timezone(dt)) + n_round_sec / 2.0
res = dt_from_ts(ts - (ts % n_round_sec))
return res.replace(tzinfo=dt.tzinfo) | [
"def",
"round_to_nearest",
"(",
"dt",
",",
"n_round_sec",
"=",
"1.0",
")",
":",
"ts",
"=",
"ts_from_dt",
"(",
"strip_timezone",
"(",
"dt",
")",
")",
"+",
"n_round_sec",
"/",
"2.0",
"res",
"=",
"dt_from_ts",
"(",
"ts",
"-",
"(",
"ts",
"%",
"n_round_sec"... | Round datetime up or down to nearest divisor.
Round datetime up or down to nearest number of seconds that divides evenly by
the divisor.
Any timezone is preserved but ignored in the rounding.
Args:
dt: datetime
n_round_sec : int or float
Divisor for rounding
Examples:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute. | [
"Round",
"datetime",
"up",
"or",
"down",
"to",
"nearest",
"divisor",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/date_time.py#L545-L567 | train | 45,329 |
genialis/resolwe | resolwe/flow/managers/workload_connectors/slurm.py | Connector.submit | def submit(self, data, runtime_dir, argv):
"""Run process with SLURM.
For details, see
:meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
"""
limits = data.process.get_resource_limits()
logger.debug(__(
"Connector '{}' running for Data with id {} ({}).",
self.__class__.__module__,
data.id,
repr(argv)
))
# Compute target partition.
partition = getattr(settings, 'FLOW_SLURM_PARTITION_DEFAULT', None)
if data.process.slug in getattr(settings, 'FLOW_SLURM_PARTITION_OVERRIDES', {}):
partition = settings.FLOW_SLURM_PARTITION_OVERRIDES[data.process.slug]
try:
# Make sure the resulting file is executable on creation.
script_path = os.path.join(runtime_dir, 'slurm.sh')
file_descriptor = os.open(script_path, os.O_WRONLY | os.O_CREAT, mode=0o555)
with os.fdopen(file_descriptor, 'wt') as script:
script.write('#!/bin/bash\n')
script.write('#SBATCH --mem={}M\n'.format(limits['memory'] + EXECUTOR_MEMORY_OVERHEAD))
script.write('#SBATCH --cpus-per-task={}\n'.format(limits['cores']))
if partition:
script.write('#SBATCH --partition={}\n'.format(partition))
# Render the argument vector into a command line.
line = ' '.join(map(shlex.quote, argv))
script.write(line + '\n')
command = ['/usr/bin/env', 'sbatch', script_path]
subprocess.Popen(
command,
cwd=runtime_dir,
stdin=subprocess.DEVNULL
).wait()
except OSError as err:
logger.error(__(
"OSError occurred while preparing SLURM script for Data {}: {}",
data.id, err
)) | python | def submit(self, data, runtime_dir, argv):
"""Run process with SLURM.
For details, see
:meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
"""
limits = data.process.get_resource_limits()
logger.debug(__(
"Connector '{}' running for Data with id {} ({}).",
self.__class__.__module__,
data.id,
repr(argv)
))
# Compute target partition.
partition = getattr(settings, 'FLOW_SLURM_PARTITION_DEFAULT', None)
if data.process.slug in getattr(settings, 'FLOW_SLURM_PARTITION_OVERRIDES', {}):
partition = settings.FLOW_SLURM_PARTITION_OVERRIDES[data.process.slug]
try:
# Make sure the resulting file is executable on creation.
script_path = os.path.join(runtime_dir, 'slurm.sh')
file_descriptor = os.open(script_path, os.O_WRONLY | os.O_CREAT, mode=0o555)
with os.fdopen(file_descriptor, 'wt') as script:
script.write('#!/bin/bash\n')
script.write('#SBATCH --mem={}M\n'.format(limits['memory'] + EXECUTOR_MEMORY_OVERHEAD))
script.write('#SBATCH --cpus-per-task={}\n'.format(limits['cores']))
if partition:
script.write('#SBATCH --partition={}\n'.format(partition))
# Render the argument vector into a command line.
line = ' '.join(map(shlex.quote, argv))
script.write(line + '\n')
command = ['/usr/bin/env', 'sbatch', script_path]
subprocess.Popen(
command,
cwd=runtime_dir,
stdin=subprocess.DEVNULL
).wait()
except OSError as err:
logger.error(__(
"OSError occurred while preparing SLURM script for Data {}: {}",
data.id, err
)) | [
"def",
"submit",
"(",
"self",
",",
"data",
",",
"runtime_dir",
",",
"argv",
")",
":",
"limits",
"=",
"data",
".",
"process",
".",
"get_resource_limits",
"(",
")",
"logger",
".",
"debug",
"(",
"__",
"(",
"\"Connector '{}' running for Data with id {} ({}).\"",
"... | Run process with SLURM.
For details, see
:meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`. | [
"Run",
"process",
"with",
"SLURM",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/workload_connectors/slurm.py#L29-L73 | train | 45,330 |
genialis/resolwe | resolwe/flow/utils/purge.py | get_purge_files | def get_purge_files(root, output, output_schema, descriptor, descriptor_schema):
"""Get files to purge."""
def remove_file(fn, paths):
"""From paths remove fn and dirs before fn in dir tree."""
while fn:
for i in range(len(paths) - 1, -1, -1):
if fn == paths[i]:
paths.pop(i)
fn, _ = os.path.split(fn)
def remove_tree(fn, paths):
"""From paths remove fn and dirs before or after fn in dir tree."""
for i in range(len(paths) - 1, -1, -1):
head = paths[i]
while head:
if fn == head:
paths.pop(i)
break
head, _ = os.path.split(head)
remove_file(fn, paths)
def subfiles(root):
"""Extend unreferenced list with all subdirs and files in top dir."""
subs = []
for path, dirs, files in os.walk(root, topdown=False):
path = path[len(root) + 1:]
subs.extend(os.path.join(path, f) for f in files)
subs.extend(os.path.join(path, d) for d in dirs)
return subs
unreferenced_files = subfiles(root)
remove_file('jsonout.txt', unreferenced_files)
remove_file('stderr.txt', unreferenced_files)
remove_file('stdout.txt', unreferenced_files)
meta_fields = [
[output, output_schema],
[descriptor, descriptor_schema]
]
for meta_field, meta_field_schema in meta_fields:
for field_schema, fields in iterate_fields(meta_field, meta_field_schema):
if 'type' in field_schema:
field_type = field_schema['type']
field_name = field_schema['name']
# Remove basic:file: entries
if field_type.startswith('basic:file:'):
remove_file(fields[field_name]['file'], unreferenced_files)
# Remove list:basic:file: entries
elif field_type.startswith('list:basic:file:'):
for field in fields[field_name]:
remove_file(field['file'], unreferenced_files)
# Remove basic:dir: entries
elif field_type.startswith('basic:dir:'):
remove_tree(fields[field_name]['dir'], unreferenced_files)
# Remove list:basic:dir: entries
elif field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
remove_tree(field['dir'], unreferenced_files)
# Remove refs entries
if field_type.startswith('basic:file:') or field_type.startswith('basic:dir:'):
for ref in fields[field_name].get('refs', []):
remove_tree(ref, unreferenced_files)
elif field_type.startswith('list:basic:file:') or field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
for ref in field.get('refs', []):
remove_tree(ref, unreferenced_files)
return set([os.path.join(root, filename) for filename in unreferenced_files]) | python | def get_purge_files(root, output, output_schema, descriptor, descriptor_schema):
"""Get files to purge."""
def remove_file(fn, paths):
"""From paths remove fn and dirs before fn in dir tree."""
while fn:
for i in range(len(paths) - 1, -1, -1):
if fn == paths[i]:
paths.pop(i)
fn, _ = os.path.split(fn)
def remove_tree(fn, paths):
"""From paths remove fn and dirs before or after fn in dir tree."""
for i in range(len(paths) - 1, -1, -1):
head = paths[i]
while head:
if fn == head:
paths.pop(i)
break
head, _ = os.path.split(head)
remove_file(fn, paths)
def subfiles(root):
"""Extend unreferenced list with all subdirs and files in top dir."""
subs = []
for path, dirs, files in os.walk(root, topdown=False):
path = path[len(root) + 1:]
subs.extend(os.path.join(path, f) for f in files)
subs.extend(os.path.join(path, d) for d in dirs)
return subs
unreferenced_files = subfiles(root)
remove_file('jsonout.txt', unreferenced_files)
remove_file('stderr.txt', unreferenced_files)
remove_file('stdout.txt', unreferenced_files)
meta_fields = [
[output, output_schema],
[descriptor, descriptor_schema]
]
for meta_field, meta_field_schema in meta_fields:
for field_schema, fields in iterate_fields(meta_field, meta_field_schema):
if 'type' in field_schema:
field_type = field_schema['type']
field_name = field_schema['name']
# Remove basic:file: entries
if field_type.startswith('basic:file:'):
remove_file(fields[field_name]['file'], unreferenced_files)
# Remove list:basic:file: entries
elif field_type.startswith('list:basic:file:'):
for field in fields[field_name]:
remove_file(field['file'], unreferenced_files)
# Remove basic:dir: entries
elif field_type.startswith('basic:dir:'):
remove_tree(fields[field_name]['dir'], unreferenced_files)
# Remove list:basic:dir: entries
elif field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
remove_tree(field['dir'], unreferenced_files)
# Remove refs entries
if field_type.startswith('basic:file:') or field_type.startswith('basic:dir:'):
for ref in fields[field_name].get('refs', []):
remove_tree(ref, unreferenced_files)
elif field_type.startswith('list:basic:file:') or field_type.startswith('list:basic:dir:'):
for field in fields[field_name]:
for ref in field.get('refs', []):
remove_tree(ref, unreferenced_files)
return set([os.path.join(root, filename) for filename in unreferenced_files]) | [
"def",
"get_purge_files",
"(",
"root",
",",
"output",
",",
"output_schema",
",",
"descriptor",
",",
"descriptor_schema",
")",
":",
"def",
"remove_file",
"(",
"fn",
",",
"paths",
")",
":",
"\"\"\"From paths remove fn and dirs before fn in dir tree.\"\"\"",
"while",
"fn... | Get files to purge. | [
"Get",
"files",
"to",
"purge",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/purge.py#L21-L97 | train | 45,331 |
genialis/resolwe | resolwe/flow/utils/purge.py | location_purge | def location_purge(location_id, delete=False, verbosity=0):
"""Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files.
"""
try:
location = DataLocation.objects.get(id=location_id)
except DataLocation.DoesNotExist:
logger.warning("Data location does not exist", extra={'location_id': location_id})
return
unreferenced_files = set()
purged_data = Data.objects.none()
referenced_by_data = location.data.exists()
if referenced_by_data:
if location.data.exclude(status__in=[Data.STATUS_DONE, Data.STATUS_ERROR]).exists():
return
# Perform cleanup.
purge_files_sets = list()
purged_data = location.data.all()
for data in purged_data:
purge_files_sets.append(get_purge_files(
location.get_path(),
data.output,
data.process.output_schema,
data.descriptor,
getattr(data.descriptor_schema, 'schema', [])
))
intersected_files = set.intersection(*purge_files_sets) if purge_files_sets else set()
unreferenced_files.update(intersected_files)
else:
# Remove data directory.
unreferenced_files.add(location.get_path())
unreferenced_files.add(location.get_runtime_path())
if verbosity >= 1:
# Print unreferenced files
if unreferenced_files:
logger.info(__("Unreferenced files for location id {} ({}):", location_id, len(unreferenced_files)))
for name in unreferenced_files:
logger.info(__(" {}", name))
else:
logger.info(__("No unreferenced files for location id {}", location_id))
# Go through unreferenced files and delete them.
if delete:
for name in unreferenced_files:
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
elif os.path.isdir(name):
shutil.rmtree(name)
location.purged = True
location.save()
if not referenced_by_data:
location.delete() | python | def location_purge(location_id, delete=False, verbosity=0):
"""Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files.
"""
try:
location = DataLocation.objects.get(id=location_id)
except DataLocation.DoesNotExist:
logger.warning("Data location does not exist", extra={'location_id': location_id})
return
unreferenced_files = set()
purged_data = Data.objects.none()
referenced_by_data = location.data.exists()
if referenced_by_data:
if location.data.exclude(status__in=[Data.STATUS_DONE, Data.STATUS_ERROR]).exists():
return
# Perform cleanup.
purge_files_sets = list()
purged_data = location.data.all()
for data in purged_data:
purge_files_sets.append(get_purge_files(
location.get_path(),
data.output,
data.process.output_schema,
data.descriptor,
getattr(data.descriptor_schema, 'schema', [])
))
intersected_files = set.intersection(*purge_files_sets) if purge_files_sets else set()
unreferenced_files.update(intersected_files)
else:
# Remove data directory.
unreferenced_files.add(location.get_path())
unreferenced_files.add(location.get_runtime_path())
if verbosity >= 1:
# Print unreferenced files
if unreferenced_files:
logger.info(__("Unreferenced files for location id {} ({}):", location_id, len(unreferenced_files)))
for name in unreferenced_files:
logger.info(__(" {}", name))
else:
logger.info(__("No unreferenced files for location id {}", location_id))
# Go through unreferenced files and delete them.
if delete:
for name in unreferenced_files:
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
elif os.path.isdir(name):
shutil.rmtree(name)
location.purged = True
location.save()
if not referenced_by_data:
location.delete() | [
"def",
"location_purge",
"(",
"location_id",
",",
"delete",
"=",
"False",
",",
"verbosity",
"=",
"0",
")",
":",
"try",
":",
"location",
"=",
"DataLocation",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"location_id",
")",
"except",
"DataLocation",
".",
"D... | Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files. | [
"Print",
"and",
"conditionally",
"delete",
"files",
"not",
"referenced",
"by",
"meta",
"data",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/purge.py#L100-L161 | train | 45,332 |
genialis/resolwe | resolwe/flow/utils/purge.py | _storage_purge_all | def _storage_purge_all(delete=False, verbosity=0):
"""Purge unreferenced storages."""
orphaned_storages = Storage.objects.filter(data=None)
if verbosity >= 1:
if orphaned_storages.exists():
logger.info(__("Unreferenced storages ({}):", orphaned_storages.count()))
for storage_id in orphaned_storages.values_list('id', flat=True):
logger.info(__(" {}", storage_id))
else:
logger.info("No unreferenced storages")
if delete:
orphaned_storages.delete() | python | def _storage_purge_all(delete=False, verbosity=0):
"""Purge unreferenced storages."""
orphaned_storages = Storage.objects.filter(data=None)
if verbosity >= 1:
if orphaned_storages.exists():
logger.info(__("Unreferenced storages ({}):", orphaned_storages.count()))
for storage_id in orphaned_storages.values_list('id', flat=True):
logger.info(__(" {}", storage_id))
else:
logger.info("No unreferenced storages")
if delete:
orphaned_storages.delete() | [
"def",
"_storage_purge_all",
"(",
"delete",
"=",
"False",
",",
"verbosity",
"=",
"0",
")",
":",
"orphaned_storages",
"=",
"Storage",
".",
"objects",
".",
"filter",
"(",
"data",
"=",
"None",
")",
"if",
"verbosity",
">=",
"1",
":",
"if",
"orphaned_storages",... | Purge unreferenced storages. | [
"Purge",
"unreferenced",
"storages",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/purge.py#L173-L186 | train | 45,333 |
genialis/resolwe | resolwe/process/parser.py | ProcessVisitor.visit_ClassDef | def visit_ClassDef(self, node): # pylint: disable=invalid-name
"""Visit top-level classes."""
# Resolve everything as root scope contains everything from the process module.
for base in node.bases:
# Cover `from resolwe.process import ...`.
if isinstance(base, ast.Name) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.id, None)
# Cover `from resolwe import process`.
elif isinstance(base, ast.Attribute) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.attr, None)
else:
continue
if issubclass(base, runtime.Process):
break
else:
return
descriptor = ProcessDescriptor(source=self.source)
# Available embedded classes.
embedded_class_fields = {
runtime.PROCESS_INPUTS_NAME: descriptor.inputs,
runtime.PROCESS_OUTPUTS_NAME: descriptor.outputs,
}
# Parse metadata in class body.
for item in node.body:
if isinstance(item, ast.Assign):
# Possible metadata.
if (len(item.targets) == 1 and isinstance(item.targets[0], ast.Name)
and isinstance(item.targets[0].ctx, ast.Store)
and item.targets[0].id in PROCESS_METADATA):
# Try to get the metadata value.
value = PROCESS_METADATA[item.targets[0].id].get_value(item.value)
setattr(descriptor.metadata, item.targets[0].id, value)
elif (isinstance(item, ast.Expr) and isinstance(item.value, ast.Str)
and descriptor.metadata.description is None):
# Possible description string.
descriptor.metadata.description = item.value.s
elif isinstance(item, ast.ClassDef) and item.name in embedded_class_fields.keys():
# Possible input/output declaration.
self.visit_field_class(item, descriptor, embedded_class_fields[item.name])
descriptor.validate()
self.processes.append(descriptor) | python | def visit_ClassDef(self, node): # pylint: disable=invalid-name
"""Visit top-level classes."""
# Resolve everything as root scope contains everything from the process module.
for base in node.bases:
# Cover `from resolwe.process import ...`.
if isinstance(base, ast.Name) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.id, None)
# Cover `from resolwe import process`.
elif isinstance(base, ast.Attribute) and isinstance(base.ctx, ast.Load):
base = getattr(runtime, base.attr, None)
else:
continue
if issubclass(base, runtime.Process):
break
else:
return
descriptor = ProcessDescriptor(source=self.source)
# Available embedded classes.
embedded_class_fields = {
runtime.PROCESS_INPUTS_NAME: descriptor.inputs,
runtime.PROCESS_OUTPUTS_NAME: descriptor.outputs,
}
# Parse metadata in class body.
for item in node.body:
if isinstance(item, ast.Assign):
# Possible metadata.
if (len(item.targets) == 1 and isinstance(item.targets[0], ast.Name)
and isinstance(item.targets[0].ctx, ast.Store)
and item.targets[0].id in PROCESS_METADATA):
# Try to get the metadata value.
value = PROCESS_METADATA[item.targets[0].id].get_value(item.value)
setattr(descriptor.metadata, item.targets[0].id, value)
elif (isinstance(item, ast.Expr) and isinstance(item.value, ast.Str)
and descriptor.metadata.description is None):
# Possible description string.
descriptor.metadata.description = item.value.s
elif isinstance(item, ast.ClassDef) and item.name in embedded_class_fields.keys():
# Possible input/output declaration.
self.visit_field_class(item, descriptor, embedded_class_fields[item.name])
descriptor.validate()
self.processes.append(descriptor) | [
"def",
"visit_ClassDef",
"(",
"self",
",",
"node",
")",
":",
"# pylint: disable=invalid-name",
"# Resolve everything as root scope contains everything from the process module.",
"for",
"base",
"in",
"node",
".",
"bases",
":",
"# Cover `from resolwe.process import ...`.",
"if",
... | Visit top-level classes. | [
"Visit",
"top",
"-",
"level",
"classes",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/parser.py#L167-L212 | train | 45,334 |
genialis/resolwe | resolwe/process/parser.py | SafeParser.parse | def parse(self):
"""Parse process.
:return: A list of discovered process descriptors
"""
root = ast.parse(self._source)
visitor = ProcessVisitor(source=self._source)
visitor.visit(root)
return visitor.processes | python | def parse(self):
"""Parse process.
:return: A list of discovered process descriptors
"""
root = ast.parse(self._source)
visitor = ProcessVisitor(source=self._source)
visitor.visit(root)
return visitor.processes | [
"def",
"parse",
"(",
"self",
")",
":",
"root",
"=",
"ast",
".",
"parse",
"(",
"self",
".",
"_source",
")",
"visitor",
"=",
"ProcessVisitor",
"(",
"source",
"=",
"self",
".",
"_source",
")",
"visitor",
".",
"visit",
"(",
"root",
")",
"return",
"visito... | Parse process.
:return: A list of discovered process descriptors | [
"Parse",
"process",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/parser.py#L225-L234 | train | 45,335 |
genialis/resolwe | resolwe/permissions/loader.py | get_permissions_class | def get_permissions_class(permissions_name=None):
"""Load and cache permissions class.
If ``permissions_name`` is not given, it defaults to permissions
class set in Django ``FLOW_API['PERMISSIONS']`` setting.
"""
def load_permissions(permissions_name):
"""Look for a fully qualified flow permissions class."""
try:
return import_module('{}'.format(permissions_name)).ResolwePermissions
except AttributeError:
raise AttributeError("'ResolwePermissions' class not found in {} module.".format(
permissions_name))
except ImportError as ex:
# The permissions module wasn't found. Display a helpful error
# message listing all possible (built-in) permissions classes.
permissions_dir = os.path.join(os.path.dirname(upath(__file__)), '..', 'perms')
permissions_dir = os.path.normpath(permissions_dir)
try:
builtin_permissions = [
name for _, name, _ in pkgutil.iter_modules([permissions_dir]) if name not in ['tests']]
except EnvironmentError:
builtin_permissions = []
if permissions_name not in ['resolwe.auth.{}'.format(p) for p in builtin_permissions]:
permissions_reprs = map(repr, sorted(builtin_permissions))
err_msg = ("{} isn't an available flow permissions class.\n"
"Try using 'resolwe.auth.XXX', where XXX is one of:\n"
" {}\n"
"Error was: {}".format(permissions_name, ", ".join(permissions_reprs), ex))
raise ImproperlyConfigured(err_msg)
else:
# If there's some other error, this must be an error in Django
raise
if permissions_name is None:
permissions_name = settings.FLOW_API['PERMISSIONS']
if permissions_name not in permissions_classes:
permissions_classes[permissions_name] = load_permissions(permissions_name)
return permissions_classes[permissions_name] | python | def get_permissions_class(permissions_name=None):
"""Load and cache permissions class.
If ``permissions_name`` is not given, it defaults to permissions
class set in Django ``FLOW_API['PERMISSIONS']`` setting.
"""
def load_permissions(permissions_name):
"""Look for a fully qualified flow permissions class."""
try:
return import_module('{}'.format(permissions_name)).ResolwePermissions
except AttributeError:
raise AttributeError("'ResolwePermissions' class not found in {} module.".format(
permissions_name))
except ImportError as ex:
# The permissions module wasn't found. Display a helpful error
# message listing all possible (built-in) permissions classes.
permissions_dir = os.path.join(os.path.dirname(upath(__file__)), '..', 'perms')
permissions_dir = os.path.normpath(permissions_dir)
try:
builtin_permissions = [
name for _, name, _ in pkgutil.iter_modules([permissions_dir]) if name not in ['tests']]
except EnvironmentError:
builtin_permissions = []
if permissions_name not in ['resolwe.auth.{}'.format(p) for p in builtin_permissions]:
permissions_reprs = map(repr, sorted(builtin_permissions))
err_msg = ("{} isn't an available flow permissions class.\n"
"Try using 'resolwe.auth.XXX', where XXX is one of:\n"
" {}\n"
"Error was: {}".format(permissions_name, ", ".join(permissions_reprs), ex))
raise ImproperlyConfigured(err_msg)
else:
# If there's some other error, this must be an error in Django
raise
if permissions_name is None:
permissions_name = settings.FLOW_API['PERMISSIONS']
if permissions_name not in permissions_classes:
permissions_classes[permissions_name] = load_permissions(permissions_name)
return permissions_classes[permissions_name] | [
"def",
"get_permissions_class",
"(",
"permissions_name",
"=",
"None",
")",
":",
"def",
"load_permissions",
"(",
"permissions_name",
")",
":",
"\"\"\"Look for a fully qualified flow permissions class.\"\"\"",
"try",
":",
"return",
"import_module",
"(",
"'{}'",
".",
"format... | Load and cache permissions class.
If ``permissions_name`` is not given, it defaults to permissions
class set in Django ``FLOW_API['PERMISSIONS']`` setting. | [
"Load",
"and",
"cache",
"permissions",
"class",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/permissions/loader.py#L21-L62 | train | 45,336 |
DataONEorg/d1_python | lib_common/src/d1_common/revision.py | get_identifiers | def get_identifiers(sysmeta_pyxb):
"""Get set of identifiers that provide revision context for SciObj.
Returns: tuple: PID, SID, OBSOLETES_PID, OBSOLETED_BY_PID
"""
pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'identifier')
sid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'seriesId')
obsoletes_pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'obsoletes')
obsoleted_by_pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'obsoletedBy')
return pid, sid, obsoletes_pid, obsoleted_by_pid | python | def get_identifiers(sysmeta_pyxb):
"""Get set of identifiers that provide revision context for SciObj.
Returns: tuple: PID, SID, OBSOLETES_PID, OBSOLETED_BY_PID
"""
pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'identifier')
sid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'seriesId')
obsoletes_pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'obsoletes')
obsoleted_by_pid = d1_common.xml.get_opt_val(sysmeta_pyxb, 'obsoletedBy')
return pid, sid, obsoletes_pid, obsoleted_by_pid | [
"def",
"get_identifiers",
"(",
"sysmeta_pyxb",
")",
":",
"pid",
"=",
"d1_common",
".",
"xml",
".",
"get_opt_val",
"(",
"sysmeta_pyxb",
",",
"'identifier'",
")",
"sid",
"=",
"d1_common",
".",
"xml",
".",
"get_opt_val",
"(",
"sysmeta_pyxb",
",",
"'seriesId'",
... | Get set of identifiers that provide revision context for SciObj.
Returns: tuple: PID, SID, OBSOLETES_PID, OBSOLETED_BY_PID | [
"Get",
"set",
"of",
"identifiers",
"that",
"provide",
"revision",
"context",
"for",
"SciObj",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/revision.py#L23-L33 | train | 45,337 |
DataONEorg/d1_python | lib_common/src/d1_common/revision.py | topological_sort | def topological_sort(unsorted_dict):
"""Sort objects by dependency.
Sort a dict of obsoleting PID to obsoleted PID to a list of PIDs in order of
obsolescence.
Args:
unsorted_dict : dict
Dict that holds obsolescence information. Each ``key/value`` pair establishes
that the PID in ``key`` identifies an object that obsoletes an object identifies
by the PID in ``value``.
Returns:
tuple of sorted_list, unconnected_dict :
``sorted_list``: A list of PIDs ordered so that all PIDs that obsolete an object
are listed after the object they obsolete.
``unconnected_dict``: A dict of PID to obsoleted PID of any objects that could not
be added to a revision chain. These items will have obsoletes PIDs that directly
or indirectly reference a PID that could not be sorted.
Notes:
``obsoletes_dict`` is modified by the sort and on return holds any items that
could not be sorted.
The sort works by repeatedly iterating over an unsorted list of PIDs and
moving PIDs to the sorted list as they become available. A PID is available to
be moved to the sorted list if it does not obsolete a PID or if the PID it
obsoletes is already in the sorted list.
"""
sorted_list = []
sorted_set = set()
found = True
unconnected_dict = unsorted_dict.copy()
while found:
found = False
for pid, obsoletes_pid in list(unconnected_dict.items()):
if obsoletes_pid is None or obsoletes_pid in sorted_set:
found = True
sorted_list.append(pid)
sorted_set.add(pid)
del unconnected_dict[pid]
return sorted_list, unconnected_dict | python | def topological_sort(unsorted_dict):
"""Sort objects by dependency.
Sort a dict of obsoleting PID to obsoleted PID to a list of PIDs in order of
obsolescence.
Args:
unsorted_dict : dict
Dict that holds obsolescence information. Each ``key/value`` pair establishes
that the PID in ``key`` identifies an object that obsoletes an object identifies
by the PID in ``value``.
Returns:
tuple of sorted_list, unconnected_dict :
``sorted_list``: A list of PIDs ordered so that all PIDs that obsolete an object
are listed after the object they obsolete.
``unconnected_dict``: A dict of PID to obsoleted PID of any objects that could not
be added to a revision chain. These items will have obsoletes PIDs that directly
or indirectly reference a PID that could not be sorted.
Notes:
``obsoletes_dict`` is modified by the sort and on return holds any items that
could not be sorted.
The sort works by repeatedly iterating over an unsorted list of PIDs and
moving PIDs to the sorted list as they become available. A PID is available to
be moved to the sorted list if it does not obsolete a PID or if the PID it
obsoletes is already in the sorted list.
"""
sorted_list = []
sorted_set = set()
found = True
unconnected_dict = unsorted_dict.copy()
while found:
found = False
for pid, obsoletes_pid in list(unconnected_dict.items()):
if obsoletes_pid is None or obsoletes_pid in sorted_set:
found = True
sorted_list.append(pid)
sorted_set.add(pid)
del unconnected_dict[pid]
return sorted_list, unconnected_dict | [
"def",
"topological_sort",
"(",
"unsorted_dict",
")",
":",
"sorted_list",
"=",
"[",
"]",
"sorted_set",
"=",
"set",
"(",
")",
"found",
"=",
"True",
"unconnected_dict",
"=",
"unsorted_dict",
".",
"copy",
"(",
")",
"while",
"found",
":",
"found",
"=",
"False"... | Sort objects by dependency.
Sort a dict of obsoleting PID to obsoleted PID to a list of PIDs in order of
obsolescence.
Args:
unsorted_dict : dict
Dict that holds obsolescence information. Each ``key/value`` pair establishes
that the PID in ``key`` identifies an object that obsoletes an object identifies
by the PID in ``value``.
Returns:
tuple of sorted_list, unconnected_dict :
``sorted_list``: A list of PIDs ordered so that all PIDs that obsolete an object
are listed after the object they obsolete.
``unconnected_dict``: A dict of PID to obsoleted PID of any objects that could not
be added to a revision chain. These items will have obsoletes PIDs that directly
or indirectly reference a PID that could not be sorted.
Notes:
``obsoletes_dict`` is modified by the sort and on return holds any items that
could not be sorted.
The sort works by repeatedly iterating over an unsorted list of PIDs and
moving PIDs to the sorted list as they become available. A PID is available to
be moved to the sorted list if it does not obsolete a PID or if the PID it
obsoletes is already in the sorted list. | [
"Sort",
"objects",
"by",
"dependency",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/revision.py#L36-L81 | train | 45,338 |
DataONEorg/d1_python | dev_tools/src/d1_dev/src-format-docstrings.py | wrap | def wrap(indent_int, unwrap_str):
"""Wrap a single line to one or more lines that start at indent_int and end at the
last word that will fit before WRAP_MARGIN_INT.
If there are no word breaks (spaces) before WRAP_MARGIN_INT, force a break at
WRAP_MARGIN_INT.
"""
with io.StringIO() as str_buf:
is_rest_block = unwrap_str.startswith(("- ", "* "))
while unwrap_str:
cut_pos = (unwrap_str + " ").rfind(" ", 0, WRAP_MARGIN_INT - indent_int)
if cut_pos == -1:
cut_pos = WRAP_MARGIN_INT
this_str, unwrap_str = unwrap_str[:cut_pos], unwrap_str[cut_pos + 1 :]
str_buf.write("{}{}\n".format(" " * indent_int, this_str))
if is_rest_block:
is_rest_block = False
indent_int += 2
return str_buf.getvalue() | python | def wrap(indent_int, unwrap_str):
"""Wrap a single line to one or more lines that start at indent_int and end at the
last word that will fit before WRAP_MARGIN_INT.
If there are no word breaks (spaces) before WRAP_MARGIN_INT, force a break at
WRAP_MARGIN_INT.
"""
with io.StringIO() as str_buf:
is_rest_block = unwrap_str.startswith(("- ", "* "))
while unwrap_str:
cut_pos = (unwrap_str + " ").rfind(" ", 0, WRAP_MARGIN_INT - indent_int)
if cut_pos == -1:
cut_pos = WRAP_MARGIN_INT
this_str, unwrap_str = unwrap_str[:cut_pos], unwrap_str[cut_pos + 1 :]
str_buf.write("{}{}\n".format(" " * indent_int, this_str))
if is_rest_block:
is_rest_block = False
indent_int += 2
return str_buf.getvalue() | [
"def",
"wrap",
"(",
"indent_int",
",",
"unwrap_str",
")",
":",
"with",
"io",
".",
"StringIO",
"(",
")",
"as",
"str_buf",
":",
"is_rest_block",
"=",
"unwrap_str",
".",
"startswith",
"(",
"(",
"\"- \"",
",",
"\"* \"",
")",
")",
"while",
"unwrap_str",
":",
... | Wrap a single line to one or more lines that start at indent_int and end at the
last word that will fit before WRAP_MARGIN_INT.
If there are no word breaks (spaces) before WRAP_MARGIN_INT, force a break at
WRAP_MARGIN_INT. | [
"Wrap",
"a",
"single",
"line",
"to",
"one",
"or",
"more",
"lines",
"that",
"start",
"at",
"indent_int",
"and",
"end",
"at",
"the",
"last",
"word",
"that",
"will",
"fit",
"before",
"WRAP_MARGIN_INT",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/dev_tools/src/d1_dev/src-format-docstrings.py#L273-L297 | train | 45,339 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | createSimpleResourceMap | def createSimpleResourceMap(ore_pid, scimeta_pid, sciobj_pid_list):
"""Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects.
This creates a document that establishes an association between a Science Metadata
object and any number of Science Data objects. The Science Metadata object contains
information that is indexed by DataONE, allowing both the Science Metadata and the
Science Data objects to be discoverable in DataONE Search. In search results, the
objects will appear together and can be downloaded as a single package.
Args:
ore_pid: str
Persistent Identifier (PID) to use for the new Resource Map
scimeta_pid: str
PID for an object that will be listed as the Science Metadata that is
describing the Science Data objects.
sciobj_pid_list: list of str
List of PIDs that will be listed as the Science Data objects that are being
described by the Science Metadata.
Returns:
ResourceMap : OAI-ORE Resource Map
"""
ore = ResourceMap()
ore.initialize(ore_pid)
ore.addMetadataDocument(scimeta_pid)
ore.addDataDocuments(sciobj_pid_list, scimeta_pid)
return ore | python | def createSimpleResourceMap(ore_pid, scimeta_pid, sciobj_pid_list):
"""Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects.
This creates a document that establishes an association between a Science Metadata
object and any number of Science Data objects. The Science Metadata object contains
information that is indexed by DataONE, allowing both the Science Metadata and the
Science Data objects to be discoverable in DataONE Search. In search results, the
objects will appear together and can be downloaded as a single package.
Args:
ore_pid: str
Persistent Identifier (PID) to use for the new Resource Map
scimeta_pid: str
PID for an object that will be listed as the Science Metadata that is
describing the Science Data objects.
sciobj_pid_list: list of str
List of PIDs that will be listed as the Science Data objects that are being
described by the Science Metadata.
Returns:
ResourceMap : OAI-ORE Resource Map
"""
ore = ResourceMap()
ore.initialize(ore_pid)
ore.addMetadataDocument(scimeta_pid)
ore.addDataDocuments(sciobj_pid_list, scimeta_pid)
return ore | [
"def",
"createSimpleResourceMap",
"(",
"ore_pid",
",",
"scimeta_pid",
",",
"sciobj_pid_list",
")",
":",
"ore",
"=",
"ResourceMap",
"(",
")",
"ore",
".",
"initialize",
"(",
"ore_pid",
")",
"ore",
".",
"addMetadataDocument",
"(",
"scimeta_pid",
")",
"ore",
".",
... | Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects.
This creates a document that establishes an association between a Science Metadata
object and any number of Science Data objects. The Science Metadata object contains
information that is indexed by DataONE, allowing both the Science Metadata and the
Science Data objects to be discoverable in DataONE Search. In search results, the
objects will appear together and can be downloaded as a single package.
Args:
ore_pid: str
Persistent Identifier (PID) to use for the new Resource Map
scimeta_pid: str
PID for an object that will be listed as the Science Metadata that is
describing the Science Data objects.
sciobj_pid_list: list of str
List of PIDs that will be listed as the Science Data objects that are being
described by the Science Metadata.
Returns:
ResourceMap : OAI-ORE Resource Map | [
"Create",
"a",
"simple",
"OAI",
"-",
"ORE",
"Resource",
"Map",
"with",
"one",
"Science",
"Metadata",
"document",
"and",
"any",
"number",
"of",
"Science",
"Data",
"objects",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L66-L96 | train | 45,340 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | createResourceMapFromStream | def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT):
"""Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map
"""
pids = []
for line in in_stream:
pid = line.strip()
if pid == "#" or pid.startswith("# "):
continue
if len(pids) < 2:
raise ValueError("Insufficient numbers of identifiers provided.")
logging.info("Read {} identifiers".format(len(pids)))
ore = ResourceMap(base_url=base_url)
logging.info("ORE PID = {}".format(pids[0]))
ore.initialize(pids[0])
logging.info("Metadata PID = {}".format(pids[1]))
ore.addMetadataDocument(pids[1])
ore.addDataDocuments(pids[2:], pids[1])
return ore | python | def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT):
"""Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map
"""
pids = []
for line in in_stream:
pid = line.strip()
if pid == "#" or pid.startswith("# "):
continue
if len(pids) < 2:
raise ValueError("Insufficient numbers of identifiers provided.")
logging.info("Read {} identifiers".format(len(pids)))
ore = ResourceMap(base_url=base_url)
logging.info("ORE PID = {}".format(pids[0]))
ore.initialize(pids[0])
logging.info("Metadata PID = {}".format(pids[1]))
ore.addMetadataDocument(pids[1])
ore.addDataDocuments(pids[2:], pids[1])
return ore | [
"def",
"createResourceMapFromStream",
"(",
"in_stream",
",",
"base_url",
"=",
"d1_common",
".",
"const",
".",
"URL_DATAONE_ROOT",
")",
":",
"pids",
"=",
"[",
"]",
"for",
"line",
"in",
"in_stream",
":",
"pid",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"p... | Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map | [
"Create",
"a",
"simple",
"OAI",
"-",
"ORE",
"Resource",
"Map",
"with",
"one",
"Science",
"Metadata",
"document",
"and",
"any",
"number",
"of",
"Science",
"Data",
"objects",
"using",
"a",
"stream",
"of",
"PIDs",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L99-L142 | train | 45,341 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.initialize | def initialize(self, pid, ore_software_id=d1_common.const.ORE_SOFTWARE_ID):
"""Create the basic ORE document structure."""
# Set nice prefixes for the namespaces
for k in list(d1_common.const.ORE_NAMESPACE_DICT.keys()):
self.bind(k, d1_common.const.ORE_NAMESPACE_DICT[k])
# Create the ORE entity
oid = self._pid_to_id(pid)
ore = rdflib.URIRef(oid)
self.add((ore, rdflib.RDF.type, ORE.ResourceMap))
self.add((ore, DCTERMS.identifier, rdflib.term.Literal(pid)))
self.add((ore, DCTERMS.creator, rdflib.term.Literal(ore_software_id)))
# Add an empty aggregation
ag = rdflib.URIRef(oid + "#aggregation")
self.add((ore, ORE.describes, ag))
self.add((ag, rdflib.RDF.type, ORE.Aggregation))
self.add((ORE.Aggregation, rdflib.RDFS.isDefinedBy, ORE.term("")))
self.add(
(ORE.Aggregation, rdflib.RDFS.label, rdflib.term.Literal("Aggregation"))
)
self._ore_initialized = True | python | def initialize(self, pid, ore_software_id=d1_common.const.ORE_SOFTWARE_ID):
"""Create the basic ORE document structure."""
# Set nice prefixes for the namespaces
for k in list(d1_common.const.ORE_NAMESPACE_DICT.keys()):
self.bind(k, d1_common.const.ORE_NAMESPACE_DICT[k])
# Create the ORE entity
oid = self._pid_to_id(pid)
ore = rdflib.URIRef(oid)
self.add((ore, rdflib.RDF.type, ORE.ResourceMap))
self.add((ore, DCTERMS.identifier, rdflib.term.Literal(pid)))
self.add((ore, DCTERMS.creator, rdflib.term.Literal(ore_software_id)))
# Add an empty aggregation
ag = rdflib.URIRef(oid + "#aggregation")
self.add((ore, ORE.describes, ag))
self.add((ag, rdflib.RDF.type, ORE.Aggregation))
self.add((ORE.Aggregation, rdflib.RDFS.isDefinedBy, ORE.term("")))
self.add(
(ORE.Aggregation, rdflib.RDFS.label, rdflib.term.Literal("Aggregation"))
)
self._ore_initialized = True | [
"def",
"initialize",
"(",
"self",
",",
"pid",
",",
"ore_software_id",
"=",
"d1_common",
".",
"const",
".",
"ORE_SOFTWARE_ID",
")",
":",
"# Set nice prefixes for the namespaces",
"for",
"k",
"in",
"list",
"(",
"d1_common",
".",
"const",
".",
"ORE_NAMESPACE_DICT",
... | Create the basic ORE document structure. | [
"Create",
"the",
"basic",
"ORE",
"document",
"structure",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L204-L223 | train | 45,342 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.serialize_to_transport | def serialize_to_transport(self, doc_format="xml", *args, **kwargs):
"""Serialize ResourceMap to UTF-8 encoded XML document.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
bytes: UTF-8 encoded XML doc.
Note:
Only the default, "xml", is automatically indexed by DataONE.
"""
return super(ResourceMap, self).serialize(
format=doc_format, encoding="utf-8", *args, **kwargs
) | python | def serialize_to_transport(self, doc_format="xml", *args, **kwargs):
"""Serialize ResourceMap to UTF-8 encoded XML document.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
bytes: UTF-8 encoded XML doc.
Note:
Only the default, "xml", is automatically indexed by DataONE.
"""
return super(ResourceMap, self).serialize(
format=doc_format, encoding="utf-8", *args, **kwargs
) | [
"def",
"serialize_to_transport",
"(",
"self",
",",
"doc_format",
"=",
"\"xml\"",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"super",
"(",
"ResourceMap",
",",
"self",
")",
".",
"serialize",
"(",
"format",
"=",
"doc_format",
",",
"encod... | Serialize ResourceMap to UTF-8 encoded XML document.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
bytes: UTF-8 encoded XML doc.
Note:
Only the default, "xml", is automatically indexed by DataONE. | [
"Serialize",
"ResourceMap",
"to",
"UTF",
"-",
"8",
"encoded",
"XML",
"document",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L225-L245 | train | 45,343 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.serialize_to_display | def serialize_to_display(self, doc_format="pretty-xml", *args, **kwargs):
"""Serialize ResourceMap to an XML doc that is pretty printed for display.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
str: Pretty printed Resource Map XML doc
Note:
Only the default, "xml", is automatically indexed by DataONE.
"""
return (
super(ResourceMap, self)
.serialize(format=doc_format, encoding=None, *args, **kwargs)
.decode("utf-8")
) | python | def serialize_to_display(self, doc_format="pretty-xml", *args, **kwargs):
"""Serialize ResourceMap to an XML doc that is pretty printed for display.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
str: Pretty printed Resource Map XML doc
Note:
Only the default, "xml", is automatically indexed by DataONE.
"""
return (
super(ResourceMap, self)
.serialize(format=doc_format, encoding=None, *args, **kwargs)
.decode("utf-8")
) | [
"def",
"serialize_to_display",
"(",
"self",
",",
"doc_format",
"=",
"\"pretty-xml\"",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"(",
"super",
"(",
"ResourceMap",
",",
"self",
")",
".",
"serialize",
"(",
"format",
"=",
"doc_format",
"... | Serialize ResourceMap to an XML doc that is pretty printed for display.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
str: Pretty printed Resource Map XML doc
Note:
Only the default, "xml", is automatically indexed by DataONE. | [
"Serialize",
"ResourceMap",
"to",
"an",
"XML",
"doc",
"that",
"is",
"pretty",
"printed",
"for",
"display",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L247-L269 | train | 45,344 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.deserialize | def deserialize(self, *args, **kwargs):
"""Deserialize Resource Map XML doc.
The source is specified using one of source, location, file or data.
Args:
source: InputSource, file-like object, or string
In the case of a string the string is the location of the source.
location: str
String indicating the relative or absolute URL of the source. Graph``s
absolutize method is used if a relative location is specified.
file: file-like object
data: str
The document to be parsed.
format : str
Used if format can not be determined from source. Defaults to ``rdf/xml``.
Format support can be extended with plugins.
Built-in: ``xml``, ``n3``, ``nt``, ``trix``, ``rdfa``
publicID: str
Logical URI to use as the document base. If None specified the document
location is used (at least in the case where there is a document location).
Raises:
xml.sax.SAXException based exception: On parse error.
"""
self.parse(*args, **kwargs)
self._ore_initialized = True | python | def deserialize(self, *args, **kwargs):
"""Deserialize Resource Map XML doc.
The source is specified using one of source, location, file or data.
Args:
source: InputSource, file-like object, or string
In the case of a string the string is the location of the source.
location: str
String indicating the relative or absolute URL of the source. Graph``s
absolutize method is used if a relative location is specified.
file: file-like object
data: str
The document to be parsed.
format : str
Used if format can not be determined from source. Defaults to ``rdf/xml``.
Format support can be extended with plugins.
Built-in: ``xml``, ``n3``, ``nt``, ``trix``, ``rdfa``
publicID: str
Logical URI to use as the document base. If None specified the document
location is used (at least in the case where there is a document location).
Raises:
xml.sax.SAXException based exception: On parse error.
"""
self.parse(*args, **kwargs)
self._ore_initialized = True | [
"def",
"deserialize",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"parse",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_ore_initialized",
"=",
"True"
] | Deserialize Resource Map XML doc.
The source is specified using one of source, location, file or data.
Args:
source: InputSource, file-like object, or string
In the case of a string the string is the location of the source.
location: str
String indicating the relative or absolute URL of the source. Graph``s
absolutize method is used if a relative location is specified.
file: file-like object
data: str
The document to be parsed.
format : str
Used if format can not be determined from source. Defaults to ``rdf/xml``.
Format support can be extended with plugins.
Built-in: ``xml``, ``n3``, ``nt``, ``trix``, ``rdfa``
publicID: str
Logical URI to use as the document base. If None specified the document
location is used (at least in the case where there is a document location).
Raises:
xml.sax.SAXException based exception: On parse error. | [
"Deserialize",
"Resource",
"Map",
"XML",
"doc",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L272-L305 | train | 45,345 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.addResource | def addResource(self, pid):
"""Add a resource to the Resource Map.
Args:
pid : str
"""
self._check_initialized()
try:
# is entry already in place?
self.getObjectByPid(pid)
return
except IndexError:
pass
# Entry not present, add it to the graph
oid = self._pid_to_id(pid)
obj = rdflib.URIRef(oid)
ag = self.getAggregation()
self.add((ag, ORE.aggregates, obj))
self.add((obj, ORE.isAggregatedBy, ag))
self.add((obj, DCTERMS.identifier, rdflib.term.Literal(pid))) | python | def addResource(self, pid):
"""Add a resource to the Resource Map.
Args:
pid : str
"""
self._check_initialized()
try:
# is entry already in place?
self.getObjectByPid(pid)
return
except IndexError:
pass
# Entry not present, add it to the graph
oid = self._pid_to_id(pid)
obj = rdflib.URIRef(oid)
ag = self.getAggregation()
self.add((ag, ORE.aggregates, obj))
self.add((obj, ORE.isAggregatedBy, ag))
self.add((obj, DCTERMS.identifier, rdflib.term.Literal(pid))) | [
"def",
"addResource",
"(",
"self",
",",
"pid",
")",
":",
"self",
".",
"_check_initialized",
"(",
")",
"try",
":",
"# is entry already in place?",
"self",
".",
"getObjectByPid",
"(",
"pid",
")",
"return",
"except",
"IndexError",
":",
"pass",
"# Entry not present,... | Add a resource to the Resource Map.
Args:
pid : str | [
"Add",
"a",
"resource",
"to",
"the",
"Resource",
"Map",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L330-L350 | train | 45,346 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.setDocuments | def setDocuments(self, documenting_pid, documented_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
"""
self._check_initialized()
documenting_id = self.getObjectByPid(documenting_pid)
documented_id = self.getObjectByPid(documented_pid)
self.add((documenting_id, CITO.documents, documented_id)) | python | def setDocuments(self, documenting_pid, documented_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
"""
self._check_initialized()
documenting_id = self.getObjectByPid(documenting_pid)
documented_id = self.getObjectByPid(documented_pid)
self.add((documenting_id, CITO.documents, documented_id)) | [
"def",
"setDocuments",
"(",
"self",
",",
"documenting_pid",
",",
"documented_pid",
")",
":",
"self",
".",
"_check_initialized",
"(",
")",
"documenting_id",
"=",
"self",
".",
"getObjectByPid",
"(",
"documenting_pid",
")",
"documented_id",
"=",
"self",
".",
"getOb... | Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``. | [
"Add",
"a",
"CiTO",
"the",
"Citation",
"Typing",
"Ontology",
"triple",
"asserting",
"that",
"documenting_pid",
"documents",
"documented_pid",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L352-L369 | train | 45,347 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.setDocumentedBy | def setDocumentedBy(self, documented_pid, documenting_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documented_pid`` isDocumentedBy ``documenting_pid``.
Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``
Args:
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
"""
self._check_initialized()
documented_id = self.getObjectByPid(documented_pid)
documenting_id = self.getObjectByPid(documenting_pid)
self.add((documented_id, CITO.isDocumentedBy, documenting_id)) | python | def setDocumentedBy(self, documented_pid, documenting_pid):
"""Add a CiTO, the Citation Typing Ontology, triple asserting that
``documented_pid`` isDocumentedBy ``documenting_pid``.
Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``
Args:
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
"""
self._check_initialized()
documented_id = self.getObjectByPid(documented_pid)
documenting_id = self.getObjectByPid(documenting_pid)
self.add((documented_id, CITO.isDocumentedBy, documenting_id)) | [
"def",
"setDocumentedBy",
"(",
"self",
",",
"documented_pid",
",",
"documenting_pid",
")",
":",
"self",
".",
"_check_initialized",
"(",
")",
"documented_id",
"=",
"self",
".",
"getObjectByPid",
"(",
"documented_pid",
")",
"documenting_id",
"=",
"self",
".",
"get... | Add a CiTO, the Citation Typing Ontology, triple asserting that
``documented_pid`` isDocumentedBy ``documenting_pid``.
Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``
Args:
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
documenting_pid: str
PID of a Science Object that documents ``documented_pid``. | [
"Add",
"a",
"CiTO",
"the",
"Citation",
"Typing",
"Ontology",
"triple",
"asserting",
"that",
"documented_pid",
"isDocumentedBy",
"documenting_pid",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L371-L388 | train | 45,348 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.parseDoc | def parseDoc(self, doc_str, format="xml"):
"""Parse a OAI-ORE Resource Maps document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments.
"""
self.parse(data=doc_str, format=format)
self._ore_initialized = True
return self | python | def parseDoc(self, doc_str, format="xml"):
"""Parse a OAI-ORE Resource Maps document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments.
"""
self.parse(data=doc_str, format=format)
self._ore_initialized = True
return self | [
"def",
"parseDoc",
"(",
"self",
",",
"doc_str",
",",
"format",
"=",
"\"xml\"",
")",
":",
"self",
".",
"parse",
"(",
"data",
"=",
"doc_str",
",",
"format",
"=",
"format",
")",
"self",
".",
"_ore_initialized",
"=",
"True",
"return",
"self"
] | Parse a OAI-ORE Resource Maps document.
See Also: ``rdflib.ConjunctiveGraph.parse`` for documentation on arguments. | [
"Parse",
"a",
"OAI",
"-",
"ORE",
"Resource",
"Maps",
"document",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L606-L614 | train | 45,349 |
DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap._pid_to_id | def _pid_to_id(self, pid):
"""Converts a pid to a URI that can be used as an OAI-ORE identifier."""
return d1_common.url.joinPathElements(
self._base_url,
self._version_tag,
"resolve",
d1_common.url.encodePathElement(pid),
) | python | def _pid_to_id(self, pid):
"""Converts a pid to a URI that can be used as an OAI-ORE identifier."""
return d1_common.url.joinPathElements(
self._base_url,
self._version_tag,
"resolve",
d1_common.url.encodePathElement(pid),
) | [
"def",
"_pid_to_id",
"(",
"self",
",",
"pid",
")",
":",
"return",
"d1_common",
".",
"url",
".",
"joinPathElements",
"(",
"self",
".",
"_base_url",
",",
"self",
".",
"_version_tag",
",",
"\"resolve\"",
",",
"d1_common",
".",
"url",
".",
"encodePathElement",
... | Converts a pid to a URI that can be used as an OAI-ORE identifier. | [
"Converts",
"a",
"pid",
"to",
"a",
"URI",
"that",
"can",
"be",
"used",
"as",
"an",
"OAI",
"-",
"ORE",
"identifier",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L618-L625 | train | 45,350 |
DataONEorg/d1_python | utilities/src/d1_util/check_object_checksums.py | make_checksum_validation_script | def make_checksum_validation_script(stats_list):
"""Make batch files required for checking checksums from another machine."""
if not os.path.exists('./hash_check'):
os.mkdir('./hash_check')
with open('./hash_check/curl.sh', 'w') as curl_f, open(
'./hash_check/md5.txt', 'w'
) as md5_f, open('./hash_check/sha1.txt', 'w') as sha1_f:
curl_f.write('#!/usr/bin/env bash\n\n')
for stats_dict in stats_list:
for sysmeta_xml in stats_dict['largest_sysmeta_xml']:
print(sysmeta_xml)
sysmeta_pyxb = d1_common.types.dataoneTypes_v1_2.CreateFromDocument(
sysmeta_xml
)
pid = sysmeta_pyxb.identifier.value().encode('utf-8')
file_name = re.sub('\W+', '_', pid)
size = sysmeta_pyxb.size
base_url = stats_dict['gmn_dict']['base_url']
if size > 100 * 1024 * 1024:
logging.info('Ignored large object. size={} pid={}')
curl_f.write('# {} {}\n'.format(size, pid))
curl_f.write(
'curl -o obj/{} {}/v1/object/{}\n'.format(
file_name, base_url, d1_common.url.encodePathElement(pid)
)
)
if sysmeta_pyxb.checksum.algorithm == 'MD5':
md5_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
else:
sha1_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
with open('./hash_check/check.sh', 'w') as f:
f.write('#!/usr/bin/env bash\n\n')
f.write('mkdir -p obj\n')
f.write('./curl.sh\n')
f.write('sha1sum -c sha1.txt\n')
f.write('md5sum -c md5.txt\n') | python | def make_checksum_validation_script(stats_list):
"""Make batch files required for checking checksums from another machine."""
if not os.path.exists('./hash_check'):
os.mkdir('./hash_check')
with open('./hash_check/curl.sh', 'w') as curl_f, open(
'./hash_check/md5.txt', 'w'
) as md5_f, open('./hash_check/sha1.txt', 'w') as sha1_f:
curl_f.write('#!/usr/bin/env bash\n\n')
for stats_dict in stats_list:
for sysmeta_xml in stats_dict['largest_sysmeta_xml']:
print(sysmeta_xml)
sysmeta_pyxb = d1_common.types.dataoneTypes_v1_2.CreateFromDocument(
sysmeta_xml
)
pid = sysmeta_pyxb.identifier.value().encode('utf-8')
file_name = re.sub('\W+', '_', pid)
size = sysmeta_pyxb.size
base_url = stats_dict['gmn_dict']['base_url']
if size > 100 * 1024 * 1024:
logging.info('Ignored large object. size={} pid={}')
curl_f.write('# {} {}\n'.format(size, pid))
curl_f.write(
'curl -o obj/{} {}/v1/object/{}\n'.format(
file_name, base_url, d1_common.url.encodePathElement(pid)
)
)
if sysmeta_pyxb.checksum.algorithm == 'MD5':
md5_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
else:
sha1_f.write(
'{} obj/{}\n'.format(sysmeta_pyxb.checksum.value(), file_name)
)
with open('./hash_check/check.sh', 'w') as f:
f.write('#!/usr/bin/env bash\n\n')
f.write('mkdir -p obj\n')
f.write('./curl.sh\n')
f.write('sha1sum -c sha1.txt\n')
f.write('md5sum -c md5.txt\n') | [
"def",
"make_checksum_validation_script",
"(",
"stats_list",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'./hash_check'",
")",
":",
"os",
".",
"mkdir",
"(",
"'./hash_check'",
")",
"with",
"open",
"(",
"'./hash_check/curl.sh'",
",",
"'w'",
... | Make batch files required for checking checksums from another machine. | [
"Make",
"batch",
"files",
"required",
"for",
"checking",
"checksums",
"from",
"another",
"machine",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/utilities/src/d1_util/check_object_checksums.py#L113-L160 | train | 45,351 |
genialis/resolwe | resolwe/flow/migrations/0005_data_dependency_3.py | update_dependency_kinds | def update_dependency_kinds(apps, schema_editor):
"""Update historical dependency kinds as they may be wrong."""
DataDependency = apps.get_model('flow', 'DataDependency')
for dependency in DataDependency.objects.all():
# Assume dependency is of subprocess kind.
dependency.kind = 'subprocess'
# Check child inputs to determine if this is an IO dependency.
child = dependency.child
parent = dependency.parent
for field_schema, fields in iterate_fields(child.input, child.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('data:'):
if value == parent.pk:
dependency.kind = 'io'
break
elif field_schema.get('type', '').startswith('list:data:'):
for data in value:
if value == parent.pk:
dependency.kind = 'io'
break
dependency.save() | python | def update_dependency_kinds(apps, schema_editor):
"""Update historical dependency kinds as they may be wrong."""
DataDependency = apps.get_model('flow', 'DataDependency')
for dependency in DataDependency.objects.all():
# Assume dependency is of subprocess kind.
dependency.kind = 'subprocess'
# Check child inputs to determine if this is an IO dependency.
child = dependency.child
parent = dependency.parent
for field_schema, fields in iterate_fields(child.input, child.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('data:'):
if value == parent.pk:
dependency.kind = 'io'
break
elif field_schema.get('type', '').startswith('list:data:'):
for data in value:
if value == parent.pk:
dependency.kind = 'io'
break
dependency.save() | [
"def",
"update_dependency_kinds",
"(",
"apps",
",",
"schema_editor",
")",
":",
"DataDependency",
"=",
"apps",
".",
"get_model",
"(",
"'flow'",
",",
"'DataDependency'",
")",
"for",
"dependency",
"in",
"DataDependency",
".",
"objects",
".",
"all",
"(",
")",
":",... | Update historical dependency kinds as they may be wrong. | [
"Update",
"historical",
"dependency",
"kinds",
"as",
"they",
"may",
"be",
"wrong",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/migrations/0005_data_dependency_3.py#L10-L35 | train | 45,352 |
genialis/resolwe | resolwe/flow/expression_engines/jinja/__init__.py | Environment.escape | def escape(self, value):
"""Escape given value."""
value = soft_unicode(value)
if self._engine._escape is None: # pylint: disable=protected-access
return value
return self._engine._escape(value) | python | def escape(self, value):
"""Escape given value."""
value = soft_unicode(value)
if self._engine._escape is None: # pylint: disable=protected-access
return value
return self._engine._escape(value) | [
"def",
"escape",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"soft_unicode",
"(",
"value",
")",
"if",
"self",
".",
"_engine",
".",
"_escape",
"is",
"None",
":",
"# pylint: disable=protected-access",
"return",
"value",
"return",
"self",
".",
"_engine",... | Escape given value. | [
"Escape",
"given",
"value",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/expression_engines/jinja/__init__.py#L70-L77 | train | 45,353 |
genialis/resolwe | resolwe/flow/expression_engines/jinja/__init__.py | ExpressionEngine._wrap_jinja_filter | def _wrap_jinja_filter(self, function):
"""Propagate exceptions as undefined values filter."""
def wrapper(*args, **kwargs):
"""Filter wrapper."""
try:
return function(*args, **kwargs)
except Exception: # pylint: disable=broad-except
return NestedUndefined()
# Copy over Jinja filter decoration attributes.
for attribute in dir(function):
if attribute.endswith('filter'):
setattr(wrapper, attribute, getattr(function, attribute))
return wrapper | python | def _wrap_jinja_filter(self, function):
"""Propagate exceptions as undefined values filter."""
def wrapper(*args, **kwargs):
"""Filter wrapper."""
try:
return function(*args, **kwargs)
except Exception: # pylint: disable=broad-except
return NestedUndefined()
# Copy over Jinja filter decoration attributes.
for attribute in dir(function):
if attribute.endswith('filter'):
setattr(wrapper, attribute, getattr(function, attribute))
return wrapper | [
"def",
"_wrap_jinja_filter",
"(",
"self",
",",
"function",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Filter wrapper.\"\"\"",
"try",
":",
"return",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
... | Propagate exceptions as undefined values filter. | [
"Propagate",
"exceptions",
"as",
"undefined",
"values",
"filter",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/expression_engines/jinja/__init__.py#L115-L129 | train | 45,354 |
genialis/resolwe | resolwe/flow/expression_engines/jinja/__init__.py | ExpressionEngine._register_custom_filters | def _register_custom_filters(self):
"""Register any custom filter modules."""
custom_filters = self.settings.get('CUSTOM_FILTERS', [])
if not isinstance(custom_filters, list):
raise KeyError("`CUSTOM_FILTERS` setting must be a list.")
for filter_module_name in custom_filters:
try:
filter_module = import_module(filter_module_name)
except ImportError as error:
raise ImproperlyConfigured(
"Failed to load custom filter module '{}'.\n"
"Error was: {}".format(filter_module_name, error)
)
try:
filter_map = getattr(filter_module, 'filters')
if not isinstance(filter_map, dict):
raise TypeError
except (AttributeError, TypeError):
raise ImproperlyConfigured(
"Filter module '{}' does not define a 'filters' dictionary".format(filter_module_name)
)
self._environment.filters.update(filter_map) | python | def _register_custom_filters(self):
"""Register any custom filter modules."""
custom_filters = self.settings.get('CUSTOM_FILTERS', [])
if not isinstance(custom_filters, list):
raise KeyError("`CUSTOM_FILTERS` setting must be a list.")
for filter_module_name in custom_filters:
try:
filter_module = import_module(filter_module_name)
except ImportError as error:
raise ImproperlyConfigured(
"Failed to load custom filter module '{}'.\n"
"Error was: {}".format(filter_module_name, error)
)
try:
filter_map = getattr(filter_module, 'filters')
if not isinstance(filter_map, dict):
raise TypeError
except (AttributeError, TypeError):
raise ImproperlyConfigured(
"Filter module '{}' does not define a 'filters' dictionary".format(filter_module_name)
)
self._environment.filters.update(filter_map) | [
"def",
"_register_custom_filters",
"(",
"self",
")",
":",
"custom_filters",
"=",
"self",
".",
"settings",
".",
"get",
"(",
"'CUSTOM_FILTERS'",
",",
"[",
"]",
")",
"if",
"not",
"isinstance",
"(",
"custom_filters",
",",
"list",
")",
":",
"raise",
"KeyError",
... | Register any custom filter modules. | [
"Register",
"any",
"custom",
"filter",
"modules",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/expression_engines/jinja/__init__.py#L131-L154 | train | 45,355 |
genialis/resolwe | resolwe/flow/expression_engines/jinja/__init__.py | ExpressionEngine._evaluation_context | def _evaluation_context(self, escape, safe_wrapper):
"""Configure the evaluation context."""
self._escape = escape
self._safe_wrapper = safe_wrapper
try:
yield
finally:
self._escape = None
self._safe_wrapper = None | python | def _evaluation_context(self, escape, safe_wrapper):
"""Configure the evaluation context."""
self._escape = escape
self._safe_wrapper = safe_wrapper
try:
yield
finally:
self._escape = None
self._safe_wrapper = None | [
"def",
"_evaluation_context",
"(",
"self",
",",
"escape",
",",
"safe_wrapper",
")",
":",
"self",
".",
"_escape",
"=",
"escape",
"self",
".",
"_safe_wrapper",
"=",
"safe_wrapper",
"try",
":",
"yield",
"finally",
":",
"self",
".",
"_escape",
"=",
"None",
"se... | Configure the evaluation context. | [
"Configure",
"the",
"evaluation",
"context",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/expression_engines/jinja/__init__.py#L157-L166 | train | 45,356 |
genialis/resolwe | resolwe/flow/expression_engines/jinja/__init__.py | ExpressionEngine.evaluate_block | def evaluate_block(self, template, context=None, escape=None, safe_wrapper=None):
"""Evaluate a template block."""
if context is None:
context = {}
try:
with self._evaluation_context(escape, safe_wrapper):
template = self._environment.from_string(template)
return template.render(**context)
except jinja2.TemplateError as error:
raise EvaluationError(error.args[0])
finally:
self._escape = None | python | def evaluate_block(self, template, context=None, escape=None, safe_wrapper=None):
"""Evaluate a template block."""
if context is None:
context = {}
try:
with self._evaluation_context(escape, safe_wrapper):
template = self._environment.from_string(template)
return template.render(**context)
except jinja2.TemplateError as error:
raise EvaluationError(error.args[0])
finally:
self._escape = None | [
"def",
"evaluate_block",
"(",
"self",
",",
"template",
",",
"context",
"=",
"None",
",",
"escape",
"=",
"None",
",",
"safe_wrapper",
"=",
"None",
")",
":",
"if",
"context",
"is",
"None",
":",
"context",
"=",
"{",
"}",
"try",
":",
"with",
"self",
".",... | Evaluate a template block. | [
"Evaluate",
"a",
"template",
"block",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/expression_engines/jinja/__init__.py#L168-L180 | train | 45,357 |
genialis/resolwe | resolwe/flow/expression_engines/jinja/__init__.py | ExpressionEngine.evaluate_inline | def evaluate_inline(self, expression, context=None, escape=None, safe_wrapper=None):
"""Evaluate an inline expression."""
if context is None:
context = {}
try:
with self._evaluation_context(escape, safe_wrapper):
compiled = self._environment.compile_expression(expression)
return compiled(**context)
except jinja2.TemplateError as error:
raise EvaluationError(error.args[0]) | python | def evaluate_inline(self, expression, context=None, escape=None, safe_wrapper=None):
"""Evaluate an inline expression."""
if context is None:
context = {}
try:
with self._evaluation_context(escape, safe_wrapper):
compiled = self._environment.compile_expression(expression)
return compiled(**context)
except jinja2.TemplateError as error:
raise EvaluationError(error.args[0]) | [
"def",
"evaluate_inline",
"(",
"self",
",",
"expression",
",",
"context",
"=",
"None",
",",
"escape",
"=",
"None",
",",
"safe_wrapper",
"=",
"None",
")",
":",
"if",
"context",
"is",
"None",
":",
"context",
"=",
"{",
"}",
"try",
":",
"with",
"self",
"... | Evaluate an inline expression. | [
"Evaluate",
"an",
"inline",
"expression",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/expression_engines/jinja/__init__.py#L182-L192 | train | 45,358 |
DataONEorg/d1_python | dev_tools/src/d1_dev/util.py | update_module_file | def update_module_file(redbaron_tree, module_path, show_diff=False, dry_run=False):
"""Set show_diff to False to overwrite module_path with a new file generated from
``redbaron_tree``.
Returns True if tree is different from source.
"""
with tempfile.NamedTemporaryFile() as tmp_file:
tmp_file.write(redbaron_tree_to_module_str(redbaron_tree))
tmp_file.seek(0)
if are_files_equal(module_path, tmp_file.name):
logging.debug('Source unchanged')
return False
logging.debug('Source modified')
tmp_file.seek(0)
diff_update_file(module_path, tmp_file.read(), show_diff, dry_run) | python | def update_module_file(redbaron_tree, module_path, show_diff=False, dry_run=False):
"""Set show_diff to False to overwrite module_path with a new file generated from
``redbaron_tree``.
Returns True if tree is different from source.
"""
with tempfile.NamedTemporaryFile() as tmp_file:
tmp_file.write(redbaron_tree_to_module_str(redbaron_tree))
tmp_file.seek(0)
if are_files_equal(module_path, tmp_file.name):
logging.debug('Source unchanged')
return False
logging.debug('Source modified')
tmp_file.seek(0)
diff_update_file(module_path, tmp_file.read(), show_diff, dry_run) | [
"def",
"update_module_file",
"(",
"redbaron_tree",
",",
"module_path",
",",
"show_diff",
"=",
"False",
",",
"dry_run",
"=",
"False",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"as",
"tmp_file",
":",
"tmp_file",
".",
"write",
"(",
"red... | Set show_diff to False to overwrite module_path with a new file generated from
``redbaron_tree``.
Returns True if tree is different from source. | [
"Set",
"show_diff",
"to",
"False",
"to",
"overwrite",
"module_path",
"with",
"a",
"new",
"file",
"generated",
"from",
"redbaron_tree",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/dev_tools/src/d1_dev/util.py#L54-L71 | train | 45,359 |
DataONEorg/d1_python | dev_tools/src/d1_dev/util.py | find_repo_root_by_path | def find_repo_root_by_path(path):
"""Given a path to an item in a git repository, find the root of the repository."""
repo = git.Repo(path, search_parent_directories=True)
repo_path = repo.git.rev_parse('--show-toplevel')
logging.info('Repository: {}'.format(repo_path))
return repo_path | python | def find_repo_root_by_path(path):
"""Given a path to an item in a git repository, find the root of the repository."""
repo = git.Repo(path, search_parent_directories=True)
repo_path = repo.git.rev_parse('--show-toplevel')
logging.info('Repository: {}'.format(repo_path))
return repo_path | [
"def",
"find_repo_root_by_path",
"(",
"path",
")",
":",
"repo",
"=",
"git",
".",
"Repo",
"(",
"path",
",",
"search_parent_directories",
"=",
"True",
")",
"repo_path",
"=",
"repo",
".",
"git",
".",
"rev_parse",
"(",
"'--show-toplevel'",
")",
"logging",
".",
... | Given a path to an item in a git repository, find the root of the repository. | [
"Given",
"a",
"path",
"to",
"an",
"item",
"in",
"a",
"git",
"repository",
"find",
"the",
"root",
"of",
"the",
"repository",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/dev_tools/src/d1_dev/util.py#L181-L186 | train | 45,360 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/operation_formatter.py | OperationFormatter._format_value | def _format_value(self, operation, key, indent):
"""A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match.
"""
v = self._find_value(operation, key)
if v == "NOT_FOUND":
return []
if not isinstance(v, list):
v = [v]
if not len(v):
v = [None]
key = key + ":"
lines = []
for s in v:
# Access control rules are stored in tuples.
if isinstance(s, tuple):
s = "{}: {}".format(*s)
lines.append(
"{}{}{}{}".format(
" " * indent, key, " " * (TAB - indent - len(key) - 1), s
)
)
key = ""
return lines | python | def _format_value(self, operation, key, indent):
"""A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match.
"""
v = self._find_value(operation, key)
if v == "NOT_FOUND":
return []
if not isinstance(v, list):
v = [v]
if not len(v):
v = [None]
key = key + ":"
lines = []
for s in v:
# Access control rules are stored in tuples.
if isinstance(s, tuple):
s = "{}: {}".format(*s)
lines.append(
"{}{}{}{}".format(
" " * indent, key, " " * (TAB - indent - len(key) - 1), s
)
)
key = ""
return lines | [
"def",
"_format_value",
"(",
"self",
",",
"operation",
",",
"key",
",",
"indent",
")",
":",
"v",
"=",
"self",
".",
"_find_value",
"(",
"operation",
",",
"key",
")",
"if",
"v",
"==",
"\"NOT_FOUND\"",
":",
"return",
"[",
"]",
"if",
"not",
"isinstance",
... | A value that exists in the operation but has value None is displayed.
A value that does not exist in the operation is left out entirely. The value
name in the operation must match the value name in the template, but the
location does not have to match. | [
"A",
"value",
"that",
"exists",
"in",
"the",
"operation",
"but",
"has",
"value",
"None",
"is",
"displayed",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/operation_formatter.py#L94-L121 | train | 45,361 |
genialis/resolwe | resolwe/process/runtime.py | Process.run_process | def run_process(self, slug, inputs):
"""Run a new process from a running process."""
def export_files(value):
"""Export input files of spawned process."""
if isinstance(value, str) and os.path.isfile(value):
# TODO: Use the protocol to export files and get the
# process schema to check field type.
print("export {}".format(value))
elif isinstance(value, dict):
for item in value.values():
export_files(item)
elif isinstance(value, list):
for item in value:
export_files(item)
export_files(inputs)
print('run {}'.format(json.dumps({'process': slug, 'input': inputs}, separators=(',', ':')))) | python | def run_process(self, slug, inputs):
"""Run a new process from a running process."""
def export_files(value):
"""Export input files of spawned process."""
if isinstance(value, str) and os.path.isfile(value):
# TODO: Use the protocol to export files and get the
# process schema to check field type.
print("export {}".format(value))
elif isinstance(value, dict):
for item in value.values():
export_files(item)
elif isinstance(value, list):
for item in value:
export_files(item)
export_files(inputs)
print('run {}'.format(json.dumps({'process': slug, 'input': inputs}, separators=(',', ':')))) | [
"def",
"run_process",
"(",
"self",
",",
"slug",
",",
"inputs",
")",
":",
"def",
"export_files",
"(",
"value",
")",
":",
"\"\"\"Export input files of spawned process.\"\"\"",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"i... | Run a new process from a running process. | [
"Run",
"a",
"new",
"process",
"from",
"a",
"running",
"process",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/runtime.py#L177-L193 | train | 45,362 |
genialis/resolwe | resolwe/process/runtime.py | Process.info | def info(self, *args):
"""Log informational message."""
report = resolwe_runtime_utils.info(' '.join([str(x) for x in args]))
# TODO: Use the protocol to report progress.
print(report) | python | def info(self, *args):
"""Log informational message."""
report = resolwe_runtime_utils.info(' '.join([str(x) for x in args]))
# TODO: Use the protocol to report progress.
print(report) | [
"def",
"info",
"(",
"self",
",",
"*",
"args",
")",
":",
"report",
"=",
"resolwe_runtime_utils",
".",
"info",
"(",
"' '",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"args",
"]",
")",
")",
"# TODO: Use the protocol to report progress.",... | Log informational message. | [
"Log",
"informational",
"message",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/runtime.py#L204-L208 | train | 45,363 |
genialis/resolwe | resolwe/process/runtime.py | Process.get_data_id_by_slug | def get_data_id_by_slug(self, slug):
"""Find data object ID for given slug.
This method queries the Resolwe API and requires network access.
"""
resolwe_host = os.environ.get('RESOLWE_HOST_URL')
url = urllib.parse.urljoin(resolwe_host, '/api/data?slug={}&fields=id'.format(slug))
with urllib.request.urlopen(url, timeout=60) as f:
data = json.loads(f.read().decode('utf-8'))
if len(data) == 1:
return data[0]['id']
elif not data:
raise ValueError('Data not found for slug {}'.format(slug))
else:
raise ValueError('More than one data object returned for slug {}'.format(slug)) | python | def get_data_id_by_slug(self, slug):
"""Find data object ID for given slug.
This method queries the Resolwe API and requires network access.
"""
resolwe_host = os.environ.get('RESOLWE_HOST_URL')
url = urllib.parse.urljoin(resolwe_host, '/api/data?slug={}&fields=id'.format(slug))
with urllib.request.urlopen(url, timeout=60) as f:
data = json.loads(f.read().decode('utf-8'))
if len(data) == 1:
return data[0]['id']
elif not data:
raise ValueError('Data not found for slug {}'.format(slug))
else:
raise ValueError('More than one data object returned for slug {}'.format(slug)) | [
"def",
"get_data_id_by_slug",
"(",
"self",
",",
"slug",
")",
":",
"resolwe_host",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'RESOLWE_HOST_URL'",
")",
"url",
"=",
"urllib",
".",
"parse",
".",
"urljoin",
"(",
"resolwe_host",
",",
"'/api/data?slug={}&fields=id... | Find data object ID for given slug.
This method queries the Resolwe API and requires network access. | [
"Find",
"data",
"object",
"ID",
"for",
"given",
"slug",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/runtime.py#L222-L238 | train | 45,364 |
genialis/resolwe | resolwe/process/runtime.py | Process.requirements | def requirements(self):
"""Process requirements."""
class dotdict(dict): # pylint: disable=invalid-name
"""Dot notation access to dictionary attributes."""
def __getattr__(self, attr):
value = self.get(attr)
return dotdict(value) if isinstance(value, dict) else value
return dotdict(self._meta.metadata.requirements) | python | def requirements(self):
"""Process requirements."""
class dotdict(dict): # pylint: disable=invalid-name
"""Dot notation access to dictionary attributes."""
def __getattr__(self, attr):
value = self.get(attr)
return dotdict(value) if isinstance(value, dict) else value
return dotdict(self._meta.metadata.requirements) | [
"def",
"requirements",
"(",
"self",
")",
":",
"class",
"dotdict",
"(",
"dict",
")",
":",
"# pylint: disable=invalid-name",
"\"\"\"Dot notation access to dictionary attributes.\"\"\"",
"def",
"__getattr__",
"(",
"self",
",",
"attr",
")",
":",
"value",
"=",
"self",
".... | Process requirements. | [
"Process",
"requirements",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/runtime.py#L245-L254 | train | 45,365 |
genialis/resolwe | resolwe/flow/execution_engines/python/__init__.py | ExecutionEngine.prepare_runtime | def prepare_runtime(self, runtime_dir, data):
"""Prepare runtime directory."""
# Copy over Python process runtime (resolwe.process).
import resolwe.process as runtime_package
src_dir = os.path.dirname(inspect.getsourcefile(runtime_package))
dest_package_dir = os.path.join(runtime_dir, PYTHON_RUNTIME_DIRNAME, 'resolwe', 'process')
shutil.copytree(src_dir, dest_package_dir)
os.chmod(dest_package_dir, 0o755)
# Write python source file.
source = data.process.run.get('program', '')
program_path = os.path.join(runtime_dir, PYTHON_PROGRAM_FILENAME)
with open(program_path, 'w') as file:
file.write(source)
os.chmod(program_path, 0o755)
# Write serialized inputs.
inputs = copy.deepcopy(data.input)
hydrate_input_references(inputs, data.process.input_schema)
hydrate_input_uploads(inputs, data.process.input_schema)
inputs_path = os.path.join(runtime_dir, PYTHON_INPUTS_FILENAME)
# XXX: Skip serialization of LazyStorageJSON. We should support
# LazyStorageJSON in Python processes on the new communication protocol
def default(obj):
"""Get default value."""
class_name = obj.__class__.__name__
if class_name == 'LazyStorageJSON':
return ''
raise TypeError(f'Object of type {class_name} is not JSON serializable')
with open(inputs_path, 'w') as file:
json.dump(inputs, file, default=default)
# Generate volume maps required to expose needed files.
volume_maps = {
PYTHON_RUNTIME_DIRNAME: PYTHON_RUNTIME_VOLUME,
PYTHON_PROGRAM_FILENAME: PYTHON_PROGRAM_VOLUME,
PYTHON_INPUTS_FILENAME: PYTHON_INPUTS_VOLUME,
}
return volume_maps | python | def prepare_runtime(self, runtime_dir, data):
"""Prepare runtime directory."""
# Copy over Python process runtime (resolwe.process).
import resolwe.process as runtime_package
src_dir = os.path.dirname(inspect.getsourcefile(runtime_package))
dest_package_dir = os.path.join(runtime_dir, PYTHON_RUNTIME_DIRNAME, 'resolwe', 'process')
shutil.copytree(src_dir, dest_package_dir)
os.chmod(dest_package_dir, 0o755)
# Write python source file.
source = data.process.run.get('program', '')
program_path = os.path.join(runtime_dir, PYTHON_PROGRAM_FILENAME)
with open(program_path, 'w') as file:
file.write(source)
os.chmod(program_path, 0o755)
# Write serialized inputs.
inputs = copy.deepcopy(data.input)
hydrate_input_references(inputs, data.process.input_schema)
hydrate_input_uploads(inputs, data.process.input_schema)
inputs_path = os.path.join(runtime_dir, PYTHON_INPUTS_FILENAME)
# XXX: Skip serialization of LazyStorageJSON. We should support
# LazyStorageJSON in Python processes on the new communication protocol
def default(obj):
"""Get default value."""
class_name = obj.__class__.__name__
if class_name == 'LazyStorageJSON':
return ''
raise TypeError(f'Object of type {class_name} is not JSON serializable')
with open(inputs_path, 'w') as file:
json.dump(inputs, file, default=default)
# Generate volume maps required to expose needed files.
volume_maps = {
PYTHON_RUNTIME_DIRNAME: PYTHON_RUNTIME_VOLUME,
PYTHON_PROGRAM_FILENAME: PYTHON_PROGRAM_VOLUME,
PYTHON_INPUTS_FILENAME: PYTHON_INPUTS_VOLUME,
}
return volume_maps | [
"def",
"prepare_runtime",
"(",
"self",
",",
"runtime_dir",
",",
"data",
")",
":",
"# Copy over Python process runtime (resolwe.process).",
"import",
"resolwe",
".",
"process",
"as",
"runtime_package",
"src_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"inspect"... | Prepare runtime directory. | [
"Prepare",
"runtime",
"directory",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/execution_engines/python/__init__.py#L53-L96 | train | 45,366 |
DataONEorg/d1_python | lib_common/src/d1_common/cert/jwt.py | get_subject_with_local_validation | def get_subject_with_local_validation(jwt_bu64, cert_obj):
"""Validate the JWT and return the subject it contains.
- The JWT is validated by checking that it was signed with a CN certificate.
- The returned subject can be trusted for authz and authn operations.
- Possible validation errors include:
- A trusted (TLS/SSL) connection could not be made to the CN holding the
signing certificate.
- The JWT could not be decoded.
- The JWT signature signature was invalid.
- The JWT claim set contains invalid "Not Before" or "Expiration Time" claims.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Returns:
- On successful validation, the subject contained in the JWT is returned.
- If validation fails for any reason, errors are logged and None is returned.
"""
try:
jwt_dict = validate_and_decode(jwt_bu64, cert_obj)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict) | python | def get_subject_with_local_validation(jwt_bu64, cert_obj):
"""Validate the JWT and return the subject it contains.
- The JWT is validated by checking that it was signed with a CN certificate.
- The returned subject can be trusted for authz and authn operations.
- Possible validation errors include:
- A trusted (TLS/SSL) connection could not be made to the CN holding the
signing certificate.
- The JWT could not be decoded.
- The JWT signature signature was invalid.
- The JWT claim set contains invalid "Not Before" or "Expiration Time" claims.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Returns:
- On successful validation, the subject contained in the JWT is returned.
- If validation fails for any reason, errors are logged and None is returned.
"""
try:
jwt_dict = validate_and_decode(jwt_bu64, cert_obj)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict) | [
"def",
"get_subject_with_local_validation",
"(",
"jwt_bu64",
",",
"cert_obj",
")",
":",
"try",
":",
"jwt_dict",
"=",
"validate_and_decode",
"(",
"jwt_bu64",
",",
"cert_obj",
")",
"except",
"JwtException",
"as",
"e",
":",
"return",
"log_jwt_bu64_info",
"(",
"loggin... | Validate the JWT and return the subject it contains.
- The JWT is validated by checking that it was signed with a CN certificate.
- The returned subject can be trusted for authz and authn operations.
- Possible validation errors include:
- A trusted (TLS/SSL) connection could not be made to the CN holding the
signing certificate.
- The JWT could not be decoded.
- The JWT signature signature was invalid.
- The JWT claim set contains invalid "Not Before" or "Expiration Time" claims.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Returns:
- On successful validation, the subject contained in the JWT is returned.
- If validation fails for any reason, errors are logged and None is returned. | [
"Validate",
"the",
"JWT",
"and",
"return",
"the",
"subject",
"it",
"contains",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L54-L88 | train | 45,367 |
DataONEorg/d1_python | lib_common/src/d1_common/cert/jwt.py | get_subject_without_validation | def get_subject_without_validation(jwt_bu64):
"""Extract subject from the JWT without validating the JWT.
- The extracted subject cannot be trusted for authn or authz.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
str: The subject contained in the JWT.
"""
try:
jwt_dict = get_jwt_dict(jwt_bu64)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict) | python | def get_subject_without_validation(jwt_bu64):
"""Extract subject from the JWT without validating the JWT.
- The extracted subject cannot be trusted for authn or authz.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
str: The subject contained in the JWT.
"""
try:
jwt_dict = get_jwt_dict(jwt_bu64)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict) | [
"def",
"get_subject_without_validation",
"(",
"jwt_bu64",
")",
":",
"try",
":",
"jwt_dict",
"=",
"get_jwt_dict",
"(",
"jwt_bu64",
")",
"except",
"JwtException",
"as",
"e",
":",
"return",
"log_jwt_bu64_info",
"(",
"logging",
".",
"error",
",",
"str",
"(",
"e",
... | Extract subject from the JWT without validating the JWT.
- The extracted subject cannot be trusted for authn or authz.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
str: The subject contained in the JWT. | [
"Extract",
"subject",
"from",
"the",
"JWT",
"without",
"validating",
"the",
"JWT",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L111-L131 | train | 45,368 |
DataONEorg/d1_python | lib_common/src/d1_common/cert/jwt.py | get_jwt_dict | def get_jwt_dict(jwt_bu64):
"""Parse Base64 encoded JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict containing Unicode strings.
- In addition, a SHA1 hash is added to the dict for convenience.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
dict: Values embedded in and derived from the JWT.
"""
jwt_tup = get_jwt_tup(jwt_bu64)
try:
jwt_dict = json.loads(jwt_tup[0].decode('utf-8'))
jwt_dict.update(json.loads(jwt_tup[1].decode('utf-8')))
jwt_dict['_sig_sha1'] = hashlib.sha1(jwt_tup[2]).hexdigest()
except TypeError as e:
raise JwtException('Decode failed. error="{}"'.format(e))
return jwt_dict | python | def get_jwt_dict(jwt_bu64):
"""Parse Base64 encoded JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict containing Unicode strings.
- In addition, a SHA1 hash is added to the dict for convenience.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
dict: Values embedded in and derived from the JWT.
"""
jwt_tup = get_jwt_tup(jwt_bu64)
try:
jwt_dict = json.loads(jwt_tup[0].decode('utf-8'))
jwt_dict.update(json.loads(jwt_tup[1].decode('utf-8')))
jwt_dict['_sig_sha1'] = hashlib.sha1(jwt_tup[2]).hexdigest()
except TypeError as e:
raise JwtException('Decode failed. error="{}"'.format(e))
return jwt_dict | [
"def",
"get_jwt_dict",
"(",
"jwt_bu64",
")",
":",
"jwt_tup",
"=",
"get_jwt_tup",
"(",
"jwt_bu64",
")",
"try",
":",
"jwt_dict",
"=",
"json",
".",
"loads",
"(",
"jwt_tup",
"[",
"0",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"jwt_dict",
".",
"update",... | Parse Base64 encoded JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict containing Unicode strings.
- In addition, a SHA1 hash is added to the dict for convenience.
Args:
jwt_bu64: bytes
JWT, encoded using a a URL safe flavor of Base64.
Returns:
dict: Values embedded in and derived from the JWT. | [
"Parse",
"Base64",
"encoded",
"JWT",
"and",
"return",
"as",
"a",
"dict",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L181-L203 | train | 45,369 |
DataONEorg/d1_python | lib_common/src/d1_common/cert/jwt.py | validate_and_decode | def validate_and_decode(jwt_bu64, cert_obj):
"""Validate the JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Raises:
JwtException: If validation fails.
Returns:
dict: Values embedded in the JWT.
"""
try:
return jwt.decode(
jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True
)
except jwt.InvalidTokenError as e:
raise JwtException('Signature is invalid. error="{}"'.format(str(e))) | python | def validate_and_decode(jwt_bu64, cert_obj):
"""Validate the JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Raises:
JwtException: If validation fails.
Returns:
dict: Values embedded in the JWT.
"""
try:
return jwt.decode(
jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True
)
except jwt.InvalidTokenError as e:
raise JwtException('Signature is invalid. error="{}"'.format(str(e))) | [
"def",
"validate_and_decode",
"(",
"jwt_bu64",
",",
"cert_obj",
")",
":",
"try",
":",
"return",
"jwt",
".",
"decode",
"(",
"jwt_bu64",
".",
"strip",
"(",
")",
",",
"cert_obj",
".",
"public_key",
"(",
")",
",",
"algorithms",
"=",
"[",
"'RS256'",
"]",
",... | Validate the JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict.
Args:
jwt_bu64: bytes
The JWT encoded using a a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Raises:
JwtException: If validation fails.
Returns:
dict: Values embedded in the JWT. | [
"Validate",
"the",
"JWT",
"and",
"return",
"as",
"a",
"dict",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L206-L231 | train | 45,370 |
DataONEorg/d1_python | lib_common/src/d1_common/cert/jwt.py | log_jwt_dict_info | def log_jwt_dict_info(log, msg_str, jwt_dict):
"""Dump JWT to log.
Args:
log: Logger
Logger to which to write the message.
msg_str: str
A message to write to the log before the JWT values.
jwt_dict: dict
JWT containing values to log.
Returns:
None
"""
d = ts_to_str(jwt_dict)
# Log known items in specific order, then the rest just sorted
log_list = [(b, d.pop(a)) for a, b, c in CLAIM_LIST if a in d] + [
(k, d[k]) for k in sorted(d)
]
list(
map(
log,
['{}:'.format(msg_str)] + [' {}: {}'.format(k, v) for k, v in log_list],
)
) | python | def log_jwt_dict_info(log, msg_str, jwt_dict):
"""Dump JWT to log.
Args:
log: Logger
Logger to which to write the message.
msg_str: str
A message to write to the log before the JWT values.
jwt_dict: dict
JWT containing values to log.
Returns:
None
"""
d = ts_to_str(jwt_dict)
# Log known items in specific order, then the rest just sorted
log_list = [(b, d.pop(a)) for a, b, c in CLAIM_LIST if a in d] + [
(k, d[k]) for k in sorted(d)
]
list(
map(
log,
['{}:'.format(msg_str)] + [' {}: {}'.format(k, v) for k, v in log_list],
)
) | [
"def",
"log_jwt_dict_info",
"(",
"log",
",",
"msg_str",
",",
"jwt_dict",
")",
":",
"d",
"=",
"ts_to_str",
"(",
"jwt_dict",
")",
"# Log known items in specific order, then the rest just sorted",
"log_list",
"=",
"[",
"(",
"b",
",",
"d",
".",
"pop",
"(",
"a",
")... | Dump JWT to log.
Args:
log: Logger
Logger to which to write the message.
msg_str: str
A message to write to the log before the JWT values.
jwt_dict: dict
JWT containing values to log.
Returns:
None | [
"Dump",
"JWT",
"to",
"log",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L234-L261 | train | 45,371 |
DataONEorg/d1_python | lib_common/src/d1_common/cert/jwt.py | ts_to_str | def ts_to_str(jwt_dict):
"""Convert timestamps in JWT to human readable dates.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with human readable
dates.
"""
d = ts_to_dt(jwt_dict)
for k, v in list(d.items()):
if isinstance(v, datetime.datetime):
d[k] = v.isoformat().replace('T', ' ')
return d | python | def ts_to_str(jwt_dict):
"""Convert timestamps in JWT to human readable dates.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with human readable
dates.
"""
d = ts_to_dt(jwt_dict)
for k, v in list(d.items()):
if isinstance(v, datetime.datetime):
d[k] = v.isoformat().replace('T', ' ')
return d | [
"def",
"ts_to_str",
"(",
"jwt_dict",
")",
":",
"d",
"=",
"ts_to_dt",
"(",
"jwt_dict",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"d",
".",
"items",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"datetime",
".",
"datetime",
")",
":",
... | Convert timestamps in JWT to human readable dates.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with human readable
dates. | [
"Convert",
"timestamps",
"in",
"JWT",
"to",
"human",
"readable",
"dates",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L284-L300 | train | 45,372 |
DataONEorg/d1_python | lib_common/src/d1_common/cert/jwt.py | ts_to_dt | def ts_to_dt(jwt_dict):
"""Convert timestamps in JWT to datetime objects.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with
datetime.datetime() objects.
"""
d = jwt_dict.copy()
for k, v in [v[:2] for v in CLAIM_LIST if v[2]]:
if k in jwt_dict:
d[k] = d1_common.date_time.dt_from_ts(jwt_dict[k])
return d | python | def ts_to_dt(jwt_dict):
"""Convert timestamps in JWT to datetime objects.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with
datetime.datetime() objects.
"""
d = jwt_dict.copy()
for k, v in [v[:2] for v in CLAIM_LIST if v[2]]:
if k in jwt_dict:
d[k] = d1_common.date_time.dt_from_ts(jwt_dict[k])
return d | [
"def",
"ts_to_dt",
"(",
"jwt_dict",
")",
":",
"d",
"=",
"jwt_dict",
".",
"copy",
"(",
")",
"for",
"k",
",",
"v",
"in",
"[",
"v",
"[",
":",
"2",
"]",
"for",
"v",
"in",
"CLAIM_LIST",
"if",
"v",
"[",
"2",
"]",
"]",
":",
"if",
"k",
"in",
"jwt_d... | Convert timestamps in JWT to datetime objects.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with
datetime.datetime() objects. | [
"Convert",
"timestamps",
"in",
"JWT",
"to",
"datetime",
"objects",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/jwt.py#L303-L319 | train | 45,373 |
genialis/resolwe | resolwe/flow/execution_engines/bash/__init__.py | ExecutionEngine._escape | def _escape(self, value):
"""Escape given value unless it is safe."""
if isinstance(value, SafeString):
return value
return shellescape.quote(value) | python | def _escape(self, value):
"""Escape given value unless it is safe."""
if isinstance(value, SafeString):
return value
return shellescape.quote(value) | [
"def",
"_escape",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"SafeString",
")",
":",
"return",
"value",
"return",
"shellescape",
".",
"quote",
"(",
"value",
")"
] | Escape given value unless it is safe. | [
"Escape",
"given",
"value",
"unless",
"it",
"is",
"safe",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/execution_engines/bash/__init__.py#L89-L94 | train | 45,374 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/sciobj_store.py | open_sciobj_file_by_pid_ctx | def open_sciobj_file_by_pid_ctx(pid, write=False):
"""Open the file containing the Science Object bytes of ``pid`` in the default
location within the tree of the local SciObj store.
If ``write`` is True, the file is opened for writing and any missing directories are
created. Return the file handle and file_url with the file location in a suitable
form for storing in the DB.
If nothing was written to the file, it is deleted.
"""
abs_path = get_abs_sciobj_file_path_by_pid(pid)
with open_sciobj_file_by_path_ctx(abs_path, write) as sciobj_file:
yield sciobj_file | python | def open_sciobj_file_by_pid_ctx(pid, write=False):
"""Open the file containing the Science Object bytes of ``pid`` in the default
location within the tree of the local SciObj store.
If ``write`` is True, the file is opened for writing and any missing directories are
created. Return the file handle and file_url with the file location in a suitable
form for storing in the DB.
If nothing was written to the file, it is deleted.
"""
abs_path = get_abs_sciobj_file_path_by_pid(pid)
with open_sciobj_file_by_path_ctx(abs_path, write) as sciobj_file:
yield sciobj_file | [
"def",
"open_sciobj_file_by_pid_ctx",
"(",
"pid",
",",
"write",
"=",
"False",
")",
":",
"abs_path",
"=",
"get_abs_sciobj_file_path_by_pid",
"(",
"pid",
")",
"with",
"open_sciobj_file_by_path_ctx",
"(",
"abs_path",
",",
"write",
")",
"as",
"sciobj_file",
":",
"yiel... | Open the file containing the Science Object bytes of ``pid`` in the default
location within the tree of the local SciObj store.
If ``write`` is True, the file is opened for writing and any missing directories are
created. Return the file handle and file_url with the file location in a suitable
form for storing in the DB.
If nothing was written to the file, it is deleted. | [
"Open",
"the",
"file",
"containing",
"the",
"Science",
"Object",
"bytes",
"of",
"pid",
"in",
"the",
"default",
"location",
"within",
"the",
"tree",
"of",
"the",
"local",
"SciObj",
"store",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/sciobj_store.py#L74-L87 | train | 45,375 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/sciobj_store.py | open_sciobj_file_by_path_ctx | def open_sciobj_file_by_path_ctx(abs_path, write=False):
"""Open the file containing the Science Object bytes at the custom location
``abs_path`` in the local filesystem.
If ``write`` is True, the file is opened for writing and any missing directores are
created. Return the file handle and file_url with the file location in a suitable
form for storing in the DB.
If nothing was written to the file, delete it.
"""
if write:
d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
try:
with open(abs_path, 'wb' if write else 'rb') as sciobj_file:
yield sciobj_file
finally:
if os.path.exists(abs_path) and not os.path.getsize(abs_path):
os.unlink(abs_path) | python | def open_sciobj_file_by_path_ctx(abs_path, write=False):
"""Open the file containing the Science Object bytes at the custom location
``abs_path`` in the local filesystem.
If ``write`` is True, the file is opened for writing and any missing directores are
created. Return the file handle and file_url with the file location in a suitable
form for storing in the DB.
If nothing was written to the file, delete it.
"""
if write:
d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
try:
with open(abs_path, 'wb' if write else 'rb') as sciobj_file:
yield sciobj_file
finally:
if os.path.exists(abs_path) and not os.path.getsize(abs_path):
os.unlink(abs_path) | [
"def",
"open_sciobj_file_by_path_ctx",
"(",
"abs_path",
",",
"write",
"=",
"False",
")",
":",
"if",
"write",
":",
"d1_common",
".",
"utils",
".",
"filesystem",
".",
"create_missing_directories_for_file",
"(",
"abs_path",
")",
"try",
":",
"with",
"open",
"(",
"... | Open the file containing the Science Object bytes at the custom location
``abs_path`` in the local filesystem.
If ``write`` is True, the file is opened for writing and any missing directores are
created. Return the file handle and file_url with the file location in a suitable
form for storing in the DB.
If nothing was written to the file, delete it. | [
"Open",
"the",
"file",
"containing",
"the",
"Science",
"Object",
"bytes",
"at",
"the",
"custom",
"location",
"abs_path",
"in",
"the",
"local",
"filesystem",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/sciobj_store.py#L91-L109 | train | 45,376 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/sciobj_store.py | open_sciobj_file_by_pid | def open_sciobj_file_by_pid(pid, write=False):
"""Open the file containing the Science Object bytes at the custom location
``abs_path`` in the local filesystem for read."""
abs_path = get_abs_sciobj_file_path_by_pid(pid)
if write:
d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
return open_sciobj_file_by_path(abs_path, write) | python | def open_sciobj_file_by_pid(pid, write=False):
"""Open the file containing the Science Object bytes at the custom location
``abs_path`` in the local filesystem for read."""
abs_path = get_abs_sciobj_file_path_by_pid(pid)
if write:
d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
return open_sciobj_file_by_path(abs_path, write) | [
"def",
"open_sciobj_file_by_pid",
"(",
"pid",
",",
"write",
"=",
"False",
")",
":",
"abs_path",
"=",
"get_abs_sciobj_file_path_by_pid",
"(",
"pid",
")",
"if",
"write",
":",
"d1_common",
".",
"utils",
".",
"filesystem",
".",
"create_missing_directories_for_file",
"... | Open the file containing the Science Object bytes at the custom location
``abs_path`` in the local filesystem for read. | [
"Open",
"the",
"file",
"containing",
"the",
"Science",
"Object",
"bytes",
"at",
"the",
"custom",
"location",
"abs_path",
"in",
"the",
"local",
"filesystem",
"for",
"read",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/sciobj_store.py#L134-L140 | train | 45,377 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/sciobj_store.py | open_sciobj_file_by_path | def open_sciobj_file_by_path(abs_path, write=False):
"""Open a SciObj file for read or write. If opened for write, create any missing
directories. For a SciObj stored in the default SciObj store, the path includes the
PID hash based directory levels.
This is the only method in GMN that opens SciObj files, so can be modified to
customize the SciObj storage locations and can be mocked for testing.
Note that when a SciObj is created by a client via MNStorage.create(), Django
streams the SciObj bytes to a temporary file or memory location as set by
``FILE_UPLOAD_TEMP_DIR`` and related settings.
"""
if write:
d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
return open(abs_path, 'wb' if write else 'rb') | python | def open_sciobj_file_by_path(abs_path, write=False):
"""Open a SciObj file for read or write. If opened for write, create any missing
directories. For a SciObj stored in the default SciObj store, the path includes the
PID hash based directory levels.
This is the only method in GMN that opens SciObj files, so can be modified to
customize the SciObj storage locations and can be mocked for testing.
Note that when a SciObj is created by a client via MNStorage.create(), Django
streams the SciObj bytes to a temporary file or memory location as set by
``FILE_UPLOAD_TEMP_DIR`` and related settings.
"""
if write:
d1_common.utils.filesystem.create_missing_directories_for_file(abs_path)
return open(abs_path, 'wb' if write else 'rb') | [
"def",
"open_sciobj_file_by_path",
"(",
"abs_path",
",",
"write",
"=",
"False",
")",
":",
"if",
"write",
":",
"d1_common",
".",
"utils",
".",
"filesystem",
".",
"create_missing_directories_for_file",
"(",
"abs_path",
")",
"return",
"open",
"(",
"abs_path",
",",
... | Open a SciObj file for read or write. If opened for write, create any missing
directories. For a SciObj stored in the default SciObj store, the path includes the
PID hash based directory levels.
This is the only method in GMN that opens SciObj files, so can be modified to
customize the SciObj storage locations and can be mocked for testing.
Note that when a SciObj is created by a client via MNStorage.create(), Django
streams the SciObj bytes to a temporary file or memory location as set by
``FILE_UPLOAD_TEMP_DIR`` and related settings. | [
"Open",
"a",
"SciObj",
"file",
"for",
"read",
"or",
"write",
".",
"If",
"opened",
"for",
"write",
"create",
"any",
"missing",
"directories",
".",
"For",
"a",
"SciObj",
"stored",
"in",
"the",
"default",
"SciObj",
"store",
"the",
"path",
"includes",
"the",
... | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/sciobj_store.py#L143-L158 | train | 45,378 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/sciobj_store.py | get_rel_sciobj_file_path | def get_rel_sciobj_file_path(pid):
"""Get the relative local path to the file holding an object's bytes.
- The path is relative to settings.OBJECT_STORE_PATH
- There is a one-to-one mapping between pid and path
- The path is based on a SHA1 hash. It's now possible to craft SHA1 collisions, but
it's so unlikely that we ignore it for now
- The path may or may not exist (yet).
"""
hash_str = hashlib.sha1(pid.encode('utf-8')).hexdigest()
return os.path.join(hash_str[:2], hash_str[2:4], hash_str) | python | def get_rel_sciobj_file_path(pid):
"""Get the relative local path to the file holding an object's bytes.
- The path is relative to settings.OBJECT_STORE_PATH
- There is a one-to-one mapping between pid and path
- The path is based on a SHA1 hash. It's now possible to craft SHA1 collisions, but
it's so unlikely that we ignore it for now
- The path may or may not exist (yet).
"""
hash_str = hashlib.sha1(pid.encode('utf-8')).hexdigest()
return os.path.join(hash_str[:2], hash_str[2:4], hash_str) | [
"def",
"get_rel_sciobj_file_path",
"(",
"pid",
")",
":",
"hash_str",
"=",
"hashlib",
".",
"sha1",
"(",
"pid",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"hash_str",
"[",
":",
"2... | Get the relative local path to the file holding an object's bytes.
- The path is relative to settings.OBJECT_STORE_PATH
- There is a one-to-one mapping between pid and path
- The path is based on a SHA1 hash. It's now possible to craft SHA1 collisions, but
it's so unlikely that we ignore it for now
- The path may or may not exist (yet). | [
"Get",
"the",
"relative",
"local",
"path",
"to",
"the",
"file",
"holding",
"an",
"object",
"s",
"bytes",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/sciobj_store.py#L161-L172 | train | 45,379 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/sciobj_store.py | get_abs_sciobj_file_path_by_url | def get_abs_sciobj_file_path_by_url(file_url):
"""Get the absolute path to the file holding an object's bytes.
- ``file_url`` is an absolute or relative file:// url as stored in the DB.
"""
assert_sciobj_store_exists()
m = re.match(r'file://(.*?)/(.*)', file_url, re.IGNORECASE)
if m.group(1) == RELATIVE_PATH_MAGIC_HOST_STR:
return os.path.join(get_abs_sciobj_store_path(), m.group(2))
assert os.path.isabs(m.group(2))
return m.group(2) | python | def get_abs_sciobj_file_path_by_url(file_url):
"""Get the absolute path to the file holding an object's bytes.
- ``file_url`` is an absolute or relative file:// url as stored in the DB.
"""
assert_sciobj_store_exists()
m = re.match(r'file://(.*?)/(.*)', file_url, re.IGNORECASE)
if m.group(1) == RELATIVE_PATH_MAGIC_HOST_STR:
return os.path.join(get_abs_sciobj_store_path(), m.group(2))
assert os.path.isabs(m.group(2))
return m.group(2) | [
"def",
"get_abs_sciobj_file_path_by_url",
"(",
"file_url",
")",
":",
"assert_sciobj_store_exists",
"(",
")",
"m",
"=",
"re",
".",
"match",
"(",
"r'file://(.*?)/(.*)'",
",",
"file_url",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"m",
".",
"group",
"(",
"1",
")"... | Get the absolute path to the file holding an object's bytes.
- ``file_url`` is an absolute or relative file:// url as stored in the DB. | [
"Get",
"the",
"absolute",
"path",
"to",
"the",
"file",
"holding",
"an",
"object",
"s",
"bytes",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/sciobj_store.py#L254-L265 | train | 45,380 |
DataONEorg/d1_python | utilities/src/d1_util/find_gmn_instances.py | get_gmn_version | def get_gmn_version(base_url):
"""Return the version currently running on a GMN instance.
(is_gmn, version_or_error)
"""
home_url = d1_common.url.joinPathElements(base_url, 'home')
try:
response = requests.get(home_url, verify=False)
except requests.exceptions.ConnectionError as e:
return False, str(e)
if not response.ok:
return False, 'invalid /home. status={}'.format(response.status_code)
soup = bs4.BeautifulSoup(response.content, 'html.parser')
version_str = soup.find(string='GMN version:').find_next('td').string
if version_str is None:
return False, 'Parse failed'
return True, version_str | python | def get_gmn_version(base_url):
"""Return the version currently running on a GMN instance.
(is_gmn, version_or_error)
"""
home_url = d1_common.url.joinPathElements(base_url, 'home')
try:
response = requests.get(home_url, verify=False)
except requests.exceptions.ConnectionError as e:
return False, str(e)
if not response.ok:
return False, 'invalid /home. status={}'.format(response.status_code)
soup = bs4.BeautifulSoup(response.content, 'html.parser')
version_str = soup.find(string='GMN version:').find_next('td').string
if version_str is None:
return False, 'Parse failed'
return True, version_str | [
"def",
"get_gmn_version",
"(",
"base_url",
")",
":",
"home_url",
"=",
"d1_common",
".",
"url",
".",
"joinPathElements",
"(",
"base_url",
",",
"'home'",
")",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"home_url",
",",
"verify",
"=",
"False",
... | Return the version currently running on a GMN instance.
(is_gmn, version_or_error) | [
"Return",
"the",
"version",
"currently",
"running",
"on",
"a",
"GMN",
"instance",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/utilities/src/d1_util/find_gmn_instances.py#L179-L199 | train | 45,381 |
def extract_subjects(subject_info_xml, primary_str):
    """Extract the set of authenticated subjects from a DataONE SubjectInfo.

    Args:
        subject_info_xml : str
            A SubjectInfo XML document.

        primary_str : str
            A DataONE subject, typically a DataONE compliant serialization of the
            DN of the DataONE X.509 v3 certificate extension from which the
            SubjectInfo was extracted. It acts as the root of a tree: any subject
            in the SubjectInfo directly or indirectly connected to it is included
            in the returned set.

    Returns:
        set: Set of authenticated subjects. Always includes the primary subject.
        All subjects in the returned set are equivalent to ``primary_str`` for the
        purpose of access control for private science objects. If the SubjectInfo
        does not contain all relevant records, it is still considered valid, but
        the authenticated set will be incomplete.

    Notes:
        Only the subject strings and relationships in the SubjectInfo are used;
        other information about subjects, such as name and email address, is
        ignored. Subject strings are opaque; no attempt should be made to infer
        the type of subject from its content. See gen_subject_info_tree() for how
        the SubjectInfo is traversed and how invalid documents and corner cases
        (missing pair references, conflicting records, dangling isMemberOf
        subjects, circular references) are handled.
    """
    info_pyxb = deserialize_subject_info(subject_info_xml)
    return gen_subject_info_tree(info_pyxb, primary_str).get_subject_set()
"""Extract a set of authenticated subjects from a DataONE SubjectInfo.
- See subject_info_tree for details.
Args:
subject_info_xml : str
A SubjectInfo XML document.
primary_str : str
A DataONE subject, typically a DataONE compliant serialization of the DN of
the DataONE X.509 v3 certificate extension from which the SubjectInfo was
extracted.
The primary subject can be viewed as the root of a tree. Any subject in the
SubjectInfo that is directly or indirectly connected to the root subject is
included in the returned set of authenticated subjects.
Returns:
set: Set of authenticated subjects. Will always include the primary subject.
- All subjects in the returned set are equivalent to ``primary_str`` for the
purpose of access control for private science objects.
- If SubjectInfo does not contain all relevant records, it is still considered
to be valid, but the authenticated set will be incomplete.
- Only the subject strings and relationships in SubjectInfo are used by this
function. Other information about subjects, such as name and email address,
is ignored.
- No attempt should be made to infer type of subject from the content of a
subject string. Subject strings should be handled as random Unicode
sequences, each of which may designate an person subject, an equivalent
subject, or a group subject.
- To determine if an action is authorized, the returned set is checked against
the authorized_set for a given object. If one or more subjects exist in both
sets, the action is authorized. The check can be performed with high
performance using a set union operation in Python or an inner join in
Postgres.
- Subject types are only known and relevant while processing the SubjectInfo
type.
- The type of each subject in the authenticated_subjects and allowed_subjects
lists are unknown and irrelevant.
Notes:
Procedure:
The set of authenticated subjects is generated from the SubjectInfo and primary
subject using the following procedure:
- Start with empty set of subjects
- Add authenticatedUser
- If ``subject`` is not in set of subjects:
- Add ``subject``
- Iterate over Person records
- If Person.subject is ``subject``:
- If Person.verified is present and set:
- Add "verifiedUser"
- Iterate over Person.equivalentIdentity:
- Recursively add those subjects
- Iterate over Person.isMemberOf
- Recursively add those subjects, but ONLY check Group subjects
- Iterate over Group records
- If any Group.hasMember is ``subject``:
- Recursively add Group.subject (not group members)
Handling of various invalid SubjectInfo and corner cases:
- SubjectInfo XML doc that is not well formed
- Return an exception that includes a useful error message with the line number
of the issue
- person.isMemberOf and group.hasMember should always form pairs referencing
each other.
- One side of the pair is missing
- Process the available side as normal
- person.isMemberOf subject references a person or equivalent instead of a
group
- Only Group subjects are searched for isMemberOf references, so only the
referenced Group subject is added to the list of authorized subjects
- Multiple Person or Group records conflict by using the same subject
- The records are handled as equivalents
- person.isMemberOf subject does not reference a known subject
- If the Person containing the dangling isMemberOf IS NOT connected with the
authenticated subject, the whole record, including the isMemberOf subject is
simply ignored
- If it IS connected with an authenticated subject, the isMemberOf subject is
authenticated and recursive processing of the subject is skipped
- Circular references
- Handled by skipping recursive add for subjects that are already added
- See the unit tests for example SubjectInfo XML documents for each of these
issues and the expected results.
"""
subject_info_pyxb = deserialize_subject_info(subject_info_xml)
subject_info_tree = gen_subject_info_tree(subject_info_pyxb, primary_str)
return subject_info_tree.get_subject_set() | [
"def",
"extract_subjects",
"(",
"subject_info_xml",
",",
"primary_str",
")",
":",
"subject_info_pyxb",
"=",
"deserialize_subject_info",
"(",
"subject_info_xml",
")",
"subject_info_tree",
"=",
"gen_subject_info_tree",
"(",
"subject_info_pyxb",
",",
"primary_str",
")",
"ret... | Extract a set of authenticated subjects from a DataONE SubjectInfo.
- See subject_info_tree for details.
Args:
subject_info_xml : str
A SubjectInfo XML document.
primary_str : str
A DataONE subject, typically a DataONE compliant serialization of the DN of
the DataONE X.509 v3 certificate extension from which the SubjectInfo was
extracted.
The primary subject can be viewed as the root of a tree. Any subject in the
SubjectInfo that is directly or indirectly connected to the root subject is
included in the returned set of authenticated subjects.
Returns:
set: Set of authenticated subjects. Will always include the primary subject.
- All subjects in the returned set are equivalent to ``primary_str`` for the
purpose of access control for private science objects.
- If SubjectInfo does not contain all relevant records, it is still considered
to be valid, but the authenticated set will be incomplete.
- Only the subject strings and relationships in SubjectInfo are used by this
function. Other information about subjects, such as name and email address,
is ignored.
- No attempt should be made to infer type of subject from the content of a
subject string. Subject strings should be handled as random Unicode
sequences, each of which may designate an person subject, an equivalent
subject, or a group subject.
- To determine if an action is authorized, the returned set is checked against
the authorized_set for a given object. If one or more subjects exist in both
sets, the action is authorized. The check can be performed with high
performance using a set union operation in Python or an inner join in
Postgres.
- Subject types are only known and relevant while processing the SubjectInfo
type.
- The type of each subject in the authenticated_subjects and allowed_subjects
lists are unknown and irrelevant.
Notes:
Procedure:
The set of authenticated subjects is generated from the SubjectInfo and primary
subject using the following procedure:
- Start with empty set of subjects
- Add authenticatedUser
- If ``subject`` is not in set of subjects:
- Add ``subject``
- Iterate over Person records
- If Person.subject is ``subject``:
- If Person.verified is present and set:
- Add "verifiedUser"
- Iterate over Person.equivalentIdentity:
- Recursively add those subjects
- Iterate over Person.isMemberOf
- Recursively add those subjects, but ONLY check Group subjects
- Iterate over Group records
- If any Group.hasMember is ``subject``:
- Recursively add Group.subject (not group members)
Handling of various invalid SubjectInfo and corner cases:
- SubjectInfo XML doc that is not well formed
- Return an exception that includes a useful error message with the line number
of the issue
- person.isMemberOf and group.hasMember should always form pairs referencing
each other.
- One side of the pair is missing
- Process the available side as normal
- person.isMemberOf subject references a person or equivalent instead of a
group
- Only Group subjects are searched for isMemberOf references, so only the
referenced Group subject is added to the list of authorized subjects
- Multiple Person or Group records conflict by using the same subject
- The records are handled as equivalents
- person.isMemberOf subject does not reference a known subject
- If the Person containing the dangling isMemberOf IS NOT connected with the
authenticated subject, the whole record, including the isMemberOf subject is
simply ignored
- If it IS connected with an authenticated subject, the isMemberOf subject is
authenticated and recursive processing of the subject is skipped
- Circular references
- Handled by skipping recursive add for subjects that are already added
- See the unit tests for example SubjectInfo XML documents for each of these
issues and the expected results. | [
"Extract",
"a",
"set",
"of",
"authenticated",
"subjects",
"from",
"a",
"DataONE",
"SubjectInfo",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L144-L249 | train | 45,382 |
def deserialize_subject_info(subject_info_xml):
    """Deserialize a SubjectInfo XML document to a native PyXB object.

    Args:
        subject_info_xml: str
            SubjectInfo XML doc

    Returns:
        SubjectInfo PyXB object

    Raises:
        d1_common.types.exceptions.InvalidToken: If the document cannot be
            deserialized.
    """
    try:
        return d1_common.xml.deserialize(subject_info_xml)
    except ValueError as e:
        msg = 'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format(
            subject_info_xml, str(e)
        )
        raise d1_common.types.exceptions.InvalidToken(0, msg)
) | python | def deserialize_subject_info(subject_info_xml):
"""Deserialize SubjectInfo XML doc to native object.
Args:
subject_info_xml: str
SubjectInfo XML doc
Returns:
SubjectInfo PyXB object
"""
try:
return d1_common.xml.deserialize(subject_info_xml)
except ValueError as e:
raise d1_common.types.exceptions.InvalidToken(
0,
'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format(
subject_info_xml, str(e)
),
) | [
"def",
"deserialize_subject_info",
"(",
"subject_info_xml",
")",
":",
"try",
":",
"return",
"d1_common",
".",
"xml",
".",
"deserialize",
"(",
"subject_info_xml",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"d1_common",
".",
"types",
".",
"exceptions",
... | Deserialize SubjectInfo XML doc to native object.
Args:
subject_info_xml: str
SubjectInfo XML doc
Returns:
SubjectInfo PyXB object | [
"Deserialize",
"SubjectInfo",
"XML",
"doc",
"to",
"native",
"object",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L252-L271 | train | 45,383 |
def gen_subject_info_tree(subject_info_pyxb, authn_subj, include_duplicates=False):
    """Convert the flat, self referential lists in a SubjectInfo to a tree.

    Args:
        subject_info_pyxb: SubjectInfo PyXB object

        authn_subj: str
            The authenticated subject that becomes the root subject of the tree
            built from the SubjectInfo. Only subjects authenticated by a direct
            or indirect connection to this subject are included.

        include_duplicates:
            Keep branches whose subjects were already included via other
            branches. Including duplicates gives a more complete view of the
            SubjectInfo when the tree is intended for rendering.

    Returns:
        SubjectInfoNode: Tree of nodes holding information about subjects that
        are directly or indirectly connected to the authenticated subject at the
        root.
    """

    class _TraversalState:
        """Mutable bag of state shared by the recursive helper functions."""

    state = _TraversalState()
    state.subject_info_pyxb = subject_info_pyxb
    state.include_duplicates = include_duplicates
    state.visited_set = set()
    state.tree = SubjectInfoNode("Root", TYPE_NODE_TAG)

    _add_subject(state, state.tree, authn_subj)
    symbolic_node = state.tree.add_child("Symbolic", TYPE_NODE_TAG)
    _add_subject(state, symbolic_node, d1_common.const.SUBJECT_AUTHENTICATED)
    _trim_tree(state)
    return state.tree
return state.tree | python | def gen_subject_info_tree(subject_info_pyxb, authn_subj, include_duplicates=False):
"""Convert the flat, self referential lists in the SubjectInfo to a tree structure.
Args:
subject_info_pyxb: SubjectInfo PyXB object
authn_subj: str
The authenticated subject that becomes the root subject in the tree of
subjects built from the SubjectInfo.
Only subjects that are authenticated by a direct or indirect connection to
this subject are included in the tree.
include_duplicates:
Include branches of the tree that contain subjects that have already been
included via other branches.
If the tree is intended for rendering, including the duplicates will
provide a more complete view of the SubjectInfo.
Returns:
SubjectInfoNode : Tree of nodes holding information about subjects that are
directly or indirectly connected to the authenticated subject in the root.
"""
class State:
"""self."""
pass
state = State()
state.subject_info_pyxb = subject_info_pyxb
state.include_duplicates = include_duplicates
state.visited_set = set()
state.tree = SubjectInfoNode("Root", TYPE_NODE_TAG)
_add_subject(state, state.tree, authn_subj)
symbolic_node = state.tree.add_child("Symbolic", TYPE_NODE_TAG)
_add_subject(state, symbolic_node, d1_common.const.SUBJECT_AUTHENTICATED)
_trim_tree(state)
return state.tree | [
"def",
"gen_subject_info_tree",
"(",
"subject_info_pyxb",
",",
"authn_subj",
",",
"include_duplicates",
"=",
"False",
")",
":",
"class",
"State",
":",
"\"\"\"self.\"\"\"",
"pass",
"state",
"=",
"State",
"(",
")",
"state",
".",
"subject_info_pyxb",
"=",
"subject_in... | Convert the flat, self referential lists in the SubjectInfo to a tree structure.
Args:
subject_info_pyxb: SubjectInfo PyXB object
authn_subj: str
The authenticated subject that becomes the root subject in the tree of
subjects built from the SubjectInfo.
Only subjects that are authenticated by a direct or indirect connection to
this subject are included in the tree.
include_duplicates:
Include branches of the tree that contain subjects that have already been
included via other branches.
If the tree is intended for rendering, including the duplicates will
provide a more complete view of the SubjectInfo.
Returns:
SubjectInfoNode : Tree of nodes holding information about subjects that are
directly or indirectly connected to the authenticated subject in the root. | [
"Convert",
"the",
"flat",
"self",
"referential",
"lists",
"in",
"the",
"SubjectInfo",
"to",
"a",
"tree",
"structure",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L275-L318 | train | 45,384 |
def _trim_tree(state):
    """Trim empty leaf nodes from the tree.

    - To simplify the tree conversion, empty nodes are added before it is known if
      they will contain items that connect back to the authenticated subject. If
      there are no connections, the nodes remain empty, which causes them to be
      removed here.

    - Removing a leaf node may cause the parent to become a new empty leaf node,
      so the pass is repeated until there are no more empty leaf nodes.
    """
    removed_any = False
    # Iterate over a copy, as removals mutate the tree during traversal.
    for node in list(state.tree.leaf_node_gen):
        # The root node has no parent and must never be removed.
        if node.type_str == TYPE_NODE_TAG and node.parent is not None:
            node.parent.child_list.remove(node)
            removed_any = True
    # Fixed: the previous version recursed unconditionally (no base case), so it
    # never terminated once no empty leaves remained, eventually raising
    # RecursionError. Recurse only while the tree is still shrinking.
    if removed_any:
        _trim_tree(state)
"""Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes.
"""
for n in list(state.tree.leaf_node_gen):
if n.type_str == TYPE_NODE_TAG:
n.parent.child_list.remove(n)
return _trim_tree(state) | [
"def",
"_trim_tree",
"(",
"state",
")",
":",
"for",
"n",
"in",
"list",
"(",
"state",
".",
"tree",
".",
"leaf_node_gen",
")",
":",
"if",
"n",
".",
"type_str",
"==",
"TYPE_NODE_TAG",
":",
"n",
".",
"parent",
".",
"child_list",
".",
"remove",
"(",
"n",
... | Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes. | [
"Trim",
"empty",
"leaf",
"nodes",
"from",
"the",
"tree",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L378-L392 | train | 45,385 |
def get_path_str(self, sep=os.path.sep, type_str=None):
    """Get the path from the root of the tree to this node.

    Args:
        sep: str
            One or more characters to insert between each element in the path.
            Defaults to "/" on Unix and "\\" on Windows.

        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only nodes of that
            type contribute to the path.

    Returns:
        str: String describing the path from the root to this node.
    """
    label_list = [
        node.label_str
        for node in self.parent_gen
        if type_str is None or type_str == node.type_str
    ]
    label_list.reverse()
    return sep.join(label_list)
"""Get path from root to this node.
Args:
sep: str
One or more characters to insert between each element in the path.
Defaults to "/" on Unix and "\" on Windows.
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
str: String describing the path from the root to this node.
"""
return sep.join(
list(
reversed(
[
v.label_str
for v in self.parent_gen
if type_str in (None, v.type_str)
]
)
)
) | [
"def",
"get_path_str",
"(",
"self",
",",
"sep",
"=",
"os",
".",
"path",
".",
"sep",
",",
"type_str",
"=",
"None",
")",
":",
"return",
"sep",
".",
"join",
"(",
"list",
"(",
"reversed",
"(",
"[",
"v",
".",
"label_str",
"for",
"v",
"in",
"self",
"."... | Get path from root to this node.
Args:
sep: str
One or more characters to insert between each element in the path.
Defaults to "/" on Unix and "\" on Windows.
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
str: String describing the path from the root to this node. | [
"Get",
"path",
"from",
"root",
"to",
"this",
"node",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L452-L478 | train | 45,386 |
def get_leaf_node_path_list(self, sep=os.path.sep, type_str=None):
    """Get the paths to all leaf nodes of the tree rooted at this node.

    Args:
        sep: str
            One or more characters to insert between each element in the path.
            Defaults to "/" on Unix and "\\" on Windows.

        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only nodes of that
            type contribute to the paths.

    Returns:
        list of str: The path to each leaf node of the tree rooted at this node.
    """
    path_list = []
    for leaf_node in self.leaf_node_gen:
        path_list.append(leaf_node.get_path_str(sep, type_str))
    return path_list
"""Get paths for all leaf nodes for the tree rooted at this node.
Args:
sep: str
One or more characters to insert between each element in the path.
Defaults to "/" on Unix and "\" on Windows.
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
list of str: The paths to the leaf nodes for the tree rooted at this node.
"""
return [v.get_path_str(sep, type_str) for v in self.leaf_node_gen] | [
"def",
"get_leaf_node_path_list",
"(",
"self",
",",
"sep",
"=",
"os",
".",
"path",
".",
"sep",
",",
"type_str",
"=",
"None",
")",
":",
"return",
"[",
"v",
".",
"get_path_str",
"(",
"sep",
",",
"type_str",
")",
"for",
"v",
"in",
"self",
".",
"leaf_nod... | Get paths for all leaf nodes for the tree rooted at this node.
Args:
sep: str
One or more characters to insert between each element in the path.
Defaults to "/" on Unix and "\" on Windows.
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
list of str: The paths to the leaf nodes for the tree rooted at this node. | [
"Get",
"paths",
"for",
"all",
"leaf",
"nodes",
"for",
"the",
"tree",
"rooted",
"at",
"this",
"node",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L480-L496 | train | 45,387 |
def get_path_list(self, type_str=None):
    """Get the labels of the nodes on the path from the root down to this node.

    Args:
        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only nodes of that
            type are included.

    Returns:
        list of str: The labels of the nodes leading up to this node from the
        root.
    """
    label_list = [
        node.label_str
        for node in self.parent_gen
        if type_str is None or type_str == node.type_str
    ]
    label_list.reverse()
    return label_list
"""Get list of the labels of the nodes leading up to this node from the root.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
list of str: The labels of the nodes leading up to this node from the root.
"""
return list(
reversed(
[v.label_str for v in self.parent_gen if type_str in (None, v.type_str)]
)
) | [
"def",
"get_path_list",
"(",
"self",
",",
"type_str",
"=",
"None",
")",
":",
"return",
"list",
"(",
"reversed",
"(",
"[",
"v",
".",
"label_str",
"for",
"v",
"in",
"self",
".",
"parent_gen",
"if",
"type_str",
"in",
"(",
"None",
",",
"v",
".",
"type_st... | Get list of the labels of the nodes leading up to this node from the root.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
list of str: The labels of the nodes leading up to this node from the root. | [
"Get",
"list",
"of",
"the",
"labels",
"of",
"the",
"nodes",
"leading",
"up",
"to",
"this",
"node",
"from",
"the",
"root",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L498-L514 | train | 45,388 |
def get_label_set(self, type_str=None):
    """Get the set of labels in the tree rooted at this node.

    Args:
        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only labels of
            nodes of that type are included.

    Returns:
        set: The labels of all matching nodes in the tree rooted at this node.
    """
    label_set = set()
    for node in self.node_gen:
        if type_str is None or type_str == node.type_str:
            label_set.add(node.label_str)
    return label_set
"""Get a set of label_str for the tree rooted at this node.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root.
"""
return {v.label_str for v in self.node_gen if type_str in (None, v.type_str)} | [
"def",
"get_label_set",
"(",
"self",
",",
"type_str",
"=",
"None",
")",
":",
"return",
"{",
"v",
".",
"label_str",
"for",
"v",
"in",
"self",
".",
"node_gen",
"if",
"type_str",
"in",
"(",
"None",
",",
"v",
".",
"type_str",
")",
"}"
] | Get a set of label_str for the tree rooted at this node.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root. | [
"Get",
"a",
"set",
"of",
"label_str",
"for",
"the",
"tree",
"rooted",
"at",
"this",
"node",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/subject_info.py#L521-L533 | train | 45,389 |
def start_task_type(self, task_type_str, total_task_count):
    """Register a new type of task that is about to be processed.

    Typically called just before entering a loop that processes many tasks of
    the given type.

    Args:
        task_type_str (str):
            The name of the task, used as a dict key and printed in the progress
            updates.

        total_task_count (int):
            The total number of tasks of the new type that will be processed.

    This starts the timer that is used for providing an ETA for completing all
    tasks of the given type. The task type is included in progress updates until
    end_task_type() is called.
    """
    assert task_type_str not in self._task_dict, "Task type has already been started"
    task_info_dict = {
        "start_time": time.time(),
        "total_task_count": total_task_count,
        "task_idx": 0,
    }
    self._task_dict[task_type_str] = task_info_dict
"""Call when about to start processing a new type of task, typically just before
entering a loop that processes many task of the given type.
Args:
task_type_str (str):
The name of the task, used as a dict key and printed in the progress
updates.
total_task_count (int):
The total number of the new type of task that will be processed.
This starts the timer that is used for providing an ETA for completing all tasks
of the given type.
The task type is included in progress updates until end_task_type() is called.
"""
assert (
task_type_str not in self._task_dict
), "Task type has already been started"
self._task_dict[task_type_str] = {
"start_time": time.time(),
"total_task_count": total_task_count,
"task_idx": 0,
} | [
"def",
"start_task_type",
"(",
"self",
",",
"task_type_str",
",",
"total_task_count",
")",
":",
"assert",
"(",
"task_type_str",
"not",
"in",
"self",
".",
"_task_dict",
")",
",",
"\"Task type has already been started\"",
"self",
".",
"_task_dict",
"[",
"task_type_str... | Call when about to start processing a new type of task, typically just before
entering a loop that processes many task of the given type.
Args:
task_type_str (str):
The name of the task, used as a dict key and printed in the progress
updates.
total_task_count (int):
The total number of the new type of task that will be processed.
This starts the timer that is used for providing an ETA for completing all tasks
of the given type.
The task type is included in progress updates until end_task_type() is called. | [
"Call",
"when",
"about",
"to",
"start",
"processing",
"a",
"new",
"type",
"of",
"task",
"typically",
"just",
"before",
"entering",
"a",
"loop",
"that",
"processes",
"many",
"task",
"of",
"the",
"given",
"type",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/utils/progress_logger.py#L126-L151 | train | 45,390 |
def end_task_type(self, task_type_str):
    """Unregister a task type after all tasks of that type have been processed.

    Typically called just after exiting a loop that processes many tasks of the
    given type.

    Progress messages logged at intervals will typically not include the final
    entry which shows that processing is 100% complete, so a final progress
    message is logged here.
    """
    assert (
        task_type_str in self._task_dict
    ), "Task type has not been started yet: {}".format(task_type_str)
    self._log_progress()
    self._task_dict.pop(task_type_str)
"""Call when processing of all tasks of the given type is completed, typically
just after exiting a loop that processes many tasks of the given type.
Progress messages logged at intervals will typically not include the final entry
which shows that processing is 100% complete, so a final progress message is
logged here.
"""
assert (
task_type_str in self._task_dict
), "Task type has not been started yet: {}".format(task_type_str)
self._log_progress()
del self._task_dict[task_type_str] | [
"def",
"end_task_type",
"(",
"self",
",",
"task_type_str",
")",
":",
"assert",
"(",
"task_type_str",
"in",
"self",
".",
"_task_dict",
")",
",",
"\"Task type has not been started yet: {}\"",
".",
"format",
"(",
"task_type_str",
")",
"self",
".",
"_log_progress",
"(... | Call when processing of all tasks of the given type is completed, typically
just after exiting a loop that processes many tasks of the given type.
Progress messages logged at intervals will typically not include the final entry
which shows that processing is 100% complete, so a final progress message is
logged here. | [
"Call",
"when",
"processing",
"of",
"all",
"tasks",
"of",
"the",
"given",
"type",
"is",
"completed",
"typically",
"just",
"after",
"exiting",
"a",
"loop",
"that",
"processes",
"many",
"tasks",
"of",
"the",
"given",
"type",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/utils/progress_logger.py#L155-L168 | train | 45,391 |
DataONEorg/d1_python | lib_common/src/d1_common/utils/progress_logger.py | ProgressLogger.start_task | def start_task(self, task_type_str, current_task_index=None):
"""Call when processing is about to start on a single task of the given task
type, typically at the top inside of the loop that processes the tasks.
Args:
task_type_str (str):
The name of the task, used as a dict key and printed in the progress
updates.
current_task_index (int):
If the task processing loop may skip or repeat tasks, the index of the
current task must be provided here. This parameter can normally be left
unset.
"""
assert (
task_type_str in self._task_dict
), "Task type has not been started yet: {}".format(task_type_str)
if current_task_index is not None:
self._task_dict[task_type_str]["task_idx"] = current_task_index
else:
self._task_dict[task_type_str]["task_idx"] += 1
self._log_progress_if_interval_elapsed() | python | def start_task(self, task_type_str, current_task_index=None):
"""Call when processing is about to start on a single task of the given task
type, typically at the top inside of the loop that processes the tasks.
Args:
task_type_str (str):
The name of the task, used as a dict key and printed in the progress
updates.
current_task_index (int):
If the task processing loop may skip or repeat tasks, the index of the
current task must be provided here. This parameter can normally be left
unset.
"""
assert (
task_type_str in self._task_dict
), "Task type has not been started yet: {}".format(task_type_str)
if current_task_index is not None:
self._task_dict[task_type_str]["task_idx"] = current_task_index
else:
self._task_dict[task_type_str]["task_idx"] += 1
self._log_progress_if_interval_elapsed() | [
"def",
"start_task",
"(",
"self",
",",
"task_type_str",
",",
"current_task_index",
"=",
"None",
")",
":",
"assert",
"(",
"task_type_str",
"in",
"self",
".",
"_task_dict",
")",
",",
"\"Task type has not been started yet: {}\"",
".",
"format",
"(",
"task_type_str",
... | Call when processing is about to start on a single task of the given task
type, typically at the top inside of the loop that processes the tasks.
Args:
task_type_str (str):
The name of the task, used as a dict key and printed in the progress
updates.
current_task_index (int):
If the task processing loop may skip or repeat tasks, the index of the
current task must be provided here. This parameter can normally be left
unset. | [
"Call",
"when",
"processing",
"is",
"about",
"to",
"start",
"on",
"a",
"single",
"task",
"of",
"the",
"given",
"task",
"type",
"typically",
"at",
"the",
"top",
"inside",
"of",
"the",
"loop",
"that",
"processes",
"the",
"tasks",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/utils/progress_logger.py#L171-L193 | train | 45,392 |
DataONEorg/d1_python | lib_common/src/d1_common/utils/progress_logger.py | ProgressLogger.event | def event(self, event_name):
"""Register an event that occurred during processing of a task of the given
type.
Args: event_name: str A name for a type of events. Events of the
same type are displayed as a single entry and a total count of
occurences.
"""
self._event_dict.setdefault(event_name, 0)
self._event_dict[event_name] += 1
self._log_progress_if_interval_elapsed() | python | def event(self, event_name):
"""Register an event that occurred during processing of a task of the given
type.
Args: event_name: str A name for a type of events. Events of the
same type are displayed as a single entry and a total count of
occurences.
"""
self._event_dict.setdefault(event_name, 0)
self._event_dict[event_name] += 1
self._log_progress_if_interval_elapsed() | [
"def",
"event",
"(",
"self",
",",
"event_name",
")",
":",
"self",
".",
"_event_dict",
".",
"setdefault",
"(",
"event_name",
",",
"0",
")",
"self",
".",
"_event_dict",
"[",
"event_name",
"]",
"+=",
"1",
"self",
".",
"_log_progress_if_interval_elapsed",
"(",
... | Register an event that occurred during processing of a task of the given
type.
Args: event_name: str A name for a type of events. Events of the
same type are displayed as a single entry and a total count of
occurences. | [
"Register",
"an",
"event",
"that",
"occurred",
"during",
"processing",
"of",
"a",
"task",
"of",
"the",
"given",
"type",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/utils/progress_logger.py#L195-L206 | train | 45,393 |
wilson-eft/wilson | wilson/run/wet/rge.py | admeig | def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau):
"""Compute the eigenvalues and eigenvectors for a QCD anomalous dimension
matrix that is defined in `adm.adm_s_X` where X is the name of the sector.
Supports memoization. Output analogous to `np.linalg.eig`."""
args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau
A = getattr(adm, 'adm_s_' + classname)(*args)
perm_keys = get_permissible_wcs(classname, f)
if perm_keys != 'all':
# remove disallowed rows & columns if necessary
A = A[perm_keys][:, perm_keys]
w, v = np.linalg.eig(A.T)
return w, v | python | def admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau):
"""Compute the eigenvalues and eigenvectors for a QCD anomalous dimension
matrix that is defined in `adm.adm_s_X` where X is the name of the sector.
Supports memoization. Output analogous to `np.linalg.eig`."""
args = f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau
A = getattr(adm, 'adm_s_' + classname)(*args)
perm_keys = get_permissible_wcs(classname, f)
if perm_keys != 'all':
# remove disallowed rows & columns if necessary
A = A[perm_keys][:, perm_keys]
w, v = np.linalg.eig(A.T)
return w, v | [
"def",
"admeig",
"(",
"classname",
",",
"f",
",",
"m_u",
",",
"m_d",
",",
"m_s",
",",
"m_c",
",",
"m_b",
",",
"m_e",
",",
"m_mu",
",",
"m_tau",
")",
":",
"args",
"=",
"f",
",",
"m_u",
",",
"m_d",
",",
"m_s",
",",
"m_c",
",",
"m_b",
",",
"m_... | Compute the eigenvalues and eigenvectors for a QCD anomalous dimension
matrix that is defined in `adm.adm_s_X` where X is the name of the sector.
Supports memoization. Output analogous to `np.linalg.eig`. | [
"Compute",
"the",
"eigenvalues",
"and",
"eigenvectors",
"for",
"a",
"QCD",
"anomalous",
"dimension",
"matrix",
"that",
"is",
"defined",
"in",
"adm",
".",
"adm_s_X",
"where",
"X",
"is",
"the",
"name",
"of",
"the",
"sector",
"."
] | 4164f55ff663d4f668c6e2b4575fd41562662cc9 | https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/wet/rge.py#L38-L50 | train | 45,394 |
wilson-eft/wilson | wilson/run/wet/rge.py | run_sector | def run_sector(sector, C_in, eta_s, f, p_in, p_out, qed_order=1, qcd_order=1):
r"""Solve the WET RGE for a specific sector.
Parameters:
- sector: sector of interest
- C_in: dictionary of Wilson coefficients
- eta_s: ratio of $\alpha_s$ at input and output scale
- f: number of active quark flavours
- p_in: running parameters at the input scale
- p_out: running parameters at the output scale
"""
Cdictout = OrderedDict()
classname = sectors[sector]
keylist = coeffs[sector]
if sector == 'dF=0':
perm_keys = get_permissible_wcs('dF0', f)
else:
perm_keys = get_permissible_wcs(sector, f)
if perm_keys != 'all':
# remove disallowed keys if necessary
keylist = np.asarray(keylist)[perm_keys]
C_input = np.array([C_in.get(key, 0) for key in keylist])
if np.count_nonzero(C_input) == 0 or classname == 'inv':
# nothing to do for SM-like WCs or RG invariant operators
C_result = C_input
else:
C_scaled = np.asarray([C_input[i] * scale_C(key, p_in) for i, key in enumerate(keylist)])
if qcd_order == 0:
Us = np.eye(len(C_scaled))
elif qcd_order == 1:
Us = getUs(classname, eta_s, f, **p_in)
if qed_order == 0:
Ue = np.zeros(C_scaled.shape)
elif qed_order == 1:
if qcd_order == 0:
Ue = getUe(classname, 1, f, **p_in)
else:
Ue = getUe(classname, eta_s, f, **p_in)
C_out = (Us + Ue) @ C_scaled
C_result = [C_out[i] / scale_C(key, p_out) for i, key in enumerate(keylist)]
for j in range(len(C_result)):
Cdictout[keylist[j]] = C_result[j]
return Cdictout | python | def run_sector(sector, C_in, eta_s, f, p_in, p_out, qed_order=1, qcd_order=1):
r"""Solve the WET RGE for a specific sector.
Parameters:
- sector: sector of interest
- C_in: dictionary of Wilson coefficients
- eta_s: ratio of $\alpha_s$ at input and output scale
- f: number of active quark flavours
- p_in: running parameters at the input scale
- p_out: running parameters at the output scale
"""
Cdictout = OrderedDict()
classname = sectors[sector]
keylist = coeffs[sector]
if sector == 'dF=0':
perm_keys = get_permissible_wcs('dF0', f)
else:
perm_keys = get_permissible_wcs(sector, f)
if perm_keys != 'all':
# remove disallowed keys if necessary
keylist = np.asarray(keylist)[perm_keys]
C_input = np.array([C_in.get(key, 0) for key in keylist])
if np.count_nonzero(C_input) == 0 or classname == 'inv':
# nothing to do for SM-like WCs or RG invariant operators
C_result = C_input
else:
C_scaled = np.asarray([C_input[i] * scale_C(key, p_in) for i, key in enumerate(keylist)])
if qcd_order == 0:
Us = np.eye(len(C_scaled))
elif qcd_order == 1:
Us = getUs(classname, eta_s, f, **p_in)
if qed_order == 0:
Ue = np.zeros(C_scaled.shape)
elif qed_order == 1:
if qcd_order == 0:
Ue = getUe(classname, 1, f, **p_in)
else:
Ue = getUe(classname, eta_s, f, **p_in)
C_out = (Us + Ue) @ C_scaled
C_result = [C_out[i] / scale_C(key, p_out) for i, key in enumerate(keylist)]
for j in range(len(C_result)):
Cdictout[keylist[j]] = C_result[j]
return Cdictout | [
"def",
"run_sector",
"(",
"sector",
",",
"C_in",
",",
"eta_s",
",",
"f",
",",
"p_in",
",",
"p_out",
",",
"qed_order",
"=",
"1",
",",
"qcd_order",
"=",
"1",
")",
":",
"Cdictout",
"=",
"OrderedDict",
"(",
")",
"classname",
"=",
"sectors",
"[",
"sector"... | r"""Solve the WET RGE for a specific sector.
Parameters:
- sector: sector of interest
- C_in: dictionary of Wilson coefficients
- eta_s: ratio of $\alpha_s$ at input and output scale
- f: number of active quark flavours
- p_in: running parameters at the input scale
- p_out: running parameters at the output scale | [
"r",
"Solve",
"the",
"WET",
"RGE",
"for",
"a",
"specific",
"sector",
"."
] | 4164f55ff663d4f668c6e2b4575fd41562662cc9 | https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/wet/rge.py#L119-L162 | train | 45,395 |
DataONEorg/d1_python | client_onedrive/src/d1_onedrive/impl/resolver/region.py | Resolver._merge_region_trees | def _merge_region_trees(self, dst_tree, src_tree, pid):
"""Merge conflicts occur if a folder in one tree is a file in the other.
As the files are PIDs, this can only happen if a PID matches one of the
geographical areas that the dataset covers and should be very rare. In such
conflicts, the destination wins.
"""
for k, v in list(src_tree.items()):
# Prepend an underscore to the administrative area names, to make them
# sort separately from the identifiers.
# k = '_' + k
if k not in dst_tree or dst_tree[k] is None:
dst_tree[k] = {}
dst_tree[k][pid] = None
if v is not None:
self._merge_region_trees(dst_tree[k], v, pid) | python | def _merge_region_trees(self, dst_tree, src_tree, pid):
"""Merge conflicts occur if a folder in one tree is a file in the other.
As the files are PIDs, this can only happen if a PID matches one of the
geographical areas that the dataset covers and should be very rare. In such
conflicts, the destination wins.
"""
for k, v in list(src_tree.items()):
# Prepend an underscore to the administrative area names, to make them
# sort separately from the identifiers.
# k = '_' + k
if k not in dst_tree or dst_tree[k] is None:
dst_tree[k] = {}
dst_tree[k][pid] = None
if v is not None:
self._merge_region_trees(dst_tree[k], v, pid) | [
"def",
"_merge_region_trees",
"(",
"self",
",",
"dst_tree",
",",
"src_tree",
",",
"pid",
")",
":",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"src_tree",
".",
"items",
"(",
")",
")",
":",
"# Prepend an underscore to the administrative area names, to make them",
"#... | Merge conflicts occur if a folder in one tree is a file in the other.
As the files are PIDs, this can only happen if a PID matches one of the
geographical areas that the dataset covers and should be very rare. In such
conflicts, the destination wins. | [
"Merge",
"conflicts",
"occur",
"if",
"a",
"folder",
"in",
"one",
"tree",
"is",
"a",
"file",
"in",
"the",
"other",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/resolver/region.py#L219-L235 | train | 45,396 |
genialis/resolwe | resolwe/flow/serializers/base.py | ResolweBaseSerializer.save | def save(self, **kwargs):
"""Override save method to catch handled errors and repackage them as 400 errors."""
try:
return super().save(**kwargs)
except SlugError as error:
raise ParseError(error) | python | def save(self, **kwargs):
"""Override save method to catch handled errors and repackage them as 400 errors."""
try:
return super().save(**kwargs)
except SlugError as error:
raise ParseError(error) | [
"def",
"save",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"super",
"(",
")",
".",
"save",
"(",
"*",
"*",
"kwargs",
")",
"except",
"SlugError",
"as",
"error",
":",
"raise",
"ParseError",
"(",
"error",
")"
] | Override save method to catch handled errors and repackage them as 400 errors. | [
"Override",
"save",
"method",
"to",
"catch",
"handled",
"errors",
"and",
"repackage",
"them",
"as",
"400",
"errors",
"."
] | f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86 | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/base.py#L63-L68 | train | 45,397 |
DataONEorg/d1_python | lib_client/src/d1_client/d1client.py | get_api_major_by_base_url | def get_api_major_by_base_url(base_url, *client_arg_list, **client_arg_dict):
"""Read the Node document from a node and return an int containing the latest D1 API
version supported by the node.
The Node document can always be reached through the v1 API and will list services
for v1 and any later APIs versions supported by the node.
"""
api_major = 0
client = d1_client.mnclient.MemberNodeClient(
base_url, *client_arg_list, **client_arg_dict
)
node_pyxb = client.getCapabilities()
for service_pyxb in node_pyxb.services.service:
if service_pyxb.available:
api_major = max(api_major, int(service_pyxb.version[-1]))
return api_major | python | def get_api_major_by_base_url(base_url, *client_arg_list, **client_arg_dict):
"""Read the Node document from a node and return an int containing the latest D1 API
version supported by the node.
The Node document can always be reached through the v1 API and will list services
for v1 and any later APIs versions supported by the node.
"""
api_major = 0
client = d1_client.mnclient.MemberNodeClient(
base_url, *client_arg_list, **client_arg_dict
)
node_pyxb = client.getCapabilities()
for service_pyxb in node_pyxb.services.service:
if service_pyxb.available:
api_major = max(api_major, int(service_pyxb.version[-1]))
return api_major | [
"def",
"get_api_major_by_base_url",
"(",
"base_url",
",",
"*",
"client_arg_list",
",",
"*",
"*",
"client_arg_dict",
")",
":",
"api_major",
"=",
"0",
"client",
"=",
"d1_client",
".",
"mnclient",
".",
"MemberNodeClient",
"(",
"base_url",
",",
"*",
"client_arg_list... | Read the Node document from a node and return an int containing the latest D1 API
version supported by the node.
The Node document can always be reached through the v1 API and will list services
for v1 and any later APIs versions supported by the node. | [
"Read",
"the",
"Node",
"document",
"from",
"a",
"node",
"and",
"return",
"an",
"int",
"containing",
"the",
"latest",
"D1",
"API",
"version",
"supported",
"by",
"the",
"node",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/d1client.py#L39-L55 | train | 45,398 |
DataONEorg/d1_python | gmn/src/d1_gmn/app/model_util.py | delete_unused_subjects | def delete_unused_subjects():
"""Delete any unused subjects from the database.
This is not strictly required as any unused subjects will automatically be reused if
needed in the future.
"""
# This causes Django to create a single join (check with query.query)
query = d1_gmn.app.models.Subject.objects.all()
query = query.filter(scienceobject_submitter__isnull=True)
query = query.filter(scienceobject_rights_holder__isnull=True)
query = query.filter(eventlog__isnull=True)
query = query.filter(permission__isnull=True)
query = query.filter(whitelistforcreateupdatedelete__isnull=True)
logger.debug('Deleting {} unused subjects:'.format(query.count()))
for s in query.all():
logging.debug(' {}'.format(s.subject))
query.delete() | python | def delete_unused_subjects():
"""Delete any unused subjects from the database.
This is not strictly required as any unused subjects will automatically be reused if
needed in the future.
"""
# This causes Django to create a single join (check with query.query)
query = d1_gmn.app.models.Subject.objects.all()
query = query.filter(scienceobject_submitter__isnull=True)
query = query.filter(scienceobject_rights_holder__isnull=True)
query = query.filter(eventlog__isnull=True)
query = query.filter(permission__isnull=True)
query = query.filter(whitelistforcreateupdatedelete__isnull=True)
logger.debug('Deleting {} unused subjects:'.format(query.count()))
for s in query.all():
logging.debug(' {}'.format(s.subject))
query.delete() | [
"def",
"delete_unused_subjects",
"(",
")",
":",
"# This causes Django to create a single join (check with query.query)",
"query",
"=",
"d1_gmn",
".",
"app",
".",
"models",
".",
"Subject",
".",
"objects",
".",
"all",
"(",
")",
"query",
"=",
"query",
".",
"filter",
... | Delete any unused subjects from the database.
This is not strictly required as any unused subjects will automatically be reused if
needed in the future. | [
"Delete",
"any",
"unused",
"subjects",
"from",
"the",
"database",
"."
] | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/model_util.py#L46-L65 | train | 45,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.