Column schema (per-column type and value-length / value-range statistics):

    repo               string   lengths 7 to 55
    path               string   lengths 4 to 127
    func_name          string   lengths 1 to 88
    original_string    string   lengths 75 to 19.8k
    language           string   1 class
    code               string   lengths 75 to 19.8k
    code_tokens        list     lengths 20 to 707
    docstring          string   lengths 3 to 17.3k
    docstring_tokens   list     lengths 3 to 222
    sha                string   lengths 40 to 40
    url                string   lengths 87 to 242
    partition          string   1 class
    idx                int64    0 to 252k
genialis/resolwe
resolwe/permissions/shortcuts.py
get_object_perms
def get_object_perms(obj, user=None): """Return permissions for given object in Resolwe specific format. Function returns permissions for given object ``obj`` in following format:: { "type": "group"/"user"/"public", "id": <group_or_user_id>, "name": <group_or_user_name>, "permissions": [<first_permission>, <second_permission>,...] } For ``public`` type ``id`` and ``name`` keys are omitted. If ``user`` parameter is given, permissions are limited only to given user, groups he belongs to and public permissions. :param obj: Resolwe's DB model's instance :type obj: a subclass of :class:`~resolwe.flow.models.base.BaseModel` :param user: Django user :type user: :class:`~django.contrib.auth.models.User` or :data:`None` :return: list of permissions object in described format :rtype: list """ def format_permissions(perms): """Remove model name from permission.""" ctype = ContentType.objects.get_for_model(obj) return [perm.replace('_{}'.format(ctype.name), '') for perm in perms] perms_list = [] if user: if user.is_authenticated: user_perms, group_perms = get_user_group_perms(user, obj) else: user_perms, group_perms = [], [] if user_perms != []: perms_list.append({ 'type': 'user', 'id': user.pk, 'name': user.get_full_name() or user.username, 'permissions': format_permissions(user_perms), }) if group_perms != []: for group_id, group_name, perms in group_perms: perms_list.append({ 'type': 'group', 'id': group_id, 'name': group_name, 'permissions': format_permissions(perms), }) else: user_options = { 'attach_perms': True, 'with_group_users': False } for user, perms in get_users_with_perms(obj, **user_options).items(): if user.username == settings.ANONYMOUS_USER_NAME: # public user is treated separately continue perms_list.append({ 'type': 'user', 'id': user.pk, 'name': user.get_full_name() or user.username, 'permissions': format_permissions(perms), }) group_options = { 'attach_perms': True, } for group, perms in get_groups_with_perms(obj, **group_options).items(): perms_list.append({ 'type': 'group', 'id': group.pk, 'name': group.name, 'permissions': format_permissions(perms), }) public_perms = get_perms(AnonymousUser(), obj) if public_perms != []: perms_list.append({ 'type': 'public', 'permissions': format_permissions(public_perms), }) return perms_list
python
def get_object_perms(obj, user=None): """Return permissions for given object in Resolwe specific format. Function returns permissions for given object ``obj`` in following format:: { "type": "group"/"user"/"public", "id": <group_or_user_id>, "name": <group_or_user_name>, "permissions": [<first_permission>, <second_permission>,...] } For ``public`` type ``id`` and ``name`` keys are omitted. If ``user`` parameter is given, permissions are limited only to given user, groups he belongs to and public permissions. :param obj: Resolwe's DB model's instance :type obj: a subclass of :class:`~resolwe.flow.models.base.BaseModel` :param user: Django user :type user: :class:`~django.contrib.auth.models.User` or :data:`None` :return: list of permissions object in described format :rtype: list """ def format_permissions(perms): """Remove model name from permission.""" ctype = ContentType.objects.get_for_model(obj) return [perm.replace('_{}'.format(ctype.name), '') for perm in perms] perms_list = [] if user: if user.is_authenticated: user_perms, group_perms = get_user_group_perms(user, obj) else: user_perms, group_perms = [], [] if user_perms != []: perms_list.append({ 'type': 'user', 'id': user.pk, 'name': user.get_full_name() or user.username, 'permissions': format_permissions(user_perms), }) if group_perms != []: for group_id, group_name, perms in group_perms: perms_list.append({ 'type': 'group', 'id': group_id, 'name': group_name, 'permissions': format_permissions(perms), }) else: user_options = { 'attach_perms': True, 'with_group_users': False } for user, perms in get_users_with_perms(obj, **user_options).items(): if user.username == settings.ANONYMOUS_USER_NAME: # public user is treated separately continue perms_list.append({ 'type': 'user', 'id': user.pk, 'name': user.get_full_name() or user.username, 'permissions': format_permissions(perms), }) group_options = { 'attach_perms': True, } for group, perms in get_groups_with_perms(obj, **group_options).items(): perms_list.append({ 'type': 'group', 'id': group.pk, 'name': group.name, 'permissions': format_permissions(perms), }) public_perms = get_perms(AnonymousUser(), obj) if public_perms != []: perms_list.append({ 'type': 'public', 'permissions': format_permissions(public_perms), }) return perms_list
[ "def", "get_object_perms", "(", "obj", ",", "user", "=", "None", ")", ":", "def", "format_permissions", "(", "perms", ")", ":", "\"\"\"Remove model name from permission.\"\"\"", "ctype", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "obj", ")", ...
Return permissions for given object in Resolwe specific format. Function returns permissions for given object ``obj`` in following format:: { "type": "group"/"user"/"public", "id": <group_or_user_id>, "name": <group_or_user_name>, "permissions": [<first_permission>, <second_permission>,...] } For ``public`` type ``id`` and ``name`` keys are omitted. If ``user`` parameter is given, permissions are limited only to given user, groups he belongs to and public permissions. :param obj: Resolwe's DB model's instance :type obj: a subclass of :class:`~resolwe.flow.models.base.BaseModel` :param user: Django user :type user: :class:`~django.contrib.auth.models.User` or :data:`None` :return: list of permissions object in described format :rtype: list
[ "Return", "permissions", "for", "given", "object", "in", "Resolwe", "specific", "format", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/permissions/shortcuts.py#L146-L235
train
45,000
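The row above documents the structure returned by `get_object_perms`. A minimal illustration of that shape, following the docstring; the ids, names, and permission strings below are made up, not taken from the dataset:

```python
# Hypothetical return value of get_object_perms(), per the docstring above.
perms = [
    {'type': 'user', 'id': 1, 'name': 'Jane Doe', 'permissions': ['view', 'edit']},
    {'type': 'group', 'id': 7, 'name': 'lab-group', 'permissions': ['view']},
    {'type': 'public', 'permissions': ['view']},  # 'id' and 'name' are omitted for public
]
```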
genialis/resolwe
resolwe/permissions/shortcuts.py
get_users_with_permission
def get_users_with_permission(obj, permission): """Return users with specific permission on object. :param obj: Object to return users for :param permission: Permission codename """ user_model = get_user_model() return user_model.objects.filter( userobjectpermission__object_pk=obj.pk, userobjectpermission__permission__codename=permission, ).distinct()
python
def get_users_with_permission(obj, permission): """Return users with specific permission on object. :param obj: Object to return users for :param permission: Permission codename """ user_model = get_user_model() return user_model.objects.filter( userobjectpermission__object_pk=obj.pk, userobjectpermission__permission__codename=permission, ).distinct()
[ "def", "get_users_with_permission", "(", "obj", ",", "permission", ")", ":", "user_model", "=", "get_user_model", "(", ")", "return", "user_model", ".", "objects", ".", "filter", "(", "userobjectpermission__object_pk", "=", "obj", ".", "pk", ",", "userobjectpermis...
Return users with specific permission on object. :param obj: Object to return users for :param permission: Permission codename
[ "Return", "users", "with", "specific", "permission", "on", "object", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/permissions/shortcuts.py#L397-L407
train
45,001
genialis/resolwe
resolwe/flow/consumers.py
PurgeConsumer.purge_run
def purge_run(self, event): """Run purge for the object with ``location_id`` specified in ``event`` argument.""" location_id = event['location_id'] verbosity = event['verbosity'] try: logger.info(__("Running purge for location id {}.", location_id)) location_purge(location_id=location_id, delete=True, verbosity=verbosity) except Exception: # pylint: disable=broad-except logger.exception("Error while purging location.", extra={'location_id': location_id})
python
def purge_run(self, event): """Run purge for the object with ``location_id`` specified in ``event`` argument.""" location_id = event['location_id'] verbosity = event['verbosity'] try: logger.info(__("Running purge for location id {}.", location_id)) location_purge(location_id=location_id, delete=True, verbosity=verbosity) except Exception: # pylint: disable=broad-except logger.exception("Error while purging location.", extra={'location_id': location_id})
[ "def", "purge_run", "(", "self", ",", "event", ")", ":", "location_id", "=", "event", "[", "'location_id'", "]", "verbosity", "=", "event", "[", "'verbosity'", "]", "try", ":", "logger", ".", "info", "(", "__", "(", "\"Running purge for location id {}.\"", "...
Run purge for the object with ``location_id`` specified in ``event`` argument.
[ "Run", "purge", "for", "the", "object", "with", "location_id", "specified", "in", "event", "argument", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/consumers.py#L15-L24
train
45,002
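`PurgeConsumer.purge_run` above reads only two keys from the channels event it receives. A sketch of the expected message shape (key names come from the code; the values are examples):

```python
# Example channels-layer event consumed by PurgeConsumer.purge_run().
event = {
    'location_id': 42,  # id of the data location to purge (illustrative value)
    'verbosity': 1,     # forwarded to location_purge()
}
```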
genialis/resolwe
resolwe/flow/models/process.py
Process.get_resource_limits
def get_resource_limits(self): """Get the core count and memory usage limits for this process. :return: A dictionary with the resource limits, containing the following keys: - ``memory``: Memory usage limit, in MB. Defaults to 4096 if not otherwise specified in the resource requirements. - ``cores``: Core count limit. Defaults to 1. :rtype: dict """ # Get limit defaults and overrides. limit_defaults = getattr(settings, 'FLOW_PROCESS_RESOURCE_DEFAULTS', {}) limit_overrides = getattr(settings, 'FLOW_PROCESS_RESOURCE_OVERRIDES', {}) limits = {} resources = self.requirements.get('resources', {}) # pylint: disable=no-member limits['cores'] = int(resources.get('cores', 1)) max_cores = getattr(settings, 'FLOW_PROCESS_MAX_CORES', None) if max_cores: limits['cores'] = min(limits['cores'], max_cores) memory = limit_overrides.get('memory', {}).get(self.slug, None) if memory is None: memory = int(resources.get( 'memory', # If no memory resource is configured, check settings. limit_defaults.get('memory', 4096) )) limits['memory'] = memory return limits
python
def get_resource_limits(self): """Get the core count and memory usage limits for this process. :return: A dictionary with the resource limits, containing the following keys: - ``memory``: Memory usage limit, in MB. Defaults to 4096 if not otherwise specified in the resource requirements. - ``cores``: Core count limit. Defaults to 1. :rtype: dict """ # Get limit defaults and overrides. limit_defaults = getattr(settings, 'FLOW_PROCESS_RESOURCE_DEFAULTS', {}) limit_overrides = getattr(settings, 'FLOW_PROCESS_RESOURCE_OVERRIDES', {}) limits = {} resources = self.requirements.get('resources', {}) # pylint: disable=no-member limits['cores'] = int(resources.get('cores', 1)) max_cores = getattr(settings, 'FLOW_PROCESS_MAX_CORES', None) if max_cores: limits['cores'] = min(limits['cores'], max_cores) memory = limit_overrides.get('memory', {}).get(self.slug, None) if memory is None: memory = int(resources.get( 'memory', # If no memory resource is configured, check settings. limit_defaults.get('memory', 4096) )) limits['memory'] = memory return limits
[ "def", "get_resource_limits", "(", "self", ")", ":", "# Get limit defaults and overrides.", "limit_defaults", "=", "getattr", "(", "settings", ",", "'FLOW_PROCESS_RESOURCE_DEFAULTS'", ",", "{", "}", ")", "limit_overrides", "=", "getattr", "(", "settings", ",", "'FLOW_...
Get the core count and memory usage limits for this process. :return: A dictionary with the resource limits, containing the following keys: - ``memory``: Memory usage limit, in MB. Defaults to 4096 if not otherwise specified in the resource requirements. - ``cores``: Core count limit. Defaults to 1. :rtype: dict
[ "Get", "the", "core", "count", "and", "memory", "usage", "limits", "for", "this", "process", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/process.py#L167-L202
train
45,003
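`get_resource_limits` above resolves its limits from three optional Django settings read via `getattr`. A hedged sketch of what those settings might look like; the process slug and numbers are illustrative:

```python
# Illustrative Django settings consulted by Process.get_resource_limits().
FLOW_PROCESS_RESOURCE_DEFAULTS = {'memory': 8192}                    # fallback memory limit, in MB
FLOW_PROCESS_RESOURCE_OVERRIDES = {'memory': {'my-process': 16384}}  # per-slug memory override
FLOW_PROCESS_MAX_CORES = 4                                           # hard cap applied to 'cores'
```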
genialis/resolwe
resolwe/flow/filters.py
CheckQueryParamsMixin.validate_query_params
def validate_query_params(self): """Ensure no unsupported query params were used.""" allowed_params = set(self.get_filters().keys()) allowed_params.update(self.get_always_allowed_arguments()) unallowed = set(self.request.query_params.keys()) - allowed_params if unallowed: msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format( ', '.join(unallowed), ', '.join(allowed_params), ) self.form.add_error(field=None, error=ParseError(msg))
python
def validate_query_params(self): """Ensure no unsupported query params were used.""" allowed_params = set(self.get_filters().keys()) allowed_params.update(self.get_always_allowed_arguments()) unallowed = set(self.request.query_params.keys()) - allowed_params if unallowed: msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format( ', '.join(unallowed), ', '.join(allowed_params), ) self.form.add_error(field=None, error=ParseError(msg))
[ "def", "validate_query_params", "(", "self", ")", ":", "allowed_params", "=", "set", "(", "self", ".", "get_filters", "(", ")", ".", "keys", "(", ")", ")", "allowed_params", ".", "update", "(", "self", ".", "get_always_allowed_arguments", "(", ")", ")", "u...
Ensure no unsupported query params were used.
[ "Ensure", "no", "unsupported", "query", "params", "were", "used", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/filters.py#L59-L71
train
45,004
wilson-eft/wilson
wilson/classes.py
Wilson.from_wc
def from_wc(cls, wc): """Return a `Wilson` instance initialized by a `wcxf.WC` instance""" return cls(wcdict=wc.dict, scale=wc.scale, eft=wc.eft, basis=wc.basis)
python
def from_wc(cls, wc): """Return a `Wilson` instance initialized by a `wcxf.WC` instance""" return cls(wcdict=wc.dict, scale=wc.scale, eft=wc.eft, basis=wc.basis)
[ "def", "from_wc", "(", "cls", ",", "wc", ")", ":", "return", "cls", "(", "wcdict", "=", "wc", ".", "dict", ",", "scale", "=", "wc", ".", "scale", ",", "eft", "=", "wc", ".", "eft", ",", "basis", "=", "wc", ".", "basis", ")" ]
Return a `Wilson` instance initialized by a `wcxf.WC` instance
[ "Return", "a", "Wilson", "instance", "initialized", "by", "a", "wcxf", ".", "WC", "instance" ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/classes.py#L132-L134
train
45,005
wilson-eft/wilson
wilson/classes.py
Wilson.load_wc
def load_wc(cls, stream): """Return a `Wilson` instance initialized by a WCxf file-like object""" wc = wcxf.WC.load(stream) return cls.from_wc(wc)
python
def load_wc(cls, stream): """Return a `Wilson` instance initialized by a WCxf file-like object""" wc = wcxf.WC.load(stream) return cls.from_wc(wc)
[ "def", "load_wc", "(", "cls", ",", "stream", ")", ":", "wc", "=", "wcxf", ".", "WC", ".", "load", "(", "stream", ")", "return", "cls", ".", "from_wc", "(", "wc", ")" ]
Return a `Wilson` instance initialized by a WCxf file-like object
[ "Return", "a", "Wilson", "instance", "initialized", "by", "a", "WCxf", "file", "-", "like", "object" ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/classes.py#L137-L140
train
45,006
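The two constructors above, `Wilson.from_wc` and `Wilson.load_wc`, build a `Wilson` instance from WCxf data. A usage sketch, assuming the `wilson` and `wcxf` packages are installed; the coefficient name, value, and file name are illustrative only:

```python
import wcxf
from wilson import Wilson

# Build a wcxf.WC in memory; dict2values is the same helper used in WETrunner.run below.
values = wcxf.WC.dict2values({'lq1_1111': 1e-8})
wc = wcxf.WC(eft='SMEFT', basis='Warsaw', scale=1000, values=values)
w1 = Wilson.from_wc(wc)

# Or construct from a WCxf file-like object.
with open('wc.yaml') as f:
    w2 = Wilson.load_wc(f)
```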
wilson-eft/wilson
wilson/classes.py
Wilson._get_from_cache
def _get_from_cache(self, sector, scale, eft, basis): """Try to load a set of Wilson coefficients from the cache, else return None.""" try: return self._cache[eft][scale][basis][sector] except KeyError: return None
python
def _get_from_cache(self, sector, scale, eft, basis): """Try to load a set of Wilson coefficients from the cache, else return None.""" try: return self._cache[eft][scale][basis][sector] except KeyError: return None
[ "def", "_get_from_cache", "(", "self", ",", "sector", ",", "scale", ",", "eft", ",", "basis", ")", ":", "try", ":", "return", "self", ".", "_cache", "[", "eft", "]", "[", "scale", "]", "[", "basis", "]", "[", "sector", "]", "except", "KeyError", ":...
Try to load a set of Wilson coefficients from the cache, else return None.
[ "Try", "to", "load", "a", "set", "of", "Wilson", "coefficients", "from", "the", "cache", "else", "return", "None", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/classes.py#L245-L251
train
45,007
wilson-eft/wilson
wilson/classes.py
RGsolution.plotdata
def plotdata(self, key, part='re', scale='log', steps=50): """Return a tuple of arrays x, y that can be fed to plt.plot, where x is the scale in GeV and y is the parameter of interest. Parameters: - key: dicionary key of the parameter to be plotted (e.g. a WCxf coefficient name or a SM parameter like 'g') - part: plot the real part 're' (default) or the imaginary part 'im' - scale: 'log'; make the x steps logarithmically distributed; for 'linear', linearly distributed - steps: steps in x to take (default: 50) """ if scale == 'log': x = np.logspace(log(self.scale_min), log(self.scale_max), steps, base=e) elif scale == 'linear': x = np.linspace(self.scale_min, self.scale_max, steps) y = self.fun(x) y = np.array([d[key] for d in y]) if part == 're': return x, y.real elif part == 'im': return x, y.imag
python
def plotdata(self, key, part='re', scale='log', steps=50): """Return a tuple of arrays x, y that can be fed to plt.plot, where x is the scale in GeV and y is the parameter of interest. Parameters: - key: dicionary key of the parameter to be plotted (e.g. a WCxf coefficient name or a SM parameter like 'g') - part: plot the real part 're' (default) or the imaginary part 'im' - scale: 'log'; make the x steps logarithmically distributed; for 'linear', linearly distributed - steps: steps in x to take (default: 50) """ if scale == 'log': x = np.logspace(log(self.scale_min), log(self.scale_max), steps, base=e) elif scale == 'linear': x = np.linspace(self.scale_min, self.scale_max, steps) y = self.fun(x) y = np.array([d[key] for d in y]) if part == 're': return x, y.real elif part == 'im': return x, y.imag
[ "def", "plotdata", "(", "self", ",", "key", ",", "part", "=", "'re'", ",", "scale", "=", "'log'", ",", "steps", "=", "50", ")", ":", "if", "scale", "==", "'log'", ":", "x", "=", "np", ".", "logspace", "(", "log", "(", "self", ".", "scale_min", ...
Return a tuple of arrays x, y that can be fed to plt.plot, where x is the scale in GeV and y is the parameter of interest. Parameters: - key: dicionary key of the parameter to be plotted (e.g. a WCxf coefficient name or a SM parameter like 'g') - part: plot the real part 're' (default) or the imaginary part 'im' - scale: 'log'; make the x steps logarithmically distributed; for 'linear', linearly distributed - steps: steps in x to take (default: 50)
[ "Return", "a", "tuple", "of", "arrays", "x", "y", "that", "can", "be", "fed", "to", "plt", ".", "plot", "where", "x", "is", "the", "scale", "in", "GeV", "and", "y", "is", "the", "parameter", "of", "interest", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/classes.py#L281-L308
train
45,008
wilson-eft/wilson
wilson/classes.py
RGsolution.plot
def plot(self, key, part='re', scale='log', steps=50, legend=True, plotargs={}): """Plot the RG evolution of parameter `key`. Parameters: - part, scale, steps: see `plotdata` - legend: boolean, show the legend (default: True) - plotargs: dictionary of arguments to be passed to plt.plot """ try: import matplotlib.pyplot as plt except ImportError: raise ImportError("Please install matplotlib if you want to use the plot method") pdat = self.plotdata(key, part=part, scale=scale, steps=steps) plt.plot(*pdat, label=key, **plotargs) if scale == 'log': plt.xscale('log') if legend: plt.legend()
python
def plot(self, key, part='re', scale='log', steps=50, legend=True, plotargs={}): """Plot the RG evolution of parameter `key`. Parameters: - part, scale, steps: see `plotdata` - legend: boolean, show the legend (default: True) - plotargs: dictionary of arguments to be passed to plt.plot """ try: import matplotlib.pyplot as plt except ImportError: raise ImportError("Please install matplotlib if you want to use the plot method") pdat = self.plotdata(key, part=part, scale=scale, steps=steps) plt.plot(*pdat, label=key, **plotargs) if scale == 'log': plt.xscale('log') if legend: plt.legend()
[ "def", "plot", "(", "self", ",", "key", ",", "part", "=", "'re'", ",", "scale", "=", "'log'", ",", "steps", "=", "50", ",", "legend", "=", "True", ",", "plotargs", "=", "{", "}", ")", ":", "try", ":", "import", "matplotlib", ".", "pyplot", "as", ...
Plot the RG evolution of parameter `key`. Parameters: - part, scale, steps: see `plotdata` - legend: boolean, show the legend (default: True) - plotargs: dictionary of arguments to be passed to plt.plot
[ "Plot", "the", "RG", "evolution", "of", "parameter", "key", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/classes.py#L310-L328
train
45,009
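`plotdata` and `plot` above operate on an `RGsolution`. A sketch of how the returned arrays can be fed to matplotlib directly, assuming `sol` is an existing `RGsolution` instance (how to obtain one is not shown in these rows); `'g'` is the example parameter key named in the docstring:

```python
import matplotlib.pyplot as plt

x, y = sol.plotdata('g', part='re', scale='log', steps=100)  # x: scale in GeV, y: parameter values
plt.plot(x, y, label='g')
plt.xscale('log')
plt.xlabel('scale [GeV]')
plt.legend()
plt.show()

# Equivalent convenience call:
# sol.plot('g', scale='log', steps=100)
```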
genialis/resolwe
resolwe/flow/managers/consumer.py
run_consumer
async def run_consumer(timeout=None, dry_run=False): """Run the consumer until it finishes processing. :param timeout: Set maximum execution time before cancellation, or ``None`` (default) for unlimited. :param dry_run: If ``True``, don't actually dispatch messages, just dequeue them. Defaults to ``False``. """ channel = state.MANAGER_CONTROL_CHANNEL scope = { 'type': 'control_event', 'channel': channel, } app = ApplicationCommunicator(ManagerConsumer, scope) channel_layer = get_channel_layer() async def _consume_loop(): """Run a loop to consume messages off the channels layer.""" while True: message = await channel_layer.receive(channel) if dry_run: continue if message.get('type', {}) == '_resolwe_manager_quit': break message.update(scope) await app.send_input(message) if timeout is None: await _consume_loop() try: # A further grace period to catch late messages. async with async_timeout.timeout(timeout or 1): await _consume_loop() except asyncio.TimeoutError: pass await app.wait()
python
async def run_consumer(timeout=None, dry_run=False): """Run the consumer until it finishes processing. :param timeout: Set maximum execution time before cancellation, or ``None`` (default) for unlimited. :param dry_run: If ``True``, don't actually dispatch messages, just dequeue them. Defaults to ``False``. """ channel = state.MANAGER_CONTROL_CHANNEL scope = { 'type': 'control_event', 'channel': channel, } app = ApplicationCommunicator(ManagerConsumer, scope) channel_layer = get_channel_layer() async def _consume_loop(): """Run a loop to consume messages off the channels layer.""" while True: message = await channel_layer.receive(channel) if dry_run: continue if message.get('type', {}) == '_resolwe_manager_quit': break message.update(scope) await app.send_input(message) if timeout is None: await _consume_loop() try: # A further grace period to catch late messages. async with async_timeout.timeout(timeout or 1): await _consume_loop() except asyncio.TimeoutError: pass await app.wait()
[ "async", "def", "run_consumer", "(", "timeout", "=", "None", ",", "dry_run", "=", "False", ")", ":", "channel", "=", "state", ".", "MANAGER_CONTROL_CHANNEL", "scope", "=", "{", "'type'", ":", "'control_event'", ",", "'channel'", ":", "channel", ",", "}", "...
Run the consumer until it finishes processing. :param timeout: Set maximum execution time before cancellation, or ``None`` (default) for unlimited. :param dry_run: If ``True``, don't actually dispatch messages, just dequeue them. Defaults to ``False``.
[ "Run", "the", "consumer", "until", "it", "finishes", "processing", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/consumer.py#L32-L70
train
45,010
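`run_consumer` above is a coroutine, so callers drive it from an event loop. A minimal sketch; the timeout value is illustrative, and a configured Django settings module and channels layer are assumed:

```python
import asyncio
from resolwe.flow.managers.consumer import run_consumer  # module path taken from the row above

# Drain queued control messages, allowing roughly 5 seconds before cancellation.
asyncio.run(run_consumer(timeout=5))
```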
genialis/resolwe
resolwe/elastic/composer.py
Composer.discover_extensions
def discover_extensions(self): """Discover available extensions.""" if self._discovery_done: return try: previous_state = self._extensions.copy() for app_config in apps.get_app_configs(): indexes_path = '{}.extensions'.format(app_config.name) try: import_module(indexes_path) except ImportError: pass self._discovery_done = True except Exception: # Rollback state to prevent corrupted state on exceptions during import. self._extensions = previous_state raise
python
def discover_extensions(self): """Discover available extensions.""" if self._discovery_done: return try: previous_state = self._extensions.copy() for app_config in apps.get_app_configs(): indexes_path = '{}.extensions'.format(app_config.name) try: import_module(indexes_path) except ImportError: pass self._discovery_done = True except Exception: # Rollback state to prevent corrupted state on exceptions during import. self._extensions = previous_state raise
[ "def", "discover_extensions", "(", "self", ")", ":", "if", "self", ".", "_discovery_done", ":", "return", "try", ":", "previous_state", "=", "self", ".", "_extensions", ".", "copy", "(", ")", "for", "app_config", "in", "apps", ".", "get_app_configs", "(", ...
Discover available extensions.
[ "Discover", "available", "extensions", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/composer.py#L16-L35
train
45,011
genialis/resolwe
resolwe/elastic/composer.py
Composer._get_class_path
def _get_class_path(self, klass_or_instance): """Return class path for a given class. :param klass_or_instance: Class or instance of given class :return: String containing the class path """ if inspect.isclass(klass_or_instance): klass = '{}.{}'.format(klass_or_instance.__module__, klass_or_instance.__name__) elif not isinstance(klass_or_instance, str): klass = klass_or_instance.__class__ klass = '{}.{}'.format(klass.__module__, klass.__name__) else: klass = klass_or_instance return klass
python
def _get_class_path(self, klass_or_instance): """Return class path for a given class. :param klass_or_instance: Class or instance of given class :return: String containing the class path """ if inspect.isclass(klass_or_instance): klass = '{}.{}'.format(klass_or_instance.__module__, klass_or_instance.__name__) elif not isinstance(klass_or_instance, str): klass = klass_or_instance.__class__ klass = '{}.{}'.format(klass.__module__, klass.__name__) else: klass = klass_or_instance return klass
[ "def", "_get_class_path", "(", "self", ",", "klass_or_instance", ")", ":", "if", "inspect", ".", "isclass", "(", "klass_or_instance", ")", ":", "klass", "=", "'{}.{}'", ".", "format", "(", "klass_or_instance", ".", "__module__", ",", "klass_or_instance", ".", ...
Return class path for a given class. :param klass_or_instance: Class or instance of given class :return: String containing the class path
[ "Return", "class", "path", "for", "a", "given", "class", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/composer.py#L37-L51
train
45,012
genialis/resolwe
resolwe/elastic/composer.py
Composer.add_extension
def add_extension(self, klass, extension): """Register an extension for a class. :param klass: Class to register an extension for :param extension: Extension (arbitrary type) """ klass = self._get_class_path(klass) # TODO: Take order into account. self._extensions.setdefault(klass, []).append(extension)
python
def add_extension(self, klass, extension): """Register an extension for a class. :param klass: Class to register an extension for :param extension: Extension (arbitrary type) """ klass = self._get_class_path(klass) # TODO: Take order into account. self._extensions.setdefault(klass, []).append(extension)
[ "def", "add_extension", "(", "self", ",", "klass", ",", "extension", ")", ":", "klass", "=", "self", ".", "_get_class_path", "(", "klass", ")", "# TODO: Take order into account.", "self", ".", "_extensions", ".", "setdefault", "(", "klass", ",", "[", "]", ")...
Register an extension for a class. :param klass: Class to register an extension for :param extension: Extension (arbitrary type)
[ "Register", "an", "extension", "for", "a", "class", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/composer.py#L53-L62
train
45,013
genialis/resolwe
resolwe/elastic/composer.py
Composer.get_extensions
def get_extensions(self, klass): """Return all registered extensions of a class. :param klass: Class to get registered extensions for :return: All registered extensions for given class """ self.discover_extensions() return self._extensions.get(self._get_class_path(klass), [])
python
def get_extensions(self, klass): """Return all registered extensions of a class. :param klass: Class to get registered extensions for :return: All registered extensions for given class """ self.discover_extensions() return self._extensions.get(self._get_class_path(klass), [])
[ "def", "get_extensions", "(", "self", ",", "klass", ")", ":", "self", ".", "discover_extensions", "(", ")", "return", "self", ".", "_extensions", ".", "get", "(", "self", ".", "_get_class_path", "(", "klass", ")", ",", "[", "]", ")" ]
Return all registered extensions of a class. :param klass: Class to get registered extensions for :return: All registered extensions for given class
[ "Return", "all", "registered", "extensions", "of", "a", "class", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/composer.py#L64-L72
train
45,014
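`add_extension` and `get_extensions` above key everything by dotted class path and treat extensions as opaque objects. A hedged usage sketch, assuming `composer` is an existing `Composer` instance (a module-level singleton may exist, but these rows do not show one) and that Django apps are configured, since `get_extensions` triggers `discover_extensions`:

```python
class SampleIndex:
    """Hypothetical class to attach extensions to."""

composer.add_extension(SampleIndex, {'mapping': 'example'})  # stored under '<module>.SampleIndex'
extensions = composer.get_extensions(SampleIndex)            # -> [{'mapping': 'example'}]
```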
wilson-eft/wilson
wilson/run/wet/classes.py
WETrunner.run
def run(self, scale_out, sectors='all'): """Evolve the Wilson coefficients to the scale `scale_out`. Parameters: - scale_out: output scale - sectors: optional. If provided, must be a tuple of strings corresponding to WCxf sector names. Only Wilson coefficients belonging to these sectors will be present in the output. Returns an instance of `wcxf.WC`. """ C_out = self._run_dict(scale_out, sectors=sectors) all_wcs = set(wcxf.Basis[self.eft, 'JMS'].all_wcs) # to speed up lookup C_out = {k: v for k, v in C_out.items() if v != 0 and k in all_wcs} return wcxf.WC(eft=self.eft, basis='JMS', scale=scale_out, values=wcxf.WC.dict2values(C_out))
python
def run(self, scale_out, sectors='all'): """Evolve the Wilson coefficients to the scale `scale_out`. Parameters: - scale_out: output scale - sectors: optional. If provided, must be a tuple of strings corresponding to WCxf sector names. Only Wilson coefficients belonging to these sectors will be present in the output. Returns an instance of `wcxf.WC`. """ C_out = self._run_dict(scale_out, sectors=sectors) all_wcs = set(wcxf.Basis[self.eft, 'JMS'].all_wcs) # to speed up lookup C_out = {k: v for k, v in C_out.items() if v != 0 and k in all_wcs} return wcxf.WC(eft=self.eft, basis='JMS', scale=scale_out, values=wcxf.WC.dict2values(C_out))
[ "def", "run", "(", "self", ",", "scale_out", ",", "sectors", "=", "'all'", ")", ":", "C_out", "=", "self", ".", "_run_dict", "(", "scale_out", ",", "sectors", "=", "sectors", ")", "all_wcs", "=", "set", "(", "wcxf", ".", "Basis", "[", "self", ".", ...
Evolve the Wilson coefficients to the scale `scale_out`. Parameters: - scale_out: output scale - sectors: optional. If provided, must be a tuple of strings corresponding to WCxf sector names. Only Wilson coefficients belonging to these sectors will be present in the output. Returns an instance of `wcxf.WC`.
[ "Evolve", "the", "Wilson", "coefficients", "to", "the", "scale", "scale_out", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/wet/classes.py#L86-L104
train
45,015
joshua-stone/DerPyBooru
derpibooru/search.py
Search.sort_by
def sort_by(self, sf): """ Determines how to sort search results. Available sorting methods are sort.SCORE, sort.COMMENTS, sort.HEIGHT, sort.RELEVANCE, sort.CREATED_AT, and sort.RANDOM; default is sort.CREATED_AT. """ params = join_params(self.parameters, {"sf": sf}) return self.__class__(**params)
python
def sort_by(self, sf): """ Determines how to sort search results. Available sorting methods are sort.SCORE, sort.COMMENTS, sort.HEIGHT, sort.RELEVANCE, sort.CREATED_AT, and sort.RANDOM; default is sort.CREATED_AT. """ params = join_params(self.parameters, {"sf": sf}) return self.__class__(**params)
[ "def", "sort_by", "(", "self", ",", "sf", ")", ":", "params", "=", "join_params", "(", "self", ".", "parameters", ",", "{", "\"sf\"", ":", "sf", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "params", ")" ]
Determines how to sort search results. Available sorting methods are sort.SCORE, sort.COMMENTS, sort.HEIGHT, sort.RELEVANCE, sort.CREATED_AT, and sort.RANDOM; default is sort.CREATED_AT.
[ "Determines", "how", "to", "sort", "search", "results", ".", "Available", "sorting", "methods", "are", "sort", ".", "SCORE", "sort", ".", "COMMENTS", "sort", ".", "HEIGHT", "sort", ".", "RELEVANCE", "sort", ".", "CREATED_AT", "and", "sort", ".", "RANDOM", ...
75aec19488042ba89115ff002b4d696ad87fb03f
https://github.com/joshua-stone/DerPyBooru/blob/75aec19488042ba89115ff002b4d696ad87fb03f/derpibooru/search.py#L109-L117
train
45,016
joshua-stone/DerPyBooru
derpibooru/search.py
Search.limit
def limit(self, limit): """ Set absolute limit on number of images to return, or set to None to return as many results as needed; default 50 posts. """ params = join_params(self.parameters, {"limit": limit}) return self.__class__(**params)
python
def limit(self, limit): """ Set absolute limit on number of images to return, or set to None to return as many results as needed; default 50 posts. """ params = join_params(self.parameters, {"limit": limit}) return self.__class__(**params)
[ "def", "limit", "(", "self", ",", "limit", ")", ":", "params", "=", "join_params", "(", "self", ".", "parameters", ",", "{", "\"limit\"", ":", "limit", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "params", ")" ]
Set absolute limit on number of images to return, or set to None to return as many results as needed; default 50 posts.
[ "Set", "absolute", "limit", "on", "number", "of", "images", "to", "return", "or", "set", "to", "None", "to", "return", "as", "many", "results", "as", "needed", ";", "default", "50", "posts", "." ]
75aec19488042ba89115ff002b4d696ad87fb03f
https://github.com/joshua-stone/DerPyBooru/blob/75aec19488042ba89115ff002b4d696ad87fb03f/derpibooru/search.py#L135-L142
train
45,017
joshua-stone/DerPyBooru
derpibooru/search.py
Search.faves
def faves(self, option): """ Set whether to filter by a user's faves list. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"faves": option}) return self.__class__(**params)
python
def faves(self, option): """ Set whether to filter by a user's faves list. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"faves": option}) return self.__class__(**params)
[ "def", "faves", "(", "self", ",", "option", ")", ":", "params", "=", "join_params", "(", "self", ".", "parameters", ",", "{", "\"faves\"", ":", "option", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "params", ")" ]
Set whether to filter by a user's faves list. Options available are user.ONLY, user.NOT, and None; default is None.
[ "Set", "whether", "to", "filter", "by", "a", "user", "s", "faves", "list", ".", "Options", "available", "are", "user", ".", "ONLY", "user", ".", "NOT", "and", "None", ";", "default", "is", "None", "." ]
75aec19488042ba89115ff002b4d696ad87fb03f
https://github.com/joshua-stone/DerPyBooru/blob/75aec19488042ba89115ff002b4d696ad87fb03f/derpibooru/search.py#L156-L163
train
45,018
joshua-stone/DerPyBooru
derpibooru/search.py
Search.upvotes
def upvotes(self, option): """ Set whether to filter by a user's upvoted list. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"upvotes": option}) return self.__class__(**params)
python
def upvotes(self, option): """ Set whether to filter by a user's upvoted list. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"upvotes": option}) return self.__class__(**params)
[ "def", "upvotes", "(", "self", ",", "option", ")", ":", "params", "=", "join_params", "(", "self", ".", "parameters", ",", "{", "\"upvotes\"", ":", "option", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "params", ")" ]
Set whether to filter by a user's upvoted list. Options available are user.ONLY, user.NOT, and None; default is None.
[ "Set", "whether", "to", "filter", "by", "a", "user", "s", "upvoted", "list", ".", "Options", "available", "are", "user", ".", "ONLY", "user", ".", "NOT", "and", "None", ";", "default", "is", "None", "." ]
75aec19488042ba89115ff002b4d696ad87fb03f
https://github.com/joshua-stone/DerPyBooru/blob/75aec19488042ba89115ff002b4d696ad87fb03f/derpibooru/search.py#L165-L172
train
45,019
joshua-stone/DerPyBooru
derpibooru/search.py
Search.uploads
def uploads(self, option): """ Set whether to filter by a user's uploads list. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"uploads": option}) return self.__class__(**params)
python
def uploads(self, option): """ Set whether to filter by a user's uploads list. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"uploads": option}) return self.__class__(**params)
[ "def", "uploads", "(", "self", ",", "option", ")", ":", "params", "=", "join_params", "(", "self", ".", "parameters", ",", "{", "\"uploads\"", ":", "option", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "params", ")" ]
Set whether to filter by a user's uploads list. Options available are user.ONLY, user.NOT, and None; default is None.
[ "Set", "whether", "to", "filter", "by", "a", "user", "s", "uploads", "list", ".", "Options", "available", "are", "user", ".", "ONLY", "user", ".", "NOT", "and", "None", ";", "default", "is", "None", "." ]
75aec19488042ba89115ff002b4d696ad87fb03f
https://github.com/joshua-stone/DerPyBooru/blob/75aec19488042ba89115ff002b4d696ad87fb03f/derpibooru/search.py#L174-L181
train
45,020
joshua-stone/DerPyBooru
derpibooru/search.py
Search.watched
def watched(self, option): """ Set whether to filter by a user's watchlist. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"watched": option}) return self.__class__(**params)
python
def watched(self, option): """ Set whether to filter by a user's watchlist. Options available are user.ONLY, user.NOT, and None; default is None. """ params = join_params(self.parameters, {"watched": option}) return self.__class__(**params)
[ "def", "watched", "(", "self", ",", "option", ")", ":", "params", "=", "join_params", "(", "self", ".", "parameters", ",", "{", "\"watched\"", ":", "option", "}", ")", "return", "self", ".", "__class__", "(", "*", "*", "params", ")" ]
Set whether to filter by a user's watchlist. Options available are user.ONLY, user.NOT, and None; default is None.
[ "Set", "whether", "to", "filter", "by", "a", "user", "s", "watchlist", ".", "Options", "available", "are", "user", ".", "ONLY", "user", ".", "NOT", "and", "None", ";", "default", "is", "None", "." ]
75aec19488042ba89115ff002b4d696ad87fb03f
https://github.com/joshua-stone/DerPyBooru/blob/75aec19488042ba89115ff002b4d696ad87fb03f/derpibooru/search.py#L183-L190
train
45,021
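Each of the `Search` methods above returns a new `Search` built from merged parameters, so calls chain. A hedged sketch using only the methods shown in these rows, assuming the package's documented `from derpibooru import Search, sort` entry point:

```python
from derpibooru import Search, sort

# Newest-first ordering, capped at 10 results.
query = Search().sort_by(sort.CREATED_AT).limit(10)

for image in query:   # Search objects are iterable and yield image results
    print(image.url)
```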
genialis/resolwe
resolwe/flow/views/mixins.py
ResolweCreateModelMixin.define_contributor
def define_contributor(self, request): """Define contributor by adding it to request.data.""" request.data['contributor'] = self.resolve_user(request.user).pk
python
def define_contributor(self, request): """Define contributor by adding it to request.data.""" request.data['contributor'] = self.resolve_user(request.user).pk
[ "def", "define_contributor", "(", "self", ",", "request", ")", ":", "request", ".", "data", "[", "'contributor'", "]", "=", "self", ".", "resolve_user", "(", "request", ".", "user", ")", ".", "pk" ]
Define contributor by adding it to request.data.
[ "Define", "contributor", "by", "adding", "it", "to", "request", ".", "data", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/mixins.py#L30-L32
train
45,022
genialis/resolwe
resolwe/flow/views/mixins.py
ResolweCheckSlugMixin.slug_exists
def slug_exists(self, request): """Check if given url slug exists. Check if slug given in query parameter ``name`` exists. Return ``True`` if slug already exists and ``False`` otherwise. """ if not request.user.is_authenticated: return Response(status=status.HTTP_401_UNAUTHORIZED) if 'name' not in request.query_params: return Response({'error': 'Query parameter `name` must be given.'}, status=status.HTTP_400_BAD_REQUEST) queryset = self.get_queryset() slug_name = request.query_params['name'] return Response(queryset.filter(slug__iexact=slug_name).exists())
python
def slug_exists(self, request): """Check if given url slug exists. Check if slug given in query parameter ``name`` exists. Return ``True`` if slug already exists and ``False`` otherwise. """ if not request.user.is_authenticated: return Response(status=status.HTTP_401_UNAUTHORIZED) if 'name' not in request.query_params: return Response({'error': 'Query parameter `name` must be given.'}, status=status.HTTP_400_BAD_REQUEST) queryset = self.get_queryset() slug_name = request.query_params['name'] return Response(queryset.filter(slug__iexact=slug_name).exists())
[ "def", "slug_exists", "(", "self", ",", "request", ")", ":", "if", "not", "request", ".", "user", ".", "is_authenticated", ":", "return", "Response", "(", "status", "=", "status", ".", "HTTP_401_UNAUTHORIZED", ")", "if", "'name'", "not", "in", "request", "...
Check if given url slug exists. Check if slug given in query parameter ``name`` exists. Return ``True`` if slug already exists and ``False`` otherwise.
[ "Check", "if", "given", "url", "slug", "exists", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/mixins.py#L121-L137
train
45,023
genialis/resolwe
resolwe/flow/views/mixins.py
ParametersMixin.get_ids
def get_ids(self, request_data, parameter_name='ids'): """Extract a list of integers from request data.""" if parameter_name not in request_data: raise ParseError("`{}` parameter is required".format(parameter_name)) ids = request_data.get(parameter_name) if not isinstance(ids, list): raise ParseError("`{}` parameter not a list".format(parameter_name)) if not ids: raise ParseError("`{}` parameter is empty".format(parameter_name)) if any(map(lambda id: not isinstance(id, int), ids)): raise ParseError("`{}` parameter contains non-integers".format(parameter_name)) return ids
python
def get_ids(self, request_data, parameter_name='ids'): """Extract a list of integers from request data.""" if parameter_name not in request_data: raise ParseError("`{}` parameter is required".format(parameter_name)) ids = request_data.get(parameter_name) if not isinstance(ids, list): raise ParseError("`{}` parameter not a list".format(parameter_name)) if not ids: raise ParseError("`{}` parameter is empty".format(parameter_name)) if any(map(lambda id: not isinstance(id, int), ids)): raise ParseError("`{}` parameter contains non-integers".format(parameter_name)) return ids
[ "def", "get_ids", "(", "self", ",", "request_data", ",", "parameter_name", "=", "'ids'", ")", ":", "if", "parameter_name", "not", "in", "request_data", ":", "raise", "ParseError", "(", "\"`{}` parameter is required\"", ".", "format", "(", "parameter_name", ")", ...
Extract a list of integers from request data.
[ "Extract", "a", "list", "of", "integers", "from", "request", "data", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/mixins.py#L143-L158
train
45,024
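`get_ids` above validates the payload shape before using it. Examples of what passes and what raises `ParseError`, grounded directly in the checks shown; the payload values are illustrative:

```python
# Accepted: a non-empty list of integers under the expected key.
valid = {'ids': [1, 2, 3]}

# Each of these would raise ParseError in get_ids(request_data):
missing = {}                    # key absent
not_a_list = {'ids': 5}         # value is not a list
empty = {'ids': []}             # list is empty
mixed = {'ids': [1, 'two', 3]}  # list contains non-integers
```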
genialis/resolwe
resolwe/flow/views/mixins.py
ParametersMixin.get_id
def get_id(self, request_data, parameter_name='id'): """Extract an integer from request data.""" if parameter_name not in request_data: raise ParseError("`{}` parameter is required".format(parameter_name)) id_parameter = request_data.get(parameter_name, None) if not isinstance(id_parameter, int): raise ParseError("`{}` parameter not an integer".format(parameter_name)) return id_parameter
python
def get_id(self, request_data, parameter_name='id'): """Extract an integer from request data.""" if parameter_name not in request_data: raise ParseError("`{}` parameter is required".format(parameter_name)) id_parameter = request_data.get(parameter_name, None) if not isinstance(id_parameter, int): raise ParseError("`{}` parameter not an integer".format(parameter_name)) return id_parameter
[ "def", "get_id", "(", "self", ",", "request_data", ",", "parameter_name", "=", "'id'", ")", ":", "if", "parameter_name", "not", "in", "request_data", ":", "raise", "ParseError", "(", "\"`{}` parameter is required\"", ".", "format", "(", "parameter_name", ")", ")...
Extract an integer from request data.
[ "Extract", "an", "integer", "from", "request", "data", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/mixins.py#L160-L169
train
45,025
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
deserialize
def deserialize(dataone_exception_xml): """Deserialize a DataONE Exception XML doc.""" # logging.debug('dataone_exception_xml="{}"' # .format(d1_common.xml.pretty_xml(dataone_exception_xml))) try: dataone_exception_pyxb = d1_common.xml.deserialize_d1_exception( dataone_exception_xml ) except ValueError as e: raise ServiceFailure( detailCode='0', description='Deserialization failed. error="{}" doc="{}"'.format( str(e), '<empty response>' if not dataone_exception_xml else dataone_exception_xml, ), traceInformation=traceback.format_exc(), ) else: x = create_exception_by_name( dataone_exception_pyxb.name, dataone_exception_pyxb.detailCode, dataone_exception_pyxb.description, _get_trace_information_content(dataone_exception_pyxb), dataone_exception_pyxb.identifier, dataone_exception_pyxb.nodeId, ) return x
python
def deserialize(dataone_exception_xml): """Deserialize a DataONE Exception XML doc.""" # logging.debug('dataone_exception_xml="{}"' # .format(d1_common.xml.pretty_xml(dataone_exception_xml))) try: dataone_exception_pyxb = d1_common.xml.deserialize_d1_exception( dataone_exception_xml ) except ValueError as e: raise ServiceFailure( detailCode='0', description='Deserialization failed. error="{}" doc="{}"'.format( str(e), '<empty response>' if not dataone_exception_xml else dataone_exception_xml, ), traceInformation=traceback.format_exc(), ) else: x = create_exception_by_name( dataone_exception_pyxb.name, dataone_exception_pyxb.detailCode, dataone_exception_pyxb.description, _get_trace_information_content(dataone_exception_pyxb), dataone_exception_pyxb.identifier, dataone_exception_pyxb.nodeId, ) return x
[ "def", "deserialize", "(", "dataone_exception_xml", ")", ":", "# logging.debug('dataone_exception_xml=\"{}\"'", "# .format(d1_common.xml.pretty_xml(dataone_exception_xml)))", "try", ":", "dataone_exception_pyxb", "=", "d1_common", ".", "xml", ".", "deserialize_d1_exception", "(", ...
Deserialize a DataONE Exception XML doc.
[ "Deserialize", "a", "DataONE", "Exception", "XML", "doc", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L97-L125
train
45,026
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
create_exception_by_name
def create_exception_by_name( name, detailCode='0', description='', traceInformation=None, identifier=None, nodeId=None, ): """Create a DataONEException based object by name. Args: name: str The type name of a DataONE Exception. E.g. NotFound. If an unknown type name is used, it is automatically set to ServiceFailure. As the XML Schema for DataONE Exceptions does not restrict the type names, this may occur when deserializing an exception not defined by DataONE. detailCode: int Optional index into a table of predefined error conditions. See Also: For remaining args, see: ``DataONEException()`` """ try: dataone_exception = globals()[name] except LookupError: dataone_exception = ServiceFailure return dataone_exception( detailCode, description, traceInformation, identifier, nodeId )
python
def create_exception_by_name( name, detailCode='0', description='', traceInformation=None, identifier=None, nodeId=None, ): """Create a DataONEException based object by name. Args: name: str The type name of a DataONE Exception. E.g. NotFound. If an unknown type name is used, it is automatically set to ServiceFailure. As the XML Schema for DataONE Exceptions does not restrict the type names, this may occur when deserializing an exception not defined by DataONE. detailCode: int Optional index into a table of predefined error conditions. See Also: For remaining args, see: ``DataONEException()`` """ try: dataone_exception = globals()[name] except LookupError: dataone_exception = ServiceFailure return dataone_exception( detailCode, description, traceInformation, identifier, nodeId )
[ "def", "create_exception_by_name", "(", "name", ",", "detailCode", "=", "'0'", ",", "description", "=", "''", ",", "traceInformation", "=", "None", ",", "identifier", "=", "None", ",", "nodeId", "=", "None", ",", ")", ":", "try", ":", "dataone_exception", ...
Create a DataONEException based object by name. Args: name: str The type name of a DataONE Exception. E.g. NotFound. If an unknown type name is used, it is automatically set to ServiceFailure. As the XML Schema for DataONE Exceptions does not restrict the type names, this may occur when deserializing an exception not defined by DataONE. detailCode: int Optional index into a table of predefined error conditions. See Also: For remaining args, see: ``DataONEException()``
[ "Create", "a", "DataONEException", "based", "object", "by", "name", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L155-L186
train
45,027
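`create_exception_by_name` above looks the type name up in `globals()` and falls back to `ServiceFailure` on a miss. A usage sketch; the exception name and description are examples, and the module path is taken from the row above:

```python
from d1_common.types import exceptions

# A known DataONE exception name yields an instance of that class.
exc = exceptions.create_exception_by_name(
    'NotFound', description='object with the given identifier does not exist'
)

# Unknown names fall back to ServiceFailure via the LookupError handler above.
fallback = exceptions.create_exception_by_name('NoSuchExceptionType')
```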
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
create_exception_by_error_code
def create_exception_by_error_code( errorCode, detailCode='0', description='', traceInformation=None, identifier=None, nodeId=None, ): """Create a DataONE Exception object by errorCode. See Also: For args, see: ``DataONEException()`` """ try: dataone_exception = ERROR_CODE_TO_EXCEPTION_DICT[errorCode] except LookupError: dataone_exception = ServiceFailure return dataone_exception( detailCode, description, traceInformation, identifier, nodeId )
python
def create_exception_by_error_code( errorCode, detailCode='0', description='', traceInformation=None, identifier=None, nodeId=None, ): """Create a DataONE Exception object by errorCode. See Also: For args, see: ``DataONEException()`` """ try: dataone_exception = ERROR_CODE_TO_EXCEPTION_DICT[errorCode] except LookupError: dataone_exception = ServiceFailure return dataone_exception( detailCode, description, traceInformation, identifier, nodeId )
[ "def", "create_exception_by_error_code", "(", "errorCode", ",", "detailCode", "=", "'0'", ",", "description", "=", "''", ",", "traceInformation", "=", "None", ",", "identifier", "=", "None", ",", "nodeId", "=", "None", ",", ")", ":", "try", ":", "dataone_exc...
Create a DataONE Exception object by errorCode. See Also: For args, see: ``DataONEException()``
[ "Create", "a", "DataONE", "Exception", "object", "by", "errorCode", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L190-L209
train
45,028
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
DataONEException._fmt
def _fmt(self, tag, msg): """Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars. """ msg = msg or '<unset>' msg = str(msg) msg = msg.strip() if not msg: return if len(msg) > 2048: msg = msg[:1024] + '...' if msg.count('\n') <= 1: return '{}: {}\n'.format(tag, msg.strip()) else: return '{}:\n {}\n'.format(tag, msg.replace('\n', '\n ').strip())
python
def _fmt(self, tag, msg): """Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars. """ msg = msg or '<unset>' msg = str(msg) msg = msg.strip() if not msg: return if len(msg) > 2048: msg = msg[:1024] + '...' if msg.count('\n') <= 1: return '{}: {}\n'.format(tag, msg.strip()) else: return '{}:\n {}\n'.format(tag, msg.replace('\n', '\n ').strip())
[ "def", "_fmt", "(", "self", ",", "tag", ",", "msg", ")", ":", "msg", "=", "msg", "or", "'<unset>'", "msg", "=", "str", "(", "msg", ")", "msg", "=", "msg", ".", "strip", "(", ")", "if", "not", "msg", ":", "return", "if", "len", "(", "msg", ")"...
Format a string for inclusion in the exception's string representation. If msg is None, format to empty string. If msg has a single line, format to: tag: msg If msg has multiple lines, format to: tag: line 1 line 2 Msg is truncated to 1024 chars.
[ "Format", "a", "string", "for", "inclusion", "in", "the", "exception", "s", "string", "representation", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L298-L316
train
45,029
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
DataONEException.friendly_format
def friendly_format(self): """Serialize to a format more suitable for displaying to end users.""" if self.description is not None: msg = self.description else: msg = 'errorCode: {} / detailCode: {}'.format( self.errorCode, self.detailCode ) return self._fmt(self.name, msg)
python
def friendly_format(self): """Serialize to a format more suitable for displaying to end users.""" if self.description is not None: msg = self.description else: msg = 'errorCode: {} / detailCode: {}'.format( self.errorCode, self.detailCode ) return self._fmt(self.name, msg)
[ "def", "friendly_format", "(", "self", ")", ":", "if", "self", ".", "description", "is", "not", "None", ":", "msg", "=", "self", ".", "description", "else", ":", "msg", "=", "'errorCode: {} / detailCode: {}'", ".", "format", "(", "self", ".", "errorCode", ...
Serialize to a format more suitable for displaying to end users.
[ "Serialize", "to", "a", "format", "more", "suitable", "for", "displaying", "to", "end", "users", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L318-L326
train
45,030
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
DataONEException.serialize_to_transport
def serialize_to_transport(self, encoding='utf-8', xslt_url=None): """Serialize to XML ``bytes`` with prolog. Args: encoding: str Encoding to use for XML doc bytes xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: bytes: XML holding a DataONEError based type. """ assert encoding in ('utf-8', 'UTF-8') dataone_exception_pyxb = self.get_pyxb() return d1_common.xml.serialize_for_transport( dataone_exception_pyxb, xslt_url=xslt_url )
python
def serialize_to_transport(self, encoding='utf-8', xslt_url=None): """Serialize to XML ``bytes`` with prolog. Args: encoding: str Encoding to use for XML doc bytes xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: bytes: XML holding a DataONEError based type. """ assert encoding in ('utf-8', 'UTF-8') dataone_exception_pyxb = self.get_pyxb() return d1_common.xml.serialize_for_transport( dataone_exception_pyxb, xslt_url=xslt_url )
[ "def", "serialize_to_transport", "(", "self", ",", "encoding", "=", "'utf-8'", ",", "xslt_url", "=", "None", ")", ":", "assert", "encoding", "in", "(", "'utf-8'", ",", "'UTF-8'", ")", "dataone_exception_pyxb", "=", "self", ".", "get_pyxb", "(", ")", "return"...
Serialize to XML ``bytes`` with prolog. Args: encoding: str Encoding to use for XML doc bytes xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: bytes: XML holding a DataONEError based type.
[ "Serialize", "to", "XML", "bytes", "with", "prolog", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L328-L346
train
45,031
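The three DataONEException records above (friendly_format, serialize_to_transport, serialize_to_display) cover the human-readable and XML renderings of the same error object. A minimal usage sketch follows (not part of the dataset records); it assumes the NotFound constructor accepts a detail code and a description, which should be checked against the d1_common documentation.

import d1_common.types.exceptions

def render_error(exc):
    # Compact one-liner for end users (friendly_format record above).
    print(exc.friendly_format())
    # Pretty-printed XML str for logs or display.
    print(exc.serialize_to_display())
    # XML bytes with prolog, e.g. for use as an HTTP response body.
    return exc.serialize_to_transport()

try:
    # Constructor arguments are an assumption; verify the real signature.
    raise d1_common.types.exceptions.NotFound(0, 'No object with the given identifier')
except d1_common.types.exceptions.DataONEException as e:
    xml_bytes = render_error(e)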
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
DataONEException.serialize_to_display
def serialize_to_display(self, xslt_url=None):
    """Serialize to a pretty printed Unicode str, suitable for display.

    Args:
      xslt_url: url
        Optional link to an XSLT stylesheet. If provided, a processing instruction
        for the stylesheet is included in the XML prolog.
    """
    return d1_common.xml.serialize_to_xml_str(
        self.get_pyxb(), pretty=True, xslt_url=xslt_url
    )
python
def serialize_to_display(self, xslt_url=None):
    """Serialize to a pretty printed Unicode str, suitable for display.

    Args:
      xslt_url: url
        Optional link to an XSLT stylesheet. If provided, a processing instruction
        for the stylesheet is included in the XML prolog.
    """
    return d1_common.xml.serialize_to_xml_str(
        self.get_pyxb(), pretty=True, xslt_url=xslt_url
    )
[ "def", "serialize_to_display", "(", "self", ",", "xslt_url", "=", "None", ")", ":", "return", "d1_common", ".", "xml", ".", "serialize_to_xml_str", "(", "self", ".", "get_pyxb", "(", ")", ",", "pretty", "=", "True", ",", "xslt_url", "=", "xslt_url", ")" ]
Serialize to a pretty printed Unicode str, suitable for display. Args: xslt_url: url Optional link to an XSLT stylesheet. If provided, a processing instruction for the stylesheet is included in the XML prolog.
[ "Serialize", "to", "a", "pretty", "printed", "Unicode", "str", "suitable", "for", "display", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L348-L357
train
45,032
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
DataONEException.serialize_to_headers
def serialize_to_headers(self):
    """Serialize to a dict of HTTP headers.

    Used in responses to HTTP HEAD requests. As with regular HTTP GET requests,
    HEAD requests may return DataONE Exceptions. Since a response to a HEAD request
    cannot include a body, the error is returned as a set of HTTP headers instead
    of an XML document.
    """
    return {
        'DataONE-Exception-Name': self.__class__.__name__,
        'DataONE-Exception-ErrorCode': self._format_header(self.errorCode),
        'DataONE-Exception-DetailCode': self._format_header(self.detailCode),
        'DataONE-Exception-Description': self._format_header(self.description),
        'DataONE-Exception-TraceInformation': self._format_header(
            self.traceInformation
        ),
        'DataONE-Exception-Identifier': self._format_header(self.identifier),
        'DataONE-Exception-NodeID': self._format_header(self.nodeId),
    }
python
def serialize_to_headers(self):
    """Serialize to a dict of HTTP headers.

    Used in responses to HTTP HEAD requests. As with regular HTTP GET requests,
    HEAD requests may return DataONE Exceptions. Since a response to a HEAD request
    cannot include a body, the error is returned as a set of HTTP headers instead
    of an XML document.
    """
    return {
        'DataONE-Exception-Name': self.__class__.__name__,
        'DataONE-Exception-ErrorCode': self._format_header(self.errorCode),
        'DataONE-Exception-DetailCode': self._format_header(self.detailCode),
        'DataONE-Exception-Description': self._format_header(self.description),
        'DataONE-Exception-TraceInformation': self._format_header(
            self.traceInformation
        ),
        'DataONE-Exception-Identifier': self._format_header(self.identifier),
        'DataONE-Exception-NodeID': self._format_header(self.nodeId),
    }
[ "def", "serialize_to_headers", "(", "self", ")", ":", "return", "{", "'DataONE-Exception-Name'", ":", "self", ".", "__class__", ".", "__name__", ",", "'DataONE-Exception-ErrorCode'", ":", "self", ".", "_format_header", "(", "self", ".", "errorCode", ")", ",", "'...
Serialize to a dict of HTTP headers. Used in responses to HTTP HEAD requests. As with regular HTTP GET requests, HEAD requests may return DataONE Exceptions. Since a response to a HEAD request cannot include a body, the error is returned as a set of HTTP headers instead of an XML document.
[ "Serialize", "to", "a", "dict", "of", "HTTP", "headers", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L363-L382
train
45,033
DataONEorg/d1_python
lib_common/src/d1_common/types/exceptions.py
DataONEException.get_pyxb
def get_pyxb(self):
    """Generate a DataONE Exception PyXB object.

    The PyXB object supports directly reading and writing the individual values
    that may be included in a DataONE Exception.
    """
    dataone_exception_pyxb = dataoneErrors.error()
    dataone_exception_pyxb.name = self.__class__.__name__
    dataone_exception_pyxb.errorCode = self.errorCode
    dataone_exception_pyxb.detailCode = self.detailCode
    if self.description is not None:
        dataone_exception_pyxb.description = self.description
    dataone_exception_pyxb.traceInformation = self.traceInformation
    if self.identifier is not None:
        dataone_exception_pyxb.identifier = self.identifier
    if self.nodeId is not None:
        dataone_exception_pyxb.nodeId = self.nodeId
    return dataone_exception_pyxb
python
def get_pyxb(self):
    """Generate a DataONE Exception PyXB object.

    The PyXB object supports directly reading and writing the individual values
    that may be included in a DataONE Exception.
    """
    dataone_exception_pyxb = dataoneErrors.error()
    dataone_exception_pyxb.name = self.__class__.__name__
    dataone_exception_pyxb.errorCode = self.errorCode
    dataone_exception_pyxb.detailCode = self.detailCode
    if self.description is not None:
        dataone_exception_pyxb.description = self.description
    dataone_exception_pyxb.traceInformation = self.traceInformation
    if self.identifier is not None:
        dataone_exception_pyxb.identifier = self.identifier
    if self.nodeId is not None:
        dataone_exception_pyxb.nodeId = self.nodeId
    return dataone_exception_pyxb
[ "def", "get_pyxb", "(", "self", ")", ":", "dataone_exception_pyxb", "=", "dataoneErrors", ".", "error", "(", ")", "dataone_exception_pyxb", ".", "name", "=", "self", ".", "__class__", ".", "__name__", "dataone_exception_pyxb", ".", "errorCode", "=", "self", ".",...
Generate a DataONE Exception PyXB object. The PyXB object supports directly reading and writing the individual values that may be included in a DataONE Exception.
[ "Generate", "a", "DataONE", "Exception", "PyXB", "object", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/types/exceptions.py#L384-L402
train
45,034
genialis/resolwe
resolwe/flow/migrations/0012_recreate_empty_parents.py
recreate_parent_dependencies
def recreate_parent_dependencies(apps, schema_editor):
    """Create empty dependency relation if parent has been deleted."""
    Data = apps.get_model('flow', 'Data')
    DataDependency = apps.get_model('flow', 'DataDependency')

    def process_dependency(data, parent):
        if not Data.objects.filter(pk=parent).exists():
            DataDependency.objects.create(
                child=data,
                parent=None,
                kind='io'
            )

    for data in Data.objects.all():
        for field_schema, fields in iterate_fields(data.input, data.process.input_schema):
            name = field_schema['name']
            value = fields[name]

            if field_schema.get('type', '').startswith('data:'):
                process_dependency(data, value)
            elif field_schema.get('type', '').startswith('list:data:'):
                for parent in value:
                    process_dependency(data, parent)
python
def recreate_parent_dependencies(apps, schema_editor):
    """Create empty dependency relation if parent has been deleted."""
    Data = apps.get_model('flow', 'Data')
    DataDependency = apps.get_model('flow', 'DataDependency')

    def process_dependency(data, parent):
        if not Data.objects.filter(pk=parent).exists():
            DataDependency.objects.create(
                child=data,
                parent=None,
                kind='io'
            )

    for data in Data.objects.all():
        for field_schema, fields in iterate_fields(data.input, data.process.input_schema):
            name = field_schema['name']
            value = fields[name]

            if field_schema.get('type', '').startswith('data:'):
                process_dependency(data, value)
            elif field_schema.get('type', '').startswith('list:data:'):
                for parent in value:
                    process_dependency(data, parent)
[ "def", "recreate_parent_dependencies", "(", "apps", ",", "schema_editor", ")", ":", "Data", "=", "apps", ".", "get_model", "(", "'flow'", ",", "'Data'", ")", "DataDependency", "=", "apps", ".", "get_model", "(", "'flow'", ",", "'DataDependency'", ")", "def", ...
Create empty dependency relation if parent has been deleted.
[ "Create", "empty", "dependency", "relation", "if", "parent", "has", "been", "deleted", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/migrations/0012_recreate_empty_parents.py#L10-L31
train
45,035
DataONEorg/d1_python
gmn/src/d1_gmn/app/db_filter.py
add_access_policy_filter
def add_access_policy_filter(request, query, column_name):
    """Filter records that do not have ``read`` or better access for one or more of
    the active subjects.

    Since ``read`` is the lowest access level that a subject can have, this method
    only has to filter on the presence of the subject.
    """
    q = d1_gmn.app.models.Subject.objects.filter(
        subject__in=request.all_subjects_set
    ).values('permission__sciobj')
    filter_arg = '{}__in'.format(column_name)
    return query.filter(**{filter_arg: q})
python
def add_access_policy_filter(request, query, column_name):
    """Filter records that do not have ``read`` or better access for one or more of
    the active subjects.

    Since ``read`` is the lowest access level that a subject can have, this method
    only has to filter on the presence of the subject.
    """
    q = d1_gmn.app.models.Subject.objects.filter(
        subject__in=request.all_subjects_set
    ).values('permission__sciobj')
    filter_arg = '{}__in'.format(column_name)
    return query.filter(**{filter_arg: q})
[ "def", "add_access_policy_filter", "(", "request", ",", "query", ",", "column_name", ")", ":", "q", "=", "d1_gmn", ".", "app", ".", "models", ".", "Subject", ".", "objects", ".", "filter", "(", "subject__in", "=", "request", ".", "all_subjects_set", ")", "...
Filter records that do not have ``read`` or better access for one or more of the active subjects. Since ``read`` is the lowest access level that a subject can have, this method only has to filter on the presence of the subject.
[ "Filter", "records", "that", "do", "not", "have", "read", "or", "better", "access", "for", "one", "or", "more", "of", "the", "active", "subjects", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/db_filter.py#L46-L58
train
45,036
DataONEorg/d1_python
gmn/src/d1_gmn/app/db_filter.py
add_redact_annotation
def add_redact_annotation(request, query):
    """Flag LogEntry records that require ``ipAddress`` and ``subject`` fields to be
    redacted before being returned to the client.

    Only trusted subjects and subjects with ``write`` or ``changePermission`` on a
    SciObj receive unredacted ``ipAddress`` and ``subject`` in LogEntry records for
    the associated SciObj. Subjects with only ``read`` access receive redacted
    records.
    """
    return query.annotate(
        redact=django.db.models.Exists(
            d1_gmn.app.models.Permission.objects.filter(
                sciobj=django.db.models.OuterRef('sciobj'),
                subject__subject__in=request.all_subjects_set,
                level__gte=d1_gmn.app.auth.WRITE_LEVEL,
            ),
            negated=True,
        )
    )
python
def add_redact_annotation(request, query):
    """Flag LogEntry records that require ``ipAddress`` and ``subject`` fields to be
    redacted before being returned to the client.

    Only trusted subjects and subjects with ``write`` or ``changePermission`` on a
    SciObj receive unredacted ``ipAddress`` and ``subject`` in LogEntry records for
    the associated SciObj. Subjects with only ``read`` access receive redacted
    records.
    """
    return query.annotate(
        redact=django.db.models.Exists(
            d1_gmn.app.models.Permission.objects.filter(
                sciobj=django.db.models.OuterRef('sciobj'),
                subject__subject__in=request.all_subjects_set,
                level__gte=d1_gmn.app.auth.WRITE_LEVEL,
            ),
            negated=True,
        )
    )
[ "def", "add_redact_annotation", "(", "request", ",", "query", ")", ":", "return", "query", ".", "annotate", "(", "redact", "=", "django", ".", "db", ".", "models", ".", "Exists", "(", "d1_gmn", ".", "app", ".", "models", ".", "Permission", ".", "objects"...
Flag LogEntry records that require ``ipAddress`` and ``subject`` fields to be redacted before being returned to the client. Only trusted subjects and subjects with ``write`` or ``changePermission`` on a SciObj receive unredacted ``ipAddress`` and ``subject`` in LogEntry records for the associated SciObj. Subjects with only ``read`` access receive redacted records.
[ "Flag", "LogEntry", "records", "that", "require", "ipAddress", "and", "subject", "fields", "to", "be", "redacted", "before", "being", "returned", "to", "the", "client", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/db_filter.py#L61-L81
train
45,037
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/assert_db.py
post_has_mime_parts
def post_has_mime_parts(request, parts):
    """Validate that a MMP POST contains all required sections.

    :param request: Django Request
    :param parts: [(part_type, part_name), ...]
    :return: None or raises exception.

    Where information is stored in the request:
    part_type header: request.META['HTTP_<UPPER CASE NAME>']
    part_type file: request.FILES['<name>']
    part_type field: request.POST['<name>']
    """
    missing = []

    for part_type, part_name in parts:
        if part_type == 'header':
            if 'HTTP_' + part_name.upper() not in request.META:
                missing.append('{}: {}'.format(part_type, part_name))
        elif part_type == 'file':
            if part_name not in list(request.FILES.keys()):
                missing.append('{}: {}'.format(part_type, part_name))
        elif part_type == 'field':
            if part_name not in list(request.POST.keys()):
                missing.append('{}: {}'.format(part_type, part_name))
        else:
            raise d1_common.types.exceptions.ServiceFailure(
                0, 'Invalid part_type. part_type="{}"'.format(part_type)
            )

    if len(missing) > 0:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Missing part(s) in MIME Multipart document. missing="{}"'.format(
                ', '.join(missing)
            ),
        )
python
def post_has_mime_parts(request, parts):
    """Validate that a MMP POST contains all required sections.

    :param request: Django Request
    :param parts: [(part_type, part_name), ...]
    :return: None or raises exception.

    Where information is stored in the request:
    part_type header: request.META['HTTP_<UPPER CASE NAME>']
    part_type file: request.FILES['<name>']
    part_type field: request.POST['<name>']
    """
    missing = []

    for part_type, part_name in parts:
        if part_type == 'header':
            if 'HTTP_' + part_name.upper() not in request.META:
                missing.append('{}: {}'.format(part_type, part_name))
        elif part_type == 'file':
            if part_name not in list(request.FILES.keys()):
                missing.append('{}: {}'.format(part_type, part_name))
        elif part_type == 'field':
            if part_name not in list(request.POST.keys()):
                missing.append('{}: {}'.format(part_type, part_name))
        else:
            raise d1_common.types.exceptions.ServiceFailure(
                0, 'Invalid part_type. part_type="{}"'.format(part_type)
            )

    if len(missing) > 0:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Missing part(s) in MIME Multipart document. missing="{}"'.format(
                ', '.join(missing)
            ),
        )
[ "def", "post_has_mime_parts", "(", "request", ",", "parts", ")", ":", "missing", "=", "[", "]", "for", "part_type", ",", "part_name", "in", "parts", ":", "if", "part_type", "==", "'header'", ":", "if", "'HTTP_'", "+", "part_name", ".", "upper", "(", ")",...
Validate that a MMP POST contains all required sections. :param request: Django Request :param parts: [(part_type, part_name), ...] :return: None or raises exception. Where information is stored in the request: part_type header: request.META['HTTP_<UPPER CASE NAME>'] part_type file: request.FILES['<name>'] part_type field: request.POST['<name>']
[ "Validate", "that", "a", "MMP", "POST", "contains", "all", "required", "sections", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/assert_db.py#L139-L175
train
45,038
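The post_has_mime_parts record above validates the layout of a MIME Multipart POST. The sketch below (not part of the dataset record; the view and part names are hypothetical) shows the shape of the parts argument that the docstring describes.

import d1_gmn.app.views.assert_db

def create_object_view(request):
    # Hypothetical GMN-style view; part names below are illustrative only.
    # Raises InvalidRequest listing every missing part, or ServiceFailure on a
    # part_type other than 'header', 'file' or 'field'.
    d1_gmn.app.views.assert_db.post_has_mime_parts(
        request,
        (
            ('field', 'pid'),      # looked up in request.POST
            ('file', 'object'),    # looked up in request.FILES
            ('file', 'sysmeta'),   # looked up in request.FILES
        ),
    )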
DataONEorg/d1_python
client_onedrive/src/d1_onedrive/impl/resolver/time_period.py
Resolver._decade_ranges_in_date_range
def _decade_ranges_in_date_range(self, begin_date, end_date):
    """Return a list of decades which is covered by date range."""
    begin_dated = begin_date.year / 10
    end_dated = end_date.year / 10
    decades = []
    for d in range(begin_dated, end_dated + 1):
        decades.append('{}-{}'.format(d * 10, d * 10 + 9))
    return decades
python
def _decade_ranges_in_date_range(self, begin_date, end_date):
    """Return a list of decades which is covered by date range."""
    begin_dated = begin_date.year / 10
    end_dated = end_date.year / 10
    decades = []
    for d in range(begin_dated, end_dated + 1):
        decades.append('{}-{}'.format(d * 10, d * 10 + 9))
    return decades
[ "def", "_decade_ranges_in_date_range", "(", "self", ",", "begin_date", ",", "end_date", ")", ":", "begin_dated", "=", "begin_date", ".", "year", "/", "10", "end_dated", "=", "end_date", ".", "year", "/", "10", "decades", "=", "[", "]", "for", "d", "in", ...
Return a list of decades which is covered by date range.
[ "Return", "a", "list", "of", "decades", "which", "is", "covered", "by", "date", "range", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/resolver/time_period.py#L160-L167
train
45,039
DataONEorg/d1_python
client_onedrive/src/d1_onedrive/impl/resolver/time_period.py
Resolver._years_in_date_range_within_decade
def _years_in_date_range_within_decade(self, decade, begin_date, end_date):
    """Return a list of years in one decade which is covered by date range."""
    begin_year = begin_date.year
    end_year = end_date.year
    if begin_year < decade:
        begin_year = decade
    if end_year > decade + 9:
        end_year = decade + 9
    return list(range(begin_year, end_year + 1))
python
def _years_in_date_range_within_decade(self, decade, begin_date, end_date):
    """Return a list of years in one decade which is covered by date range."""
    begin_year = begin_date.year
    end_year = end_date.year
    if begin_year < decade:
        begin_year = decade
    if end_year > decade + 9:
        end_year = decade + 9
    return list(range(begin_year, end_year + 1))
[ "def", "_years_in_date_range_within_decade", "(", "self", ",", "decade", ",", "begin_date", ",", "end_date", ")", ":", "begin_year", "=", "begin_date", ".", "year", "end_year", "=", "end_date", ".", "year", "if", "begin_year", "<", "decade", ":", "begin_year", ...
Return a list of years in one decade which is covered by date range.
[ "Return", "a", "list", "of", "years", "in", "one", "decade", "which", "is", "covered", "by", "date", "range", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/resolver/time_period.py#L172-L180
train
45,040
wilson-eft/wilson
wilson/run/smeft/rge.py
smeft_evolve_leadinglog
def smeft_evolve_leadinglog(C_in, scale_in, scale_out, newphys=True):
    """Solve the SMEFT RGEs in the leading log approximation.

    Input C_in and output C_out are dictionaries of arrays."""
    C_out = deepcopy(C_in)
    b = beta.beta(C_out, newphys=newphys)
    for k, C in C_out.items():
        C_out[k] = C + b[k] / (16 * pi**2) * log(scale_out / scale_in)
    return C_out
python
def smeft_evolve_leadinglog(C_in, scale_in, scale_out, newphys=True):
    """Solve the SMEFT RGEs in the leading log approximation.

    Input C_in and output C_out are dictionaries of arrays."""
    C_out = deepcopy(C_in)
    b = beta.beta(C_out, newphys=newphys)
    for k, C in C_out.items():
        C_out[k] = C + b[k] / (16 * pi**2) * log(scale_out / scale_in)
    return C_out
[ "def", "smeft_evolve_leadinglog", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "True", ")", ":", "C_out", "=", "deepcopy", "(", "C_in", ")", "b", "=", "beta", ".", "beta", "(", "C_out", ",", "newphys", "=", "newphys", ")", "for"...
Solve the SMEFT RGEs in the leading log approximation. Input C_in and output C_out are dictionaries of arrays.
[ "Solve", "the", "SMEFT", "RGEs", "in", "the", "leading", "log", "approximation", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/smeft/rge.py#L12-L20
train
45,041
wilson-eft/wilson
wilson/run/smeft/rge.py
_smeft_evolve
def _smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Axuliary function used in `smeft_evolve` and `smeft_evolve_continuous`"""
    def fun(t0, y):
        return beta.beta_array(
            C=C_array2dict(y.view(complex)), newphys=newphys
        ).view(float) / (16 * pi**2)
    y0 = C_dict2array(C_in).view(float)
    sol = solve_ivp(
        fun=fun, t_span=(log(scale_in), log(scale_out)), y0=y0, **kwargs
    )
    return sol
python
def _smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Axuliary function used in `smeft_evolve` and `smeft_evolve_continuous`"""
    def fun(t0, y):
        return beta.beta_array(
            C=C_array2dict(y.view(complex)), newphys=newphys
        ).view(float) / (16 * pi**2)
    y0 = C_dict2array(C_in).view(float)
    sol = solve_ivp(
        fun=fun, t_span=(log(scale_in), log(scale_out)), y0=y0, **kwargs
    )
    return sol
[ "def", "_smeft_evolve", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "True", ",", "*", "*", "kwargs", ")", ":", "def", "fun", "(", "t0", ",", "y", ")", ":", "return", "beta", ".", "beta_array", "(", "C", "=", "C_array2dict", ...
Axuliary function used in `smeft_evolve` and `smeft_evolve_continuous`
[ "Axuliary", "function", "used", "in", "smeft_evolve", "and", "smeft_evolve_continuous" ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/smeft/rge.py#L23-L32
train
45,042
wilson-eft/wilson
wilson/run/smeft/rge.py
smeft_evolve
def smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs by numeric integration.

    Input C_in and output C_out are dictionaries of arrays."""
    sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys, **kwargs)
    return C_array2dict(sol.y[:, -1].view(complex))
python
def smeft_evolve(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs by numeric integration.

    Input C_in and output C_out are dictionaries of arrays."""
    sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys, **kwargs)
    return C_array2dict(sol.y[:, -1].view(complex))
[ "def", "smeft_evolve", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "True", ",", "*", "*", "kwargs", ")", ":", "sol", "=", "_smeft_evolve", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "newphys", ",", "*"...
Solve the SMEFT RGEs by numeric integration. Input C_in and output C_out are dictionaries of arrays.
[ "Solve", "the", "SMEFT", "RGEs", "by", "numeric", "integration", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/smeft/rge.py#L35-L40
train
45,043
wilson-eft/wilson
wilson/run/smeft/rge.py
smeft_evolve_continuous
def smeft_evolve_continuous(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs by numeric integration, returning a function that allows
    to compute an interpolated solution at arbitrary intermediate scales."""
    sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys,
                        dense_output=True, **kwargs)
    @np.vectorize
    def _rge_solution(scale):
        t = log(scale)
        y = sol.sol(t).view(complex)
        yd = C_array2dict(y)
        yw = arrays2wcxf_nonred(yd)
        return yw
    def rge_solution(scale):
        # this is to return a scalar if the input is scalar
        return _rge_solution(scale)[()]
    return rge_solution
python
def smeft_evolve_continuous(C_in, scale_in, scale_out, newphys=True, **kwargs):
    """Solve the SMEFT RGEs by numeric integration, returning a function that allows
    to compute an interpolated solution at arbitrary intermediate scales."""
    sol = _smeft_evolve(C_in, scale_in, scale_out, newphys=newphys,
                        dense_output=True, **kwargs)
    @np.vectorize
    def _rge_solution(scale):
        t = log(scale)
        y = sol.sol(t).view(complex)
        yd = C_array2dict(y)
        yw = arrays2wcxf_nonred(yd)
        return yw
    def rge_solution(scale):
        # this is to return a scalar if the input is scalar
        return _rge_solution(scale)[()]
    return rge_solution
[ "def", "smeft_evolve_continuous", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "True", ",", "*", "*", "kwargs", ")", ":", "sol", "=", "_smeft_evolve", "(", "C_in", ",", "scale_in", ",", "scale_out", ",", "newphys", "=", "newphys", ...
Solve the SMEFT RGEs by numeric integration, returning a function that allows to compute an interpolated solution at arbitrary intermediate scales.
[ "Solve", "the", "SMEFT", "RGEs", "by", "numeric", "integration", "returning", "a", "function", "that", "allows", "to", "compute", "an", "interpolated", "solution", "at", "arbitrary", "intermediate", "scales", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/run/smeft/rge.py#L43-L59
train
45,044
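The three wilson.run.smeft.rge records above expose the same RG evolution at different levels of precision. A short sketch of how they might be combined follows (not part of the dataset records); it assumes C_in is a dictionary of SMEFT coefficient arrays in the package's own conventions and that the scale values shown are merely examples.

from wilson.run.smeft.rge import smeft_evolve_leadinglog, smeft_evolve_continuous

def run_down(C_in, scale_high=1e4, scale_low=91.1876):
    # Cheap estimate: a single leading-log step (smeft_evolve_leadinglog record).
    C_ll = smeft_evolve_leadinglog(C_in, scale_high, scale_low)
    # Full numerical solution, returned as a function of the renormalization scale.
    rge_sol = smeft_evolve_continuous(C_in, scale_high, scale_low)
    # Interpolated coefficients at an intermediate scale of 1 TeV.
    return C_ll, rge_sol(1e3)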
genialis/resolwe
resolwe/elastic/mixins.py
ElasticIndexFilterMixin.invalid_index
def invalid_index(self, name):
    """Show an invalid index error message."""
    self.stderr.write("Unknown index: {}".format(name))
    self.stderr.write("Supported indices are:")
    for index in index_builder.indexes:
        self.stderr.write(" * {}".format(index.__class__.__name__))
python
def invalid_index(self, name):
    """Show an invalid index error message."""
    self.stderr.write("Unknown index: {}".format(name))
    self.stderr.write("Supported indices are:")
    for index in index_builder.indexes:
        self.stderr.write(" * {}".format(index.__class__.__name__))
[ "def", "invalid_index", "(", "self", ",", "name", ")", ":", "self", ".", "stderr", ".", "write", "(", "\"Unknown index: {}\"", ".", "format", "(", "name", ")", ")", "self", ".", "stderr", ".", "write", "(", "\"Supported indices are:\"", ")", "for", "index"...
Show an invalid index error message.
[ "Show", "an", "invalid", "index", "error", "message", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/mixins.py#L22-L27
train
45,045
genialis/resolwe
resolwe/elastic/mixins.py
ElasticIndexFilterMixin.filter_indices
def filter_indices(self, options, verbosity, *args, **kwargs):
    """Filter indices and execute an action for each index."""
    index_name_map = {
        index.__class__.__name__: index
        for index in index_builder.indexes
    }

    # Process includes.
    if options['index']:
        indices = set(options['index'])
    else:
        indices = set(index_name_map.keys())

    # Process excludes.
    for index_name in options['exclude']:
        if index_name not in index_name_map:
            self.invalid_index(index_name)
            return

        indices.discard(index_name)

    # Execute action for each remaining index.
    for index_name in indices:
        try:
            index = index_name_map[index_name]
        except KeyError:
            self.invalid_index(index_name)
            return

        if verbosity > 0:
            self.stdout.write("Processing index '{}'...".format(index_name))

        self.handle_index(index, *args, **kwargs)
python
def filter_indices(self, options, verbosity, *args, **kwargs):
    """Filter indices and execute an action for each index."""
    index_name_map = {
        index.__class__.__name__: index
        for index in index_builder.indexes
    }

    # Process includes.
    if options['index']:
        indices = set(options['index'])
    else:
        indices = set(index_name_map.keys())

    # Process excludes.
    for index_name in options['exclude']:
        if index_name not in index_name_map:
            self.invalid_index(index_name)
            return

        indices.discard(index_name)

    # Execute action for each remaining index.
    for index_name in indices:
        try:
            index = index_name_map[index_name]
        except KeyError:
            self.invalid_index(index_name)
            return

        if verbosity > 0:
            self.stdout.write("Processing index '{}'...".format(index_name))

        self.handle_index(index, *args, **kwargs)
[ "def", "filter_indices", "(", "self", ",", "options", ",", "verbosity", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "index_name_map", "=", "{", "index", ".", "__class__", ".", "__name__", ":", "index", "for", "index", "in", "index_builder", ".",...
Filter indices and execute an action for each index.
[ "Filter", "indices", "and", "execute", "an", "action", "for", "each", "index", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/mixins.py#L36-L67
train
45,046
genialis/resolwe
resolwe/flow/elastic_indexes/base.py
BaseIndexMixin.get_contributor_sort_value
def get_contributor_sort_value(self, obj):
    """Generate display name for contributor."""
    user = obj.contributor

    if user.first_name or user.last_name:
        contributor = user.get_full_name()
    else:
        contributor = user.username

    return contributor.strip().lower()
python
def get_contributor_sort_value(self, obj):
    """Generate display name for contributor."""
    user = obj.contributor

    if user.first_name or user.last_name:
        contributor = user.get_full_name()
    else:
        contributor = user.username

    return contributor.strip().lower()
[ "def", "get_contributor_sort_value", "(", "self", ",", "obj", ")", ":", "user", "=", "obj", ".", "contributor", "if", "user", ".", "first_name", "or", "user", ".", "last_name", ":", "contributor", "=", "user", ".", "get_full_name", "(", ")", "else", ":", ...
Generate display name for contributor.
[ "Generate", "display", "name", "for", "contributor", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/elastic_indexes/base.py#L31-L40
train
45,047
genialis/resolwe
resolwe/flow/elastic_indexes/base.py
BaseIndexMixin._get_user
def _get_user(self, user):
    """Generate user filtering tokens."""
    return ' '.join([user.username, user.first_name, user.last_name])
python
def _get_user(self, user):
    """Generate user filtering tokens."""
    return ' '.join([user.username, user.first_name, user.last_name])
[ "def", "_get_user", "(", "self", ",", "user", ")", ":", "return", "' '", ".", "join", "(", "[", "user", ".", "username", ",", "user", ".", "first_name", ",", "user", ".", "last_name", "]", ")" ]
Generate user filtering tokens.
[ "Generate", "user", "filtering", "tokens", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/elastic_indexes/base.py#L42-L44
train
45,048
genialis/resolwe
resolwe/flow/elastic_indexes/base.py
BaseIndexMixin.get_owner_ids_value
def get_owner_ids_value(self, obj):
    """Extract owners' ids."""
    return [
        user.pk
        for user in get_users_with_permission(obj, get_full_perm('owner', obj))
    ]
python
def get_owner_ids_value(self, obj):
    """Extract owners' ids."""
    return [
        user.pk
        for user in get_users_with_permission(obj, get_full_perm('owner', obj))
    ]
[ "def", "get_owner_ids_value", "(", "self", ",", "obj", ")", ":", "return", "[", "user", ".", "pk", "for", "user", "in", "get_users_with_permission", "(", "obj", ",", "get_full_perm", "(", "'owner'", ",", "obj", ")", ")", "]" ]
Extract owners' ids.
[ "Extract", "owners", "ids", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/elastic_indexes/base.py#L54-L59
train
45,049
genialis/resolwe
resolwe/flow/elastic_indexes/base.py
BaseIndexMixin.get_owner_names_value
def get_owner_names_value(self, obj):
    """Extract owners' names."""
    return [
        self._get_user(user)
        for user in get_users_with_permission(obj, get_full_perm('owner', obj))
    ]
python
def get_owner_names_value(self, obj):
    """Extract owners' names."""
    return [
        self._get_user(user)
        for user in get_users_with_permission(obj, get_full_perm('owner', obj))
    ]
[ "def", "get_owner_names_value", "(", "self", ",", "obj", ")", ":", "return", "[", "self", ".", "_get_user", "(", "user", ")", "for", "user", "in", "get_users_with_permission", "(", "obj", ",", "get_full_perm", "(", "'owner'", ",", "obj", ")", ")", "]" ]
Extract owners' names.
[ "Extract", "owners", "names", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/elastic_indexes/base.py#L61-L66
train
45,050
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/slice.py
_get_and_assert_slice_param
def _get_and_assert_slice_param(url_dict, param_name, default_int):
    """Return ``param_str`` converted to an int.

    If str cannot be converted to int or int is not zero or positive, raise
    InvalidRequest.
    """
    param_str = url_dict['query'].get(param_name, default_int)
    try:
        n = int(param_str)
    except ValueError:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Slice parameter is not a valid integer. {}="{}"'.format(
                param_name, param_str
            ),
        )
    if n < 0:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Slice parameter cannot be a negative number. {}="{}"'.format(
                param_name, param_str
            ),
        )
    return n
python
def _get_and_assert_slice_param(url_dict, param_name, default_int):
    """Return ``param_str`` converted to an int.

    If str cannot be converted to int or int is not zero or positive, raise
    InvalidRequest.
    """
    param_str = url_dict['query'].get(param_name, default_int)
    try:
        n = int(param_str)
    except ValueError:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Slice parameter is not a valid integer. {}="{}"'.format(
                param_name, param_str
            ),
        )
    if n < 0:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Slice parameter cannot be a negative number. {}="{}"'.format(
                param_name, param_str
            ),
        )
    return n
[ "def", "_get_and_assert_slice_param", "(", "url_dict", ",", "param_name", ",", "default_int", ")", ":", "param_str", "=", "url_dict", "[", "'query'", "]", ".", "get", "(", "param_name", ",", "default_int", ")", "try", ":", "n", "=", "int", "(", "param_str", ...
Return ``param_str`` converted to an int. If str cannot be converted to int or int is not zero or positive, raise InvalidRequest.
[ "Return", "param_str", "converted", "to", "an", "int", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/slice.py#L79-L103
train
45,051
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/slice.py
_assert_valid_start
def _assert_valid_start(start_int, count_int, total_int):
    """Assert that the number of objects visible to the active subject is higher
    than the requested start position for the slice.

    This ensures that it's possible to create a valid slice.
    """
    if total_int and start_int >= total_int:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Requested a non-existing slice. start={} count={} total={}'.format(
                start_int, count_int, total_int
            ),
        )
python
def _assert_valid_start(start_int, count_int, total_int):
    """Assert that the number of objects visible to the active subject is higher
    than the requested start position for the slice.

    This ensures that it's possible to create a valid slice.
    """
    if total_int and start_int >= total_int:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Requested a non-existing slice. start={} count={} total={}'.format(
                start_int, count_int, total_int
            ),
        )
[ "def", "_assert_valid_start", "(", "start_int", ",", "count_int", ",", "total_int", ")", ":", "if", "total_int", "and", "start_int", ">=", "total_int", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Requeste...
Assert that the number of objects visible to the active subject is higher than the requested start position for the slice. This ensures that it's possible to create a valid slice.
[ "Assert", "that", "the", "number", "of", "objects", "visible", "to", "the", "active", "subject", "is", "higher", "than", "the", "requested", "start", "position", "for", "the", "slice", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/slice.py#L106-L119
train
45,052
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/slice.py
_adjust_count_if_required
def _adjust_count_if_required(start_int, count_int, total_int):
    """Adjust requested object count down if there are not enough objects visible
    to the active subjects to cover the requested slice start and count.

    Preconditions: start is verified to be lower than the number of visible objects,
    making it possible to create a valid slice by adjusting count.
    """
    if start_int + count_int > total_int:
        count_int = total_int - start_int
    count_int = min(count_int, django.conf.settings.MAX_SLICE_ITEMS)
    return count_int
python
def _adjust_count_if_required(start_int, count_int, total_int):
    """Adjust requested object count down if there are not enough objects visible
    to the active subjects to cover the requested slice start and count.

    Preconditions: start is verified to be lower than the number of visible objects,
    making it possible to create a valid slice by adjusting count.
    """
    if start_int + count_int > total_int:
        count_int = total_int - start_int
    count_int = min(count_int, django.conf.settings.MAX_SLICE_ITEMS)
    return count_int
[ "def", "_adjust_count_if_required", "(", "start_int", ",", "count_int", ",", "total_int", ")", ":", "if", "start_int", "+", "count_int", ">", "total_int", ":", "count_int", "=", "total_int", "-", "start_int", "count_int", "=", "min", "(", "count_int", ",", "dj...
Adjust requested object count down if there are not enough objects visible to the active subjects to cover the requested slice start and count. Preconditions: start is verified to be lower than the number of visible objects, making it possible to create a valid slice by adjusting count.
[ "Adjust", "requested", "object", "count", "down", "if", "there", "are", "not", "enough", "objects", "visible", "to", "the", "active", "subjects", "to", "cover", "the", "requested", "slice", "start", "and", "count", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/slice.py#L122-L133
train
45,053
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/slice.py
_add_fallback_slice_filter
def _add_fallback_slice_filter(query, start_int, count_int, total_int):
    """Create a slice of a query based on request start and count parameters.

    This adds `OFFSET <start> LIMIT <count>` to the SQL query, which causes slicing
    to run very slowly on large result sets.
    """
    logging.debug(
        'Adding fallback slice filter. start={} count={} total={} '.format(
            start_int, count_int, total_int
        )
    )
    if not count_int:
        return query.none()
    else:
        return query[start_int : start_int + count_int]
python
def _add_fallback_slice_filter(query, start_int, count_int, total_int):
    """Create a slice of a query based on request start and count parameters.

    This adds `OFFSET <start> LIMIT <count>` to the SQL query, which causes slicing
    to run very slowly on large result sets.
    """
    logging.debug(
        'Adding fallback slice filter. start={} count={} total={} '.format(
            start_int, count_int, total_int
        )
    )
    if not count_int:
        return query.none()
    else:
        return query[start_int : start_int + count_int]
[ "def", "_add_fallback_slice_filter", "(", "query", ",", "start_int", ",", "count_int", ",", "total_int", ")", ":", "logging", ".", "debug", "(", "'Adding fallback slice filter. start={} count={} total={} '", ".", "format", "(", "start_int", ",", "count_int", ",", "tot...
Create a slice of a query based on request start and count parameters. This adds `OFFSET <start> LIMIT <count>` to the SQL query, which causes slicing to run very slowly on large result sets.
[ "Create", "a", "slice", "of", "a", "query", "based", "on", "request", "start", "and", "count", "parameters", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/slice.py#L155-L170
train
45,054
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/slice.py
_cache_get_last_in_slice
def _cache_get_last_in_slice(url_dict, start_int, total_int, authn_subj_list):
    """Return None if cache entry does not exist."""
    key_str = _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list)
    # TODO: Django docs state that cache.get() should return None on unknown key.
    try:
        last_ts_tup = django.core.cache.cache.get(key_str)
    except KeyError:
        last_ts_tup = None
    logging.debug('Cache get. key="{}" -> last_ts_tup={}'.format(key_str, last_ts_tup))
    return last_ts_tup
python
def _cache_get_last_in_slice(url_dict, start_int, total_int, authn_subj_list):
    """Return None if cache entry does not exist."""
    key_str = _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list)
    # TODO: Django docs state that cache.get() should return None on unknown key.
    try:
        last_ts_tup = django.core.cache.cache.get(key_str)
    except KeyError:
        last_ts_tup = None
    logging.debug('Cache get. key="{}" -> last_ts_tup={}'.format(key_str, last_ts_tup))
    return last_ts_tup
[ "def", "_cache_get_last_in_slice", "(", "url_dict", ",", "start_int", ",", "total_int", ",", "authn_subj_list", ")", ":", "key_str", "=", "_gen_cache_key_for_slice", "(", "url_dict", ",", "start_int", ",", "total_int", ",", "authn_subj_list", ")", "# TODO: Django docs...
Return None if cache entry does not exist.
[ "Return", "None", "if", "cache", "entry", "does", "not", "exist", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/slice.py#L173-L182
train
45,055
DataONEorg/d1_python
gmn/src/d1_gmn/app/views/slice.py
_gen_cache_key_for_slice
def _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list):
    """Generate cache key for the REST URL the client is currently accessing or is
    expected to access in order to get the slice starting at the given ``start_int``
    of a multi-slice result set.

    When used for finding the key to check in the current call, ``start_int`` is 0,
    or the start that was passed in the current call.

    When used for finding the key to set for the anticipated call, ``start_int`` is
    current ``start_int`` + ``count_int``, the number of objects the current call
    will return.

    The URL for the slice is the same as for the current slice, except that the
    `start` query parameter has been increased by the number of items returned in
    the current slice.

    Except for advancing the start value and potentially adjusting the desired
    slice size, it doesn't make sense for the client to change the REST URL during
    slicing, but such queries are supported. They will, however, trigger potentially
    expensive database queries to find the current slice position.

    To support adjustments in desired slice size during slicing, the count is not
    used when generating the key.

    The active subjects are used in the key in order to prevent potential security
    issues if authenticated subjects change during slicing.

    The url_dict is normalized by encoding it to a JSON string with sorted keys. A
    hash of the JSON is used for better distribution in a hash map and to avoid the
    256 bytes limit on keys in some caches.
    """
    # logging.debug('Gen key. result_record_count={}'.format(result_record_count))
    key_url_dict = copy.deepcopy(url_dict)
    key_url_dict['query'].pop('start', None)
    key_url_dict['query'].pop('count', None)
    key_json = d1_common.util.serialize_to_normalized_compact_json(
        {
            'url_dict': key_url_dict,
            'start': start_int,
            'total': total_int,
            'subject': authn_subj_list,
        }
    )
    logging.debug('key_json={}'.format(key_json))
    return hashlib.sha256(key_json.encode('utf-8')).hexdigest()
python
def _gen_cache_key_for_slice(url_dict, start_int, total_int, authn_subj_list):
    """Generate cache key for the REST URL the client is currently accessing or is
    expected to access in order to get the slice starting at the given ``start_int``
    of a multi-slice result set.

    When used for finding the key to check in the current call, ``start_int`` is 0,
    or the start that was passed in the current call.

    When used for finding the key to set for the anticipated call, ``start_int`` is
    current ``start_int`` + ``count_int``, the number of objects the current call
    will return.

    The URL for the slice is the same as for the current slice, except that the
    `start` query parameter has been increased by the number of items returned in
    the current slice.

    Except for advancing the start value and potentially adjusting the desired
    slice size, it doesn't make sense for the client to change the REST URL during
    slicing, but such queries are supported. They will, however, trigger potentially
    expensive database queries to find the current slice position.

    To support adjustments in desired slice size during slicing, the count is not
    used when generating the key.

    The active subjects are used in the key in order to prevent potential security
    issues if authenticated subjects change during slicing.

    The url_dict is normalized by encoding it to a JSON string with sorted keys. A
    hash of the JSON is used for better distribution in a hash map and to avoid the
    256 bytes limit on keys in some caches.
    """
    # logging.debug('Gen key. result_record_count={}'.format(result_record_count))
    key_url_dict = copy.deepcopy(url_dict)
    key_url_dict['query'].pop('start', None)
    key_url_dict['query'].pop('count', None)
    key_json = d1_common.util.serialize_to_normalized_compact_json(
        {
            'url_dict': key_url_dict,
            'start': start_int,
            'total': total_int,
            'subject': authn_subj_list,
        }
    )
    logging.debug('key_json={}'.format(key_json))
    return hashlib.sha256(key_json.encode('utf-8')).hexdigest()
[ "def", "_gen_cache_key_for_slice", "(", "url_dict", ",", "start_int", ",", "total_int", ",", "authn_subj_list", ")", ":", "# logging.debug('Gen key. result_record_count={}'.format(result_record_count))", "key_url_dict", "=", "copy", ".", "deepcopy", "(", "url_dict", ")", "k...
Generate cache key for the REST URL the client is currently accessing or is expected to access in order to get the slice starting at the given ``start_int`` of a multi-slice result set. When used for finding the key to check in the current call, ``start_int`` is 0, or the start that was passed in the current call. When used for finding the key to set for the anticipated call, ``start_int`` is current ``start_int`` + ``count_int``, the number of objects the current call will return. The URL for the slice is the same as for the current slice, except that the `start` query parameter has been increased by the number of items returned in the current slice. Except for advancing the start value and potentially adjusting the desired slice size, it doesn't make sense for the client to change the REST URL during slicing, but such queries are supported. They will, however, trigger potentially expensive database queries to find the current slice position. To support adjustments in desired slice size during slicing, the count is not used when generating the key. The active subjects are used in the key in order to prevent potential security issues if authenticated subjects change during slicing. The url_dict is normalized by encoding it to a JSON string with sorted keys. A hash of the JSON is used for better distribution in a hash map and to avoid the 256 bytes limit on keys in some caches.
[ "Generate", "cache", "key", "for", "the", "REST", "URL", "the", "client", "is", "currently", "accessing", "or", "is", "expected", "to", "access", "in", "order", "to", "get", "the", "slice", "starting", "at", "the", "given", "start_int", "of", "a", "multi",...
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/views/slice.py#L185-L230
train
45,056
wilson-eft/wilson
wilson/translate/smeft.py
smeft_toarray
def smeft_toarray(wc_name, wc_dict):
    """Construct a numpy array with Wilson coefficient values from a dictionary of
    label-value pairs corresponding to the non-redundant elements."""
    shape = smeftutil.C_keys_shape[wc_name]
    C = np.zeros(shape, dtype=complex)
    for k, v in wc_dict.items():
        if k.split('_')[0] != wc_name:
            continue
        indices = k.split('_')[-1]  # e.g. '1213'
        indices = tuple(int(s) - 1 for s in indices)  # e.g. (1, 2, 1, 3)
        C[indices] = v
    C = smeftutil.symmetrize({wc_name: C})[wc_name]
    return C
python
def smeft_toarray(wc_name, wc_dict):
    """Construct a numpy array with Wilson coefficient values from a dictionary of
    label-value pairs corresponding to the non-redundant elements."""
    shape = smeftutil.C_keys_shape[wc_name]
    C = np.zeros(shape, dtype=complex)
    for k, v in wc_dict.items():
        if k.split('_')[0] != wc_name:
            continue
        indices = k.split('_')[-1]  # e.g. '1213'
        indices = tuple(int(s) - 1 for s in indices)  # e.g. (1, 2, 1, 3)
        C[indices] = v
    C = smeftutil.symmetrize({wc_name: C})[wc_name]
    return C
[ "def", "smeft_toarray", "(", "wc_name", ",", "wc_dict", ")", ":", "shape", "=", "smeftutil", ".", "C_keys_shape", "[", "wc_name", "]", "C", "=", "np", ".", "zeros", "(", "shape", ",", "dtype", "=", "complex", ")", "for", "k", ",", "v", "in", "wc_dict...
Construct a numpy array with Wilson coefficient values from a dictionary of label-value pairs corresponding to the non-redundant elements.
[ "Construct", "a", "numpy", "array", "with", "Wilson", "coefficient", "values", "from", "a", "dictionary", "of", "label", "-", "value", "pairs", "corresponding", "to", "the", "non", "-", "redundant", "elements", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/smeft.py#L10-L23
train
45,057
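The smeft_toarray record above turns WCxf-style label-value pairs into a coefficient array. An illustration of the key convention follows (not part of the dataset record); the uG operator and the sample values are assumptions chosen only to show the 1-based index suffix.

from wilson.translate.smeft import smeft_toarray

# 'uG_12' carries 1-based flavour indices, so it fills element [0, 1] of the array;
# operators with symmetry relations may have further elements filled by symmetrize().
wc_dict = {'uG_12': 1e-6 + 2e-6j, 'uG_33': 5e-7}
C_uG = smeft_toarray('uG', wc_dict)
print(C_uG.shape)   # expected (3, 3) for a quark dipole operator
print(C_uG[0, 1])   # the value supplied for 'uG_12'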
wilson-eft/wilson
wilson/translate/smeft.py
warsaw_to_warsawmass
def warsaw_to_warsawmass(C, parameters=None, sectors=None):
    """Translate from the Warsaw basis to the 'Warsaw mass' basis.

    Parameters used:
    - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined as
      the mismatch between left-handed quark mass matrix diagonalization matrices).
    """
    p = default_parameters.copy()
    if parameters is not None:
        # if parameters are passed in, overwrite the default values
        p.update(parameters)
    # start out with a 1:1 copy
    C_out = C.copy()
    # rotate left-handed up-type quark fields in uL-uR operator WCs
    C_rotate_u = ['uphi', 'uG', 'uW', 'uB']
    for name in C_rotate_u:
        _array = smeft_toarray(name, C)
        V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
        UuL = V.conj().T
        _array = UuL.conj().T @ _array
        _dict = smeft_fromarray(name, _array)
        C_out.update(_dict)
    # diagonalize dimension-5 Weinberg operator
    _array = smeft_toarray('llphiphi', C)
    _array = np.diag(ckmutil.diag.msvd(_array)[1])
    _dict = smeft_fromarray('llphiphi', _array)
    C_out.update(_dict)
    return C_out
python
def warsaw_to_warsawmass(C, parameters=None, sectors=None):
    """Translate from the Warsaw basis to the 'Warsaw mass' basis.

    Parameters used:
    - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined as
      the mismatch between left-handed quark mass matrix diagonalization matrices).
    """
    p = default_parameters.copy()
    if parameters is not None:
        # if parameters are passed in, overwrite the default values
        p.update(parameters)
    # start out with a 1:1 copy
    C_out = C.copy()
    # rotate left-handed up-type quark fields in uL-uR operator WCs
    C_rotate_u = ['uphi', 'uG', 'uW', 'uB']
    for name in C_rotate_u:
        _array = smeft_toarray(name, C)
        V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
        UuL = V.conj().T
        _array = UuL.conj().T @ _array
        _dict = smeft_fromarray(name, _array)
        C_out.update(_dict)
    # diagonalize dimension-5 Weinberg operator
    _array = smeft_toarray('llphiphi', C)
    _array = np.diag(ckmutil.diag.msvd(_array)[1])
    _dict = smeft_fromarray('llphiphi', _array)
    C_out.update(_dict)
    return C_out
[ "def", "warsaw_to_warsawmass", "(", "C", ",", "parameters", "=", "None", ",", "sectors", "=", "None", ")", ":", "p", "=", "default_parameters", ".", "copy", "(", ")", "if", "parameters", "is", "not", "None", ":", "# if parameters are passed in, overwrite the def...
Translate from the Warsaw basis to the 'Warsaw mass' basis. Parameters used: - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined as the mismatch between left-handed quark mass matrix diagonalization matrices).
[ "Translate", "from", "the", "Warsaw", "basis", "to", "the", "Warsaw", "mass", "basis", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/smeft.py#L35-L63
train
45,058
wilson-eft/wilson
wilson/translate/smeft.py
warsaw_up_to_warsaw
def warsaw_up_to_warsaw(C, parameters=None, sectors=None):
    """Translate from the 'Warsaw up' basis to the Warsaw basis.

    Parameters used:
    - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined as
      the mismatch between left-handed quark mass matrix diagonalization matrices).
    """
    C_in = smeftutil.wcxf2arrays_symmetrized(C)
    p = default_parameters.copy()
    if parameters is not None:
        # if parameters are passed in, overwrite the default values
        p.update(parameters)
    Uu = Ud = Ul = Ue = np.eye(3)
    V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
    Uq = V
    C_out = smeftutil.flavor_rotation(C_in, Uq, Uu, Ud, Ul, Ue)
    C_out = smeftutil.arrays2wcxf_nonred(C_out)
    warsaw = wcxf.Basis['SMEFT', 'Warsaw']
    all_wcs = set(warsaw.all_wcs)  # to speed up lookup
    return {k: v for k, v in C_out.items() if k in all_wcs}
python
def warsaw_up_to_warsaw(C, parameters=None, sectors=None):
    """Translate from the 'Warsaw up' basis to the Warsaw basis.

    Parameters used:
    - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined as
      the mismatch between left-handed quark mass matrix diagonalization matrices).
    """
    C_in = smeftutil.wcxf2arrays_symmetrized(C)
    p = default_parameters.copy()
    if parameters is not None:
        # if parameters are passed in, overwrite the default values
        p.update(parameters)
    Uu = Ud = Ul = Ue = np.eye(3)
    V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"])
    Uq = V
    C_out = smeftutil.flavor_rotation(C_in, Uq, Uu, Ud, Ul, Ue)
    C_out = smeftutil.arrays2wcxf_nonred(C_out)
    warsaw = wcxf.Basis['SMEFT', 'Warsaw']
    all_wcs = set(warsaw.all_wcs)  # to speed up lookup
    return {k: v for k, v in C_out.items() if k in all_wcs}
[ "def", "warsaw_up_to_warsaw", "(", "C", ",", "parameters", "=", "None", ",", "sectors", "=", "None", ")", ":", "C_in", "=", "smeftutil", ".", "wcxf2arrays_symmetrized", "(", "C", ")", "p", "=", "default_parameters", ".", "copy", "(", ")", "if", "parameters...
Translate from the 'Warsaw up' basis to the Warsaw basis. Parameters used: - `Vus`, `Vub`, `Vcb`, `gamma`: elements of the unitary CKM matrix (defined as the mismatch between left-handed quark mass matrix diagonalization matrices).
[ "Translate", "from", "the", "Warsaw", "up", "basis", "to", "the", "Warsaw", "basis", "." ]
4164f55ff663d4f668c6e2b4575fd41562662cc9
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/smeft.py#L89-L109
train
45,059
DataONEorg/d1_python
lib_common/src/d1_common/replication_policy.py
sysmeta_add_preferred
def sysmeta_add_preferred(sysmeta_pyxb, node_urn):
    """Add a remote Member Node to the list of preferred replication targets to
    this System Metadata object.

    Also remove the target MN from the list of blocked Member Nodes if present.

    If the target MN is already in the preferred list and not in the blocked list,
    this function is a no-op.

    Args:
      sysmeta_pyxb : SystemMetadata PyXB object.
        System Metadata in which to add the preferred replication target. If the
        System Metadata does not already have a Replication Policy, a default
        replication policy which enables replication is added and populated with
        the preferred replication target.

      node_urn : str
        Node URN of the remote MN that will be added. On the form
        ``urn:node:MyMemberNode``.
    """
    if not has_replication_policy(sysmeta_pyxb):
        sysmeta_set_default_rp(sysmeta_pyxb)
    rp_pyxb = sysmeta_pyxb.replicationPolicy
    _add_node(rp_pyxb, 'pref', node_urn)
    _remove_node(rp_pyxb, 'block', node_urn)
python
def sysmeta_add_preferred(sysmeta_pyxb, node_urn):
    """Add a remote Member Node to the list of preferred replication targets to
    this System Metadata object.

    Also remove the target MN from the list of blocked Member Nodes if present.

    If the target MN is already in the preferred list and not in the blocked list,
    this function is a no-op.

    Args:
      sysmeta_pyxb : SystemMetadata PyXB object.
        System Metadata in which to add the preferred replication target. If the
        System Metadata does not already have a Replication Policy, a default
        replication policy which enables replication is added and populated with
        the preferred replication target.

      node_urn : str
        Node URN of the remote MN that will be added. On the form
        ``urn:node:MyMemberNode``.
    """
    if not has_replication_policy(sysmeta_pyxb):
        sysmeta_set_default_rp(sysmeta_pyxb)
    rp_pyxb = sysmeta_pyxb.replicationPolicy
    _add_node(rp_pyxb, 'pref', node_urn)
    _remove_node(rp_pyxb, 'block', node_urn)
[ "def", "sysmeta_add_preferred", "(", "sysmeta_pyxb", ",", "node_urn", ")", ":", "if", "not", "has_replication_policy", "(", "sysmeta_pyxb", ")", ":", "sysmeta_set_default_rp", "(", "sysmeta_pyxb", ")", "rp_pyxb", "=", "sysmeta_pyxb", ".", "replicationPolicy", "_add_no...
Add a remote Member Node to the list of preferred replication targets to this System Metadata object. Also remove the target MN from the list of blocked Member Nodes if present. If the target MN is already in the preferred list and not in the blocked list, this function is a no-op. Args: sysmeta_pyxb : SystemMetadata PyXB object. System Metadata in which to add the preferred replication target. If the System Metadata does not already have a Replication Policy, a default replication policy which enables replication is added and populated with the preferred replication target. node_urn : str Node URN of the remote MN that will be added. On the form ``urn:node:MyMemberNode``.
[ "Add", "a", "remote", "Member", "Node", "to", "the", "list", "of", "preferred", "replication", "targets", "to", "this", "System", "Metadata", "object", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/replication_policy.py#L54-L80
train
45,060
DataONEorg/d1_python
lib_common/src/d1_common/replication_policy.py
normalize
def normalize(rp_pyxb): """Normalize a ReplicationPolicy PyXB type in place. The preferred and blocked lists are sorted alphabetically. As blocked nodes override preferred nodes, and any node present in both lists is removed from the preferred list. Args: rp_pyxb : ReplicationPolicy PyXB object The object will be normalized in place. """ # noinspection PyMissingOrEmptyDocstring def sort(r, a): d1_common.xml.sort_value_list_pyxb(_get_attr_or_list(r, a)) rp_pyxb.preferredMemberNode = set(_get_attr_or_list(rp_pyxb, 'pref')) - set( _get_attr_or_list(rp_pyxb, 'block') ) sort(rp_pyxb, 'block') sort(rp_pyxb, 'pref')
python
def normalize(rp_pyxb): """Normalize a ReplicationPolicy PyXB type in place. The preferred and blocked lists are sorted alphabetically. As blocked nodes override preferred nodes, and any node present in both lists is removed from the preferred list. Args: rp_pyxb : ReplicationPolicy PyXB object The object will be normalized in place. """ # noinspection PyMissingOrEmptyDocstring def sort(r, a): d1_common.xml.sort_value_list_pyxb(_get_attr_or_list(r, a)) rp_pyxb.preferredMemberNode = set(_get_attr_or_list(rp_pyxb, 'pref')) - set( _get_attr_or_list(rp_pyxb, 'block') ) sort(rp_pyxb, 'block') sort(rp_pyxb, 'pref')
[ "def", "normalize", "(", "rp_pyxb", ")", ":", "# noinspection PyMissingOrEmptyDocstring", "def", "sort", "(", "r", ",", "a", ")", ":", "d1_common", ".", "xml", ".", "sort_value_list_pyxb", "(", "_get_attr_or_list", "(", "r", ",", "a", ")", ")", "rp_pyxb", "....
Normalize a ReplicationPolicy PyXB type in place. The preferred and blocked lists are sorted alphabetically. Blocked nodes override preferred nodes, so any node present in both lists is removed from the preferred list. Args: rp_pyxb : ReplicationPolicy PyXB object The object will be normalized in place.
[ "Normalize", "a", "ReplicationPolicy", "PyXB", "type", "in", "place", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/replication_policy.py#L136-L157
train
45,061
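The normalization rule is that the blocked list wins: any node appearing in both lists is dropped from the preferred list, and both lists come back sorted. A minimal stand-alone sketch of the same idea on plain Python lists (not the PyXB objects used above):

def normalize_policy(preferred, blocked):
    # Blocked nodes override preferred nodes; both lists are returned sorted.
    preferred = sorted(set(preferred) - set(blocked))
    blocked = sorted(set(blocked))
    return preferred, blocked

pref, block = normalize_policy(
    ['urn:node:B', 'urn:node:A', 'urn:node:C'],
    ['urn:node:C'],
)
assert pref == ['urn:node:A', 'urn:node:B']
assert block == ['urn:node:C']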
DataONEorg/d1_python
lib_common/src/d1_common/replication_policy.py
are_equivalent_xml
def are_equivalent_xml(a_xml, b_xml): """Check if two ReplicationPolicy XML docs are semantically equivalent. The ReplicationPolicy XML docs are normalized before comparison. Args: a_xml, b_xml: ReplicationPolicy XML docs to compare Returns: bool: ``True`` if the resulting policies for the two objects are semantically equivalent. """ return are_equivalent_pyxb( d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml) )
python
def are_equivalent_xml(a_xml, b_xml): """Check if two ReplicationPolicy XML docs are semantically equivalent. The ReplicationPolicy XML docs are normalized before comparison. Args: a_xml, b_xml: ReplicationPolicy XML docs to compare Returns: bool: ``True`` if the resulting policies for the two objects are semantically equivalent. """ return are_equivalent_pyxb( d1_common.xml.deserialize(a_xml), d1_common.xml.deserialize(b_xml) )
[ "def", "are_equivalent_xml", "(", "a_xml", ",", "b_xml", ")", ":", "return", "are_equivalent_pyxb", "(", "d1_common", ".", "xml", ".", "deserialize", "(", "a_xml", ")", ",", "d1_common", ".", "xml", ".", "deserialize", "(", "b_xml", ")", ")" ]
Check if two ReplicationPolicy XML docs are semantically equivalent. The ReplicationPolicy XML docs are normalized before comparison. Args: a_xml, b_xml: ReplicationPolicy XML docs to compare Returns: bool: ``True`` if the resulting policies for the two objects are semantically equivalent.
[ "Check", "if", "two", "ReplicationPolicy", "XML", "docs", "are", "semantically", "equivalent", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/replication_policy.py#L214-L229
train
45,062
DataONEorg/d1_python
lib_common/src/d1_common/replication_policy.py
pyxb_to_dict
def pyxb_to_dict(rp_pyxb): """Convert ReplicationPolicy PyXB object to a normalized dict. Args: rp_pyxb: ReplicationPolicy to convert. Returns: dict : Replication Policy as normalized dict. Example:: { 'allowed': True, 'num': 3, 'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'}, } """ return { 'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')), 'num': _get_as_int(rp_pyxb), 'block': _get_as_set(rp_pyxb, 'block'), 'pref': _get_as_set(rp_pyxb, 'pref'), }
python
def pyxb_to_dict(rp_pyxb): """Convert ReplicationPolicy PyXB object to a normalized dict. Args: rp_pyxb: ReplicationPolicy to convert. Returns: dict : Replication Policy as normalized dict. Example:: { 'allowed': True, 'num': 3, 'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'}, } """ return { 'allowed': bool(_get_attr_or_list(rp_pyxb, 'allowed')), 'num': _get_as_int(rp_pyxb), 'block': _get_as_set(rp_pyxb, 'block'), 'pref': _get_as_set(rp_pyxb, 'pref'), }
[ "def", "pyxb_to_dict", "(", "rp_pyxb", ")", ":", "return", "{", "'allowed'", ":", "bool", "(", "_get_attr_or_list", "(", "rp_pyxb", ",", "'allowed'", ")", ")", ",", "'num'", ":", "_get_as_int", "(", "rp_pyxb", ")", ",", "'block'", ":", "_get_as_set", "(", ...
Convert ReplicationPolicy PyXB object to a normalized dict. Args: rp_pyxb: ReplicationPolicy to convert. Returns: dict : Replication Policy as normalized dict. Example:: { 'allowed': True, 'num': 3, 'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'}, }
[ "Convert", "ReplicationPolicy", "PyXB", "object", "to", "a", "normalized", "dict", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/replication_policy.py#L274-L298
train
45,063
DataONEorg/d1_python
lib_common/src/d1_common/replication_policy.py
dict_to_pyxb
def dict_to_pyxb(rp_dict): """Convert dict to ReplicationPolicy PyXB object. Args: rp_dict: Native Python structure representing a Replication Policy. Example:: { 'allowed': True, 'num': 3, 'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'}, } Returns: ReplicationPolicy PyXB object. """ rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy() rp_pyxb.replicationAllowed = rp_dict['allowed'] rp_pyxb.numberReplicas = rp_dict['num'] rp_pyxb.blockedMemberNode = rp_dict['block'] rp_pyxb.preferredMemberNode = rp_dict['pref'] normalize(rp_pyxb) return rp_pyxb
python
def dict_to_pyxb(rp_dict): """Convert dict to ReplicationPolicy PyXB object. Args: rp_dict: Native Python structure representing a Replication Policy. Example:: { 'allowed': True, 'num': 3, 'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'}, } Returns: ReplicationPolicy PyXB object. """ rp_pyxb = d1_common.types.dataoneTypes.replicationPolicy() rp_pyxb.replicationAllowed = rp_dict['allowed'] rp_pyxb.numberReplicas = rp_dict['num'] rp_pyxb.blockedMemberNode = rp_dict['block'] rp_pyxb.preferredMemberNode = rp_dict['pref'] normalize(rp_pyxb) return rp_pyxb
[ "def", "dict_to_pyxb", "(", "rp_dict", ")", ":", "rp_pyxb", "=", "d1_common", ".", "types", ".", "dataoneTypes", ".", "replicationPolicy", "(", ")", "rp_pyxb", ".", "replicationAllowed", "=", "rp_dict", "[", "'allowed'", "]", "rp_pyxb", ".", "numberReplicas", ...
Convert dict to ReplicationPolicy PyXB object. Args: rp_dict: Native Python structure representing a Replication Policy. Example:: { 'allowed': True, 'num': 3, 'blockedMemberNode': {'urn:node:NODE1', 'urn:node:NODE2', 'urn:node:NODE3'}, 'preferredMemberNode': {'urn:node:NODE4', 'urn:node:NODE5'}, } Returns: ReplicationPolicy PyXB object.
[ "Convert", "dict", "to", "ReplicationPolicy", "PyXB", "object", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/replication_policy.py#L301-L326
train
45,064
DataONEorg/d1_python
lib_common/src/d1_common/replication_policy.py
_ensure_allow_rp
def _ensure_allow_rp(rp_pyxb): """Ensure that RP allows replication.""" if not rp_pyxb.replicationAllowed: rp_pyxb.replicationAllowed = True if not rp_pyxb.numberReplicas: rp_pyxb.numberReplicas = 3
python
def _ensure_allow_rp(rp_pyxb): """Ensure that RP allows replication.""" if not rp_pyxb.replicationAllowed: rp_pyxb.replicationAllowed = True if not rp_pyxb.numberReplicas: rp_pyxb.numberReplicas = 3
[ "def", "_ensure_allow_rp", "(", "rp_pyxb", ")", ":", "if", "not", "rp_pyxb", ".", "replicationAllowed", ":", "rp_pyxb", ".", "replicationAllowed", "=", "True", "if", "not", "rp_pyxb", ".", "numberReplicas", ":", "rp_pyxb", ".", "numberReplicas", "=", "3" ]
Ensure that RP allows replication.
[ "Ensure", "that", "RP", "allows", "replication", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/replication_policy.py#L340-L345
train
45,065
genialis/resolwe
resolwe/flow/utils/stats.py
_display_interval
def _display_interval(i): """Convert a time interval into a human-readable string. :param i: The interval to convert, in seconds. """ sigils = ["d", "h", "m", "s"] factors = [24 * 60 * 60, 60 * 60, 60, 1] remain = int(i) result = "" for fac, sig in zip(factors, sigils): if remain < fac: continue result += "{}{}".format(remain // fac, sig) remain = remain % fac return result
python
def _display_interval(i): """Convert a time interval into a human-readable string. :param i: The interval to convert, in seconds. """ sigils = ["d", "h", "m", "s"] factors = [24 * 60 * 60, 60 * 60, 60, 1] remain = int(i) result = "" for fac, sig in zip(factors, sigils): if remain < fac: continue result += "{}{}".format(remain // fac, sig) remain = remain % fac return result
[ "def", "_display_interval", "(", "i", ")", ":", "sigils", "=", "[", "\"d\"", ",", "\"h\"", ",", "\"m\"", ",", "\"s\"", "]", "factors", "=", "[", "24", "*", "60", "*", "60", ",", "60", "*", "60", ",", "60", ",", "1", "]", "remain", "=", "int", ...
Convert a time interval into a human-readable string. :param i: The interval to convert, in seconds.
[ "Convert", "a", "time", "interval", "into", "a", "human", "-", "readable", "string", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/stats.py#L59-L73
train
45,066
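A few worked examples of the formatting above (assuming `_display_interval` from the module is in scope). Note that any unit whose remaining value is smaller than one of that unit is skipped entirely, and a zero interval renders as an empty string.

assert _display_interval(59) == '59s'
assert _display_interval(3661) == '1h1m1s'
assert _display_interval(3601) == '1h1s'       # minutes skipped because 1 < 60
assert _display_interval(90061) == '1d1h1m1s'
assert _display_interval(0) == ''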
genialis/resolwe
resolwe/flow/utils/stats.py
NumberSeriesShape.update
def update(self, num): """Update metrics with the new number.""" num = float(num) self.count += 1 self.low = min(self.low, num) self.high = max(self.high, num) # Welford's online mean and variance algorithm. delta = num - self.mean self.mean = self.mean + delta / self.count delta2 = num - self.mean self._rolling_variance = self._rolling_variance + delta * delta2 if self.count > 1: self.deviation = math.sqrt(self._rolling_variance / (self.count - 1)) else: self.deviation = 0.0
python
def update(self, num): """Update metrics with the new number.""" num = float(num) self.count += 1 self.low = min(self.low, num) self.high = max(self.high, num) # Welford's online mean and variance algorithm. delta = num - self.mean self.mean = self.mean + delta / self.count delta2 = num - self.mean self._rolling_variance = self._rolling_variance + delta * delta2 if self.count > 1: self.deviation = math.sqrt(self._rolling_variance / (self.count - 1)) else: self.deviation = 0.0
[ "def", "update", "(", "self", ",", "num", ")", ":", "num", "=", "float", "(", "num", ")", "self", ".", "count", "+=", "1", "self", ".", "low", "=", "min", "(", "self", ".", "low", ",", "num", ")", "self", ".", "high", "=", "max", "(", "self",...
Update metrics with the new number.
[ "Update", "metrics", "with", "the", "new", "number", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/stats.py#L30-L46
train
45,067
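The update above is Welford's online algorithm, so the running mean and sample deviation can be cross-checked against the batch formulas from the standard library. A small stand-alone sketch, independent of the NumberSeriesShape class:

import math
import statistics

def welford(values):
    # One pass over the data: running count, mean, and sum of squared deviations.
    count, mean, m2 = 0, 0.0, 0.0
    for x in values:
        count += 1
        delta = x - mean
        mean += delta / count
        m2 += delta * (x - mean)
    deviation = math.sqrt(m2 / (count - 1)) if count > 1 else 0.0
    return mean, deviation

data = [3.0, 7.0, 7.0, 19.0]
mean, dev = welford(data)
assert math.isclose(mean, statistics.mean(data))   # batch mean agrees
assert math.isclose(dev, statistics.stdev(data))   # sample deviation agrees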
genialis/resolwe
resolwe/flow/utils/stats.py
NumberSeriesShape.to_dict
def to_dict(self): """Pack the stats computed into a dictionary.""" return { 'high': self.high, 'low': self.low, 'mean': self.mean, 'count': self.count, 'deviation': self.deviation, }
python
def to_dict(self): """Pack the stats computed into a dictionary.""" return { 'high': self.high, 'low': self.low, 'mean': self.mean, 'count': self.count, 'deviation': self.deviation, }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'high'", ":", "self", ".", "high", ",", "'low'", ":", "self", ".", "low", ",", "'mean'", ":", "self", ".", "mean", ",", "'count'", ":", "self", ".", "count", ",", "'deviation'", ":", "self", ...
Pack the stats computed into a dictionary.
[ "Pack", "the", "stats", "computed", "into", "a", "dictionary", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/stats.py#L48-L56
train
45,068
genialis/resolwe
resolwe/flow/utils/stats.py
SimpleLoadAvg.add
def add(self, count, timestamp=None): """Add a value at the specified time to the series. :param count: The number of work items ready at the specified time. :param timestamp: The timestamp to add. Defaults to None, meaning current time. It should be strictly greater (newer) than the last added timestamp. """ if timestamp is None: timestamp = time.time() if self.last_data >= timestamp: raise ValueError("Time {} >= {} in load average calculation".format(self.last_data, timestamp)) self.last_data = timestamp for meta in self.intervals.values(): meta.push(count, timestamp)
python
def add(self, count, timestamp=None): """Add a value at the specified time to the series. :param count: The number of work items ready at the specified time. :param timestamp: The timestamp to add. Defaults to None, meaning current time. It should be strictly greater (newer) than the last added timestamp. """ if timestamp is None: timestamp = time.time() if self.last_data >= timestamp: raise ValueError("Time {} >= {} in load average calculation".format(self.last_data, timestamp)) self.last_data = timestamp for meta in self.intervals.values(): meta.push(count, timestamp)
[ "def", "add", "(", "self", ",", "count", ",", "timestamp", "=", "None", ")", ":", "if", "timestamp", "is", "None", ":", "timestamp", "=", "time", ".", "time", "(", ")", "if", "self", ".", "last_data", ">=", "timestamp", ":", "raise", "ValueError", "(...
Add a value at the specified time to the series. :param count: The number of work items ready at the specified time. :param timestamp: The timestamp to add. Defaults to None, meaning current time. It should be strictly greater (newer) than the last added timestamp.
[ "Add", "a", "value", "at", "the", "specified", "time", "to", "the", "series", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/stats.py#L146-L162
train
45,069
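The class above keeps one accumulator per configured interval and pushes each sample to all of them. A common way such per-interval accumulators are built is an exponentially decayed average, as in Unix load averages; the snippet below is a stand-alone sketch of that idea only and is not resolwe's actual interval implementation.

import math

class DecayingAverage:
    # Exponentially decayed average over a characteristic interval (sketch only).

    def __init__(self, interval):
        self.interval = float(interval)
        self.value = 0.0
        self.last_time = None

    def push(self, count, timestamp):
        if self.last_time is not None:
            weight = math.exp(-(timestamp - self.last_time) / self.interval)
            self.value = self.value * weight + count * (1.0 - weight)
        else:
            self.value = float(count)
        self.last_time = timestamp

avg = DecayingAverage(60)
for t, n in [(0, 4), (30, 2), (60, 0)]:
    avg.push(n, t)
print(round(avg.value, 3))  # between 0 and 4, biased toward the most recent samples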
genialis/resolwe
resolwe/flow/utils/stats.py
SimpleLoadAvg.to_dict
def to_dict(self): """Pack the load averages into a nicely-keyed dictionary.""" result = {} for meta in self.intervals.values(): result[meta.display] = meta.value return result
python
def to_dict(self): """Pack the load averages into a nicely-keyed dictionary.""" result = {} for meta in self.intervals.values(): result[meta.display] = meta.value return result
[ "def", "to_dict", "(", "self", ")", ":", "result", "=", "{", "}", "for", "meta", "in", "self", ".", "intervals", ".", "values", "(", ")", ":", "result", "[", "meta", ".", "display", "]", "=", "meta", ".", "value", "return", "result" ]
Pack the load averages into a nicely-keyed dictionary.
[ "Pack", "the", "load", "averages", "into", "a", "nicely", "-", "keyed", "dictionary", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/stats.py#L164-L169
train
45,070
genialis/resolwe
resolwe/flow/management/commands/register.py
Command.valid
def valid(self, instance, schema): """Validate schema.""" try: jsonschema.validate(instance, schema) except jsonschema.exceptions.ValidationError as ex: self.stderr.write(" VALIDATION ERROR: {}".format(instance['name'] if 'name' in instance else '')) self.stderr.write(" path: {}".format(ex.path)) self.stderr.write(" message: {}".format(ex.message)) self.stderr.write(" validator: {}".format(ex.validator)) self.stderr.write(" val. value: {}".format(ex.validator_value)) return False try: # Check that default values fit field schema. for field in ['input', 'output', 'schema']: for schema, _, path in iterate_schema({}, instance.get(field, {})): if 'default' in schema: validate_schema({schema['name']: schema['default']}, [schema]) except ValidationError: self.stderr.write(" VALIDATION ERROR: {}".format(instance['name'])) self.stderr.write(" Default value of field '{}' is not valid.". format(path)) return False return True
python
def valid(self, instance, schema): """Validate schema.""" try: jsonschema.validate(instance, schema) except jsonschema.exceptions.ValidationError as ex: self.stderr.write(" VALIDATION ERROR: {}".format(instance['name'] if 'name' in instance else '')) self.stderr.write(" path: {}".format(ex.path)) self.stderr.write(" message: {}".format(ex.message)) self.stderr.write(" validator: {}".format(ex.validator)) self.stderr.write(" val. value: {}".format(ex.validator_value)) return False try: # Check that default values fit field schema. for field in ['input', 'output', 'schema']: for schema, _, path in iterate_schema({}, instance.get(field, {})): if 'default' in schema: validate_schema({schema['name']: schema['default']}, [schema]) except ValidationError: self.stderr.write(" VALIDATION ERROR: {}".format(instance['name'])) self.stderr.write(" Default value of field '{}' is not valid.". format(path)) return False return True
[ "def", "valid", "(", "self", ",", "instance", ",", "schema", ")", ":", "try", ":", "jsonschema", ".", "validate", "(", "instance", ",", "schema", ")", "except", "jsonschema", ".", "exceptions", ".", "ValidationError", "as", "ex", ":", "self", ".", "stder...
Validate schema.
[ "Validate", "schema", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/register.py#L47-L70
train
45,071
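The command relies on `jsonschema.validate` raising a `ValidationError` that carries the attributes it prints (`path`, `message`, `validator`, `validator_value`). A minimal example with a toy schema (not one of the resolwe schemas):

import jsonschema

schema = {
    'type': 'object',
    'properties': {'name': {'type': 'string'}},
    'required': ['name'],
}

try:
    jsonschema.validate({'name': 123}, schema)
except jsonschema.exceptions.ValidationError as ex:
    print(list(ex.path))    # ['name'] (ex.path is a deque)
    print(ex.validator)     # 'type'
    print(ex.message)       # "123 is not of type 'string'"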
genialis/resolwe
resolwe/flow/management/commands/register.py
Command.find_descriptor_schemas
def find_descriptor_schemas(self, schema_file): """Find descriptor schemas in given path.""" if not schema_file.lower().endswith(('.yml', '.yaml')): return [] with open(schema_file) as fn: schemas = yaml.load(fn, Loader=yaml.FullLoader) if not schemas: self.stderr.write("Could not read YAML file {}".format(schema_file)) return [] descriptor_schemas = [] for schema in schemas: if 'schema' not in schema: continue descriptor_schemas.append(schema) return descriptor_schemas
python
def find_descriptor_schemas(self, schema_file): """Find descriptor schemas in given path.""" if not schema_file.lower().endswith(('.yml', '.yaml')): return [] with open(schema_file) as fn: schemas = yaml.load(fn, Loader=yaml.FullLoader) if not schemas: self.stderr.write("Could not read YAML file {}".format(schema_file)) return [] descriptor_schemas = [] for schema in schemas: if 'schema' not in schema: continue descriptor_schemas.append(schema) return descriptor_schemas
[ "def", "find_descriptor_schemas", "(", "self", ",", "schema_file", ")", ":", "if", "not", "schema_file", ".", "lower", "(", ")", ".", "endswith", "(", "(", "'.yml'", ",", "'.yaml'", ")", ")", ":", "return", "[", "]", "with", "open", "(", "schema_file", ...
Find descriptor schemas in given path.
[ "Find", "descriptor", "schemas", "in", "given", "path", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/register.py#L72-L90
train
45,072
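The discovery step simply parses the whole YAML file and keeps the top-level entries that carry a `schema` key. A self-contained illustration with an in-memory YAML document (the content is hypothetical):

import yaml

text = """
- slug: sample-descriptor
  schema:
    - name: notes
      type: basic:string
- slug: not-a-descriptor
"""

schemas = yaml.load(text, Loader=yaml.FullLoader)
descriptors = [s for s in schemas if 'schema' in s]   # keep only descriptor entries
print([d['slug'] for d in descriptors])               # ['sample-descriptor']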
genialis/resolwe
resolwe/flow/management/commands/register.py
Command.find_schemas
def find_schemas(self, schema_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=1): """Find schemas in packages that match filters.""" schema_matches = [] if not os.path.isdir(schema_path): if verbosity > 0: self.stdout.write("Invalid path {}".format(schema_path)) return if schema_type not in [SCHEMA_TYPE_PROCESS, SCHEMA_TYPE_DESCRIPTOR]: raise ValueError('Invalid schema type') for root, _, files in os.walk(schema_path): for schema_file in [os.path.join(root, fn) for fn in files]: schemas = None if schema_type == SCHEMA_TYPE_DESCRIPTOR: # Discover descriptors. schemas = self.find_descriptor_schemas(schema_file) elif schema_type == SCHEMA_TYPE_PROCESS: # Perform process discovery for all supported execution engines. schemas = [] for execution_engine in manager.execution_engines.values(): schemas.extend(execution_engine.discover_process(schema_file)) for schema in schemas: schema_matches.append(schema) return schema_matches
python
def find_schemas(self, schema_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=1): """Find schemas in packages that match filters.""" schema_matches = [] if not os.path.isdir(schema_path): if verbosity > 0: self.stdout.write("Invalid path {}".format(schema_path)) return if schema_type not in [SCHEMA_TYPE_PROCESS, SCHEMA_TYPE_DESCRIPTOR]: raise ValueError('Invalid schema type') for root, _, files in os.walk(schema_path): for schema_file in [os.path.join(root, fn) for fn in files]: schemas = None if schema_type == SCHEMA_TYPE_DESCRIPTOR: # Discover descriptors. schemas = self.find_descriptor_schemas(schema_file) elif schema_type == SCHEMA_TYPE_PROCESS: # Perform process discovery for all supported execution engines. schemas = [] for execution_engine in manager.execution_engines.values(): schemas.extend(execution_engine.discover_process(schema_file)) for schema in schemas: schema_matches.append(schema) return schema_matches
[ "def", "find_schemas", "(", "self", ",", "schema_path", ",", "schema_type", "=", "SCHEMA_TYPE_PROCESS", ",", "verbosity", "=", "1", ")", ":", "schema_matches", "=", "[", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "schema_path", ")", ":", "if...
Find schemas in packages that match filters.
[ "Find", "schemas", "in", "packages", "that", "match", "filters", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/register.py#L92-L119
train
45,073
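The traversal itself is plain `os.walk` over the schema directory; a compact stand-alone equivalent that just collects every file path under a directory looks like this (the example path is hypothetical):

import os

def list_files(top):
    # Return all file paths under ``top``, walking the tree depth-first.
    matches = []
    for root, _, files in os.walk(top):
        matches.extend(os.path.join(root, name) for name in files)
    return matches

# e.g. list_files('/tmp') returns every file path below /tmp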
genialis/resolwe
resolwe/flow/management/commands/register.py
Command.register_descriptors
def register_descriptors(self, descriptor_schemas, user, force=False, verbosity=1): """Read and register descriptors.""" log_descriptors = [] for descriptor_schema in descriptor_schemas: for schema, _, _ in iterate_schema({}, descriptor_schema.get('schema', {})): if not schema['type'][-1].endswith(':'): schema['type'] += ':' if 'schema' not in descriptor_schema: descriptor_schema['schema'] = [] if not self.valid(descriptor_schema, DESCRIPTOR_SCHEMA): continue slug = descriptor_schema['slug'] version = descriptor_schema.get('version', '0.0.0') int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS) # `latest version` is returned as `int` so it has to be compared to `int_version` latest_version = DescriptorSchema.objects.filter(slug=slug).aggregate(Max('version'))['version__max'] if latest_version is not None and latest_version > int_version: self.stderr.write("Skip descriptor schema {}: newer version installed".format(slug)) continue previous_descriptor_qs = DescriptorSchema.objects.filter(slug=slug) if previous_descriptor_qs.exists(): previous_descriptor = previous_descriptor_qs.latest() else: previous_descriptor = None descriptor_query = DescriptorSchema.objects.filter(slug=slug, version=version) if descriptor_query.exists(): if not force: if verbosity > 0: self.stdout.write("Skip descriptor schema {}: same version installed".format(slug)) continue descriptor_query.update(**descriptor_schema) log_descriptors.append("Updated {}".format(slug)) else: descriptor = DescriptorSchema.objects.create(contributor=user, **descriptor_schema) assign_contributor_permissions(descriptor) if previous_descriptor: copy_permissions(previous_descriptor, descriptor) log_descriptors.append("Inserted {}".format(slug)) if log_descriptors and verbosity > 0: self.stdout.write("Descriptor schemas Updates:") for log in log_descriptors: self.stdout.write(" {}".format(log))
python
def register_descriptors(self, descriptor_schemas, user, force=False, verbosity=1): """Read and register descriptors.""" log_descriptors = [] for descriptor_schema in descriptor_schemas: for schema, _, _ in iterate_schema({}, descriptor_schema.get('schema', {})): if not schema['type'][-1].endswith(':'): schema['type'] += ':' if 'schema' not in descriptor_schema: descriptor_schema['schema'] = [] if not self.valid(descriptor_schema, DESCRIPTOR_SCHEMA): continue slug = descriptor_schema['slug'] version = descriptor_schema.get('version', '0.0.0') int_version = convert_version_string_to_int(version, VERSION_NUMBER_BITS) # `latest version` is returned as `int` so it has to be compared to `int_version` latest_version = DescriptorSchema.objects.filter(slug=slug).aggregate(Max('version'))['version__max'] if latest_version is not None and latest_version > int_version: self.stderr.write("Skip descriptor schema {}: newer version installed".format(slug)) continue previous_descriptor_qs = DescriptorSchema.objects.filter(slug=slug) if previous_descriptor_qs.exists(): previous_descriptor = previous_descriptor_qs.latest() else: previous_descriptor = None descriptor_query = DescriptorSchema.objects.filter(slug=slug, version=version) if descriptor_query.exists(): if not force: if verbosity > 0: self.stdout.write("Skip descriptor schema {}: same version installed".format(slug)) continue descriptor_query.update(**descriptor_schema) log_descriptors.append("Updated {}".format(slug)) else: descriptor = DescriptorSchema.objects.create(contributor=user, **descriptor_schema) assign_contributor_permissions(descriptor) if previous_descriptor: copy_permissions(previous_descriptor, descriptor) log_descriptors.append("Inserted {}".format(slug)) if log_descriptors and verbosity > 0: self.stdout.write("Descriptor schemas Updates:") for log in log_descriptors: self.stdout.write(" {}".format(log))
[ "def", "register_descriptors", "(", "self", ",", "descriptor_schemas", ",", "user", ",", "force", "=", "False", ",", "verbosity", "=", "1", ")", ":", "log_descriptors", "=", "[", "]", "for", "descriptor_schema", "in", "descriptor_schemas", ":", "for", "schema"...
Read and register descriptors.
[ "Read", "and", "register", "descriptors", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/register.py#L271-L321
train
45,074
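Versions compare as plain integers because `convert_version_string_to_int` packs the dotted version string into one number using the bit widths in `VERSION_NUMBER_BITS`. Those widths are not shown in this record, so the sketch below uses assumed widths purely to illustrate the packing idea; it is not the resolwe implementation.

def version_to_int(version, bits=(8, 10, 14)):   # assumed bit widths, for illustration
    # Pack 'major.minor.patch' into one integer so versions compare numerically.
    parts = [int(p) for p in version.split('.')]
    result = 0
    for part, width in zip(parts, bits):
        result = (result << width) | part
    return result

assert version_to_int('1.0.0') > version_to_int('0.9.9')
assert version_to_int('2.1.0') > version_to_int('2.0.3')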
genialis/resolwe
resolwe/flow/management/commands/register.py
Command.retire
def retire(self, process_schemas): """Retire obsolete processes. Remove old process versions without data. Find processes that have been registered but do not exist in the code anymore, then: - If they do not have data: remove them - If they have data: flag them not active (``is_active=False``) """ process_slugs = set(ps['slug'] for ps in process_schemas) # Processes that are in DB but not in the code retired_processes = Process.objects.filter(~Q(slug__in=process_slugs)) # Remove retired processes which do not have data retired_processes.filter(data__exact=None).delete() # Remove non-latest processes which do not have data latest_version_processes = Process.objects.order_by('slug', '-version').distinct('slug') Process.objects.filter(data__exact=None).difference(latest_version_processes).delete() # Deactivate retired processes which have data retired_processes.update(is_active=False)
python
def retire(self, process_schemas): """Retire obsolete processes. Remove old process versions without data. Find processes that have been registered but do not exist in the code anymore, then: - If they do not have data: remove them - If they have data: flag them not active (``is_active=False``) """ process_slugs = set(ps['slug'] for ps in process_schemas) # Processes that are in DB but not in the code retired_processes = Process.objects.filter(~Q(slug__in=process_slugs)) # Remove retired processes which do not have data retired_processes.filter(data__exact=None).delete() # Remove non-latest processes which do not have data latest_version_processes = Process.objects.order_by('slug', '-version').distinct('slug') Process.objects.filter(data__exact=None).difference(latest_version_processes).delete() # Deactivate retired processes which have data retired_processes.update(is_active=False)
[ "def", "retire", "(", "self", ",", "process_schemas", ")", ":", "process_slugs", "=", "set", "(", "ps", "[", "'slug'", "]", "for", "ps", "in", "process_schemas", ")", "# Processes that are in DB but not in the code", "retired_processes", "=", "Process", ".", "obje...
Retire obsolete processes. Remove old process versions without data. Find processes that have been registered but do not exist in the code anymore, then: - If they do not have data: remove them - If they have data: flag them not active (``is_active=False``)
[ "Retire", "obsolete", "processes", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/register.py#L323-L346
train
45,075
genialis/resolwe
resolwe/flow/management/commands/register.py
Command.handle
def handle(self, *args, **options): """Register processes.""" force = options.get('force') retire = options.get('retire') verbosity = int(options.get('verbosity')) users = get_user_model().objects.filter(is_superuser=True).order_by('date_joined') if not users.exists(): self.stderr.write("Admin does not exist: create a superuser") exit(1) process_paths, descriptor_paths = [], [] process_schemas, descriptor_schemas = [], [] for finder in get_finders(): process_paths.extend(finder.find_processes()) descriptor_paths.extend(finder.find_descriptors()) for proc_path in process_paths: process_schemas.extend( self.find_schemas(proc_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=verbosity)) for desc_path in descriptor_paths: descriptor_schemas.extend( self.find_schemas(desc_path, schema_type=SCHEMA_TYPE_DESCRIPTOR, verbosity=verbosity)) user_admin = users.first() self.register_descriptors(descriptor_schemas, user_admin, force, verbosity=verbosity) # NOTE: Descriptor schemas must be registered first, so # processes can validate 'entity_descriptor_schema' field. self.register_processes(process_schemas, user_admin, force, verbosity=verbosity) if retire: self.retire(process_schemas) if verbosity > 0: self.stdout.write("Running executor post-registration hook...") manager.get_executor().post_register_hook(verbosity=verbosity)
python
def handle(self, *args, **options): """Register processes.""" force = options.get('force') retire = options.get('retire') verbosity = int(options.get('verbosity')) users = get_user_model().objects.filter(is_superuser=True).order_by('date_joined') if not users.exists(): self.stderr.write("Admin does not exist: create a superuser") exit(1) process_paths, descriptor_paths = [], [] process_schemas, descriptor_schemas = [], [] for finder in get_finders(): process_paths.extend(finder.find_processes()) descriptor_paths.extend(finder.find_descriptors()) for proc_path in process_paths: process_schemas.extend( self.find_schemas(proc_path, schema_type=SCHEMA_TYPE_PROCESS, verbosity=verbosity)) for desc_path in descriptor_paths: descriptor_schemas.extend( self.find_schemas(desc_path, schema_type=SCHEMA_TYPE_DESCRIPTOR, verbosity=verbosity)) user_admin = users.first() self.register_descriptors(descriptor_schemas, user_admin, force, verbosity=verbosity) # NOTE: Descriptor schemas must be registered first, so # processes can validate 'entity_descriptor_schema' field. self.register_processes(process_schemas, user_admin, force, verbosity=verbosity) if retire: self.retire(process_schemas) if verbosity > 0: self.stdout.write("Running executor post-registration hook...") manager.get_executor().post_register_hook(verbosity=verbosity)
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "force", "=", "options", ".", "get", "(", "'force'", ")", "retire", "=", "options", ".", "get", "(", "'retire'", ")", "verbosity", "=", "int", "(", "options", ".", ...
Register processes.
[ "Register", "processes", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/management/commands/register.py#L348-L386
train
45,076
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
is_valid_sid_for_chain
def is_valid_sid_for_chain(pid, sid): """Return True if ``sid`` can be assigned to the single object ``pid`` or to the chain to which ``pid`` belongs. - If the chain does not have a SID, the new SID must be previously unused. - If the chain already has a SID, the new SID must match the existing SID. All known PIDs are associated with a chain. Preconditions: - ``pid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_existing_object(). - ``sid`` is None or verified to be a SID """ if _is_unused_did(sid): return True existing_sid = d1_gmn.app.revision.get_sid_by_pid(pid) if existing_sid is None: return False return existing_sid == sid
python
def is_valid_sid_for_chain(pid, sid): """Return True if ``sid`` can be assigned to the single object ``pid`` or to the chain to which ``pid`` belongs. - If the chain does not have a SID, the new SID must be previously unused. - If the chain already has a SID, the new SID must match the existing SID. All known PIDs are associated with a chain. Preconditions: - ``pid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_existing_object(). - ``sid`` is None or verified to be a SID """ if _is_unused_did(sid): return True existing_sid = d1_gmn.app.revision.get_sid_by_pid(pid) if existing_sid is None: return False return existing_sid == sid
[ "def", "is_valid_sid_for_chain", "(", "pid", ",", "sid", ")", ":", "if", "_is_unused_did", "(", "sid", ")", ":", "return", "True", "existing_sid", "=", "d1_gmn", ".", "app", ".", "revision", ".", "get_sid_by_pid", "(", "pid", ")", "if", "existing_sid", "is...
Return True if ``sid`` can be assigned to the single object ``pid`` or to the chain to which ``pid`` belongs. - If the chain does not have a SID, the new SID must be previously unused. - If the chain already has a SID, the new SID must match the existing SID. All known PIDs are associated with a chain. Preconditions: - ``pid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_existing_object(). - ``sid`` is None or verified to be a SID
[ "Return", "True", "if", "sid", "can", "be", "assigned", "to", "the", "single", "object", "pid", "or", "to", "the", "chain", "to", "which", "pid", "belongs", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L80-L100
train
45,077
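The rule reduces to a small decision table: a previously unused SID (or no SID at all) is always acceptable, otherwise the SID must equal the one already attached to the chain. A stand-alone restatement with plain Python data (hypothetical identifiers, not the GMN models):

def sid_ok(sid, used_dids, sid_of_chain):
    # sid may be None; sid_of_chain is the chain's existing SID or None.
    if sid not in used_dids:       # previously unused (or absent): always fine
        return True
    return sid_of_chain == sid     # otherwise it must match the chain's SID

used = {'sid-1', 'pid-1'}
assert sid_ok('sid-new', used, None)      # unused SID is accepted
assert sid_ok('sid-1', used, 'sid-1')     # matches the chain's existing SID
assert not sid_ok('sid-1', used, None)    # used elsewhere while the chain has no SID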
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
is_existing_object
def is_existing_object(did): """Return True if PID is for an object for which science bytes are stored locally. This excludes SIDs and PIDs for unprocessed replica requests, remote or non-existing revisions of local replicas and objects aggregated in Resource Maps. """ return d1_gmn.app.models.ScienceObject.objects.filter(pid__did=did).exists()
python
def is_existing_object(did): """Return True if PID is for an object for which science bytes are stored locally. This excludes SIDs and PIDs for unprocessed replica requests, remote or non-existing revisions of local replicas and objects aggregated in Resource Maps. """ return d1_gmn.app.models.ScienceObject.objects.filter(pid__did=did).exists()
[ "def", "is_existing_object", "(", "did", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "ScienceObject", ".", "objects", ".", "filter", "(", "pid__did", "=", "did", ")", ".", "exists", "(", ")" ]
Return True if PID is for an object for which science bytes are stored locally. This excludes SIDs and PIDs for unprocessed replica requests, remote or non-existing revisions of local replicas and objects aggregated in Resource Maps.
[ "Return", "True", "if", "PID", "is", "for", "an", "object", "for", "which", "science", "bytes", "are", "stored", "locally", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L115-L122
train
45,078
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
classify_identifier
def classify_identifier(did): """Return a text fragment classifying the ``did`` Return <UNKNOWN> if the DID could not be classified. This should not normally happen and may indicate that the DID was orphaned in the database. """ if _is_unused_did(did): return 'unused on this Member Node' elif is_sid(did): return 'a Series ID (SID) of a revision chain' elif is_local_replica(did): return 'a Persistent ID (PID) of a local replica' elif is_unprocessed_local_replica(did): return ( 'a Persistent ID (PID) of an accepted but not yet processed local replica' ) elif is_archived(did): return 'a Persistent ID (PID) of a previously archived local object' elif is_obsoleted(did): return 'a Persistent ID (PID) of a previously updated (obsoleted) local object' elif is_resource_map_db(did): return 'a Persistent ID (PID) of a local resource map' elif is_existing_object(did): return 'a Persistent ID (PID) of an existing local object' elif is_revision_chain_placeholder(did): return ( 'a Persistent ID (PID) of a remote or non-existing revision of a local ' 'replica' ) elif is_resource_map_member(did): return ( 'a Persistent ID (PID) of a remote or non-existing object aggregated in ' 'a local Resource Map' ) logger.warning('Unable to classify known identifier. did="{}"'.format(did)) return '<UNKNOWN>'
python
def classify_identifier(did): """Return a text fragment classifying the ``did`` Return <UNKNOWN> if the DID could not be classified. This should not normally happen and may indicate that the DID was orphaned in the database. """ if _is_unused_did(did): return 'unused on this Member Node' elif is_sid(did): return 'a Series ID (SID) of a revision chain' elif is_local_replica(did): return 'a Persistent ID (PID) of a local replica' elif is_unprocessed_local_replica(did): return ( 'a Persistent ID (PID) of an accepted but not yet processed local replica' ) elif is_archived(did): return 'a Persistent ID (PID) of a previously archived local object' elif is_obsoleted(did): return 'a Persistent ID (PID) of a previously updated (obsoleted) local object' elif is_resource_map_db(did): return 'a Persistent ID (PID) of a local resource map' elif is_existing_object(did): return 'a Persistent ID (PID) of an existing local object' elif is_revision_chain_placeholder(did): return ( 'a Persistent ID (PID) of a remote or non-existing revision of a local ' 'replica' ) elif is_resource_map_member(did): return ( 'a Persistent ID (PID) of a remote or non-existing object aggregated in ' 'a local Resource Map' ) logger.warning('Unable to classify known identifier. did="{}"'.format(did)) return '<UNKNOWN>'
[ "def", "classify_identifier", "(", "did", ")", ":", "if", "_is_unused_did", "(", "did", ")", ":", "return", "'unused on this Member Node'", "elif", "is_sid", "(", "did", ")", ":", "return", "'a Series ID (SID) of a revision chain'", "elif", "is_local_replica", "(", ...
Return a text fragment classifying the ``did`` Return <UNKNOWN> if the DID could not be classified. This should not normally happen and may indicate that the DID was orphaned in the database.
[ "Return", "a", "text", "fragment", "classifying", "the", "did" ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L142-L178
train
45,079
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
is_local_replica
def is_local_replica(pid): """Includes unprocessed replication requests.""" return d1_gmn.app.models.LocalReplica.objects.filter(pid__did=pid).exists()
python
def is_local_replica(pid): """Includes unprocessed replication requests.""" return d1_gmn.app.models.LocalReplica.objects.filter(pid__did=pid).exists()
[ "def", "is_local_replica", "(", "pid", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "LocalReplica", ".", "objects", ".", "filter", "(", "pid__did", "=", "pid", ")", ".", "exists", "(", ")" ]
Includes unprocessed replication requests.
[ "Includes", "unprocessed", "replication", "requests", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L195-L197
train
45,080
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
is_unprocessed_local_replica
def is_unprocessed_local_replica(pid): """Is local replica with status "queued".""" return d1_gmn.app.models.LocalReplica.objects.filter( pid__did=pid, info__status__status='queued' ).exists()
python
def is_unprocessed_local_replica(pid): """Is local replica with status "queued".""" return d1_gmn.app.models.LocalReplica.objects.filter( pid__did=pid, info__status__status='queued' ).exists()
[ "def", "is_unprocessed_local_replica", "(", "pid", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "LocalReplica", ".", "objects", ".", "filter", "(", "pid__did", "=", "pid", ",", "info__status__status", "=", "'queued'", ")", ".", "exists", "("...
Is local replica with status "queued".
[ "Is", "local", "replica", "with", "status", "queued", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L200-L204
train
45,081
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
is_revision_chain_placeholder
def is_revision_chain_placeholder(pid): """For replicas, the PIDs referenced in revision chains are reserved for use by other replicas.""" return d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter( pid__did=pid ).exists()
python
def is_revision_chain_placeholder(pid): """For replicas, the PIDs referenced in revision chains are reserved for use by other replicas.""" return d1_gmn.app.models.ReplicaRevisionChainReference.objects.filter( pid__did=pid ).exists()
[ "def", "is_revision_chain_placeholder", "(", "pid", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "ReplicaRevisionChainReference", ".", "objects", ".", "filter", "(", "pid__did", "=", "pid", ")", ".", "exists", "(", ")" ]
For replicas, the PIDs referenced in revision chains are reserved for use by other replicas.
[ "For", "replicas", "the", "PIDs", "referenced", "in", "revision", "chains", "are", "reserved", "for", "use", "by", "other", "replicas", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L207-L212
train
45,082
DataONEorg/d1_python
gmn/src/d1_gmn/app/did.py
_is_did
def _is_did(did): """Return True if ``did`` is recorded in a local context. ``did``=None is supported and returns False. A DID can be classified with classify_identifier(). """ return d1_gmn.app.models.IdNamespace.objects.filter(did=did).exists()
python
def _is_did(did): """Return True if ``did`` is recorded in a local context. ``did``=None is supported and returns False. A DID can be classified with classify_identifier(). """ return d1_gmn.app.models.IdNamespace.objects.filter(did=did).exists()
[ "def", "_is_did", "(", "did", ")", ":", "return", "d1_gmn", ".", "app", ".", "models", ".", "IdNamespace", ".", "objects", ".", "filter", "(", "did", "=", "did", ")", ".", "exists", "(", ")" ]
Return True if ``did`` is recorded in a local context. ``did``=None is supported and returns False. A DID can be classified with classify_identifier().
[ "Return", "True", "if", "did", "is", "recorded", "in", "a", "local", "context", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/did.py#L231-L239
train
45,083
genialis/resolwe
resolwe/elastic/utils/__init__.py
prepare_connection
def prepare_connection(): """Set dafault connection for ElasticSearch. .. warning:: In case of using multiprocessing/multithreading, connection will be probably initialized in the main process/thread and the same connection (socket) will be used in all processes/threads. This will cause some unexpected timeouts of pushes to Elasticsearch. So make sure that this function is called again in each process/thread to make sure that unique connection will be used. """ elasticsearch_host = getattr(settings, 'ELASTICSEARCH_HOST', 'localhost') elasticsearch_port = getattr(settings, 'ELASTICSEARCH_PORT', 9200) connections.create_connection(hosts=['{}:{}'.format(elasticsearch_host, elasticsearch_port)])
python
def prepare_connection(): """Set dafault connection for ElasticSearch. .. warning:: In case of using multiprocessing/multithreading, connection will be probably initialized in the main process/thread and the same connection (socket) will be used in all processes/threads. This will cause some unexpected timeouts of pushes to Elasticsearch. So make sure that this function is called again in each process/thread to make sure that unique connection will be used. """ elasticsearch_host = getattr(settings, 'ELASTICSEARCH_HOST', 'localhost') elasticsearch_port = getattr(settings, 'ELASTICSEARCH_PORT', 9200) connections.create_connection(hosts=['{}:{}'.format(elasticsearch_host, elasticsearch_port)])
[ "def", "prepare_connection", "(", ")", ":", "elasticsearch_host", "=", "getattr", "(", "settings", ",", "'ELASTICSEARCH_HOST'", ",", "'localhost'", ")", "elasticsearch_port", "=", "getattr", "(", "settings", ",", "'ELASTICSEARCH_PORT'", ",", "9200", ")", "connection...
Set default connection for ElasticSearch. .. warning:: When using multiprocessing/multithreading, the connection is typically initialized in the main process/thread and the same connection (socket) is then shared by all processes/threads. This can cause unexpected timeouts when pushing to Elasticsearch, so call this function again in each process/thread to make sure a separate connection is used.
[ "Set", "default", "connection", "for", "ElasticSearch", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/utils/__init__.py#L35-L49
train
45,084
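Because a socket created before a fork would be shared by every worker, the function above is meant to be called again inside each worker; with `multiprocessing` that is conveniently done through the pool initializer. A minimal usage sketch, assuming `prepare_connection` is importable from the module above, that Django settings are configured, and with `do_indexing` as a hypothetical task function:

from multiprocessing import Pool

def worker(item):
    # each worker process has its own Elasticsearch connection at this point
    return do_indexing(item)   # hypothetical per-item indexing task

if __name__ == '__main__':
    with Pool(processes=4, initializer=prepare_connection) as pool:
        pool.map(worker, range(10))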
DataONEorg/d1_python
gmn/src/d1_gmn/app/event_log.py
_log
def _log(pid, request, event, timestamp=None): """Log an operation that was performed on a sciobj.""" # Support logging events that are not associated with an object. sciobj_model = None if pid is not None: try: sciobj_model = d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid)[ 0 ] except IndexError: raise d1_common.types.exceptions.ServiceFailure( 0, 'Attempted to create event log for non-existing object. pid="{}"'.format( pid ), ) event_log_model = create_log_entry( sciobj_model, event, request.META['REMOTE_ADDR'], request.META.get('HTTP_USER_AGENT', '<not provided>'), request.primary_subject_str, ) # The datetime is an optional parameter. If it is not provided, a # "auto_now_add=True" value in the the model defaults it to Now. The # disadvantage to this approach is that we have to update the timestamp in a # separate step if we want to set it to anything other than Now. if timestamp is not None: event_log_model.timestamp = timestamp event_log_model.save()
python
def _log(pid, request, event, timestamp=None): """Log an operation that was performed on a sciobj.""" # Support logging events that are not associated with an object. sciobj_model = None if pid is not None: try: sciobj_model = d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid)[ 0 ] except IndexError: raise d1_common.types.exceptions.ServiceFailure( 0, 'Attempted to create event log for non-existing object. pid="{}"'.format( pid ), ) event_log_model = create_log_entry( sciobj_model, event, request.META['REMOTE_ADDR'], request.META.get('HTTP_USER_AGENT', '<not provided>'), request.primary_subject_str, ) # The datetime is an optional parameter. If it is not provided, a # "auto_now_add=True" value in the the model defaults it to Now. The # disadvantage to this approach is that we have to update the timestamp in a # separate step if we want to set it to anything other than Now. if timestamp is not None: event_log_model.timestamp = timestamp event_log_model.save()
[ "def", "_log", "(", "pid", ",", "request", ",", "event", ",", "timestamp", "=", "None", ")", ":", "# Support logging events that are not associated with an object.", "sciobj_model", "=", "None", "if", "pid", "is", "not", "None", ":", "try", ":", "sciobj_model", ...
Log an operation that was performed on a sciobj.
[ "Log", "an", "operation", "that", "was", "performed", "on", "a", "sciobj", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/event_log.py#L78-L109
train
45,085
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
action_to_level
def action_to_level(action): """Map action name to action level.""" try: return ACTION_LEVEL_MAP[action] except LookupError: raise d1_common.types.exceptions.InvalidRequest( 0, 'Unknown action. action="{}"'.format(action) )
python
def action_to_level(action): """Map action name to action level.""" try: return ACTION_LEVEL_MAP[action] except LookupError: raise d1_common.types.exceptions.InvalidRequest( 0, 'Unknown action. action="{}"'.format(action) )
[ "def", "action_to_level", "(", "action", ")", ":", "try", ":", "return", "ACTION_LEVEL_MAP", "[", "action", "]", "except", "LookupError", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Unknown action. action=\...
Map action name to action level.
[ "Map", "action", "name", "to", "action", "level", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L65-L72
train
45,086
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
level_to_action
def level_to_action(level): """Map action level to action name.""" try: return LEVEL_ACTION_MAP[level] except LookupError: raise d1_common.types.exceptions.InvalidRequest( 0, 'Unknown action level. level="{}"'.format(level) )
python
def level_to_action(level): """Map action level to action name.""" try: return LEVEL_ACTION_MAP[level] except LookupError: raise d1_common.types.exceptions.InvalidRequest( 0, 'Unknown action level. level="{}"'.format(level) )
[ "def", "level_to_action", "(", "level", ")", ":", "try", ":", "return", "LEVEL_ACTION_MAP", "[", "level", "]", "except", "LookupError", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Unknown action level. leve...
Map action level to action name.
[ "Map", "action", "level", "to", "action", "name", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L75-L82
train
45,087
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
get_trusted_subjects
def get_trusted_subjects(): """Get set of subjects that have unlimited access to all SciObj and APIs on this node.""" cert_subj = _get_client_side_certificate_subject() return ( d1_gmn.app.node_registry.get_cn_subjects() | django.conf.settings.DATAONE_TRUSTED_SUBJECTS | {cert_subj} if cert_subj is not None else set() )
python
def get_trusted_subjects(): """Get set of subjects that have unlimited access to all SciObj and APIs on this node.""" cert_subj = _get_client_side_certificate_subject() return ( d1_gmn.app.node_registry.get_cn_subjects() | django.conf.settings.DATAONE_TRUSTED_SUBJECTS | {cert_subj} if cert_subj is not None else set() )
[ "def", "get_trusted_subjects", "(", ")", ":", "cert_subj", "=", "_get_client_side_certificate_subject", "(", ")", "return", "(", "d1_gmn", ".", "app", ".", "node_registry", ".", "get_cn_subjects", "(", ")", "|", "django", ".", "conf", ".", "settings", ".", "DA...
Get set of subjects that have unlimited access to all SciObj and APIs on this node.
[ "Get", "set", "of", "subjects", "that", "have", "unlimited", "access", "to", "all", "SciObj", "and", "APIs", "on", "this", "node", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L85-L95
train
45,088
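One subtlety in the return statement above is operator precedence: a conditional expression binds more loosely than `|`, so the entire union is the "true" branch and the bare `set()` is the "false" branch. The toy example below only demonstrates that grouping; it does not claim which grouping the function intends.

a, b, x = {'cn'}, {'trusted'}, 'cert-subject'

grouped_wide = a | b | {x} if x is not None else set()
grouped_tight = a | b | ({x} if x is not None else set())
assert grouped_wide == grouped_tight == {'cn', 'trusted', 'cert-subject'}

x = None
assert (a | b | {x} if x is not None else set()) == set()                 # whole union dropped
assert (a | b | ({x} if x is not None else set())) == {'cn', 'trusted'}   # only the singleton dropped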
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
is_trusted_subject
def is_trusted_subject(request): """Determine if calling subject is fully trusted.""" logging.debug('Active subjects: {}'.format(', '.join(request.all_subjects_set))) logging.debug('Trusted subjects: {}'.format(', '.join(get_trusted_subjects()))) return not request.all_subjects_set.isdisjoint(get_trusted_subjects())
python
def is_trusted_subject(request): """Determine if calling subject is fully trusted.""" logging.debug('Active subjects: {}'.format(', '.join(request.all_subjects_set))) logging.debug('Trusted subjects: {}'.format(', '.join(get_trusted_subjects()))) return not request.all_subjects_set.isdisjoint(get_trusted_subjects())
[ "def", "is_trusted_subject", "(", "request", ")", ":", "logging", ".", "debug", "(", "'Active subjects: {}'", ".", "format", "(", "', '", ".", "join", "(", "request", ".", "all_subjects_set", ")", ")", ")", "logging", ".", "debug", "(", "'Trusted subjects: {}'...
Determine if calling subject is fully trusted.
[ "Determine", "if", "calling", "subject", "is", "fully", "trusted", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L109-L113
train
45,089
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
_get_client_side_certificate_subject
def _get_client_side_certificate_subject(): """Return the DN from the client side certificate as a D1 subject if a client side cert has been configured. Else return None. """ subject = django.core.cache.cache.get('client_side_certificate_subject') if subject is not None: return subject cert_pem = _get_client_side_certificate_pem() if cert_pem is None: return None subject = _extract_subject_from_pem(cert_pem) django.core.cache.cache.set('client_side_certificate_subject', subject) return subject
python
def _get_client_side_certificate_subject(): """Return the DN from the client side certificate as a D1 subject if a client side cert has been configured. Else return None. """ subject = django.core.cache.cache.get('client_side_certificate_subject') if subject is not None: return subject cert_pem = _get_client_side_certificate_pem() if cert_pem is None: return None subject = _extract_subject_from_pem(cert_pem) django.core.cache.cache.set('client_side_certificate_subject', subject) return subject
[ "def", "_get_client_side_certificate_subject", "(", ")", ":", "subject", "=", "django", ".", "core", ".", "cache", ".", "cache", ".", "get", "(", "'client_side_certificate_subject'", ")", "if", "subject", "is", "not", "None", ":", "return", "subject", "cert_pem"...
Return the DN from the client side certificate as a D1 subject if a client side cert has been configured. Else return None.
[ "Return", "the", "DN", "from", "the", "client", "side", "certificate", "as", "a", "D1", "subject", "if", "a", "client", "side", "cert", "has", "been", "configured", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L122-L137
train
45,090
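The function above is an instance of the cache-aside pattern on Django's cache framework. A hedged, generic sketch with a hypothetical compute() callable (it assumes a configured Django cache backend; as in the original, a computed value of None is effectively never served from cache, so the expensive path is retried on every call):

import django.core.cache

def cached(key, compute):
    # Return the cached value if present; otherwise compute it, store it, and return it.
    value = django.core.cache.cache.get(key)
    if value is None:
        value = compute()
        django.core.cache.cache.set(key, value)
    return value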
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
is_allowed
def is_allowed(request, level, pid):
    """Check if one or more subjects are allowed to perform action level on object.

    If a subject holds permissions for one action level on object, all lower action
    levels are also allowed. Any included subject that is unknown to this MN is
    treated as a subject without permissions.

    Returns:
      bool
        True:
          - The active subjects include one or more subjects that:
            - are fully trusted DataONE infrastructure subjects, causing all rights
              to be granted regardless of requested access level and SciObj
            - OR are in the object's ACL for the requested access level. The ACL
              contains the subjects from the object's allow rules and the object's
              rightsHolder, which has all rights.
            - OR object is public, which always yields a match on the "public"
              symbolic subject.
        False:
          - None of the active subjects are in the object's ACL for the requested
            access level or for lower levels.
          - OR PID does not exist
          - OR access level is invalid
    """
    if is_trusted_subject(request):
        return True

    return d1_gmn.app.models.Permission.objects.filter(
        sciobj__pid__did=pid,
        subject__subject__in=request.all_subjects_set,
        level__gte=level,
    ).exists()
python
def is_allowed(request, level, pid):
    """Check if one or more subjects are allowed to perform action level on object.

    If a subject holds permissions for one action level on object, all lower action
    levels are also allowed. Any included subject that is unknown to this MN is
    treated as a subject without permissions.

    Returns:
      bool
        True:
          - The active subjects include one or more subjects that:
            - are fully trusted DataONE infrastructure subjects, causing all rights
              to be granted regardless of requested access level and SciObj
            - OR are in the object's ACL for the requested access level. The ACL
              contains the subjects from the object's allow rules and the object's
              rightsHolder, which has all rights.
            - OR object is public, which always yields a match on the "public"
              symbolic subject.
        False:
          - None of the active subjects are in the object's ACL for the requested
            access level or for lower levels.
          - OR PID does not exist
          - OR access level is invalid
    """
    if is_trusted_subject(request):
        return True

    return d1_gmn.app.models.Permission.objects.filter(
        sciobj__pid__did=pid,
        subject__subject__in=request.all_subjects_set,
        level__gte=level,
    ).exists()
[ "def", "is_allowed", "(", "request", ",", "level", ",", "pid", ")", ":", "if", "is_trusted_subject", "(", "request", ")", ":", "return", "True", "return", "d1_gmn", ".", "app", ".", "models", ".", "Permission", ".", "objects", ".", "filter", "(", "sciobj...
Check if one or more subjects are allowed to perform action level on object.

If a subject holds permissions for one action level on object, all lower action levels are also allowed. Any included subject that is unknown to this MN is treated as a subject without permissions.

Returns:
  bool
    True:
      - The active subjects include one or more subjects that:
        - are fully trusted DataONE infrastructure subjects, causing all rights to be granted regardless of requested access level and SciObj
        - OR are in the object's ACL for the requested access level. The ACL contains the subjects from the object's allow rules and the object's rightsHolder, which has all rights.
        - OR object is public, which always yields a match on the "public" symbolic subject.
    False:
      - None of the active subjects are in the object's ACL for the requested access level or for lower levels.
      - OR PID does not exist
      - OR access level is invalid
[ "Check", "if", "one", "or", "more", "subjects", "are", "allowed", "to", "perform", "action", "level", "on", "object", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L164-L195
train
45,091
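The level__gte filter in is_allowed encodes the docstring's rule that a grant at one action level implies all lower levels. A toy sketch of that ordering with assumed numeric constants (the real level constants are defined elsewhere in d1_gmn, so the names here are illustrative only):

READ, WRITE, CHANGE_PERMISSION = 0, 1, 2  # assumed ordering, lowest to highest

def grant_satisfies(granted_level, requested_level):
    # A stored permission at granted_level covers any request at the same or a lower level.
    return granted_level >= requested_level

assert grant_satisfies(CHANGE_PERMISSION, READ)   # change implies read
assert grant_satisfies(WRITE, WRITE)              # exact match is allowed
assert not grant_satisfies(READ, WRITE)           # read does not imply write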
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
assert_allowed
def assert_allowed(request, level, pid):
    """Assert that one or more subjects are allowed to perform action on object.

    Raise NotAuthorized if object exists and subject is not allowed.
    Raise NotFound if object does not exist.
    """
    if not d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid).exists():
        raise d1_common.types.exceptions.NotFound(
            0,
            'Attempted to perform operation on non-existing object. pid="{}"'.format(
                pid
            ),
        )
    if not is_allowed(request, level, pid):
        raise d1_common.types.exceptions.NotAuthorized(
            0,
            'Operation is denied. level="{}", pid="{}", active_subjects="{}"'.format(
                level_to_action(level), pid, format_active_subjects(request)
            ),
        )
python
def assert_allowed(request, level, pid):
    """Assert that one or more subjects are allowed to perform action on object.

    Raise NotAuthorized if object exists and subject is not allowed.
    Raise NotFound if object does not exist.
    """
    if not d1_gmn.app.models.ScienceObject.objects.filter(pid__did=pid).exists():
        raise d1_common.types.exceptions.NotFound(
            0,
            'Attempted to perform operation on non-existing object. pid="{}"'.format(
                pid
            ),
        )
    if not is_allowed(request, level, pid):
        raise d1_common.types.exceptions.NotAuthorized(
            0,
            'Operation is denied. level="{}", pid="{}", active_subjects="{}"'.format(
                level_to_action(level), pid, format_active_subjects(request)
            ),
        )
[ "def", "assert_allowed", "(", "request", ",", "level", ",", "pid", ")", ":", "if", "not", "d1_gmn", ".", "app", ".", "models", ".", "ScienceObject", ".", "objects", ".", "filter", "(", "pid__did", "=", "pid", ")", ".", "exists", "(", ")", ":", "raise...
Assert that one or more subjects are allowed to perform action on object. Raise NotAuthorized if object exists and subject is not allowed. Raise NotFound if object does not exist.
[ "Assert", "that", "one", "or", "more", "subjects", "are", "allowed", "to", "perform", "action", "on", "object", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L225-L245
train
45,092
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
format_active_subjects
def format_active_subjects(request):
    """Create a string listing active subjects for this connection, suitable for
    appending to authentication error messages."""
    decorated_subject_list = [request.primary_subject_str + ' (primary)']
    for subject in request.all_subjects_set:
        if subject != request.primary_subject_str:
            decorated_subject_list.append(subject)
    return ', '.join(decorated_subject_list)
python
def format_active_subjects(request):
    """Create a string listing active subjects for this connection, suitable for
    appending to authentication error messages."""
    decorated_subject_list = [request.primary_subject_str + ' (primary)']
    for subject in request.all_subjects_set:
        if subject != request.primary_subject_str:
            decorated_subject_list.append(subject)
    return ', '.join(decorated_subject_list)
[ "def", "format_active_subjects", "(", "request", ")", ":", "decorated_subject_list", "=", "[", "request", ".", "primary_subject_str", "+", "' (primary)'", "]", "for", "subject", "in", "request", ".", "all_subjects_set", ":", "if", "subject", "!=", "request", ".", ...
Create a string listing active subjects for this connection, suitable for appending to authentication error messages.
[ "Create", "a", "string", "listing", "active", "subjects", "for", "this", "connection", "suitable", "for", "appending", "to", "authentication", "error", "messages", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L248-L255
train
45,093
DataONEorg/d1_python
dev_tools/src/d1_dev/src-remove-unused-imports.py
get_atomtrailer_list
def get_atomtrailer_list(r):
    """Capture only the leading dotted name list.

    A full sequence typically includes function calls and parameters.

    pkga.pkgb.pkgc.one_call(arg1, arg2, arg3=4)
    """
    dot_set = set()
    for n in r.find_all(("atomtrailers",)):
        name_list = []
        for x in n.value:
            if x.type != "name":
                break
            name_list.append(x.value)
        if name_list:
            dot_set.add(tuple(name_list))
    return sorted(dot_set)
python
def get_atomtrailer_list(r):
    """Capture only the leading dotted name list.

    A full sequence typically includes function calls and parameters.

    pkga.pkgb.pkgc.one_call(arg1, arg2, arg3=4)
    """
    dot_set = set()
    for n in r.find_all(("atomtrailers",)):
        name_list = []
        for x in n.value:
            if x.type != "name":
                break
            name_list.append(x.value)
        if name_list:
            dot_set.add(tuple(name_list))
    return sorted(dot_set)
[ "def", "get_atomtrailer_list", "(", "r", ")", ":", "dot_set", "=", "set", "(", ")", "for", "n", "in", "r", ".", "find_all", "(", "(", "\"atomtrailers\"", ",", ")", ")", ":", "name_list", "=", "[", "]", "for", "x", "in", "n", ".", "value", ":", "i...
Capture only the leading dotted name list. A full sequence typically includes function calls and parameters. pkga.pkgb.pkgc.one_call(arg1, arg2, arg3=4)
[ "Capture", "only", "the", "leading", "dotted", "name", "list", "." ]
3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/dev_tools/src/d1_dev/src-remove-unused-imports.py#L182-L198
train
45,094
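For readers without RedBaron at hand, a rough equivalent of the "leading dotted name" extraction can be sketched with the standard ast module. Unlike the original, this version only inspects call expressions, so it is an approximation rather than a drop-in replacement:

import ast

def leading_dotted_names(source):
    """Collect the dotted-name prefix of every call, e.g. pkga.pkgb.fn(x) -> ('pkga', 'pkgb', 'fn')."""
    dot_set = set()
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Call):
            continue
        names, cur = [], node.func
        # Walk down the attribute chain (fn <- pkgc <- pkgb <- pkga).
        while isinstance(cur, ast.Attribute):
            names.append(cur.attr)
            cur = cur.value
        if isinstance(cur, ast.Name):
            names.append(cur.id)
            dot_set.add(tuple(reversed(names)))
    return sorted(dot_set)

print(leading_dotted_names("pkga.pkgb.pkgc.one_call(arg1, arg2, arg3=4)"))
# [('pkga', 'pkgb', 'pkgc', 'one_call')]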
genialis/resolwe
resolwe/flow/views/collection.py
CollectionViewSet.custom_filter_tags
def custom_filter_tags(self, value, search):
    """Support tags query."""
    if not isinstance(value, list):
        value = value.split(',')

    filters = [Q('match', **{'tags': item}) for item in value]
    search = search.query('bool', must=filters)

    return search
python
def custom_filter_tags(self, value, search):
    """Support tags query."""
    if not isinstance(value, list):
        value = value.split(',')

    filters = [Q('match', **{'tags': item}) for item in value]
    search = search.query('bool', must=filters)

    return search
[ "def", "custom_filter_tags", "(", "self", ",", "value", ",", "search", ")", ":", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "value", ".", "split", "(", "','", ")", "filters", "=", "[", "Q", "(", "'match'", ",", "*...
Support tags query.
[ "Support", "tags", "query", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/collection.py#L68-L76
train
45,095
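custom_filter_tags turns a comma-separated tag string into a bool/must query, i.e. every requested tag has to match. A small sketch of the resulting query body, assuming the elasticsearch_dsl package that Resolwe's Elasticsearch integration builds on:

from elasticsearch_dsl import Q

value = "community,imported"   # hypothetical filter value
tags = value.split(',')
query = Q('bool', must=[Q('match', **{'tags': tag}) for tag in tags])
print(query.to_dict())
# {'bool': {'must': [{'match': {'tags': 'community'}},
#                    {'match': {'tags': 'imported'}}]}}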
genialis/resolwe
resolwe/flow/views/collection.py
CollectionViewSet.custom_filter_text
def custom_filter_text(self, value, search):
    """Support general query using the 'text' attribute."""
    if isinstance(value, list):
        value = ' '.join(value)

    should = [
        Q('match', slug={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'slug.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', name={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'name.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', contributor_name={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'contributor_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', owner_names={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'owner_names.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', descriptor_data={'query': value, 'operator': 'and'}),
    ]

    # Add registered text extensions.
    for extension in composer.get_extensions(self):
        if hasattr(extension, 'text_filter'):
            should += extension.text_filter(value)

    search = search.query('bool', should=should)

    return search
python
def custom_filter_text(self, value, search):
    """Support general query using the 'text' attribute."""
    if isinstance(value, list):
        value = ' '.join(value)

    should = [
        Q('match', slug={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'slug.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', name={'query': value, 'operator': 'and', 'boost': 10.0}),
        Q('match', **{'name.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
        Q('match', contributor_name={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'contributor_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', owner_names={'query': value, 'operator': 'and', 'boost': 5.0}),
        Q('match', **{'owner_names.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
        Q('match', descriptor_data={'query': value, 'operator': 'and'}),
    ]

    # Add registered text extensions.
    for extension in composer.get_extensions(self):
        if hasattr(extension, 'text_filter'):
            should += extension.text_filter(value)

    search = search.query('bool', should=should)

    return search
[ "def", "custom_filter_text", "(", "self", ",", "value", ",", "search", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "' '", ".", "join", "(", "value", ")", "should", "=", "[", "Q", "(", "'match'", ",", "slug", "="...
Support general query using the 'text' attribute.
[ "Support", "general", "query", "using", "the", "text", "attribute", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/collection.py#L85-L109
train
45,096
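In contrast to the tags filter, custom_filter_text collects its clauses under should, so any single clause is enough to match and the boost values only influence relevance ranking. A companion sketch with two of the clauses (again assuming elasticsearch_dsl):

from elasticsearch_dsl import Q

value = "mouse liver"   # hypothetical search text
should = [
    Q('match', name={'query': value, 'operator': 'and', 'boost': 10.0}),
    Q('match', descriptor_data={'query': value, 'operator': 'and'}),
]
query = Q('bool', should=should)
print(query.to_dict())  # bool/should body containing both match clauses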
genialis/resolwe
resolwe/flow/views/collection.py
CollectionViewSet.set_content_permissions
def set_content_permissions(self, user, obj, payload):
    """Apply permissions to data objects and entities in ``Collection``."""
    for entity in obj.entity_set.all():
        if user.has_perm('share_entity', entity):
            update_permission(entity, payload)

    # Data doesn't have "ADD" permission, so it has to be removed
    payload = remove_permission(payload, 'add')

    for data in obj.data.all():
        if user.has_perm('share_data', data):
            update_permission(data, payload)
python
def set_content_permissions(self, user, obj, payload):
    """Apply permissions to data objects and entities in ``Collection``."""
    for entity in obj.entity_set.all():
        if user.has_perm('share_entity', entity):
            update_permission(entity, payload)

    # Data doesn't have "ADD" permission, so it has to be removed
    payload = remove_permission(payload, 'add')

    for data in obj.data.all():
        if user.has_perm('share_data', data):
            update_permission(data, payload)
[ "def", "set_content_permissions", "(", "self", ",", "user", ",", "obj", ",", "payload", ")", ":", "for", "entity", "in", "obj", ".", "entity_set", ".", "all", "(", ")", ":", "if", "user", ".", "has_perm", "(", "'share_entity'", ",", "entity", ")", ":",...
Apply permissions to data objects and entities in ``Collection``.
[ "Apply", "permissions", "to", "data", "objects", "and", "entities", "in", "Collection", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/collection.py#L111-L122
train
45,097
genialis/resolwe
resolwe/flow/views/collection.py
CollectionViewSet.create
def create(self, request, *args, **kwargs):
    """Only authenticated users can create new collections."""
    if not request.user.is_authenticated:
        raise exceptions.NotFound

    return super().create(request, *args, **kwargs)
python
def create(self, request, *args, **kwargs):
    """Only authenticated users can create new collections."""
    if not request.user.is_authenticated:
        raise exceptions.NotFound

    return super().create(request, *args, **kwargs)
[ "def", "create", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "request", ".", "user", ".", "is_authenticated", ":", "raise", "exceptions", ".", "NotFound", "return", "super", "(", ")", ".", "create", ...
Only authenticated users can create new collections.
[ "Only", "authenticated", "users", "can", "create", "new", "collections", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/collection.py#L124-L129
train
45,098
genialis/resolwe
resolwe/flow/views/collection.py
CollectionViewSet.add_data
def add_data(self, request, pk=None):
    """Add data to collection."""
    collection = self.get_object()

    if 'ids' not in request.data:
        return Response(
            {"error": "`ids` parameter is required"},
            status=status.HTTP_400_BAD_REQUEST,
        )

    missing = []
    for data_id in request.data['ids']:
        if not Data.objects.filter(pk=data_id).exists():
            missing.append(data_id)

    if missing:
        return Response(
            {
                "error": "Data objects with following ids are missing: {}".format(
                    # Cast to str so the join also works when ids arrive as integers.
                    ', '.join(str(data_id) for data_id in missing)
                )
            },
            status=status.HTTP_400_BAD_REQUEST,
        )

    for data_id in request.data['ids']:
        collection.data.add(data_id)

    return Response()
python
def add_data(self, request, pk=None):
    """Add data to collection."""
    collection = self.get_object()

    if 'ids' not in request.data:
        return Response(
            {"error": "`ids` parameter is required"},
            status=status.HTTP_400_BAD_REQUEST,
        )

    missing = []
    for data_id in request.data['ids']:
        if not Data.objects.filter(pk=data_id).exists():
            missing.append(data_id)

    if missing:
        return Response(
            {
                "error": "Data objects with following ids are missing: {}".format(
                    # Cast to str so the join also works when ids arrive as integers.
                    ', '.join(str(data_id) for data_id in missing)
                )
            },
            status=status.HTTP_400_BAD_REQUEST,
        )

    for data_id in request.data['ids']:
        collection.data.add(data_id)

    return Response()
[ "def", "add_data", "(", "self", ",", "request", ",", "pk", "=", "None", ")", ":", "collection", "=", "self", ".", "get_object", "(", ")", "if", "'ids'", "not", "in", "request", ".", "data", ":", "return", "Response", "(", "{", "\"error\"", ":", "\"`i...
Add data to collection.
[ "Add", "data", "to", "collection", "." ]
f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/views/collection.py#L153-L173
train
45,099
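The missing-id check in add_data joins the offending ids into the error message; since JSON payloads typically deliver them as integers, they need to be converted to strings before joining, as done in the cleaned-up version above. A standalone sketch of that validation step with made-up ids:

existing_ids = {1, 2, 3}     # ids assumed to exist in the database
requested_ids = [2, 4, 5]    # ids assumed to be supplied by the client

missing = [i for i in requested_ids if i not in existing_ids]
if missing:
    error = "Data objects with following ids are missing: {}".format(
        ', '.join(str(i) for i in missing)
    )
    print(error)  # Data objects with following ids are missing: 4, 5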