text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand_groups(config_ids, maps):
""" Iterates over a list of container configuration ids, expanding groups of container configurations. :param config_ids: List of container configuration ids. :type config_ids: collections.Iterable[dockermap.map.input.InputConfigId | dockermap.map.input.MapConfigId] :param maps: Extended container maps. :type maps: dict[unicode | str, dockermap.map.config.main.ContainerMap] :return: Expanded MapConfigId tuples. :rtype: collections.Iterable[dockermap.map.input.InputConfigId] """
|
for config_id in config_ids:
    # '__all__' as map name means: expand over every known container map.
    if config_id.map_name == '__all__':
        c_maps = six.iteritems(maps)
    else:
        # Single-element tuple of (map name, map) for a specific map.
        c_maps = (config_id.map_name, maps[config_id.map_name]),
    # Normalize the instance selection into a tuple, regardless of which
    # input tuple type was passed in.
    if isinstance(config_id, InputConfigId):
        instance_name = config_id.instance_names
    elif isinstance(config_id, MapConfigId):
        instance_name = (config_id.instance_name, )
    else:
        raise ValueError("Expected InputConfigId or MapConfigId tuple; found {0}."
                         "".format(type(config_id).__name__))
    for map_name, c_map in c_maps:
        # '__all__' as config name expands to every container configuration,
        # but only for the CONTAINER item type.
        if config_id.config_name == '__all__' and config_id.config_type == ItemType.CONTAINER:
            for config_name in six.iterkeys(c_map.containers):
                yield MapConfigId(config_id.config_type, map_name, config_name, instance_name)
        else:
            # A config name may refer to a group; expand its members.
            group = c_map.groups.get(config_id.config_name)
            if group is not None:
                for group_item in group:
                    if isinstance(group_item, MapConfigId):
                        yield group_item
                    elif isinstance(group_item, six.string_types):
                        # String items may carry an instance suffix: 'config.instance'.
                        config_name, __, instance = group_item.partition('.')
                        yield MapConfigId(config_id.config_type, map_name, config_name,
                                          (instance, ) if instance else instance_name)
                    else:
                        raise ValueError("Invalid group item. Must be string or MapConfigId tuple; "
                                         "found {0}.".format(type(group_item).__name__))
            else:
                # Not a group: pass the configuration through unchanged.
                yield MapConfigId(config_id.config_type, map_name, config_id.config_name, instance_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expand_instances(config_ids, ext_maps):
""" Iterates over a list of input configuration ids, expanding configured instances if ``None`` is specified. Otherwise where instance names are specified as a tuple, they are expanded. :param config_ids: Iterable of container configuration ids or (map, config, instance names) tuples. :type config_ids: collections.Iterable[dockermap.map.input.InputConfigId] | collections.Iterable[tuple[unicode | str, unicode | str, unicode | str]] :param ext_maps: Dictionary of extended ContainerMap instances for looking up container configurations. :type ext_maps: dict[unicode | str, ContainerMap] :return: MapConfigId tuples. :rtype: collections.Iterable[dockermap.map.input.MapConfigId] """
|
# Group ids by (config type, map name, config name); groupby requires the
# input to be pre-sorted on the same key.
for type_map_config, items in itertools.groupby(sorted(config_ids, key=get_map_config), get_map_config):
    config_type, map_name, config_name = type_map_config
    # Collect all requested instance names within this group.
    instances = _get_nested_instances(items)
    c_map = ext_maps[map_name]
    try:
        c_instances = _get_config_instances(config_type, c_map, config_name)
    except KeyError:
        # Re-raise with the offending id attached for easier debugging.
        raise KeyError("Configuration not found.", type_map_config)
    if c_instances and None in instances:
        # None among the requested instances means "all configured instances".
        for i in c_instances:
            yield MapConfigId(config_type, map_name, config_name, i)
    else:
        for i in instances:
            yield MapConfigId(config_type, map_name, config_name, i)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_network(self, action, n_name, **kwargs):
""" Creates a configured network. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param n_name: Network name. :type n_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict """
|
c_kwargs = self.get_network_create_kwargs(action, n_name, **kwargs)
res = action.client.create_network(**c_kwargs)
self._policy.network_names[action.client_name][n_name] = res['Id']
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_network(self, action, n_name, **kwargs):
""" Removes a network. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param n_name: Network name or id. :type n_name: unicode | str :param kwargs: Additional keyword arguments. :type kwargs: dict """
|
c_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
res = action.client.remove_network(**c_kwargs)
del self._policy.network_names[action.client_name][n_name]
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container_create_kwargs(self, action, container_name, kwargs=None):
""" Generates keyword arguments for the Docker client to create a container. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name. :type container_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict | NoneType :return: Resulting keyword arguments. :rtype: dict """
|
policy = self._policy
client_config = action.client_config
container_map = action.container_map
container_config = action.config
# Fall back to the configuration name as image name when none is set.
image_tag = container_map.get_image(container_config.image or action.config_id.config_name)
default_paths = policy.default_volume_paths[action.config_id.map_name]
c_kwargs = dict(
    name=container_name,
    image=format_image_tag(image_tag),
    volumes=get_volumes(container_map, container_config, default_paths,
                        client_config.features['volumes']),
    user=extract_user(container_config.user),
    # Only ports with an exposed port number are declared here.
    ports=[resolve_value(port_binding.exposed_port)
           for port_binding in container_config.exposes if port_binding.exposed_port],
    hostname=policy.get_hostname(container_name, action.client_name) if container_map.set_hostname else None,
    domainname=resolve_value(client_config.get('domainname', container_map.default_domain)) or None,
)
if container_config.network_mode == 'none':
    # Explicitly disable networking for this container.
    c_kwargs['network_disabled'] = True
elif client_config.features['networks'] and container_config.networks:
    # Only the first configured network can be attached at creation time;
    # additional networks are presumably connected later — confirm with caller.
    first_network = container_config.networks[0]
    c_kwargs['networking_config'] = NetworkingConfig({
        policy.nname(action.config_id.map_name, first_network.network_name): EndpointConfig(
            client_config.version, **self.get_network_create_endpoint_kwargs(action, first_network)
        )
    })
if client_config.features['stop_signal'] and container_config.stop_signal:
    c_kwargs['stop_signal'] = container_config.stop_signal
# Separate host_config overrides from the remaining extra kwargs.
hc_extra_kwargs = kwargs.pop('host_config', None) if kwargs else None
use_host_config = client_config.features['host_config']
if use_host_config:
    hc_kwargs = self.get_container_host_config_kwargs(action, None, kwargs=hc_extra_kwargs)
    if hc_kwargs:
        if use_host_config == USE_HC_MERGE:
            # Older API: host config values are merged into the create call.
            c_kwargs.update(hc_kwargs)
        else:
            c_kwargs['host_config'] = HostConfig(version=client_config.version, **hc_kwargs)
if client_config.features['stop_timeout'] and container_config.stop_timeout:
    c_kwargs['stop_timeout'] = container_config.stop_timeout
if client_config.features['healthcheck'] and container_config.healthcheck:
    c_kwargs['healthcheck'] = container_config.healthcheck._asdict()
# Configured create options and caller-provided kwargs override defaults.
update_kwargs(c_kwargs, init_options(container_config.create_options), kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attached_container_create_kwargs(self, action, container_name, kwargs=None):
""" Generates keyword arguments for the Docker client to create an attached container. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name. :type container_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict | NoneType :return: Resulting keyword arguments. :rtype: dict """
|
client_config = action.client_config
policy = self._policy
config_id = action.config_id
# Resolve the volume path this attached container will hold.
path = resolve_value(policy.default_volume_paths[config_id.map_name][config_id.instance_name])
user = extract_user(action.config.user)
c_kwargs = dict(
    name=container_name,
    # Attached containers use the policy's minimal base image.
    image=self._policy.base_image,
    volumes=[path],
    user=user,
    # Attached containers only carry a volume; they need no network.
    network_disabled=True,
)
# Separate host_config overrides from the remaining extra kwargs.
hc_extra_kwargs = kwargs.pop('host_config', None) if kwargs else None
use_host_config = client_config.features['host_config']
if use_host_config:
    hc_kwargs = self.get_attached_container_host_config_kwargs(action, None, kwargs=hc_extra_kwargs)
    if hc_kwargs:
        if use_host_config == USE_HC_MERGE:
            # Older API: host config values are merged into the create call.
            c_kwargs.update(hc_kwargs)
        else:
            c_kwargs['host_config'] = HostConfig(version=client_config.version, **hc_kwargs)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attached_container_host_config_kwargs(self, action, container_name, kwargs=None):
""" Generates keyword arguments for the Docker client to set up the HostConfig or start an attached container. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``. :type container_name: unicode | str | NoneType :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict | NoneType :return: Resulting keyword arguments. :rtype: dict """
|
if container_name:
c_kwargs = {'container': container_name}
else:
c_kwargs = {}
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container_update_kwargs(self, action, container_name, update_values, kwargs=None):
""" Generates keyword arguments for the Docker client to update the HostConfig of an existing container. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name or id. :type container_name: unicode | str :param update_values: Dictionary of values to update; i.e. keyword arguments to the Docker client. :type update_values: dict[unicode | str, unicode | str | int | float | decimal.Decimal] :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict | NoneType :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(container=container_name)
update_kwargs(c_kwargs, update_values, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container_wait_kwargs(self, action, container_name, kwargs=None):
""" Generates keyword arguments for the Docker client to wait for a container. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name or id. :type container_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(container=container_name)
timeout = action.client_config.get('wait_timeout')
if timeout is not None:
c_kwargs['timeout'] = timeout
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container_stop_kwargs(self, action, container_name, kwargs=None):
""" Generates keyword arguments for the Docker client to stop a container. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name or id. :type container_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(
container=container_name,
)
stop_timeout = action.config.stop_timeout
if stop_timeout is NotSet:
timeout = action.client_config.get('stop_timeout')
if timeout is not None:
c_kwargs['timeout'] = timeout
elif stop_timeout is not None:
c_kwargs['timeout'] = stop_timeout
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container_remove_kwargs(self, action, container_name, kwargs=None):
""" Generates keyword arguments for the Docker client to remove a container. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name or id. :type container_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(container=container_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_network_create_kwargs(self, action, network_name, kwargs=None):
""" Generates keyword arguments for the Docker client to create a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
config = action.config
c_kwargs = dict(
name=network_name,
driver=config.driver,
options=config.driver_options,
)
if config.internal:
c_kwargs['internal'] = True
driver_opts = init_options(config.driver_options)
if driver_opts:
c_kwargs['options'] = {option_name: resolve_value(option_value)
for option_name, option_value in iteritems(driver_opts)}
update_kwargs(c_kwargs, init_options(config.create_options), kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_network_remove_kwargs(self, action, network_name, kwargs=None):
""" Generates keyword arguments for the Docker client to remove a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(net_id=network_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_network_connect_kwargs(self, action, network_name, container_name, endpoint_config=None, kwargs=None):
""" Generates keyword arguments for the Docker client to add a container to a network. :param action: Action configuration. :type action: ActionConfig :param network_name: Network name or id. :type network_name: unicode | str :param container_name: Container name or id. :type container_name: unicode | str :param endpoint_config: Network endpoint configuration. :type endpoint_config: dockermap.map.input.NetworkEndpoint :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(
container=container_name,
net_id=network_name,
)
if endpoint_config:
c_kwargs.update(self.get_network_create_endpoint_kwargs(action, endpoint_config))
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_network_disconnect_kwargs(self, action, network_name, container_name, kwargs=None):
""" Generates keyword arguments for the Docker client to remove a container from a network. :param action: Action configuration. :type action: ActionConfig :param container_name: Container name or id. :type container_name: unicode | str :param network_name: Network name or id. :type network_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(
container=container_name,
net_id=network_name,
)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_volume_create_kwargs(self, action, volume_name, kwargs=None):
""" Generates keyword arguments for the Docker client to create a volume. :param action: Action configuration. :type action: ActionConfig :param volume_name: Volume name. :type volume_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
config = action.config
c_kwargs = dict(name=volume_name)
if config:
c_kwargs['driver'] = config.driver
driver_opts = init_options(config.driver_options)
if driver_opts:
c_kwargs['driver_opts'] = {option_name: resolve_value(option_value)
for option_name, option_value in iteritems(driver_opts)}
update_kwargs(c_kwargs, init_options(config.create_options), kwargs)
else:
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_volume_remove_kwargs(self, action, volume_name, kwargs=None):
""" Generates keyword arguments for the Docker client to remove a volume. :param action: Action configuration. :type action: ActionConfig :param volume_name: Volume name. :type volume_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict :return: Resulting keyword arguments. :rtype: dict """
|
c_kwargs = dict(name=volume_name)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cname(cls, map_name, container, instance=None):
""" Generates a container name that should be used for creating new containers and checking the status of existing containers. In this implementation, the format will be ``<map name>.<container name>.<instance>``. If no instance is provided, it is just ``<map name>.<container name>``. :param map_name: Container map name. :type map_name: unicode | str :param container: Container configuration name. :type container: unicode | str :param instance: Instance name (optional). :type instance: unicode | str :return: Container name. :rtype: unicode | str """
|
if instance:
return '{0}.{1}.{2}'.format(map_name, container, instance)
return '{0}.{1}'.format(map_name, container)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def aname(cls, map_name, attached_name, parent_name=None):
""" Generates a container name that should be used for creating new attached volume containers and checking the status of existing containers. In this implementation, the format will be ``<map name>.<attached>``, or ``<map name>.<parent name>.<attached>`` if the parent container configuration name is provided. :param map_name: Container map name. :type map_name: unicode | str :param attached_name: Attached container alias. :type attached_name: unicode | str :param parent_name: Container configuration name that has contains attached container. :type parent_name: unicode | str :return: Container name. :rtype: unicode | str """
|
if parent_name:
return '{0}.{1}.{2}'.format(map_name, parent_name, attached_name)
return '{0}.{1}'.format(map_name, attached_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nname(cls, map_name, network_name):
""" Generates a network name that should be used for creating new networks and checking the status of existing networks on the client. In this implementation, the format will be ``<map name>.<network name>``. :param map_name: Container map name. :type map_name: unicode | str :param network_name: Network configuration name. :type network_name: unicode | str :return: Network name. :rtype: unicode | str """
|
if network_name in DEFAULT_PRESET_NETWORKS:
return network_name
return '{0}.{1}'.format(map_name, network_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_hostname(cls, container_name, client_name=None):
""" Determines the host name of a container. In this implementation, replaces all dots and underscores of a container name with a dash; then attaches another dash with the client name, unless there is just one default client. :param container_name: Name of the container. :type container_name: unicode | str :param client_name: Name of the client configuration, where applicable. :type client_name: unicode | str :return: Host name. :rtype: unicode | str """
|
base_name = container_name
for old, new in cls.hostname_replace:
base_name = base_name.replace(old, new)
if not client_name or client_name == cls.default_client_name:
return base_name
client_suffix = client_name
for old, new in cls.hostname_replace:
client_suffix = client_suffix.replace(old, new)
return '{0}-{1}'.format(base_name, client_suffix)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def adduser(username, uid=None, system=False, no_login=True, no_password=False, group=False, gecos=None, **kwargs):
""" Formats an ``adduser`` command. :param username: User name. :type username: unicode | str :param uid: Optional user id to use. :type uid: long | int :param system: Create a system user account. :type system: bool :param no_login: Disable the login for this user. Not compatible with CentOS. Implies setting '--no-create-home', and ``no_password``. :type no_login: bool :param no_password: Disable the password for this user. Not compatible with CentOS. :type no_password: bool :param group: Create a group along with the user. Not compatible with CentOS. :type group: bool :param gecos: Set GECOS information in order to suppress an interactive prompt. On CentOS, use ``__comment`` instead. :type gecos: unicode | str :param kwargs: Additional keyword arguments which are converted to the command line. :return: A formatted ``adduser`` command with arguments. :rtype: unicode | str """
|
# Build the 'adduser' command line. NOTE(review): '__gid=uid' passes the
# uid as the gid — presumably so a group created via '--group' gets an id
# matching the user's; confirm this is intentional for the target distro.
# 'no_login' implies both '--no-create-home'/'--no-login' flags and a
# disabled password.
return _format_cmd('adduser', username, __system=bool(system), __uid=uid, __group=bool(group), __gid=uid,
                   no_login=(no_login, _NO_CREATE_HOME, _NO_LOGIN),
                   __disabled_password=no_login or bool(no_password),
                   __gecos=gecos, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mkdir(path, create_parent=True, check_if_exists=False):
""" Generates a unix command line for creating a directory. :param path: Directory path. :type path: unicode | str :param create_parent: Create parent directories, if necessary. Default is ``True``. :type create_parent: bool :param check_if_exists: Prepend a check if the directory exists; in that case, the command is not run. Default is ``False``. :type check_if_exists: bool :return: Unix shell command line. :rtype: unicode | str """
|
cmd = _format_cmd('mkdir', path, _p=create_parent)
if check_if_exists:
return 'if [[ ! -d {0} ]]; then {1}; fi'.format(path, cmd)
return cmd
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bind(self, field_name, parent):
""" Create translation serializer dynamically. Takes translatable model class (shared_model) from parent serializer and it may create a serializer class on the fly if no custom class was specified. """
|
super(TranslatedFieldsField, self).bind(field_name, parent)
# Expect 1-on-1 for now. Allow using source as alias,
# but it should not be a dotted path for now
related_name = self.source or field_name
# This could all be done in __init__(), but by moving the code here,
# it's possible to auto-detect the parent model.
if self.shared_model is not None and self.serializer_class is not None:
    # Both pieces were supplied explicitly; nothing to derive.
    return
# Fill in the blanks
if self.serializer_class is None:
    if self.shared_model is None:
        # Auto detect parent model
        # Local import to avoid a circular import at module load time.
        from .serializers import TranslatableModelSerializer
        if not isinstance(parent, TranslatableModelSerializer):
            raise TypeError("Expected 'TranslatableModelSerializer' as serializer base class")
        if not issubclass(parent.Meta.model, TranslatableModel):
            raise TypeError("Expected 'TranslatableModel' for the parent model")
        self.shared_model = parent.Meta.model
    # Create serializer based on shared model.
    translated_model = self.shared_model._parler_meta[related_name]
    self.serializer_class = create_translated_fields_serializer(
        self.shared_model, related_name=related_name,
        meta={'fields': translated_model.get_translated_fields()}
    )
else:
    # A custom serializer class was given: validate its model type.
    if not issubclass(self.serializer_class.Meta.model, TranslatedFieldsModel):
        raise TypeError("Expected 'TranslatedFieldsModel' for the serializer model")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_representation(self, value):
""" Serialize translated fields. Simply iterate over available translations and, for each language, delegate serialization logic to the translation model serializer. Output languages can be selected by passing a list of language codes, `languages`, within the serialization context. """
|
if value is None:
    return
# Only need one serializer to create the native objects
serializer = self.serializer_class(
    instance=self.parent.instance,  # Typically None
    context=self.context,
    partial=self.parent.partial
)
# Don't need to have a 'language_code', it will be split up already,
# so this should avoid redundant output.
if 'language_code' in serializer.fields:
    raise ImproperlyConfigured("Serializer may not have a 'language_code' field")
translations = value.all()  # value = translations related manager
# Optionally restrict output to the languages requested via context.
languages = self.context.get('languages')
if languages:
    translations = translations.filter(language_code__in=languages)
# Split into a dictionary per language
result = OrderedDict()
for translation in translations:
    result[translation.language_code] = serializer.to_representation(translation)
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_internal_value(self, data):
""" Deserialize data from translations fields. For each received language, delegate validation logic to the translation model serializer. """
|
if data is None:
return
if not isinstance(data, dict):
self.fail('invalid')
if not self.allow_empty and len(data) == 0:
self.fail('empty')
result, errors = {}, {}
for lang_code, model_fields in data.items():
serializer = self.serializer_class(data=model_fields)
if serializer.is_valid():
result[lang_code] = serializer.validated_data
else:
errors[lang_code] = serializer.errors
if errors:
raise serializers.ValidationError(errors)
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, text, layers=None):
"""Parsing passed text to json. Args: text: Text to parse. layers (optional):
Special fields. Only one string or iterable object (e.g "Data", ("Data", "Fio")). Only these fields will be returned. Returns: The parsed text into a json object. """
|
params = {
"text": text,
"key": self.key,
}
if layers is not None:
# if it's string
if isinstance(layers, six.string_types):
params["layers"] = layers
# if it's another iterable object
elif isinstance(layers, collections.Iterable):
params["layers"] = ",".join(layers)
req = requests.get(self.NLU_URL, params=params)
return req.json()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate(self, text):
"""Try to get the generated file. Args: text: The text that you want to generate. """
|
if not text:
    raise Exception("No text to speak")
if len(text) >= self.MAX_CHARS:
    # Fix: the message previously hard-coded 2000; report the actual
    # limit so the error stays accurate if MAX_CHARS changes.
    raise Exception("Number of characters must be less than {0}".format(self.MAX_CHARS))
# Copy the stored request parameters so repeated calls don't interfere.
params = self.__params.copy()
params["text"] = text
# Fetch the synthesized audio as an iterator over response chunks.
self._data = requests.get(self.TTS_URL, params=params,
                          stream=False).iter_content()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, path="speech"):
"""Save data in file. Args: path (optional):
A path to save file. Defaults to "speech". File extension is optional. Absolute path is allowed. Returns: The path to the saved file. """
|
if self._data is None:
raise Exception("There's nothing to save")
extension = "." + self.__params["format"]
if os.path.splitext(path)[1] != extension:
path += extension
with open(path, "wb") as f:
for d in self._data:
f.write(d)
return path
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_translated_fields_serializer(shared_model, meta=None, related_name=None, **fields):
""" Create a Rest Framework serializer class for a translated fields model. :param shared_model: The shared model. :type shared_model: :class:`parler.models.TranslatableModel` """
|
if not related_name:
translated_model = shared_model._parler_meta.root_model
else:
translated_model = shared_model._parler_meta[related_name].model
# Define inner Meta class
if not meta:
meta = {}
meta['model'] = translated_model
meta.setdefault('fields', ['language_code'] + translated_model.get_translated_fields())
# Define serialize class attributes
attrs = {}
attrs.update(fields)
attrs['Meta'] = type('Meta', (), meta)
# Dynamically create the serializer class
return type('{0}Serializer'.format(translated_model.__name__), (serializers.ModelSerializer,), attrs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, **kwargs):
""" Extract the translations and save them after main object save. By default all translations will be saved no matter if creating or updating an object. Users with more complex needs might define their own save and handle translation saving themselves. """
|
translated_data = self._pop_translated_data()
instance = super(TranslatableModelSerializer, self).save(**kwargs)
self.save_translations(instance, translated_data)
return instance
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _pop_translated_data(self):
""" Separate data of translated fields from other data. """
|
translated_data = {}
for meta in self.Meta.model._parler_meta:
translations = self.validated_data.pop(meta.rel_name, {})
if translations:
translated_data[meta.rel_name] = translations
return translated_data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_translations(self, instance, translated_data):
""" Save translation data into translation objects. """
|
# Walk every translated-fields model registered for the shared model.
for meta in self.Meta.model._parler_meta:
    translations = translated_data.get(meta.rel_name, {})
    for lang_code, model_fields in translations.items():
        # Fetch (or lazily create) the translation row for this language.
        translation = instance._get_translated_model(lang_code, auto_create=True, meta=meta)
        for field, value in model_fields.items():
            setattr(translation, field, value)
# Go through the same hooks as the regular model,
# instead of calling translation.save() directly.
instance.save_translations()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_conf(cfg_path):
""" Try to load the given conf file. """
|
global config
try:
cfg = open(cfg_path, 'r')
except Exception as ex:
if verbose:
print("Unable to open {0}".format(cfg_path))
print(str(ex))
return False
# Read the entire contents of the conf file
cfg_json = cfg.read()
cfg.close()
# print(cfg_json)
# Try to parse the conf file into a Python structure
try:
config = json.loads(cfg_json)
except Exception as ex:
print("Unable to parse configuration file as JSON")
print(str(ex))
return False
# This config was successfully loaded
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def translate_message_tokens(message_tokens):
""" Translates alias references to their defined values. The first token is a channel alias. The remaining tokens are value aliases. """
|
trans_tokens = []
if message_tokens[0] in cv_dict[channels_key]:
trans_tokens.append(cv_dict[channels_key][message_tokens[0]])
else:
trans_tokens.append(int(message_tokens[0]))
for token in message_tokens[1:]:
if token in cv_dict[values_key]:
trans_tokens.extend(cv_dict[values_key][token])
else:
trans_tokens.append(int(token))
return trans_tokens
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_headers(cls, msg):
""" Parse HTTP headers. Args: msg (str):
HTTP message. Returns: (List[Tuple[str, str]):
List of header tuples. """
|
return list(email.parser.Parser().parsestr(msg).items())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(cls, msg):
"""Parse message string to response object."""
|
lines = msg.splitlines()
version, status_code, reason = lines[0].split()
headers = cls.parse_headers('\r\n'.join(lines[1:]))
return cls(version=version, status_code=status_code,
reason=reason, headers=headers)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(cls, msg):
"""Parse message string to request object."""
|
lines = msg.splitlines()
method, uri, version = lines[0].split()
headers = cls.parse_headers('\r\n'.join(lines[1:]))
return cls(version=version, uri=uri, method=method, headers=headers)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sendto(self, transport, addr):
""" Send request to a given address via given transport. Args: transport (asyncio.DatagramTransport):
Write transport to send the message on. addr (Tuple[str, int]):
IP address and port pair to send the message to. """
|
# Serialize the message and terminate it with a blank line.
datagram = bytes(self) + b'\r\n'
logger.debug("%s:%s < %s", addr[0], addr[1], self)
transport.sendto(datagram, addr)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_rgb(dev, red, green, blue, dimmer):
""" Send a set of RGB values to the light """
|
cv = [0 for v in range(0, 512)]
cv[0] = red
cv[1] = green
cv[2] = blue
cv[6] = dimmer
sent = dev.send_multi_value(1, cv)
return sent
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
""" How to control a DMX light through an Anyma USB controller """
|
# Channel value list for channels 1-512
cv = [0 for v in range(0, 512)]
# Create an instance of the DMX controller and open it
print("Opening DMX controller...")
dev = pyudmx.uDMXDevice()
# This will automagically find a single Anyma-type USB DMX controller
dev.open()
# For informational purpose, display what we know about the DMX controller
print(dev.Device)
# Send messages to the light changing it to red, then green, then blue
# This is the "hard way" to do it, but illustrates how it's done
# send_multi_value(1, cv) writes the whole frame starting at channel 1
# (presumably 1-based per the uDMX API -- confirm with pyudmx docs).
print("Setting to red...")
cv[0] = 255  # red
cv[6] = 128  # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to red")
sleep(3.0)
print("Setting to green...")
cv[0] = 0  # red
cv[1] = 255  # green
cv[6] = 128  # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to green")
sleep(3.0)
print("Setting to blue...")
cv[0] = 0  # red
cv[1] = 0  # green
cv[2] = 255  # blue
cv[6] = 128  # dimmer to half value
sent = dev.send_multi_value(1, cv)
print("Set to blue")
sleep(3.0)
# Here's an easier way to do it
print("And, again the easier way")
send_rgb(dev, 255, 0, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 255, 0, 128)
sleep(3.0)
send_rgb(dev, 0, 0, 255, 128)
sleep(3.0)
print("Reset all channels and close..")
# Turns the light off: zero the whole frame and release the device.
cv = [0 for v in range(0, 512)]
dev.send_multi_value(1, cv)
dev.close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""Connect to vCenter server"""
|
try:
    # TLS 1.2 context; only passed to the verified-connection branch below.
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    if self.config['no_ssl_verify']:
        # Silence urllib3's InsecureRequestWarning and skip certificate
        # verification entirely for self-signed vCenter installs.
        requests.packages.urllib3.disable_warnings()
        context.verify_mode = ssl.CERT_NONE
        self.si = SmartConnectNoSSL(
            host=self.config['server'],
            user=self.config['username'],
            pwd=self.config['password'],
            port=int(self.config['port']),
            certFile=None,
            keyFile=None,
        )
    else:
        self.si = SmartConnect(
            host=self.config['server'],
            user=self.config['username'],
            pwd=self.config['password'],
            port=int(self.config['port']),
            sslContext=context,
            certFile=None,
            keyFile=None,
        )
except Exception as e:
    # Connection failures are fatal for this tool.
    print('Unable to connect to vsphere server.')
    print(e)
    sys.exit(1)
# add a clean up routine: disconnect the session at interpreter exit
atexit.register(Disconnect, self.si)
# Cache the service content root for later inventory queries.
self.content = self.si.RetrieveContent()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def status(self):
"""Check power status"""
|
vm = self.get_vm_failfast(self.config['name'])
extra = self.config['extra']
parserFriendly = self.config['parserFriendly']
if extra:
    # Verbose mode: a header row followed by one detail row.
    header = ["vmname", "powerstate", "ipaddress", "hostname", "memory",
              "cpunum", "uuid", "guestid", "uptime"]
    details = [vm.name, vm.runtime.powerState,
               vm.summary.guest.ipAddress or '',
               vm.summary.guest.hostName or '',
               str(vm.summary.config.memorySizeMB),
               str(vm.summary.config.numCpu),
               vm.summary.config.uuid, vm.summary.guest.guestId,
               str(vm.summary.quickStats.uptimeSeconds) or '0']
    status_to_print = [header, details]
else:
    # Terse mode: just name and power state.
    status_to_print = [[vm.name, vm.runtime.powerState]]
if parserFriendly:
    self.print_as_lines(status_to_print)
else:
    self.print_as_table(status_to_print)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shutdown(self):
""" Shutdown guest fallback to power off if guest tools aren't installed """
|
vm = self.get_vm_failfast(self.config['name'])
if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
    # Nothing to do: guest is already down.
    print("%s already poweredOff" % vm.name)
else:
    if self.guestToolsRunning(vm):
        # Ask the guest OS to shut down cleanly, then poll for up to
        # timeout_minutes before falling back to a hard power-off.
        timeout_minutes = 10
        print("waiting for %s to shutdown "
              "(%s minutes before forced powerOff)" % (
                  vm.name,
                  str(timeout_minutes)
              ))
        vm.ShutdownGuest()
        if self.WaitForVirtualMachineShutdown(vm,
                                              timeout_minutes * 60):
            print("shutdown complete")
            print("%s poweredOff" % vm.name)
        else:
            # Clean shutdown timed out; force it.
            print("%s has not shutdown after %s minutes:"
                  "will powerOff" % (vm.name, str(timeout_minutes)))
            self.powerOff()
    else:
        # Without guest tools there is no clean-shutdown channel.
        print("GuestTools not running or not installed: will powerOff")
        self.powerOff()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_resource_pool(self, cluster, pool_name):
""" Find a resource pool given a pool name for desired cluster """
|
pool_obj = None
# get a list of all resource pools in this cluster
cluster_pools_list = cluster.resourcePool.resourcePool
# get list of all resource pools with a given text name
pool_selections = self.get_obj(
[vim.ResourcePool],
pool_name,
return_all=True
)
# get the first pool that exists in a given cluster
if pool_selections:
for p in pool_selections:
if p in cluster_pools_list:
pool_obj = p
break
return pool_obj
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_obj(self, vimtype, name, return_all=False, path=""):
"""Get the vsphere object associated with a given text name or MOID"""
|
obj = list()
if path:
obj_folder = self.content.searchIndex.FindByInventoryPath(path)
container = self.content.viewManager.CreateContainerView(
obj_folder, vimtype, True
)
else:
container = self.content.viewManager.CreateContainerView(
self.content.rootFolder, vimtype, True)
for c in container.view:
if name in [c.name, c._GetMoId()]:
if return_all is False:
return c
break
else:
obj.append(c)
if len(obj) > 0:
return obj
else:
# for backwards-compat
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_host_system_failfast( self, name, verbose=False, host_system_term='HS' ):
""" Get a HostSystem object fail fast if the object isn't a valid reference """
|
if verbose:
print("Finding HostSystem named %s..." % name)
hs = self.get_host_system(name)
if hs is None:
print("Error: %s '%s' does not exist" % (host_system_term, name))
sys.exit(1)
if verbose:
print("Found HostSystem: {0} Name: {1}" % (hs, hs.name))
return hs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_vm(self, name, path=""):
"""Get a VirtualMachine object"""
|
if path:
return self.get_obj([vim.VirtualMachine], name, path=path)
else:
return self.get_obj([vim.VirtualMachine], name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_vm_failfast(self, name, verbose=False, vm_term='VM', path=""):
""" Get a VirtualMachine object fail fast if the object isn't a valid reference """
|
if verbose:
print("Finding VirtualMachine named %s..." % name)
if path:
vm = self.get_vm(name, path=path)
else:
vm = self.get_vm(name)
if vm is None:
print("Error: %s '%s' does not exist" % (vm_term, name))
sys.exit(1)
if verbose:
print("Found VirtualMachine: %s Name: %s" % (vm, vm.name))
return vm
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def WaitForVirtualMachineShutdown( self, vm_to_poll, timeout_seconds, sleep_period=5 ):
""" Guest shutdown requests do not run a task we can wait for. So, we must poll and wait for status to be poweredOff. Returns True if shutdown, False if poll expired. """
|
seconds_waited = 0 # wait counter
while seconds_waited < timeout_seconds:
# sleep first, since nothing shuts down instantly
seconds_waited += sleep_period
time.sleep(sleep_period)
vm = self.get_vm(vm_to_poll.name)
if vm.runtime.powerState == \
vim.VirtualMachinePowerState.poweredOff:
return True
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def location(ip=None, key=None, field=None):
    '''Look up geolocation data for an IP address via ipapi.co.

    When *field* is given, return that single field as text; otherwise
    return the full location record as JSON. *ip* defaults to the
    caller's own address; *key* overrides the module-level API key.
    '''
    if field and (field not in field_list):
        return 'Invalid field'
    base = 'https://ipapi.co/'
    # Build the endpoint: single-field vs full-JSON, with or without
    # an explicit IP.
    if field:
        url = base + ('{}/{}/'.format(ip, field) if ip else '{}/'.format(field))
    else:
        url = base + ('{}/json/'.format(ip) if ip else 'json/')
    # Attach the API key, preferring the per-call one.
    api_key = key or API_KEY
    if api_key:
        url = '{}?key={}'.format(url, api_key)
    response = get(url, headers=headers)
    return response.text if field else response.json()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Main example."""
|
# Log everything to stdout for interactive testing.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
parser = argparse.ArgumentParser(
    description='Test the SMA webconnect library.')
parser.add_argument(
    'ip', type=str, help='IP address of the Webconnect module')
parser.add_argument(
    'user', help='installer/user')
parser.add_argument(
    'password', help='Installer password')
args = parser.parse_args()
loop = asyncio.get_event_loop()

def _shutdown(*_):
    # Ctrl-C handler: flag the polling loop (in main_loop) to stop.
    VAR['running'] = False
    # asyncio.ensure_future(sma.close_session(), loop=loop)
signal.signal(signal.SIGINT, _shutdown)
# loop.add_signal_handler(signal.SIGINT, shutdown)
# signal.signal(signal.SIGINT, signal.SIG_DFL)
# Run the async polling loop until it finishes.
loop.run_until_complete(main_loop(
    loop, user=args.user, password=args.password, ip=args.ip))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data(self):
""" Get a cached post-processed result of a GitHub API call. Uses Trac cache to avoid constant querying of the remote API. If a previous API call did not succeed, automatically retries after a timeout. """
|
if self._next_update and datetime.now() > self._next_update:
self.update()
return self._data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def teams(self):
""" Return a sequence of `GitHubTeam` objects, one for each team in this org. """
|
# Fresh slug -> team-id mapping from the cached team list.
teams = self._teamlist.teams()
# find out which teams have been added or removed since the last sync
current_teams = set(self._teamobjects.keys())
new_teams = set(teams.keys())  # pylint: disable=no-member
added = new_teams - current_teams
removed = current_teams - new_teams
for team in removed:
    # Drop cached objects for teams that no longer exist upstream.
    del self._teamobjects[team]
for team in added:
    # Lazily create wrappers for newly appeared teams.
    self._teamobjects[team] = GitHubTeam(
        self._api, self._env, self._org, teams[team], team)  # pylint: disable=unsubscriptable-object
return self._teamobjects.values()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def members(self):
""" Return a list of all users in this organization. Users are identified by their login name. Note that this is computed from the teams in the organization, because GitHub does not currently offer a WebHook for organization membership, so converting org membership would lead to stale data. """
|
allmembers = set()
for team in self.teams():
allmembers.update(team.members())
return sorted(allmembers)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_team(self, slug):
""" Trigger an update and cache invalidation for the team identified by the given `slug`. Returns `True` on success, `False` otherwise. :param slug: The GitHub 'slug' that identifies the team in URLs """
|
if slug not in self._teamobjects:
# This case is checked and handled further up, but better be safe
# than sorry.
return False # pragma: no cover
return self._teamobjects[slug].update()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def github_api(self, url, *args):
""" Connect to the given GitHub API URL template by replacing all placeholders with the given parameters and return the decoded JSON result on success. On error, return `None`. :param url: The path to request from the GitHub API. Contains format string placeholders that will be replaced with all additional positional arguments. """
|
import requests
import urllib
# NOTE(review): urllib.quote exists only on Python 2; Python 3 moved it
# to urllib.parse.quote -- confirm the targeted runtime before porting.
github_api_url = os.environ.get("TRAC_GITHUB_API_URL", "https://api.github.com/")
# URL-encode each positional argument before substituting it into the
# endpoint template.
formatted_url = github_api_url + url.format(*(urllib.quote(str(x)) for x in args))
access_token = _config_secret(self.access_token)
self.log.debug("Hitting GitHub API endpoint %s with user %s", formatted_url, self.username)  # pylint: disable=no-member
results = []
try:
    has_next = True
    while has_next:
        req = requests.get(formatted_url, auth=(self.username, access_token))
        if req.status_code != 200:
            # Prefer GitHub's JSON error message, fall back to raw text.
            try:
                message = req.json()['message']
            except Exception:  # pylint: disable=broad-except
                message = req.text
            self.log.error("Error communicating with GitHub API at {}: {}".format(  # pylint: disable=no-member
                formatted_url, message))
            return None
        results.extend(req.json())
        # Follow the 'next' Link header for pagination until exhausted.
        has_next = 'next' in req.links
        if has_next:
            formatted_url = req.links['next']['url']
except requests.exceptions.ConnectionError as rce:
    self.log.error("Exception while communicating with GitHub API at {}: {}".format(  # pylint: disable=no-member
        formatted_url, rce))
    return None
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_team(self, slug):
""" Trigger update and cache invalidation for the team identified by the given `slug`, if any. Returns `True` if the update was successful, `False` otherwise. :param slug: GitHub 'slug' name for the team to be updated. """
|
if self._org:
if not self._org.has_team(slug):
return self._org.update()
return self._org.update_team(slug)
# self._org is created during Trac startup, so there should never
# be a case where we try to update an org before it's created; this
# is a sanity check only.
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_permission_groups(self, username):
""" Return a list of names of the groups that the user with the specified name is a member of. Implements an `IPermissionGroupProvider` API. This specific implementation connects to GitHub with a dedicated user, fetches and caches the teams and their users configured at GitHub and converts the data into a format usable for easy access by username. """
|
# Without full GitHub credentials configured there is nothing to map.
if not self.organization or not self.username or not self.access_token:
    return []
elif (self.username_prefix and
      not username.startswith(self.username_prefix)):
    # Only usernames carrying the configured prefix map to GitHub users.
    return []
data = self._fetch_groups()
if not data:
    self.log.error("No cached groups from GitHub available")  # pylint: disable=no-member
    return []
else:
    # Strip the prefix before looking the user up in the cached mapping.
    return data.get(username[len(self.username_prefix):], [])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match_request(self, req):
""" Return whether the handler wants to process the given request. Implements an `IRequestHandler` API. """
|
match = self._request_re.match(req.path_info)
if match:
return True
if os.environ.get('TRAC_GITHUB_ENABLE_DEBUGGING', None) is not None:
debug_match = self._debug_request_re.match(req.path_info)
if debug_match:
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_debug_request(self, req):
""" Debgging helper used for testing, processes the given request and dumps the internal state of cached user to group mappings. Note that this is only callable if TRAC_GITHUB_ENABLE_DEBUGGING is set in the environment. """
|
req.send(json.dumps(self._fetch_groups()).encode('utf-8'), 'application/json', 200)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_request(self, req):
""" Process the given request `req`, implements an `IRequestHandler` API. Normally, `process_request` would return a tuple, but since none of these requests will return an HTML page, they will all terminate without a return value and directly send a response. """
|
# Note: req.send() raises RequestDone internally, so each error branch
# below terminates the request at the send() call.
if os.environ.get('TRAC_GITHUB_ENABLE_DEBUGGING', None) is not None:
    debug_match = self._debug_request_re.match(req.path_info)
    if debug_match:
        self.process_debug_request(req)
if req.method != 'POST':
    # Webhook deliveries are always POSTs; answer GETs with a hint.
    msg = u'Endpoint is ready to accept GitHub Organization membership notifications.\n'
    self.log.warning(u'Method not allowed (%s)', req.method)  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 405)
event = req.get_header('X-GitHub-Event')
supported_events = {
    'ping': self._handle_ping_ev,
    'membership': self._handle_membership_ev
}
# Check whether this event is supported
if event not in supported_events:
    msg = u'Event type %s is not supported\n' % event
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 400)
# Verify the event's signature
reqdata = req.read()
signature = req.get_header('X-Hub-Signature')
if not self._verify_webhook_signature(signature, reqdata):
    msg = u'Webhook signature verification failed\n'
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 403)
# Decode JSON and handle errors
try:
    payload = json.loads(reqdata)
except (ValueError, KeyError):
    msg = u'Invalid payload\n'
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 400)
# Handle the event
try:
    supported_events[event](req, payload)
except RequestDone:
    # Normal termination, bubble up
    raise
except Exception:  # pylint: disable=broad-except
    msg = (u'Exception occurred while handling payload, '
           'possible invalid payload\n%s' % traceback.format_exc())
    self.log.warning(msg.rstrip('\n'))  # pylint: disable=no-member
    req.send(msg.encode('utf-8'), 'text/plain', 500)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_content_children(self, content_id, expand=None, parent_version=None, callback=None):
""" Returns a map of the direct children of a piece of Content. Content can have multiple types of children - for example a Page can have children that are also Pages, but it can also have Comments and Attachments. The {@link ContentType}(s) of the children returned is specified by the "expand" query parameter in the request - this parameter can include expands for multiple child types. If no types are included in the expand parameter, the map returned will just list the child types that are available to be expanded for the {@link Content} referenced by the "content_id" parameter. :param content_id (string):
A string containing the id of the content to retrieve children for. :param expand (string):
OPTIONAL :A comma separated list of properties to expand on the children. Default: None. :param parent_version (int):
OPTIONAL: An integer representing the version of the content to retrieve children for. Default: 0 (Latest) :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
|
params = {}
if expand:
params["expand"] = expand
if parent_version:
params["parentVersion"] = parent_version
return self._service_get_request("rest/api/content/{id}/child".format(id=content_id), params=params,
callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_content_descendants(self, content_id, expand=None, callback=None):
""" Returns a map of the descendants of a piece of Content. Content can have multiple types of descendants - for example a Page can have descendants that are also Pages, but it can also have Comments and Attachments. The {@link ContentType}(s) of the descendants returned is specified by the "expand" query parameter in the request - this parameter can include expands for multiple descendant types. If no types are included in the expand parameter, the map returned will just list the descendant types that are available to be expanded for the {@link Content} referenced by the "content_id" parameter. :param content_id (string):
A string containing the id of the content to retrieve descendants for. :param expand (string):
OPTIONAL: A comma separated list of properties to expand on the descendants. Default: None. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/{type} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
|
params = {}
if expand:
params["expand"] = expand
return self._service_get_request("rest/api/content/{id}/descendant".format(id=content_id), params=params,
callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_content_descendants_by_type(self, content_id, child_type, expand=None, start=None, limit=None, callback=None):
""" Returns the direct descendants of a piece of Content, limited to a single descendant type. The {@link ContentType}(s) of the descendants returned is specified by the "type" path parameter in the request. Currently the only supported descendants are comment descendants of non-comment Content. :param content_id (string):
A string containing the id of the content to retrieve descendants for :param child_type (string):
A {@link ContentType} to filter descendants on. :param expand (string):
OPTIONAL: A comma separated list of properties to expand on the descendants. Default: Empty :param start (int):
OPTIONAL: The index of the first item within the result set that should be returned. Default: 0. :param limit (int):
OPTIONAL: How many items should be returned after the start index. Default: 25 or site limit. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/descendant/{type} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
|
params = {}
if expand:
params["expand"] = expand
if start is not None:
params["start"] = int(start)
if limit is not None:
params["limit"] = int(limit)
return self._service_get_request("rest/api/content/{id}/descendant/{type}"
"".format(id=content_id, type=child_type), params=params, callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_content_properties(self, content_id, expand=None, start=None, limit=None, callback=None):
""" Returns a paginated list of content properties. Content properties are a key / value store of properties attached to a piece of Content. The key is a string, and the value is a JSON-serializable object. :param content_id (string):
A string containing the id of the property content container. :param expand (string):
OPTIONAL: A comma separated list of properties to expand on the content properties. Default: Empty. :param start (int):
OPTIONAL: The start point of the collection to return. Default: None (0). :param limit (int):
OPTIONAL: The limit of the number of items to return, this may be restricted by fixed system limits. Default: 10. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/property endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
|
params = {}
if expand:
params["expand"] = expand
if start is not None:
params["start"] = int(start)
if limit is not None:
params["limit"] = int(limit)
return self._service_get_request("rest/api/content/{id}/property".format(id=content_id),
params=params, callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_new_attachment_by_content_id(self, content_id, attachments, callback=None):
""" Add one or more attachments to a Confluence Content entity, with optional comments. Comments are optional, but if included there must be as many comments as there are files, and the comments must be in the same order as the files. :param content_id (string):
A string containing the id of the attachments content container. :param attachments (list of dicts or dict):
This is a list of dictionaries or a dictionary. Each dictionary must have the key "file" with a value that is I/O like (file, StringIO, etc.), and may also have a key "comment" with a string for file comments. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/child/attachment endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """
|
# Validate the attachments argument shape: a dict with a "file" key, or
# a list of such dicts. NOTE(review): assert-based validation is
# stripped under `python -O`; raising ValueError would be safer but
# would change the exception type callers may currently rely on.
if isinstance(attachments, list):
    assert all(isinstance(at, dict) and "file" in list(at.keys()) for at in attachments)
elif isinstance(attachments, dict):
    assert "file" in list(attachments.keys())
else:
    assert False
# X-Atlassian-Token: nocheck disables Confluence's XSRF check for
# multipart uploads.
return self._service_post_request("rest/api/content/{id}/child/attachment".format(id=content_id),
                                  headers={"X-Atlassian-Token": "nocheck"}, files=attachments,
                                  callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_new_space(self, space_definition, callback=None):
    """Create a new Space.

    :param space_definition: Dict describing the space. Must contain the keys
        "key", "name" and "description"; no id is supplied (the server
        assigns one).
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: JSON from the space endpoint, or the callback's result. May
        raise requests.HTTPError on bad input.
    """
    assert isinstance(space_definition, dict)
    assert {"key", "name", "description"} <= set(space_definition)
    return self._service_post_request("rest/api/space",
                                      data=json.dumps(space_definition),
                                      headers={"Content-Type": "application/json"},
                                      callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_content_by_id(self, content_data, content_id, callback=None):
    """Update a piece of Content, or restore it from the trash.

    :param content_data: Dict of the content with the desired updates; must
        contain every key in ``self.UPDATE_CONTENT_REQUIRED_KEYS`` (including
        the incremented version number).
    :param content_id: ID of the content to update.
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: JSON from the content/{id} endpoint, or the callback's result.
        May raise requests.HTTPError on bad input.
    """
    assert isinstance(content_data, dict)
    assert set(content_data) >= self.UPDATE_CONTENT_REQUIRED_KEYS
    endpoint = "rest/api/content/{id}".format(id=content_id)
    return self._service_put_request(endpoint, data=json.dumps(content_data),
                                     headers={"Content-Type": "application/json"},
                                     callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_attachment_metadata(self, content_id, attachment_id, new_metadata, callback=None):
    """Update the non-binary data (filename, media-type, comment, parent) of an attachment.

    :param content_id: ID of the attachment's content container.
    :param attachment_id: ID of the attachment to update.
    :param new_metadata: Dict of updated metadata; must contain every key in
        ``self.ATTACHMENT_METADATA_KEYS``.
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: JSON from the content/{id}/child/attachment/{attachment_id}
        endpoint, or the callback's result. May raise requests.HTTPError on
        bad input.
    """
    assert isinstance(new_metadata, dict)
    assert set(new_metadata) >= self.ATTACHMENT_METADATA_KEYS
    endpoint = ("rest/api/content/{id}/child/attachment/{attachment_id}"
                "".format(id=content_id, attachment_id=attachment_id))
    return self._service_put_request(endpoint, data=json.dumps(new_metadata),
                                     headers={"Content-Type": "application/json"},
                                     callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_attachment(self, content_id, attachment_id, attachment, callback=None):
    """Upload a new version of an attachment's binary data.

    :param content_id: ID of the attachment's content container.
    :param attachment_id: ID of the attachment to upload a new file for.
    :param attachment: Dict describing the upload. Must have a "file" key
        (I/O like value); may have "comment" (string) and "minorEdit"
        (boolean, suppresses watcher notifications) keys.
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: JSON from the
        content/{content_id}/child/attachment/{attachment_id}/data endpoint,
        or the callback's result. May raise requests.HTTPError on bad input.
    """
    # Anything that is not a dict with a "file" key is rejected.
    assert isinstance(attachment, dict) and "file" in attachment
    endpoint = ("rest/api/content/{content_id}/child/attachment/{attachment_id}/data"
                "".format(content_id=content_id, attachment_id=attachment_id))
    return self._service_post_request(endpoint,
                                      headers={"X-Atlassian-Token": "nocheck"},
                                      files=attachment, callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_property(self, content_id, property_key, new_property_data, callback=None):
    """Update a content property (creates it when the given version is 1).

    :param content_id: ID of the content the property is attached to.
    :param property_key: Key of the property to update.
    :param new_property_data: Dict with the updated property; must contain
        the keys "key", "value" and "version".
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: JSON from the content/{id}/property/{key} endpoint, or the
        callback's result. May raise requests.HTTPError on bad input.
    """
    required = {"key", "value", "version"}
    assert isinstance(new_property_data, dict) and required <= set(new_property_data)
    endpoint = "rest/api/content/{id}/property/{key}".format(id=content_id,
                                                             key=property_key)
    return self._service_put_request(endpoint,
                                     data=json.dumps(new_property_data),
                                     headers={"Content-Type": "application/json"},
                                     callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_space(self, space_key, space_definition, callback=None):
    """Update a Space (only name, description and homepage can change).

    :param space_key: Key of the space to update.
    :param space_definition: Dict with the updated space metadata; must
        contain the keys "key", "name" and "description".
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: JSON from the space/{key} endpoint, or the callback's result.
        May raise requests.HTTPError on bad input.
    """
    required = {"key", "name", "description"}
    assert isinstance(space_definition, dict) and required <= set(space_definition)
    return self._service_put_request("rest/api/space/{key}".format(key=space_key),
                                     data=json.dumps(space_definition),
                                     headers={"Content-Type": "application/json"},
                                     callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_contentbody_to_new_type(self, content_data, old_representation, new_representation, callback=None):
    """Convert a content body between representations.

    Supported conversions: storage -> view/export_view/editor and
    editor -> storage ("view" and "export_view" cannot be converted from).

    :param content_data: The content body to transform (stringified).
    :param old_representation: Representation to convert from.
    :param new_representation: Representation to convert to.
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: JSON from the contentbody/convert/{to} endpoint, or the
        callback's result. May raise requests.HTTPError on bad input.
    """
    supported = {"storage", "editor", "view", "export_view"}
    assert {old_representation, new_representation} < supported
    # TODO: Enforce conversion rules better here.
    request_data = {"value": str(content_data), "representation": old_representation}
    return self._service_post_request("rest/api/contentbody/convert/{to}".format(to=new_representation),
                                      data=json.dumps(request_data),
                                      headers={"Content-Type": "application/json"},
                                      callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_label_by_id(self, content_id, label_name, callback=None):
    """Delete a label from the specified content.

    (A DELETE to /rest/api/content/{id}/label/{label} would also work but is
    more restrictive about label names, so the query-parameter form is used.)

    :param content_id: ID of the label's content container.
    :param label_name: Name of the label to remove from the content.
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: Empty on success, or the callback's result. May raise
        requests.HTTPError on bad input.
    """
    endpoint = "rest/api/content/{id}/label".format(id=content_id)
    return self._service_delete_request(endpoint,
                                        params={"name": label_name},
                                        callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_space(self, space_key, callback=None):
    """Delete a Space.

    Deletion runs as a long-running server task, so the space is not yet
    gone when this returns; poll the status link in the response.

    :param space_key: Key of the space to delete.
    :param callback: Optional callable applied to the response data before it
        is returned.
    :return: A pointer to the long-poll task on success, or the callback's
        result. May raise requests.HTTPError on bad input.
    """
    endpoint = "rest/api/space/{key}".format(key=space_key)
    return self._service_delete_request(endpoint, callback=callback)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, sensor):
    """Add a sensor (or an iterable of sensors), warning on duplicates."""
    # A list/tuple is registered element by element.
    if isinstance(sensor, (list, tuple)):
        for item in sensor:
            self.add(item)
        return
    if not isinstance(sensor, Sensor):
        raise TypeError("pysma.Sensor expected")
    # A sensor with an already-known name replaces the existing entry.
    if sensor.name in self:
        previous = self[sensor.name]
        self.__s.remove(previous)
        _LOGGER.warning("Replacing sensor %s with %s", previous, sensor)
    # Duplicate keys are allowed, but flag them for the user.
    if sensor.key in self:
        _LOGGER.warning("Duplicate SMA sensor key %s", sensor.key)
    self.__s.append(sensor)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fetch_json(self, url, payload):
    """Fetch json data for requests.

    POSTs *payload* as JSON to ``self._url + url`` and returns the decoded
    response body (``{}`` when the body is empty/falsy).  Retries up to
    three times on timeout; instead of raising, a persistent timeout is
    reported as a dict carrying an ``err`` message.
    """
    params = {
        'data': json.dumps(payload),
        'headers': {'content-type': 'application/json'},
        # Pass the session id as a query parameter once logged in.
        'params': {'sid': self.sma_sid} if self.sma_sid else None,
    }
    for _ in range(3):
        try:
            # 3-second budget per attempt, covering request and decode.
            with async_timeout.timeout(3):
                res = yield from self._aio_session.post(
                    self._url + url, **params)
                return (yield from res.json()) or {}
        except asyncio.TimeoutError:
            continue
    return {'err': "Could not connect to SMA at {} (timeout)"
            .format(self._url)}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new_session(self):
    """Establish a new session.

    Logs in, stores the session id from ``result.sid`` and returns True on
    success; on failure logs the reason and returns False.
    """
    body = yield from self._fetch_json(URL_LOGIN, self._new_session_data)
    self.sma_sid = jmespath.search('result.sid', body)
    if self.sma_sid:
        return True
    msg = 'Could not start session, %s, got {}'.format(body)
    err = body.get('err')
    if err == 503:
        _LOGGER.error("Max amount of sessions reached")
    elif err:
        _LOGGER.error(msg, err)
    else:
        _LOGGER.error(msg, "Session ID expected [result.sid]")
    return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, sensors):
    """Read a set of keys.

    Ensures a session exists, fetches current values for the sensors' keys
    and lets each sensor extract its own value from the response.  Returns
    False when no session could be established or a 401 forced a re-login.
    """
    payload = {'destDev': [],
               'keys': list({sensor.key for sensor in sensors})}
    # Log in first if we have no session yet.
    if self.sma_sid is None:
        yield from self.new_session()
        if self.sma_sid is None:
            return False
    body = yield from self._fetch_json(URL_VALUES, payload=payload)
    # On the first 401 error we close the session which will re-login
    if body.get('err') == 401:
        _LOGGER.warning("401 error detected, closing session to force "
                        "another login attempt")
        self.close_session()
        return False
    _LOGGER.debug(json.dumps(body))
    for sensor in sensors:
        if sensor.extract_value(body):
            _LOGGER.debug("%s\t= %s %s",
                          sensor.name, sensor.value, sensor.unit)
    return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def media(self, uri):
    """Play a media file.

    Downloads *uri* to a local temp file so mutagen can read its tags
    (title/artist/album), publishes tags, duration and volume through the
    manager mapping, then (re)starts GStreamer playback.  If the download
    or tag parsing fails, the original URI is streamed directly.
    """
    try:
        # mutagen needs a real local path to read the tags from.
        local_path, _ = urllib.request.urlretrieve(uri)
        metadata = mutagen.File(local_path, easy=True)
        if metadata.tags:
            self._tags = metadata.tags
        # Missing tags fall back to '' (or to previously cached tags,
        # since self._tags is only replaced when new tags were found).
        title = self._tags.get(TAG_TITLE, [])
        self._manager[ATTR_TITLE] = title[0] if len(title) else ''
        artist = self._tags.get(TAG_ARTIST, [])
        self._manager[ATTR_ARTIST] = artist[0] if len(artist) else ''
        album = self._tags.get(TAG_ALBUM, [])
        self._manager[ATTR_ALBUM] = album[0] if len(album) else ''
        local_uri = 'file://{}'.format(local_path)
    # urllib.error.HTTPError
    except Exception:  # pylint: disable=broad-except
        # Best effort: on any failure hand the remote URI to GStreamer.
        local_uri = uri
    # Reset the pipeline before switching the source URI.
    self._player.set_state(Gst.State.NULL)
    self._player.set_property(PROP_URI, local_uri)
    self._player.set_state(Gst.State.PLAYING)
    self.state = STATE_PLAYING
    self._manager[ATTR_URI] = uri
    self._manager[ATTR_DURATION] = self._duration()
    self._manager[ATTR_VOLUME] = self._player.get_property(PROP_VOLUME)
    _LOGGER.info('playing %s (as %s)', uri, local_uri)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def play(self):
    """Resume playback; a no-op unless currently paused."""
    if self.state != STATE_PAUSED:
        return
    self._player.set_state(Gst.State.PLAYING)
    self.state = STATE_PLAYING
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pause(self):
    """Pause playback; a no-op unless currently playing."""
    if self.state != STATE_PLAYING:
        return
    self._player.set_state(Gst.State.PAUSED)
    self.state = STATE_PAUSED
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
    """Stop pipeline.

    Cleans up cached downloads, halts the GStreamer pipeline and resets
    the cached tags so the next ``media()`` call starts fresh.
    """
    # Remove temp files created by urllib.request.urlretrieve in media().
    urllib.request.urlcleanup()
    self._player.set_state(Gst.State.NULL)
    self.state = STATE_IDLE
    self._tags = {}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_position(self, position):
    """Seek to *position* (seconds); ignored when beyond the duration."""
    if position > self._duration():
        return
    self._manager[ATTR_POSITION] = position
    # GStreamer seeks in nanoseconds.
    self._player.seek_simple(_FORMAT_TIME, Gst.SeekFlags.FLUSH,
                             position * _NANOSEC_MULT)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def state(self, state):
    """Set state.

    Stores the new state on the instance and mirrors it into the shared
    manager mapping so external observers see the change, then logs it.
    """
    self._state = state
    self._manager[ATTR_STATE] = state
    _LOGGER.info('state changed to %s', state)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _duration(self):
    """Return the media duration in seconds (0 while idle)."""
    if self.state == STATE_IDLE:
        return 0
    # query_duration returns a (success, value-in-nanoseconds) pair.
    return self._player.query_duration(_FORMAT_TIME)[1] // _NANOSEC_MULT
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _position(self):
    """Return the media position in seconds (0 while idle)."""
    if self.state == STATE_IDLE:
        return 0
    # query_position returns a (success, value-in-nanoseconds) pair.
    return self._player.query_position(_FORMAT_TIME)[1] // _NANOSEC_MULT
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _on_message(self, bus, message):
    # pylint: disable=unused-argument
    """When a message is received from Gstreamer.

    Stops the pipeline at end-of-stream; on errors, stops the pipeline
    and logs the parsed error.
    """
    if message.type == Gst.MessageType.EOS:
        self.stop()
    elif message.type == Gst.MessageType.ERROR:
        self.stop()
        err, _ = message.parse_error()
        _LOGGER.error('%s', err)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_previous_node(node):
    """Return the node before this node.

    Prefers the immediate previous sibling; otherwise walks up through the
    ancestors and returns the first previous sibling encountered.  Returns
    None when no such node exists.
    """
    current = node
    while True:
        if current.prev_sibling:
            return current.prev_sibling
        if not current.parent:
            return None
        current = current.parent
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def casperjs_command_kwargs():
    """Build the subprocess.Popen keyword arguments for the capture command."""
    result = {
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'universal_newlines': True,
    }
    phantom_js_cmd = app_settings['PHANTOMJS_CMD']
    if phantom_js_cmd:
        # Put the PhantomJS binary's directory on PATH so CasperJS finds it.
        result['env'] = {
            'PATH': '{0}:{1}'.format(os.getenv('PATH', ''),
                                     os.path.dirname(phantom_js_cmd)),
        }
    return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def casperjs_capture(stream, url, method=None, width=None, height=None, selector=None, data=None, waitfor=None, size=None, crop=None, render='png', wait=None):
    """Captures web pages using ``casperjs``

    :param stream: Output target -- a filename (string), or a writable
        binary stream the captured file is copied into.
    :param url: The page to capture.
    :param method: Optional HTTP method forwarded to the capture script.
    :param width: Optional viewport width.
    :param height: Optional viewport height.
    :param selector: Optional CSS selector limiting the captured area.
    :param data: Optional payload, JSON-encoded onto the command line.
    :param waitfor: Optional selector to wait for before capturing.
    :param size: Optional resize spec, applied in post-processing.
    :param crop: Optional crop spec forwarded to image_postprocess.
    :param render: Output format name (default 'png').
    :param wait: Optional delay before capturing.
    :raises CaptureError: via process_casperjs_stdout on FATAL output.
    """
    if isinstance(stream, six.string_types):
        output = stream
    else:
        # Capture into a temp file first, then copy it into the stream.
        with NamedTemporaryFile('wb+', suffix='.%s' % render, delete=False) as f:
            output = f.name
    try:
        cmd = CASPERJS_CMD + [url, output]
        # Extra command-line options
        cmd += ['--format=%s' % render]
        if method:
            cmd += ['--method=%s' % method]
        if width:
            cmd += ['--width=%s' % width]
        if height:
            cmd += ['--height=%s' % height]
        if selector:
            cmd += ['--selector=%s' % selector]
        if data:
            cmd += ['--data="%s"' % json.dumps(data)]
        if waitfor:
            cmd += ['--waitfor=%s' % waitfor]
        if wait:
            cmd += ['--wait=%s' % wait]
        logger.debug(cmd)
        # Run CasperJS process
        proc = subprocess.Popen(cmd, **casperjs_command_kwargs())
        stdout = proc.communicate()[0]
        process_casperjs_stdout(stdout)
        size = parse_size(size)
        render = parse_render(render)
        # NOTE(review): when *size* is given together with render='pdf',
        # this branch still runs image_postprocess on the pdf despite the
        # comment below -- confirm that is intended.
        if size or (render and render != 'png' and render != 'pdf'):
            # pdf isn't an image, therefore we can't postprocess it.
            image_postprocess(output, stream, size, crop, render)
        else:
            if stream != output:
                # From file to stream
                with open(output, 'rb') as out:
                    stream.write(out.read())
                stream.flush()
    finally:
        # Remove the temp file unless the caller asked for a file directly.
        if stream != output:
            os.unlink(output)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_casperjs_stdout(stdout):
    """Parse and digest capture script output.

    Each output line is expected to look like ``LEVEL:message``; lines
    without a level prefix are treated as INFO.  FATAL lines are logged and
    raised, ERROR lines are logged as errors, everything else at info level.

    :param stdout: Text output captured from the CasperJS process.
    :raises CaptureError: when a FATAL line is encountered.
    """
    for line in stdout.splitlines():
        bits = line.split(':', 1)
        if len(bits) < 2:
            # No "LEVEL:" prefix: log the whole line at INFO level.
            # (Bug fix: previously the raw list from split() was used as
            # the message instead of the line text.)
            bits = ('INFO', line)
        level, msg = bits
        if level == 'FATAL':
            logger.fatal(msg)
            raise CaptureError(msg)
        elif level == 'ERROR':
            logger.error(msg)
        else:
            logger.info(msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_url(request, url):
    """Parse url URL parameter.

    Returns *url* unchanged when it already validates as an absolute URL.
    Otherwise a path starting with '/' is prefixed with the request's
    scheme and host, and anything else is resolved as a named URL pattern
    via reverse().
    """
    try:
        URLValidator()(url)
        return url
    except ValidationError:
        pass
    if url.startswith('/'):
        scheme = 'https' if request.is_secure() else 'http'
        return '{scheme}://{host}{uri}'.format(scheme=scheme,
                                               host=request.get_host(),
                                               uri=url)
    return request.build_absolute_uri(reverse(url))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_render(render):
    """Parse render URL parameter.

    Maps a requested extension/format name (case insensitive) onto one of
    the supported canonical format names; empty or unrecognised input
    falls back to 'png'.
    """
    known = {
        'jpeg': guess_all_extensions('image/jpeg'),
        'png': guess_all_extensions('image/png'),
        'gif': guess_all_extensions('image/gif'),
        'bmp': guess_all_extensions('image/x-ms-bmp'),
        'tiff': guess_all_extensions('image/tiff'),
        'xbm': guess_all_extensions('image/x-xbitmap'),
        'pdf': guess_all_extensions('application/pdf')
    }
    wanted = '.%s' % (render.lower() if render else 'png')
    return next((fmt for fmt, extensions in known.items()
                 if wanted in extensions), 'png')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_size(size_raw):
    """Parse size URL parameter.

    Accepts a string of the form ``"<width>x<height>"`` (case insensitive)
    and returns an ``(int, int)`` tuple.  Returns None for anything
    malformed: None/non-string input, a wrong number of 'x' separators, a
    non-integer dimension, or a dimension that is not strictly positive.

    Unlike the previous implementation this does not rely on ``assert``
    for validation, so it behaves identically under ``python -O``.
    """
    try:
        width_str, height_str = size_raw.lower().split('x')
    except (AttributeError, ValueError):
        # Not a string, or not exactly one 'x' separator.
        return None

    def _positive_int(text):
        """Parse a strictly positive int, or return None."""
        try:
            value = int(text)
        except ValueError:
            return None
        return value if value > 0 else None

    width = _positive_int(width_str)
    height = _positive_int(height_str)
    if width is None or height is None:
        return None
    return width, height
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_absolute_uri(request, url):
    """ Allow to override printing url, not necessarily on the same server instance. """
    # Prefer the configured capture root over the current request's host.
    root_url = app_settings.get('CAPTURE_ROOT_URL')
    if root_url:
        return urljoin(root_url, url)
    return request.build_absolute_uri(url)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_template(template_name, context, format='png', output=None, using=None, **options):
    """ Render a template from django project, and return the file object of the result.

    :param template_name: Django template to render.
    :param context: Template context dict.
    :param output: Optional path to write the capture to.  When omitted, an
        open NamedTemporaryFile is returned (removed once it is closed).
    :param using: Optional template engine name for render_to_string.
    :param options: Extra keyword arguments forwarded to casperjs_capture
        (e.g. render, size, selector).
    :return: An open file object when *output* is None, otherwise None.

    NOTE(review): the *format* parameter is unused in this body -- the
    output format is controlled via **options; confirm before relying on it.
    """
    # output stream, as required by casperjs_capture
    stream = BytesIO()
    out_f = None
    # the suffix=.html is a hack for phantomjs which *will*
    # complain about not being able to open source file
    # unless it has a 'html' extension.
    with NamedTemporaryFile(suffix='.html') as render_file:
        template_content = render_to_string(
            template_name,
            context,
            using=using,
        )
        # now, we need to replace all occurences of STATIC_URL
        # with the corresponding file://STATIC_ROOT, but only
        # if STATIC_URL doesn't contain a public URI (like http(s))
        static_url = getattr(settings, 'STATIC_URL', '')
        if settings.STATIC_ROOT and\
                static_url and not static_url.startswith('http'):
            template_content = template_content.replace(
                static_url,
                'file://%s' % settings.STATIC_ROOT
            )
        render_file.write(template_content.encode('utf-8'))
        # this is so that the temporary file actually gets filled
        # with the result.
        render_file.seek(0)
        casperjs_capture(
            stream,
            url='file://%s' % render_file.name,
            **options
        )
    # if no output was provided, use NamedTemporaryFile
    # (so it is an actual file) and return it (so that
    # after function ends, it gets automatically removed)
    if not output:
        out_f = NamedTemporaryFile()
    else:
        # if output was provided, write the rendered
        # content to it
        out_f = open(output, 'wb')
    out_f.write(stream.getvalue())
    out_f.seek(0)
    # return the output if NamedTemporaryFile was used
    if not output:
        return out_f
    else:
        # otherwise, just close the file.
        out_f.close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def go(fn, *args, **kwargs):
"""Launch an operation on a thread and get a handle to its future result. hello from background thread main thread goodbye from background thread 'return value' """
|
if not callable(fn):
raise TypeError('go() requires a function, not %r' % (fn,))
result = [None]
error = []
def target():
try:
result[0] = fn(*args, **kwargs)
except Exception:
# Are we in interpreter shutdown?
if sys:
error.extend(sys.exc_info())
t = threading.Thread(target=target)
t.daemon = True
t.start()
def get_result(timeout=10):
t.join(timeout)
if t.is_alive():
raise AssertionError('timed out waiting for %r' % fn)
if error:
reraise(*error)
return result[0]
return get_result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def going(fn, *args, **kwargs):
    """Launch a thread and wait for its result before exiting the code block.

    Context-manager generator: yields the future returned by ``go(fn, ...)``
    and, when the block exits cleanly, waits on the future (10s) so any
    exception from the background call propagates.  If the block itself
    raised, the future is drained with a short timeout, a secondary failure
    is printed to stderr, and the block's original exception is re-raised.
    """
    future = go(fn, *args, **kwargs)
    try:
        yield future
    except:
        # We are raising an exception, just try to clean up the future.
        exc_info = sys.exc_info()
        try:
            # Shorter than normal timeout.
            future(timeout=1)
        except:
            # The future failed too; report it but keep the block's
            # exception as the one that propagates.
            log_message = ('\nerror in %s:\n'
                           % format_call(inspect.currentframe()))
            sys.stderr.write(log_message)
            traceback.print_exc()
            # sys.stderr.write('exc in %s' % format_call(inspect.currentframe()))
        reraise(*exc_info)
    else:
        # Raise exception or discard result.
        future(timeout=10)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.