repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
maas/python-libmaas | maas/client/viscera/interfaces.py | get_parent | def get_parent(parent):
"""Get the parent to send to the handler."""
if isinstance(parent, Interface):
return parent.id
elif isinstance(parent, int):
return parent
else:
raise TypeError(
"parent must be an Interface or int, not %s" % (
type(parent).__name__)) | python | def get_parent(parent):
"""Get the parent to send to the handler."""
if isinstance(parent, Interface):
return parent.id
elif isinstance(parent, int):
return parent
else:
raise TypeError(
"parent must be an Interface or int, not %s" % (
type(parent).__name__)) | [
"def",
"get_parent",
"(",
"parent",
")",
":",
"if",
"isinstance",
"(",
"parent",
",",
"Interface",
")",
":",
"return",
"parent",
".",
"id",
"elif",
"isinstance",
"(",
"parent",
",",
"int",
")",
":",
"return",
"parent",
"else",
":",
"raise",
"TypeError",
... | Get the parent to send to the handler. | [
"Get",
"the",
"parent",
"to",
"send",
"to",
"the",
"handler",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/interfaces.py#L45-L54 | train | 28,800 |
maas/python-libmaas | maas/client/viscera/interfaces.py | Interface.save | async def save(self):
"""Save this interface."""
if set(self.tags) != set(self._orig_data['tags']):
self._changed_data['tags'] = ','.join(self.tags)
elif 'tags' in self._changed_data:
del self._changed_data['tags']
orig_params = self._orig_data['params']
if not isinstance(orig_params, dict):
orig_params = {}
params = self.params
if not isinstance(params, dict):
params = {}
self._changed_data.pop('params', None)
self._changed_data.update(
calculate_dict_diff(orig_params, params))
if 'vlan' in self._changed_data and self._changed_data['vlan']:
# Update uses the ID of the VLAN, not the VLAN object.
self._changed_data['vlan'] = self._changed_data['vlan']['id']
if (self._orig_data['vlan'] and
'id' in self._orig_data['vlan'] and
self._changed_data['vlan'] == (
self._orig_data['vlan']['id'])):
# VLAN didn't really change, the object was just set to the
# same VLAN.
del self._changed_data['vlan']
await super(Interface, self).save() | python | async def save(self):
"""Save this interface."""
if set(self.tags) != set(self._orig_data['tags']):
self._changed_data['tags'] = ','.join(self.tags)
elif 'tags' in self._changed_data:
del self._changed_data['tags']
orig_params = self._orig_data['params']
if not isinstance(orig_params, dict):
orig_params = {}
params = self.params
if not isinstance(params, dict):
params = {}
self._changed_data.pop('params', None)
self._changed_data.update(
calculate_dict_diff(orig_params, params))
if 'vlan' in self._changed_data and self._changed_data['vlan']:
# Update uses the ID of the VLAN, not the VLAN object.
self._changed_data['vlan'] = self._changed_data['vlan']['id']
if (self._orig_data['vlan'] and
'id' in self._orig_data['vlan'] and
self._changed_data['vlan'] == (
self._orig_data['vlan']['id'])):
# VLAN didn't really change, the object was just set to the
# same VLAN.
del self._changed_data['vlan']
await super(Interface, self).save() | [
"async",
"def",
"save",
"(",
"self",
")",
":",
"if",
"set",
"(",
"self",
".",
"tags",
")",
"!=",
"set",
"(",
"self",
".",
"_orig_data",
"[",
"'tags'",
"]",
")",
":",
"self",
".",
"_changed_data",
"[",
"'tags'",
"]",
"=",
"','",
".",
"join",
"(",
... | Save this interface. | [
"Save",
"this",
"interface",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/interfaces.py#L127-L152 | train | 28,801 |
maas/python-libmaas | maas/client/viscera/interfaces.py | Interface.disconnect | async def disconnect(self):
"""Disconnect this interface."""
self._data = await self._handler.disconnect(
system_id=self.node.system_id, id=self.id) | python | async def disconnect(self):
"""Disconnect this interface."""
self._data = await self._handler.disconnect(
system_id=self.node.system_id, id=self.id) | [
"async",
"def",
"disconnect",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"disconnect",
"(",
"system_id",
"=",
"self",
".",
"node",
".",
"system_id",
",",
"id",
"=",
"self",
".",
"id",
")"
] | Disconnect this interface. | [
"Disconnect",
"this",
"interface",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/interfaces.py#L159-L162 | train | 28,802 |
maas/python-libmaas | maas/client/viscera/interfaces.py | InterfaceLink.delete | async def delete(self):
"""Delete this interface link."""
interface = self._data['interface']
data = await interface._handler.unlink_subnet(
system_id=interface.node.system_id, id=interface.id, _id=self.id)
interface._data['links'] = list(data['links'])
interface._orig_data['links'] = copy.deepcopy(interface._data['links']) | python | async def delete(self):
"""Delete this interface link."""
interface = self._data['interface']
data = await interface._handler.unlink_subnet(
system_id=interface.node.system_id, id=interface.id, _id=self.id)
interface._data['links'] = list(data['links'])
interface._orig_data['links'] = copy.deepcopy(interface._data['links']) | [
"async",
"def",
"delete",
"(",
"self",
")",
":",
"interface",
"=",
"self",
".",
"_data",
"[",
"'interface'",
"]",
"data",
"=",
"await",
"interface",
".",
"_handler",
".",
"unlink_subnet",
"(",
"system_id",
"=",
"interface",
".",
"node",
".",
"system_id",
... | Delete this interface link. | [
"Delete",
"this",
"interface",
"link",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/interfaces.py#L192-L198 | train | 28,803 |
maas/python-libmaas | maas/client/viscera/interfaces.py | InterfaceLink.set_as_default_gateway | async def set_as_default_gateway(self):
"""Set this link as the default gateway for the node."""
interface = self._data['interface']
await interface._handler.set_default_gateway(
system_id=interface.node.system_id, id=interface.id,
link_id=self.id) | python | async def set_as_default_gateway(self):
"""Set this link as the default gateway for the node."""
interface = self._data['interface']
await interface._handler.set_default_gateway(
system_id=interface.node.system_id, id=interface.id,
link_id=self.id) | [
"async",
"def",
"set_as_default_gateway",
"(",
"self",
")",
":",
"interface",
"=",
"self",
".",
"_data",
"[",
"'interface'",
"]",
"await",
"interface",
".",
"_handler",
".",
"set_default_gateway",
"(",
"system_id",
"=",
"interface",
".",
"node",
".",
"system_i... | Set this link as the default gateway for the node. | [
"Set",
"this",
"link",
"as",
"the",
"default",
"gateway",
"for",
"the",
"node",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/interfaces.py#L200-L205 | train | 28,804 |
maas/python-libmaas | maas/client/viscera/interfaces.py | InterfaceLinksType.create | async def create(
cls, interface: Interface, mode: LinkMode,
subnet: Union[Subnet, int] = None, ip_address: str = None,
force: bool = False, default_gateway: bool = False):
"""
Create a link on `Interface` in MAAS.
:param interface: Interface to create the link on.
:type interface: `Interface`
:param mode: Mode of the link.
:type mode: `LinkMode`
:param subnet: The subnet to create the link on (optional).
:type subnet: `Subnet` or `int`
:param ip_address: The IP address to assign to the link.
:type ip_address: `str`
:param force: If True, allows `LinkMode.LINK_UP` to be created even if
other links already exist. Also allows the selection of any
subnet no matter the VLAN the subnet belongs to. Using this option
will cause all other interface links to be deleted (optional).
:type force: `bool`
:param default_gateway: If True, sets the gateway IP address for the
subnet as the default gateway for the node this interface belongs
to. Option can only be used with the `LinkMode.AUTO` and
`LinkMode.STATIC` modes.
:type default_gateway: `bool`
:returns: The created InterfaceLink.
:rtype: `InterfaceLink`
"""
if not isinstance(interface, Interface):
raise TypeError(
"interface must be an Interface, not %s"
% type(interface).__name__)
if not isinstance(mode, LinkMode):
raise TypeError(
"mode must be a LinkMode, not %s"
% type(mode).__name__)
if subnet is not None:
if isinstance(subnet, Subnet):
subnet = subnet.id
elif isinstance(subnet, int):
pass
else:
raise TypeError(
"subnet must be a Subnet or int, not %s"
% type(subnet).__name__)
if mode in [LinkMode.AUTO, LinkMode.STATIC]:
if subnet is None:
raise ValueError('subnet is required for %s' % mode)
if default_gateway and mode not in [LinkMode.AUTO, LinkMode.STATIC]:
raise ValueError('cannot set as default_gateway for %s' % mode)
params = {
'system_id': interface.node.system_id,
'id': interface.id,
'mode': mode.value,
'force': force,
'default_gateway': default_gateway,
}
if subnet is not None:
params['subnet'] = subnet
if ip_address is not None:
params['ip_address'] = ip_address
# The API doesn't return just the link it returns the whole interface.
# Store the link ids before the save to find the addition at the end.
link_ids = {
link.id
for link in interface.links
}
data = await interface._handler.link_subnet(**params)
# Update the links on the interface, except for the newly created link
# the `ManagedCreate` wrapper will add that to the interfaces link data
# automatically.
new_links = {
link['id']: link
for link in data['links']
}
links_diff = list(set(new_links.keys()) - link_ids)
new_link = new_links.pop(links_diff[0])
interface._data['links'] = list(new_links.values())
interface._orig_data['links'] = copy.deepcopy(interface._data['links'])
return cls._object(new_link) | python | async def create(
cls, interface: Interface, mode: LinkMode,
subnet: Union[Subnet, int] = None, ip_address: str = None,
force: bool = False, default_gateway: bool = False):
"""
Create a link on `Interface` in MAAS.
:param interface: Interface to create the link on.
:type interface: `Interface`
:param mode: Mode of the link.
:type mode: `LinkMode`
:param subnet: The subnet to create the link on (optional).
:type subnet: `Subnet` or `int`
:param ip_address: The IP address to assign to the link.
:type ip_address: `str`
:param force: If True, allows `LinkMode.LINK_UP` to be created even if
other links already exist. Also allows the selection of any
subnet no matter the VLAN the subnet belongs to. Using this option
will cause all other interface links to be deleted (optional).
:type force: `bool`
:param default_gateway: If True, sets the gateway IP address for the
subnet as the default gateway for the node this interface belongs
to. Option can only be used with the `LinkMode.AUTO` and
`LinkMode.STATIC` modes.
:type default_gateway: `bool`
:returns: The created InterfaceLink.
:rtype: `InterfaceLink`
"""
if not isinstance(interface, Interface):
raise TypeError(
"interface must be an Interface, not %s"
% type(interface).__name__)
if not isinstance(mode, LinkMode):
raise TypeError(
"mode must be a LinkMode, not %s"
% type(mode).__name__)
if subnet is not None:
if isinstance(subnet, Subnet):
subnet = subnet.id
elif isinstance(subnet, int):
pass
else:
raise TypeError(
"subnet must be a Subnet or int, not %s"
% type(subnet).__name__)
if mode in [LinkMode.AUTO, LinkMode.STATIC]:
if subnet is None:
raise ValueError('subnet is required for %s' % mode)
if default_gateway and mode not in [LinkMode.AUTO, LinkMode.STATIC]:
raise ValueError('cannot set as default_gateway for %s' % mode)
params = {
'system_id': interface.node.system_id,
'id': interface.id,
'mode': mode.value,
'force': force,
'default_gateway': default_gateway,
}
if subnet is not None:
params['subnet'] = subnet
if ip_address is not None:
params['ip_address'] = ip_address
# The API doesn't return just the link it returns the whole interface.
# Store the link ids before the save to find the addition at the end.
link_ids = {
link.id
for link in interface.links
}
data = await interface._handler.link_subnet(**params)
# Update the links on the interface, except for the newly created link
# the `ManagedCreate` wrapper will add that to the interfaces link data
# automatically.
new_links = {
link['id']: link
for link in data['links']
}
links_diff = list(set(new_links.keys()) - link_ids)
new_link = new_links.pop(links_diff[0])
interface._data['links'] = list(new_links.values())
interface._orig_data['links'] = copy.deepcopy(interface._data['links'])
return cls._object(new_link) | [
"async",
"def",
"create",
"(",
"cls",
",",
"interface",
":",
"Interface",
",",
"mode",
":",
"LinkMode",
",",
"subnet",
":",
"Union",
"[",
"Subnet",
",",
"int",
"]",
"=",
"None",
",",
"ip_address",
":",
"str",
"=",
"None",
",",
"force",
":",
"bool",
... | Create a link on `Interface` in MAAS.
:param interface: Interface to create the link on.
:type interface: `Interface`
:param mode: Mode of the link.
:type mode: `LinkMode`
:param subnet: The subnet to create the link on (optional).
:type subnet: `Subnet` or `int`
:param ip_address: The IP address to assign to the link.
:type ip_address: `str`
:param force: If True, allows `LinkMode.LINK_UP` to be created even if
other links already exist. Also allows the selection of any
subnet no matter the VLAN the subnet belongs to. Using this option
will cause all other interface links to be deleted (optional).
:type force: `bool`
:param default_gateway: If True, sets the gateway IP address for the
subnet as the default gateway for the node this interface belongs
to. Option can only be used with the `LinkMode.AUTO` and
`LinkMode.STATIC` modes.
:type default_gateway: `bool`
:returns: The created InterfaceLink.
:rtype: `InterfaceLink` | [
"Create",
"a",
"link",
"on",
"Interface",
"in",
"MAAS",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/interfaces.py#L211-L291 | train | 28,805 |
maas/python-libmaas | maas/client/viscera/bcache_cache_sets.py | BcacheCacheSetsType.create | async def create(
cls, node: Union[Node, str],
cache_device: Union[BlockDevice, Partition]):
"""
Create a BcacheCacheSet on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param cache_device: Block device or partition to create
the cache set on.
:type cache_device: `BlockDevice` or `Partition`
"""
params = {}
if isinstance(node, str):
params['system_id'] = node
elif isinstance(node, Node):
params['system_id'] = node.system_id
else:
raise TypeError(
'node must be a Node or str, not %s' % (
type(node).__name__))
if isinstance(cache_device, BlockDevice):
params['cache_device'] = cache_device.id
elif isinstance(cache_device, Partition):
params['cache_partition'] = cache_device.id
else:
raise TypeError(
'cache_device must be a BlockDevice or Partition, not %s' % (
type(cache_device).__name__))
return cls._object(await cls._handler.create(**params)) | python | async def create(
cls, node: Union[Node, str],
cache_device: Union[BlockDevice, Partition]):
"""
Create a BcacheCacheSet on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param cache_device: Block device or partition to create
the cache set on.
:type cache_device: `BlockDevice` or `Partition`
"""
params = {}
if isinstance(node, str):
params['system_id'] = node
elif isinstance(node, Node):
params['system_id'] = node.system_id
else:
raise TypeError(
'node must be a Node or str, not %s' % (
type(node).__name__))
if isinstance(cache_device, BlockDevice):
params['cache_device'] = cache_device.id
elif isinstance(cache_device, Partition):
params['cache_partition'] = cache_device.id
else:
raise TypeError(
'cache_device must be a BlockDevice or Partition, not %s' % (
type(cache_device).__name__))
return cls._object(await cls._handler.create(**params)) | [
"async",
"def",
"create",
"(",
"cls",
",",
"node",
":",
"Union",
"[",
"Node",
",",
"str",
"]",
",",
"cache_device",
":",
"Union",
"[",
"BlockDevice",
",",
"Partition",
"]",
")",
":",
"params",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"node",
",",
"s... | Create a BcacheCacheSet on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param cache_device: Block device or partition to create
the cache set on.
:type cache_device: `BlockDevice` or `Partition` | [
"Create",
"a",
"BcacheCacheSet",
"on",
"a",
"Node",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/bcache_cache_sets.py#L73-L103 | train | 28,806 |
maas/python-libmaas | maas/client/flesh/tabular.py | Table.render | def render(self, target, data):
"""Render the table."""
rows = self.get_rows(target, data)
rows = self._filter_rows(rows)
renderer = getattr(self, "_render_%s" % target.name, None)
if renderer is None:
raise ValueError(
"Cannot render %r for %s." % (self.value, target))
else:
return renderer(rows) | python | def render(self, target, data):
"""Render the table."""
rows = self.get_rows(target, data)
rows = self._filter_rows(rows)
renderer = getattr(self, "_render_%s" % target.name, None)
if renderer is None:
raise ValueError(
"Cannot render %r for %s." % (self.value, target))
else:
return renderer(rows) | [
"def",
"render",
"(",
"self",
",",
"target",
",",
"data",
")",
":",
"rows",
"=",
"self",
".",
"get_rows",
"(",
"target",
",",
"data",
")",
"rows",
"=",
"self",
".",
"_filter_rows",
"(",
"rows",
")",
"renderer",
"=",
"getattr",
"(",
"self",
",",
"\"... | Render the table. | [
"Render",
"the",
"table",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/flesh/tabular.py#L75-L84 | train | 28,807 |
maas/python-libmaas | maas/client/viscera/fabrics.py | FabricType.get_default | async def get_default(cls):
"""
Get the 'default' Fabric for the MAAS.
"""
data = await cls._handler.read(id=cls._default_fabric_id)
return cls(data) | python | async def get_default(cls):
"""
Get the 'default' Fabric for the MAAS.
"""
data = await cls._handler.read(id=cls._default_fabric_id)
return cls(data) | [
"async",
"def",
"get_default",
"(",
"cls",
")",
":",
"data",
"=",
"await",
"cls",
".",
"_handler",
".",
"read",
"(",
"id",
"=",
"cls",
".",
"_default_fabric_id",
")",
"return",
"cls",
"(",
"data",
")"
] | Get the 'default' Fabric for the MAAS. | [
"Get",
"the",
"default",
"Fabric",
"for",
"the",
"MAAS",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/fabrics.py#L60-L65 | train | 28,808 |
maas/python-libmaas | maas/client/viscera/fabrics.py | Fabric.delete | async def delete(self):
"""Delete this Fabric."""
if self.id == self._origin.Fabric._default_fabric_id:
raise CannotDelete("Default fabric cannot be deleted.")
await self._handler.delete(id=self.id) | python | async def delete(self):
"""Delete this Fabric."""
if self.id == self._origin.Fabric._default_fabric_id:
raise CannotDelete("Default fabric cannot be deleted.")
await self._handler.delete(id=self.id) | [
"async",
"def",
"delete",
"(",
"self",
")",
":",
"if",
"self",
".",
"id",
"==",
"self",
".",
"_origin",
".",
"Fabric",
".",
"_default_fabric_id",
":",
"raise",
"CannotDelete",
"(",
"\"Default fabric cannot be deleted.\"",
")",
"await",
"self",
".",
"_handler",... | Delete this Fabric. | [
"Delete",
"this",
"Fabric",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/fabrics.py#L82-L86 | train | 28,809 |
maas/python-libmaas | maas/client/viscera/__init__.py | dir_class | def dir_class(cls):
"""Return a list of names available on `cls`.
Eliminates names that bind to an `ObjectMethod` without a corresponding
class method; see `ObjectMethod`.
"""
# Class attributes (including methods).
for name, value in vars_class(cls).items():
if isinstance(value, ObjectMethod):
if value.has_classmethod:
yield name
elif isinstance(value, Disabled):
pass # Hide this; disabled.
else:
yield name
# Metaclass attributes.
for name, value in vars_class(type(cls)).items():
if name == "mro":
pass # Hide this; not interesting.
elif isinstance(value, Disabled):
pass # Hide this; disabled.
else:
yield name | python | def dir_class(cls):
"""Return a list of names available on `cls`.
Eliminates names that bind to an `ObjectMethod` without a corresponding
class method; see `ObjectMethod`.
"""
# Class attributes (including methods).
for name, value in vars_class(cls).items():
if isinstance(value, ObjectMethod):
if value.has_classmethod:
yield name
elif isinstance(value, Disabled):
pass # Hide this; disabled.
else:
yield name
# Metaclass attributes.
for name, value in vars_class(type(cls)).items():
if name == "mro":
pass # Hide this; not interesting.
elif isinstance(value, Disabled):
pass # Hide this; disabled.
else:
yield name | [
"def",
"dir_class",
"(",
"cls",
")",
":",
"# Class attributes (including methods).",
"for",
"name",
",",
"value",
"in",
"vars_class",
"(",
"cls",
")",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"ObjectMethod",
")",
":",
"if",
"valu... | Return a list of names available on `cls`.
Eliminates names that bind to an `ObjectMethod` without a corresponding
class method; see `ObjectMethod`. | [
"Return",
"a",
"list",
"of",
"names",
"available",
"on",
"cls",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L71-L93 | train | 28,810 |
maas/python-libmaas | maas/client/viscera/__init__.py | dir_instance | def dir_instance(inst):
"""Return a list of names available on `inst`.
Eliminates names that bind to an `ObjectMethod` without a corresponding
instance method; see `ObjectMethod`.
"""
# Skip instance attributes; __slots__ is automatically defined, and
# descriptors are used to define attributes. Instead, go straight to class
# attributes (including methods).
for name, value in vars_class(type(inst)).items():
if isinstance(value, ObjectMethod):
if value.has_instancemethod:
yield name
elif isinstance(value, Disabled):
pass # Hide this; disabled.
elif isinstance(value, (classmethod, staticmethod)):
pass # Hide this; not interesting here.
else:
yield name | python | def dir_instance(inst):
"""Return a list of names available on `inst`.
Eliminates names that bind to an `ObjectMethod` without a corresponding
instance method; see `ObjectMethod`.
"""
# Skip instance attributes; __slots__ is automatically defined, and
# descriptors are used to define attributes. Instead, go straight to class
# attributes (including methods).
for name, value in vars_class(type(inst)).items():
if isinstance(value, ObjectMethod):
if value.has_instancemethod:
yield name
elif isinstance(value, Disabled):
pass # Hide this; disabled.
elif isinstance(value, (classmethod, staticmethod)):
pass # Hide this; not interesting here.
else:
yield name | [
"def",
"dir_instance",
"(",
"inst",
")",
":",
"# Skip instance attributes; __slots__ is automatically defined, and",
"# descriptors are used to define attributes. Instead, go straight to class",
"# attributes (including methods).",
"for",
"name",
",",
"value",
"in",
"vars_class",
"(",
... | Return a list of names available on `inst`.
Eliminates names that bind to an `ObjectMethod` without a corresponding
instance method; see `ObjectMethod`. | [
"Return",
"a",
"list",
"of",
"names",
"available",
"on",
"inst",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L96-L114 | train | 28,811 |
maas/python-libmaas | maas/client/viscera/__init__.py | is_pk_descriptor | def is_pk_descriptor(descriptor, include_alt=False):
"""Return true if `descriptor` is a primary key."""
if descriptor.pk is True or type(descriptor.pk) is int:
return True
if include_alt:
return descriptor.alt_pk is True or type(descriptor.alt_pk) is int
else:
return False | python | def is_pk_descriptor(descriptor, include_alt=False):
"""Return true if `descriptor` is a primary key."""
if descriptor.pk is True or type(descriptor.pk) is int:
return True
if include_alt:
return descriptor.alt_pk is True or type(descriptor.alt_pk) is int
else:
return False | [
"def",
"is_pk_descriptor",
"(",
"descriptor",
",",
"include_alt",
"=",
"False",
")",
":",
"if",
"descriptor",
".",
"pk",
"is",
"True",
"or",
"type",
"(",
"descriptor",
".",
"pk",
")",
"is",
"int",
":",
"return",
"True",
"if",
"include_alt",
":",
"return"... | Return true if `descriptor` is a primary key. | [
"Return",
"true",
"if",
"descriptor",
"is",
"a",
"primary",
"key",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L183-L190 | train | 28,812 |
maas/python-libmaas | maas/client/viscera/__init__.py | get_pk_descriptors | def get_pk_descriptors(cls):
"""Return tuple of tuples with attribute name and descriptor on the
`cls` that is defined as the primary keys."""
pk_fields = {
name: descriptor
for name, descriptor in vars_class(cls).items()
if isinstance(descriptor, ObjectField) and is_pk_descriptor(descriptor)
}
alt_pk_fields = defaultdict(list)
for name, descriptor in vars_class(cls).items():
if isinstance(descriptor, ObjectField):
if descriptor.alt_pk is True:
alt_pk_fields[0].append((name, descriptor))
elif type(descriptor.alt_pk) is int:
alt_pk_fields[descriptor.alt_pk].append((name, descriptor))
if len(pk_fields) == 1:
return ((pk_fields.popitem(),), (alt_pk_fields[0],))
elif len(pk_fields) > 1:
unique_pk_fields = {
name: descriptor
for name, descriptor in pk_fields.items()
if descriptor.pk is True
}
if unique_pk_fields:
raise AttributeError(
"more than one field is marked as unique primary key: %s" % (
', '.join(sorted(pk_fields))))
pk_descriptors = tuple(sorted((
(name, descriptor)
for name, descriptor in pk_fields.items()
), key=lambda item: item[1].pk))
alt_pk_descriptors = tuple(
alt_pk_fields[idx]
for idx, (name, descriptor) in enumerate(pk_descriptors)
)
return pk_descriptors, alt_pk_descriptors
else:
return tuple(), tuple() | python | def get_pk_descriptors(cls):
"""Return tuple of tuples with attribute name and descriptor on the
`cls` that is defined as the primary keys."""
pk_fields = {
name: descriptor
for name, descriptor in vars_class(cls).items()
if isinstance(descriptor, ObjectField) and is_pk_descriptor(descriptor)
}
alt_pk_fields = defaultdict(list)
for name, descriptor in vars_class(cls).items():
if isinstance(descriptor, ObjectField):
if descriptor.alt_pk is True:
alt_pk_fields[0].append((name, descriptor))
elif type(descriptor.alt_pk) is int:
alt_pk_fields[descriptor.alt_pk].append((name, descriptor))
if len(pk_fields) == 1:
return ((pk_fields.popitem(),), (alt_pk_fields[0],))
elif len(pk_fields) > 1:
unique_pk_fields = {
name: descriptor
for name, descriptor in pk_fields.items()
if descriptor.pk is True
}
if unique_pk_fields:
raise AttributeError(
"more than one field is marked as unique primary key: %s" % (
', '.join(sorted(pk_fields))))
pk_descriptors = tuple(sorted((
(name, descriptor)
for name, descriptor in pk_fields.items()
), key=lambda item: item[1].pk))
alt_pk_descriptors = tuple(
alt_pk_fields[idx]
for idx, (name, descriptor) in enumerate(pk_descriptors)
)
return pk_descriptors, alt_pk_descriptors
else:
return tuple(), tuple() | [
"def",
"get_pk_descriptors",
"(",
"cls",
")",
":",
"pk_fields",
"=",
"{",
"name",
":",
"descriptor",
"for",
"name",
",",
"descriptor",
"in",
"vars_class",
"(",
"cls",
")",
".",
"items",
"(",
")",
"if",
"isinstance",
"(",
"descriptor",
",",
"ObjectField",
... | Return tuple of tuples with attribute name and descriptor on the
`cls` that is defined as the primary keys. | [
"Return",
"tuple",
"of",
"tuples",
"with",
"attribute",
"name",
"and",
"descriptor",
"on",
"the",
"cls",
"that",
"is",
"defined",
"as",
"the",
"primary",
"keys",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L193-L230 | train | 28,813 |
maas/python-libmaas | maas/client/viscera/__init__.py | ManagedCreate | def ManagedCreate(super_cls):
"""Dynamically creates a `create` method for a `ObjectSet.Managed` class
that calls the `super_cls.create`.
The first positional argument that is passed to the `super_cls.create` is
the `_manager` that was set using `ObjectSet.Managed`. The created object
is added to the `ObjectSet.Managed` also placed in the correct
`_data[field]` and `_orig_data[field]` for the `_manager` object.
"""
@wraps(super_cls.create)
async def _create(self, *args, **kwargs):
cls = type(self)
manager = getattr(cls, '_manager', None)
manager_field = getattr(cls, '_manager_field', None)
if manager is not None and manager_field is not None:
args = (manager,) + args
new_obj = await super_cls.create(*args, **kwargs)
self._items = self._items + [new_obj]
manager._data[manager_field.name] = (
manager._data[manager_field.name] +
[new_obj._data])
manager._orig_data[manager_field.name] = (
manager._orig_data[manager_field.name] +
[new_obj._data])
return new_obj
else:
raise AttributeError(
'create is not supported; %s is not a managed set' % (
super_cls.__name__))
return _create | python | def ManagedCreate(super_cls):
"""Dynamically creates a `create` method for a `ObjectSet.Managed` class
that calls the `super_cls.create`.
The first positional argument that is passed to the `super_cls.create` is
the `_manager` that was set using `ObjectSet.Managed`. The created object
is added to the `ObjectSet.Managed` also placed in the correct
`_data[field]` and `_orig_data[field]` for the `_manager` object.
"""
@wraps(super_cls.create)
async def _create(self, *args, **kwargs):
cls = type(self)
manager = getattr(cls, '_manager', None)
manager_field = getattr(cls, '_manager_field', None)
if manager is not None and manager_field is not None:
args = (manager,) + args
new_obj = await super_cls.create(*args, **kwargs)
self._items = self._items + [new_obj]
manager._data[manager_field.name] = (
manager._data[manager_field.name] +
[new_obj._data])
manager._orig_data[manager_field.name] = (
manager._orig_data[manager_field.name] +
[new_obj._data])
return new_obj
else:
raise AttributeError(
'create is not supported; %s is not a managed set' % (
super_cls.__name__))
return _create | [
"def",
"ManagedCreate",
"(",
"super_cls",
")",
":",
"@",
"wraps",
"(",
"super_cls",
".",
"create",
")",
"async",
"def",
"_create",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
"=",
"type",
"(",
"self",
")",
"manager",
"=... | Dynamically creates a `create` method for a `ObjectSet.Managed` class
that calls the `super_cls.create`.
The first positional argument that is passed to the `super_cls.create` is
the `_manager` that was set using `ObjectSet.Managed`. The created object
is added to the `ObjectSet.Managed` also placed in the correct
`_data[field]` and `_orig_data[field]` for the `_manager` object. | [
"Dynamically",
"creates",
"a",
"create",
"method",
"for",
"a",
"ObjectSet",
".",
"Managed",
"class",
"that",
"calls",
"the",
"super_cls",
".",
"create",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L488-L518 | train | 28,814 |
def mapping_of(cls):
    """Expects a mapping from some key to data for `cls` instances.

    Returns a converter that turns ``{key: datum}`` into
    ``{key: cls(datum)}``, rejecting anything that is not a mapping.
    """
    def mapper(data):
        if isinstance(data, Mapping):
            return {key: cls(value) for key, value in data.items()}
        raise TypeError(
            "data must be a mapping, not %s" % type(data).__name__)
    return mapper
"""Expects a mapping from some key to data for `cls` instances."""
def mapper(data):
if not isinstance(data, Mapping):
raise TypeError(
"data must be a mapping, not %s"
% type(data).__name__)
return {
key: cls(value)
for key, value in data.items()
}
return mapper | [
"def",
"mapping_of",
"(",
"cls",
")",
":",
"def",
"mapper",
"(",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"Mapping",
")",
":",
"raise",
"TypeError",
"(",
"\"data must be a mapping, not %s\"",
"%",
"type",
"(",
"data",
")",
".",
"__n... | Expects a mapping from some key to data for `cls` instances. | [
"Expects",
"a",
"mapping",
"from",
"some",
"key",
"to",
"data",
"for",
"cls",
"instances",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L1069-L1080 | train | 28,815 |
def find_objects(modules):
    """Find subclasses of `Object` and `ObjectSet` in the given modules.

    :param modules: The full *names* of modules to include. These modules
        MUST have been imported in advance.
    """
    # Walk every known subclass of both roots; keep only those whose
    # defining module is in the requested set.
    candidates = chain(
        get_all_subclasses(Object),
        get_all_subclasses(ObjectSet),
    )
    return {
        candidate.__name__: candidate
        for candidate in candidates
        if candidate.__module__ in modules
    }
"""Find subclasses of `Object` and `ObjectSet` in the given modules.
:param modules: The full *names* of modules to include. These modules MUST
have been imported in advance.
"""
return {
subclass.__name__: subclass
for subclass in chain(
get_all_subclasses(Object),
get_all_subclasses(ObjectSet),
)
if subclass.__module__ in modules
} | [
"def",
"find_objects",
"(",
"modules",
")",
":",
"return",
"{",
"subclass",
".",
"__name__",
":",
"subclass",
"for",
"subclass",
"in",
"chain",
"(",
"get_all_subclasses",
"(",
"Object",
")",
",",
"get_all_subclasses",
"(",
"ObjectSet",
")",
",",
")",
"if",
... | Find subclasses of `Object` and `ObjectSet` in the given modules.
:param modules: The full *names* of modules to include. These modules MUST
have been imported in advance. | [
"Find",
"subclasses",
"of",
"Object",
"and",
"ObjectSet",
"in",
"the",
"given",
"modules",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L1089-L1102 | train | 28,816 |
def bind(cls, origin, handler, *, name=None):
    """Bind this object to the given origin and handler.

    :param origin: An instance of `Origin`.
    :param handler: An instance of `bones.HandlerAPI`.
    :return: A subclass of this class.
    """
    if name is None:
        name = cls.__name__
    attrs = {
        "_origin": origin,
        "_handler": handler,
        # Could do better than hard-coding the module name?
        "__module__": "origin",
    }
    return type(name, (cls,), attrs)
"""Bind this object to the given origin and handler.
:param origin: An instance of `Origin`.
:param handler: An instance of `bones.HandlerAPI`.
:return: A subclass of this class.
"""
name = cls.__name__ if name is None else name
attrs = {
"_origin": origin, "_handler": handler,
"__module__": "origin", # Could do better?
}
return type(name, (cls,), attrs) | [
"def",
"bind",
"(",
"cls",
",",
"origin",
",",
"handler",
",",
"*",
",",
"name",
"=",
"None",
")",
":",
"name",
"=",
"cls",
".",
"__name__",
"if",
"name",
"is",
"None",
"else",
"name",
"attrs",
"=",
"{",
"\"_origin\"",
":",
"origin",
",",
"\"_handl... | Bind this object to the given origin and handler.
:param origin: An instance of `Origin`.
:param handler: An instance of `bones.HandlerAPI`.
:return: A subclass of this class. | [
"Bind",
"this",
"object",
"to",
"the",
"given",
"origin",
"and",
"handler",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L157-L169 | train | 28,817 |
async def refresh(self):
    """Refresh the object from MAAS.

    Re-reads this object via ``cls.read`` using its primary-key field(s)
    and adopts the fresh data in place.

    :raises AttributeError: if the class has no ``read`` method, if no
        primary-key fields are defined, or if neither a primary-key field
        nor any of its alternatives is present on this instance.
    :raises TypeError: if ``cls.read`` returns an object of a different
        class.
    """
    cls = type(self)
    if hasattr(cls, 'read'):
        # `descriptors` name the fields whose values must be passed to
        # `cls.read`; `alt_descriptors` list fallback field names (one
        # list per descriptor) to try when the primary field is missing.
        descriptors, alt_descriptors = get_pk_descriptors(cls)
        if len(descriptors) == 1:
            try:
                obj = await cls.read(getattr(self, descriptors[0][0]))
            except AttributeError:
                # Primary key missing on this instance; fall back to
                # the first alternative field that exists.
                found = False
                for alt_name, _ in alt_descriptors[0]:
                    if hasattr(self, alt_name):
                        obj = await cls.read(getattr(self, alt_name))
                        found = True
                        break
                if not found:
                    raise
        elif len(descriptors) > 1:
            # Composite key: gather one value per descriptor, again
            # trying the alternatives when a primary field is absent.
            args = []
            for idx, (name, _) in enumerate(descriptors):
                try:
                    args.append(getattr(self, name))
                except AttributeError:
                    found = False
                    for alt_name, _ in alt_descriptors[idx]:
                        if hasattr(self, alt_name):
                            args.append(getattr(self, alt_name))
                            found = True
                            break
                    if not found:
                        raise
            obj = await cls.read(*args)
        else:
            raise AttributeError(
                "unable to perform 'refresh' no primary key "
                "fields defined.")
        if type(obj) is cls:
            # Adopt the freshly read data wholesale.
            self._data = obj._data
            self._loaded = True
        else:
            raise TypeError(
                "result of '%s.read' must be '%s', not '%s'" % (
                    cls.__name__, cls.__name__, type(obj).__name__))
    else:
        raise AttributeError(
            "'%s' object doesn't support refresh." % cls.__name__)
"""Refresh the object from MAAS."""
cls = type(self)
if hasattr(cls, 'read'):
descriptors, alt_descriptors = get_pk_descriptors(cls)
if len(descriptors) == 1:
try:
obj = await cls.read(getattr(self, descriptors[0][0]))
except AttributeError:
found = False
for alt_name, _ in alt_descriptors[0]:
if hasattr(self, alt_name):
obj = await cls.read(getattr(self, alt_name))
found = True
break
if not found:
raise
elif len(descriptors) > 1:
args = []
for idx, (name, _) in enumerate(descriptors):
try:
args.append(getattr(self, name))
except AttributeError:
found = False
for alt_name, _ in alt_descriptors[idx]:
if hasattr(self, alt_name):
args.append(getattr(self, alt_name))
found = True
break
if not found:
raise
obj = await cls.read(*args)
else:
raise AttributeError(
"unable to perform 'refresh' no primary key "
"fields defined.")
if type(obj) is cls:
self._data = obj._data
self._loaded = True
else:
raise TypeError(
"result of '%s.read' must be '%s', not '%s'" % (
cls.__name__, cls.__name__, type(obj).__name__))
else:
raise AttributeError(
"'%s' object doesn't support refresh." % cls.__name__) | [
"async",
"def",
"refresh",
"(",
"self",
")",
":",
"cls",
"=",
"type",
"(",
"self",
")",
"if",
"hasattr",
"(",
"cls",
",",
"'read'",
")",
":",
"descriptors",
",",
"alt_descriptors",
"=",
"get_pk_descriptors",
"(",
"cls",
")",
"if",
"len",
"(",
"descript... | Refresh the object from MAAS. | [
"Refresh",
"the",
"object",
"from",
"MAAS",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L426-L471 | train | 28,818 |
async def save(self):
    """Save the object in MAAS.

    Sends only the changed fields (plus the handler's required
    parameters) to the ``update`` endpoint. A no-op when nothing has
    changed.
    """
    if not hasattr(self._handler, "update"):
        raise AttributeError(
            "'%s' object doesn't support save." % type(self).__name__)
    if not self._changed_data:
        # Nothing changed locally; avoid a pointless round trip.
        return
    update_data = dict(self._changed_data)
    # The handler's declared parameters must always come from the
    # original (server-side) data, e.g. the primary key.
    update_data.update({
        key: self._orig_data[key]
        for key in self._handler.params
    })
    self._data = await self._handler.update(**update_data)
"""Save the object in MAAS."""
if hasattr(self._handler, "update"):
if self._changed_data:
update_data = dict(self._changed_data)
update_data.update({
key: self._orig_data[key]
for key in self._handler.params
})
self._data = await self._handler.update(**update_data)
else:
raise AttributeError(
"'%s' object doesn't support save." % type(self).__name__) | [
"async",
"def",
"save",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"_handler",
",",
"\"update\"",
")",
":",
"if",
"self",
".",
"_changed_data",
":",
"update_data",
"=",
"dict",
"(",
"self",
".",
"_changed_data",
")",
"update_data",
".",
... | Save the object in MAAS. | [
"Save",
"the",
"object",
"in",
"MAAS",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L473-L485 | train | 28,819 |
def Managed(cls, manager, field, items):
    """Create a custom `ObjectSet` that is managed by a related `Object.`

    :param manager: The manager of the `ObjectSet`. This is the `Object`
        that manages this set of objects.
    :param field: The field on the `manager` that created this managed
        `ObjectSet`.
    :param items: The items in the `ObjectSet`.
    """
    attrs = {"_manager": manager, "_manager_field": field}
    if hasattr(cls, "create"):
        # Wrap `create` so newly created objects are also recorded on
        # the manager's data.
        attrs['create'] = ManagedCreate(cls)
    managed_name = "%s.Managed#%s" % (
        cls.__name__, manager.__class__.__name__)
    managed_cls = type(managed_name, (cls,), attrs)
    return managed_cls(items)
"""Create a custom `ObjectSet` that is managed by a related `Object.`
:param manager: The manager of the `ObjectSet`. This is the `Object`
that manages this set of objects.
:param field: The field on the `manager` that created this managed
`ObjectSet`.
:param items: The items in the `ObjectSet`.
"""
attrs = {
"_manager": manager,
"_manager_field": field,
}
if hasattr(cls, "create"):
attrs['create'] = ManagedCreate(cls)
cls = type(
"%s.Managed#%s" % (
cls.__name__, manager.__class__.__name__), (cls,), attrs)
return cls(items) | [
"def",
"Managed",
"(",
"cls",
",",
"manager",
",",
"field",
",",
"items",
")",
":",
"attrs",
"=",
"{",
"\"_manager\"",
":",
"manager",
",",
"\"_manager_field\"",
":",
"field",
",",
"}",
"if",
"hasattr",
"(",
"cls",
",",
"\"create\"",
")",
":",
"attrs",... | Create a custom `ObjectSet` that is managed by a related `Object.`
:param manager: The manager of the `ObjectSet`. This is the `Object`
that manages this set of objects.
:param field: The field on the `manager` that created this managed
`ObjectSet`.
:param items: The items in the `ObjectSet`. | [
"Create",
"a",
"custom",
"ObjectSet",
"that",
"is",
"managed",
"by",
"a",
"related",
"Object",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L529-L547 | train | 28,820 |
def Checked(cls, name, datum_to_value=None, value_to_datum=None, **other):
    """Create a custom `ObjectField` that validates values and datums.

    :param name: The name of the field. This is the name that's used to
        store the datum in the MAAS-side data dictionary.
    :param datum_to_value: A callable taking a single ``datum`` argument,
        passed positionally. This callable should convert the datum to a
        Python-side value, and/or raise an exception for invalid datums.
    :param value_to_datum: A callable taking a single ``value`` argument,
        passed positionally. This callable should convert the value to a
        MAAS-side datum, and/or raise an exception for invalid values.
    :param other: Additional arguments to pass to the default
        `ObjectField` constructor.
    """
    attrs = {}
    if datum_to_value is not None:
        # Static method so the field instance is not implicitly bound;
        # the converter receives (instance, datum) positionally.
        @wraps(datum_to_value)
        def _convert_datum(instance, datum):
            return datum_to_value(datum)
        attrs["datum_to_value"] = staticmethod(_convert_datum)
    if value_to_datum is not None:
        @wraps(value_to_datum)
        def _convert_value(instance, value):
            return value_to_datum(value)
        attrs["value_to_datum"] = staticmethod(_convert_value)
    checked_cls = type(
        "%s.Checked#%s" % (cls.__name__, name), (cls,), attrs)
    return checked_cls(name, **other)
"""Create a custom `ObjectField` that validates values and datums.
:param name: The name of the field. This is the name that's used to
store the datum in the MAAS-side data dictionary.
:param datum_to_value: A callable taking a single ``datum`` argument,
passed positionally. This callable should convert the datum to a
Python-side value, and/or raise an exception for invalid datums.
:param value_to_datum: A callable taking a single ``value`` argument,
passed positionally. This callable should convert the value to a
MAAS-side datum, and/or raise an exception for invalid values.
:param other: Additional arguments to pass to the default
`ObjectField` constructor.
"""
attrs = {}
if datum_to_value is not None:
@wraps(datum_to_value)
def datum_to_value_method(instance, datum):
return datum_to_value(datum)
attrs["datum_to_value"] = staticmethod(datum_to_value_method)
if value_to_datum is not None:
@wraps(value_to_datum)
def value_to_datum_method(instance, value):
return value_to_datum(value)
attrs["value_to_datum"] = staticmethod(value_to_datum_method)
cls = type("%s.Checked#%s" % (cls.__name__, name), (cls,), attrs)
return cls(name, **other) | [
"def",
"Checked",
"(",
"cls",
",",
"name",
",",
"datum_to_value",
"=",
"None",
",",
"value_to_datum",
"=",
"None",
",",
"*",
"*",
"other",
")",
":",
"attrs",
"=",
"{",
"}",
"if",
"datum_to_value",
"is",
"not",
"None",
":",
"@",
"wraps",
"(",
"datum_t... | Create a custom `ObjectField` that validates values and datums.
:param name: The name of the field. This is the name that's used to
store the datum in the MAAS-side data dictionary.
:param datum_to_value: A callable taking a single ``datum`` argument,
passed positionally. This callable should convert the datum to a
Python-side value, and/or raise an exception for invalid datums.
:param value_to_datum: A callable taking a single ``value`` argument,
passed positionally. This callable should convert the value to a
MAAS-side datum, and/or raise an exception for invalid values.
:param other: Additional arguments to pass to the default
`ObjectField` constructor. | [
"Create",
"a",
"custom",
"ObjectField",
"that",
"validates",
"values",
"and",
"datums",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L648-L674 | train | 28,821 |
def value_to_datum(self, instance, value):
    """Convert a given Python-side value to a MAAS-side datum.

    :param instance: The `Object` instance on which this field is
        currently operating. This method should treat it as read-only, for
        example to perform validation with regards to other fields.
    :param datum: The Python-side value to validate and convert into a
        MAAS-side datum.
    :return: A datum derived from the given value.
    """
    if value is None:
        return None
    # The related class is resolved by name against the origin bound to
    # the instance being modified.
    bound = getattr(instance._origin, self.cls)
    if type(value) is not bound:
        raise TypeError(
            "must be %s, not %s" % (self.cls, type(value).__name__))
    if self.use_data_setter:
        # Using data setter, so just return the data for the object.
        return value._data
    # Use the primary keys to set the value.
    descriptors, alt_descriptors = get_pk_descriptors(bound)
    if len(descriptors) == 1:
        return getattr(value, descriptors[0][0])
    if len(descriptors) > 1:
        return tuple(getattr(value, name) for name, _ in descriptors)
    raise AttributeError(
        "unable to perform set object no primary key "
        "fields defined for %s" % self.cls)
"""Convert a given Python-side value to a MAAS-side datum.
:param instance: The `Object` instance on which this field is
currently operating. This method should treat it as read-only, for
example to perform validation with regards to other fields.
:param datum: The Python-side value to validate and convert into a
MAAS-side datum.
:return: A datum derived from the given value.
"""
if value is None:
return None
bound = getattr(instance._origin, self.cls)
if type(value) is bound:
if self.use_data_setter:
# Using data setter, so just return the data for the object.
return value._data
else:
# Use the primary keys to set the value.
descriptors, alt_descriptors = get_pk_descriptors(bound)
if len(descriptors) == 1:
return getattr(value, descriptors[0][0])
elif len(descriptors) > 1:
return tuple(
getattr(value, name)
for name, _ in descriptors
)
else:
raise AttributeError(
"unable to perform set object no primary key "
"fields defined for %s" % self.cls)
else:
raise TypeError(
"must be %s, not %s" % (self.cls, type(value).__name__)) | [
"def",
"value_to_datum",
"(",
"self",
",",
"instance",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"bound",
"=",
"getattr",
"(",
"instance",
".",
"_origin",
",",
"self",
".",
"cls",
")",
"if",
"type",
"(",
"value",
")"... | Convert a given Python-side value to a MAAS-side datum.
:param instance: The `Object` instance on which this field is
currently operating. This method should treat it as read-only, for
example to perform validation with regards to other fields.
:param datum: The Python-side value to validate and convert into a
MAAS-side datum.
:return: A datum derived from the given value. | [
"Convert",
"a",
"given",
"Python",
"-",
"side",
"value",
"to",
"a",
"MAAS",
"-",
"side",
"datum",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L855-L888 | train | 28,822 |
async def fromURL(cls, url, *, credentials=None, insecure=False):
    """Return an `Origin` for a given MAAS instance."""
    # Build the low-level bones session first; the origin wraps it.
    session = await bones.SessionAPI.fromURL(
        url, credentials=credentials, insecure=insecure)
    origin = cls(session)
    return origin
"""Return an `Origin` for a given MAAS instance."""
session = await bones.SessionAPI.fromURL(
url, credentials=credentials, insecure=insecure)
return cls(session) | [
"async",
"def",
"fromURL",
"(",
"cls",
",",
"url",
",",
"*",
",",
"credentials",
"=",
"None",
",",
"insecure",
"=",
"False",
")",
":",
"session",
"=",
"await",
"bones",
".",
"SessionAPI",
".",
"fromURL",
"(",
"url",
",",
"credentials",
"=",
"credential... | Return an `Origin` for a given MAAS instance. | [
"Return",
"an",
"Origin",
"for",
"a",
"given",
"MAAS",
"instance",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L1108-L1112 | train | 28,823 |
def fromProfile(cls, profile):
    """Return an `Origin` from a given configuration profile.

    :see: `ProfileStore`.
    """
    # The profile already carries the URL and credentials.
    return cls(bones.SessionAPI.fromProfile(profile))
"""Return an `Origin` from a given configuration profile.
:see: `ProfileStore`.
"""
session = bones.SessionAPI.fromProfile(profile)
return cls(session) | [
"def",
"fromProfile",
"(",
"cls",
",",
"profile",
")",
":",
"session",
"=",
"bones",
".",
"SessionAPI",
".",
"fromProfile",
"(",
"profile",
")",
"return",
"cls",
"(",
"session",
")"
] | Return an `Origin` from a given configuration profile.
:see: `ProfileStore`. | [
"Return",
"an",
"Origin",
"from",
"a",
"given",
"configuration",
"profile",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L1114-L1120 | train | 28,824 |
def fromProfileName(cls, name):
    """Return an `Origin` from a given configuration profile name.

    :see: `ProfileStore`.
    """
    # Look the profile up by name, then wrap the resulting session.
    return cls(bones.SessionAPI.fromProfileName(name))
"""Return an `Origin` from a given configuration profile name.
:see: `ProfileStore`.
"""
session = bones.SessionAPI.fromProfileName(name)
return cls(session) | [
"def",
"fromProfileName",
"(",
"cls",
",",
"name",
")",
":",
"session",
"=",
"bones",
".",
"SessionAPI",
".",
"fromProfileName",
"(",
"name",
")",
"return",
"cls",
"(",
"session",
")"
] | Return an `Origin` from a given configuration profile name.
:see: `ProfileStore`. | [
"Return",
"an",
"Origin",
"from",
"a",
"given",
"configuration",
"profile",
"name",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L1122-L1128 | train | 28,825 |
async def login(
        cls, url, *, username=None, password=None, insecure=False):
    """Make an `Origin` by logging-in with a username and password.

    :return: A tuple of ``profile`` and ``origin``, where the former is an
        unsaved `Profile` instance, and the latter is an `Origin` instance
        made using the profile.
    """
    profile, session = await bones.SessionAPI.login(
        url=url, username=username, password=password, insecure=insecure)
    origin = cls(session)
    return profile, origin
cls, url, *, username=None, password=None, insecure=False):
"""Make an `Origin` by logging-in with a username and password.
:return: A tuple of ``profile`` and ``origin``, where the former is an
unsaved `Profile` instance, and the latter is an `Origin` instance
made using the profile.
"""
profile, session = await bones.SessionAPI.login(
url=url, username=username, password=password, insecure=insecure)
return profile, cls(session) | [
"async",
"def",
"login",
"(",
"cls",
",",
"url",
",",
"*",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"insecure",
"=",
"False",
")",
":",
"profile",
",",
"session",
"=",
"await",
"bones",
".",
"SessionAPI",
".",
"login",
"(",
... | Make an `Origin` by logging-in with a username and password.
:return: A tuple of ``profile`` and ``origin``, where the former is an
unsaved `Profile` instance, and the latter is an `Origin` instance
made using the profile. | [
"Make",
"an",
"Origin",
"by",
"logging",
"-",
"in",
"with",
"a",
"username",
"and",
"password",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L1130-L1140 | train | 28,826 |
async def connect(
        cls, url, *, apikey=None, insecure=False):
    """Make an `Origin` by connecting with an apikey.

    :return: A tuple of ``profile`` and ``origin``, where the former is an
        unsaved `Profile` instance, and the latter is an `Origin` instance
        made using the profile.
    """
    profile, session = await bones.SessionAPI.connect(
        url=url, apikey=apikey, insecure=insecure)
    origin = cls(session)
    return profile, origin
cls, url, *, apikey=None, insecure=False):
"""Make an `Origin` by connecting with an apikey.
:return: A tuple of ``profile`` and ``origin``, where the former is an
unsaved `Profile` instance, and the latter is an `Origin` instance
made using the profile.
"""
profile, session = await bones.SessionAPI.connect(
url=url, apikey=apikey, insecure=insecure)
return profile, cls(session) | [
"async",
"def",
"connect",
"(",
"cls",
",",
"url",
",",
"*",
",",
"apikey",
"=",
"None",
",",
"insecure",
"=",
"False",
")",
":",
"profile",
",",
"session",
"=",
"await",
"bones",
".",
"SessionAPI",
".",
"connect",
"(",
"url",
"=",
"url",
",",
"api... | Make an `Origin` by connecting with an apikey.
:return: A tuple of ``profile`` and ``origin``, where the former is an
unsaved `Profile` instance, and the latter is an `Origin` instance
made using the profile. | [
"Make",
"an",
"Origin",
"by",
"connecting",
"with",
"an",
"apikey",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L1142-L1152 | train | 28,827 |
def facade(factory):
    """Declare a method as a facade factory."""
    # The descriptor builds the facade lazily; copy the factory's
    # metadata onto it so introspection still works.
    descriptor = FacadeDescriptor(factory.__name__, factory)
    return update_wrapper(descriptor, factory)
"""Declare a method as a facade factory."""
wrapper = FacadeDescriptor(factory.__name__, factory)
return update_wrapper(wrapper, factory) | [
"def",
"facade",
"(",
"factory",
")",
":",
"wrapper",
"=",
"FacadeDescriptor",
"(",
"factory",
".",
"__name__",
",",
"factory",
")",
"return",
"update_wrapper",
"(",
"wrapper",
",",
"factory",
")"
] | Declare a method as a facade factory. | [
"Declare",
"a",
"method",
"as",
"a",
"facade",
"factory",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/facade.py#L61-L64 | train | 28,828 |
async def delete(self):
    """Delete this partition."""
    device = self.block_device
    await self._handler.delete(
        system_id=device.node.system_id,
        device_id=device.id,
        id=self.id)
"""Delete this partition."""
await self._handler.delete(
system_id=self.block_device.node.system_id,
device_id=self.block_device.id, id=self.id) | [
"async",
"def",
"delete",
"(",
"self",
")",
":",
"await",
"self",
".",
"_handler",
".",
"delete",
"(",
"system_id",
"=",
"self",
".",
"block_device",
".",
"node",
".",
"system_id",
",",
"device_id",
"=",
"self",
".",
"block_device",
".",
"id",
",",
"id... | Delete this partition. | [
"Delete",
"this",
"partition",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L78-L82 | train | 28,829 |
async def unformat(self):
    """Unformat this partition."""
    device = self.block_device
    # The handler returns the refreshed partition data.
    self._data = await self._handler.unformat(
        system_id=device.node.system_id,
        device_id=device.id,
        id=self.id)
"""Unformat this partition."""
self._data = await self._handler.unformat(
system_id=self.block_device.node.system_id,
device_id=self.block_device.id, id=self.id) | [
"async",
"def",
"unformat",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"unformat",
"(",
"system_id",
"=",
"self",
".",
"block_device",
".",
"node",
".",
"system_id",
",",
"device_id",
"=",
"self",
".",
"blo... | Unformat this partition. | [
"Unformat",
"this",
"partition",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L91-L95 | train | 28,830 |
async def mount(self, mount_point, *, mount_options=None):
    """Mount this partition."""
    device = self.block_device
    # The handler returns the refreshed partition data.
    self._data = await self._handler.mount(
        system_id=device.node.system_id,
        device_id=device.id,
        id=self.id,
        mount_point=mount_point,
        mount_options=mount_options)
"""Mount this partition."""
self._data = await self._handler.mount(
system_id=self.block_device.node.system_id,
device_id=self.block_device.id, id=self.id,
mount_point=mount_point,
mount_options=mount_options) | [
"async",
"def",
"mount",
"(",
"self",
",",
"mount_point",
",",
"*",
",",
"mount_options",
"=",
"None",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"mount",
"(",
"system_id",
"=",
"self",
".",
"block_device",
".",
"node",... | Mount this partition. | [
"Mount",
"this",
"partition",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L97-L103 | train | 28,831 |
async def umount(self):
    """Unmount this partition."""
    device = self.block_device
    # NOTE: the API operation is named `unmount` even though this
    # method is `umount`.
    self._data = await self._handler.unmount(
        system_id=device.node.system_id,
        device_id=device.id,
        id=self.id)
"""Unmount this partition."""
self._data = await self._handler.unmount(
system_id=self.block_device.node.system_id,
device_id=self.block_device.id, id=self.id) | [
"async",
"def",
"umount",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"unmount",
"(",
"system_id",
"=",
"self",
".",
"block_device",
".",
"node",
".",
"system_id",
",",
"device_id",
"=",
"self",
".",
"block_... | Unmount this partition. | [
"Unmount",
"this",
"partition",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L105-L109 | train | 28,832 |
async def read(cls, node, block_device):
    """Get the list of `Partition` objects for `node` and `block_device`.

    :param node: The `Node` (or its ``system_id`` string) that owns the
        block device.
    :param block_device: The `BlockDevice` (or its ``id`` integer) whose
        partitions should be read.
    :raises TypeError: If `node` or `block_device` is of an unsupported
        type.
    """
    if isinstance(node, str):
        system_id = node
    elif isinstance(node, Node):
        system_id = node.system_id
    else:
        raise TypeError(
            "node must be a Node or str, not %s"
            % type(node).__name__)
    if isinstance(block_device, int):
        device_id = block_device
    elif isinstance(block_device, BlockDevice):
        device_id = block_device.id
    else:
        # Bug fix: this message previously said "node must be a Node or
        # str" — a copy-paste from the branch above.
        raise TypeError(
            "block_device must be a BlockDevice or int, not %s"
            % type(block_device).__name__)
    data = await cls._handler.read(
        system_id=system_id, device_id=device_id)
    return cls(cls._object(item) for item in data)
"""Get list of `Partitions`'s for `node` and `block_device`."""
if isinstance(node, str):
system_id = node
elif isinstance(node, Node):
system_id = node.system_id
else:
raise TypeError(
"node must be a Node or str, not %s"
% type(node).__name__)
if isinstance(block_device, int):
block_device = block_device
elif isinstance(block_device, BlockDevice):
block_device = block_device.id
else:
raise TypeError(
"node must be a Node or str, not %s"
% type(block_device).__name__)
data = await cls._handler.read(
system_id=system_id, device_id=block_device)
return cls(
cls._object(item)
for item in data) | [
"async",
"def",
"read",
"(",
"cls",
",",
"node",
",",
"block_device",
")",
":",
"if",
"isinstance",
"(",
"node",
",",
"str",
")",
":",
"system_id",
"=",
"node",
"elif",
"isinstance",
"(",
"node",
",",
"Node",
")",
":",
"system_id",
"=",
"node",
".",
... | Get list of `Partitions`'s for `node` and `block_device`. | [
"Get",
"list",
"of",
"Partitions",
"s",
"for",
"node",
"and",
"block_device",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L115-L137 | train | 28,833 |
async def create(cls, block_device: BlockDevice, size: int):
    """
    Create a partition on a block device.

    :param block_device: BlockDevice to create the partition on.
    :type block_device: `BlockDevice`
    :param size: The size of the partition in bytes. Must be greater
        than zero.
    :type size: `int`
    :raises TypeError: If `block_device` is not a `BlockDevice`.
    :raises ValueError: If `size` is missing or not greater than zero.
    """
    if not isinstance(block_device, BlockDevice):
        raise TypeError(
            'block_device must be a BlockDevice, not %s' % (
                type(block_device).__name__))
    # Bug fix: the old check (`if not size`) rejected only falsy values,
    # so negative sizes slipped through to the server despite the
    # "greater than zero" message.
    if not size or size <= 0:
        raise ValueError("size must be provided and greater than zero.")
    params = {
        'system_id': block_device.node.system_id,
        'device_id': block_device.id,
        'size': size,
    }
    return cls._object(await cls._handler.create(**params))
"""
Create a partition on a block device.
:param block_device: BlockDevice to create the paritition on.
:type block_device: `BlockDevice`
:param size: The size of the partition in bytes.
:type size: `int`
"""
params = {}
if isinstance(block_device, BlockDevice):
params['system_id'] = block_device.node.system_id
params['device_id'] = block_device.id
else:
raise TypeError(
'block_device must be a BlockDevice, not %s' % (
type(block_device).__name__))
if not size:
raise ValueError("size must be provided and greater than zero.")
params['size'] = size
return cls._object(await cls._handler.create(**params)) | [
"async",
"def",
"create",
"(",
"cls",
",",
"block_device",
":",
"BlockDevice",
",",
"size",
":",
"int",
")",
":",
"params",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"block_device",
",",
"BlockDevice",
")",
":",
"params",
"[",
"'system_id'",
"]",
"=",
"b... | Create a partition on a block device.
:param block_device: BlockDevice to create the paritition on.
:type block_device: `BlockDevice`
:param size: The size of the partition in bytes.
:type size: `int` | [
"Create",
"a",
"partition",
"on",
"a",
"block",
"device",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/partitions.py#L139-L160 | train | 28,834 |
def asynchronous(func):
    """Return `func` in a "smart" asynchronous-aware wrapper.

    If `func` is called within the event-loop — i.e. when it is running — this
    returns the result of `func` without alteration. However, when called from
    outside of the event-loop, and the result is awaitable, the result will be
    passed though the current event-loop's `run_until_complete` method.

    In other words, this automatically blocks when calling an asynchronous
    function from outside of the event-loop, and so makes interactive use of
    these APIs far more intuitive.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        loop = get_event_loop()
        outcome = func(*args, **kwargs)
        if loop.is_running():
            # Already inside the event loop: hand the (possibly
            # awaitable) result straight back to the caller.
            return outcome
        # Outside the event loop: block until fully resolved, unwrapping
        # nested awaitables as we go.
        while isawaitable(outcome):
            outcome = loop.run_until_complete(outcome)
        return outcome
    return wrapper
"""Return `func` in a "smart" asynchronous-aware wrapper.
If `func` is called within the event-loop — i.e. when it is running — this
returns the result of `func` without alteration. However, when called from
outside of the event-loop, and the result is awaitable, the result will be
passed though the current event-loop's `run_until_complete` method.
In other words, this automatically blocks when calling an asynchronous
function from outside of the event-loop, and so makes interactive use of
these APIs far more intuitive.
"""
@wraps(func)
def wrapper(*args, **kwargs):
eventloop = get_event_loop()
result = func(*args, **kwargs)
if not eventloop.is_running():
while isawaitable(result):
result = eventloop.run_until_complete(result)
return result
return wrapper | [
"def",
"asynchronous",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"eventloop",
"=",
"get_event_loop",
"(",
")",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
... | Return `func` in a "smart" asynchronous-aware wrapper.
If `func` is called within the event-loop — i.e. when it is running — this
returns the result of `func` without alteration. However, when called from
outside of the event-loop, and the result is awaitable, the result will be
passed though the current event-loop's `run_until_complete` method.
In other words, this automatically blocks when calling an asynchronous
function from outside of the event-loop, and so makes interactive use of
these APIs far more intuitive. | [
"Return",
"func",
"in",
"a",
"smart",
"asynchronous",
"-",
"aware",
"wrapper",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/maas_async.py#L31-L52 | train | 28,835 |
maas/python-libmaas | maas/client/utils/maas_async.py | _maybe_wrap | def _maybe_wrap(attribute):
"""Helper for `Asynchronous`."""
if iscoroutinefunction(attribute):
return asynchronous(attribute)
if isinstance(attribute, (classmethod, staticmethod)):
if iscoroutinefunction(attribute.__func__):
return attribute.__class__(asynchronous(attribute.__func__))
return attribute | python | def _maybe_wrap(attribute):
"""Helper for `Asynchronous`."""
if iscoroutinefunction(attribute):
return asynchronous(attribute)
if isinstance(attribute, (classmethod, staticmethod)):
if iscoroutinefunction(attribute.__func__):
return attribute.__class__(asynchronous(attribute.__func__))
return attribute | [
"def",
"_maybe_wrap",
"(",
"attribute",
")",
":",
"if",
"iscoroutinefunction",
"(",
"attribute",
")",
":",
"return",
"asynchronous",
"(",
"attribute",
")",
"if",
"isinstance",
"(",
"attribute",
",",
"(",
"classmethod",
",",
"staticmethod",
")",
")",
":",
"if... | Helper for `Asynchronous`. | [
"Helper",
"for",
"Asynchronous",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/maas_async.py#L68-L75 | train | 28,836 |
maas/python-libmaas | maas/client/viscera/subnets.py | SubnetsType.create | async def create(
cls, cidr: str, vlan: Union[Vlan, int] = None, *,
name: str = None, description: str = None,
gateway_ip: str = None, rdns_mode: RDNSMode = None,
dns_servers: Union[Sequence[str], str] = None,
managed: bool = None):
"""
Create a `Subnet` in MAAS.
:param cidr: The cidr of the `Subnet` (required).
:type cidr: `str`
:param vlan: The VLAN of the `Subnet` (required).
:type vlan: `Vlan`
:param name: The name of the `Subnet` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `Subnet` (optional).
:type description: `str`
:param gateway_ip: The gateway IP address for the `Subnet` (optional).
:type gateway_ip: `str`
:param rdns_mode: The reverse DNS mode for the `Subnet` (optional).
:type rdns_mode: `RDNSMode`
:param managed: Whether the `Subnet` is managed by MAAS (optional).
:type managed: `bool`
:returns: The created Subnet
:rtype: `Subnet`
"""
params = {
"cidr": cidr
}
if isinstance(vlan, int):
params["vlan"] = vlan
elif isinstance(vlan, Vlan):
params["vlan"] = vlan.id
else:
raise TypeError(
"vlan must be Vlan or int, not %s" % (
type(vlan).__class__))
if name is not None:
params["name"] = name
if description is not None:
params["description"] = description
if gateway_ip is not None:
params["gateway_ip"] = gateway_ip
if rdns_mode is not None:
params["rdns_mode"] = rdns_mode
if isinstance(dns_servers, Sequence):
if len(dns_servers) > 0:
params["dns_servers"] = ",".join(dns_servers)
elif dns_servers is not None:
params["dns_servers"] = dns_servers
if managed is not None:
params["managed"] = managed
return cls._object(await cls._handler.create(**params)) | python | async def create(
cls, cidr: str, vlan: Union[Vlan, int] = None, *,
name: str = None, description: str = None,
gateway_ip: str = None, rdns_mode: RDNSMode = None,
dns_servers: Union[Sequence[str], str] = None,
managed: bool = None):
"""
Create a `Subnet` in MAAS.
:param cidr: The cidr of the `Subnet` (required).
:type cidr: `str`
:param vlan: The VLAN of the `Subnet` (required).
:type vlan: `Vlan`
:param name: The name of the `Subnet` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `Subnet` (optional).
:type description: `str`
:param gateway_ip: The gateway IP address for the `Subnet` (optional).
:type gateway_ip: `str`
:param rdns_mode: The reverse DNS mode for the `Subnet` (optional).
:type rdns_mode: `RDNSMode`
:param managed: Whether the `Subnet` is managed by MAAS (optional).
:type managed: `bool`
:returns: The created Subnet
:rtype: `Subnet`
"""
params = {
"cidr": cidr
}
if isinstance(vlan, int):
params["vlan"] = vlan
elif isinstance(vlan, Vlan):
params["vlan"] = vlan.id
else:
raise TypeError(
"vlan must be Vlan or int, not %s" % (
type(vlan).__class__))
if name is not None:
params["name"] = name
if description is not None:
params["description"] = description
if gateway_ip is not None:
params["gateway_ip"] = gateway_ip
if rdns_mode is not None:
params["rdns_mode"] = rdns_mode
if isinstance(dns_servers, Sequence):
if len(dns_servers) > 0:
params["dns_servers"] = ",".join(dns_servers)
elif dns_servers is not None:
params["dns_servers"] = dns_servers
if managed is not None:
params["managed"] = managed
return cls._object(await cls._handler.create(**params)) | [
"async",
"def",
"create",
"(",
"cls",
",",
"cidr",
":",
"str",
",",
"vlan",
":",
"Union",
"[",
"Vlan",
",",
"int",
"]",
"=",
"None",
",",
"*",
",",
"name",
":",
"str",
"=",
"None",
",",
"description",
":",
"str",
"=",
"None",
",",
"gateway_ip",
... | Create a `Subnet` in MAAS.
:param cidr: The cidr of the `Subnet` (required).
:type cidr: `str`
:param vlan: The VLAN of the `Subnet` (required).
:type vlan: `Vlan`
:param name: The name of the `Subnet` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `Subnet` (optional).
:type description: `str`
:param gateway_ip: The gateway IP address for the `Subnet` (optional).
:type gateway_ip: `str`
:param rdns_mode: The reverse DNS mode for the `Subnet` (optional).
:type rdns_mode: `RDNSMode`
:param managed: Whether the `Subnet` is managed by MAAS (optional).
:type managed: `bool`
:returns: The created Subnet
:rtype: `Subnet` | [
"Create",
"a",
"Subnet",
"in",
"MAAS",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/subnets.py#L34-L88 | train | 28,837 |
maas/python-libmaas | maas/client/viscera/subnets.py | Subnet.save | async def save(self):
"""Save this subnet."""
if 'vlan' in self._changed_data and self._changed_data['vlan']:
# Update uses the ID of the VLAN, not the VLAN object.
self._changed_data['vlan'] = self._changed_data['vlan']['id']
if (self._orig_data['vlan'] and
'id' in self._orig_data['vlan'] and
self._changed_data['vlan'] == (
self._orig_data['vlan']['id'])):
# VLAN didn't really change, the object was just set to the
# same VLAN.
del self._changed_data['vlan']
await super(Subnet, self).save() | python | async def save(self):
"""Save this subnet."""
if 'vlan' in self._changed_data and self._changed_data['vlan']:
# Update uses the ID of the VLAN, not the VLAN object.
self._changed_data['vlan'] = self._changed_data['vlan']['id']
if (self._orig_data['vlan'] and
'id' in self._orig_data['vlan'] and
self._changed_data['vlan'] == (
self._orig_data['vlan']['id'])):
# VLAN didn't really change, the object was just set to the
# same VLAN.
del self._changed_data['vlan']
await super(Subnet, self).save() | [
"async",
"def",
"save",
"(",
"self",
")",
":",
"if",
"'vlan'",
"in",
"self",
".",
"_changed_data",
"and",
"self",
".",
"_changed_data",
"[",
"'vlan'",
"]",
":",
"# Update uses the ID of the VLAN, not the VLAN object.",
"self",
".",
"_changed_data",
"[",
"'vlan'",
... | Save this subnet. | [
"Save",
"this",
"subnet",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/subnets.py#L132-L144 | train | 28,838 |
maas/python-libmaas | maas/client/utils/__init__.py | urlencode | def urlencode(data):
"""A version of `urllib.urlencode` that isn't insane.
This only cares that `data` is an iterable of iterables. Each sub-iterable
must be of overall length 2, i.e. a name/value pair.
Unicode strings will be encoded to UTF-8. This is what Django expects; see
`smart_text` in the Django documentation.
"""
def dec(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
return quote_plus(string)
return "&".join(
"%s=%s" % (dec(name), dec(value))
for name, value in data) | python | def urlencode(data):
"""A version of `urllib.urlencode` that isn't insane.
This only cares that `data` is an iterable of iterables. Each sub-iterable
must be of overall length 2, i.e. a name/value pair.
Unicode strings will be encoded to UTF-8. This is what Django expects; see
`smart_text` in the Django documentation.
"""
def dec(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
return quote_plus(string)
return "&".join(
"%s=%s" % (dec(name), dec(value))
for name, value in data) | [
"def",
"urlencode",
"(",
"data",
")",
":",
"def",
"dec",
"(",
"string",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"bytes",
")",
":",
"string",
"=",
"string",
".",
"decode",
"(",
"\"utf-8\"",
")",
"return",
"quote_plus",
"(",
"string",
")",
"r... | A version of `urllib.urlencode` that isn't insane.
This only cares that `data` is an iterable of iterables. Each sub-iterable
must be of overall length 2, i.e. a name/value pair.
Unicode strings will be encoded to UTF-8. This is what Django expects; see
`smart_text` in the Django documentation. | [
"A",
"version",
"of",
"urllib",
".",
"urlencode",
"that",
"isn",
"t",
"insane",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L63-L79 | train | 28,839 |
maas/python-libmaas | maas/client/utils/__init__.py | sign | def sign(uri, headers, credentials):
"""Sign the URI and headers.
A request method of `GET` with no body content is assumed.
:param credentials: A tuple of consumer key, token key, and token secret.
"""
consumer_key, token_key, token_secret = credentials
auth = OAuthSigner(token_key, token_secret, consumer_key, "")
auth.sign_request(uri, method="GET", body=None, headers=headers) | python | def sign(uri, headers, credentials):
"""Sign the URI and headers.
A request method of `GET` with no body content is assumed.
:param credentials: A tuple of consumer key, token key, and token secret.
"""
consumer_key, token_key, token_secret = credentials
auth = OAuthSigner(token_key, token_secret, consumer_key, "")
auth.sign_request(uri, method="GET", body=None, headers=headers) | [
"def",
"sign",
"(",
"uri",
",",
"headers",
",",
"credentials",
")",
":",
"consumer_key",
",",
"token_key",
",",
"token_secret",
"=",
"credentials",
"auth",
"=",
"OAuthSigner",
"(",
"token_key",
",",
"token_secret",
",",
"consumer_key",
",",
"\"\"",
")",
"aut... | Sign the URI and headers.
A request method of `GET` with no body content is assumed.
:param credentials: A tuple of consumer key, token key, and token secret. | [
"Sign",
"the",
"URI",
"and",
"headers",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L166-L175 | train | 28,840 |
maas/python-libmaas | maas/client/utils/__init__.py | parse_docstring | def parse_docstring(thing):
"""Parse a Python docstring, or the docstring found on `thing`.
:return: a ``(title, body)`` tuple. As per docstring convention, title is
the docstring's first paragraph and body is the rest.
"""
assert not isinstance(thing, bytes)
doc = cleandoc(thing) if isinstance(thing, str) else getdoc(thing)
doc = empty if doc is None else doc
assert not isinstance(doc, bytes)
# Break the docstring into two parts: title and body.
parts = docstring_split(doc)
if len(parts) == 2:
title, body = parts[0], parts[1]
else:
title, body = parts[0], empty
# Remove line breaks from the title line.
title = remove_line_breaks(title)
# Normalise line-breaks on newline.
body = body.replace("\r\n", newline).replace("\r", newline)
return docstring(title, body) | python | def parse_docstring(thing):
"""Parse a Python docstring, or the docstring found on `thing`.
:return: a ``(title, body)`` tuple. As per docstring convention, title is
the docstring's first paragraph and body is the rest.
"""
assert not isinstance(thing, bytes)
doc = cleandoc(thing) if isinstance(thing, str) else getdoc(thing)
doc = empty if doc is None else doc
assert not isinstance(doc, bytes)
# Break the docstring into two parts: title and body.
parts = docstring_split(doc)
if len(parts) == 2:
title, body = parts[0], parts[1]
else:
title, body = parts[0], empty
# Remove line breaks from the title line.
title = remove_line_breaks(title)
# Normalise line-breaks on newline.
body = body.replace("\r\n", newline).replace("\r", newline)
return docstring(title, body) | [
"def",
"parse_docstring",
"(",
"thing",
")",
":",
"assert",
"not",
"isinstance",
"(",
"thing",
",",
"bytes",
")",
"doc",
"=",
"cleandoc",
"(",
"thing",
")",
"if",
"isinstance",
"(",
"thing",
",",
"str",
")",
"else",
"getdoc",
"(",
"thing",
")",
"doc",
... | Parse a Python docstring, or the docstring found on `thing`.
:return: a ``(title, body)`` tuple. As per docstring convention, title is
the docstring's first paragraph and body is the rest. | [
"Parse",
"a",
"Python",
"docstring",
"or",
"the",
"docstring",
"found",
"on",
"thing",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L194-L214 | train | 28,841 |
maas/python-libmaas | maas/client/utils/__init__.py | api_url | def api_url(string):
"""Ensure that `string` looks like a URL to the API.
This ensures that the API version is specified explicitly (i.e. the path
ends with /api/{version}). If not, version 2.0 is selected. It also
ensures that the path ends with a forward-slash.
This is suitable for use as an argument type with argparse.
"""
url = urlparse(string)
url = url._replace(path=ensure_trailing_slash(url.path))
if re.search("/api/[0-9.]+/?$", url.path) is None:
url = url._replace(path=url.path + "api/2.0/")
return url.geturl() | python | def api_url(string):
"""Ensure that `string` looks like a URL to the API.
This ensures that the API version is specified explicitly (i.e. the path
ends with /api/{version}). If not, version 2.0 is selected. It also
ensures that the path ends with a forward-slash.
This is suitable for use as an argument type with argparse.
"""
url = urlparse(string)
url = url._replace(path=ensure_trailing_slash(url.path))
if re.search("/api/[0-9.]+/?$", url.path) is None:
url = url._replace(path=url.path + "api/2.0/")
return url.geturl() | [
"def",
"api_url",
"(",
"string",
")",
":",
"url",
"=",
"urlparse",
"(",
"string",
")",
"url",
"=",
"url",
".",
"_replace",
"(",
"path",
"=",
"ensure_trailing_slash",
"(",
"url",
".",
"path",
")",
")",
"if",
"re",
".",
"search",
"(",
"\"/api/[0-9.]+/?$\... | Ensure that `string` looks like a URL to the API.
This ensures that the API version is specified explicitly (i.e. the path
ends with /api/{version}). If not, version 2.0 is selected. It also
ensures that the path ends with a forward-slash.
This is suitable for use as an argument type with argparse. | [
"Ensure",
"that",
"string",
"looks",
"like",
"a",
"URL",
"to",
"the",
"API",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L222-L235 | train | 28,842 |
maas/python-libmaas | maas/client/utils/__init__.py | vars_class | def vars_class(cls):
"""Return a dict of vars for the given class, including all ancestors.
This differs from the usual behaviour of `vars` which returns attributes
belonging to the given class and not its ancestors.
"""
return dict(chain.from_iterable(
vars(cls).items() for cls in reversed(cls.__mro__))) | python | def vars_class(cls):
"""Return a dict of vars for the given class, including all ancestors.
This differs from the usual behaviour of `vars` which returns attributes
belonging to the given class and not its ancestors.
"""
return dict(chain.from_iterable(
vars(cls).items() for cls in reversed(cls.__mro__))) | [
"def",
"vars_class",
"(",
"cls",
")",
":",
"return",
"dict",
"(",
"chain",
".",
"from_iterable",
"(",
"vars",
"(",
"cls",
")",
".",
"items",
"(",
")",
"for",
"cls",
"in",
"reversed",
"(",
"cls",
".",
"__mro__",
")",
")",
")"
] | Return a dict of vars for the given class, including all ancestors.
This differs from the usual behaviour of `vars` which returns attributes
belonging to the given class and not its ancestors. | [
"Return",
"a",
"dict",
"of",
"vars",
"for",
"the",
"given",
"class",
"including",
"all",
"ancestors",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L245-L252 | train | 28,843 |
maas/python-libmaas | maas/client/utils/__init__.py | remove_None | def remove_None(params: dict):
"""Remove all keys in `params` that have the value of `None`."""
return {
key: value
for key, value in params.items()
if value is not None
} | python | def remove_None(params: dict):
"""Remove all keys in `params` that have the value of `None`."""
return {
key: value
for key, value in params.items()
if value is not None
} | [
"def",
"remove_None",
"(",
"params",
":",
"dict",
")",
":",
"return",
"{",
"key",
":",
"value",
"for",
"key",
",",
"value",
"in",
"params",
".",
"items",
"(",
")",
"if",
"value",
"is",
"not",
"None",
"}"
] | Remove all keys in `params` that have the value of `None`. | [
"Remove",
"all",
"keys",
"in",
"params",
"that",
"have",
"the",
"value",
"of",
"None",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L325-L331 | train | 28,844 |
maas/python-libmaas | maas/client/utils/__init__.py | OAuthSigner.sign_request | def sign_request(self, url, method, body, headers):
"""Sign a request.
:param url: The URL to which the request is to be sent.
:param headers: The headers in the request. These will be updated with
the signature.
"""
# The use of PLAINTEXT here was copied from MAAS, but we should switch
# to HMAC once it works server-side.
client = oauth1.Client(
self.consumer_key, self.consumer_secret, self.token_key,
self.token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT,
realm=self.realm)
# To preserve API backward compatibility convert an empty string body
# to `None`. The old "oauth" library would treat the empty string as
# "no body", but "oauthlib" requires `None`.
body = None if body is None or len(body) == 0 else body
uri, signed_headers, body = client.sign(url, method, body, headers)
headers.update(signed_headers) | python | def sign_request(self, url, method, body, headers):
"""Sign a request.
:param url: The URL to which the request is to be sent.
:param headers: The headers in the request. These will be updated with
the signature.
"""
# The use of PLAINTEXT here was copied from MAAS, but we should switch
# to HMAC once it works server-side.
client = oauth1.Client(
self.consumer_key, self.consumer_secret, self.token_key,
self.token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT,
realm=self.realm)
# To preserve API backward compatibility convert an empty string body
# to `None`. The old "oauth" library would treat the empty string as
# "no body", but "oauthlib" requires `None`.
body = None if body is None or len(body) == 0 else body
uri, signed_headers, body = client.sign(url, method, body, headers)
headers.update(signed_headers) | [
"def",
"sign_request",
"(",
"self",
",",
"url",
",",
"method",
",",
"body",
",",
"headers",
")",
":",
"# The use of PLAINTEXT here was copied from MAAS, but we should switch",
"# to HMAC once it works server-side.",
"client",
"=",
"oauth1",
".",
"Client",
"(",
"self",
"... | Sign a request.
:param url: The URL to which the request is to be sent.
:param headers: The headers in the request. These will be updated with
the signature. | [
"Sign",
"a",
"request",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L145-L163 | train | 28,845 |
maas/python-libmaas | maas/client/utils/__init__.py | SpinnerContext.print | def print(self, *args, **kwargs):
"""Print inside of the spinner context.
This must be used when inside of a spinner context to ensure that
the line printed doesn't overwrite an already existing spinner line.
"""
clear_len = max(len(self._prev_msg), len(self.msg)) + 4
self.spinner.stream.write("%s\r" % (' ' * clear_len))
print(*args, file=self.spinner.stream, flush=True, **kwargs) | python | def print(self, *args, **kwargs):
"""Print inside of the spinner context.
This must be used when inside of a spinner context to ensure that
the line printed doesn't overwrite an already existing spinner line.
"""
clear_len = max(len(self._prev_msg), len(self.msg)) + 4
self.spinner.stream.write("%s\r" % (' ' * clear_len))
print(*args, file=self.spinner.stream, flush=True, **kwargs) | [
"def",
"print",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"clear_len",
"=",
"max",
"(",
"len",
"(",
"self",
".",
"_prev_msg",
")",
",",
"len",
"(",
"self",
".",
"msg",
")",
")",
"+",
"4",
"self",
".",
"spinner",
".",
"... | Print inside of the spinner context.
This must be used when inside of a spinner context to ensure that
the line printed doesn't overwrite an already existing spinner line. | [
"Print",
"inside",
"of",
"the",
"spinner",
"context",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/__init__.py#L342-L350 | train | 28,846 |
maas/python-libmaas | maas/client/viscera/raids.py | RaidsType.create | async def create(
cls, node: Union[Node, str],
level: Union[RaidLevel, str],
devices: Iterable[Union[BlockDevice, Partition]], *,
name: str = None, uuid: str = None,
spare_devices: Iterable[Union[BlockDevice, Partition]]):
"""
Create a RAID on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param level: RAID level.
:type level: `RaidLevel`
:param devices: Mixed list of block devices or partitions to create
the RAID from.
:type devices: iterable of mixed type of `BlockDevice` or `Partition`
:param name: Name of the RAID (optional).
:type name: `str`
:param uuid: The UUID for the RAID (optional).
:type uuid: `str`
:param spare_devices: Mixed list of block devices or partitions to add
as spare devices on the RAID.
:type spare_devices: iterable of mixed type of `BlockDevice` or
`Partition`
"""
if isinstance(level, RaidLevel):
level = level.value
params = {
'level': str(level),
}
if isinstance(node, str):
params['system_id'] = node
elif isinstance(node, Node):
params['system_id'] = node.system_id
else:
raise TypeError(
'node must be a Node or str, not %s' % (
type(node).__name__))
if len(devices) == 0:
raise ValueError("devices must contain at least one device.")
block_devices = []
partitions = []
for idx, device in enumerate(devices):
if isinstance(device, BlockDevice):
block_devices.append(device.id)
elif isinstance(device, Partition):
partitions.append(device.id)
else:
raise TypeError(
"devices[%d] must be a BlockDevice or "
"Partition, not %s" % type(device).__name__)
if len(block_devices) > 0:
params['block_devices'] = block_devices
if len(partitions) > 0:
params['partitions'] = partitions
spare_block_devices = []
spare_partitions = []
for idx, device in enumerate(spare_devices):
if isinstance(device, BlockDevice):
spare_block_devices.append(device.id)
elif isinstance(device, Partition):
spare_partitions.append(device.id)
else:
raise TypeError(
"spare_devices[%d] must be a BlockDevice or "
"Partition, not %s" % type(device).__name__)
if len(spare_block_devices) > 0:
params['spare_devices'] = spare_block_devices
if len(spare_partitions) > 0:
params['spare_partitions'] = spare_partitions
if name is not None:
params['name'] = name
if uuid is not None:
params['uuid'] = uuid
return cls._object(await cls._handler.create(**params)) | python | async def create(
cls, node: Union[Node, str],
level: Union[RaidLevel, str],
devices: Iterable[Union[BlockDevice, Partition]], *,
name: str = None, uuid: str = None,
spare_devices: Iterable[Union[BlockDevice, Partition]]):
"""
Create a RAID on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param level: RAID level.
:type level: `RaidLevel`
:param devices: Mixed list of block devices or partitions to create
the RAID from.
:type devices: iterable of mixed type of `BlockDevice` or `Partition`
:param name: Name of the RAID (optional).
:type name: `str`
:param uuid: The UUID for the RAID (optional).
:type uuid: `str`
:param spare_devices: Mixed list of block devices or partitions to add
as spare devices on the RAID.
:type spare_devices: iterable of mixed type of `BlockDevice` or
`Partition`
"""
if isinstance(level, RaidLevel):
level = level.value
params = {
'level': str(level),
}
if isinstance(node, str):
params['system_id'] = node
elif isinstance(node, Node):
params['system_id'] = node.system_id
else:
raise TypeError(
'node must be a Node or str, not %s' % (
type(node).__name__))
if len(devices) == 0:
raise ValueError("devices must contain at least one device.")
block_devices = []
partitions = []
for idx, device in enumerate(devices):
if isinstance(device, BlockDevice):
block_devices.append(device.id)
elif isinstance(device, Partition):
partitions.append(device.id)
else:
raise TypeError(
"devices[%d] must be a BlockDevice or "
"Partition, not %s" % type(device).__name__)
if len(block_devices) > 0:
params['block_devices'] = block_devices
if len(partitions) > 0:
params['partitions'] = partitions
spare_block_devices = []
spare_partitions = []
for idx, device in enumerate(spare_devices):
if isinstance(device, BlockDevice):
spare_block_devices.append(device.id)
elif isinstance(device, Partition):
spare_partitions.append(device.id)
else:
raise TypeError(
"spare_devices[%d] must be a BlockDevice or "
"Partition, not %s" % type(device).__name__)
if len(spare_block_devices) > 0:
params['spare_devices'] = spare_block_devices
if len(spare_partitions) > 0:
params['spare_partitions'] = spare_partitions
if name is not None:
params['name'] = name
if uuid is not None:
params['uuid'] = uuid
return cls._object(await cls._handler.create(**params)) | [
"async",
"def",
"create",
"(",
"cls",
",",
"node",
":",
"Union",
"[",
"Node",
",",
"str",
"]",
",",
"level",
":",
"Union",
"[",
"RaidLevel",
",",
"str",
"]",
",",
"devices",
":",
"Iterable",
"[",
"Union",
"[",
"BlockDevice",
",",
"Partition",
"]",
... | Create a RAID on a Node.
:param node: Node to create the interface on.
:type node: `Node` or `str`
:param level: RAID level.
:type level: `RaidLevel`
:param devices: Mixed list of block devices or partitions to create
the RAID from.
:type devices: iterable of mixed type of `BlockDevice` or `Partition`
:param name: Name of the RAID (optional).
:type name: `str`
:param uuid: The UUID for the RAID (optional).
:type uuid: `str`
:param spare_devices: Mixed list of block devices or partitions to add
as spare devices on the RAID.
:type spare_devices: iterable of mixed type of `BlockDevice` or
`Partition` | [
"Create",
"a",
"RAID",
"on",
"a",
"Node",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/raids.py#L88-L166 | train | 28,847 |
maas/python-libmaas | maas/client/viscera/static_routes.py | StaticRoutesType.create | async def create(cls, destination: Union[int, Subnet],
source: Union[int, Subnet], gateway_ip: str, metric: int):
"""
Create a `StaticRoute` in MAAS.
:param name: The name of the `StaticRoute` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `StaticRoute` (optional).
:type description: `str`
:param class_type: The class type of the `StaticRoute` (optional).
:type class_type: `str`
:returns: The created StaticRoute
:rtype: `StaticRoute`
"""
params = {
"gateway_ip": gateway_ip,
"metric": metric,
}
if isinstance(source, Subnet):
params["source"] = source.id
elif isinstance(source, int):
params["source"] = source
if isinstance(destination, Subnet):
params["destination"] = destination.id
elif isinstance(destination, int):
params["destination"] = destination
return cls._object(await cls._handler.create(**params)) | python | async def create(cls, destination: Union[int, Subnet],
source: Union[int, Subnet], gateway_ip: str, metric: int):
"""
Create a `StaticRoute` in MAAS.
:param name: The name of the `StaticRoute` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `StaticRoute` (optional).
:type description: `str`
:param class_type: The class type of the `StaticRoute` (optional).
:type class_type: `str`
:returns: The created StaticRoute
:rtype: `StaticRoute`
"""
params = {
"gateway_ip": gateway_ip,
"metric": metric,
}
if isinstance(source, Subnet):
params["source"] = source.id
elif isinstance(source, int):
params["source"] = source
if isinstance(destination, Subnet):
params["destination"] = destination.id
elif isinstance(destination, int):
params["destination"] = destination
return cls._object(await cls._handler.create(**params)) | [
"async",
"def",
"create",
"(",
"cls",
",",
"destination",
":",
"Union",
"[",
"int",
",",
"Subnet",
"]",
",",
"source",
":",
"Union",
"[",
"int",
",",
"Subnet",
"]",
",",
"gateway_ip",
":",
"str",
",",
"metric",
":",
"int",
")",
":",
"params",
"=",
... | Create a `StaticRoute` in MAAS.
:param name: The name of the `StaticRoute` (optional, will be given a
default value if not specified).
:type name: `str`
:param description: A description of the `StaticRoute` (optional).
:type description: `str`
:param class_type: The class type of the `StaticRoute` (optional).
:type class_type: `str`
:returns: The created StaticRoute
:rtype: `StaticRoute` | [
"Create",
"a",
"StaticRoute",
"in",
"MAAS",
"."
] | 4092c68ef7fb1753efc843569848e2bcc3415002 | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/static_routes.py#L27-L54 | train | 28,848 |
pytest-dev/pytest-runner | ptr.py | CustomizedDist.fetch_build_egg | def fetch_build_egg(self, req):
""" Specialized version of Distribution.fetch_build_egg
that respects respects allow_hosts and index_url. """
from setuptools.command.easy_install import easy_install
dist = Distribution({'script_args': ['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
if self.allow_hosts:
opts['allow_hosts'] = ('test', self.allow_hosts)
if self.index_url:
opts['index_url'] = ('test', self.index_url)
install_dir_func = getattr(self, 'get_egg_cache_dir', _os.getcwd)
install_dir = install_dir_func()
cmd = easy_install(
dist, args=["x"], install_dir=install_dir,
exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
return cmd.easy_install(req) | python | def fetch_build_egg(self, req):
""" Specialized version of Distribution.fetch_build_egg
that respects respects allow_hosts and index_url. """
from setuptools.command.easy_install import easy_install
dist = Distribution({'script_args': ['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts'
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
if self.allow_hosts:
opts['allow_hosts'] = ('test', self.allow_hosts)
if self.index_url:
opts['index_url'] = ('test', self.index_url)
install_dir_func = getattr(self, 'get_egg_cache_dir', _os.getcwd)
install_dir = install_dir_func()
cmd = easy_install(
dist, args=["x"], install_dir=install_dir,
exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
return cmd.easy_install(req) | [
"def",
"fetch_build_egg",
"(",
"self",
",",
"req",
")",
":",
"from",
"setuptools",
".",
"command",
".",
"easy_install",
"import",
"easy_install",
"dist",
"=",
"Distribution",
"(",
"{",
"'script_args'",
":",
"[",
"'easy_install'",
"]",
"}",
")",
"dist",
".",
... | Specialized version of Distribution.fetch_build_egg
that respects respects allow_hosts and index_url. | [
"Specialized",
"version",
"of",
"Distribution",
".",
"fetch_build_egg",
"that",
"respects",
"respects",
"allow_hosts",
"and",
"index_url",
"."
] | 1bfab3a9b1cc68a3c8a2722fe1a7522ddc61ec2e | https://github.com/pytest-dev/pytest-runner/blob/1bfab3a9b1cc68a3c8a2722fe1a7522ddc61ec2e/ptr.py#L40-L72 | train | 28,849 |
PokeAPI/pokepy | pokepy/api.py | V2Client._caching | def _caching(self, disk_or_memory, cache_directory=None):
"""
Decorator that allows caching the outputs of the BaseClient get methods.
Cache can be either disk- or memory-based.
Disk-based cache is reloaded automatically between runs if the same
cache directory is specified.
Cache is kept per each unique uid.
ex:
>> client.get_pokemon(1) -> output gets cached
>> client.get_pokemon(uid=1) -> output already cached
>> client.get_pokemon(2) -> output gets cached
Parameters
----------
disk_or_memory: str
Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
cache_directory: str
Specify the directory for the disk-based cache.
Optional, will chose an appropriate and platform-specific directory if not specified.
Ignored if memory-based cache is selected.
"""
if disk_or_memory not in ('disk', 'memory'):
raise ValueError('Accepted values are "disk" or "memory"')
# Because of how BaseClient get methods are generated, they don't get a proper __name__.
# As such, it is hard to generate a specific cache directory name for each get method.
# Therefore, I decided to just generate a number for each folder, starting at zero.
# The same get methods get the same number every time because their order doesn't change.
# Also, variable is incremented inside a list because nonlocals are only python 3.0 and up.
get_methods_id = [0]
def memoize(func):
_global_cache_dir = ''
if disk_or_memory == 'disk':
if cache_directory:
# Python 2 workaround
if sys.version_info[0] == 2 and not isinstance(cache_directory, str):
raise TypeError('expected str')
_global_cache_dir = os.path.join(cache_directory, 'pokepy_cache')
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
else:
_global_cache_dir = appdirs.user_cache_dir('pokepy_cache', False,
opinion=False)
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
cache = FileCache('pokepy', flag='cs', app_cache_dir=cache_dir)
get_methods_id[0] += 1
else: # 'memory'
cache = {}
_global_cache_dir = 'ram'
# global cache directory
# should only be set when setting the first get method
if not self._cache_location_global:
self._cache_location_global = _global_cache_dir
hits = [0]
misses = [0]
def cache_info():
return self._cache_info_(hits[0], misses[0], len(cache))
def cache_clear():
# global cache info
self._cache_hits_global -= hits[0]
self._cache_misses_global -= misses[0]
self._cache_len_global -= len(cache)
# local cache info
hits[0] = 0
misses[0] = 0
cache.clear() # for disk-based cache, files are deleted but not the directories
if disk_or_memory == 'disk':
cache.create() # recreate cache file handles
def cache_location():
return 'ram' if disk_or_memory == 'memory' else cache.cache_dir
@functools.wraps(func)
def memoizer(*args, **kwargs):
# arguments to the get methods can be a value or uid=value
key = str(args[1]) if len(args) > 1 else str(kwargs.get("uid"))
if key not in cache:
# local and global cache info
misses[0] += 1
self._cache_misses_global += 1
cache[key] = func(*args, **kwargs)
self._cache_len_global += 1
else:
self._cache_hits_global += 1 # global cache info
hits[0] += 1 # local cache info
return cache[key]
memoizer.cache_info = cache_info
memoizer.cache_clear = cache_clear
memoizer.cache_location = cache_location
return memoizer
return memoize | python | def _caching(self, disk_or_memory, cache_directory=None):
"""
Decorator that allows caching the outputs of the BaseClient get methods.
Cache can be either disk- or memory-based.
Disk-based cache is reloaded automatically between runs if the same
cache directory is specified.
Cache is kept per each unique uid.
ex:
>> client.get_pokemon(1) -> output gets cached
>> client.get_pokemon(uid=1) -> output already cached
>> client.get_pokemon(2) -> output gets cached
Parameters
----------
disk_or_memory: str
Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
cache_directory: str
Specify the directory for the disk-based cache.
Optional, will chose an appropriate and platform-specific directory if not specified.
Ignored if memory-based cache is selected.
"""
if disk_or_memory not in ('disk', 'memory'):
raise ValueError('Accepted values are "disk" or "memory"')
# Because of how BaseClient get methods are generated, they don't get a proper __name__.
# As such, it is hard to generate a specific cache directory name for each get method.
# Therefore, I decided to just generate a number for each folder, starting at zero.
# The same get methods get the same number every time because their order doesn't change.
# Also, variable is incremented inside a list because nonlocals are only python 3.0 and up.
get_methods_id = [0]
def memoize(func):
_global_cache_dir = ''
if disk_or_memory == 'disk':
if cache_directory:
# Python 2 workaround
if sys.version_info[0] == 2 and not isinstance(cache_directory, str):
raise TypeError('expected str')
_global_cache_dir = os.path.join(cache_directory, 'pokepy_cache')
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
else:
_global_cache_dir = appdirs.user_cache_dir('pokepy_cache', False,
opinion=False)
cache_dir = os.path.join(_global_cache_dir, str(get_methods_id[0]))
cache = FileCache('pokepy', flag='cs', app_cache_dir=cache_dir)
get_methods_id[0] += 1
else: # 'memory'
cache = {}
_global_cache_dir = 'ram'
# global cache directory
# should only be set when setting the first get method
if not self._cache_location_global:
self._cache_location_global = _global_cache_dir
hits = [0]
misses = [0]
def cache_info():
return self._cache_info_(hits[0], misses[0], len(cache))
def cache_clear():
# global cache info
self._cache_hits_global -= hits[0]
self._cache_misses_global -= misses[0]
self._cache_len_global -= len(cache)
# local cache info
hits[0] = 0
misses[0] = 0
cache.clear() # for disk-based cache, files are deleted but not the directories
if disk_or_memory == 'disk':
cache.create() # recreate cache file handles
def cache_location():
return 'ram' if disk_or_memory == 'memory' else cache.cache_dir
@functools.wraps(func)
def memoizer(*args, **kwargs):
# arguments to the get methods can be a value or uid=value
key = str(args[1]) if len(args) > 1 else str(kwargs.get("uid"))
if key not in cache:
# local and global cache info
misses[0] += 1
self._cache_misses_global += 1
cache[key] = func(*args, **kwargs)
self._cache_len_global += 1
else:
self._cache_hits_global += 1 # global cache info
hits[0] += 1 # local cache info
return cache[key]
memoizer.cache_info = cache_info
memoizer.cache_clear = cache_clear
memoizer.cache_location = cache_location
return memoizer
return memoize | [
"def",
"_caching",
"(",
"self",
",",
"disk_or_memory",
",",
"cache_directory",
"=",
"None",
")",
":",
"if",
"disk_or_memory",
"not",
"in",
"(",
"'disk'",
",",
"'memory'",
")",
":",
"raise",
"ValueError",
"(",
"'Accepted values are \"disk\" or \"memory\"'",
")",
... | Decorator that allows caching the outputs of the BaseClient get methods.
Cache can be either disk- or memory-based.
Disk-based cache is reloaded automatically between runs if the same
cache directory is specified.
Cache is kept per each unique uid.
ex:
>> client.get_pokemon(1) -> output gets cached
>> client.get_pokemon(uid=1) -> output already cached
>> client.get_pokemon(2) -> output gets cached
Parameters
----------
disk_or_memory: str
Specify if the cache is disk- or memory-based. Accepts 'disk' or 'memory'.
cache_directory: str
Specify the directory for the disk-based cache.
Optional, will chose an appropriate and platform-specific directory if not specified.
Ignored if memory-based cache is selected. | [
"Decorator",
"that",
"allows",
"caching",
"the",
"outputs",
"of",
"the",
"BaseClient",
"get",
"methods",
".",
"Cache",
"can",
"be",
"either",
"disk",
"-",
"or",
"memory",
"-",
"based",
".",
"Disk",
"-",
"based",
"cache",
"is",
"reloaded",
"automatically",
... | 1154649c4d28414e487080d2601acd4912eeb8a5 | https://github.com/PokeAPI/pokepy/blob/1154649c4d28414e487080d2601acd4912eeb8a5/pokepy/api.py#L170-L272 | train | 28,850 |
PokeAPI/pokepy | pokepy/resources_v2.py | BaseResource.set_subresources | def set_subresources(self, **kwargs):
"""Same logic as the original except for the first 'if' clause."""
for attribute_name, resource in self._subresource_map.items():
sub_attr = kwargs.get(attribute_name)
if sub_attr is None:
# Attribute was not found or is null
value = None
elif isinstance(sub_attr, list):
# A list of subresources is supported
value = [resource(**x) for x in sub_attr]
else:
# So is a single resource
value = resource(**sub_attr)
setattr(self, attribute_name, value) | python | def set_subresources(self, **kwargs):
"""Same logic as the original except for the first 'if' clause."""
for attribute_name, resource in self._subresource_map.items():
sub_attr = kwargs.get(attribute_name)
if sub_attr is None:
# Attribute was not found or is null
value = None
elif isinstance(sub_attr, list):
# A list of subresources is supported
value = [resource(**x) for x in sub_attr]
else:
# So is a single resource
value = resource(**sub_attr)
setattr(self, attribute_name, value) | [
"def",
"set_subresources",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"attribute_name",
",",
"resource",
"in",
"self",
".",
"_subresource_map",
".",
"items",
"(",
")",
":",
"sub_attr",
"=",
"kwargs",
".",
"get",
"(",
"attribute_name",
")",
"i... | Same logic as the original except for the first 'if' clause. | [
"Same",
"logic",
"as",
"the",
"original",
"except",
"for",
"the",
"first",
"if",
"clause",
"."
] | 1154649c4d28414e487080d2601acd4912eeb8a5 | https://github.com/PokeAPI/pokepy/blob/1154649c4d28414e487080d2601acd4912eeb8a5/pokepy/resources_v2.py#L19-L32 | train | 28,851 |
fbradyirl/hikvision | hikvision/api.py | build_url_base | def build_url_base(host, port, is_https):
"""
Make base of url based on config
"""
base = "http"
if is_https:
base += 's'
base += "://"
base += host
if port:
base += ":"
base += str(port)
return base | python | def build_url_base(host, port, is_https):
"""
Make base of url based on config
"""
base = "http"
if is_https:
base += 's'
base += "://"
base += host
if port:
base += ":"
base += str(port)
return base | [
"def",
"build_url_base",
"(",
"host",
",",
"port",
",",
"is_https",
")",
":",
"base",
"=",
"\"http\"",
"if",
"is_https",
":",
"base",
"+=",
"'s'",
"base",
"+=",
"\"://\"",
"base",
"+=",
"host",
"if",
"port",
":",
"base",
"+=",
"\":\"",
"base",
"+=",
... | Make base of url based on config | [
"Make",
"base",
"of",
"url",
"based",
"on",
"config"
] | 3bc3b20b8f7d793cf9dd94777e4b8e82bfd4abc6 | https://github.com/fbradyirl/hikvision/blob/3bc3b20b8f7d793cf9dd94777e4b8e82bfd4abc6/hikvision/api.py#L27-L42 | train | 28,852 |
fbradyirl/hikvision | hikvision/api.py | CreateDevice.is_motion_detection_enabled | def is_motion_detection_enabled(self):
""" Get current state of Motion Detection """
response = requests.get(self.motion_url, auth=HTTPBasicAuth(
self._username, self._password))
_LOGGING.debug('Response: %s', response.text)
if response.status_code != 200:
_LOGGING.error(
"There was an error connecting to %s", self.motion_url)
_LOGGING.error("status_code %s", response.status_code)
return
try:
tree = ElementTree.fromstring(response.text)
enabled_element = tree.findall(
'.//{%s}enabled' % self._xml_namespace)
sensitivity_level_element = tree.findall(
'.//{%s}sensitivityLevel' % self._xml_namespace)
if len(enabled_element) == 0:
_LOGGING.error("Problem getting motion detection status")
return
if len(sensitivity_level_element) == 0:
_LOGGING.error("Problem getting sensitivityLevel status")
return
result = enabled_element[0].text.strip()
_LOGGING.info(
'Current motion detection state? enabled: %s', result)
if int(sensitivity_level_element[0].text) == 0:
_LOGGING.warn(
"sensitivityLevel is 0.")
sensitivity_level_element[0].text = str(
self._sensitivity_level)
_LOGGING.info(
"sensitivityLevel now set to %s", self._sensitivity_level)
if result == 'true':
# Save this for future switch off
self.xml_motion_detection_on = ElementTree.tostring(
tree, encoding=XML_ENCODING)
enabled_element[0].text = 'false'
self.xml_motion_detection_off = ElementTree.tostring(
tree, encoding=XML_ENCODING)
return True
else:
# Save this for future switch on
self.xml_motion_detection_off = ElementTree.tostring(
tree, encoding=XML_ENCODING)
enabled_element[0].text = 'true'
self.xml_motion_detection_on = ElementTree.tostring(
tree, encoding=XML_ENCODING)
return False
except AttributeError as attib_err:
_LOGGING.error(
'There was a problem parsing '
'camera motion detection state: %s', attib_err)
return | python | def is_motion_detection_enabled(self):
""" Get current state of Motion Detection """
response = requests.get(self.motion_url, auth=HTTPBasicAuth(
self._username, self._password))
_LOGGING.debug('Response: %s', response.text)
if response.status_code != 200:
_LOGGING.error(
"There was an error connecting to %s", self.motion_url)
_LOGGING.error("status_code %s", response.status_code)
return
try:
tree = ElementTree.fromstring(response.text)
enabled_element = tree.findall(
'.//{%s}enabled' % self._xml_namespace)
sensitivity_level_element = tree.findall(
'.//{%s}sensitivityLevel' % self._xml_namespace)
if len(enabled_element) == 0:
_LOGGING.error("Problem getting motion detection status")
return
if len(sensitivity_level_element) == 0:
_LOGGING.error("Problem getting sensitivityLevel status")
return
result = enabled_element[0].text.strip()
_LOGGING.info(
'Current motion detection state? enabled: %s', result)
if int(sensitivity_level_element[0].text) == 0:
_LOGGING.warn(
"sensitivityLevel is 0.")
sensitivity_level_element[0].text = str(
self._sensitivity_level)
_LOGGING.info(
"sensitivityLevel now set to %s", self._sensitivity_level)
if result == 'true':
# Save this for future switch off
self.xml_motion_detection_on = ElementTree.tostring(
tree, encoding=XML_ENCODING)
enabled_element[0].text = 'false'
self.xml_motion_detection_off = ElementTree.tostring(
tree, encoding=XML_ENCODING)
return True
else:
# Save this for future switch on
self.xml_motion_detection_off = ElementTree.tostring(
tree, encoding=XML_ENCODING)
enabled_element[0].text = 'true'
self.xml_motion_detection_on = ElementTree.tostring(
tree, encoding=XML_ENCODING)
return False
except AttributeError as attib_err:
_LOGGING.error(
'There was a problem parsing '
'camera motion detection state: %s', attib_err)
return | [
"def",
"is_motion_detection_enabled",
"(",
"self",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"motion_url",
",",
"auth",
"=",
"HTTPBasicAuth",
"(",
"self",
".",
"_username",
",",
"self",
".",
"_password",
")",
")",
"_LOGGING",
"."... | Get current state of Motion Detection | [
"Get",
"current",
"state",
"of",
"Motion",
"Detection"
] | 3bc3b20b8f7d793cf9dd94777e4b8e82bfd4abc6 | https://github.com/fbradyirl/hikvision/blob/3bc3b20b8f7d793cf9dd94777e4b8e82bfd4abc6/hikvision/api.py#L165-L225 | train | 28,853 |
fbradyirl/hikvision | hikvision/api.py | CreateDevice.put_motion_detection_xml | def put_motion_detection_xml(self, xml):
""" Put request with xml Motion Detection """
_LOGGING.debug('xml:')
_LOGGING.debug("%s", xml)
headers = DEFAULT_HEADERS
headers['Content-Length'] = len(xml)
headers['Host'] = self._host
response = requests.put(self.motion_url, auth=HTTPBasicAuth(
self._username, self._password), data=xml, headers=headers)
_LOGGING.debug('request.headers:')
_LOGGING.debug('%s', response.request.headers)
_LOGGING.debug('Response:')
_LOGGING.debug('%s', response.text)
if response.status_code != 200:
_LOGGING.error(
"There was an error connecting to %s", self.motion_url)
_LOGGING.error("status_code %s", response.status_code)
return
try:
tree = ElementTree.fromstring(response.text)
enabled_element = tree.findall(
'.//{%s}statusString' % self._xml_namespace)
if len(enabled_element) == 0:
_LOGGING.error("Problem getting motion detection status")
return
if enabled_element[0].text.strip() == 'OK':
_LOGGING.info('Updated successfully')
except AttributeError as attib_err:
_LOGGING.error(
'There was a problem parsing the response: %s', attib_err)
return | python | def put_motion_detection_xml(self, xml):
""" Put request with xml Motion Detection """
_LOGGING.debug('xml:')
_LOGGING.debug("%s", xml)
headers = DEFAULT_HEADERS
headers['Content-Length'] = len(xml)
headers['Host'] = self._host
response = requests.put(self.motion_url, auth=HTTPBasicAuth(
self._username, self._password), data=xml, headers=headers)
_LOGGING.debug('request.headers:')
_LOGGING.debug('%s', response.request.headers)
_LOGGING.debug('Response:')
_LOGGING.debug('%s', response.text)
if response.status_code != 200:
_LOGGING.error(
"There was an error connecting to %s", self.motion_url)
_LOGGING.error("status_code %s", response.status_code)
return
try:
tree = ElementTree.fromstring(response.text)
enabled_element = tree.findall(
'.//{%s}statusString' % self._xml_namespace)
if len(enabled_element) == 0:
_LOGGING.error("Problem getting motion detection status")
return
if enabled_element[0].text.strip() == 'OK':
_LOGGING.info('Updated successfully')
except AttributeError as attib_err:
_LOGGING.error(
'There was a problem parsing the response: %s', attib_err)
return | [
"def",
"put_motion_detection_xml",
"(",
"self",
",",
"xml",
")",
":",
"_LOGGING",
".",
"debug",
"(",
"'xml:'",
")",
"_LOGGING",
".",
"debug",
"(",
"\"%s\"",
",",
"xml",
")",
"headers",
"=",
"DEFAULT_HEADERS",
"headers",
"[",
"'Content-Length'",
"]",
"=",
"... | Put request with xml Motion Detection | [
"Put",
"request",
"with",
"xml",
"Motion",
"Detection"
] | 3bc3b20b8f7d793cf9dd94777e4b8e82bfd4abc6 | https://github.com/fbradyirl/hikvision/blob/3bc3b20b8f7d793cf9dd94777e4b8e82bfd4abc6/hikvision/api.py#L237-L273 | train | 28,854 |
marrow/mailer | marrow/mailer/validator.py | DomainValidator._apply_common_rules | def _apply_common_rules(self, part, maxlength):
"""This method contains the rules that must be applied to both the
domain and the local part of the e-mail address.
"""
part = part.strip()
if self.fix:
part = part.strip('.')
if not part:
return part, 'It cannot be empty.'
if len(part) > maxlength:
return part, 'It cannot be longer than %i chars.' % maxlength
if part[0] == '.':
return part, 'It cannot start with a dot.'
if part[-1] == '.':
return part, 'It cannot end with a dot.'
if '..' in part:
return part, 'It cannot contain consecutive dots.'
return part, '' | python | def _apply_common_rules(self, part, maxlength):
"""This method contains the rules that must be applied to both the
domain and the local part of the e-mail address.
"""
part = part.strip()
if self.fix:
part = part.strip('.')
if not part:
return part, 'It cannot be empty.'
if len(part) > maxlength:
return part, 'It cannot be longer than %i chars.' % maxlength
if part[0] == '.':
return part, 'It cannot start with a dot.'
if part[-1] == '.':
return part, 'It cannot end with a dot.'
if '..' in part:
return part, 'It cannot contain consecutive dots.'
return part, '' | [
"def",
"_apply_common_rules",
"(",
"self",
",",
"part",
",",
"maxlength",
")",
":",
"part",
"=",
"part",
".",
"strip",
"(",
")",
"if",
"self",
".",
"fix",
":",
"part",
"=",
"part",
".",
"strip",
"(",
"'.'",
")",
"if",
"not",
"part",
":",
"return",
... | This method contains the rules that must be applied to both the
domain and the local part of the e-mail address. | [
"This",
"method",
"contains",
"the",
"rules",
"that",
"must",
"be",
"applied",
"to",
"both",
"the",
"domain",
"and",
"the",
"local",
"part",
"of",
"the",
"e",
"-",
"mail",
"address",
"."
] | 3995ef98a3f7feb75f1aeb652e6afe40a5c94def | https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/validator.py#L160-L184 | train | 28,855 |
marrow/mailer | marrow/mailer/message.py | Message.mime | def mime(self):
"""Produce the final MIME message."""
author = self.author
sender = self.sender
if not author:
raise ValueError("You must specify an author.")
if not self.subject:
raise ValueError("You must specify a subject.")
if len(self.recipients) == 0:
raise ValueError("You must specify at least one recipient.")
if not self.plain:
raise ValueError("You must provide plain text content.")
# DISCUSS: Take the first author, or raise this error?
# if len(author) > 1 and len(sender) == 0:
# raise ValueError('If there are multiple authors of message, you must specify a sender!')
# if len(sender) > 1:
# raise ValueError('You must not specify more than one sender!')
if not self._dirty and self._processed:
return self._mime
self._processed = False
plain = MIMEText(self._callable(self.plain), 'plain', self.encoding)
rich = None
if self.rich:
rich = MIMEText(self._callable(self.rich), 'html', self.encoding)
message = self._mime_document(plain, rich)
headers = self._build_header_list(author, sender)
self._add_headers_to_message(message, headers)
self._mime = message
self._processed = True
self._dirty = False
return message | python | def mime(self):
"""Produce the final MIME message."""
author = self.author
sender = self.sender
if not author:
raise ValueError("You must specify an author.")
if not self.subject:
raise ValueError("You must specify a subject.")
if len(self.recipients) == 0:
raise ValueError("You must specify at least one recipient.")
if not self.plain:
raise ValueError("You must provide plain text content.")
# DISCUSS: Take the first author, or raise this error?
# if len(author) > 1 and len(sender) == 0:
# raise ValueError('If there are multiple authors of message, you must specify a sender!')
# if len(sender) > 1:
# raise ValueError('You must not specify more than one sender!')
if not self._dirty and self._processed:
return self._mime
self._processed = False
plain = MIMEText(self._callable(self.plain), 'plain', self.encoding)
rich = None
if self.rich:
rich = MIMEText(self._callable(self.rich), 'html', self.encoding)
message = self._mime_document(plain, rich)
headers = self._build_header_list(author, sender)
self._add_headers_to_message(message, headers)
self._mime = message
self._processed = True
self._dirty = False
return message | [
"def",
"mime",
"(",
"self",
")",
":",
"author",
"=",
"self",
".",
"author",
"sender",
"=",
"self",
".",
"sender",
"if",
"not",
"author",
":",
"raise",
"ValueError",
"(",
"\"You must specify an author.\"",
")",
"if",
"not",
"self",
".",
"subject",
":",
"r... | Produce the final MIME message. | [
"Produce",
"the",
"final",
"MIME",
"message",
"."
] | 3995ef98a3f7feb75f1aeb652e6afe40a5c94def | https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/message.py#L209-L252 | train | 28,856 |
marrow/mailer | marrow/mailer/message.py | Message.attach | def attach(self, name, data=None, maintype=None, subtype=None,
inline=False, filename=None, filename_charset='', filename_language='',
encoding=None):
"""Attach a file to this message.
:param name: Path to the file to attach if data is None, or the name
of the file if the ``data`` argument is given
:param data: Contents of the file to attach, or None if the data is to
be read from the file pointed to by the ``name`` argument
:type data: bytes or a file-like object
:param maintype: First part of the MIME type of the file -- will be
automatically guessed if not given
:param subtype: Second part of the MIME type of the file -- will be
automatically guessed if not given
:param inline: Whether to set the Content-Disposition for the file to
"inline" (True) or "attachment" (False)
:param filename: The file name of the attached file as seen
by the user in his/her mail client.
:param filename_charset: Charset used for the filename paramenter. Allows for
attachment names with characters from UTF-8 or Latin 1. See RFC 2231.
:param filename_language: Used to specify what language the filename is in. See RFC 2231.
:param encoding: Value of the Content-Encoding MIME header (e.g. "gzip"
in case of .tar.gz, but usually empty)
"""
self._dirty = True
if not maintype:
maintype, guessed_encoding = guess_type(name)
encoding = encoding or guessed_encoding
if not maintype:
maintype, subtype = 'application', 'octet-stream'
else:
maintype, _, subtype = maintype.partition('/')
part = MIMENonMultipart(maintype, subtype)
part.add_header('Content-Transfer-Encoding', 'base64')
if encoding:
part.add_header('Content-Encoding', encoding)
if data is None:
with open(name, 'rb') as fp:
value = fp.read()
name = os.path.basename(name)
elif isinstance(data, bytes):
value = data
elif hasattr(data, 'read'):
value = data.read()
else:
raise TypeError("Unable to read attachment contents")
part.set_payload(base64.encodestring(value))
if not filename:
filename = name
filename = os.path.basename(filename)
if filename_charset or filename_language:
if not filename_charset:
filename_charset = 'utf-8'
# See https://docs.python.org/2/library/email.message.html#email.message.Message.add_header
# for more information.
# add_header() in the email module expects its arguments to be ASCII strings. Go ahead and handle
# the case where these arguments come in as unicode strings, since encoding ASCII strings
# as UTF-8 can't hurt.
if sys.version_info < (3, 0):
filename=(filename_charset.encode('utf-8'), filename_language.encode('utf-8'), filename.encode('utf-8'))
else:
filename=(filename_charset, filename_language, filename)
if inline:
if sys.version_info < (3, 0):
part.add_header('Content-Disposition'.encode('utf-8'), 'inline'.encode('utf-8'), filename=filename)
part.add_header('Content-ID'.encode('utf-8'), '<%s>'.encode('utf-8') % filename)
else:
part.add_header('Content-Disposition', 'inline', filename=filename)
part.add_header('Content-ID', '<%s>' % filename)
self.embedded.append(part)
else:
if sys.version_info < (3, 0):
part.add_header('Content-Disposition'.encode('utf-8'), 'attachment'.encode('utf-8'), filename=filename)
else:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.attachments.append(part) | python | def attach(self, name, data=None, maintype=None, subtype=None,
inline=False, filename=None, filename_charset='', filename_language='',
encoding=None):
"""Attach a file to this message.
:param name: Path to the file to attach if data is None, or the name
of the file if the ``data`` argument is given
:param data: Contents of the file to attach, or None if the data is to
be read from the file pointed to by the ``name`` argument
:type data: bytes or a file-like object
:param maintype: First part of the MIME type of the file -- will be
automatically guessed if not given
:param subtype: Second part of the MIME type of the file -- will be
automatically guessed if not given
:param inline: Whether to set the Content-Disposition for the file to
"inline" (True) or "attachment" (False)
:param filename: The file name of the attached file as seen
by the user in his/her mail client.
:param filename_charset: Charset used for the filename paramenter. Allows for
attachment names with characters from UTF-8 or Latin 1. See RFC 2231.
:param filename_language: Used to specify what language the filename is in. See RFC 2231.
:param encoding: Value of the Content-Encoding MIME header (e.g. "gzip"
in case of .tar.gz, but usually empty)
"""
self._dirty = True
if not maintype:
maintype, guessed_encoding = guess_type(name)
encoding = encoding or guessed_encoding
if not maintype:
maintype, subtype = 'application', 'octet-stream'
else:
maintype, _, subtype = maintype.partition('/')
part = MIMENonMultipart(maintype, subtype)
part.add_header('Content-Transfer-Encoding', 'base64')
if encoding:
part.add_header('Content-Encoding', encoding)
if data is None:
with open(name, 'rb') as fp:
value = fp.read()
name = os.path.basename(name)
elif isinstance(data, bytes):
value = data
elif hasattr(data, 'read'):
value = data.read()
else:
raise TypeError("Unable to read attachment contents")
part.set_payload(base64.encodestring(value))
if not filename:
filename = name
filename = os.path.basename(filename)
if filename_charset or filename_language:
if not filename_charset:
filename_charset = 'utf-8'
# See https://docs.python.org/2/library/email.message.html#email.message.Message.add_header
# for more information.
# add_header() in the email module expects its arguments to be ASCII strings. Go ahead and handle
# the case where these arguments come in as unicode strings, since encoding ASCII strings
# as UTF-8 can't hurt.
if sys.version_info < (3, 0):
filename=(filename_charset.encode('utf-8'), filename_language.encode('utf-8'), filename.encode('utf-8'))
else:
filename=(filename_charset, filename_language, filename)
if inline:
if sys.version_info < (3, 0):
part.add_header('Content-Disposition'.encode('utf-8'), 'inline'.encode('utf-8'), filename=filename)
part.add_header('Content-ID'.encode('utf-8'), '<%s>'.encode('utf-8') % filename)
else:
part.add_header('Content-Disposition', 'inline', filename=filename)
part.add_header('Content-ID', '<%s>' % filename)
self.embedded.append(part)
else:
if sys.version_info < (3, 0):
part.add_header('Content-Disposition'.encode('utf-8'), 'attachment'.encode('utf-8'), filename=filename)
else:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.attachments.append(part) | [
"def",
"attach",
"(",
"self",
",",
"name",
",",
"data",
"=",
"None",
",",
"maintype",
"=",
"None",
",",
"subtype",
"=",
"None",
",",
"inline",
"=",
"False",
",",
"filename",
"=",
"None",
",",
"filename_charset",
"=",
"''",
",",
"filename_language",
"="... | Attach a file to this message.
:param name: Path to the file to attach if data is None, or the name
of the file if the ``data`` argument is given
:param data: Contents of the file to attach, or None if the data is to
be read from the file pointed to by the ``name`` argument
:type data: bytes or a file-like object
:param maintype: First part of the MIME type of the file -- will be
automatically guessed if not given
:param subtype: Second part of the MIME type of the file -- will be
automatically guessed if not given
:param inline: Whether to set the Content-Disposition for the file to
"inline" (True) or "attachment" (False)
:param filename: The file name of the attached file as seen
by the user in his/her mail client.
:param filename_charset: Charset used for the filename paramenter. Allows for
attachment names with characters from UTF-8 or Latin 1. See RFC 2231.
:param filename_language: Used to specify what language the filename is in. See RFC 2231.
:param encoding: Value of the Content-Encoding MIME header (e.g. "gzip"
in case of .tar.gz, but usually empty) | [
"Attach",
"a",
"file",
"to",
"this",
"message",
"."
] | 3995ef98a3f7feb75f1aeb652e6afe40a5c94def | https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/message.py#L254-L337 | train | 28,857 |
marrow/mailer | marrow/mailer/message.py | Message.embed | def embed(self, name, data=None):
"""Attach an image file and prepare for HTML embedding.
This method should only be used to embed images.
:param name: Path to the image to embed if data is None, or the name
of the file if the ``data`` argument is given
:param data: Contents of the image to embed, or None if the data is to
be read from the file pointed to by the ``name`` argument
"""
if data is None:
with open(name, 'rb') as fp:
data = fp.read()
name = os.path.basename(name)
elif isinstance(data, bytes):
pass
elif hasattr(data, 'read'):
data = data.read()
else:
raise TypeError("Unable to read image contents")
subtype = imghdr.what(None, data)
self.attach(name, data, 'image', subtype, True) | python | def embed(self, name, data=None):
"""Attach an image file and prepare for HTML embedding.
This method should only be used to embed images.
:param name: Path to the image to embed if data is None, or the name
of the file if the ``data`` argument is given
:param data: Contents of the image to embed, or None if the data is to
be read from the file pointed to by the ``name`` argument
"""
if data is None:
with open(name, 'rb') as fp:
data = fp.read()
name = os.path.basename(name)
elif isinstance(data, bytes):
pass
elif hasattr(data, 'read'):
data = data.read()
else:
raise TypeError("Unable to read image contents")
subtype = imghdr.what(None, data)
self.attach(name, data, 'image', subtype, True) | [
"def",
"embed",
"(",
"self",
",",
"name",
",",
"data",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"with",
"open",
"(",
"name",
",",
"'rb'",
")",
"as",
"fp",
":",
"data",
"=",
"fp",
".",
"read",
"(",
")",
"name",
"=",
"os",
".",
... | Attach an image file and prepare for HTML embedding.
This method should only be used to embed images.
:param name: Path to the image to embed if data is None, or the name
of the file if the ``data`` argument is given
:param data: Contents of the image to embed, or None if the data is to
be read from the file pointed to by the ``name`` argument | [
"Attach",
"an",
"image",
"file",
"and",
"prepare",
"for",
"HTML",
"embedding",
"."
] | 3995ef98a3f7feb75f1aeb652e6afe40a5c94def | https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/message.py#L339-L361 | train | 28,858 |
marrow/mailer | marrow/mailer/transport/mock.py | MockTransport.deliver | def deliver(self, message):
"""Concrete message delivery."""
config = self.config
success = config.success
failure = config.failure
exhaustion = config.exhaustion
if getattr(message, 'die', False):
1/0
if failure:
chance = random.randint(0,100001) / 100000.0
if chance < failure:
raise TransportFailedException("Mock failure.")
if exhaustion:
chance = random.randint(0,100001) / 100000.0
if chance < exhaustion:
raise TransportExhaustedException("Mock exhaustion.")
if success == 1.0:
return True
chance = random.randint(0,100001) / 100000.0
if chance <= success:
return True
return False | python | def deliver(self, message):
"""Concrete message delivery."""
config = self.config
success = config.success
failure = config.failure
exhaustion = config.exhaustion
if getattr(message, 'die', False):
1/0
if failure:
chance = random.randint(0,100001) / 100000.0
if chance < failure:
raise TransportFailedException("Mock failure.")
if exhaustion:
chance = random.randint(0,100001) / 100000.0
if chance < exhaustion:
raise TransportExhaustedException("Mock exhaustion.")
if success == 1.0:
return True
chance = random.randint(0,100001) / 100000.0
if chance <= success:
return True
return False | [
"def",
"deliver",
"(",
"self",
",",
"message",
")",
":",
"config",
"=",
"self",
".",
"config",
"success",
"=",
"config",
".",
"success",
"failure",
"=",
"config",
".",
"failure",
"exhaustion",
"=",
"config",
".",
"exhaustion",
"if",
"getattr",
"(",
"mess... | Concrete message delivery. | [
"Concrete",
"message",
"delivery",
"."
] | 3995ef98a3f7feb75f1aeb652e6afe40a5c94def | https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/transport/mock.py#L40-L68 | train | 28,859 |
marrow/mailer | marrow/mailer/manager/immediate.py | ImmediateManager.startup | def startup(self):
"""Perform startup actions.
This just chains down to the transport layer.
"""
log.info("Immediate delivery manager starting.")
log.debug("Initializing transport queue.")
self.transport.startup()
log.info("Immediate delivery manager started.") | python | def startup(self):
"""Perform startup actions.
This just chains down to the transport layer.
"""
log.info("Immediate delivery manager starting.")
log.debug("Initializing transport queue.")
self.transport.startup()
log.info("Immediate delivery manager started.") | [
"def",
"startup",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"Immediate delivery manager starting.\"",
")",
"log",
".",
"debug",
"(",
"\"Initializing transport queue.\"",
")",
"self",
".",
"transport",
".",
"startup",
"(",
")",
"log",
".",
"info",
"(",
... | Perform startup actions.
This just chains down to the transport layer. | [
"Perform",
"startup",
"actions",
".",
"This",
"just",
"chains",
"down",
"to",
"the",
"transport",
"layer",
"."
] | 3995ef98a3f7feb75f1aeb652e6afe40a5c94def | https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/manager/immediate.py#L24-L35 | train | 28,860 |
marrow/mailer | marrow/mailer/address.py | AddressList.string_addresses | def string_addresses(self, encoding=None):
"""Return a list of string representations of the addresses suitable
for usage in an SMTP transaction."""
if not encoding:
encoding = self.encoding
# We need the punycode goodness.
return [Address(i.address).encode(encoding).decode(encoding) for i in self] | python | def string_addresses(self, encoding=None):
"""Return a list of string representations of the addresses suitable
for usage in an SMTP transaction."""
if not encoding:
encoding = self.encoding
# We need the punycode goodness.
return [Address(i.address).encode(encoding).decode(encoding) for i in self] | [
"def",
"string_addresses",
"(",
"self",
",",
"encoding",
"=",
"None",
")",
":",
"if",
"not",
"encoding",
":",
"encoding",
"=",
"self",
".",
"encoding",
"# We need the punycode goodness.",
"return",
"[",
"Address",
"(",
"i",
".",
"address",
")",
".",
"encode"... | Return a list of string representations of the addresses suitable
for usage in an SMTP transaction. | [
"Return",
"a",
"list",
"of",
"string",
"representations",
"of",
"the",
"addresses",
"suitable",
"for",
"usage",
"in",
"an",
"SMTP",
"transaction",
"."
] | 3995ef98a3f7feb75f1aeb652e6afe40a5c94def | https://github.com/marrow/mailer/blob/3995ef98a3f7feb75f1aeb652e6afe40a5c94def/marrow/mailer/address.py#L193-L201 | train | 28,861 |
learningequality/ricecooker | ricecooker/sushi_bar_client.py | ReconnectingWebSocket.run | def run(self):
"""
If the connection drops, then run_forever will terminate and a
reconnection attempt will be made.
"""
while True:
self.connect_lock.acquire()
if self.stopped():
return
self.__connect()
self.connect_lock.release()
self.ws.run_forever() | python | def run(self):
"""
If the connection drops, then run_forever will terminate and a
reconnection attempt will be made.
"""
while True:
self.connect_lock.acquire()
if self.stopped():
return
self.__connect()
self.connect_lock.release()
self.ws.run_forever() | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"True",
":",
"self",
".",
"connect_lock",
".",
"acquire",
"(",
")",
"if",
"self",
".",
"stopped",
"(",
")",
":",
"return",
"self",
".",
"__connect",
"(",
")",
"self",
".",
"connect_lock",
".",
"release",
... | If the connection drops, then run_forever will terminate and a
reconnection attempt will be made. | [
"If",
"the",
"connection",
"drops",
"then",
"run_forever",
"will",
"terminate",
"and",
"a",
"reconnection",
"attempt",
"will",
"be",
"made",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/sushi_bar_client.py#L38-L49 | train | 28,862 |
learningequality/ricecooker | ricecooker/sushi_bar_client.py | ReconnectingWebSocket.send | def send(self, data):
"""
This method keeps trying to send a message relying on the run method
to reopen the websocket in case it was closed.
"""
while not self.stopped():
try:
self.ws.send(data)
return
except websocket.WebSocketConnectionClosedException:
# config.LOGGER.debug('WebSocket closed, retrying send.') # TODO(investigate infinite loop)
time.sleep(0.1) | python | def send(self, data):
"""
This method keeps trying to send a message relying on the run method
to reopen the websocket in case it was closed.
"""
while not self.stopped():
try:
self.ws.send(data)
return
except websocket.WebSocketConnectionClosedException:
# config.LOGGER.debug('WebSocket closed, retrying send.') # TODO(investigate infinite loop)
time.sleep(0.1) | [
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"while",
"not",
"self",
".",
"stopped",
"(",
")",
":",
"try",
":",
"self",
".",
"ws",
".",
"send",
"(",
"data",
")",
"return",
"except",
"websocket",
".",
"WebSocketConnectionClosedException",
":",
"# ... | This method keeps trying to send a message relying on the run method
to reopen the websocket in case it was closed. | [
"This",
"method",
"keeps",
"trying",
"to",
"send",
"a",
"message",
"relying",
"on",
"the",
"run",
"method",
"to",
"reopen",
"the",
"websocket",
"in",
"case",
"it",
"was",
"closed",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/sushi_bar_client.py#L54-L65 | train | 28,863 |
learningequality/ricecooker | ricecooker/sushi_bar_client.py | SushiBarClient.__create_channel_run | def __create_channel_run(self, channel, username, token):
"""Sends a post request to create the channel run."""
data = {
'channel_id': channel.get_node_id().hex,
'chef_name': self.__get_chef_name(),
'ricecooker_version': __version__,
'started_by_user': username,
'started_by_user_token': token,
'content_server': config.DOMAIN,
}
try:
response = requests.post(
config.sushi_bar_channel_runs_url(),
data=data,
auth=AUTH)
response.raise_for_status()
return response.json()['run_id']
except Exception as e:
config.LOGGER.error('Error channel run: %s' % e)
return None | python | def __create_channel_run(self, channel, username, token):
"""Sends a post request to create the channel run."""
data = {
'channel_id': channel.get_node_id().hex,
'chef_name': self.__get_chef_name(),
'ricecooker_version': __version__,
'started_by_user': username,
'started_by_user_token': token,
'content_server': config.DOMAIN,
}
try:
response = requests.post(
config.sushi_bar_channel_runs_url(),
data=data,
auth=AUTH)
response.raise_for_status()
return response.json()['run_id']
except Exception as e:
config.LOGGER.error('Error channel run: %s' % e)
return None | [
"def",
"__create_channel_run",
"(",
"self",
",",
"channel",
",",
"username",
",",
"token",
")",
":",
"data",
"=",
"{",
"'channel_id'",
":",
"channel",
".",
"get_node_id",
"(",
")",
".",
"hex",
",",
"'chef_name'",
":",
"self",
".",
"__get_chef_name",
"(",
... | Sends a post request to create the channel run. | [
"Sends",
"a",
"post",
"request",
"to",
"create",
"the",
"channel",
"run",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/sushi_bar_client.py#L140-L159 | train | 28,864 |
learningequality/ricecooker | ricecooker/sushi_bar_client.py | LocalControlSocket.run | def run(self):
"""
This override threading.Thread to open socket and wait for messages.
"""
while True:
self.open_lock.acquire()
if self.stopped():
return
self.__open()
self.open_lock.release() | python | def run(self):
"""
This override threading.Thread to open socket and wait for messages.
"""
while True:
self.open_lock.acquire()
if self.stopped():
return
self.__open()
self.open_lock.release() | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"True",
":",
"self",
".",
"open_lock",
".",
"acquire",
"(",
")",
"if",
"self",
".",
"stopped",
"(",
")",
":",
"return",
"self",
".",
"__open",
"(",
")",
"self",
".",
"open_lock",
".",
"release",
"(",
... | This override threading.Thread to open socket and wait for messages. | [
"This",
"override",
"threading",
".",
"Thread",
"to",
"open",
"socket",
"and",
"wait",
"for",
"messages",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/sushi_bar_client.py#L387-L396 | train | 28,865 |
learningequality/ricecooker | ricecooker/utils/linecook.py | filter_filenames | def filter_filenames(filenames):
"""
Skip files with extentions in `FILE_EXCLUDE_EXTENTIONS` and filenames that
contain `FILE_SKIP_PATTENRS`.
"""
filenames_cleaned = []
for filename in filenames:
keep = True
for pattern in FILE_EXCLUDE_EXTENTIONS:
if filename.endswith(pattern):
keep = False
for pattern in FILE_SKIP_PATTENRS: # This will reject exercises...
if pattern in filename:
keep = False
if keep:
filenames_cleaned.append(filename)
return filenames_cleaned | python | def filter_filenames(filenames):
"""
Skip files with extentions in `FILE_EXCLUDE_EXTENTIONS` and filenames that
contain `FILE_SKIP_PATTENRS`.
"""
filenames_cleaned = []
for filename in filenames:
keep = True
for pattern in FILE_EXCLUDE_EXTENTIONS:
if filename.endswith(pattern):
keep = False
for pattern in FILE_SKIP_PATTENRS: # This will reject exercises...
if pattern in filename:
keep = False
if keep:
filenames_cleaned.append(filename)
return filenames_cleaned | [
"def",
"filter_filenames",
"(",
"filenames",
")",
":",
"filenames_cleaned",
"=",
"[",
"]",
"for",
"filename",
"in",
"filenames",
":",
"keep",
"=",
"True",
"for",
"pattern",
"in",
"FILE_EXCLUDE_EXTENTIONS",
":",
"if",
"filename",
".",
"endswith",
"(",
"pattern"... | Skip files with extentions in `FILE_EXCLUDE_EXTENTIONS` and filenames that
contain `FILE_SKIP_PATTENRS`. | [
"Skip",
"files",
"with",
"extentions",
"in",
"FILE_EXCLUDE_EXTENTIONS",
"and",
"filenames",
"that",
"contain",
"FILE_SKIP_PATTENRS",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/linecook.py#L83-L99 | train | 28,866 |
learningequality/ricecooker | ricecooker/utils/linecook.py | filter_thumbnail_files | def filter_thumbnail_files(chan_path, filenames, metadata_provider):
"""
We don't want to create `ContentNode` from thumbnail files.
"""
thumbnail_files_to_skip = metadata_provider.get_thumbnail_paths()
filenames_cleaned = []
for filename in filenames:
keep = True
chan_filepath = os.path.join(chan_path, filename)
chan_filepath_tuple = path_to_tuple(chan_filepath)
if chan_filepath_tuple in thumbnail_files_to_skip:
keep = False
if keep:
filenames_cleaned.append(filename)
return filenames_cleaned | python | def filter_thumbnail_files(chan_path, filenames, metadata_provider):
"""
We don't want to create `ContentNode` from thumbnail files.
"""
thumbnail_files_to_skip = metadata_provider.get_thumbnail_paths()
filenames_cleaned = []
for filename in filenames:
keep = True
chan_filepath = os.path.join(chan_path, filename)
chan_filepath_tuple = path_to_tuple(chan_filepath)
if chan_filepath_tuple in thumbnail_files_to_skip:
keep = False
if keep:
filenames_cleaned.append(filename)
return filenames_cleaned | [
"def",
"filter_thumbnail_files",
"(",
"chan_path",
",",
"filenames",
",",
"metadata_provider",
")",
":",
"thumbnail_files_to_skip",
"=",
"metadata_provider",
".",
"get_thumbnail_paths",
"(",
")",
"filenames_cleaned",
"=",
"[",
"]",
"for",
"filename",
"in",
"filenames"... | We don't want to create `ContentNode` from thumbnail files. | [
"We",
"don",
"t",
"want",
"to",
"create",
"ContentNode",
"from",
"thumbnail",
"files",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/linecook.py#L101-L115 | train | 28,867 |
learningequality/ricecooker | ricecooker/utils/linecook.py | keep_folder | def keep_folder(raw_path):
"""
Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`.
"""
keep = True
for pattern in DIR_EXCLUDE_PATTERNS:
if pattern in raw_path:
LOGGER.debug('rejecting', raw_path)
keep = False
return keep | python | def keep_folder(raw_path):
"""
Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`.
"""
keep = True
for pattern in DIR_EXCLUDE_PATTERNS:
if pattern in raw_path:
LOGGER.debug('rejecting', raw_path)
keep = False
return keep | [
"def",
"keep_folder",
"(",
"raw_path",
")",
":",
"keep",
"=",
"True",
"for",
"pattern",
"in",
"DIR_EXCLUDE_PATTERNS",
":",
"if",
"pattern",
"in",
"raw_path",
":",
"LOGGER",
".",
"debug",
"(",
"'rejecting'",
",",
"raw_path",
")",
"keep",
"=",
"False",
"retu... | Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`. | [
"Keep",
"only",
"folders",
"that",
"don",
"t",
"contain",
"patterns",
"in",
"DIR_EXCLUDE_PATTERNS",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/linecook.py#L117-L126 | train | 28,868 |
learningequality/ricecooker | ricecooker/utils/linecook.py | process_folder | def process_folder(channel, rel_path, filenames, metadata_provider):
"""
Create `ContentNode`s from each file in this folder and the node to `channel`
under the path `rel_path`.
"""
LOGGER.debug('IN process_folder ' + str(rel_path) + ' ' + str(filenames))
if not keep_folder(rel_path):
return
chan_path = chan_path_from_rel_path(rel_path, metadata_provider.channeldir)
chan_path_tuple = path_to_tuple(chan_path)
chan_path_list = list(chan_path_tuple)
LOGGER.debug('chan_path_list=' + str(chan_path_list))
# FIND THE CONTAINING NODE (channel or topic)
if len(chan_path_list) == 1:
# CASE CHANNEL ROOT: `rel_path` points to `channeldir`
# No need to create a topic node here since channel already exists
containing_node = channel # attach content nodes in filenames directly to channel
else:
# CASE TOPIC FOLDER: `rel_path` points to a channelroot subfolder (a.k.a TopicNode)
dirname = chan_path_list.pop() # name of the folder (used as ID for internal lookup)
topic_parent_node = get_topic_for_path(channel, chan_path_list)
# read topic metadata to get title and description for the TopicNode
topic_metadata = metadata_provider.get(chan_path_tuple)
thumbnail_chan_path = topic_metadata.get('thumbnail_chan_path', None)
if thumbnail_chan_path:
thumbnail_rel_path = rel_path_from_chan_path(thumbnail_chan_path, metadata_provider.channeldir)
else:
thumbnail_rel_path = None
# create TopicNode for this folder
topic = dict(
kind=TOPIC_NODE,
dirname=dirname,
source_id='sourceid:' + rel_path,
title=topic_metadata.get('title', dirname),
description=topic_metadata.get('description', None),
author=topic_metadata.get('author', None),
language=topic_metadata.get('language', None),
license=topic_metadata.get('license', None),
thumbnail=thumbnail_rel_path,
children=[],
)
topic_parent_node['children'].append(topic)
containing_node = topic # attach content nodes in filenames to the newly created topic
# filter filenames
filenames_cleaned = filter_filenames(filenames)
filenames_cleaned2 = filter_thumbnail_files(chan_path, filenames_cleaned, metadata_provider)
# PROCESS FILES
for filename in filenames_cleaned2:
chan_filepath = os.path.join(chan_path, filename)
chan_filepath_tuple = path_to_tuple(chan_filepath)
metadata = metadata_provider.get(chan_filepath_tuple)
node = make_content_node(metadata_provider.channeldir, rel_path, filename, metadata)
containing_node['children'].append(node) | python | def process_folder(channel, rel_path, filenames, metadata_provider):
"""
Create `ContentNode`s from each file in this folder and the node to `channel`
under the path `rel_path`.
"""
LOGGER.debug('IN process_folder ' + str(rel_path) + ' ' + str(filenames))
if not keep_folder(rel_path):
return
chan_path = chan_path_from_rel_path(rel_path, metadata_provider.channeldir)
chan_path_tuple = path_to_tuple(chan_path)
chan_path_list = list(chan_path_tuple)
LOGGER.debug('chan_path_list=' + str(chan_path_list))
# FIND THE CONTAINING NODE (channel or topic)
if len(chan_path_list) == 1:
# CASE CHANNEL ROOT: `rel_path` points to `channeldir`
# No need to create a topic node here since channel already exists
containing_node = channel # attach content nodes in filenames directly to channel
else:
# CASE TOPIC FOLDER: `rel_path` points to a channelroot subfolder (a.k.a TopicNode)
dirname = chan_path_list.pop() # name of the folder (used as ID for internal lookup)
topic_parent_node = get_topic_for_path(channel, chan_path_list)
# read topic metadata to get title and description for the TopicNode
topic_metadata = metadata_provider.get(chan_path_tuple)
thumbnail_chan_path = topic_metadata.get('thumbnail_chan_path', None)
if thumbnail_chan_path:
thumbnail_rel_path = rel_path_from_chan_path(thumbnail_chan_path, metadata_provider.channeldir)
else:
thumbnail_rel_path = None
# create TopicNode for this folder
topic = dict(
kind=TOPIC_NODE,
dirname=dirname,
source_id='sourceid:' + rel_path,
title=topic_metadata.get('title', dirname),
description=topic_metadata.get('description', None),
author=topic_metadata.get('author', None),
language=topic_metadata.get('language', None),
license=topic_metadata.get('license', None),
thumbnail=thumbnail_rel_path,
children=[],
)
topic_parent_node['children'].append(topic)
containing_node = topic # attach content nodes in filenames to the newly created topic
# filter filenames
filenames_cleaned = filter_filenames(filenames)
filenames_cleaned2 = filter_thumbnail_files(chan_path, filenames_cleaned, metadata_provider)
# PROCESS FILES
for filename in filenames_cleaned2:
chan_filepath = os.path.join(chan_path, filename)
chan_filepath_tuple = path_to_tuple(chan_filepath)
metadata = metadata_provider.get(chan_filepath_tuple)
node = make_content_node(metadata_provider.channeldir, rel_path, filename, metadata)
containing_node['children'].append(node) | [
"def",
"process_folder",
"(",
"channel",
",",
"rel_path",
",",
"filenames",
",",
"metadata_provider",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'IN process_folder '",
"+",
"str",
"(",
"rel_path",
")",
"+",
"' '",
"+",
"str",
"(",
"filenames",
")",
")",
"... | Create `ContentNode`s from each file in this folder and the node to `channel`
under the path `rel_path`. | [
"Create",
"ContentNode",
"s",
"from",
"each",
"file",
"in",
"this",
"folder",
"and",
"the",
"node",
"to",
"channel",
"under",
"the",
"path",
"rel_path",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/linecook.py#L128-L186 | train | 28,869 |
learningequality/ricecooker | ricecooker/utils/linecook.py | build_ricecooker_json_tree | def build_ricecooker_json_tree(args, options, metadata_provider, json_tree_path):
"""
Download all categories, subpages, modules, and resources from open.edu.
"""
LOGGER.info('Starting to build the ricecooker_json_tree')
channeldir = args['channeldir']
if channeldir.endswith(os.path.sep):
channeldir.rstrip(os.path.sep)
channelparentdir, channeldirname = os.path.split(channeldir)
channelparentdir, channeldirname = os.path.split(channeldir)
# Ricecooker tree
channel_info = metadata_provider.get_channel_info()
thumbnail_chan_path = channel_info.get('thumbnail_chan_path', None)
if thumbnail_chan_path:
thumbnail_rel_path = rel_path_from_chan_path(thumbnail_chan_path, metadata_provider.channeldir)
else:
thumbnail_rel_path = None
ricecooker_json_tree = dict(
dirname=channeldirname,
title=channel_info['title'],
description=channel_info['description'],
source_domain=channel_info['source_domain'],
source_id=channel_info['source_id'],
language=channel_info['language'],
thumbnail=thumbnail_rel_path,
children=[],
)
channeldir = args['channeldir']
content_folders = sorted(os.walk(channeldir))
# MAIN PROCESSING OF os.walk OUTPUT
############################################################################
# TODO(ivan): figure out all the implications of the
# _ = content_folders.pop(0) # Skip over channel folder because handled above
for rel_path, _subfolders, filenames in content_folders:
LOGGER.info('processing folder ' + str(rel_path))
# IMPLEMENTATION DETAIL:
# - `filenames` contains real files in the `channeldir` folder
# - `exercises_filenames` contains virtual files whose sole purpse is to set the
# order of nodes within a given topic. Since alphabetical order is used to
# walk the files in the `channeldir`, we must "splice in" the exercises here
if metadata_provider.has_exercises():
dir_chan_path = chan_path_from_rel_path(rel_path, metadata_provider.channeldir)
dir_path_tuple = path_to_tuple(dir_chan_path)
exercises_filenames = metadata_provider.get_exercises_for_dir(dir_path_tuple)
filenames.extend(exercises_filenames)
sorted_filenames = sorted(filenames)
process_folder(ricecooker_json_tree, rel_path, sorted_filenames, metadata_provider)
# Write out ricecooker_json_tree.json
write_tree_to_json_tree(json_tree_path, ricecooker_json_tree)
LOGGER.info('Folder hierarchy walk result stored in ' + json_tree_path) | python | def build_ricecooker_json_tree(args, options, metadata_provider, json_tree_path):
"""
Download all categories, subpages, modules, and resources from open.edu.
"""
LOGGER.info('Starting to build the ricecooker_json_tree')
channeldir = args['channeldir']
if channeldir.endswith(os.path.sep):
channeldir.rstrip(os.path.sep)
channelparentdir, channeldirname = os.path.split(channeldir)
channelparentdir, channeldirname = os.path.split(channeldir)
# Ricecooker tree
channel_info = metadata_provider.get_channel_info()
thumbnail_chan_path = channel_info.get('thumbnail_chan_path', None)
if thumbnail_chan_path:
thumbnail_rel_path = rel_path_from_chan_path(thumbnail_chan_path, metadata_provider.channeldir)
else:
thumbnail_rel_path = None
ricecooker_json_tree = dict(
dirname=channeldirname,
title=channel_info['title'],
description=channel_info['description'],
source_domain=channel_info['source_domain'],
source_id=channel_info['source_id'],
language=channel_info['language'],
thumbnail=thumbnail_rel_path,
children=[],
)
channeldir = args['channeldir']
content_folders = sorted(os.walk(channeldir))
# MAIN PROCESSING OF os.walk OUTPUT
############################################################################
# TODO(ivan): figure out all the implications of the
# _ = content_folders.pop(0) # Skip over channel folder because handled above
for rel_path, _subfolders, filenames in content_folders:
LOGGER.info('processing folder ' + str(rel_path))
# IMPLEMENTATION DETAIL:
# - `filenames` contains real files in the `channeldir` folder
# - `exercises_filenames` contains virtual files whose sole purpse is to set the
# order of nodes within a given topic. Since alphabetical order is used to
# walk the files in the `channeldir`, we must "splice in" the exercises here
if metadata_provider.has_exercises():
dir_chan_path = chan_path_from_rel_path(rel_path, metadata_provider.channeldir)
dir_path_tuple = path_to_tuple(dir_chan_path)
exercises_filenames = metadata_provider.get_exercises_for_dir(dir_path_tuple)
filenames.extend(exercises_filenames)
sorted_filenames = sorted(filenames)
process_folder(ricecooker_json_tree, rel_path, sorted_filenames, metadata_provider)
# Write out ricecooker_json_tree.json
write_tree_to_json_tree(json_tree_path, ricecooker_json_tree)
LOGGER.info('Folder hierarchy walk result stored in ' + json_tree_path) | [
"def",
"build_ricecooker_json_tree",
"(",
"args",
",",
"options",
",",
"metadata_provider",
",",
"json_tree_path",
")",
":",
"LOGGER",
".",
"info",
"(",
"'Starting to build the ricecooker_json_tree'",
")",
"channeldir",
"=",
"args",
"[",
"'channeldir'",
"]",
"if",
"... | Download all categories, subpages, modules, and resources from open.edu. | [
"Download",
"all",
"categories",
"subpages",
"modules",
"and",
"resources",
"from",
"open",
".",
"edu",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/linecook.py#L189-L245 | train | 28,870 |
learningequality/ricecooker | examples/tutorial_chef.py | TutorialChef.construct_channel | def construct_channel(self, *args, **kwargs):
"""
This method is reponsible for creating a `ChannelNode` object from the info
in `channel_info` and populating it with TopicNode and ContentNode children.
"""
# Create channel
########################################################################
channel = self.get_channel(*args, **kwargs) # uses self.channel_info
# Create topics to add to your channel
########################################################################
# Here we are creating a topic named 'Example Topic'
exampletopic = TopicNode(source_id="topic-1", title="Example Topic")
# TODO: Create your topic here
# Now we are adding 'Example Topic' to our channel
channel.add_child(exampletopic)
# TODO: Add your topic to channel here
# You can also add subtopics to topics
# Here we are creating a subtopic named 'Example Subtopic'
examplesubtopic = TopicNode(source_id="topic-1a", title="Example Subtopic")
# TODO: Create your subtopic here
# Now we are adding 'Example Subtopic' to our 'Example Topic'
exampletopic.add_child(examplesubtopic)
# TODO: Add your subtopic to your topic here
# Content
# You can add pdfs, videos, and audio files to your channel
########################################################################
# let's create a document file called 'Example PDF'
document_file = DocumentFile(path="http://www.pdf995.com/samples/pdf.pdf")
examplepdf = DocumentNode(title="Example PDF", source_id="example-pdf", files=[document_file], license=get_license(licenses.PUBLIC_DOMAIN))
# TODO: Create your pdf file here (use any url to a .pdf file)
# We are also going to add a video file called 'Example Video'
video_file = VideoFile(path="https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4")
fancy_license = get_license(licenses.SPECIAL_PERMISSIONS, description='Special license for ricecooker fans only.', copyright_holder='The chef video makers')
examplevideo = VideoNode(title="Example Video", source_id="example-video", files=[video_file], license=fancy_license)
# TODO: Create your video file here (use any url to a .mp4 file)
# Finally, we are creating an audio file called 'Example Audio'
audio_file = AudioFile(path="https://ia802508.us.archive.org/5/items/testmp3testfile/mpthreetest.mp3")
exampleaudio = AudioNode(title="Example Audio", source_id="example-audio", files=[audio_file], license=get_license(licenses.PUBLIC_DOMAIN))
# TODO: Create your audio file here (use any url to a .mp3 file)
# Now that we have our files, let's add them to our channel
channel.add_child(examplepdf) # Adding 'Example PDF' to your channel
exampletopic.add_child(examplevideo) # Adding 'Example Video' to 'Example Topic'
examplesubtopic.add_child(exampleaudio) # Adding 'Example Audio' to 'Example Subtopic'
# TODO: Add your pdf file to your channel
# TODO: Add your video file to your topic
# TODO: Add your audio file to your subtopic
# the `construct_channel` method returns a ChannelNode that will be
# processed by the ricecooker framework
return channel | python | def construct_channel(self, *args, **kwargs):
"""
This method is reponsible for creating a `ChannelNode` object from the info
in `channel_info` and populating it with TopicNode and ContentNode children.
"""
# Create channel
########################################################################
channel = self.get_channel(*args, **kwargs) # uses self.channel_info
# Create topics to add to your channel
########################################################################
# Here we are creating a topic named 'Example Topic'
exampletopic = TopicNode(source_id="topic-1", title="Example Topic")
# TODO: Create your topic here
# Now we are adding 'Example Topic' to our channel
channel.add_child(exampletopic)
# TODO: Add your topic to channel here
# You can also add subtopics to topics
# Here we are creating a subtopic named 'Example Subtopic'
examplesubtopic = TopicNode(source_id="topic-1a", title="Example Subtopic")
# TODO: Create your subtopic here
# Now we are adding 'Example Subtopic' to our 'Example Topic'
exampletopic.add_child(examplesubtopic)
# TODO: Add your subtopic to your topic here
# Content
# You can add pdfs, videos, and audio files to your channel
########################################################################
# let's create a document file called 'Example PDF'
document_file = DocumentFile(path="http://www.pdf995.com/samples/pdf.pdf")
examplepdf = DocumentNode(title="Example PDF", source_id="example-pdf", files=[document_file], license=get_license(licenses.PUBLIC_DOMAIN))
# TODO: Create your pdf file here (use any url to a .pdf file)
# We are also going to add a video file called 'Example Video'
video_file = VideoFile(path="https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4")
fancy_license = get_license(licenses.SPECIAL_PERMISSIONS, description='Special license for ricecooker fans only.', copyright_holder='The chef video makers')
examplevideo = VideoNode(title="Example Video", source_id="example-video", files=[video_file], license=fancy_license)
# TODO: Create your video file here (use any url to a .mp4 file)
# Finally, we are creating an audio file called 'Example Audio'
audio_file = AudioFile(path="https://ia802508.us.archive.org/5/items/testmp3testfile/mpthreetest.mp3")
exampleaudio = AudioNode(title="Example Audio", source_id="example-audio", files=[audio_file], license=get_license(licenses.PUBLIC_DOMAIN))
# TODO: Create your audio file here (use any url to a .mp3 file)
# Now that we have our files, let's add them to our channel
channel.add_child(examplepdf) # Adding 'Example PDF' to your channel
exampletopic.add_child(examplevideo) # Adding 'Example Video' to 'Example Topic'
examplesubtopic.add_child(exampleaudio) # Adding 'Example Audio' to 'Example Subtopic'
# TODO: Add your pdf file to your channel
# TODO: Add your video file to your topic
# TODO: Add your audio file to your subtopic
# the `construct_channel` method returns a ChannelNode that will be
# processed by the ricecooker framework
return channel | [
"def",
"construct_channel",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Create channel",
"########################################################################",
"channel",
"=",
"self",
".",
"get_channel",
"(",
"*",
"args",
",",
"*",
"*",... | This method is reponsible for creating a `ChannelNode` object from the info
in `channel_info` and populating it with TopicNode and ContentNode children. | [
"This",
"method",
"is",
"reponsible",
"for",
"creating",
"a",
"ChannelNode",
"object",
"from",
"the",
"info",
"in",
"channel_info",
"and",
"populating",
"it",
"with",
"TopicNode",
"and",
"ContentNode",
"children",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/examples/tutorial_chef.py#L29-L88 | train | 28,871 |
learningequality/ricecooker | ricecooker/utils/jsontrees.py | read_tree_from_json | def read_tree_from_json(srcpath):
"""
Load ricecooker json tree data from json file at `srcpath`.
"""
with open(srcpath) as infile:
json_tree = json.load(infile)
if json_tree is None:
raise ValueError('Could not find ricecooker json tree')
return json_tree | python | def read_tree_from_json(srcpath):
"""
Load ricecooker json tree data from json file at `srcpath`.
"""
with open(srcpath) as infile:
json_tree = json.load(infile)
if json_tree is None:
raise ValueError('Could not find ricecooker json tree')
return json_tree | [
"def",
"read_tree_from_json",
"(",
"srcpath",
")",
":",
"with",
"open",
"(",
"srcpath",
")",
"as",
"infile",
":",
"json_tree",
"=",
"json",
".",
"load",
"(",
"infile",
")",
"if",
"json_tree",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Could not find ... | Load ricecooker json tree data from json file at `srcpath`. | [
"Load",
"ricecooker",
"json",
"tree",
"data",
"from",
"json",
"file",
"at",
"srcpath",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/jsontrees.py#L48-L56 | train | 28,872 |
learningequality/ricecooker | ricecooker/utils/jsontrees.py | get_channel_node_from_json | def get_channel_node_from_json(json_tree):
"""
Build `ChannelNode` from json data provided in `json_tree`.
"""
channel = ChannelNode(
title=json_tree['title'],
description=json_tree['description'],
source_domain=json_tree['source_domain'],
source_id=json_tree['source_id'],
language=json_tree['language'],
thumbnail=json_tree.get('thumbnail', None),
)
return channel | python | def get_channel_node_from_json(json_tree):
"""
Build `ChannelNode` from json data provided in `json_tree`.
"""
channel = ChannelNode(
title=json_tree['title'],
description=json_tree['description'],
source_domain=json_tree['source_domain'],
source_id=json_tree['source_id'],
language=json_tree['language'],
thumbnail=json_tree.get('thumbnail', None),
)
return channel | [
"def",
"get_channel_node_from_json",
"(",
"json_tree",
")",
":",
"channel",
"=",
"ChannelNode",
"(",
"title",
"=",
"json_tree",
"[",
"'title'",
"]",
",",
"description",
"=",
"json_tree",
"[",
"'description'",
"]",
",",
"source_domain",
"=",
"json_tree",
"[",
"... | Build `ChannelNode` from json data provided in `json_tree`. | [
"Build",
"ChannelNode",
"from",
"json",
"data",
"provided",
"in",
"json_tree",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/jsontrees.py#L72-L84 | train | 28,873 |
learningequality/ricecooker | ricecooker/chefs.py | BaseChef.get_channel | def get_channel(self, **kwargs):
"""
Call chef script's get_channel method in compatibility mode
...or...
Create a `ChannelNode` from the Chef's `channel_info` class attribute.
Args:
kwargs (dict): additional keyword arguments that `uploadchannel` received
Returns: channel created from get_channel method or None
"""
if self.compatibility_mode:
# For pre-sushibar scritps that do not implement `get_channel`,
# we must check it this function exists before calling it...
if hasattr(self.chef_module, 'get_channel'):
config.LOGGER.info("Calling get_channel... ")
# Create channel (using the function in the chef script)
channel = self.chef_module.get_channel(**kwargs)
# For chefs with a `create_channel` method instead of `get_channel`
if hasattr(self.chef_module, 'create_channel'):
config.LOGGER.info("Calling create_channel... ")
# Create channel (using the function in the chef script)
channel = self.chef_module.create_channel(**kwargs)
else:
channel = None # since no channel info, SushiBar functionality will be disabled...
return channel
elif hasattr(self, 'channel_info'):
# If a sublass has an `channel_info` attribute (a dict) it doesn't need
# to define a `get_channel` method and instead rely on this code:
channel = ChannelNode(
source_domain=self.channel_info['CHANNEL_SOURCE_DOMAIN'],
source_id=self.channel_info['CHANNEL_SOURCE_ID'],
title=self.channel_info['CHANNEL_TITLE'],
thumbnail=self.channel_info.get('CHANNEL_THUMBNAIL'),
language=self.channel_info.get('CHANNEL_LANGUAGE'),
description=self.channel_info.get('CHANNEL_DESCRIPTION'),
)
return channel
else:
raise NotImplementedError('BaseChef must overrride the get_channel method') | python | def get_channel(self, **kwargs):
"""
Call chef script's get_channel method in compatibility mode
...or...
Create a `ChannelNode` from the Chef's `channel_info` class attribute.
Args:
kwargs (dict): additional keyword arguments that `uploadchannel` received
Returns: channel created from get_channel method or None
"""
if self.compatibility_mode:
# For pre-sushibar scritps that do not implement `get_channel`,
# we must check it this function exists before calling it...
if hasattr(self.chef_module, 'get_channel'):
config.LOGGER.info("Calling get_channel... ")
# Create channel (using the function in the chef script)
channel = self.chef_module.get_channel(**kwargs)
# For chefs with a `create_channel` method instead of `get_channel`
if hasattr(self.chef_module, 'create_channel'):
config.LOGGER.info("Calling create_channel... ")
# Create channel (using the function in the chef script)
channel = self.chef_module.create_channel(**kwargs)
else:
channel = None # since no channel info, SushiBar functionality will be disabled...
return channel
elif hasattr(self, 'channel_info'):
# If a sublass has an `channel_info` attribute (a dict) it doesn't need
# to define a `get_channel` method and instead rely on this code:
channel = ChannelNode(
source_domain=self.channel_info['CHANNEL_SOURCE_DOMAIN'],
source_id=self.channel_info['CHANNEL_SOURCE_ID'],
title=self.channel_info['CHANNEL_TITLE'],
thumbnail=self.channel_info.get('CHANNEL_THUMBNAIL'),
language=self.channel_info.get('CHANNEL_LANGUAGE'),
description=self.channel_info.get('CHANNEL_DESCRIPTION'),
)
return channel
else:
raise NotImplementedError('BaseChef must overrride the get_channel method') | [
"def",
"get_channel",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"compatibility_mode",
":",
"# For pre-sushibar scritps that do not implement `get_channel`,",
"# we must check it this function exists before calling it...",
"if",
"hasattr",
"(",
"self",... | Call chef script's get_channel method in compatibility mode
...or...
Create a `ChannelNode` from the Chef's `channel_info` class attribute.
Args:
kwargs (dict): additional keyword arguments that `uploadchannel` received
Returns: channel created from get_channel method or None | [
"Call",
"chef",
"script",
"s",
"get_channel",
"method",
"in",
"compatibility",
"mode",
"...",
"or",
"...",
"Create",
"a",
"ChannelNode",
"from",
"the",
"Chef",
"s",
"channel_info",
"class",
"attribute",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/chefs.py#L156-L196 | train | 28,874 |
learningequality/ricecooker | ricecooker/chefs.py | JsonTreeChef.construct_channel | def construct_channel(self, **kwargs):
"""
Build the channel tree by adding TopicNodes and ContentNode children.
"""
channel = self.get_channel(**kwargs)
json_tree_path = self.get_json_tree_path(**kwargs)
json_tree = read_tree_from_json(json_tree_path)
build_tree_from_json(channel, json_tree['children'])
raise_for_invalid_channel(channel)
return channel | python | def construct_channel(self, **kwargs):
"""
Build the channel tree by adding TopicNodes and ContentNode children.
"""
channel = self.get_channel(**kwargs)
json_tree_path = self.get_json_tree_path(**kwargs)
json_tree = read_tree_from_json(json_tree_path)
build_tree_from_json(channel, json_tree['children'])
raise_for_invalid_channel(channel)
return channel | [
"def",
"construct_channel",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"channel",
"=",
"self",
".",
"get_channel",
"(",
"*",
"*",
"kwargs",
")",
"json_tree_path",
"=",
"self",
".",
"get_json_tree_path",
"(",
"*",
"*",
"kwargs",
")",
"json_tree",
"="... | Build the channel tree by adding TopicNodes and ContentNode children. | [
"Build",
"the",
"channel",
"tree",
"by",
"adding",
"TopicNodes",
"and",
"ContentNode",
"children",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/chefs.py#L371-L380 | train | 28,875 |
learningequality/ricecooker | ricecooker/chefs.py | LineCook.pre_run | def pre_run(self, args, options):
"""
This function is called before `run` in order to build the json tree.
"""
if 'generate' in args and args['generate']:
self.metadata_provider = CsvMetadataProvider(args['channeldir'],
channelinfo=args['channelinfo'],
contentinfo=args['contentinfo'],
exercisesinfo=args['exercisesinfo'],
questionsinfo=args['questionsinfo'],
validate_and_cache=False)
self.metadata_provider.generate_templates(exercise_questions=True)
self.metadata_provider.generate_contentinfo_from_channeldir(args, options)
sys.exit(0)
elif 'importstudioid' in args and args['importstudioid']:
studio_id = args['importstudioid']
config.LOGGER.info("Calling with importstudioid... " + studio_id)
self.metadata_provider = CsvMetadataProvider(args['channeldir'],
channelinfo=args['channelinfo'],
contentinfo=args['contentinfo'],
exercisesinfo=args['exercisesinfo'],
questionsinfo=args['questionsinfo'],
validate_and_cache=False)
self.metadata_provider.generate_templates(exercise_questions=True)
self.metadata_provider.generate_exercises_from_importstudioid(args, options)
sys.exit(0)
if self.metadata_provider is None:
self._init_metadata_provider(args, options)
kwargs = {} # combined dictionary of argparse args and extra options
kwargs.update(args)
kwargs.update(options)
json_tree_path = self.get_json_tree_path(**kwargs)
build_ricecooker_json_tree(args, options, self.metadata_provider, json_tree_path) | python | def pre_run(self, args, options):
"""
This function is called before `run` in order to build the json tree.
"""
if 'generate' in args and args['generate']:
self.metadata_provider = CsvMetadataProvider(args['channeldir'],
channelinfo=args['channelinfo'],
contentinfo=args['contentinfo'],
exercisesinfo=args['exercisesinfo'],
questionsinfo=args['questionsinfo'],
validate_and_cache=False)
self.metadata_provider.generate_templates(exercise_questions=True)
self.metadata_provider.generate_contentinfo_from_channeldir(args, options)
sys.exit(0)
elif 'importstudioid' in args and args['importstudioid']:
studio_id = args['importstudioid']
config.LOGGER.info("Calling with importstudioid... " + studio_id)
self.metadata_provider = CsvMetadataProvider(args['channeldir'],
channelinfo=args['channelinfo'],
contentinfo=args['contentinfo'],
exercisesinfo=args['exercisesinfo'],
questionsinfo=args['questionsinfo'],
validate_and_cache=False)
self.metadata_provider.generate_templates(exercise_questions=True)
self.metadata_provider.generate_exercises_from_importstudioid(args, options)
sys.exit(0)
if self.metadata_provider is None:
self._init_metadata_provider(args, options)
kwargs = {} # combined dictionary of argparse args and extra options
kwargs.update(args)
kwargs.update(options)
json_tree_path = self.get_json_tree_path(**kwargs)
build_ricecooker_json_tree(args, options, self.metadata_provider, json_tree_path) | [
"def",
"pre_run",
"(",
"self",
",",
"args",
",",
"options",
")",
":",
"if",
"'generate'",
"in",
"args",
"and",
"args",
"[",
"'generate'",
"]",
":",
"self",
".",
"metadata_provider",
"=",
"CsvMetadataProvider",
"(",
"args",
"[",
"'channeldir'",
"]",
",",
... | This function is called before `run` in order to build the json tree. | [
"This",
"function",
"is",
"called",
"before",
"run",
"in",
"order",
"to",
"build",
"the",
"json",
"tree",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/chefs.py#L443-L477 | train | 28,876 |
learningequality/ricecooker | ricecooker/utils/html.py | calculate_relative_url | def calculate_relative_url(url, filename=None, baseurl=None, subpath=None):
"""
Calculate the relative path for a URL relative to a base URL, possibly also injecting in a subpath prefix.
"""
# ensure the provided subpath is a list
if isinstance(subpath, str):
subpath = subpath.strip("/").split("/")
elif subpath is None:
subpath = []
# if a base path was supplied, calculate the file's subpath relative to it
if baseurl:
baseurl = urllib.parse.urljoin(baseurl, ".") # ensure baseurl is normalized (to remove '/./' and '/../')
assert url.startswith(baseurl), "URL must start with baseurl"
subpath = subpath + url[len(baseurl):].strip("/").split("/")[:-1]
# if we don't have a filename, extract it from the URL
if not filename:
filename = unquote(urlparse(url).path.split("/")[-1])
# calculate the url path to use to refer to this file, relative to destpath
relative_file_url = "/".join(["."] + subpath + [filename])
return relative_file_url, subpath, filename | python | def calculate_relative_url(url, filename=None, baseurl=None, subpath=None):
"""
Calculate the relative path for a URL relative to a base URL, possibly also injecting in a subpath prefix.
"""
# ensure the provided subpath is a list
if isinstance(subpath, str):
subpath = subpath.strip("/").split("/")
elif subpath is None:
subpath = []
# if a base path was supplied, calculate the file's subpath relative to it
if baseurl:
baseurl = urllib.parse.urljoin(baseurl, ".") # ensure baseurl is normalized (to remove '/./' and '/../')
assert url.startswith(baseurl), "URL must start with baseurl"
subpath = subpath + url[len(baseurl):].strip("/").split("/")[:-1]
# if we don't have a filename, extract it from the URL
if not filename:
filename = unquote(urlparse(url).path.split("/")[-1])
# calculate the url path to use to refer to this file, relative to destpath
relative_file_url = "/".join(["."] + subpath + [filename])
return relative_file_url, subpath, filename | [
"def",
"calculate_relative_url",
"(",
"url",
",",
"filename",
"=",
"None",
",",
"baseurl",
"=",
"None",
",",
"subpath",
"=",
"None",
")",
":",
"# ensure the provided subpath is a list",
"if",
"isinstance",
"(",
"subpath",
",",
"str",
")",
":",
"subpath",
"=",
... | Calculate the relative path for a URL relative to a base URL, possibly also injecting in a subpath prefix. | [
"Calculate",
"the",
"relative",
"path",
"for",
"a",
"URL",
"relative",
"to",
"a",
"base",
"URL",
"possibly",
"also",
"injecting",
"in",
"a",
"subpath",
"prefix",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/html.py#L54-L78 | train | 28,877 |
learningequality/ricecooker | ricecooker/utils/html.py | download_file | def download_file(url, destpath, filename=None, baseurl=None, subpath=None, middleware_callbacks=None, middleware_kwargs=None, request_fn=sess.get):
"""
Download a file from a URL, into a destination folder, with optional use of relative paths and middleware processors.
- If `filename` is set, that will be used as the name of the file when it's written to the destpath.
- If `baseurl` is specified, the file will be put into subdirectory of destpath per the url's path relative to the baseurl.
- If `subpath` is specified, it will be appended to destpath before deciding where to write the file.
- If `middleware_callbacks` is specified, the returned content will be passed through those function(s) before being returned.
- If `middleware_kwargs` are also specified, they will also be passed in to each function in middleware_callbacks.
"""
relative_file_url, subpath, filename = calculate_relative_url(url, filename=filename, baseurl=baseurl, subpath=subpath)
# ensure that the destination directory exists
fulldestpath = os.path.join(destpath, *subpath)
os.makedirs(fulldestpath, exist_ok=True)
# make the actual request to the URL
response = request_fn(url)
content = response.content
# if there are any middleware callbacks, apply them to the content
if middleware_callbacks:
content = content.decode()
if not isinstance(middleware_callbacks, list):
middleware_callbacks = [middleware_callbacks]
kwargs = {
"url": url,
"destpath": destpath,
"filename": filename,
"baseurl": baseurl,
"subpath": subpath,
"fulldestpath": fulldestpath,
"response": response,
}
kwargs.update(middleware_kwargs or {})
for callback in middleware_callbacks:
content = callback(content, **kwargs)
# ensure content is encoded, as we're doing a binary write
if isinstance(content, str):
content = content.encode()
# calculate the final destination for the file, and write the content out to there
dest = os.path.join(fulldestpath, filename)
with open(dest, "wb") as f:
f.write(content)
return relative_file_url, response | python | def download_file(url, destpath, filename=None, baseurl=None, subpath=None, middleware_callbacks=None, middleware_kwargs=None, request_fn=sess.get):
"""
Download a file from a URL, into a destination folder, with optional use of relative paths and middleware processors.
- If `filename` is set, that will be used as the name of the file when it's written to the destpath.
- If `baseurl` is specified, the file will be put into subdirectory of destpath per the url's path relative to the baseurl.
- If `subpath` is specified, it will be appended to destpath before deciding where to write the file.
- If `middleware_callbacks` is specified, the returned content will be passed through those function(s) before being returned.
- If `middleware_kwargs` are also specified, they will also be passed in to each function in middleware_callbacks.
"""
relative_file_url, subpath, filename = calculate_relative_url(url, filename=filename, baseurl=baseurl, subpath=subpath)
# ensure that the destination directory exists
fulldestpath = os.path.join(destpath, *subpath)
os.makedirs(fulldestpath, exist_ok=True)
# make the actual request to the URL
response = request_fn(url)
content = response.content
# if there are any middleware callbacks, apply them to the content
if middleware_callbacks:
content = content.decode()
if not isinstance(middleware_callbacks, list):
middleware_callbacks = [middleware_callbacks]
kwargs = {
"url": url,
"destpath": destpath,
"filename": filename,
"baseurl": baseurl,
"subpath": subpath,
"fulldestpath": fulldestpath,
"response": response,
}
kwargs.update(middleware_kwargs or {})
for callback in middleware_callbacks:
content = callback(content, **kwargs)
# ensure content is encoded, as we're doing a binary write
if isinstance(content, str):
content = content.encode()
# calculate the final destination for the file, and write the content out to there
dest = os.path.join(fulldestpath, filename)
with open(dest, "wb") as f:
f.write(content)
return relative_file_url, response | [
"def",
"download_file",
"(",
"url",
",",
"destpath",
",",
"filename",
"=",
"None",
",",
"baseurl",
"=",
"None",
",",
"subpath",
"=",
"None",
",",
"middleware_callbacks",
"=",
"None",
",",
"middleware_kwargs",
"=",
"None",
",",
"request_fn",
"=",
"sess",
".... | Download a file from a URL, into a destination folder, with optional use of relative paths and middleware processors.
- If `filename` is set, that will be used as the name of the file when it's written to the destpath.
- If `baseurl` is specified, the file will be put into subdirectory of destpath per the url's path relative to the baseurl.
- If `subpath` is specified, it will be appended to destpath before deciding where to write the file.
- If `middleware_callbacks` is specified, the returned content will be passed through those function(s) before being returned.
- If `middleware_kwargs` are also specified, they will also be passed in to each function in middleware_callbacks. | [
"Download",
"a",
"file",
"from",
"a",
"URL",
"into",
"a",
"destination",
"folder",
"with",
"optional",
"use",
"of",
"relative",
"paths",
"and",
"middleware",
"processors",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/html.py#L81-L129 | train | 28,878 |
learningequality/ricecooker | ricecooker/classes/files.py | File.set_language | def set_language(self, language):
""" Set self.language to internal lang. repr. code from str or Language object. """
if isinstance(language, str):
language_obj = languages.getlang(language)
if language_obj:
self.language = language_obj.code
else:
raise TypeError("Language code {} not found".format(language))
if isinstance(language, languages.Language):
self.language = language.code | python | def set_language(self, language):
""" Set self.language to internal lang. repr. code from str or Language object. """
if isinstance(language, str):
language_obj = languages.getlang(language)
if language_obj:
self.language = language_obj.code
else:
raise TypeError("Language code {} not found".format(language))
if isinstance(language, languages.Language):
self.language = language.code | [
"def",
"set_language",
"(",
"self",
",",
"language",
")",
":",
"if",
"isinstance",
"(",
"language",
",",
"str",
")",
":",
"language_obj",
"=",
"languages",
".",
"getlang",
"(",
"language",
")",
"if",
"language_obj",
":",
"self",
".",
"language",
"=",
"la... | Set self.language to internal lang. repr. code from str or Language object. | [
"Set",
"self",
".",
"language",
"to",
"internal",
"lang",
".",
"repr",
".",
"code",
"from",
"str",
"or",
"Language",
"object",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/files.py#L258-L267 | train | 28,879 |
learningequality/ricecooker | ricecooker/classes/nodes.py | Node.get_thumbnail_preset | def get_thumbnail_preset(self):
"""
Returns the format preset corresponding to this Node's type, or None if the node doesn't have a format preset.
"""
if isinstance(self, ChannelNode):
return format_presets.CHANNEL_THUMBNAIL
elif isinstance(self, TopicNode):
return format_presets.TOPIC_THUMBNAIL
elif isinstance(self, VideoNode):
return format_presets.VIDEO_THUMBNAIL
elif isinstance(self, AudioNode):
return format_presets.AUDIO_THUMBNAIL
elif isinstance(self, DocumentNode):
return format_presets.DOCUMENT_THUMBNAIL
elif isinstance(self, ExerciseNode):
return format_presets.EXERCISE_THUMBNAIL
elif isinstance(self, HTML5AppNode):
return format_presets.HTML5_THUMBNAIL
else:
return None | python | def get_thumbnail_preset(self):
"""
Returns the format preset corresponding to this Node's type, or None if the node doesn't have a format preset.
"""
if isinstance(self, ChannelNode):
return format_presets.CHANNEL_THUMBNAIL
elif isinstance(self, TopicNode):
return format_presets.TOPIC_THUMBNAIL
elif isinstance(self, VideoNode):
return format_presets.VIDEO_THUMBNAIL
elif isinstance(self, AudioNode):
return format_presets.AUDIO_THUMBNAIL
elif isinstance(self, DocumentNode):
return format_presets.DOCUMENT_THUMBNAIL
elif isinstance(self, ExerciseNode):
return format_presets.EXERCISE_THUMBNAIL
elif isinstance(self, HTML5AppNode):
return format_presets.HTML5_THUMBNAIL
else:
return None | [
"def",
"get_thumbnail_preset",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
",",
"ChannelNode",
")",
":",
"return",
"format_presets",
".",
"CHANNEL_THUMBNAIL",
"elif",
"isinstance",
"(",
"self",
",",
"TopicNode",
")",
":",
"return",
"format_presets",
... | Returns the format preset corresponding to this Node's type, or None if the node doesn't have a format preset. | [
"Returns",
"the",
"format",
"preset",
"corresponding",
"to",
"this",
"Node",
"s",
"type",
"or",
"None",
"if",
"the",
"node",
"doesn",
"t",
"have",
"a",
"format",
"preset",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/classes/nodes.py#L113-L132 | train | 28,880 |
learningequality/ricecooker | ricecooker/utils/pdf.py | PDFParser.open | def open(self, update=False):
"""
Opens pdf file to read from.
"""
filename = os.path.basename(self.source_path)
folder, _ext = os.path.splitext(filename)
self.path = os.path.sep.join([self.directory, folder, filename])
if not os.path.exists(os.path.dirname(self.path)):
os.makedirs(os.path.dirname(self.path))
# Download full pdf if it hasn't already been downloaded
if not os.path.isfile(self.path) or update:
with open(self.path, "wb") as fobj:
fobj.write(read(self.source_path))
self.file = open(self.path, 'rb')
self.pdf = CustomPDFReader(self.file) | python | def open(self, update=False):
"""
Opens pdf file to read from.
"""
filename = os.path.basename(self.source_path)
folder, _ext = os.path.splitext(filename)
self.path = os.path.sep.join([self.directory, folder, filename])
if not os.path.exists(os.path.dirname(self.path)):
os.makedirs(os.path.dirname(self.path))
# Download full pdf if it hasn't already been downloaded
if not os.path.isfile(self.path) or update:
with open(self.path, "wb") as fobj:
fobj.write(read(self.source_path))
self.file = open(self.path, 'rb')
self.pdf = CustomPDFReader(self.file) | [
"def",
"open",
"(",
"self",
",",
"update",
"=",
"False",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"source_path",
")",
"folder",
",",
"_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"sel... | Opens pdf file to read from. | [
"Opens",
"pdf",
"file",
"to",
"read",
"from",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/pdf.py#L46-L62 | train | 28,881 |
learningequality/ricecooker | ricecooker/utils/pdf.py | PDFParser.get_toc | def get_toc(self, subchapters=False):
"""
Returns table-of-contents information extracted from the PDF doc.
When `subchapters=False`, the output is a list of this form
.. code-block:: python
[
{'title': 'First chapter', 'page_start': 0, 'page_end': 10},
{'title': 'Second chapter', 'page_start': 10, 'page_end': 20},
...
]
Use the `split_chapters` method to process this list.
When `subchapters=True`, the output is chapter-subchapter tree structure,
that can be processed using the `split_subchapters` method.
"""
self.check_path()
chapters = []
index = 0
for dest in self.pdf.getOutlines():
# Process chapters
if isinstance(dest, CustomDestination) and not isinstance(dest['/Page'], NullObject):
page_num = self.pdf.getDestinationPageNumber(dest)
chapter_pagerange = {
"title": dest['/Title'].replace('\xa0', ' '),
"page_start": page_num if index != 0 else 0,
"page_end": self.pdf.numPages,
}
if subchapters:
chapter_pagerange["children"] = []
chapters.append(chapter_pagerange)
if index > 0:
# Go back to previous chapter and set page_end
chapters[index - 1]["page_end"] = page_num
if subchapters:
previous_chapter = chapters[index - 1]
if previous_chapter["children"]:
# Go back to previous subchapter and set page_end
previous_chapter["children"][-1]["page_end"] = page_num
index += 1
# Attach subchapters (lists) as children to last chapter
elif subchapters and isinstance(dest, list):
parent = chapters[index - 1]
subindex = 0
for subdest in dest:
if isinstance(subdest, CustomDestination) and not isinstance(subdest['/Page'], NullObject):
subpage_num = self.pdf.getDestinationPageNumber(subdest)
parent['children'].append({
"title": subdest['/Title'].replace('\xa0', ' '),
"page_start": subpage_num,
"page_end": self.pdf.numPages
})
if subindex > 0:
parent['children'][subindex - 1]["page_end"] = subpage_num
subindex +=1
return chapters | python | def get_toc(self, subchapters=False):
"""
Returns table-of-contents information extracted from the PDF doc.
When `subchapters=False`, the output is a list of this form
.. code-block:: python
[
{'title': 'First chapter', 'page_start': 0, 'page_end': 10},
{'title': 'Second chapter', 'page_start': 10, 'page_end': 20},
...
]
Use the `split_chapters` method to process this list.
When `subchapters=True`, the output is chapter-subchapter tree structure,
that can be processed using the `split_subchapters` method.
"""
self.check_path()
chapters = []
index = 0
for dest in self.pdf.getOutlines():
# Process chapters
if isinstance(dest, CustomDestination) and not isinstance(dest['/Page'], NullObject):
page_num = self.pdf.getDestinationPageNumber(dest)
chapter_pagerange = {
"title": dest['/Title'].replace('\xa0', ' '),
"page_start": page_num if index != 0 else 0,
"page_end": self.pdf.numPages,
}
if subchapters:
chapter_pagerange["children"] = []
chapters.append(chapter_pagerange)
if index > 0:
# Go back to previous chapter and set page_end
chapters[index - 1]["page_end"] = page_num
if subchapters:
previous_chapter = chapters[index - 1]
if previous_chapter["children"]:
# Go back to previous subchapter and set page_end
previous_chapter["children"][-1]["page_end"] = page_num
index += 1
# Attach subchapters (lists) as children to last chapter
elif subchapters and isinstance(dest, list):
parent = chapters[index - 1]
subindex = 0
for subdest in dest:
if isinstance(subdest, CustomDestination) and not isinstance(subdest['/Page'], NullObject):
subpage_num = self.pdf.getDestinationPageNumber(subdest)
parent['children'].append({
"title": subdest['/Title'].replace('\xa0', ' '),
"page_start": subpage_num,
"page_end": self.pdf.numPages
})
if subindex > 0:
parent['children'][subindex - 1]["page_end"] = subpage_num
subindex +=1
return chapters | [
"def",
"get_toc",
"(",
"self",
",",
"subchapters",
"=",
"False",
")",
":",
"self",
".",
"check_path",
"(",
")",
"chapters",
"=",
"[",
"]",
"index",
"=",
"0",
"for",
"dest",
"in",
"self",
".",
"pdf",
".",
"getOutlines",
"(",
")",
":",
"# Process chapt... | Returns table-of-contents information extracted from the PDF doc.
When `subchapters=False`, the output is a list of this form
.. code-block:: python
[
{'title': 'First chapter', 'page_start': 0, 'page_end': 10},
{'title': 'Second chapter', 'page_start': 10, 'page_end': 20},
...
]
Use the `split_chapters` method to process this list.
When `subchapters=True`, the output is chapter-subchapter tree structure,
that can be processed using the `split_subchapters` method. | [
"Returns",
"table",
"-",
"of",
"-",
"contents",
"information",
"extracted",
"from",
"the",
"PDF",
"doc",
".",
"When",
"subchapters",
"=",
"False",
"the",
"output",
"is",
"a",
"list",
"of",
"this",
"form"
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/pdf.py#L75-L136 | train | 28,882 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | get_metadata_file_path | def get_metadata_file_path(channeldir, filename):
"""
Return the path to the metadata file named `filename` that is a sibling of `channeldir`.
"""
channelparentdir, channeldirname = os.path.split(channeldir)
return os.path.join(channelparentdir, filename) | python | def get_metadata_file_path(channeldir, filename):
"""
Return the path to the metadata file named `filename` that is a sibling of `channeldir`.
"""
channelparentdir, channeldirname = os.path.split(channeldir)
return os.path.join(channelparentdir, filename) | [
"def",
"get_metadata_file_path",
"(",
"channeldir",
",",
"filename",
")",
":",
"channelparentdir",
",",
"channeldirname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"channeldir",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"channelparentdir",
",",
"f... | Return the path to the metadata file named `filename` that is a sibling of `channeldir`. | [
"Return",
"the",
"path",
"to",
"the",
"metadata",
"file",
"named",
"filename",
"that",
"is",
"a",
"sibling",
"of",
"channeldir",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L144-L149 | train | 28,883 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | _read_csv_lines | def _read_csv_lines(path):
"""
Opens CSV file `path` and returns list of rows.
Pass output of this function to `csv.DictReader` for reading data.
"""
csv_file = open(path, 'r')
csv_lines_raw = csv_file.readlines()
csv_lines_clean = [line for line in csv_lines_raw if len(line.strip()) > 0]
return csv_lines_clean | python | def _read_csv_lines(path):
"""
Opens CSV file `path` and returns list of rows.
Pass output of this function to `csv.DictReader` for reading data.
"""
csv_file = open(path, 'r')
csv_lines_raw = csv_file.readlines()
csv_lines_clean = [line for line in csv_lines_raw if len(line.strip()) > 0]
return csv_lines_clean | [
"def",
"_read_csv_lines",
"(",
"path",
")",
":",
"csv_file",
"=",
"open",
"(",
"path",
",",
"'r'",
")",
"csv_lines_raw",
"=",
"csv_file",
".",
"readlines",
"(",
")",
"csv_lines_clean",
"=",
"[",
"line",
"for",
"line",
"in",
"csv_lines_raw",
"if",
"len",
... | Opens CSV file `path` and returns list of rows.
Pass output of this function to `csv.DictReader` for reading data. | [
"Opens",
"CSV",
"file",
"path",
"and",
"returns",
"list",
"of",
"rows",
".",
"Pass",
"output",
"of",
"this",
"function",
"to",
"csv",
".",
"DictReader",
"for",
"reading",
"data",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L875-L883 | train | 28,884 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | _clean_dict | def _clean_dict(row):
"""
Transform empty strings values of dict `row` to None.
"""
row_cleaned = {}
for key, val in row.items():
if val is None or val == '':
row_cleaned[key] = None
else:
row_cleaned[key] = val
return row_cleaned | python | def _clean_dict(row):
"""
Transform empty strings values of dict `row` to None.
"""
row_cleaned = {}
for key, val in row.items():
if val is None or val == '':
row_cleaned[key] = None
else:
row_cleaned[key] = val
return row_cleaned | [
"def",
"_clean_dict",
"(",
"row",
")",
":",
"row_cleaned",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"row",
".",
"items",
"(",
")",
":",
"if",
"val",
"is",
"None",
"or",
"val",
"==",
"''",
":",
"row_cleaned",
"[",
"key",
"]",
"=",
"None",
... | Transform empty strings values of dict `row` to None. | [
"Transform",
"empty",
"strings",
"values",
"of",
"dict",
"row",
"to",
"None",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L886-L896 | train | 28,885 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.get | def get(self, path_tuple):
"""
Returns metadata dict for path in `path_tuple`.
"""
if path_tuple in self.contentcache:
metadata = self.contentcache[path_tuple]
else:
# TODO: make chef robust to missing metadata
# LOGGER.error(
LOGGER.warning('No metadata found for path_tuple ' + str(path_tuple))
metadata = dict(
filepath=os.path.sep.join(path_tuple),
title=os.path.sep.join(path_tuple)
)
return metadata | python | def get(self, path_tuple):
"""
Returns metadata dict for path in `path_tuple`.
"""
if path_tuple in self.contentcache:
metadata = self.contentcache[path_tuple]
else:
# TODO: make chef robust to missing metadata
# LOGGER.error(
LOGGER.warning('No metadata found for path_tuple ' + str(path_tuple))
metadata = dict(
filepath=os.path.sep.join(path_tuple),
title=os.path.sep.join(path_tuple)
)
return metadata | [
"def",
"get",
"(",
"self",
",",
"path_tuple",
")",
":",
"if",
"path_tuple",
"in",
"self",
".",
"contentcache",
":",
"metadata",
"=",
"self",
".",
"contentcache",
"[",
"path_tuple",
"]",
"else",
":",
"# TODO: make chef robust to missing metadata",
"# LOGGER.error("... | Returns metadata dict for path in `path_tuple`. | [
"Returns",
"metadata",
"dict",
"for",
"path",
"in",
"path_tuple",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L239-L253 | train | 28,886 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.get_channel_info | def get_channel_info(self):
"""
Returns the first data row from Channel.csv
"""
csv_filename = get_metadata_file_path(channeldir=self.channeldir, filename=self.channelinfo)
csv_lines = _read_csv_lines(csv_filename)
dict_reader = csv.DictReader(csv_lines)
channel_csvs_list = list(dict_reader)
channel_csv = channel_csvs_list[0]
if len(channel_csvs_list) > 1:
raise ValueError('Found multiple channel rows in ' + self.channelinfo)
channel_cleaned = _clean_dict(channel_csv)
channel_info = self._map_channel_row_to_dict(channel_cleaned)
return channel_info | python | def get_channel_info(self):
"""
Returns the first data row from Channel.csv
"""
csv_filename = get_metadata_file_path(channeldir=self.channeldir, filename=self.channelinfo)
csv_lines = _read_csv_lines(csv_filename)
dict_reader = csv.DictReader(csv_lines)
channel_csvs_list = list(dict_reader)
channel_csv = channel_csvs_list[0]
if len(channel_csvs_list) > 1:
raise ValueError('Found multiple channel rows in ' + self.channelinfo)
channel_cleaned = _clean_dict(channel_csv)
channel_info = self._map_channel_row_to_dict(channel_cleaned)
return channel_info | [
"def",
"get_channel_info",
"(",
"self",
")",
":",
"csv_filename",
"=",
"get_metadata_file_path",
"(",
"channeldir",
"=",
"self",
".",
"channeldir",
",",
"filename",
"=",
"self",
".",
"channelinfo",
")",
"csv_lines",
"=",
"_read_csv_lines",
"(",
"csv_filename",
"... | Returns the first data row from Channel.csv | [
"Returns",
"the",
"first",
"data",
"row",
"from",
"Channel",
".",
"csv"
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L255-L268 | train | 28,887 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.get_thumbnail_paths | def get_thumbnail_paths(self):
"""
Helper function used to avoid processing thumbnail files during `os.walk`.
"""
thumbnail_path_tuples = []
# channel thumbnail
channel_info = self.get_channel_info()
chthumbnail_path = channel_info.get('thumbnail_chan_path', None)
if chthumbnail_path:
chthumbnail_path_tuple = path_to_tuple(chthumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(chthumbnail_path_tuple)
# content thumbnails
for content_file_path_tuple, row in self.contentcache.items():
thumbnail_path = row.get('thumbnail_chan_path', None)
if thumbnail_path:
thumbnail_path_tuple = path_to_tuple(thumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(thumbnail_path_tuple)
return thumbnail_path_tuples | python | def get_thumbnail_paths(self):
"""
Helper function used to avoid processing thumbnail files during `os.walk`.
"""
thumbnail_path_tuples = []
# channel thumbnail
channel_info = self.get_channel_info()
chthumbnail_path = channel_info.get('thumbnail_chan_path', None)
if chthumbnail_path:
chthumbnail_path_tuple = path_to_tuple(chthumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(chthumbnail_path_tuple)
# content thumbnails
for content_file_path_tuple, row in self.contentcache.items():
thumbnail_path = row.get('thumbnail_chan_path', None)
if thumbnail_path:
thumbnail_path_tuple = path_to_tuple(thumbnail_path, windows=self.winpaths)
thumbnail_path_tuples.append(thumbnail_path_tuple)
return thumbnail_path_tuples | [
"def",
"get_thumbnail_paths",
"(",
"self",
")",
":",
"thumbnail_path_tuples",
"=",
"[",
"]",
"# channel thumbnail",
"channel_info",
"=",
"self",
".",
"get_channel_info",
"(",
")",
"chthumbnail_path",
"=",
"channel_info",
".",
"get",
"(",
"'thumbnail_chan_path'",
","... | Helper function used to avoid processing thumbnail files during `os.walk`. | [
"Helper",
"function",
"used",
"to",
"avoid",
"processing",
"thumbnail",
"files",
"during",
"os",
".",
"walk",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L270-L287 | train | 28,888 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider._map_exercise_row_to_dict | def _map_exercise_row_to_dict(self, row):
"""
Convert dictionary keys from raw CSV Exercise format to ricecooker keys.
"""
row_cleaned = _clean_dict(row)
license_id = row_cleaned[CONTENT_LICENSE_ID_KEY]
if license_id:
license_dict = dict(
license_id=row_cleaned[CONTENT_LICENSE_ID_KEY],
description=row_cleaned.get(CONTENT_LICENSE_DESCRIPTION_KEY, None),
copyright_holder=row_cleaned.get(CONTENT_LICENSE_COPYRIGHT_HOLDER_KEY, None)
)
else:
license_dict = None
# Parse exercise_data
randomize_raw = row_cleaned.get(EXERCISE_RANDOMIZE_KEY, None)
if randomize_raw is None or randomize_raw.lower() in CSV_STR_TRUE_VALUES:
randomize = True
elif randomize_raw.lower() in CSV_STR_FALSE_VALUES:
randomize = False
else:
raise ValueError('Unrecognized value ' + randomize_raw + ' for randomzied key')
exercise_data = dict(
mastery_model=exercises.M_OF_N,
randomize=randomize,
)
m_value = row_cleaned.get(EXERCISE_M_KEY, None)
if m_value:
exercise_data['m'] = m_value
n_value = row_cleaned.get(EXERCISE_N_KEY, None)
if n_value:
exercise_data['n'] = n_value
exercise_dict = dict(
chan_path=row_cleaned[CONTENT_PATH_KEY],
title=row_cleaned[CONTENT_TITLE_KEY],
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
description=row_cleaned.get(CONTENT_DESCRIPTION_KEY, None),
author=row_cleaned.get(CONTENT_AUTHOR_KEY, None),
language=row_cleaned.get(CONTENT_LANGUAGE_KEY, None),
license=license_dict,
exercise_data=exercise_data,
thumbnail_chan_path=row_cleaned.get(CONTENT_THUMBNAIL_KEY, None)
)
return exercise_dict | python | def _map_exercise_row_to_dict(self, row):
"""
Convert dictionary keys from raw CSV Exercise format to ricecooker keys.
"""
row_cleaned = _clean_dict(row)
license_id = row_cleaned[CONTENT_LICENSE_ID_KEY]
if license_id:
license_dict = dict(
license_id=row_cleaned[CONTENT_LICENSE_ID_KEY],
description=row_cleaned.get(CONTENT_LICENSE_DESCRIPTION_KEY, None),
copyright_holder=row_cleaned.get(CONTENT_LICENSE_COPYRIGHT_HOLDER_KEY, None)
)
else:
license_dict = None
# Parse exercise_data
randomize_raw = row_cleaned.get(EXERCISE_RANDOMIZE_KEY, None)
if randomize_raw is None or randomize_raw.lower() in CSV_STR_TRUE_VALUES:
randomize = True
elif randomize_raw.lower() in CSV_STR_FALSE_VALUES:
randomize = False
else:
raise ValueError('Unrecognized value ' + randomize_raw + ' for randomzied key')
exercise_data = dict(
mastery_model=exercises.M_OF_N,
randomize=randomize,
)
m_value = row_cleaned.get(EXERCISE_M_KEY, None)
if m_value:
exercise_data['m'] = m_value
n_value = row_cleaned.get(EXERCISE_N_KEY, None)
if n_value:
exercise_data['n'] = n_value
exercise_dict = dict(
chan_path=row_cleaned[CONTENT_PATH_KEY],
title=row_cleaned[CONTENT_TITLE_KEY],
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
description=row_cleaned.get(CONTENT_DESCRIPTION_KEY, None),
author=row_cleaned.get(CONTENT_AUTHOR_KEY, None),
language=row_cleaned.get(CONTENT_LANGUAGE_KEY, None),
license=license_dict,
exercise_data=exercise_data,
thumbnail_chan_path=row_cleaned.get(CONTENT_THUMBNAIL_KEY, None)
)
return exercise_dict | [
"def",
"_map_exercise_row_to_dict",
"(",
"self",
",",
"row",
")",
":",
"row_cleaned",
"=",
"_clean_dict",
"(",
"row",
")",
"license_id",
"=",
"row_cleaned",
"[",
"CONTENT_LICENSE_ID_KEY",
"]",
"if",
"license_id",
":",
"license_dict",
"=",
"dict",
"(",
"license_i... | Convert dictionary keys from raw CSV Exercise format to ricecooker keys. | [
"Convert",
"dictionary",
"keys",
"from",
"raw",
"CSV",
"Exercise",
"format",
"to",
"ricecooker",
"keys",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L359-L404 | train | 28,889 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider._map_exercise_question_row_to_dict | def _map_exercise_question_row_to_dict(self, row):
"""
Convert dictionary keys from raw CSV Exercise Question format to ricecooker keys.
"""
row_cleaned = _clean_dict(row)
# Parse answers
all_answers = []
ansA = row_cleaned[EXERCISE_QUESTIONS_OPTION_A_KEY]
all_answers.append(ansA)
ansB = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_B_KEY, None)
if ansB:
all_answers.append(ansB)
ansC = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_C_KEY, None)
if ansC:
all_answers.append(ansC)
ansD = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_D_KEY, None)
if ansD:
all_answers.append(ansD)
ansE = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_E_KEY, None)
if ansE:
all_answers.append(ansE)
more_answers_str = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_FGHI_KEY, None)
if more_answers_str:
more_answers = more_answers_str.split(DEFAULT_EXTRA_ITEMS_SEPARATOR)
all_answers.extend([ans.strip() for ans in more_answers])
# Parse correct answers
correct_answers = []
correct_ans = row_cleaned[EXERCISE_QUESTIONS_CORRECT_ANSWER_KEY]
correct_answers.append(correct_ans)
correct_ans2 = row_cleaned.get(EXERCISE_QUESTIONS_CORRECT_ANSWER2_KEY, None)
if correct_ans2:
correct_answers.append(correct_ans2)
correct_ans3 = row_cleaned.get(EXERCISE_QUESTIONS_CORRECT_ANSWER3_KEY, None)
if correct_ans3:
correct_answers.append(correct_ans3)
# Parse hints
hints = []
hint1 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_1_KEY, None)
if hint1:
hints.append(hint1)
hint2 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_2_KEY, None)
if hint2:
hints.append(hint2)
hint3 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_3_KEY, None)
if hint3:
hints.append(hint3)
hint4 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_4_KEY, None)
if hint4:
hints.append(hint4)
hint5 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_5_KEY, None)
if hint5:
hints.append(hint5)
more_hints_str = row_cleaned.get(EXERCISE_QUESTIONS_HINT_6789_KEY, None)
if more_hints_str:
more_hints = more_hints_str.split(DEFAULT_EXTRA_ITEMS_SEPARATOR)
hints.extend([hint.strip() for hint in more_hints])
# Build appropriate dictionary depending on question_type
question_type = row_cleaned[EXERCISE_QUESTIONS_TYPE_KEY]
if question_type == exercises.MULTIPLE_SELECTION:
question_dict = dict(
question_type=exercises.MULTIPLE_SELECTION,
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
id=row_cleaned[EXERCISE_QUESTIONS_QUESTIONID_KEY],
question=row_cleaned[EXERCISE_QUESTIONS_QUESTION_KEY],
correct_answers=correct_answers,
all_answers=all_answers,
hints=hints,
)
elif question_type == exercises.SINGLE_SELECTION:
question_dict = dict(
question_type=exercises.SINGLE_SELECTION,
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
id=row_cleaned[EXERCISE_QUESTIONS_QUESTIONID_KEY],
question=row_cleaned[EXERCISE_QUESTIONS_QUESTION_KEY],
correct_answer=correct_answers[0],
all_answers=all_answers,
hints=hints,
)
elif question_type == exercises.INPUT_QUESTION:
question_dict = dict(
question_type=exercises.INPUT_QUESTION,
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
id=row_cleaned[EXERCISE_QUESTIONS_QUESTIONID_KEY],
question=row_cleaned[EXERCISE_QUESTIONS_QUESTION_KEY],
answers=correct_answers,
hints=hints,
)
elif question_type == exercises.PERSEUS_QUESTION:
raise ValueError('Perseus questions not currently supported in CSV workflow.')
return question_dict | python | def _map_exercise_question_row_to_dict(self, row):
"""
Convert dictionary keys from raw CSV Exercise Question format to ricecooker keys.
"""
row_cleaned = _clean_dict(row)
# Parse answers
all_answers = []
ansA = row_cleaned[EXERCISE_QUESTIONS_OPTION_A_KEY]
all_answers.append(ansA)
ansB = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_B_KEY, None)
if ansB:
all_answers.append(ansB)
ansC = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_C_KEY, None)
if ansC:
all_answers.append(ansC)
ansD = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_D_KEY, None)
if ansD:
all_answers.append(ansD)
ansE = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_E_KEY, None)
if ansE:
all_answers.append(ansE)
more_answers_str = row_cleaned.get(EXERCISE_QUESTIONS_OPTION_FGHI_KEY, None)
if more_answers_str:
more_answers = more_answers_str.split(DEFAULT_EXTRA_ITEMS_SEPARATOR)
all_answers.extend([ans.strip() for ans in more_answers])
# Parse correct answers
correct_answers = []
correct_ans = row_cleaned[EXERCISE_QUESTIONS_CORRECT_ANSWER_KEY]
correct_answers.append(correct_ans)
correct_ans2 = row_cleaned.get(EXERCISE_QUESTIONS_CORRECT_ANSWER2_KEY, None)
if correct_ans2:
correct_answers.append(correct_ans2)
correct_ans3 = row_cleaned.get(EXERCISE_QUESTIONS_CORRECT_ANSWER3_KEY, None)
if correct_ans3:
correct_answers.append(correct_ans3)
# Parse hints
hints = []
hint1 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_1_KEY, None)
if hint1:
hints.append(hint1)
hint2 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_2_KEY, None)
if hint2:
hints.append(hint2)
hint3 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_3_KEY, None)
if hint3:
hints.append(hint3)
hint4 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_4_KEY, None)
if hint4:
hints.append(hint4)
hint5 = row_cleaned.get(EXERCISE_QUESTIONS_HINT_5_KEY, None)
if hint5:
hints.append(hint5)
more_hints_str = row_cleaned.get(EXERCISE_QUESTIONS_HINT_6789_KEY, None)
if more_hints_str:
more_hints = more_hints_str.split(DEFAULT_EXTRA_ITEMS_SEPARATOR)
hints.extend([hint.strip() for hint in more_hints])
# Build appropriate dictionary depending on question_type
question_type = row_cleaned[EXERCISE_QUESTIONS_TYPE_KEY]
if question_type == exercises.MULTIPLE_SELECTION:
question_dict = dict(
question_type=exercises.MULTIPLE_SELECTION,
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
id=row_cleaned[EXERCISE_QUESTIONS_QUESTIONID_KEY],
question=row_cleaned[EXERCISE_QUESTIONS_QUESTION_KEY],
correct_answers=correct_answers,
all_answers=all_answers,
hints=hints,
)
elif question_type == exercises.SINGLE_SELECTION:
question_dict = dict(
question_type=exercises.SINGLE_SELECTION,
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
id=row_cleaned[EXERCISE_QUESTIONS_QUESTIONID_KEY],
question=row_cleaned[EXERCISE_QUESTIONS_QUESTION_KEY],
correct_answer=correct_answers[0],
all_answers=all_answers,
hints=hints,
)
elif question_type == exercises.INPUT_QUESTION:
question_dict = dict(
question_type=exercises.INPUT_QUESTION,
source_id=row_cleaned[EXERCISE_SOURCEID_KEY],
id=row_cleaned[EXERCISE_QUESTIONS_QUESTIONID_KEY],
question=row_cleaned[EXERCISE_QUESTIONS_QUESTION_KEY],
answers=correct_answers,
hints=hints,
)
elif question_type == exercises.PERSEUS_QUESTION:
raise ValueError('Perseus questions not currently supported in CSV workflow.')
return question_dict | [
"def",
"_map_exercise_question_row_to_dict",
"(",
"self",
",",
"row",
")",
":",
"row_cleaned",
"=",
"_clean_dict",
"(",
"row",
")",
"# Parse answers",
"all_answers",
"=",
"[",
"]",
"ansA",
"=",
"row_cleaned",
"[",
"EXERCISE_QUESTIONS_OPTION_A_KEY",
"]",
"all_answers... | Convert dictionary keys from raw CSV Exercise Question format to ricecooker keys. | [
"Convert",
"dictionary",
"keys",
"from",
"raw",
"CSV",
"Exercise",
"Question",
"format",
"to",
"ricecooker",
"keys",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L406-L500 | train | 28,890 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.validate_headers | def validate_headers(self):
"""
Check if CSV metadata files have the right format.
"""
super().validate()
self.validate_header(self.channeldir, self.channelinfo, CHANNEL_INFO_HEADER)
self.validate_header(self.channeldir, self.contentinfo, CONTENT_INFO_HEADER)
if self.has_exercises():
self.validate_header(self.channeldir, self.exercisesinfo, EXERCISE_INFO_HEADER)
self.validate_header(self.channeldir, self.questionsinfo, EXERCISE_QUESTIONS_INFO_HEADER) | python | def validate_headers(self):
"""
Check if CSV metadata files have the right format.
"""
super().validate()
self.validate_header(self.channeldir, self.channelinfo, CHANNEL_INFO_HEADER)
self.validate_header(self.channeldir, self.contentinfo, CONTENT_INFO_HEADER)
if self.has_exercises():
self.validate_header(self.channeldir, self.exercisesinfo, EXERCISE_INFO_HEADER)
self.validate_header(self.channeldir, self.questionsinfo, EXERCISE_QUESTIONS_INFO_HEADER) | [
"def",
"validate_headers",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"validate",
"(",
")",
"self",
".",
"validate_header",
"(",
"self",
".",
"channeldir",
",",
"self",
".",
"channelinfo",
",",
"CHANNEL_INFO_HEADER",
")",
"self",
".",
"validate_header",
... | Check if CSV metadata files have the right format. | [
"Check",
"if",
"CSV",
"metadata",
"files",
"have",
"the",
"right",
"format",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L509-L518 | train | 28,891 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.validate_header | def validate_header(self, channeldir, filename, expected_header):
"""
Check if CSV metadata file `filename` have the expected header format.
"""
expected = set(expected_header)
csv_filename = get_metadata_file_path(channeldir, filename)
csv_lines = _read_csv_lines(csv_filename)
dict_reader = csv.DictReader(csv_lines)
actual = set(dict_reader.fieldnames)
if not actual == expected:
raise ValueError('Unexpected CSV file header in ' + csv_filename \
+ ' Expected header:' + str(expected)) | python | def validate_header(self, channeldir, filename, expected_header):
"""
Check if CSV metadata file `filename` have the expected header format.
"""
expected = set(expected_header)
csv_filename = get_metadata_file_path(channeldir, filename)
csv_lines = _read_csv_lines(csv_filename)
dict_reader = csv.DictReader(csv_lines)
actual = set(dict_reader.fieldnames)
if not actual == expected:
raise ValueError('Unexpected CSV file header in ' + csv_filename \
+ ' Expected header:' + str(expected)) | [
"def",
"validate_header",
"(",
"self",
",",
"channeldir",
",",
"filename",
",",
"expected_header",
")",
":",
"expected",
"=",
"set",
"(",
"expected_header",
")",
"csv_filename",
"=",
"get_metadata_file_path",
"(",
"channeldir",
",",
"filename",
")",
"csv_lines",
... | Check if CSV metadata file `filename` have the expected header format. | [
"Check",
"if",
"CSV",
"metadata",
"file",
"filename",
"have",
"the",
"expected",
"header",
"format",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L520-L531 | train | 28,892 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.generate_contentinfo_from_channeldir | def generate_contentinfo_from_channeldir(self, args, options):
"""
Create rows in Content.csv for each folder and file in `self.channeldir`.
"""
LOGGER.info('Generating Content.csv rows folders and file in channeldir')
file_path = get_metadata_file_path(self.channeldir, self.contentinfo)
with open(file_path, 'a') as csv_file:
csvwriter = csv.DictWriter(csv_file, CONTENT_INFO_HEADER)
channeldir = args['channeldir']
if channeldir.endswith(os.path.sep):
channeldir.rstrip(os.path.sep)
# MAIN PROCESSING OF os.walk OUTPUT
content_folders = sorted(os.walk(channeldir))
_ = content_folders.pop(0) # Skip over channel root folder
for rel_path, _subfolders, filenames in content_folders:
LOGGER.info('processing folder ' + str(rel_path))
sorted_filenames = sorted(filenames)
self.generate_contentinfo_from_folder(csvwriter, rel_path, sorted_filenames)
LOGGER.info('Generted {} row for all folders and files in {}'.format(self.contentinfo, self.channeldir)) | python | def generate_contentinfo_from_channeldir(self, args, options):
"""
Create rows in Content.csv for each folder and file in `self.channeldir`.
"""
LOGGER.info('Generating Content.csv rows folders and file in channeldir')
file_path = get_metadata_file_path(self.channeldir, self.contentinfo)
with open(file_path, 'a') as csv_file:
csvwriter = csv.DictWriter(csv_file, CONTENT_INFO_HEADER)
channeldir = args['channeldir']
if channeldir.endswith(os.path.sep):
channeldir.rstrip(os.path.sep)
# MAIN PROCESSING OF os.walk OUTPUT
content_folders = sorted(os.walk(channeldir))
_ = content_folders.pop(0) # Skip over channel root folder
for rel_path, _subfolders, filenames in content_folders:
LOGGER.info('processing folder ' + str(rel_path))
sorted_filenames = sorted(filenames)
self.generate_contentinfo_from_folder(csvwriter, rel_path, sorted_filenames)
LOGGER.info('Generted {} row for all folders and files in {}'.format(self.contentinfo, self.channeldir)) | [
"def",
"generate_contentinfo_from_channeldir",
"(",
"self",
",",
"args",
",",
"options",
")",
":",
"LOGGER",
".",
"info",
"(",
"'Generating Content.csv rows folders and file in channeldir'",
")",
"file_path",
"=",
"get_metadata_file_path",
"(",
"self",
".",
"channeldir",
... | Create rows in Content.csv for each folder and file in `self.channeldir`. | [
"Create",
"rows",
"in",
"Content",
".",
"csv",
"for",
"each",
"folder",
"and",
"file",
"in",
"self",
".",
"channeldir",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L777-L797 | train | 28,893 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.generate_contentinfo_from_folder | def generate_contentinfo_from_folder(self, csvwriter, rel_path, filenames):
"""
Create a topic node row in Content.csv for the folder at `rel_path` and
add content node rows for all the files in the `rel_path` folder.
"""
LOGGER.debug('IN process_folder ' + str(rel_path) + ' ' + str(filenames))
from ricecooker.utils.linecook import filter_filenames, filter_thumbnail_files, chan_path_from_rel_path
# WRITE TOPIC ROW
topicrow = self.channeldir_node_to_row( rel_path.split(os.path.sep) )
csvwriter.writerow(topicrow)
# WRITE CONTENT NODE ROWS
chan_path = chan_path_from_rel_path(rel_path, self.channeldir)
filenames_cleaned = filter_filenames(filenames)
# filenames_cleaned2 = filter_thumbnail_files(chan_path, filenames_cleaned, self)
for filename in filenames_cleaned:
path_tuple = rel_path.split(os.path.sep)
path_tuple.append(filename)
filerow = self.channeldir_node_to_row(path_tuple)
csvwriter.writerow(filerow) | python | def generate_contentinfo_from_folder(self, csvwriter, rel_path, filenames):
"""
Create a topic node row in Content.csv for the folder at `rel_path` and
add content node rows for all the files in the `rel_path` folder.
"""
LOGGER.debug('IN process_folder ' + str(rel_path) + ' ' + str(filenames))
from ricecooker.utils.linecook import filter_filenames, filter_thumbnail_files, chan_path_from_rel_path
# WRITE TOPIC ROW
topicrow = self.channeldir_node_to_row( rel_path.split(os.path.sep) )
csvwriter.writerow(topicrow)
# WRITE CONTENT NODE ROWS
chan_path = chan_path_from_rel_path(rel_path, self.channeldir)
filenames_cleaned = filter_filenames(filenames)
# filenames_cleaned2 = filter_thumbnail_files(chan_path, filenames_cleaned, self)
for filename in filenames_cleaned:
path_tuple = rel_path.split(os.path.sep)
path_tuple.append(filename)
filerow = self.channeldir_node_to_row(path_tuple)
csvwriter.writerow(filerow) | [
"def",
"generate_contentinfo_from_folder",
"(",
"self",
",",
"csvwriter",
",",
"rel_path",
",",
"filenames",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'IN process_folder '",
"+",
"str",
"(",
"rel_path",
")",
"+",
"' '",
"+",
"str",
"(",
"filenames",
")",
"... | Create a topic node row in Content.csv for the folder at `rel_path` and
add content node rows for all the files in the `rel_path` folder. | [
"Create",
"a",
"topic",
"node",
"row",
"in",
"Content",
".",
"csv",
"for",
"the",
"folder",
"at",
"rel_path",
"and",
"add",
"content",
"node",
"rows",
"for",
"all",
"the",
"files",
"in",
"the",
"rel_path",
"folder",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L799-L819 | train | 28,894 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.channeldir_node_to_row | def channeldir_node_to_row(self, path_tuple):
"""
Return a dict with keys corresponding to Content.csv columns.
"""
row = dict()
for key in CONTENT_INFO_HEADER:
row[key] = None
row[CONTENT_PATH_KEY] = "/".join(path_tuple) # use / in .csv on Windows and UNIX
title = path_tuple[-1].replace('_', ' ')
for ext in content_kinds.MAPPING.keys():
if title.endswith(ext):
title = title.replace('.'+ext, '')
row[CONTENT_TITLE_KEY] = title
row[CONTENT_SOURCEID_KEY] = path_tuple[-1]
return row | python | def channeldir_node_to_row(self, path_tuple):
"""
Return a dict with keys corresponding to Content.csv columns.
"""
row = dict()
for key in CONTENT_INFO_HEADER:
row[key] = None
row[CONTENT_PATH_KEY] = "/".join(path_tuple) # use / in .csv on Windows and UNIX
title = path_tuple[-1].replace('_', ' ')
for ext in content_kinds.MAPPING.keys():
if title.endswith(ext):
title = title.replace('.'+ext, '')
row[CONTENT_TITLE_KEY] = title
row[CONTENT_SOURCEID_KEY] = path_tuple[-1]
return row | [
"def",
"channeldir_node_to_row",
"(",
"self",
",",
"path_tuple",
")",
":",
"row",
"=",
"dict",
"(",
")",
"for",
"key",
"in",
"CONTENT_INFO_HEADER",
":",
"row",
"[",
"key",
"]",
"=",
"None",
"row",
"[",
"CONTENT_PATH_KEY",
"]",
"=",
"\"/\"",
".",
"join",
... | Return a dict with keys corresponding to Content.csv columns. | [
"Return",
"a",
"dict",
"with",
"keys",
"corresponding",
"to",
"Content",
".",
"csv",
"columns",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L822-L836 | train | 28,895 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.generate_templates | def generate_templates(self, exercise_questions=False):
"""
Create empty .csv files with the right headers and place them in the
Will place files as siblings of directory `channeldir`.
"""
self.generate_template(channeldir=self.channeldir,
filename=self.channelinfo,
header=CHANNEL_INFO_HEADER)
self.generate_template(channeldir=self.channeldir,
filename=self.contentinfo,
header=CONTENT_INFO_HEADER)
if exercise_questions:
self.generate_template(channeldir=self.channeldir,
filename=self.exercisesinfo,
header=EXERCISE_INFO_HEADER)
self.generate_template(channeldir=self.channeldir,
filename=self.questionsinfo,
header=EXERCISE_QUESTIONS_INFO_HEADER) | python | def generate_templates(self, exercise_questions=False):
"""
Create empty .csv files with the right headers and place them in the
Will place files as siblings of directory `channeldir`.
"""
self.generate_template(channeldir=self.channeldir,
filename=self.channelinfo,
header=CHANNEL_INFO_HEADER)
self.generate_template(channeldir=self.channeldir,
filename=self.contentinfo,
header=CONTENT_INFO_HEADER)
if exercise_questions:
self.generate_template(channeldir=self.channeldir,
filename=self.exercisesinfo,
header=EXERCISE_INFO_HEADER)
self.generate_template(channeldir=self.channeldir,
filename=self.questionsinfo,
header=EXERCISE_QUESTIONS_INFO_HEADER) | [
"def",
"generate_templates",
"(",
"self",
",",
"exercise_questions",
"=",
"False",
")",
":",
"self",
".",
"generate_template",
"(",
"channeldir",
"=",
"self",
".",
"channeldir",
",",
"filename",
"=",
"self",
".",
"channelinfo",
",",
"header",
"=",
"CHANNEL_INF... | Create empty .csv files with the right headers and place them in the
Will place files as siblings of directory `channeldir`. | [
"Create",
"empty",
".",
"csv",
"files",
"with",
"the",
"right",
"headers",
"and",
"place",
"them",
"in",
"the",
"Will",
"place",
"files",
"as",
"siblings",
"of",
"directory",
"channeldir",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L844-L861 | train | 28,896 |
learningequality/ricecooker | ricecooker/utils/metadata_provider.py | CsvMetadataProvider.generate_template | def generate_template(self, channeldir, filename, header):
"""
Create empty template .csv file called `filename` as siblings of the
directory `channeldir` with header fields specified in `header`.
"""
file_path = get_metadata_file_path(channeldir, filename)
if not os.path.exists(file_path):
with open(file_path, 'w') as csv_file:
csvwriter = csv.DictWriter(csv_file, header)
csvwriter.writeheader() | python | def generate_template(self, channeldir, filename, header):
"""
Create empty template .csv file called `filename` as siblings of the
directory `channeldir` with header fields specified in `header`.
"""
file_path = get_metadata_file_path(channeldir, filename)
if not os.path.exists(file_path):
with open(file_path, 'w') as csv_file:
csvwriter = csv.DictWriter(csv_file, header)
csvwriter.writeheader() | [
"def",
"generate_template",
"(",
"self",
",",
"channeldir",
",",
"filename",
",",
"header",
")",
":",
"file_path",
"=",
"get_metadata_file_path",
"(",
"channeldir",
",",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
... | Create empty template .csv file called `filename` as siblings of the
directory `channeldir` with header fields specified in `header`. | [
"Create",
"empty",
"template",
".",
"csv",
"file",
"called",
"filename",
"as",
"siblings",
"of",
"the",
"directory",
"channeldir",
"with",
"header",
"fields",
"specified",
"in",
"header",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L863-L872 | train | 28,897 |
learningequality/ricecooker | ricecooker/commands.py | authenticate_user | def authenticate_user(token):
"""
Add the content curation Authorizatino `token` header to `config.SESSION`.
"""
config.SESSION.headers.update({"Authorization": "Token {0}".format(token)})
try:
response = config.SESSION.post(config.authentication_url())
response.raise_for_status()
user = json.loads(response._content.decode("utf-8"))
config.LOGGER.info("Logged in with username {0}".format(user['username']))
return user['username'], token
except HTTPError:
config.LOGGER.error("Invalid token: Credentials not found")
sys.exit() | python | def authenticate_user(token):
"""
Add the content curation Authorizatino `token` header to `config.SESSION`.
"""
config.SESSION.headers.update({"Authorization": "Token {0}".format(token)})
try:
response = config.SESSION.post(config.authentication_url())
response.raise_for_status()
user = json.loads(response._content.decode("utf-8"))
config.LOGGER.info("Logged in with username {0}".format(user['username']))
return user['username'], token
except HTTPError:
config.LOGGER.error("Invalid token: Credentials not found")
sys.exit() | [
"def",
"authenticate_user",
"(",
"token",
")",
":",
"config",
".",
"SESSION",
".",
"headers",
".",
"update",
"(",
"{",
"\"Authorization\"",
":",
"\"Token {0}\"",
".",
"format",
"(",
"token",
")",
"}",
")",
"try",
":",
"response",
"=",
"config",
".",
"SES... | Add the content curation Authorizatino `token` header to `config.SESSION`. | [
"Add",
"the",
"content",
"curation",
"Authorizatino",
"token",
"header",
"to",
"config",
".",
"SESSION",
"."
] | 2f0385282500cb77ef2894646c6f9ce11bd7a853 | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/commands.py#L184-L197 | train | 28,898 |
SBRG/ssbio | ssbio/protein/structure/properties/residues.py | residue_distances | def residue_distances(res_1_num, res_1_chain, res_2_num, res_2_chain, model):
"""Distance between the last atom of 2 residues"""
res1 = model[res_1_chain][res_1_num].child_list[-1]
res2 = model[res_2_chain][res_2_num].child_list[-1]
distance = res1 - res2
return distance | python | def residue_distances(res_1_num, res_1_chain, res_2_num, res_2_chain, model):
"""Distance between the last atom of 2 residues"""
res1 = model[res_1_chain][res_1_num].child_list[-1]
res2 = model[res_2_chain][res_2_num].child_list[-1]
distance = res1 - res2
return distance | [
"def",
"residue_distances",
"(",
"res_1_num",
",",
"res_1_chain",
",",
"res_2_num",
",",
"res_2_chain",
",",
"model",
")",
":",
"res1",
"=",
"model",
"[",
"res_1_chain",
"]",
"[",
"res_1_num",
"]",
".",
"child_list",
"[",
"-",
"1",
"]",
"res2",
"=",
"mod... | Distance between the last atom of 2 residues | [
"Distance",
"between",
"the",
"last",
"atom",
"of",
"2",
"residues"
] | e9449e64ffc1a1f5ad07e5849aa12a650095f8a2 | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/residues.py#L82-L89 | train | 28,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.