body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
95f6f3b36b2cec5cfc30c83566077f5e1c0112f7473caa24a6e653a37b16fb8e
def _get_catalogitem(self, catalog_item): 'Given a catalog item href returns elementree' res = self.connection.request(catalog_item, headers={'Content-Type': 'application/vnd.vmware.vcloud.catalogItem+xml'}).object return res
Given a catalog item href returns elementree
libcloud/compute/drivers/vcloud.py
_get_catalogitem
ggreer/libcloud
1
python
def _get_catalogitem(self, catalog_item): res = self.connection.request(catalog_item, headers={'Content-Type': 'application/vnd.vmware.vcloud.catalogItem+xml'}).object return res
def _get_catalogitem(self, catalog_item): res = self.connection.request(catalog_item, headers={'Content-Type': 'application/vnd.vmware.vcloud.catalogItem+xml'}).object return res<|docstring|>Given a catalog item href returns elementree<|endoftext|>
483dfc9dcc38806a2ac1926bcea0c1b5a2a976f4eb2726589cfd37b55a43593c
def create_node(self, **kwargs): 'Creates and returns node.\n\n\n See L{NodeDriver.create_node} for more keyword args.\n\n Non-standard optional keyword arguments:\n @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7"\n @type ex_network: C{string}\n\n @keyword ex_vdc: Name of organisation\'s virtual data center where vApp VMs will be deployed.\n @type ex_vdc: C{string}\n\n @keyword ex_cpus: number of virtual cpus (limit depends on provider)\n @type ex_cpus: C{int}\n\n @keyword row: ????\n @type row: C{????}\n\n @keyword group: ????\n @type group: C{????}\n ' name = kwargs['name'] image = kwargs['image'] size = kwargs['size'] try: network = kwargs.get('ex_network', self.networks[0].get('href')) except IndexError: network = '' password = None if ('auth' in kwargs): auth = kwargs['auth'] if isinstance(auth, NodeAuthPassword): password = auth.password else: raise ValueError('auth must be of NodeAuthPassword type') instantiate_xml = InstantiateVAppXML(name=name, template=image.id, net_href=network, cpus=str(kwargs.get('ex_cpus', 1)), memory=str(size.ram), password=password, row=kwargs.get('ex_row', None), group=kwargs.get('ex_group', None)) vdc = self._get_vdc(kwargs.get('ex_vdc', None)) res = self.connection.request(('%s/action/instantiateVAppTemplate' % vdc.id), data=instantiate_xml.tostring(), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'}) vapp_href = get_url_path(res.object.get('href')) res = self.connection.request(('%s/action/deploy' % vapp_href), method='POST') self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(('%s/power/action/powerOn' % vapp_href), method='POST') res = self.connection.request(vapp_href) node = self._to_node(res.object) return node
Creates and returns node. See L{NodeDriver.create_node} for more keyword args. Non-standard optional keyword arguments: @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7" @type ex_network: C{string} @keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed. @type ex_vdc: C{string} @keyword ex_cpus: number of virtual cpus (limit depends on provider) @type ex_cpus: C{int} @keyword row: ???? @type row: C{????} @keyword group: ???? @type group: C{????}
libcloud/compute/drivers/vcloud.py
create_node
ggreer/libcloud
1
python
def create_node(self, **kwargs): 'Creates and returns node.\n\n\n See L{NodeDriver.create_node} for more keyword args.\n\n Non-standard optional keyword arguments:\n @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7"\n @type ex_network: C{string}\n\n @keyword ex_vdc: Name of organisation\'s virtual data center where vApp VMs will be deployed.\n @type ex_vdc: C{string}\n\n @keyword ex_cpus: number of virtual cpus (limit depends on provider)\n @type ex_cpus: C{int}\n\n @keyword row: ????\n @type row: C{????}\n\n @keyword group: ????\n @type group: C{????}\n ' name = kwargs['name'] image = kwargs['image'] size = kwargs['size'] try: network = kwargs.get('ex_network', self.networks[0].get('href')) except IndexError: network = password = None if ('auth' in kwargs): auth = kwargs['auth'] if isinstance(auth, NodeAuthPassword): password = auth.password else: raise ValueError('auth must be of NodeAuthPassword type') instantiate_xml = InstantiateVAppXML(name=name, template=image.id, net_href=network, cpus=str(kwargs.get('ex_cpus', 1)), memory=str(size.ram), password=password, row=kwargs.get('ex_row', None), group=kwargs.get('ex_group', None)) vdc = self._get_vdc(kwargs.get('ex_vdc', None)) res = self.connection.request(('%s/action/instantiateVAppTemplate' % vdc.id), data=instantiate_xml.tostring(), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'}) vapp_href = get_url_path(res.object.get('href')) res = self.connection.request(('%s/action/deploy' % vapp_href), method='POST') self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(('%s/power/action/powerOn' % vapp_href), method='POST') res = self.connection.request(vapp_href) node = self._to_node(res.object) return node
def create_node(self, **kwargs): 'Creates and returns node.\n\n\n See L{NodeDriver.create_node} for more keyword args.\n\n Non-standard optional keyword arguments:\n @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7"\n @type ex_network: C{string}\n\n @keyword ex_vdc: Name of organisation\'s virtual data center where vApp VMs will be deployed.\n @type ex_vdc: C{string}\n\n @keyword ex_cpus: number of virtual cpus (limit depends on provider)\n @type ex_cpus: C{int}\n\n @keyword row: ????\n @type row: C{????}\n\n @keyword group: ????\n @type group: C{????}\n ' name = kwargs['name'] image = kwargs['image'] size = kwargs['size'] try: network = kwargs.get('ex_network', self.networks[0].get('href')) except IndexError: network = password = None if ('auth' in kwargs): auth = kwargs['auth'] if isinstance(auth, NodeAuthPassword): password = auth.password else: raise ValueError('auth must be of NodeAuthPassword type') instantiate_xml = InstantiateVAppXML(name=name, template=image.id, net_href=network, cpus=str(kwargs.get('ex_cpus', 1)), memory=str(size.ram), password=password, row=kwargs.get('ex_row', None), group=kwargs.get('ex_group', None)) vdc = self._get_vdc(kwargs.get('ex_vdc', None)) res = self.connection.request(('%s/action/instantiateVAppTemplate' % vdc.id), data=instantiate_xml.tostring(), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'}) vapp_href = get_url_path(res.object.get('href')) res = self.connection.request(('%s/action/deploy' % vapp_href), method='POST') self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(('%s/power/action/powerOn' % vapp_href), method='POST') res = self.connection.request(vapp_href) node = self._to_node(res.object) return node<|docstring|>Creates and returns node. See L{NodeDriver.create_node} for more keyword args. 
Non-standard optional keyword arguments: @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7" @type ex_network: C{string} @keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed. @type ex_vdc: C{string} @keyword ex_cpus: number of virtual cpus (limit depends on provider) @type ex_cpus: C{int} @keyword row: ???? @type row: C{????} @keyword group: ???? @type group: C{????}<|endoftext|>
6437cb821c2ea79a5a36ad76cf74d2111e67b9c3b06b1aefd8f03816e8cf9c26
def _get_auth_headers(self): "hosting.com doesn't follow the standard vCloud authentication API" return {'Authentication': base64.b64encode(b(('%s:%s' % (self.user_id, self.key)))), 'Content-Length': 0}
hosting.com doesn't follow the standard vCloud authentication API
libcloud/compute/drivers/vcloud.py
_get_auth_headers
ggreer/libcloud
1
python
def _get_auth_headers(self): return {'Authentication': base64.b64encode(b(('%s:%s' % (self.user_id, self.key)))), 'Content-Length': 0}
def _get_auth_headers(self): return {'Authentication': base64.b64encode(b(('%s:%s' % (self.user_id, self.key)))), 'Content-Length': 0}<|docstring|>hosting.com doesn't follow the standard vCloud authentication API<|endoftext|>
7b260abc5e21a1eea854d43d90d1f26474b525138fa5cde35352d3f2698eb63c
def ex_find_node(self, node_name, vdcs=None): '\n Searches for node across specified vDCs. This is more effective than querying all nodes to get a single\n instance.\n\n @param node_name: The name of the node to search for\n @type node_name: C{string}\n\n @param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs will be searched.\n @type node_name: L{Vdc}\n\n @return: C{Node} node instance or None if not found\n ' if (not vdcs): vdcs = self.vdcs if (not getattr(vdcs, '__iter__', False)): vdcs = [vdcs] for vdc in vdcs: res = self.connection.request(vdc.id) entity_elems = res.object.findall(fixxpath(res.object, 'ResourceEntities/ResourceEntity')) for entity_elem in entity_elems: if ((entity_elem.get('type') == 'application/vnd.vmware.vcloud.vApp+xml') and (entity_elem.get('name') == node_name)): res = self.connection.request(entity_elem.get('href'), headers={'Content-Type': 'application/vnd.vmware.vcloud.vApp+xml'}) return self._to_node(res.object) return None
Searches for node across specified vDCs. This is more effective than querying all nodes to get a single instance. @param node_name: The name of the node to search for @type node_name: C{string} @param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs will be searched. @type node_name: L{Vdc} @return: C{Node} node instance or None if not found
libcloud/compute/drivers/vcloud.py
ex_find_node
ggreer/libcloud
1
python
def ex_find_node(self, node_name, vdcs=None): '\n Searches for node across specified vDCs. This is more effective than querying all nodes to get a single\n instance.\n\n @param node_name: The name of the node to search for\n @type node_name: C{string}\n\n @param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs will be searched.\n @type node_name: L{Vdc}\n\n @return: C{Node} node instance or None if not found\n ' if (not vdcs): vdcs = self.vdcs if (not getattr(vdcs, '__iter__', False)): vdcs = [vdcs] for vdc in vdcs: res = self.connection.request(vdc.id) entity_elems = res.object.findall(fixxpath(res.object, 'ResourceEntities/ResourceEntity')) for entity_elem in entity_elems: if ((entity_elem.get('type') == 'application/vnd.vmware.vcloud.vApp+xml') and (entity_elem.get('name') == node_name)): res = self.connection.request(entity_elem.get('href'), headers={'Content-Type': 'application/vnd.vmware.vcloud.vApp+xml'}) return self._to_node(res.object) return None
def ex_find_node(self, node_name, vdcs=None): '\n Searches for node across specified vDCs. This is more effective than querying all nodes to get a single\n instance.\n\n @param node_name: The name of the node to search for\n @type node_name: C{string}\n\n @param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs will be searched.\n @type node_name: L{Vdc}\n\n @return: C{Node} node instance or None if not found\n ' if (not vdcs): vdcs = self.vdcs if (not getattr(vdcs, '__iter__', False)): vdcs = [vdcs] for vdc in vdcs: res = self.connection.request(vdc.id) entity_elems = res.object.findall(fixxpath(res.object, 'ResourceEntities/ResourceEntity')) for entity_elem in entity_elems: if ((entity_elem.get('type') == 'application/vnd.vmware.vcloud.vApp+xml') and (entity_elem.get('name') == node_name)): res = self.connection.request(entity_elem.get('href'), headers={'Content-Type': 'application/vnd.vmware.vcloud.vApp+xml'}) return self._to_node(res.object) return None<|docstring|>Searches for node across specified vDCs. This is more effective than querying all nodes to get a single instance. @param node_name: The name of the node to search for @type node_name: C{string} @param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs will be searched. @type node_name: L{Vdc} @return: C{Node} node instance or None if not found<|endoftext|>
968e1246c474b38c392ec810374ac2db38b064ca76edb81863969d957f969b69
def ex_deploy_node(self, node): '\n Deploys existing node. Equal to vApp "start" operation.\n\n @param node: The node to be deployed\n @type node: L{Node}\n\n @return: C{Node} deployed node\n ' deploy_xml = ET.Element('DeployVAppParams', {'powerOn': 'true', 'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) res = self.connection.request(('%s/action/deploy' % get_url_path(node.id)), data=ET.tostring(deploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.deployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(get_url_path(node.id)) return self._to_node(res.object)
Deploys existing node. Equal to vApp "start" operation. @param node: The node to be deployed @type node: L{Node} @return: C{Node} deployed node
libcloud/compute/drivers/vcloud.py
ex_deploy_node
ggreer/libcloud
1
python
def ex_deploy_node(self, node): '\n Deploys existing node. Equal to vApp "start" operation.\n\n @param node: The node to be deployed\n @type node: L{Node}\n\n @return: C{Node} deployed node\n ' deploy_xml = ET.Element('DeployVAppParams', {'powerOn': 'true', 'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) res = self.connection.request(('%s/action/deploy' % get_url_path(node.id)), data=ET.tostring(deploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.deployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(get_url_path(node.id)) return self._to_node(res.object)
def ex_deploy_node(self, node): '\n Deploys existing node. Equal to vApp "start" operation.\n\n @param node: The node to be deployed\n @type node: L{Node}\n\n @return: C{Node} deployed node\n ' deploy_xml = ET.Element('DeployVAppParams', {'powerOn': 'true', 'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) res = self.connection.request(('%s/action/deploy' % get_url_path(node.id)), data=ET.tostring(deploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.deployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(get_url_path(node.id)) return self._to_node(res.object)<|docstring|>Deploys existing node. Equal to vApp "start" operation. @param node: The node to be deployed @type node: L{Node} @return: C{Node} deployed node<|endoftext|>
ebe69aaddd283499ad88ba0e9b22968c5eb5d542b17352b8d1ffecd46f9ca10f
def ex_undeploy_node(self, node): '\n Undeploys existing node. Equal to vApp "stop" operation.\n\n @param node: The node to be deployed\n @type node: L{Node}\n\n @return: C{Node} undeployed node instance\n ' undeploy_xml = ET.Element('UndeployVAppParams', {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) undeploy_power_action_xml = ET.SubElement(undeploy_xml, 'UndeployPowerAction') undeploy_power_action_xml.text = 'shutdown' try: res = self.connection.request(('%s/action/undeploy' % get_url_path(node.id)), data=ET.tostring(undeploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.undeployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) except Exception: undeploy_power_action_xml.text = 'powerOff' res = self.connection.request(('%s/action/undeploy' % get_url_path(node.id)), data=ET.tostring(undeploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.undeployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(get_url_path(node.id)) return self._to_node(res.object)
Undeploys existing node. Equal to vApp "stop" operation. @param node: The node to be deployed @type node: L{Node} @return: C{Node} undeployed node instance
libcloud/compute/drivers/vcloud.py
ex_undeploy_node
ggreer/libcloud
1
python
def ex_undeploy_node(self, node): '\n Undeploys existing node. Equal to vApp "stop" operation.\n\n @param node: The node to be deployed\n @type node: L{Node}\n\n @return: C{Node} undeployed node instance\n ' undeploy_xml = ET.Element('UndeployVAppParams', {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) undeploy_power_action_xml = ET.SubElement(undeploy_xml, 'UndeployPowerAction') undeploy_power_action_xml.text = 'shutdown' try: res = self.connection.request(('%s/action/undeploy' % get_url_path(node.id)), data=ET.tostring(undeploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.undeployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) except Exception: undeploy_power_action_xml.text = 'powerOff' res = self.connection.request(('%s/action/undeploy' % get_url_path(node.id)), data=ET.tostring(undeploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.undeployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(get_url_path(node.id)) return self._to_node(res.object)
def ex_undeploy_node(self, node): '\n Undeploys existing node. Equal to vApp "stop" operation.\n\n @param node: The node to be deployed\n @type node: L{Node}\n\n @return: C{Node} undeployed node instance\n ' undeploy_xml = ET.Element('UndeployVAppParams', {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}) undeploy_power_action_xml = ET.SubElement(undeploy_xml, 'UndeployPowerAction') undeploy_power_action_xml.text = 'shutdown' try: res = self.connection.request(('%s/action/undeploy' % get_url_path(node.id)), data=ET.tostring(undeploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.undeployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) except Exception: undeploy_power_action_xml.text = 'powerOff' res = self.connection.request(('%s/action/undeploy' % get_url_path(node.id)), data=ET.tostring(undeploy_xml), method='POST', headers={'Content-Type': 'application/vnd.vmware.vcloud.undeployVAppParams+xml'}) self._wait_for_task_completion(res.object.get('href')) res = self.connection.request(get_url_path(node.id)) return self._to_node(res.object)<|docstring|>Undeploys existing node. Equal to vApp "stop" operation. @param node: The node to be deployed @type node: L{Node} @return: C{Node} undeployed node instance<|endoftext|>
43c079b3b8d628b4885208b0d548feb824822a2c3efa44008a7b41eb60edad23
def create_node(self, **kwargs): "Creates and returns node. If the source image is:\n - vApp template - a new vApp is instantiated from template\n - existing vApp - a new vApp is cloned from the source vApp. Can not clone more vApps is parallel otherwise\n resource busy error is raised.\n\n\n See L{NodeDriver.create_node} for more keyword args.\n\n @keyword image: OS Image to boot on node. (required). Can be a NodeImage or existing Node that will be\n cloned.\n @type image: L{NodeImage} or L{Node}\n\n Non-standard optional keyword arguments:\n @keyword ex_network: Organisation's network name for attaching vApp VMs to.\n @type ex_network: C{string}\n\n @keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed.\n @type ex_vdc: C{string}\n\n @keyword ex_vm_names: list of names to be used as a VM and computer name. The name must be max. 15 characters\n long and follow the host name requirements.\n @type ex_vm_names: C{list} of L{string}\n\n @keyword ex_vm_cpu: number of virtual CPUs/cores to allocate for each vApp VM.\n @type ex_vm_cpu: C{number}\n\n @keyword ex_vm_memory: amount of memory in MB to allocate for each vApp VM.\n @type ex_vm_memory: C{number}\n\n @keyword ex_vm_script: full path to file containing guest customisation script for each vApp VM.\n Useful for creating users & pushing out public SSH keys etc.\n @type ex_vm_script: C{string}\n\n @keyword ex_vm_network: Override default vApp VM network name. 
Useful for when you've imported an OVF\n originating from outside of the vCloud.\n @type ex_vm_network: C{string}\n\n @keyword ex_vm_fence: Fence mode for connecting the vApp VM network (ex_vm_network) to the parent\n organisation network (ex_network).\n @type ex_vm_fence: C{string}\n\n @keyword ex_vm_ipmode: IP address allocation mode for all vApp VM network connections.\n @type ex_vm_ipmode: C{string}\n\n @keyword ex_deploy: set to False if the node shouldn't be deployed (started) after creation\n @type ex_deploy: C{bool}\n " name = kwargs['name'] image = kwargs['image'] ex_vm_names = kwargs.get('ex_vm_names') ex_vm_cpu = kwargs.get('ex_vm_cpu') ex_vm_memory = kwargs.get('ex_vm_memory') ex_vm_script = kwargs.get('ex_vm_script') ex_vm_fence = kwargs.get('ex_vm_fence', None) ex_network = kwargs.get('ex_network', None) ex_vm_network = kwargs.get('ex_vm_network', None) ex_vm_ipmode = kwargs.get('ex_vm_ipmode', None) ex_deploy = kwargs.get('ex_deploy', True) ex_vdc = kwargs.get('ex_vdc', None) self._validate_vm_names(ex_vm_names) self._validate_vm_cpu(ex_vm_cpu) self._validate_vm_memory(ex_vm_memory) self._validate_vm_fence(ex_vm_fence) self._validate_vm_ipmode(ex_vm_ipmode) ex_vm_script = self._validate_vm_script(ex_vm_script) if ex_network: network_href = self._get_network_href(ex_network) network_elem = self.connection.request(network_href).object else: network_elem = None vdc = self._get_vdc(ex_vdc) if self._is_node(image): (vapp_name, vapp_href) = self._clone_node(name, image, vdc) else: (vapp_name, vapp_href) = self._instantiate_node(name, image, network_elem, vdc, ex_vm_network, ex_vm_fence) self._change_vm_names(vapp_href, ex_vm_names) self._change_vm_cpu(vapp_href, ex_vm_cpu) self._change_vm_memory(vapp_href, ex_vm_memory) self._change_vm_script(vapp_href, ex_vm_script) self._change_vm_ipmode(vapp_href, ex_vm_ipmode) if ex_deploy: retry = 3 while True: try: res = self.connection.request(('%s/power/action/powerOn' % vapp_href), method='POST') 
self._wait_for_task_completion(res.object.get('href')) break except Exception: if (retry <= 0): raise retry -= 1 time.sleep(10) res = self.connection.request(vapp_href) node = self._to_node(res.object) return node
Creates and returns node. If the source image is: - vApp template - a new vApp is instantiated from template - existing vApp - a new vApp is cloned from the source vApp. Can not clone more vApps is parallel otherwise resource busy error is raised. See L{NodeDriver.create_node} for more keyword args. @keyword image: OS Image to boot on node. (required). Can be a NodeImage or existing Node that will be cloned. @type image: L{NodeImage} or L{Node} Non-standard optional keyword arguments: @keyword ex_network: Organisation's network name for attaching vApp VMs to. @type ex_network: C{string} @keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed. @type ex_vdc: C{string} @keyword ex_vm_names: list of names to be used as a VM and computer name. The name must be max. 15 characters long and follow the host name requirements. @type ex_vm_names: C{list} of L{string} @keyword ex_vm_cpu: number of virtual CPUs/cores to allocate for each vApp VM. @type ex_vm_cpu: C{number} @keyword ex_vm_memory: amount of memory in MB to allocate for each vApp VM. @type ex_vm_memory: C{number} @keyword ex_vm_script: full path to file containing guest customisation script for each vApp VM. Useful for creating users & pushing out public SSH keys etc. @type ex_vm_script: C{string} @keyword ex_vm_network: Override default vApp VM network name. Useful for when you've imported an OVF originating from outside of the vCloud. @type ex_vm_network: C{string} @keyword ex_vm_fence: Fence mode for connecting the vApp VM network (ex_vm_network) to the parent organisation network (ex_network). @type ex_vm_fence: C{string} @keyword ex_vm_ipmode: IP address allocation mode for all vApp VM network connections. @type ex_vm_ipmode: C{string} @keyword ex_deploy: set to False if the node shouldn't be deployed (started) after creation @type ex_deploy: C{bool}
libcloud/compute/drivers/vcloud.py
create_node
ggreer/libcloud
1
python
def create_node(self, **kwargs): "Creates and returns node. If the source image is:\n - vApp template - a new vApp is instantiated from template\n - existing vApp - a new vApp is cloned from the source vApp. Can not clone more vApps is parallel otherwise\n resource busy error is raised.\n\n\n See L{NodeDriver.create_node} for more keyword args.\n\n @keyword image: OS Image to boot on node. (required). Can be a NodeImage or existing Node that will be\n cloned.\n @type image: L{NodeImage} or L{Node}\n\n Non-standard optional keyword arguments:\n @keyword ex_network: Organisation's network name for attaching vApp VMs to.\n @type ex_network: C{string}\n\n @keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed.\n @type ex_vdc: C{string}\n\n @keyword ex_vm_names: list of names to be used as a VM and computer name. The name must be max. 15 characters\n long and follow the host name requirements.\n @type ex_vm_names: C{list} of L{string}\n\n @keyword ex_vm_cpu: number of virtual CPUs/cores to allocate for each vApp VM.\n @type ex_vm_cpu: C{number}\n\n @keyword ex_vm_memory: amount of memory in MB to allocate for each vApp VM.\n @type ex_vm_memory: C{number}\n\n @keyword ex_vm_script: full path to file containing guest customisation script for each vApp VM.\n Useful for creating users & pushing out public SSH keys etc.\n @type ex_vm_script: C{string}\n\n @keyword ex_vm_network: Override default vApp VM network name. 
Useful for when you've imported an OVF\n originating from outside of the vCloud.\n @type ex_vm_network: C{string}\n\n @keyword ex_vm_fence: Fence mode for connecting the vApp VM network (ex_vm_network) to the parent\n organisation network (ex_network).\n @type ex_vm_fence: C{string}\n\n @keyword ex_vm_ipmode: IP address allocation mode for all vApp VM network connections.\n @type ex_vm_ipmode: C{string}\n\n @keyword ex_deploy: set to False if the node shouldn't be deployed (started) after creation\n @type ex_deploy: C{bool}\n " name = kwargs['name'] image = kwargs['image'] ex_vm_names = kwargs.get('ex_vm_names') ex_vm_cpu = kwargs.get('ex_vm_cpu') ex_vm_memory = kwargs.get('ex_vm_memory') ex_vm_script = kwargs.get('ex_vm_script') ex_vm_fence = kwargs.get('ex_vm_fence', None) ex_network = kwargs.get('ex_network', None) ex_vm_network = kwargs.get('ex_vm_network', None) ex_vm_ipmode = kwargs.get('ex_vm_ipmode', None) ex_deploy = kwargs.get('ex_deploy', True) ex_vdc = kwargs.get('ex_vdc', None) self._validate_vm_names(ex_vm_names) self._validate_vm_cpu(ex_vm_cpu) self._validate_vm_memory(ex_vm_memory) self._validate_vm_fence(ex_vm_fence) self._validate_vm_ipmode(ex_vm_ipmode) ex_vm_script = self._validate_vm_script(ex_vm_script) if ex_network: network_href = self._get_network_href(ex_network) network_elem = self.connection.request(network_href).object else: network_elem = None vdc = self._get_vdc(ex_vdc) if self._is_node(image): (vapp_name, vapp_href) = self._clone_node(name, image, vdc) else: (vapp_name, vapp_href) = self._instantiate_node(name, image, network_elem, vdc, ex_vm_network, ex_vm_fence) self._change_vm_names(vapp_href, ex_vm_names) self._change_vm_cpu(vapp_href, ex_vm_cpu) self._change_vm_memory(vapp_href, ex_vm_memory) self._change_vm_script(vapp_href, ex_vm_script) self._change_vm_ipmode(vapp_href, ex_vm_ipmode) if ex_deploy: retry = 3 while True: try: res = self.connection.request(('%s/power/action/powerOn' % vapp_href), method='POST') 
self._wait_for_task_completion(res.object.get('href')) break except Exception: if (retry <= 0): raise retry -= 1 time.sleep(10) res = self.connection.request(vapp_href) node = self._to_node(res.object) return node
def create_node(self, **kwargs): "Creates and returns node. If the source image is:\n - vApp template - a new vApp is instantiated from template\n - existing vApp - a new vApp is cloned from the source vApp. Can not clone more vApps is parallel otherwise\n resource busy error is raised.\n\n\n See L{NodeDriver.create_node} for more keyword args.\n\n @keyword image: OS Image to boot on node. (required). Can be a NodeImage or existing Node that will be\n cloned.\n @type image: L{NodeImage} or L{Node}\n\n Non-standard optional keyword arguments:\n @keyword ex_network: Organisation's network name for attaching vApp VMs to.\n @type ex_network: C{string}\n\n @keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed.\n @type ex_vdc: C{string}\n\n @keyword ex_vm_names: list of names to be used as a VM and computer name. The name must be max. 15 characters\n long and follow the host name requirements.\n @type ex_vm_names: C{list} of L{string}\n\n @keyword ex_vm_cpu: number of virtual CPUs/cores to allocate for each vApp VM.\n @type ex_vm_cpu: C{number}\n\n @keyword ex_vm_memory: amount of memory in MB to allocate for each vApp VM.\n @type ex_vm_memory: C{number}\n\n @keyword ex_vm_script: full path to file containing guest customisation script for each vApp VM.\n Useful for creating users & pushing out public SSH keys etc.\n @type ex_vm_script: C{string}\n\n @keyword ex_vm_network: Override default vApp VM network name. 
Useful for when you've imported an OVF\n originating from outside of the vCloud.\n @type ex_vm_network: C{string}\n\n @keyword ex_vm_fence: Fence mode for connecting the vApp VM network (ex_vm_network) to the parent\n organisation network (ex_network).\n @type ex_vm_fence: C{string}\n\n @keyword ex_vm_ipmode: IP address allocation mode for all vApp VM network connections.\n @type ex_vm_ipmode: C{string}\n\n @keyword ex_deploy: set to False if the node shouldn't be deployed (started) after creation\n @type ex_deploy: C{bool}\n " name = kwargs['name'] image = kwargs['image'] ex_vm_names = kwargs.get('ex_vm_names') ex_vm_cpu = kwargs.get('ex_vm_cpu') ex_vm_memory = kwargs.get('ex_vm_memory') ex_vm_script = kwargs.get('ex_vm_script') ex_vm_fence = kwargs.get('ex_vm_fence', None) ex_network = kwargs.get('ex_network', None) ex_vm_network = kwargs.get('ex_vm_network', None) ex_vm_ipmode = kwargs.get('ex_vm_ipmode', None) ex_deploy = kwargs.get('ex_deploy', True) ex_vdc = kwargs.get('ex_vdc', None) self._validate_vm_names(ex_vm_names) self._validate_vm_cpu(ex_vm_cpu) self._validate_vm_memory(ex_vm_memory) self._validate_vm_fence(ex_vm_fence) self._validate_vm_ipmode(ex_vm_ipmode) ex_vm_script = self._validate_vm_script(ex_vm_script) if ex_network: network_href = self._get_network_href(ex_network) network_elem = self.connection.request(network_href).object else: network_elem = None vdc = self._get_vdc(ex_vdc) if self._is_node(image): (vapp_name, vapp_href) = self._clone_node(name, image, vdc) else: (vapp_name, vapp_href) = self._instantiate_node(name, image, network_elem, vdc, ex_vm_network, ex_vm_fence) self._change_vm_names(vapp_href, ex_vm_names) self._change_vm_cpu(vapp_href, ex_vm_cpu) self._change_vm_memory(vapp_href, ex_vm_memory) self._change_vm_script(vapp_href, ex_vm_script) self._change_vm_ipmode(vapp_href, ex_vm_ipmode) if ex_deploy: retry = 3 while True: try: res = self.connection.request(('%s/power/action/powerOn' % vapp_href), method='POST') 
self._wait_for_task_completion(res.object.get('href')) break except Exception: if (retry <= 0): raise retry -= 1 time.sleep(10) res = self.connection.request(vapp_href) node = self._to_node(res.object) return node<|docstring|>Creates and returns node. If the source image is: - vApp template - a new vApp is instantiated from template - existing vApp - a new vApp is cloned from the source vApp. Can not clone more vApps is parallel otherwise resource busy error is raised. See L{NodeDriver.create_node} for more keyword args. @keyword image: OS Image to boot on node. (required). Can be a NodeImage or existing Node that will be cloned. @type image: L{NodeImage} or L{Node} Non-standard optional keyword arguments: @keyword ex_network: Organisation's network name for attaching vApp VMs to. @type ex_network: C{string} @keyword ex_vdc: Name of organisation's virtual data center where vApp VMs will be deployed. @type ex_vdc: C{string} @keyword ex_vm_names: list of names to be used as a VM and computer name. The name must be max. 15 characters long and follow the host name requirements. @type ex_vm_names: C{list} of L{string} @keyword ex_vm_cpu: number of virtual CPUs/cores to allocate for each vApp VM. @type ex_vm_cpu: C{number} @keyword ex_vm_memory: amount of memory in MB to allocate for each vApp VM. @type ex_vm_memory: C{number} @keyword ex_vm_script: full path to file containing guest customisation script for each vApp VM. Useful for creating users & pushing out public SSH keys etc. @type ex_vm_script: C{string} @keyword ex_vm_network: Override default vApp VM network name. Useful for when you've imported an OVF originating from outside of the vCloud. @type ex_vm_network: C{string} @keyword ex_vm_fence: Fence mode for connecting the vApp VM network (ex_vm_network) to the parent organisation network (ex_network). @type ex_vm_fence: C{string} @keyword ex_vm_ipmode: IP address allocation mode for all vApp VM network connections. 
@type ex_vm_ipmode: C{string} @keyword ex_deploy: set to False if the node shouldn't be deployed (started) after creation @type ex_deploy: C{bool}<|endoftext|>
6042da7ab774eb9ec849bbcdaa1a56ad6afafc8676ef9a0e296e097a8c78e25f
def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu):
    """
    Set the virtual CPU count for a single VM, or for every VM attached
    to a vApp when ``vapp_or_vm_id`` points at a vApp.

    Hot-adding virtual CPUs must be enabled on powered-on virtual
    machines; otherwise call this only on an undeployed vApp.

    @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID
        is used here all attached VMs will be modified
    @type vapp_or_vm_id: C{string}

    @keyword vm_cpu: number of virtual CPUs/cores to allocate for specified VMs
    @type vm_cpu: C{number}
    """
    # Validate first so invalid values fail fast, then apply the change.
    self._validate_vm_cpu(vm_cpu)
    self._change_vm_cpu(vapp_or_vm_id, vm_cpu)
Sets the number of virtual CPUs for the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. Please ensure that hot-adding a virtual CPU is enabled for the powered on virtual machines. Otherwise use this method on undeployed vApp. @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified @type vapp_or_vm_id: C{string} @keyword vm_cpu: number of virtual CPUs/cores to allocate for specified VMs @type vm_cpu: C{number}
libcloud/compute/drivers/vcloud.py
ex_set_vm_cpu
ggreer/libcloud
1
python
def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu): '\n Sets the number of virtual CPUs for the specified VM or VMs under the vApp. If the vapp_or_vm_id param\n represents a link to an vApp all VMs that are attached to this vApp will be modified.\n\n Please ensure that hot-adding a virtual CPU is enabled for the powered on virtual machines.\n Otherwise use this method on undeployed vApp.\n\n @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs\n will be modified\n @type vapp_or_vm_id: C{string}\n\n @keyword vm_cpu: number of virtual CPUs/cores to allocate for specified VMs\n @type vm_cpu: C{number}\n ' self._validate_vm_cpu(vm_cpu) self._change_vm_cpu(vapp_or_vm_id, vm_cpu)
def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu): '\n Sets the number of virtual CPUs for the specified VM or VMs under the vApp. If the vapp_or_vm_id param\n represents a link to an vApp all VMs that are attached to this vApp will be modified.\n\n Please ensure that hot-adding a virtual CPU is enabled for the powered on virtual machines.\n Otherwise use this method on undeployed vApp.\n\n @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs\n will be modified\n @type vapp_or_vm_id: C{string}\n\n @keyword vm_cpu: number of virtual CPUs/cores to allocate for specified VMs\n @type vm_cpu: C{number}\n ' self._validate_vm_cpu(vm_cpu) self._change_vm_cpu(vapp_or_vm_id, vm_cpu)<|docstring|>Sets the number of virtual CPUs for the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. Please ensure that hot-adding a virtual CPU is enabled for the powered on virtual machines. Otherwise use this method on undeployed vApp. @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified @type vapp_or_vm_id: C{string} @keyword vm_cpu: number of virtual CPUs/cores to allocate for specified VMs @type vm_cpu: C{number}<|endoftext|>
617f3dd5aa0e65ad6667c10c958d4b04c85a8c45e86cf86864bbf12946373aee
def ex_set_vm_memory(self, vapp_or_vm_id, vm_memory):
    """
    Set the amount of virtual memory (in MB) for a single VM, or for every
    VM attached to a vApp when ``vapp_or_vm_id`` points at a vApp.

    Hot-changing virtual memory must be enabled on powered-on virtual
    machines; otherwise call this only on an undeployed vApp.

    @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID
        is used here all attached VMs will be modified
    @type vapp_or_vm_id: C{string}

    @keyword vm_memory: virtual memory in MB to allocate for the specified VM or VMs
    @type vm_memory: C{number}
    """
    # Reject out-of-range values before touching the API.
    self._validate_vm_memory(vm_memory)
    self._change_vm_memory(vapp_or_vm_id, vm_memory)
Sets the virtual memory in MB to allocate for the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. Please ensure that hot-change of virtual memory is enabled for the powered on virtual machines. Otherwise use this method on undeployed vApp. @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified @type vapp_or_vm_id: C{string} @keyword vm_memory: virtual memory in MB to allocate for the specified VM or VMs @type vm_memory: C{number}
libcloud/compute/drivers/vcloud.py
ex_set_vm_memory
ggreer/libcloud
1
python
def ex_set_vm_memory(self, vapp_or_vm_id, vm_memory): '\n Sets the virtual memory in MB to allocate for the specified VM or VMs under the vApp.\n If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to\n this vApp will be modified.\n\n Please ensure that hot-change of virtual memory is enabled for the powered on virtual machines.\n Otherwise use this method on undeployed vApp.\n\n @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs\n will be modified\n @type vapp_or_vm_id: C{string}\n\n @keyword vm_memory: virtual memory in MB to allocate for the specified VM or VMs\n @type vm_memory: C{number}\n ' self._validate_vm_memory(vm_memory) self._change_vm_memory(vapp_or_vm_id, vm_memory)
def ex_set_vm_memory(self, vapp_or_vm_id, vm_memory): '\n Sets the virtual memory in MB to allocate for the specified VM or VMs under the vApp.\n If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to\n this vApp will be modified.\n\n Please ensure that hot-change of virtual memory is enabled for the powered on virtual machines.\n Otherwise use this method on undeployed vApp.\n\n @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs\n will be modified\n @type vapp_or_vm_id: C{string}\n\n @keyword vm_memory: virtual memory in MB to allocate for the specified VM or VMs\n @type vm_memory: C{number}\n ' self._validate_vm_memory(vm_memory) self._change_vm_memory(vapp_or_vm_id, vm_memory)<|docstring|>Sets the virtual memory in MB to allocate for the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. Please ensure that hot-change of virtual memory is enabled for the powered on virtual machines. Otherwise use this method on undeployed vApp. @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified @type vapp_or_vm_id: C{string} @keyword vm_memory: virtual memory in MB to allocate for the specified VM or VMs @type vm_memory: C{number}<|endoftext|>
5675629d1c368b93b27d389b70edaf43539a6655514901d93a3af50aedea9fd7
def ex_add_vm_disk(self, vapp_or_vm_id, vm_disk_size):
    """
    Attach a new virtual disk to a single VM, or to every VM attached to a
    vApp when ``vapp_or_vm_id`` points at a vApp.

    @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID
        is used here all attached VMs will be modified
    @type vapp_or_vm_id: C{string}

    @keyword vm_disk_size: the disk capacity in GB that will be added to the
        specified VM or VMs
    @type vm_disk_size: C{number}
    """
    # Reject invalid sizes before touching the API.
    self._validate_vm_disk_size(vm_disk_size)
    self._add_vm_disk(vapp_or_vm_id, vm_disk_size)
Adds a virtual disk to the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified @type vapp_or_vm_id: C{string} @keyword vm_disk_size: the disk capacity in GB that will be added to the specified VM or VMs @type vm_disk_size: C{number}
libcloud/compute/drivers/vcloud.py
ex_add_vm_disk
ggreer/libcloud
1
python
def ex_add_vm_disk(self, vapp_or_vm_id, vm_disk_size): '\n Adds a virtual disk to the specified VM or VMs under the vApp. If the vapp_or_vm_id param\n represents a link to an vApp all VMs that are attached to this vApp will be modified.\n\n @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs\n will be modified\n @type vapp_or_vm_id: C{string}\n\n @keyword vm_disk_size: the disk capacity in GB that will be added to the specified VM or VMs\n @type vm_disk_size: C{number}\n ' self._validate_vm_disk_size(vm_disk_size) self._add_vm_disk(vapp_or_vm_id, vm_disk_size)
def ex_add_vm_disk(self, vapp_or_vm_id, vm_disk_size): '\n Adds a virtual disk to the specified VM or VMs under the vApp. If the vapp_or_vm_id param\n represents a link to an vApp all VMs that are attached to this vApp will be modified.\n\n @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs\n will be modified\n @type vapp_or_vm_id: C{string}\n\n @keyword vm_disk_size: the disk capacity in GB that will be added to the specified VM or VMs\n @type vm_disk_size: C{number}\n ' self._validate_vm_disk_size(vm_disk_size) self._add_vm_disk(vapp_or_vm_id, vm_disk_size)<|docstring|>Adds a virtual disk to the specified VM or VMs under the vApp. If the vapp_or_vm_id param represents a link to an vApp all VMs that are attached to this vApp will be modified. @keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a vApp ID is used here all attached VMs will be modified @type vapp_or_vm_id: C{string} @keyword vm_disk_size: the disk capacity in GB that will be added to the specified VM or VMs @type vm_disk_size: C{number}<|endoftext|>
39fcc30c2c408458b0c1b30704318cf6f407efe4e41ab6ca928ec45691ff00b4
def setup_logger(verbose):
    """
    Configure the global logger according to the verbosity level passed in
    the command-line. This method should be called only once!
    """
    # Map the -v count onto a stdlib logging level
    # (0 -> WARNING, 1 -> INFO, 2+ -> DEBUG).
    if verbose >= 2:
        level = logging.DEBUG
    elif verbose == 1:
        level = logging.INFO
    else:
        level = logging.WARNING

    logger.logger.setLevel(level)

    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(logging.Formatter('%(levelname)-10s| %(message)s'))
    stream_handler.setLevel(level)
    logger.logger.addHandler(stream_handler)

    # Anything past -vv has no extra effect; tell the user.
    if verbose > 2:
        logging.warning('Exceeded max verbosity, using -vv')
Setup the global logger according to the verbosity level passed in the command-line. This method should be called only once!
scripts/logger.py
setup_logger
shsh999/WPP-
7
python
def setup_logger(verbose): '\n Setup the global logger according to the verbosity level passed in the command-line.\n This method should be called only once!\n ' if (verbose >= 2): logging_level = logging.DEBUG elif (verbose == 1): logging_level = logging.INFO else: logging_level = logging.WARNING logger.logger.setLevel(logging_level) handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter('%(levelname)-10s| %(message)s')) handler.setLevel(logging_level) logger.logger.addHandler(handler) if (verbose > 2): logging.warning('Exceeded max verbosity, using -vv')
def setup_logger(verbose): '\n Setup the global logger according to the verbosity level passed in the command-line.\n This method should be called only once!\n ' if (verbose >= 2): logging_level = logging.DEBUG elif (verbose == 1): logging_level = logging.INFO else: logging_level = logging.WARNING logger.logger.setLevel(logging_level) handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter('%(levelname)-10s| %(message)s')) handler.setLevel(logging_level) logger.logger.addHandler(handler) if (verbose > 2): logging.warning('Exceeded max verbosity, using -vv')<|docstring|>Setup the global logger according to the verbosity level passed in the command-line. This method should be called only once!<|endoftext|>
ee473327890ea51102569444c630c84f98f895f2d926372be962ee8f4f849adb
def __init__(self, proxy_protocol=None, proxy_host=None, proxy_port=None, proxy_user=None,
             proxy_password=None, ignore_ssl_verification=False, ssl_ca_cert=None,
             cert_file=None, key_file=None, timeout=(60, 120), retry_times=0,
             pool_connections=1, pool_maxsize=1):
    """
    HTTP transport configuration holder.

    :param proxy_protocol: (optional) proxy protocol, http or https
    :type proxy_protocol: str
    :param proxy_host: (optional) hostname or ip address of proxy server
    :type proxy_host: str
    :param proxy_port: (optional) port of proxy server
    :type proxy_port: str
    :param proxy_user: (optional) user name used for proxy authentication
    :type proxy_user: str
    :param proxy_password: (optional) password used for proxy authentication
    :type proxy_password: str
    :param ignore_ssl_verification: whether to skip SSL certificate validation
        while sending https requests; defaults to False
    :type ignore_ssl_verification: bool
    :param ssl_ca_cert: (optional) a path to a CA bundle to use
    :type ssl_ca_cert: str
    :param cert_file: (optional) a path to an ssl client cert file (.pem)
    :type cert_file: str
    :param key_file: (optional) a path to an ssl client cert key file (.key)
    :type key_file: str
    :param timeout: (optional) seconds to wait for the server to send data
        before giving up, as a float or a (connect timeout, read timeout) tuple
    :type timeout: float or tuple
    :param retry_times: maximum number of retries each connection should
        attempt; defaults to 0 (failed connections are not retried)
    :type retry_times: int
    :param pool_connections: number of urllib3 connection pools to cache;
        defaults to 1
    :type pool_connections: int
    :param pool_maxsize: maximum number of connections to save in the pool;
        defaults to 1
    :type pool_maxsize: int
    """
    # Proxy configuration.
    self.proxy_protocol = proxy_protocol
    self.proxy_host = proxy_host
    self.proxy_port = proxy_port
    self.proxy_user = proxy_user
    self.proxy_password = proxy_password
    # TLS / certificate settings.
    self.ignore_ssl_verification = ignore_ssl_verification
    self.ssl_ca_cert = ssl_ca_cert
    self.cert_file = cert_file
    self.key_file = key_file
    # Connection behaviour.
    self.timeout = timeout
    self.retry_times = retry_times
    self.pool_connections = pool_connections
    self.pool_maxsize = pool_maxsize
:param proxy_protocol(optional) : proxy protocol, http or https :type proxy_protocol: str :param proxy_host(optional) : hostname or ip address of proxy server :type proxy_host: str :param proxy_port(optional) : port of proxy server :type proxy_port: str :param proxy_user(optional) : user name used for proxy authentication :type proxy_user: str :param proxy_password(optional) : user name used for proxy authentication :type proxy_password: str :param ignore_ssl_verification: whether skip SSL certificate validation while sending https request, default, value is False :type ignore_ssl_verification: bool :param ssl_ca_cert: (optional) a path to a CA bundle to use :type ssl_ca_cert: str :param cert_file: (optional) a path to ssl client cert file (.pem) :type cert_file: str :param key_file: (optional) a path to a ssl client cert key file (.key) :type key_file: str :param timeout: (optional) seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param retry_times: maximum number of retries each connection should attempt, default, does not retry failed connections. :type retry_times: int :param pool_connections: number of urllib3 connection pools to cache, default, value is 1 :type pool_connections: int :param pool_maxsize: maximum number of connections to save in the pool, default, value is 1 :type pool_maxsize: int
huaweicloud-sdk-core/huaweicloudsdkcore/http/http_config.py
__init__
wuchen-huawei/huaweicloud-sdk-python-v3
64
python
def __init__(self, proxy_protocol=None, proxy_host=None, proxy_port=None, proxy_user=None, proxy_password=None, ignore_ssl_verification=False, ssl_ca_cert=None, cert_file=None, key_file=None, timeout=(60, 120), retry_times=0, pool_connections=1, pool_maxsize=1): '\n :param proxy_protocol(optional) : proxy protocol, http or https\n :type proxy_protocol: str\n\n :param proxy_host(optional) : hostname or ip address of proxy server\n :type proxy_host: str\n\n :param proxy_port(optional) : port of proxy server\n :type proxy_port: str\n\n :param proxy_user(optional) : user name used for proxy authentication\n :type proxy_user: str\n\n :param proxy_password(optional) : user name used for proxy authentication\n :type proxy_password: str\n\n :param ignore_ssl_verification: whether skip SSL certificate validation while sending https request,\n default, value is False\n :type ignore_ssl_verification: bool\n\n :param ssl_ca_cert: (optional) a path to a CA bundle to use\n :type ssl_ca_cert: str\n\n :param cert_file: (optional) a path to ssl client cert file (.pem)\n :type cert_file: str\n\n :param key_file: (optional) a path to a ssl client cert key file (.key)\n :type key_file: str\n\n :param timeout: (optional) seconds to wait for the server to send data before giving up,\n as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple.\n :type timeout: float or tuple\n\n :param retry_times: maximum number of retries each connection should attempt,\n default, does not retry failed connections.\n :type retry_times: int\n\n :param pool_connections: number of urllib3 connection pools to cache,\n default, value is 1\n :type pool_connections: int\n\n :param pool_maxsize: maximum number of connections to save in the pool,\n default, value is 1\n :type pool_maxsize: int\n ' self.proxy_protocol = proxy_protocol self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_user = proxy_user self.proxy_password = proxy_password self.ignore_ssl_verification = 
ignore_ssl_verification self.ssl_ca_cert = ssl_ca_cert self.cert_file = cert_file self.key_file = key_file self.timeout = timeout self.retry_times = retry_times self.pool_connections = pool_connections self.pool_maxsize = pool_maxsize
def __init__(self, proxy_protocol=None, proxy_host=None, proxy_port=None, proxy_user=None, proxy_password=None, ignore_ssl_verification=False, ssl_ca_cert=None, cert_file=None, key_file=None, timeout=(60, 120), retry_times=0, pool_connections=1, pool_maxsize=1): '\n :param proxy_protocol(optional) : proxy protocol, http or https\n :type proxy_protocol: str\n\n :param proxy_host(optional) : hostname or ip address of proxy server\n :type proxy_host: str\n\n :param proxy_port(optional) : port of proxy server\n :type proxy_port: str\n\n :param proxy_user(optional) : user name used for proxy authentication\n :type proxy_user: str\n\n :param proxy_password(optional) : user name used for proxy authentication\n :type proxy_password: str\n\n :param ignore_ssl_verification: whether skip SSL certificate validation while sending https request,\n default, value is False\n :type ignore_ssl_verification: bool\n\n :param ssl_ca_cert: (optional) a path to a CA bundle to use\n :type ssl_ca_cert: str\n\n :param cert_file: (optional) a path to ssl client cert file (.pem)\n :type cert_file: str\n\n :param key_file: (optional) a path to a ssl client cert key file (.key)\n :type key_file: str\n\n :param timeout: (optional) seconds to wait for the server to send data before giving up,\n as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple.\n :type timeout: float or tuple\n\n :param retry_times: maximum number of retries each connection should attempt,\n default, does not retry failed connections.\n :type retry_times: int\n\n :param pool_connections: number of urllib3 connection pools to cache,\n default, value is 1\n :type pool_connections: int\n\n :param pool_maxsize: maximum number of connections to save in the pool,\n default, value is 1\n :type pool_maxsize: int\n ' self.proxy_protocol = proxy_protocol self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_user = proxy_user self.proxy_password = proxy_password self.ignore_ssl_verification = 
ignore_ssl_verification self.ssl_ca_cert = ssl_ca_cert self.cert_file = cert_file self.key_file = key_file self.timeout = timeout self.retry_times = retry_times self.pool_connections = pool_connections self.pool_maxsize = pool_maxsize<|docstring|>:param proxy_protocol(optional) : proxy protocol, http or https :type proxy_protocol: str :param proxy_host(optional) : hostname or ip address of proxy server :type proxy_host: str :param proxy_port(optional) : port of proxy server :type proxy_port: str :param proxy_user(optional) : user name used for proxy authentication :type proxy_user: str :param proxy_password(optional) : user name used for proxy authentication :type proxy_password: str :param ignore_ssl_verification: whether skip SSL certificate validation while sending https request, default, value is False :type ignore_ssl_verification: bool :param ssl_ca_cert: (optional) a path to a CA bundle to use :type ssl_ca_cert: str :param cert_file: (optional) a path to ssl client cert file (.pem) :type cert_file: str :param key_file: (optional) a path to a ssl client cert key file (.key) :type key_file: str :param timeout: (optional) seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param retry_times: maximum number of retries each connection should attempt, default, does not retry failed connections. :type retry_times: int :param pool_connections: number of urllib3 connection pools to cache, default, value is 1 :type pool_connections: int :param pool_maxsize: maximum number of connections to save in the pool, default, value is 1 :type pool_maxsize: int<|endoftext|>
2cf225b0e3b3e5bab9bf9182142cc8fb67fb9a9a60774a075c345e572421731d
def __init__(self, paper=None):
    """
    Args:
        paper (Paper): The paper to use in the document. If None,
            this defaults to `constants.DEFAULT_PAPER_TYPE`
    """
    if paper is not None:
        self._paper = paper
    else:
        # Fall back to the configured default paper template; a missing
        # template key is surfaced as a ValueError.
        try:
            self._paper = Paper.from_template(constants.DEFAULT_PAPER_TYPE)
        except KeyError:
            raise ValueError(
                'DEFAULT_PAPER_TYPE of {} is not supported.'.format(
                    constants.DEFAULT_PAPER_TYPE))
    self._pages = PageSupplier(self)
Args: paper (Paper): The paper to use in the document. If None, this defaults to `constants.DEFAULT_PAPER_TYPE`
brown/core/document.py
__init__
craigvear/school_improv_bot
0
python
def __init__(self, paper=None): '\n Args:\n paper (Paper): The paper to use in the document. If None,\n this defaults to `constants.DEFAULT_PAPER_TYPE`\n ' if (paper is None): try: self._paper = Paper.from_template(constants.DEFAULT_PAPER_TYPE) except KeyError: raise ValueError('DEFAULT_PAPER_TYPE of {} is not supported.'.format(constants.DEFAULT_PAPER_TYPE)) else: self._paper = paper self._pages = PageSupplier(self)
def __init__(self, paper=None): '\n Args:\n paper (Paper): The paper to use in the document. If None,\n this defaults to `constants.DEFAULT_PAPER_TYPE`\n ' if (paper is None): try: self._paper = Paper.from_template(constants.DEFAULT_PAPER_TYPE) except KeyError: raise ValueError('DEFAULT_PAPER_TYPE of {} is not supported.'.format(constants.DEFAULT_PAPER_TYPE)) else: self._paper = paper self._pages = PageSupplier(self)<|docstring|>Args: paper (Paper): The paper to use in the document. If None, this defaults to `constants.DEFAULT_PAPER_TYPE`<|endoftext|>
ebef1786c38c0b6d22359acc10970c5a5c1e5939550ec8c2f7ac749a34bf7376
@property
def paper(self):
    """Paper: The paper geometry used by this document."""
    return self._paper
Paper: The paper type of the document
brown/core/document.py
paper
craigvear/school_improv_bot
0
python
@property def paper(self): return self._paper
@property def paper(self): return self._paper<|docstring|>Paper: The paper type of the document<|endoftext|>
f4ddd202a74f371004c064178e01d3e4192dc34729e83b642b0097c5fe49a313
@property
def pages(self):
    """PageSupplier: The `Page`s in the document.

    Pages spring into existence on demand when first accessed through
    this property, which otherwise behaves like a managed list:

    >>> from brown.core import brown; brown.setup()
    >>> len(brown.document.pages)  # No pages exist yet
    0
    >>> first_page = brown.document.pages[0]  # Get the first page
    >>> len(brown.document.pages)  # One page now exists
    1
    >>> sixth_page = brown.document.pages[5]  # Get the sixth page
    >>> len(brown.document.pages)  # 5 new pages are created
    6

    # Pages can be accessed by negative indexing too
    >>> assert(first_page == brown.document.pages[-6])
    >>> assert(sixth_page == brown.document.pages[-1])

    See `PageSupplier` for details on this object.
    """
    return self._pages
PageSupplier: The `Page`s in the document. Pages are created on-demand by accessing this property. This property can be treated like a managed list: >>> from brown.core import brown; brown.setup() >>> len(brown.document.pages) # No pages exist yet 0 >>> first_page = brown.document.pages[0] # Get the first page >>> len(brown.document.pages) # One page now exists 1 >>> sixth_page = brown.document.pages[5] # Get the sixth page >>> len(brown.document.pages) # 5 new pages are created 6 # Pages can be accessed by negative indexing too >>> assert(first_page == brown.document.pages[-6]) >>> assert(sixth_page == brown.document.pages[-1]) For more information on this object, see `PageSupplier`.
brown/core/document.py
pages
craigvear/school_improv_bot
0
python
@property def pages(self): 'PageSupplier: The `Page`s in the document.\n\n Pages are created on-demand by accessing this property.\n\n This property can be treated like a managed list:\n\n >>> from brown.core import brown; brown.setup()\n >>> len(brown.document.pages) # No pages exist yet\n 0\n >>> first_page = brown.document.pages[0] # Get the first page\n >>> len(brown.document.pages) # One page now exists\n 1\n >>> sixth_page = brown.document.pages[5] # Get the sixth page\n >>> len(brown.document.pages) # 5 new pages are created\n 6\n\n # Pages can be accessed by negative indexing too\n >>> assert(first_page == brown.document.pages[-6])\n >>> assert(sixth_page == brown.document.pages[-1])\n\n For more information on this object, see `PageSupplier`.\n ' return self._pages
@property def pages(self): 'PageSupplier: The `Page`s in the document.\n\n Pages are created on-demand by accessing this property.\n\n This property can be treated like a managed list:\n\n >>> from brown.core import brown; brown.setup()\n >>> len(brown.document.pages) # No pages exist yet\n 0\n >>> first_page = brown.document.pages[0] # Get the first page\n >>> len(brown.document.pages) # One page now exists\n 1\n >>> sixth_page = brown.document.pages[5] # Get the sixth page\n >>> len(brown.document.pages) # 5 new pages are created\n 6\n\n # Pages can be accessed by negative indexing too\n >>> assert(first_page == brown.document.pages[-6])\n >>> assert(sixth_page == brown.document.pages[-1])\n\n For more information on this object, see `PageSupplier`.\n ' return self._pages<|docstring|>PageSupplier: The `Page`s in the document. Pages are created on-demand by accessing this property. This property can be treated like a managed list: >>> from brown.core import brown; brown.setup() >>> len(brown.document.pages) # No pages exist yet 0 >>> first_page = brown.document.pages[0] # Get the first page >>> len(brown.document.pages) # One page now exists 1 >>> sixth_page = brown.document.pages[5] # Get the sixth page >>> len(brown.document.pages) # 5 new pages are created 6 # Pages can be accessed by negative indexing too >>> assert(first_page == brown.document.pages[-6]) >>> assert(sixth_page == brown.document.pages[-1]) For more information on this object, see `PageSupplier`.<|endoftext|>
e07e246dd0aca0f73d19039d3969f8ba15209fc5c5c8a2ffcc33f6c7d7e45443
@property
def _page_display_gap(self):
    """Unit: Horizontal gap drawn between pages on the preview canvas.

    Purely cosmetic: this affects only the rendered preview and has no
    effect on exported documents. Configure it via
    `constants.PAGE_DISPLAY_GAP`.
    """
    return constants.PAGE_DISPLAY_GAP
Unit: The visual horizontal gap between pages on the canvas. This only affects page display in the rendered preview, and has no effect on exported documents. To change this, set `constants.PAGE_DISPLAY_GAP`.
brown/core/document.py
_page_display_gap
craigvear/school_improv_bot
0
python
@property def _page_display_gap(self): 'Unit: The visual horizontal gap between pages on the canvas.\n\n This only affects page display in the rendered preview,\n and has no effect on exported documents.\n\n To change this, set `constants.PAGE_DISPLAY_GAP`.\n ' return constants.PAGE_DISPLAY_GAP
@property def _page_display_gap(self): 'Unit: The visual horizontal gap between pages on the canvas.\n\n This only affects page display in the rendered preview,\n and has no effect on exported documents.\n\n To change this, set `constants.PAGE_DISPLAY_GAP`.\n ' return constants.PAGE_DISPLAY_GAP<|docstring|>Unit: The visual horizontal gap between pages on the canvas. This only affects page display in the rendered preview, and has no effect on exported documents. To change this, set `constants.PAGE_DISPLAY_GAP`.<|endoftext|>
b68f8035105ee3ede5ab32b09231ae124b426ac9fd4408ad882a8b34b1d1a19c
def _render(self):
    """Render every page (and thus every item) in the document.

    Returns: None
    """
    for current_page in self.pages:
        current_page._render()
Render all items in the document. Returns: None
brown/core/document.py
_render
craigvear/school_improv_bot
0
python
def _render(self): 'Render all items in the document.\n\n Returns: None\n ' for page in self.pages: page._render()
def _render(self): 'Render all items in the document.\n\n Returns: None\n ' for page in self.pages: page._render()<|docstring|>Render all items in the document. Returns: None<|endoftext|>
92a938e5f540a503423fbd93666081ca56ed7bfa66d1dc88df2cbf8bd9675878
def page_range_of(self, graphic_objects):
    """Find the page indices an iter of `GraphicObject`s appears on.

    >>> from brown.common import *
    >>> brown.setup()
    >>> graphic_objects = [
    ...     InvisibleObject((0, 0), brown.document.pages[1]),
    ...     InvisibleObject((0, 0), brown.document.pages[5]),
    ... ]
    >>> brown.document.page_range_of(graphic_objects)
    range(1, 6)

    Args:
        graphic_objects (iter[GraphicObject]):

    Returns:
        range: The range from the first page index to one after the last.
            In order to be consistent with python's `range` semantics,
            the range goes 1 past the maximum page objects appear on.
            An empty iterable yields the empty ``range(0, 0)``.
    """
    min_page = float('inf')
    max_page = -float('inf')
    for current in graphic_objects:
        current_page_num = current.page_index
        min_page = min(min_page, current_page_num)
        max_page = max(max_page, current_page_num)
        if current.children:
            # Recurse so descendants that land on other pages are counted.
            child_range = self.page_range_of(current.children)
            # BUG FIX: the previous code read child_min_max[1], which is the
            # *second* page index in the child range (min_page + 1), not the
            # maximum — and it raises IndexError when the child subtree spans
            # exactly one page (range(n, n + 1) has only index 0). Use the
            # range endpoints instead.
            min_page = min(min_page, child_range[0])
            max_page = max(max_page, child_range[-1])
    if min_page == float('inf'):
        # No objects at all: return an empty range rather than feeding the
        # inf sentinels to range(), which would raise TypeError.
        return range(0, 0)
    return range(min_page, max_page + 1)
Find the page indices an iter of `GraphicObject`s appears on. >>> from brown.common import * >>> brown.setup() >>> graphic_objects = [ ... InvisibleObject((0, 0), brown.document.pages[1]), ... InvisibleObject((0, 0), brown.document.pages[5]), ... ] >>> brown.document.page_range_of(graphic_objects) range(1, 6) Args: graphic_objects (iter[GraphicObject]): Returns: range: The range from the first page index to one after the last. In order to be consistent with python's `range` semantics, the range goes 1 past the maximum page objects appear on.
brown/core/document.py
page_range_of
craigvear/school_improv_bot
0
python
def page_range_of(self, graphic_objects): "Find the page indices an iter of `GraphicObject`s appears on.\n\n >>> from brown.common import *\n >>> brown.setup()\n >>> graphic_objects = [\n ... InvisibleObject((0, 0), brown.document.pages[1]),\n ... InvisibleObject((0, 0), brown.document.pages[5]),\n ... ]\n >>> brown.document.page_range_of(graphic_objects)\n range(1, 6)\n\n Args:\n graphic_objects (iter[GraphicObject]):\n\n Returns:\n range: The range from the first page index to one after the last.\n In order to be consistent with python's `range` semantics,\n the range goes 1 past the maximum page objects appear on.\n " min_page = float('inf') max_page = (- float('inf')) for current in graphic_objects: current_page_num = current.page_index if current.children: child_min_max = self.page_range_of(current.children) min_page = min(min_page, current_page_num, child_min_max[0]) max_page = max(max_page, current_page_num, child_min_max[1]) else: min_page = min(min_page, current_page_num) max_page = max(max_page, current_page_num) return range(min_page, (max_page + 1))
def page_range_of(self, graphic_objects): "Find the page indices an iter of `GraphicObject`s appears on.\n\n >>> from brown.common import *\n >>> brown.setup()\n >>> graphic_objects = [\n ... InvisibleObject((0, 0), brown.document.pages[1]),\n ... InvisibleObject((0, 0), brown.document.pages[5]),\n ... ]\n >>> brown.document.page_range_of(graphic_objects)\n range(1, 6)\n\n Args:\n graphic_objects (iter[GraphicObject]):\n\n Returns:\n range: The range from the first page index to one after the last.\n In order to be consistent with python's `range` semantics,\n the range goes 1 past the maximum page objects appear on.\n " min_page = float('inf') max_page = (- float('inf')) for current in graphic_objects: current_page_num = current.page_index if current.children: child_min_max = self.page_range_of(current.children) min_page = min(min_page, current_page_num, child_min_max[0]) max_page = max(max_page, current_page_num, child_min_max[1]) else: min_page = min(min_page, current_page_num) max_page = max(max_page, current_page_num) return range(min_page, (max_page + 1))<|docstring|>Find the page indices an iter of `GraphicObject`s appears on. >>> from brown.common import * >>> brown.setup() >>> graphic_objects = [ ... InvisibleObject((0, 0), brown.document.pages[1]), ... InvisibleObject((0, 0), brown.document.pages[5]), ... ] >>> brown.document.page_range_of(graphic_objects) range(1, 6) Args: graphic_objects (iter[GraphicObject]): Returns: range: The range from the first page index to one after the last. In order to be consistent with python's `range` semantics, the range goes 1 past the maximum page objects appear on.<|endoftext|>
57157d2c5a9866bb64c783726a8e09bfccb0f56e974a219d1c8d397b38868e5e
def page_origin(self, page_number): "Find the origin point of a given page number.\n\n The origin is the top left corner of the live area, equivalent to\n the real page corner + margins and gutter.\n\n Args:\n page_number (int): The number of the page to locate,\n where 0 is the first page.\n\n Returns:\n Point: The position of the origin of the given page.\n The page number of this Point will be 0, as this\n is considered relative to the document's origin.\n " x_page_left = ((self.paper.width + self._page_display_gap) * page_number) x_page_origin = (x_page_left + self.paper.margin_left) y_page_origin = self.paper.margin_top return Point(x_page_origin, y_page_origin)
Find the origin point of a given page number. The origin is the top left corner of the live area, equivalent to the real page corner + margins and gutter. Args: page_number (int): The number of the page to locate, where 0 is the first page. Returns: Point: The position of the origin of the given page. The page number of this Point will be 0, as this is considered relative to the document's origin.
brown/core/document.py
page_origin
craigvear/school_improv_bot
0
python
def page_origin(self, page_number): "Find the origin point of a given page number.\n\n The origin is the top left corner of the live area, equivalent to\n the real page corner + margins and gutter.\n\n Args:\n page_number (int): The number of the page to locate,\n where 0 is the first page.\n\n Returns:\n Point: The position of the origin of the given page.\n The page number of this Point will be 0, as this\n is considered relative to the document's origin.\n " x_page_left = ((self.paper.width + self._page_display_gap) * page_number) x_page_origin = (x_page_left + self.paper.margin_left) y_page_origin = self.paper.margin_top return Point(x_page_origin, y_page_origin)
def page_origin(self, page_number): "Find the origin point of a given page number.\n\n The origin is the top left corner of the live area, equivalent to\n the real page corner + margins and gutter.\n\n Args:\n page_number (int): The number of the page to locate,\n where 0 is the first page.\n\n Returns:\n Point: The position of the origin of the given page.\n The page number of this Point will be 0, as this\n is considered relative to the document's origin.\n " x_page_left = ((self.paper.width + self._page_display_gap) * page_number) x_page_origin = (x_page_left + self.paper.margin_left) y_page_origin = self.paper.margin_top return Point(x_page_origin, y_page_origin)<|docstring|>Find the origin point of a given page number. The origin is the top left corner of the live area, equivalent to the real page corner + margins and gutter. Args: page_number (int): The number of the page to locate, where 0 is the first page. Returns: Point: The position of the origin of the given page. The page number of this Point will be 0, as this is considered relative to the document's origin.<|endoftext|>
d9d234d01910e50b5c6b022e0050767a51d36f2b0784bdf7e46d78f64bbe1146
def paper_origin(self, page_number): "Find the paper origin point of a given page number.\n\n This gives the position of the top left corner of the actual\n sheet of paper - regardless of margins and gutter.\n\n Args:\n page_number (int): The number of the page to locate,\n where 0 is the first page.\n\n Returns:\n Point: The position of the paper origin of the given page.\n The page number of this Point will be 0, as this\n is considered relative to the document's origin.\n " return Point(((self.paper.width + self._page_display_gap) * page_number), GraphicUnit(0))
Find the paper origin point of a given page number. This gives the position of the top left corner of the actual sheet of paper - regardless of margins and gutter. Args: page_number (int): The number of the page to locate, where 0 is the first page. Returns: Point: The position of the paper origin of the given page. The page number of this Point will be 0, as this is considered relative to the document's origin.
brown/core/document.py
paper_origin
craigvear/school_improv_bot
0
python
def paper_origin(self, page_number): "Find the paper origin point of a given page number.\n\n This gives the position of the top left corner of the actual\n sheet of paper - regardless of margins and gutter.\n\n Args:\n page_number (int): The number of the page to locate,\n where 0 is the first page.\n\n Returns:\n Point: The position of the paper origin of the given page.\n The page number of this Point will be 0, as this\n is considered relative to the document's origin.\n " return Point(((self.paper.width + self._page_display_gap) * page_number), GraphicUnit(0))
def paper_origin(self, page_number): "Find the paper origin point of a given page number.\n\n This gives the position of the top left corner of the actual\n sheet of paper - regardless of margins and gutter.\n\n Args:\n page_number (int): The number of the page to locate,\n where 0 is the first page.\n\n Returns:\n Point: The position of the paper origin of the given page.\n The page number of this Point will be 0, as this\n is considered relative to the document's origin.\n " return Point(((self.paper.width + self._page_display_gap) * page_number), GraphicUnit(0))<|docstring|>Find the paper origin point of a given page number. This gives the position of the top left corner of the actual sheet of paper - regardless of margins and gutter. Args: page_number (int): The number of the page to locate, where 0 is the first page. Returns: Point: The position of the paper origin of the given page. The page number of this Point will be 0, as this is considered relative to the document's origin.<|endoftext|>
bb074625b45f656dde7c412068d6684cf572224d7a2efee0f391631b5b420caa
def canvas_pos_of(self, graphic_object): "Find the paged document position of a GraphicObject.\n\n Args:\n graphic_object (GraphicObject): Any object in the document.\n\n Returns: Point: The object's paged position relative to the document.\n " pos = Point(GraphicUnit(0), GraphicUnit(0)) current = graphic_object while (current != self): pos += current.pos current = current.parent if (type(current).__name__ == 'Flowable'): return current.map_to_canvas(pos) return pos
Find the paged document position of a GraphicObject. Args: graphic_object (GraphicObject): Any object in the document. Returns: Point: The object's paged position relative to the document.
brown/core/document.py
canvas_pos_of
craigvear/school_improv_bot
0
python
def canvas_pos_of(self, graphic_object): "Find the paged document position of a GraphicObject.\n\n Args:\n graphic_object (GraphicObject): Any object in the document.\n\n Returns: Point: The object's paged position relative to the document.\n " pos = Point(GraphicUnit(0), GraphicUnit(0)) current = graphic_object while (current != self): pos += current.pos current = current.parent if (type(current).__name__ == 'Flowable'): return current.map_to_canvas(pos) return pos
def canvas_pos_of(self, graphic_object): "Find the paged document position of a GraphicObject.\n\n Args:\n graphic_object (GraphicObject): Any object in the document.\n\n Returns: Point: The object's paged position relative to the document.\n " pos = Point(GraphicUnit(0), GraphicUnit(0)) current = graphic_object while (current != self): pos += current.pos current = current.parent if (type(current).__name__ == 'Flowable'): return current.map_to_canvas(pos) return pos<|docstring|>Find the paged document position of a GraphicObject. Args: graphic_object (GraphicObject): Any object in the document. Returns: Point: The object's paged position relative to the document.<|endoftext|>
f1b3d1b9df7ff05e68a79beef785d371f4f83c56dc701937a1df48a788ec98a6
def page_bounding_rect(self, page_number): 'Find the bounding rect of a page in the document.\n\n The resulting rect will cover the *live page area* - that is,\n the area within the margins of the page\n\n Args:\n page_number (int):\n\n Returns: Rect\n ' page_origin = self.page_origin(page_number) return Rect(page_origin.x, page_origin.y, self.paper.live_width, self.paper.live_height)
Find the bounding rect of a page in the document. The resulting rect will cover the *live page area* - that is, the area within the margins of the page Args: page_number (int): Returns: Rect
brown/core/document.py
page_bounding_rect
craigvear/school_improv_bot
0
python
def page_bounding_rect(self, page_number): 'Find the bounding rect of a page in the document.\n\n The resulting rect will cover the *live page area* - that is,\n the area within the margins of the page\n\n Args:\n page_number (int):\n\n Returns: Rect\n ' page_origin = self.page_origin(page_number) return Rect(page_origin.x, page_origin.y, self.paper.live_width, self.paper.live_height)
def page_bounding_rect(self, page_number): 'Find the bounding rect of a page in the document.\n\n The resulting rect will cover the *live page area* - that is,\n the area within the margins of the page\n\n Args:\n page_number (int):\n\n Returns: Rect\n ' page_origin = self.page_origin(page_number) return Rect(page_origin.x, page_origin.y, self.paper.live_width, self.paper.live_height)<|docstring|>Find the bounding rect of a page in the document. The resulting rect will cover the *live page area* - that is, the area within the margins of the page Args: page_number (int): Returns: Rect<|endoftext|>
75c25f5d5ecbe2bd92557c35a8a42fb2e700b94eafd63b7ef27d8bef4be02257
def paper_bounding_rect(self, page_number): 'Find the paper bounding rect of a page in the document.\n\n The resulting rect will cover the entire paper sheet -\n regardless of margins and gutter.\n\n Args:\n page_number (int):\n\n Returns: Rect\n ' paper_origin = self.paper_origin(page_number) return Rect(paper_origin.x, paper_origin.y, self.paper.width, self.paper.height)
Find the paper bounding rect of a page in the document. The resulting rect will cover the entire paper sheet - regardless of margins and gutter. Args: page_number (int): Returns: Rect
brown/core/document.py
paper_bounding_rect
craigvear/school_improv_bot
0
python
def paper_bounding_rect(self, page_number): 'Find the paper bounding rect of a page in the document.\n\n The resulting rect will cover the entire paper sheet -\n regardless of margins and gutter.\n\n Args:\n page_number (int):\n\n Returns: Rect\n ' paper_origin = self.paper_origin(page_number) return Rect(paper_origin.x, paper_origin.y, self.paper.width, self.paper.height)
def paper_bounding_rect(self, page_number): 'Find the paper bounding rect of a page in the document.\n\n The resulting rect will cover the entire paper sheet -\n regardless of margins and gutter.\n\n Args:\n page_number (int):\n\n Returns: Rect\n ' paper_origin = self.paper_origin(page_number) return Rect(paper_origin.x, paper_origin.y, self.paper.width, self.paper.height)<|docstring|>Find the paper bounding rect of a page in the document. The resulting rect will cover the entire paper sheet - regardless of margins and gutter. Args: page_number (int): Returns: Rect<|endoftext|>
6b1ceec9fb15cac5f8b501ae24af2f8158467c26f1dc02e7c92893a880f058fa
def POST(self): 'Respond to inbound webhook JSON HTTP POSTs from Cisco Spark.' json_data = web.data() print('\nWEBHOOK POST RECEIVED:') json_notification = json.loads(json_data) print(json.dumps(json_notification), '\n') webhook_obj = Webhook(json_notification) room = api.rooms.get(webhook_obj.data.roomId) message = api.messages.get(webhook_obj.data.id) person = api.people.get(message.personId) print("NEW MESSAGE IN ROOM '{}'".format(room.title)) print("FROM '{}'".format(person.displayName)) print("MESSAGE '{}'\n".format(message.text)) me = api.people.me() if (message.personId == me.id): print('Message sent by me. Return OK') return 'OK' else: print('Not ME') if ('IOTA' in message.text): print('Requesting IOTA rate') currentPriceJSON = GetBitFinexPrice('iotusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('IOTA: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('BTC' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('btcusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('IOTA: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). 
Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('DASH' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('dshusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('DASH: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('LITE' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('ltcusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('LITECOIN: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('zork' in message.text): print('Requesting Zork') currentBTCPriceJSON = GetBitFinexPrice('btcusd') message_text = ('NO! Just No ' + person.displayName) response_message = api.messages.create(room.id, text=message_text) return 'OK'
Respond to inbound webhook JSON HTTP POSTs from Cisco Spark.
spark-cc-bot-webhook-receiver.py
POST
chris-gibbs/spark-cc-bot-webhook-receiver
0
python
def POST(self): json_data = web.data() print('\nWEBHOOK POST RECEIVED:') json_notification = json.loads(json_data) print(json.dumps(json_notification), '\n') webhook_obj = Webhook(json_notification) room = api.rooms.get(webhook_obj.data.roomId) message = api.messages.get(webhook_obj.data.id) person = api.people.get(message.personId) print("NEW MESSAGE IN ROOM '{}'".format(room.title)) print("FROM '{}'".format(person.displayName)) print("MESSAGE '{}'\n".format(message.text)) me = api.people.me() if (message.personId == me.id): print('Message sent by me. Return OK') return 'OK' else: print('Not ME') if ('IOTA' in message.text): print('Requesting IOTA rate') currentPriceJSON = GetBitFinexPrice('iotusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('IOTA: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('BTC' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('btcusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('IOTA: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('DASH' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('dshusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('DASH: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). 
Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('LITE' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('ltcusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('LITECOIN: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('zork' in message.text): print('Requesting Zork') currentBTCPriceJSON = GetBitFinexPrice('btcusd') message_text = ('NO! Just No ' + person.displayName) response_message = api.messages.create(room.id, text=message_text) return 'OK'
def POST(self): json_data = web.data() print('\nWEBHOOK POST RECEIVED:') json_notification = json.loads(json_data) print(json.dumps(json_notification), '\n') webhook_obj = Webhook(json_notification) room = api.rooms.get(webhook_obj.data.roomId) message = api.messages.get(webhook_obj.data.id) person = api.people.get(message.personId) print("NEW MESSAGE IN ROOM '{}'".format(room.title)) print("FROM '{}'".format(person.displayName)) print("MESSAGE '{}'\n".format(message.text)) me = api.people.me() if (message.personId == me.id): print('Message sent by me. Return OK') return 'OK' else: print('Not ME') if ('IOTA' in message.text): print('Requesting IOTA rate') currentPriceJSON = GetBitFinexPrice('iotusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('IOTA: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('BTC' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('btcusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('IOTA: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('DASH' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('dshusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('DASH: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). 
Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('LITE' in message.text): print('Requesting BTC rate') currentPriceJSON = GetBitFinexPrice('ltcusd') convertedPrice = GetCurrencyConversion('USD', 'AUD', currentPriceJSON['last_price']) timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(currentPriceJSON['timestamp']))) message_text = ((((('LITECOIN: $' + currentPriceJSON['last_price']) + ' (USD). $') + convertedPrice) + '(AU). Timestamp: ') + timestamp) response_message = api.messages.create(room.id, text=message_text) if ('zork' in message.text): print('Requesting Zork') currentBTCPriceJSON = GetBitFinexPrice('btcusd') message_text = ('NO! Just No ' + person.displayName) response_message = api.messages.create(room.id, text=message_text) return 'OK'<|docstring|>Respond to inbound webhook JSON HTTP POSTs from Cisco Spark.<|endoftext|>
db2c7de32a005c04b96d2bf61a59e12b15715370e8813dc9a8b79ce4de38479f
@api.multi def open_related_action(self): ' Open the related action associated to the job ' self.ensure_one() job = Job.load(self.env, self.uuid) action = job.related_action() if (action is None): raise exceptions.Warning(_('No action available for this job')) return action
Open the related action associated to the job
py_queue/queue_job/models/queue_job.py
open_related_action
xiaoming719/python_material
0
python
@api.multi def open_related_action(self): ' ' self.ensure_one() job = Job.load(self.env, self.uuid) action = job.related_action() if (action is None): raise exceptions.Warning(_('No action available for this job')) return action
@api.multi def open_related_action(self): ' ' self.ensure_one() job = Job.load(self.env, self.uuid) action = job.related_action() if (action is None): raise exceptions.Warning(_('No action available for this job')) return action<|docstring|>Open the related action associated to the job<|endoftext|>
0647e15102e2d6e43acbbae8d3645f1c26c2bc58de92b2c330d6dc885c677cf9
@api.multi def _change_job_state(self, state, result=None): ' Change the state of the `Job` object itself so it\n will change the other fields (date, result, ...)\n ' for record in self: job_ = Job.load(record.env, record.uuid) if (state == DONE): job_.set_done(result=result) elif (state == PENDING): job_.set_pending(result=result) else: raise ValueError(('State not supported: %s' % state)) job_.store()
Change the state of the `Job` object itself so it will change the other fields (date, result, ...)
py_queue/queue_job/models/queue_job.py
_change_job_state
xiaoming719/python_material
0
python
@api.multi def _change_job_state(self, state, result=None): ' Change the state of the `Job` object itself so it\n will change the other fields (date, result, ...)\n ' for record in self: job_ = Job.load(record.env, record.uuid) if (state == DONE): job_.set_done(result=result) elif (state == PENDING): job_.set_pending(result=result) else: raise ValueError(('State not supported: %s' % state)) job_.store()
@api.multi def _change_job_state(self, state, result=None): ' Change the state of the `Job` object itself so it\n will change the other fields (date, result, ...)\n ' for record in self: job_ = Job.load(record.env, record.uuid) if (state == DONE): job_.set_done(result=result) elif (state == PENDING): job_.set_pending(result=result) else: raise ValueError(('State not supported: %s' % state)) job_.store()<|docstring|>Change the state of the `Job` object itself so it will change the other fields (date, result, ...)<|endoftext|>
a9e31973bcb0e60e689e059acd64b9fe742dbd2e7ebaf7b380a73f356f72f007
@api.multi def _subscribe_users_domain(self): " Subscribe all users having the 'Queue Job Manager' group " group = self.env.ref('queue_job.group_queue_job_manager') if (not group): return companies = self.mapped('company_id') domain = [('groups_id', '=', group.id)] if companies: domain.append(('company_id', 'child_of', companies.ids)) return domain
Subscribe all users having the 'Queue Job Manager' group
py_queue/queue_job/models/queue_job.py
_subscribe_users_domain
xiaoming719/python_material
0
python
@api.multi def _subscribe_users_domain(self): " " group = self.env.ref('queue_job.group_queue_job_manager') if (not group): return companies = self.mapped('company_id') domain = [('groups_id', '=', group.id)] if companies: domain.append(('company_id', 'child_of', companies.ids)) return domain
@api.multi def _subscribe_users_domain(self): " " group = self.env.ref('queue_job.group_queue_job_manager') if (not group): return companies = self.mapped('company_id') domain = [('groups_id', '=', group.id)] if companies: domain.append(('company_id', 'child_of', companies.ids)) return domain<|docstring|>Subscribe all users having the 'Queue Job Manager' group<|endoftext|>
a2f72a090221dd4f437f70f06cce5459b8900512db86b257b31f02a9a50eb42f
@api.multi def _message_failed_job(self): ' Return a message which will be posted on the job when it is failed.\n\n It can be inherited to allow more precise messages based on the\n exception informations.\n\n If nothing is returned, no message will be posted.\n ' self.ensure_one() return _("Something bad happened during the execution of the job. More details in the 'Exception Information' section.")
Return a message which will be posted on the job when it is failed. It can be inherited to allow more precise messages based on the exception informations. If nothing is returned, no message will be posted.
py_queue/queue_job/models/queue_job.py
_message_failed_job
xiaoming719/python_material
0
python
@api.multi def _message_failed_job(self): ' Return a message which will be posted on the job when it is failed.\n\n It can be inherited to allow more precise messages based on the\n exception informations.\n\n If nothing is returned, no message will be posted.\n ' self.ensure_one() return _("Something bad happened during the execution of the job. More details in the 'Exception Information' section.")
@api.multi def _message_failed_job(self): ' Return a message which will be posted on the job when it is failed.\n\n It can be inherited to allow more precise messages based on the\n exception informations.\n\n If nothing is returned, no message will be posted.\n ' self.ensure_one() return _("Something bad happened during the execution of the job. More details in the 'Exception Information' section.")<|docstring|>Return a message which will be posted on the job when it is failed. It can be inherited to allow more precise messages based on the exception informations. If nothing is returned, no message will be posted.<|endoftext|>
6368c68c329e0114a34e921526a64ef344ad54b97420ef653c775d41e7cc4ac6
@api.model def _needaction_domain_get(self): ' Returns the domain to filter records that require an action\n :return: domain or False is no action\n ' return [('state', '=', 'failed')]
Returns the domain to filter records that require an action :return: domain or False is no action
py_queue/queue_job/models/queue_job.py
_needaction_domain_get
xiaoming719/python_material
0
python
@api.model def _needaction_domain_get(self): ' Returns the domain to filter records that require an action\n :return: domain or False is no action\n ' return [('state', '=', 'failed')]
@api.model def _needaction_domain_get(self): ' Returns the domain to filter records that require an action\n :return: domain or False is no action\n ' return [('state', '=', 'failed')]<|docstring|>Returns the domain to filter records that require an action :return: domain or False is no action<|endoftext|>
ea0070f385a40779337bb95226b48ee1191247e4940592241b03b9d8e672379e
@api.model def autovacuum(self): ' Delete all jobs done since more than ``_removal_interval`` days.\n\n Called from a cron.\n ' deadline = (datetime.now() - timedelta(days=self._removal_interval)) jobs = self.search([('date_done', '<=', fields.Datetime.to_string(deadline))]) jobs.unlink() return True
Delete all jobs done since more than ``_removal_interval`` days. Called from a cron.
py_queue/queue_job/models/queue_job.py
autovacuum
xiaoming719/python_material
0
python
@api.model def autovacuum(self): ' Delete all jobs done since more than ``_removal_interval`` days.\n\n Called from a cron.\n ' deadline = (datetime.now() - timedelta(days=self._removal_interval)) jobs = self.search([('date_done', '<=', fields.Datetime.to_string(deadline))]) jobs.unlink() return True
@api.model def autovacuum(self): ' Delete all jobs done since more than ``_removal_interval`` days.\n\n Called from a cron.\n ' deadline = (datetime.now() - timedelta(days=self._removal_interval)) jobs = self.search([('date_done', '<=', fields.Datetime.to_string(deadline))]) jobs.unlink() return True<|docstring|>Delete all jobs done since more than ``_removal_interval`` days. Called from a cron.<|endoftext|>
b640b7adcb549a537f98c2c3d3b365c420afd72986a2f5cfeca4ab4fb115362f
@click.group(cls=DYMGroup) def index(): 'Index operations.' pass
Index operations.
src/esok/commands/index.py
index
ahaeger/esok
2
python
@click.group(cls=DYMGroup) def index(): pass
@click.group(cls=DYMGroup) def index(): pass<|docstring|>Index operations.<|endoftext|>
a1afd5da019c5dd61e8368338c5bef5b76581e6207796c8a740bf7c926cfd9a1
@index.command(name='list') @click.option('-s', '--sort', default='index', metavar='COLUMN', show_default=True, help='Comma-separated list of column names or column aliases used to sort the response.') @per_connection() def list_indices(client, sort): 'List indices.' r = client.cat.indices(v=True, s=sort) click.echo(r)
List indices.
src/esok/commands/index.py
list_indices
ahaeger/esok
2
python
@index.command(name='list') @click.option('-s', '--sort', default='index', metavar='COLUMN', show_default=True, help='Comma-separated list of column names or column aliases used to sort the response.') @per_connection() def list_indices(client, sort): r = client.cat.indices(v=True, s=sort) click.echo(r)
@index.command(name='list') @click.option('-s', '--sort', default='index', metavar='COLUMN', show_default=True, help='Comma-separated list of column names or column aliases used to sort the response.') @per_connection() def list_indices(client, sort): r = client.cat.indices(v=True, s=sort) click.echo(r)<|docstring|>List indices.<|endoftext|>
bc171371698bf50f80183b3285d79a7435191312ec3d3ee8a7fdc115dcc4f2d2
@index.command() @click.argument('name', type=click.STRING) @per_connection() def touch(client, name): 'Create an index without mapping.' r = client.indices.create(index=name) LOG.info(json.dumps(r)) ok = r.get('acknowledged') if (not ok): sys.exit(UNKNOWN_ERROR)
Create an index without mapping.
src/esok/commands/index.py
touch
ahaeger/esok
2
python
@index.command() @click.argument('name', type=click.STRING) @per_connection() def touch(client, name): r = client.indices.create(index=name) LOG.info(json.dumps(r)) ok = r.get('acknowledged') if (not ok): sys.exit(UNKNOWN_ERROR)
@index.command() @click.argument('name', type=click.STRING) @per_connection() def touch(client, name): r = client.indices.create(index=name) LOG.info(json.dumps(r)) ok = r.get('acknowledged') if (not ok): sys.exit(UNKNOWN_ERROR)<|docstring|>Create an index without mapping.<|endoftext|>
a08bd69433ea4a3befb78f275102c54d905a4765fc1c44171af3e378eb6b9d7d
@index.command() @click.argument('name', type=click.STRING) @click.argument('mapping', type=click.Path(exists=True, dir_okay=False, readable=True)) @per_connection() def create(client, name, mapping): 'Create a new index from given mapping.' with open(mapping, 'r') as f: mapping_json = json.load(f) r = client.indices.create(index=name, body=mapping_json) ok = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ok): sys.exit(UNKNOWN_ERROR)
Create a new index from given mapping.
src/esok/commands/index.py
create
ahaeger/esok
2
python
@index.command() @click.argument('name', type=click.STRING) @click.argument('mapping', type=click.Path(exists=True, dir_okay=False, readable=True)) @per_connection() def create(client, name, mapping): with open(mapping, 'r') as f: mapping_json = json.load(f) r = client.indices.create(index=name, body=mapping_json) ok = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ok): sys.exit(UNKNOWN_ERROR)
@index.command() @click.argument('name', type=click.STRING) @click.argument('mapping', type=click.Path(exists=True, dir_okay=False, readable=True)) @per_connection() def create(client, name, mapping): with open(mapping, 'r') as f: mapping_json = json.load(f) r = client.indices.create(index=name, body=mapping_json) ok = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ok): sys.exit(UNKNOWN_ERROR)<|docstring|>Create a new index from given mapping.<|endoftext|>
d21f6274a57182b4a4a2356a9d3b3ef1b47a475a706fd27fba3d5aca98d51114
@index.command() @click.argument('source_index', type=click.STRING) @click.argument('new_index', type=click.STRING) @click.option('-R', '--remote', type=click.STRING, metavar='HOSTNAME', help='Remote cluster or hostname to copy from. If no cluster with the given name is configured, the passed value will be used as the hostname directly.') @click.option('-m', '--skip-mapping', is_flag=True, help='Skip the mapping part of the index when copying.') @click.option('-s', '--skip-settings', is_flag=True, help='Skip the settings part of the index when copying.') @per_connection(include_site=True) def copy(client, site, source_index, new_index, remote, skip_mapping, skip_settings): "Copy an existing index's mapping and setting to a new index.\n\n This will not copy the data of the index - use the reindex command for that.\n " if (remote is not None): source_client = resolve_remote(remote, site) else: source_client = client try: source_mapping = source_client.indices.get(index=source_index) except TransportError as e: LOG.error('Error occurred when checking for source index.') if (e.status_code == 404): LOG.error('The source index does not exist.') exit(USER_ERROR) raise cleaned_mapping = clean_index(source_mapping, skip_settings, skip_mapping) r = client.indices.create(index=new_index, body=cleaned_mapping) ack = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ack): LOG.warning('Request timed out before acknowledge was received. Index might not have been created yet.')
Copy an existing index's mapping and setting to a new index. This will not copy the data of the index - use the reindex command for that.
src/esok/commands/index.py
copy
ahaeger/esok
2
python
@index.command() @click.argument('source_index', type=click.STRING) @click.argument('new_index', type=click.STRING) @click.option('-R', '--remote', type=click.STRING, metavar='HOSTNAME', help='Remote cluster or hostname to copy from. If no cluster with the given name is configured, the passed value will be used as the hostname directly.') @click.option('-m', '--skip-mapping', is_flag=True, help='Skip the mapping part of the index when copying.') @click.option('-s', '--skip-settings', is_flag=True, help='Skip the settings part of the index when copying.') @per_connection(include_site=True) def copy(client, site, source_index, new_index, remote, skip_mapping, skip_settings): "Copy an existing index's mapping and setting to a new index.\n\n This will not copy the data of the index - use the reindex command for that.\n " if (remote is not None): source_client = resolve_remote(remote, site) else: source_client = client try: source_mapping = source_client.indices.get(index=source_index) except TransportError as e: LOG.error('Error occurred when checking for source index.') if (e.status_code == 404): LOG.error('The source index does not exist.') exit(USER_ERROR) raise cleaned_mapping = clean_index(source_mapping, skip_settings, skip_mapping) r = client.indices.create(index=new_index, body=cleaned_mapping) ack = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ack): LOG.warning('Request timed out before acknowledge was received. Index might not have been created yet.')
@index.command() @click.argument('source_index', type=click.STRING) @click.argument('new_index', type=click.STRING) @click.option('-R', '--remote', type=click.STRING, metavar='HOSTNAME', help='Remote cluster or hostname to copy from. If no cluster with the given name is configured, the passed value will be used as the hostname directly.') @click.option('-m', '--skip-mapping', is_flag=True, help='Skip the mapping part of the index when copying.') @click.option('-s', '--skip-settings', is_flag=True, help='Skip the settings part of the index when copying.') @per_connection(include_site=True) def copy(client, site, source_index, new_index, remote, skip_mapping, skip_settings): "Copy an existing index's mapping and setting to a new index.\n\n This will not copy the data of the index - use the reindex command for that.\n " if (remote is not None): source_client = resolve_remote(remote, site) else: source_client = client try: source_mapping = source_client.indices.get(index=source_index) except TransportError as e: LOG.error('Error occurred when checking for source index.') if (e.status_code == 404): LOG.error('The source index does not exist.') exit(USER_ERROR) raise cleaned_mapping = clean_index(source_mapping, skip_settings, skip_mapping) r = client.indices.create(index=new_index, body=cleaned_mapping) ack = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ack): LOG.warning('Request timed out before acknowledge was received. Index might not have been created yet.')<|docstring|>Copy an existing index's mapping and setting to a new index. This will not copy the data of the index - use the reindex command for that.<|endoftext|>
ffaa8a5d0e64dde455c2d6c8acf3ccacdef9b0c862e402de6ad6c3d9b4172243
@index.command() @click.argument('name', type=click.STRING) @per_connection() def delete(client: Elasticsearch, name): 'Delete an index.' if (name in ['_all', '*']): click.confirm('Really delete ALL indices on the cluster?', abort=True) r = client.indices.delete(index=name) LOG.info(json.dumps(r)) ok = r.get('acknowledged') if (not ok): sys.exit(UNKNOWN_ERROR)
Delete an index.
src/esok/commands/index.py
delete
ahaeger/esok
2
python
@index.command() @click.argument('name', type=click.STRING) @per_connection() def delete(client: Elasticsearch, name): if (name in ['_all', '*']): click.confirm('Really delete ALL indices on the cluster?', abort=True) r = client.indices.delete(index=name) LOG.info(json.dumps(r)) ok = r.get('acknowledged') if (not ok): sys.exit(UNKNOWN_ERROR)
@index.command() @click.argument('name', type=click.STRING) @per_connection() def delete(client: Elasticsearch, name): if (name in ['_all', '*']): click.confirm('Really delete ALL indices on the cluster?', abort=True) r = client.indices.delete(index=name) LOG.info(json.dumps(r)) ok = r.get('acknowledged') if (not ok): sys.exit(UNKNOWN_ERROR)<|docstring|>Delete an index.<|endoftext|>
692c5fb00d35dc48fb3ac92cdf5377c258076c946ae98122f62b61c15b112798
@index.command() @click.argument('name', type=click.STRING) @per_connection() def get(client, name): 'Get index details.' r = client.indices.get(index=name) click.echo(json.dumps(r))
Get index details.
src/esok/commands/index.py
get
ahaeger/esok
2
python
@index.command() @click.argument('name', type=click.STRING) @per_connection() def get(client, name): r = client.indices.get(index=name) click.echo(json.dumps(r))
@index.command() @click.argument('name', type=click.STRING) @per_connection() def get(client, name): r = client.indices.get(index=name) click.echo(json.dumps(r))<|docstring|>Get index details.<|endoftext|>
e98a2eaae70200635c22cd13a3e2c6da7f7d0361cd0c7594116a837f7ffabfb5
@index.command() @click.argument('name', type=click.STRING) @per_connection() def stats(client, name): 'Fetch index statistics.' r = client.indices.stats(index=name) click.echo(json.dumps(r)) ok = ('_all' in r.keys()) if (not ok): sys.exit(UNKNOWN_ERROR)
Fetch index statistics.
src/esok/commands/index.py
stats
ahaeger/esok
2
python
@index.command() @click.argument('name', type=click.STRING) @per_connection() def stats(client, name): r = client.indices.stats(index=name) click.echo(json.dumps(r)) ok = ('_all' in r.keys()) if (not ok): sys.exit(UNKNOWN_ERROR)
@index.command() @click.argument('name', type=click.STRING) @per_connection() def stats(client, name): r = client.indices.stats(index=name) click.echo(json.dumps(r)) ok = ('_all' in r.keys()) if (not ok): sys.exit(UNKNOWN_ERROR)<|docstring|>Fetch index statistics.<|endoftext|>
1947ccd91543fbe4ee8be85aa9d67bed6289a21c69a44a9d99bccb12f05eb10a
@index.command() @click.argument('name', type=click.STRING) @click.argument('shard_count', type=click.INT) @click.option('-a', '--absolute', is_flag=True, help='Indicates that the given number of shards is an absolute replica count.') @click.option('-n', '--nodes', type=click.INT, help='Manually set node count of the cluster.') @per_connection() def shards(client, name, shard_count, absolute, nodes): 'Set the number of shards per machine.' if absolute: replica_count = shard_count LOG.info('Using manually set replica count: {}'.format(shard_count)) else: if (not nodes): r = client.nodes.stats(metric='process') cluster_node_count = r.get('nodes').values() data_node_count = sum((1 for item in cluster_node_count if ('data' in item.get('roles')))) LOG.debug('Resolved data node count from cluster: {}'.format(data_node_count)) else: LOG.debug('Using manually set node count: {}'.format(nodes)) data_node_count = nodes r = client.indices.get(index=name) primary_count = int(r.get(name).get('settings').get('index').get('number_of_shards')) LOG.debug('Primary shard count: {}'.format(primary_count)) LOG.debug('Desired shard count per host: {}'.format(shard_count)) replica_count = (((shard_count * data_node_count) - primary_count) / primary_count) replica_count = max(replica_count, 0.0) if (not replica_count.is_integer()): click.echo('The cluster configuration and desired shards per machine resulted in {} total replicas.\nThis will be rounded to {} replicas in total.'.format(replica_count, int(replica_count))) if (not click.confirm('Do you want to continue?')): sys.exit() LOG.info('Calculated replica count: {}'.format(replica_count)) body = {'index': {'number_of_replicas': int(replica_count)}} r = client.indices.put_settings(body, index=name) ok = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ok): sys.exit(UNKNOWN_ERROR)
Set the number of shards per machine.
src/esok/commands/index.py
shards
ahaeger/esok
2
python
@index.command() @click.argument('name', type=click.STRING) @click.argument('shard_count', type=click.INT) @click.option('-a', '--absolute', is_flag=True, help='Indicates that the given number of shards is an absolute replica count.') @click.option('-n', '--nodes', type=click.INT, help='Manually set node count of the cluster.') @per_connection() def shards(client, name, shard_count, absolute, nodes): if absolute: replica_count = shard_count LOG.info('Using manually set replica count: {}'.format(shard_count)) else: if (not nodes): r = client.nodes.stats(metric='process') cluster_node_count = r.get('nodes').values() data_node_count = sum((1 for item in cluster_node_count if ('data' in item.get('roles')))) LOG.debug('Resolved data node count from cluster: {}'.format(data_node_count)) else: LOG.debug('Using manually set node count: {}'.format(nodes)) data_node_count = nodes r = client.indices.get(index=name) primary_count = int(r.get(name).get('settings').get('index').get('number_of_shards')) LOG.debug('Primary shard count: {}'.format(primary_count)) LOG.debug('Desired shard count per host: {}'.format(shard_count)) replica_count = (((shard_count * data_node_count) - primary_count) / primary_count) replica_count = max(replica_count, 0.0) if (not replica_count.is_integer()): click.echo('The cluster configuration and desired shards per machine resulted in {} total replicas.\nThis will be rounded to {} replicas in total.'.format(replica_count, int(replica_count))) if (not click.confirm('Do you want to continue?')): sys.exit() LOG.info('Calculated replica count: {}'.format(replica_count)) body = {'index': {'number_of_replicas': int(replica_count)}} r = client.indices.put_settings(body, index=name) ok = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ok): sys.exit(UNKNOWN_ERROR)
@index.command() @click.argument('name', type=click.STRING) @click.argument('shard_count', type=click.INT) @click.option('-a', '--absolute', is_flag=True, help='Indicates that the given number of shards is an absolute replica count.') @click.option('-n', '--nodes', type=click.INT, help='Manually set node count of the cluster.') @per_connection() def shards(client, name, shard_count, absolute, nodes): if absolute: replica_count = shard_count LOG.info('Using manually set replica count: {}'.format(shard_count)) else: if (not nodes): r = client.nodes.stats(metric='process') cluster_node_count = r.get('nodes').values() data_node_count = sum((1 for item in cluster_node_count if ('data' in item.get('roles')))) LOG.debug('Resolved data node count from cluster: {}'.format(data_node_count)) else: LOG.debug('Using manually set node count: {}'.format(nodes)) data_node_count = nodes r = client.indices.get(index=name) primary_count = int(r.get(name).get('settings').get('index').get('number_of_shards')) LOG.debug('Primary shard count: {}'.format(primary_count)) LOG.debug('Desired shard count per host: {}'.format(shard_count)) replica_count = (((shard_count * data_node_count) - primary_count) / primary_count) replica_count = max(replica_count, 0.0) if (not replica_count.is_integer()): click.echo('The cluster configuration and desired shards per machine resulted in {} total replicas.\nThis will be rounded to {} replicas in total.'.format(replica_count, int(replica_count))) if (not click.confirm('Do you want to continue?')): sys.exit() LOG.info('Calculated replica count: {}'.format(replica_count)) body = {'index': {'number_of_replicas': int(replica_count)}} r = client.indices.put_settings(body, index=name) ok = r.get('acknowledged') LOG.info(json.dumps(r)) if (not ok): sys.exit(UNKNOWN_ERROR)<|docstring|>Set the number of shards per machine.<|endoftext|>
98c80649b1529e7ed1365426ffa90b14dfe568eaff9255ca7dd1082f5b6e97a2
@index.command() @click.argument('name', type=click.STRING) @click.option('-o', '--output-file', type=click.File('w'), default='-', show_default=True, help='Specify file to output to.') @click.option('-c', '--chunk-size', type=click.INT, default=1000, show_default=True, help='Number of documents (per shard) to fetch in each individual request.') @click.option('-s', '--scroll-time', type=click.STRING, default='5m', show_default=True, help='The duration the cluster shall maintain a consistent view.') @per_connection() def read(client, name, output_file, chunk_size, scroll_time): "Dump index contents to stdout or file.\n\n Note: subsequent reads from several cluster will overwrite contents in output file.\n\n Examples:\n\n \x08\n $ esok index read index-name\n $ esok index read -o output.json index-name\n $ esok index read index-name | jq -c '{_id, _source}' > output.json\n " r = scan(client, index=name, size=chunk_size, scroll=scroll_time) for doc in r: click.echo(json.dumps(doc), output_file)
Dump index contents to stdout or file. Note: subsequent reads from several cluster will overwrite contents in output file. Examples:  $ esok index read index-name $ esok index read -o output.json index-name $ esok index read index-name | jq -c '{_id, _source}' > output.json
src/esok/commands/index.py
read
ahaeger/esok
2
python
@index.command() @click.argument('name', type=click.STRING) @click.option('-o', '--output-file', type=click.File('w'), default='-', show_default=True, help='Specify file to output to.') @click.option('-c', '--chunk-size', type=click.INT, default=1000, show_default=True, help='Number of documents (per shard) to fetch in each individual request.') @click.option('-s', '--scroll-time', type=click.STRING, default='5m', show_default=True, help='The duration the cluster shall maintain a consistent view.') @per_connection() def read(client, name, output_file, chunk_size, scroll_time): "Dump index contents to stdout or file.\n\n Note: subsequent reads from several cluster will overwrite contents in output file.\n\n Examples:\n\n \x08\n $ esok index read index-name\n $ esok index read -o output.json index-name\n $ esok index read index-name | jq -c '{_id, _source}' > output.json\n " r = scan(client, index=name, size=chunk_size, scroll=scroll_time) for doc in r: click.echo(json.dumps(doc), output_file)
@index.command() @click.argument('name', type=click.STRING) @click.option('-o', '--output-file', type=click.File('w'), default='-', show_default=True, help='Specify file to output to.') @click.option('-c', '--chunk-size', type=click.INT, default=1000, show_default=True, help='Number of documents (per shard) to fetch in each individual request.') @click.option('-s', '--scroll-time', type=click.STRING, default='5m', show_default=True, help='The duration the cluster shall maintain a consistent view.') @per_connection() def read(client, name, output_file, chunk_size, scroll_time): "Dump index contents to stdout or file.\n\n Note: subsequent reads from several cluster will overwrite contents in output file.\n\n Examples:\n\n \x08\n $ esok index read index-name\n $ esok index read -o output.json index-name\n $ esok index read index-name | jq -c '{_id, _source}' > output.json\n " r = scan(client, index=name, size=chunk_size, scroll=scroll_time) for doc in r: click.echo(json.dumps(doc), output_file)<|docstring|>Dump index contents to stdout or file. Note: subsequent reads from several cluster will overwrite contents in output file. Examples:  $ esok index read index-name $ esok index read -o output.json index-name $ esok index read index-name | jq -c '{_id, _source}' > output.json<|endoftext|>
21fff5ac4021e5a1c9f23da82fea27b92a74d6ea557eb04602ac03931061fcc5
@index.command() @per_connection() @click.argument('docs', type=click.Path(allow_dash=True)) @click.option('-i', '--index-name', type=click.STRING, help='Forces target index to given name for all documents.') @click.option('-c', '--chunk-size', type=click.INT, help='Number of docs in one chunk', default=500, show_default=True) @click.option('--refresh', is_flag=True, help='Refresh the index after indexing.') @click.option('-C', '--max-chunk-bytes', type=click.INT, help='The maximum size of the request in bytes', default=int(100000000.0), show_default=True) @click.option('-R', '--max-retries', type=click.INT, help='Maximum number of times a document will be retried when 429 is received', default=0, show_default=True) @click.option('-b', '--initial-backoff', type=click.INT, help='Number of seconds to wait before the first retry. Any subsequent retries will be powers of initial-backoff * 2^retry_number', default=2, show_default=True) @click.option('-B', '--max-backoff', type=click.INT, help='Maximum number of seconds a retry will wait', default=600, show_default=True) def write(client, docs, index_name, refresh, chunk_size, max_chunk_bytes, max_retries, initial_backoff, max_backoff): ' Write to a given index.\n\n The input file is expected to be in the "JSON-lines" format, i.e. with one valid\n JSON-object per row. Pass - to read from stdin.\n\n Reserved keys include \'_index\', \'_type\', \'_id\' and \'_source\' (among others), which\n are all optional. If \'_source\' is present Elasticsearch will assume that the\n document to index resides within it. 
If \'_source\' is not present, all other\n non-reserved keys will be indexed.\n\n For more details, see:\n\n \x08\n http://elasticsearch-py.readthedocs.io/en/5.5.1/helpers.html#bulk-helpers\n\n Examples:\n\n \x08\n $ esok index write -i index-name ./data.json\n $ echo \'{"hello": "world"}\' | esok index write -i index-name -\n $ esok index read index-name | jq -c \'{_id, stuff: ._source.title}\' \\\n | esok index write -i index-name-1 -\n ' for actions in _read_actions(docs, max_chunk_bytes): for action in actions: if (index_name is not None): action['_index'] = index_name action['_type'] = ('_doc' if ('_type' not in action.keys()) else action['_type']) (successful_request_count, errors) = bulk(client, actions, chunk_size=chunk_size, raise_on_error=False, max_chunk_bytes=max_chunk_bytes, max_retries=max_retries, initial_backoff=initial_backoff, max_backoff=max_backoff, refresh=refresh) chunk_count = len(actions) ok = (successful_request_count == chunk_count) if (not ok): LOG.error('{} / {} failed documents.'.format((chunk_count - successful_request_count), chunk_count)) for error in errors: LOG.error(json.dumps(error)) sys.exit(UNKNOWN_ERROR)
Write to a given index. The input file is expected to be in the "JSON-lines" format, i.e. with one valid JSON-object per row. Pass - to read from stdin. Reserved keys include '_index', '_type', '_id' and '_source' (among others), which are all optional. If '_source' is present Elasticsearch will assume that the document to index resides within it. If '_source' is not present, all other non-reserved keys will be indexed. For more details, see:  http://elasticsearch-py.readthedocs.io/en/5.5.1/helpers.html#bulk-helpers Examples:  $ esok index write -i index-name ./data.json $ echo '{"hello": "world"}' | esok index write -i index-name - $ esok index read index-name | jq -c '{_id, stuff: ._source.title}' \ | esok index write -i index-name-1 -
src/esok/commands/index.py
write
ahaeger/esok
2
python
@index.command() @per_connection() @click.argument('docs', type=click.Path(allow_dash=True)) @click.option('-i', '--index-name', type=click.STRING, help='Forces target index to given name for all documents.') @click.option('-c', '--chunk-size', type=click.INT, help='Number of docs in one chunk', default=500, show_default=True) @click.option('--refresh', is_flag=True, help='Refresh the index after indexing.') @click.option('-C', '--max-chunk-bytes', type=click.INT, help='The maximum size of the request in bytes', default=int(100000000.0), show_default=True) @click.option('-R', '--max-retries', type=click.INT, help='Maximum number of times a document will be retried when 429 is received', default=0, show_default=True) @click.option('-b', '--initial-backoff', type=click.INT, help='Number of seconds to wait before the first retry. Any subsequent retries will be powers of initial-backoff * 2^retry_number', default=2, show_default=True) @click.option('-B', '--max-backoff', type=click.INT, help='Maximum number of seconds a retry will wait', default=600, show_default=True) def write(client, docs, index_name, refresh, chunk_size, max_chunk_bytes, max_retries, initial_backoff, max_backoff): ' Write to a given index.\n\n The input file is expected to be in the "JSON-lines" format, i.e. with one valid\n JSON-object per row. Pass - to read from stdin.\n\n Reserved keys include \'_index\', \'_type\', \'_id\' and \'_source\' (among others), which\n are all optional. If \'_source\' is present Elasticsearch will assume that the\n document to index resides within it. 
If \'_source\' is not present, all other\n non-reserved keys will be indexed.\n\n For more details, see:\n\n \x08\n http://elasticsearch-py.readthedocs.io/en/5.5.1/helpers.html#bulk-helpers\n\n Examples:\n\n \x08\n $ esok index write -i index-name ./data.json\n $ echo \'{"hello": "world"}\' | esok index write -i index-name -\n $ esok index read index-name | jq -c \'{_id, stuff: ._source.title}\' \\\n | esok index write -i index-name-1 -\n ' for actions in _read_actions(docs, max_chunk_bytes): for action in actions: if (index_name is not None): action['_index'] = index_name action['_type'] = ('_doc' if ('_type' not in action.keys()) else action['_type']) (successful_request_count, errors) = bulk(client, actions, chunk_size=chunk_size, raise_on_error=False, max_chunk_bytes=max_chunk_bytes, max_retries=max_retries, initial_backoff=initial_backoff, max_backoff=max_backoff, refresh=refresh) chunk_count = len(actions) ok = (successful_request_count == chunk_count) if (not ok): LOG.error('{} / {} failed documents.'.format((chunk_count - successful_request_count), chunk_count)) for error in errors: LOG.error(json.dumps(error)) sys.exit(UNKNOWN_ERROR)
@index.command() @per_connection() @click.argument('docs', type=click.Path(allow_dash=True)) @click.option('-i', '--index-name', type=click.STRING, help='Forces target index to given name for all documents.') @click.option('-c', '--chunk-size', type=click.INT, help='Number of docs in one chunk', default=500, show_default=True) @click.option('--refresh', is_flag=True, help='Refresh the index after indexing.') @click.option('-C', '--max-chunk-bytes', type=click.INT, help='The maximum size of the request in bytes', default=int(100000000.0), show_default=True) @click.option('-R', '--max-retries', type=click.INT, help='Maximum number of times a document will be retried when 429 is received', default=0, show_default=True) @click.option('-b', '--initial-backoff', type=click.INT, help='Number of seconds to wait before the first retry. Any subsequent retries will be powers of initial-backoff * 2^retry_number', default=2, show_default=True) @click.option('-B', '--max-backoff', type=click.INT, help='Maximum number of seconds a retry will wait', default=600, show_default=True) def write(client, docs, index_name, refresh, chunk_size, max_chunk_bytes, max_retries, initial_backoff, max_backoff): ' Write to a given index.\n\n The input file is expected to be in the "JSON-lines" format, i.e. with one valid\n JSON-object per row. Pass - to read from stdin.\n\n Reserved keys include \'_index\', \'_type\', \'_id\' and \'_source\' (among others), which\n are all optional. If \'_source\' is present Elasticsearch will assume that the\n document to index resides within it. 
If \'_source\' is not present, all other\n non-reserved keys will be indexed.\n\n For more details, see:\n\n \x08\n http://elasticsearch-py.readthedocs.io/en/5.5.1/helpers.html#bulk-helpers\n\n Examples:\n\n \x08\n $ esok index write -i index-name ./data.json\n $ echo \'{"hello": "world"}\' | esok index write -i index-name -\n $ esok index read index-name | jq -c \'{_id, stuff: ._source.title}\' \\\n | esok index write -i index-name-1 -\n ' for actions in _read_actions(docs, max_chunk_bytes): for action in actions: if (index_name is not None): action['_index'] = index_name action['_type'] = ('_doc' if ('_type' not in action.keys()) else action['_type']) (successful_request_count, errors) = bulk(client, actions, chunk_size=chunk_size, raise_on_error=False, max_chunk_bytes=max_chunk_bytes, max_retries=max_retries, initial_backoff=initial_backoff, max_backoff=max_backoff, refresh=refresh) chunk_count = len(actions) ok = (successful_request_count == chunk_count) if (not ok): LOG.error('{} / {} failed documents.'.format((chunk_count - successful_request_count), chunk_count)) for error in errors: LOG.error(json.dumps(error)) sys.exit(UNKNOWN_ERROR)<|docstring|>Write to a given index. The input file is expected to be in the "JSON-lines" format, i.e. with one valid JSON-object per row. Pass - to read from stdin. Reserved keys include '_index', '_type', '_id' and '_source' (among others), which are all optional. If '_source' is present Elasticsearch will assume that the document to index resides within it. If '_source' is not present, all other non-reserved keys will be indexed. For more details, see:  http://elasticsearch-py.readthedocs.io/en/5.5.1/helpers.html#bulk-helpers Examples:  $ esok index write -i index-name ./data.json $ echo '{"hello": "world"}' | esok index write -i index-name - $ esok index read index-name | jq -c '{_id, stuff: ._source.title}' \ | esok index write -i index-name-1 -<|endoftext|>
2c79c3f2526e6846fc2e91a8fe2662fcc637bd6fdf114a322c0674d3fe995c34
def is_all_executions_finished(client, deployment_id=None): "\n Checks if all system workflows or given deployment id are finished\n running (successful not).\n\n :param client: cloudify http client.\n :param deployment_id: Optional, will check all it's executions\n :return: True if all executions had ended\n " offset = 0 while True: executions = client.executions.list(include_system_workflows=True, _offset=offset, _size=PAGINATION_SIZE) for execution in executions: execution_status = execution.get('status') if (execution.get('is_system_workflow') or (deployment_id and (deployment_id == execution.get('deployment_id')))): if _is_execution_not_ended(execution_status): return False if (executions.metadata.pagination.total <= executions.metadata.pagination.offset): break offset = (offset + len(executions)) return True
Checks if all system workflows or given deployment id are finished running (successful not). :param client: cloudify http client. :param deployment_id: Optional, will check all it's executions :return: True if all executions had ended
cloudify_types/cloudify_types/component/polling.py
is_all_executions_finished
cloudify-cosmo/cloudify-manager
124
python
def is_all_executions_finished(client, deployment_id=None): "\n Checks if all system workflows or given deployment id are finished\n running (successful not).\n\n :param client: cloudify http client.\n :param deployment_id: Optional, will check all it's executions\n :return: True if all executions had ended\n " offset = 0 while True: executions = client.executions.list(include_system_workflows=True, _offset=offset, _size=PAGINATION_SIZE) for execution in executions: execution_status = execution.get('status') if (execution.get('is_system_workflow') or (deployment_id and (deployment_id == execution.get('deployment_id')))): if _is_execution_not_ended(execution_status): return False if (executions.metadata.pagination.total <= executions.metadata.pagination.offset): break offset = (offset + len(executions)) return True
def is_all_executions_finished(client, deployment_id=None): "\n Checks if all system workflows or given deployment id are finished\n running (successful not).\n\n :param client: cloudify http client.\n :param deployment_id: Optional, will check all it's executions\n :return: True if all executions had ended\n " offset = 0 while True: executions = client.executions.list(include_system_workflows=True, _offset=offset, _size=PAGINATION_SIZE) for execution in executions: execution_status = execution.get('status') if (execution.get('is_system_workflow') or (deployment_id and (deployment_id == execution.get('deployment_id')))): if _is_execution_not_ended(execution_status): return False if (executions.metadata.pagination.total <= executions.metadata.pagination.offset): break offset = (offset + len(executions)) return True<|docstring|>Checks if all system workflows or given deployment id are finished running (successful not). :param client: cloudify http client. :param deployment_id: Optional, will check all it's executions :return: True if all executions had ended<|endoftext|>
12f94583c77a0be62e5eab054f34287a2f3548f33118edcd224c902b7988ef1f
def room_of(p): '\n Tell in what room a given node is located\n :param p: a point name (node)\n :return: a room name\n ' for r in rooms.keys(): if (p in rooms[r]): return r return False
Tell in what room a given node is located :param p: a point name (node) :return: a room name
map.py
room_of
marcoparadina/pyhop
1
python
def room_of(p): '\n Tell in what room a given node is located\n :param p: a point name (node)\n :return: a room name\n ' for r in rooms.keys(): if (p in rooms[r]): return r return False
def room_of(p): '\n Tell in what room a given node is located\n :param p: a point name (node)\n :return: a room name\n ' for r in rooms.keys(): if (p in rooms[r]): return r return False<|docstring|>Tell in what room a given node is located :param p: a point name (node) :return: a room name<|endoftext|>
31b4db8abdb63da8dc00705be77326701f5e32d7ba097e359d9a0acd607ccd30
def side_of(d, r): '\n Look at the pair of nodes around a door d, and return the one which is located in room r\n :param d: a door\n :param r: a room\n :return: a node\n ' (p1, p2) = doors[d] if (p1 in rooms[r]): return p1 if (p2 in rooms[r]): return p2 return False
Look at the pair of nodes around a door d, and return the one which is located in room r :param d: a door :param r: a room :return: a node
map.py
side_of
marcoparadina/pyhop
1
python
def side_of(d, r): '\n Look at the pair of nodes around a door d, and return the one which is located in room r\n :param d: a door\n :param r: a room\n :return: a node\n ' (p1, p2) = doors[d] if (p1 in rooms[r]): return p1 if (p2 in rooms[r]): return p2 return False
def side_of(d, r): '\n Look at the pair of nodes around a door d, and return the one which is located in room r\n :param d: a door\n :param r: a room\n :return: a node\n ' (p1, p2) = doors[d] if (p1 in rooms[r]): return p1 if (p2 in rooms[r]): return p2 return False<|docstring|>Look at the pair of nodes around a door d, and return the one which is located in room r :param d: a door :param r: a room :return: a node<|endoftext|>
369396a75baefdb4dbb0417c24675422f46d8dd4a8de854509f9f6715d63b063
def other_side_of(d, r): "\n Like 'side_of' but it returns the other node\n :param d: a door\n :param r: a room\n :return: the node beside d that is located opposite to r\n " (p1, p2) = doors[d] if (p1 in rooms[r]): return p2 if (p2 in rooms[r]): return p1 return False
Like 'side_of' but it returns the other node :param d: a door :param r: a room :return: the node beside d that is located opposite to r
map.py
other_side_of
marcoparadina/pyhop
1
python
def other_side_of(d, r): "\n Like 'side_of' but it returns the other node\n :param d: a door\n :param r: a room\n :return: the node beside d that is located opposite to r\n " (p1, p2) = doors[d] if (p1 in rooms[r]): return p2 if (p2 in rooms[r]): return p1 return False
def other_side_of(d, r): "\n Like 'side_of' but it returns the other node\n :param d: a door\n :param r: a room\n :return: the node beside d that is located opposite to r\n " (p1, p2) = doors[d] if (p1 in rooms[r]): return p2 if (p2 in rooms[r]): return p1 return False<|docstring|>Like 'side_of' but it returns the other node :param d: a door :param r: a room :return: the node beside d that is located opposite to r<|endoftext|>
726f2b5e80a8ef8401df469c0f7bbc7fbe5de2b78596615a8b4c03a8b3ecb177
def doors_of(r): '\n Returns the list of doors connected to room r\n :param r: a room\n :return: a list of doors\n ' res = [] for d in doors.keys(): if side_of(d, r): res.append(d) return res
Returns the list of doors connected to room r :param r: a room :return: a list of doors
map.py
doors_of
marcoparadina/pyhop
1
python
def doors_of(r): '\n Returns the list of doors connected to room r\n :param r: a room\n :return: a list of doors\n ' res = [] for d in doors.keys(): if side_of(d, r): res.append(d) return res
def doors_of(r): '\n Returns the list of doors connected to room r\n :param r: a room\n :return: a list of doors\n ' res = [] for d in doors.keys(): if side_of(d, r): res.append(d) return res<|docstring|>Returns the list of doors connected to room r :param r: a room :return: a list of doors<|endoftext|>
2c4bf0638131271fbe42fe62c20167ce0769d72b0ce4e0f5bad6b321ca9716bb
def _opener(self): '\n Creates an SSL or default urllib opener\n :return: an opener object\n ' if (self.cert and os.path.exists(self.cert)): context = ssl.create_default_context() context.load_cert_chain(self.cert, keyfile=self.key, password=self.password) opener = build_opener(HTTPSHandler(context=context)) else: opener = build_opener() if self.headers: opener.addheaders = [(k, v) for (k, v) in self.headers.items()] return opener
Creates an SSL or default urllib opener :return: an opener object
common_utils/http_client27.py
_opener
kiltlifter/common_utils
0
python
def _opener(self): '\n Creates an SSL or default urllib opener\n :return: an opener object\n ' if (self.cert and os.path.exists(self.cert)): context = ssl.create_default_context() context.load_cert_chain(self.cert, keyfile=self.key, password=self.password) opener = build_opener(HTTPSHandler(context=context)) else: opener = build_opener() if self.headers: opener.addheaders = [(k, v) for (k, v) in self.headers.items()] return opener
def _opener(self): '\n Creates an SSL or default urllib opener\n :return: an opener object\n ' if (self.cert and os.path.exists(self.cert)): context = ssl.create_default_context() context.load_cert_chain(self.cert, keyfile=self.key, password=self.password) opener = build_opener(HTTPSHandler(context=context)) else: opener = build_opener() if self.headers: opener.addheaders = [(k, v) for (k, v) in self.headers.items()] return opener<|docstring|>Creates an SSL or default urllib opener :return: an opener object<|endoftext|>
9e0799406c6e511ca2ab89323733f4db1c35a72eebf9adcb3d7de91996897226
def request(self, url, data=None): '\n Makes a request on behalf of the client\n :param url: Resource to access\n :param data: payload for a POST operation\n :return: request metadata and response in form of a dict\n ' opener = self._opener() resp = opener.open(url, data=(data.encode(self.encoding) if data else None)) meta = vars(resp) return {'meta': meta, 'response': resp.read()}
Makes a request on behalf of the client :param url: Resource to access :param data: payload for a POST operation :return: request metadata and response in form of a dict
common_utils/http_client27.py
request
kiltlifter/common_utils
0
python
def request(self, url, data=None): '\n Makes a request on behalf of the client\n :param url: Resource to access\n :param data: payload for a POST operation\n :return: request metadata and response in form of a dict\n ' opener = self._opener() resp = opener.open(url, data=(data.encode(self.encoding) if data else None)) meta = vars(resp) return {'meta': meta, 'response': resp.read()}
def request(self, url, data=None): '\n Makes a request on behalf of the client\n :param url: Resource to access\n :param data: payload for a POST operation\n :return: request metadata and response in form of a dict\n ' opener = self._opener() resp = opener.open(url, data=(data.encode(self.encoding) if data else None)) meta = vars(resp) return {'meta': meta, 'response': resp.read()}<|docstring|>Makes a request on behalf of the client :param url: Resource to access :param data: payload for a POST operation :return: request metadata and response in form of a dict<|endoftext|>
0eab8572fa041bffcd8f04da97aa50922788d4f2bc8d2e5810f9b23f1bef2dc2
@protocol.commands.add('count') def count(context, *args): '\n *musicpd.org, music database section:*\n\n ``count {TAG} {NEEDLE}``\n\n Counts the number of songs and their total playtime in the db\n matching ``TAG`` exactly.\n\n *GMPC:*\n\n - use multiple tag-needle pairs to make more specific searches.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: raise exceptions.MpdArgError('incorrect arguments') results = context.core.library.search(query=query, exact=True).get() result_tracks = _get_tracks(results) total_length = sum((t.length for t in result_tracks if t.length)) return [('songs', len(result_tracks)), ('playtime', int((total_length / 1000)))]
*musicpd.org, music database section:* ``count {TAG} {NEEDLE}`` Counts the number of songs and their total playtime in the db matching ``TAG`` exactly. *GMPC:* - use multiple tag-needle pairs to make more specific searches.
mopidy_mpd/protocol/music_db.py
count
NickHu/mopidy-mpd
68
python
@protocol.commands.add('count') def count(context, *args): '\n *musicpd.org, music database section:*\n\n ``count {TAG} {NEEDLE}``\n\n Counts the number of songs and their total playtime in the db\n matching ``TAG`` exactly.\n\n *GMPC:*\n\n - use multiple tag-needle pairs to make more specific searches.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: raise exceptions.MpdArgError('incorrect arguments') results = context.core.library.search(query=query, exact=True).get() result_tracks = _get_tracks(results) total_length = sum((t.length for t in result_tracks if t.length)) return [('songs', len(result_tracks)), ('playtime', int((total_length / 1000)))]
@protocol.commands.add('count') def count(context, *args): '\n *musicpd.org, music database section:*\n\n ``count {TAG} {NEEDLE}``\n\n Counts the number of songs and their total playtime in the db\n matching ``TAG`` exactly.\n\n *GMPC:*\n\n - use multiple tag-needle pairs to make more specific searches.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: raise exceptions.MpdArgError('incorrect arguments') results = context.core.library.search(query=query, exact=True).get() result_tracks = _get_tracks(results) total_length = sum((t.length for t in result_tracks if t.length)) return [('songs', len(result_tracks)), ('playtime', int((total_length / 1000)))]<|docstring|>*musicpd.org, music database section:* ``count {TAG} {NEEDLE}`` Counts the number of songs and their total playtime in the db matching ``TAG`` exactly. *GMPC:* - use multiple tag-needle pairs to make more specific searches.<|endoftext|>
20568f563623e2adff3e06a462794a06da9af7a4b3342d308c319804d04e5029
@protocol.commands.add('find') def find(context, *args): '\n *musicpd.org, music database section:*\n\n ``find {TYPE} {WHAT}``\n\n Finds songs in the db that are exactly ``WHAT``. ``TYPE`` can be any\n tag supported by MPD, or one of the two special parameters - ``file``\n to search by full path (relative to database root), and ``any`` to\n match against all available tags. ``WHAT`` is what to find.\n\n *GMPC:*\n\n - also uses ``find album "[ALBUM]" artist "[ARTIST]"`` to list album\n tracks.\n\n *ncmpc:*\n\n - capitalizes the type argument.\n\n *ncmpcpp:*\n\n - also uses the search type "date".\n - uses "file" instead of "filename".\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query=query, exact=True).get() result_tracks = [] if (('artist' not in query) and ('albumartist' not in query) and ('composer' not in query) and ('performer' not in query)): result_tracks += [_artist_as_track(a) for a in _get_artists(results)] if ('album' not in query): result_tracks += [_album_as_track(a) for a in _get_albums(results)] result_tracks += _get_tracks(results) return translator.tracks_to_mpd_format(result_tracks)
*musicpd.org, music database section:* ``find {TYPE} {WHAT}`` Finds songs in the db that are exactly ``WHAT``. ``TYPE`` can be any tag supported by MPD, or one of the two special parameters - ``file`` to search by full path (relative to database root), and ``any`` to match against all available tags. ``WHAT`` is what to find. *GMPC:* - also uses ``find album "[ALBUM]" artist "[ARTIST]"`` to list album tracks. *ncmpc:* - capitalizes the type argument. *ncmpcpp:* - also uses the search type "date". - uses "file" instead of "filename".
mopidy_mpd/protocol/music_db.py
find
NickHu/mopidy-mpd
68
python
@protocol.commands.add('find') def find(context, *args): '\n *musicpd.org, music database section:*\n\n ``find {TYPE} {WHAT}``\n\n Finds songs in the db that are exactly ``WHAT``. ``TYPE`` can be any\n tag supported by MPD, or one of the two special parameters - ``file``\n to search by full path (relative to database root), and ``any`` to\n match against all available tags. ``WHAT`` is what to find.\n\n *GMPC:*\n\n - also uses ``find album "[ALBUM]" artist "[ARTIST]"`` to list album\n tracks.\n\n *ncmpc:*\n\n - capitalizes the type argument.\n\n *ncmpcpp:*\n\n - also uses the search type "date".\n - uses "file" instead of "filename".\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query=query, exact=True).get() result_tracks = [] if (('artist' not in query) and ('albumartist' not in query) and ('composer' not in query) and ('performer' not in query)): result_tracks += [_artist_as_track(a) for a in _get_artists(results)] if ('album' not in query): result_tracks += [_album_as_track(a) for a in _get_albums(results)] result_tracks += _get_tracks(results) return translator.tracks_to_mpd_format(result_tracks)
@protocol.commands.add('find') def find(context, *args): '\n *musicpd.org, music database section:*\n\n ``find {TYPE} {WHAT}``\n\n Finds songs in the db that are exactly ``WHAT``. ``TYPE`` can be any\n tag supported by MPD, or one of the two special parameters - ``file``\n to search by full path (relative to database root), and ``any`` to\n match against all available tags. ``WHAT`` is what to find.\n\n *GMPC:*\n\n - also uses ``find album "[ALBUM]" artist "[ARTIST]"`` to list album\n tracks.\n\n *ncmpc:*\n\n - capitalizes the type argument.\n\n *ncmpcpp:*\n\n - also uses the search type "date".\n - uses "file" instead of "filename".\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query=query, exact=True).get() result_tracks = [] if (('artist' not in query) and ('albumartist' not in query) and ('composer' not in query) and ('performer' not in query)): result_tracks += [_artist_as_track(a) for a in _get_artists(results)] if ('album' not in query): result_tracks += [_album_as_track(a) for a in _get_albums(results)] result_tracks += _get_tracks(results) return translator.tracks_to_mpd_format(result_tracks)<|docstring|>*musicpd.org, music database section:* ``find {TYPE} {WHAT}`` Finds songs in the db that are exactly ``WHAT``. ``TYPE`` can be any tag supported by MPD, or one of the two special parameters - ``file`` to search by full path (relative to database root), and ``any`` to match against all available tags. ``WHAT`` is what to find. *GMPC:* - also uses ``find album "[ALBUM]" artist "[ARTIST]"`` to list album tracks. *ncmpc:* - capitalizes the type argument. *ncmpcpp:* - also uses the search type "date". - uses "file" instead of "filename".<|endoftext|>
e96b4a700f365423f7f5c95a3f55867c4aef54f65468361a5c0a53b0ec7a7aa8
@protocol.commands.add('findadd') def findadd(context, *args): '\n *musicpd.org, music database section:*\n\n ``findadd {TYPE} {WHAT}``\n\n Finds songs in the db that are exactly ``WHAT`` and adds them to\n current playlist. Parameters have the same meaning as for ``find``.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query=query, exact=True).get() context.core.tracklist.add(uris=[track.uri for track in _get_tracks(results)]).get()
*musicpd.org, music database section:* ``findadd {TYPE} {WHAT}`` Finds songs in the db that are exactly ``WHAT`` and adds them to current playlist. Parameters have the same meaning as for ``find``.
mopidy_mpd/protocol/music_db.py
findadd
NickHu/mopidy-mpd
68
python
@protocol.commands.add('findadd') def findadd(context, *args): '\n *musicpd.org, music database section:*\n\n ``findadd {TYPE} {WHAT}``\n\n Finds songs in the db that are exactly ``WHAT`` and adds them to\n current playlist. Parameters have the same meaning as for ``find``.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query=query, exact=True).get() context.core.tracklist.add(uris=[track.uri for track in _get_tracks(results)]).get()
@protocol.commands.add('findadd') def findadd(context, *args): '\n *musicpd.org, music database section:*\n\n ``findadd {TYPE} {WHAT}``\n\n Finds songs in the db that are exactly ``WHAT`` and adds them to\n current playlist. Parameters have the same meaning as for ``find``.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query=query, exact=True).get() context.core.tracklist.add(uris=[track.uri for track in _get_tracks(results)]).get()<|docstring|>*musicpd.org, music database section:* ``findadd {TYPE} {WHAT}`` Finds songs in the db that are exactly ``WHAT`` and adds them to current playlist. Parameters have the same meaning as for ``find``.<|endoftext|>
25dd4342b44302f67ca9d19f5bccb3347c1de52d539b5eab0ab5645c5fbb002c
@protocol.commands.add('list') def list_(context, *args): '\n *musicpd.org, music database section:*\n\n ``list {TYPE} [ARTIST]``\n\n Lists all tags of the specified type. ``TYPE`` should be ``album``,\n ``artist``, ``albumartist``, ``date``, or ``genre``.\n\n ``ARTIST`` is an optional parameter when type is ``album``,\n ``date``, or ``genre``. This filters the result list by an artist.\n\n *Clarifications:*\n\n The musicpd.org documentation for ``list`` is far from complete. The\n command also supports the following variant:\n\n ``list {TYPE} {QUERY}``\n\n Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs\n of a field name and a value. If the ``QUERY`` consists of more than one\n pair, the pairs are AND-ed together to find the result. Examples of\n valid queries and what they should return:\n\n ``list "artist" "artist" "ABBA"``\n List artists where the artist name is "ABBA". Response::\n\n Artist: ABBA\n OK\n\n ``list "album" "artist" "ABBA"``\n Lists albums where the artist name is "ABBA". Response::\n\n Album: More ABBA Gold: More ABBA Hits\n Album: Absolute More Christmas\n Album: Gold: Greatest Hits\n OK\n\n ``list "artist" "album" "Gold: Greatest Hits"``\n Lists artists where the album name is "Gold: Greatest Hits".\n Response::\n\n Artist: ABBA\n OK\n\n ``list "artist" "artist" "ABBA" "artist" "TLC"``\n Lists artists where the artist name is "ABBA" *and* "TLC". Should\n never match anything. Response::\n\n OK\n\n ``list "date" "artist" "ABBA"``\n Lists dates where artist name is "ABBA". Response::\n\n Date:\n Date: 1992\n Date: 1993\n OK\n\n ``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``\n Lists dates where artist name is "ABBA" and album name is "Gold:\n Greatest Hits". Response::\n\n Date: 1992\n OK\n\n ``list "genre" "artist" "The Rolling Stones"``\n Lists genres where artist name is "The Rolling Stones". 
Response::\n\n Genre:\n Genre: Rock\n OK\n\n *ncmpc:*\n\n - capitalizes the field argument.\n ' params = list(args) if (not params): raise exceptions.MpdArgError('too few arguments for "list"') field_arg = params.pop(0).lower() field = _LIST_MAPPING.get(field_arg) if (field is None): raise exceptions.MpdArgError(f'Unknown tag type: {field_arg}') query = None if (len(params) == 1): if (field != 'album'): raise exceptions.MpdArgError('should be "Album" for 3 arguments') if params[0].strip(): query = {'artist': params} else: try: query = _query_from_mpd_search_parameters(params, _SEARCH_MAPPING) except exceptions.MpdArgError as exc: exc.message = 'Unknown filter type' raise except ValueError: return name = _LIST_NAME_MAPPING[field] result = context.core.library.get_distinct(field, query) return [(name, value) for value in result.get()]
*musicpd.org, music database section:* ``list {TYPE} [ARTIST]`` Lists all tags of the specified type. ``TYPE`` should be ``album``, ``artist``, ``albumartist``, ``date``, or ``genre``. ``ARTIST`` is an optional parameter when type is ``album``, ``date``, or ``genre``. This filters the result list by an artist. *Clarifications:* The musicpd.org documentation for ``list`` is far from complete. The command also supports the following variant: ``list {TYPE} {QUERY}`` Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs of a field name and a value. If the ``QUERY`` consists of more than one pair, the pairs are AND-ed together to find the result. Examples of valid queries and what they should return: ``list "artist" "artist" "ABBA"`` List artists where the artist name is "ABBA". Response:: Artist: ABBA OK ``list "album" "artist" "ABBA"`` Lists albums where the artist name is "ABBA". Response:: Album: More ABBA Gold: More ABBA Hits Album: Absolute More Christmas Album: Gold: Greatest Hits OK ``list "artist" "album" "Gold: Greatest Hits"`` Lists artists where the album name is "Gold: Greatest Hits". Response:: Artist: ABBA OK ``list "artist" "artist" "ABBA" "artist" "TLC"`` Lists artists where the artist name is "ABBA" *and* "TLC". Should never match anything. Response:: OK ``list "date" "artist" "ABBA"`` Lists dates where artist name is "ABBA". Response:: Date: Date: 1992 Date: 1993 OK ``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"`` Lists dates where artist name is "ABBA" and album name is "Gold: Greatest Hits". Response:: Date: 1992 OK ``list "genre" "artist" "The Rolling Stones"`` Lists genres where artist name is "The Rolling Stones". Response:: Genre: Genre: Rock OK *ncmpc:* - capitalizes the field argument.
mopidy_mpd/protocol/music_db.py
list_
NickHu/mopidy-mpd
68
python
@protocol.commands.add('list') def list_(context, *args): '\n *musicpd.org, music database section:*\n\n ``list {TYPE} [ARTIST]``\n\n Lists all tags of the specified type. ``TYPE`` should be ``album``,\n ``artist``, ``albumartist``, ``date``, or ``genre``.\n\n ``ARTIST`` is an optional parameter when type is ``album``,\n ``date``, or ``genre``. This filters the result list by an artist.\n\n *Clarifications:*\n\n The musicpd.org documentation for ``list`` is far from complete. The\n command also supports the following variant:\n\n ``list {TYPE} {QUERY}``\n\n Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs\n of a field name and a value. If the ``QUERY`` consists of more than one\n pair, the pairs are AND-ed together to find the result. Examples of\n valid queries and what they should return:\n\n ``list "artist" "artist" "ABBA"``\n List artists where the artist name is "ABBA". Response::\n\n Artist: ABBA\n OK\n\n ``list "album" "artist" "ABBA"``\n Lists albums where the artist name is "ABBA". Response::\n\n Album: More ABBA Gold: More ABBA Hits\n Album: Absolute More Christmas\n Album: Gold: Greatest Hits\n OK\n\n ``list "artist" "album" "Gold: Greatest Hits"``\n Lists artists where the album name is "Gold: Greatest Hits".\n Response::\n\n Artist: ABBA\n OK\n\n ``list "artist" "artist" "ABBA" "artist" "TLC"``\n Lists artists where the artist name is "ABBA" *and* "TLC". Should\n never match anything. Response::\n\n OK\n\n ``list "date" "artist" "ABBA"``\n Lists dates where artist name is "ABBA". Response::\n\n Date:\n Date: 1992\n Date: 1993\n OK\n\n ``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``\n Lists dates where artist name is "ABBA" and album name is "Gold:\n Greatest Hits". Response::\n\n Date: 1992\n OK\n\n ``list "genre" "artist" "The Rolling Stones"``\n Lists genres where artist name is "The Rolling Stones". 
Response::\n\n Genre:\n Genre: Rock\n OK\n\n *ncmpc:*\n\n - capitalizes the field argument.\n ' params = list(args) if (not params): raise exceptions.MpdArgError('too few arguments for "list"') field_arg = params.pop(0).lower() field = _LIST_MAPPING.get(field_arg) if (field is None): raise exceptions.MpdArgError(f'Unknown tag type: {field_arg}') query = None if (len(params) == 1): if (field != 'album'): raise exceptions.MpdArgError('should be "Album" for 3 arguments') if params[0].strip(): query = {'artist': params} else: try: query = _query_from_mpd_search_parameters(params, _SEARCH_MAPPING) except exceptions.MpdArgError as exc: exc.message = 'Unknown filter type' raise except ValueError: return name = _LIST_NAME_MAPPING[field] result = context.core.library.get_distinct(field, query) return [(name, value) for value in result.get()]
@protocol.commands.add('list') def list_(context, *args): '\n *musicpd.org, music database section:*\n\n ``list {TYPE} [ARTIST]``\n\n Lists all tags of the specified type. ``TYPE`` should be ``album``,\n ``artist``, ``albumartist``, ``date``, or ``genre``.\n\n ``ARTIST`` is an optional parameter when type is ``album``,\n ``date``, or ``genre``. This filters the result list by an artist.\n\n *Clarifications:*\n\n The musicpd.org documentation for ``list`` is far from complete. The\n command also supports the following variant:\n\n ``list {TYPE} {QUERY}``\n\n Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs\n of a field name and a value. If the ``QUERY`` consists of more than one\n pair, the pairs are AND-ed together to find the result. Examples of\n valid queries and what they should return:\n\n ``list "artist" "artist" "ABBA"``\n List artists where the artist name is "ABBA". Response::\n\n Artist: ABBA\n OK\n\n ``list "album" "artist" "ABBA"``\n Lists albums where the artist name is "ABBA". Response::\n\n Album: More ABBA Gold: More ABBA Hits\n Album: Absolute More Christmas\n Album: Gold: Greatest Hits\n OK\n\n ``list "artist" "album" "Gold: Greatest Hits"``\n Lists artists where the album name is "Gold: Greatest Hits".\n Response::\n\n Artist: ABBA\n OK\n\n ``list "artist" "artist" "ABBA" "artist" "TLC"``\n Lists artists where the artist name is "ABBA" *and* "TLC". Should\n never match anything. Response::\n\n OK\n\n ``list "date" "artist" "ABBA"``\n Lists dates where artist name is "ABBA". Response::\n\n Date:\n Date: 1992\n Date: 1993\n OK\n\n ``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``\n Lists dates where artist name is "ABBA" and album name is "Gold:\n Greatest Hits". Response::\n\n Date: 1992\n OK\n\n ``list "genre" "artist" "The Rolling Stones"``\n Lists genres where artist name is "The Rolling Stones". 
Response::\n\n Genre:\n Genre: Rock\n OK\n\n *ncmpc:*\n\n - capitalizes the field argument.\n ' params = list(args) if (not params): raise exceptions.MpdArgError('too few arguments for "list"') field_arg = params.pop(0).lower() field = _LIST_MAPPING.get(field_arg) if (field is None): raise exceptions.MpdArgError(f'Unknown tag type: {field_arg}') query = None if (len(params) == 1): if (field != 'album'): raise exceptions.MpdArgError('should be "Album" for 3 arguments') if params[0].strip(): query = {'artist': params} else: try: query = _query_from_mpd_search_parameters(params, _SEARCH_MAPPING) except exceptions.MpdArgError as exc: exc.message = 'Unknown filter type' raise except ValueError: return name = _LIST_NAME_MAPPING[field] result = context.core.library.get_distinct(field, query) return [(name, value) for value in result.get()]<|docstring|>*musicpd.org, music database section:* ``list {TYPE} [ARTIST]`` Lists all tags of the specified type. ``TYPE`` should be ``album``, ``artist``, ``albumartist``, ``date``, or ``genre``. ``ARTIST`` is an optional parameter when type is ``album``, ``date``, or ``genre``. This filters the result list by an artist. *Clarifications:* The musicpd.org documentation for ``list`` is far from complete. The command also supports the following variant: ``list {TYPE} {QUERY}`` Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs of a field name and a value. If the ``QUERY`` consists of more than one pair, the pairs are AND-ed together to find the result. Examples of valid queries and what they should return: ``list "artist" "artist" "ABBA"`` List artists where the artist name is "ABBA". Response:: Artist: ABBA OK ``list "album" "artist" "ABBA"`` Lists albums where the artist name is "ABBA". Response:: Album: More ABBA Gold: More ABBA Hits Album: Absolute More Christmas Album: Gold: Greatest Hits OK ``list "artist" "album" "Gold: Greatest Hits"`` Lists artists where the album name is "Gold: Greatest Hits". 
Response:: Artist: ABBA OK ``list "artist" "artist" "ABBA" "artist" "TLC"`` Lists artists where the artist name is "ABBA" *and* "TLC". Should never match anything. Response:: OK ``list "date" "artist" "ABBA"`` Lists dates where artist name is "ABBA". Response:: Date: Date: 1992 Date: 1993 OK ``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"`` Lists dates where artist name is "ABBA" and album name is "Gold: Greatest Hits". Response:: Date: 1992 OK ``list "genre" "artist" "The Rolling Stones"`` Lists genres where artist name is "The Rolling Stones". Response:: Genre: Genre: Rock OK *ncmpc:* - capitalizes the field argument.<|endoftext|>
62aed483af246cda506e23eb940f6bf87b156cbd4fd7e3d71e93b3b7abb90e64
@protocol.commands.add('listall') def listall(context, uri=None): "\n *musicpd.org, music database section:*\n\n ``listall [URI]``\n\n Lists all songs and directories in ``URI``.\n\n Do not use this command. Do not manage a client-side copy of MPD's\n database. That is fragile and adds huge overhead. It will break with\n large databases. Instead, query MPD whenever you need something.\n\n\n .. warning:: This command is disabled by default in Mopidy installs.\n " result = [] for (path, track_ref) in context.browse(uri, lookup=False): if (not track_ref): result.append(('directory', path.lstrip('/'))) else: result.append(('file', track_ref.uri)) if (not result): raise exceptions.MpdNoExistError('Not found') return result
*musicpd.org, music database section:* ``listall [URI]`` Lists all songs and directories in ``URI``. Do not use this command. Do not manage a client-side copy of MPD's database. That is fragile and adds huge overhead. It will break with large databases. Instead, query MPD whenever you need something. .. warning:: This command is disabled by default in Mopidy installs.
mopidy_mpd/protocol/music_db.py
listall
NickHu/mopidy-mpd
68
python
@protocol.commands.add('listall') def listall(context, uri=None): "\n *musicpd.org, music database section:*\n\n ``listall [URI]``\n\n Lists all songs and directories in ``URI``.\n\n Do not use this command. Do not manage a client-side copy of MPD's\n database. That is fragile and adds huge overhead. It will break with\n large databases. Instead, query MPD whenever you need something.\n\n\n .. warning:: This command is disabled by default in Mopidy installs.\n " result = [] for (path, track_ref) in context.browse(uri, lookup=False): if (not track_ref): result.append(('directory', path.lstrip('/'))) else: result.append(('file', track_ref.uri)) if (not result): raise exceptions.MpdNoExistError('Not found') return result
@protocol.commands.add('listall') def listall(context, uri=None): "\n *musicpd.org, music database section:*\n\n ``listall [URI]``\n\n Lists all songs and directories in ``URI``.\n\n Do not use this command. Do not manage a client-side copy of MPD's\n database. That is fragile and adds huge overhead. It will break with\n large databases. Instead, query MPD whenever you need something.\n\n\n .. warning:: This command is disabled by default in Mopidy installs.\n " result = [] for (path, track_ref) in context.browse(uri, lookup=False): if (not track_ref): result.append(('directory', path.lstrip('/'))) else: result.append(('file', track_ref.uri)) if (not result): raise exceptions.MpdNoExistError('Not found') return result<|docstring|>*musicpd.org, music database section:* ``listall [URI]`` Lists all songs and directories in ``URI``. Do not use this command. Do not manage a client-side copy of MPD's database. That is fragile and adds huge overhead. It will break with large databases. Instead, query MPD whenever you need something. .. warning:: This command is disabled by default in Mopidy installs.<|endoftext|>
e64e16ff08dd65770c1c5d99858c7cbe71b49a3d5876ed2d5fcbef97da2e0fbb
@protocol.commands.add('listallinfo') def listallinfo(context, uri=None): "\n *musicpd.org, music database section:*\n\n ``listallinfo [URI]``\n\n Same as ``listall``, except it also returns metadata info in the\n same format as ``lsinfo``.\n\n Do not use this command. Do not manage a client-side copy of MPD's\n database. That is fragile and adds huge overhead. It will break with\n large databases. Instead, query MPD whenever you need something.\n\n\n .. warning:: This command is disabled by default in Mopidy installs.\n " result = [] for (path, lookup_future) in context.browse(uri): if (not lookup_future): result.append(('directory', path.lstrip('/'))) else: for tracks in lookup_future.get().values(): for track in tracks: result.extend(translator.track_to_mpd_format(track)) return result
*musicpd.org, music database section:* ``listallinfo [URI]`` Same as ``listall``, except it also returns metadata info in the same format as ``lsinfo``. Do not use this command. Do not manage a client-side copy of MPD's database. That is fragile and adds huge overhead. It will break with large databases. Instead, query MPD whenever you need something. .. warning:: This command is disabled by default in Mopidy installs.
mopidy_mpd/protocol/music_db.py
listallinfo
NickHu/mopidy-mpd
68
python
@protocol.commands.add('listallinfo') def listallinfo(context, uri=None): "\n *musicpd.org, music database section:*\n\n ``listallinfo [URI]``\n\n Same as ``listall``, except it also returns metadata info in the\n same format as ``lsinfo``.\n\n Do not use this command. Do not manage a client-side copy of MPD's\n database. That is fragile and adds huge overhead. It will break with\n large databases. Instead, query MPD whenever you need something.\n\n\n .. warning:: This command is disabled by default in Mopidy installs.\n " result = [] for (path, lookup_future) in context.browse(uri): if (not lookup_future): result.append(('directory', path.lstrip('/'))) else: for tracks in lookup_future.get().values(): for track in tracks: result.extend(translator.track_to_mpd_format(track)) return result
@protocol.commands.add('listallinfo') def listallinfo(context, uri=None): "\n *musicpd.org, music database section:*\n\n ``listallinfo [URI]``\n\n Same as ``listall``, except it also returns metadata info in the\n same format as ``lsinfo``.\n\n Do not use this command. Do not manage a client-side copy of MPD's\n database. That is fragile and adds huge overhead. It will break with\n large databases. Instead, query MPD whenever you need something.\n\n\n .. warning:: This command is disabled by default in Mopidy installs.\n " result = [] for (path, lookup_future) in context.browse(uri): if (not lookup_future): result.append(('directory', path.lstrip('/'))) else: for tracks in lookup_future.get().values(): for track in tracks: result.extend(translator.track_to_mpd_format(track)) return result<|docstring|>*musicpd.org, music database section:* ``listallinfo [URI]`` Same as ``listall``, except it also returns metadata info in the same format as ``lsinfo``. Do not use this command. Do not manage a client-side copy of MPD's database. That is fragile and adds huge overhead. It will break with large databases. Instead, query MPD whenever you need something. .. warning:: This command is disabled by default in Mopidy installs.<|endoftext|>
596eb395085bef34046c58058d2e9673045735215795d618a53d0f2e389b90ef
@protocol.commands.add('listfiles') def listfiles(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``listfiles [URI]``\n\n Lists the contents of the directory URI, including files are not\n recognized by MPD. URI can be a path relative to the music directory or\n an URI understood by one of the storage plugins. The response contains\n at least one line for each directory entry with the prefix "file: " or\n "directory: ", and may be followed by file attributes such as\n "Last-Modified" and "size".\n\n For example, "smb://SERVER" returns a list of all shares on the given\n SMB/CIFS server; "nfs://servername/path" obtains a directory listing\n from the NFS server.\n\n .. versionadded:: 0.19\n New in MPD protocol version 0.19\n ' raise exceptions.MpdNotImplemented
*musicpd.org, music database section:* ``listfiles [URI]`` Lists the contents of the directory URI, including files are not recognized by MPD. URI can be a path relative to the music directory or an URI understood by one of the storage plugins. The response contains at least one line for each directory entry with the prefix "file: " or "directory: ", and may be followed by file attributes such as "Last-Modified" and "size". For example, "smb://SERVER" returns a list of all shares on the given SMB/CIFS server; "nfs://servername/path" obtains a directory listing from the NFS server. .. versionadded:: 0.19 New in MPD protocol version 0.19
mopidy_mpd/protocol/music_db.py
listfiles
NickHu/mopidy-mpd
68
python
@protocol.commands.add('listfiles') def listfiles(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``listfiles [URI]``\n\n Lists the contents of the directory URI, including files are not\n recognized by MPD. URI can be a path relative to the music directory or\n an URI understood by one of the storage plugins. The response contains\n at least one line for each directory entry with the prefix "file: " or\n "directory: ", and may be followed by file attributes such as\n "Last-Modified" and "size".\n\n For example, "smb://SERVER" returns a list of all shares on the given\n SMB/CIFS server; "nfs://servername/path" obtains a directory listing\n from the NFS server.\n\n .. versionadded:: 0.19\n New in MPD protocol version 0.19\n ' raise exceptions.MpdNotImplemented
@protocol.commands.add('listfiles') def listfiles(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``listfiles [URI]``\n\n Lists the contents of the directory URI, including files are not\n recognized by MPD. URI can be a path relative to the music directory or\n an URI understood by one of the storage plugins. The response contains\n at least one line for each directory entry with the prefix "file: " or\n "directory: ", and may be followed by file attributes such as\n "Last-Modified" and "size".\n\n For example, "smb://SERVER" returns a list of all shares on the given\n SMB/CIFS server; "nfs://servername/path" obtains a directory listing\n from the NFS server.\n\n .. versionadded:: 0.19\n New in MPD protocol version 0.19\n ' raise exceptions.MpdNotImplemented<|docstring|>*musicpd.org, music database section:* ``listfiles [URI]`` Lists the contents of the directory URI, including files are not recognized by MPD. URI can be a path relative to the music directory or an URI understood by one of the storage plugins. The response contains at least one line for each directory entry with the prefix "file: " or "directory: ", and may be followed by file attributes such as "Last-Modified" and "size". For example, "smb://SERVER" returns a list of all shares on the given SMB/CIFS server; "nfs://servername/path" obtains a directory listing from the NFS server. .. versionadded:: 0.19 New in MPD protocol version 0.19<|endoftext|>
dbcd37f8565e324c30c621b219964766fcaee2ab99989e30262a31cf28191140
@protocol.commands.add('lsinfo') def lsinfo(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``lsinfo [URI]``\n\n Lists the contents of the directory ``URI``.\n\n When listing the root directory, this currently returns the list of\n stored playlists. This behavior is deprecated; use\n ``listplaylists`` instead.\n\n MPD returns the same result, including both playlists and the files and\n directories located at the root level, for both ``lsinfo``, ``lsinfo\n ""``, and ``lsinfo "/"``.\n ' result = [] for (path, lookup_future) in context.browse(uri, recursive=False): if (not lookup_future): result.append(('directory', path.lstrip('/'))) else: for tracks in lookup_future.get().values(): if tracks: result.extend(translator.track_to_mpd_format(tracks[0])) if (uri in (None, '', '/')): result.extend(protocol.stored_playlists.listplaylists(context)) return result
*musicpd.org, music database section:* ``lsinfo [URI]`` Lists the contents of the directory ``URI``. When listing the root directory, this currently returns the list of stored playlists. This behavior is deprecated; use ``listplaylists`` instead. MPD returns the same result, including both playlists and the files and directories located at the root level, for both ``lsinfo``, ``lsinfo ""``, and ``lsinfo "/"``.
mopidy_mpd/protocol/music_db.py
lsinfo
NickHu/mopidy-mpd
68
python
@protocol.commands.add('lsinfo') def lsinfo(context, uri=None): '\n    *musicpd.org, music database section:*\n\n        ``lsinfo [URI]``\n\n        Lists the contents of the directory ``URI``.\n\n        When listing the root directory, this currently returns the list of\n        stored playlists. This behavior is deprecated; use\n        ``listplaylists`` instead.\n\n    MPD returns the same result, including both playlists and the files and\n    directories located at the root level, for both ``lsinfo``, ``lsinfo\n    ""``, and ``lsinfo "/"``.\n    ' result = [] for (path, lookup_future) in context.browse(uri, recursive=False): if (not lookup_future): result.append(('directory', path.lstrip('/'))) else: for tracks in lookup_future.get().values(): if tracks: result.extend(translator.track_to_mpd_format(tracks[0])) if (uri in (None, '', '/')): result.extend(protocol.stored_playlists.listplaylists(context)) return result
@protocol.commands.add('lsinfo') def lsinfo(context, uri=None): '\n    *musicpd.org, music database section:*\n\n        ``lsinfo [URI]``\n\n        Lists the contents of the directory ``URI``.\n\n        When listing the root directory, this currently returns the list of\n        stored playlists. This behavior is deprecated; use\n        ``listplaylists`` instead.\n\n    MPD returns the same result, including both playlists and the files and\n    directories located at the root level, for both ``lsinfo``, ``lsinfo\n    ""``, and ``lsinfo "/"``.\n    ' result = [] for (path, lookup_future) in context.browse(uri, recursive=False): if (not lookup_future): result.append(('directory', path.lstrip('/'))) else: for tracks in lookup_future.get().values(): if tracks: result.extend(translator.track_to_mpd_format(tracks[0])) if (uri in (None, '', '/')): result.extend(protocol.stored_playlists.listplaylists(context)) return result<|docstring|>*musicpd.org, music database section:* ``lsinfo [URI]`` Lists the contents of the directory ``URI``. When listing the root directory, this currently returns the list of stored playlists. This behavior is deprecated; use ``listplaylists`` instead. MPD returns the same result, including both playlists and the files and directories located at the root level, for both ``lsinfo``, ``lsinfo ""``, and ``lsinfo "/"``.<|endoftext|>
97ecb905566731a15253e1bc1826ff91cd780c8820e691278a167a57431bfb5a
@protocol.commands.add('rescan') def rescan(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``rescan [URI]``\n\n Same as ``update``, but also rescans unmodified files.\n ' return {'updating_db': 0}
*musicpd.org, music database section:* ``rescan [URI]`` Same as ``update``, but also rescans unmodified files.
mopidy_mpd/protocol/music_db.py
rescan
NickHu/mopidy-mpd
68
python
@protocol.commands.add('rescan') def rescan(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``rescan [URI]``\n\n Same as ``update``, but also rescans unmodified files.\n ' return {'updating_db': 0}
@protocol.commands.add('rescan') def rescan(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``rescan [URI]``\n\n Same as ``update``, but also rescans unmodified files.\n ' return {'updating_db': 0}<|docstring|>*musicpd.org, music database section:* ``rescan [URI]`` Same as ``update``, but also rescans unmodified files.<|endoftext|>
35e9c7fad4b4013667424776b361b47439d349a03d6ac41879bfecc8022932cb
@protocol.commands.add('search') def search(context, *args): '\n *musicpd.org, music database section:*\n\n ``search {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT``. Parameters have the same\n meaning as for ``find``, except that search is not case sensitive.\n\n *GMPC:*\n\n - uses the undocumented field ``any``.\n - searches for multiple words like this::\n\n search any "foo" any "bar" any "baz"\n\n *ncmpc:*\n\n - capitalizes the field argument.\n\n *ncmpcpp:*\n\n - also uses the search type "date".\n - uses "file" instead of "filename".\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() artists = [_artist_as_track(a) for a in _get_artists(results)] albums = [_album_as_track(a) for a in _get_albums(results)] tracks = _get_tracks(results) return translator.tracks_to_mpd_format(((artists + albums) + tracks))
*musicpd.org, music database section:* ``search {TYPE} {WHAT} [...]`` Searches for any song that contains ``WHAT``. Parameters have the same meaning as for ``find``, except that search is not case sensitive. *GMPC:* - uses the undocumented field ``any``. - searches for multiple words like this:: search any "foo" any "bar" any "baz" *ncmpc:* - capitalizes the field argument. *ncmpcpp:* - also uses the search type "date". - uses "file" instead of "filename".
mopidy_mpd/protocol/music_db.py
search
NickHu/mopidy-mpd
68
python
@protocol.commands.add('search') def search(context, *args): '\n *musicpd.org, music database section:*\n\n ``search {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT``. Parameters have the same\n meaning as for ``find``, except that search is not case sensitive.\n\n *GMPC:*\n\n - uses the undocumented field ``any``.\n - searches for multiple words like this::\n\n search any "foo" any "bar" any "baz"\n\n *ncmpc:*\n\n - capitalizes the field argument.\n\n *ncmpcpp:*\n\n - also uses the search type "date".\n - uses "file" instead of "filename".\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() artists = [_artist_as_track(a) for a in _get_artists(results)] albums = [_album_as_track(a) for a in _get_albums(results)] tracks = _get_tracks(results) return translator.tracks_to_mpd_format(((artists + albums) + tracks))
@protocol.commands.add('search') def search(context, *args): '\n *musicpd.org, music database section:*\n\n ``search {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT``. Parameters have the same\n meaning as for ``find``, except that search is not case sensitive.\n\n *GMPC:*\n\n - uses the undocumented field ``any``.\n - searches for multiple words like this::\n\n search any "foo" any "bar" any "baz"\n\n *ncmpc:*\n\n - capitalizes the field argument.\n\n *ncmpcpp:*\n\n - also uses the search type "date".\n - uses "file" instead of "filename".\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() artists = [_artist_as_track(a) for a in _get_artists(results)] albums = [_album_as_track(a) for a in _get_albums(results)] tracks = _get_tracks(results) return translator.tracks_to_mpd_format(((artists + albums) + tracks))<|docstring|>*musicpd.org, music database section:* ``search {TYPE} {WHAT} [...]`` Searches for any song that contains ``WHAT``. Parameters have the same meaning as for ``find``, except that search is not case sensitive. *GMPC:* - uses the undocumented field ``any``. - searches for multiple words like this:: search any "foo" any "bar" any "baz" *ncmpc:* - capitalizes the field argument. *ncmpcpp:* - also uses the search type "date". - uses "file" instead of "filename".<|endoftext|>
ca55044fdd1c0cf0cc312fc5451e1a877f52e506e805fe965264c88717870012
@protocol.commands.add('searchadd') def searchadd(context, *args): '\n *musicpd.org, music database section:*\n\n ``searchadd {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds\n them to current playlist.\n\n Parameters have the same meaning as for ``find``, except that search is\n not case sensitive.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() context.core.tracklist.add(uris=[track.uri for track in _get_tracks(results)]).get()
*musicpd.org, music database section:* ``searchadd {TYPE} {WHAT} [...]`` Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds them to current playlist. Parameters have the same meaning as for ``find``, except that search is not case sensitive.
mopidy_mpd/protocol/music_db.py
searchadd
NickHu/mopidy-mpd
68
python
@protocol.commands.add('searchadd') def searchadd(context, *args): '\n *musicpd.org, music database section:*\n\n ``searchadd {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds\n them to current playlist.\n\n Parameters have the same meaning as for ``find``, except that search is\n not case sensitive.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() context.core.tracklist.add(uris=[track.uri for track in _get_tracks(results)]).get()
@protocol.commands.add('searchadd') def searchadd(context, *args): '\n *musicpd.org, music database section:*\n\n ``searchadd {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds\n them to current playlist.\n\n Parameters have the same meaning as for ``find``, except that search is\n not case sensitive.\n ' try: query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() context.core.tracklist.add(uris=[track.uri for track in _get_tracks(results)]).get()<|docstring|>*musicpd.org, music database section:* ``searchadd {TYPE} {WHAT} [...]`` Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds them to current playlist. Parameters have the same meaning as for ``find``, except that search is not case sensitive.<|endoftext|>
65897b9a2503c8b347777e282bdc980ae894ffa9e06f14226f3f0abede6629e0
@protocol.commands.add('searchaddpl') def searchaddpl(context, *args): "\n *musicpd.org, music database section:*\n\n ``searchaddpl {NAME} {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds\n them to the playlist named ``NAME``.\n\n If a playlist by that name doesn't exist it is created.\n\n Parameters have the same meaning as for ``find``, except that search is\n not case sensitive.\n " parameters = list(args) if (not parameters): raise exceptions.MpdArgError('incorrect arguments') playlist_name = parameters.pop(0) try: query = _query_from_mpd_search_parameters(parameters, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() uri = context.lookup_playlist_uri_from_name(playlist_name) playlist = ((uri is not None) and context.core.playlists.lookup(uri).get()) if (not playlist): playlist = context.core.playlists.create(playlist_name).get() tracks = (list(playlist.tracks) + _get_tracks(results)) playlist = playlist.replace(tracks=tracks) context.core.playlists.save(playlist)
*musicpd.org, music database section:* ``searchaddpl {NAME} {TYPE} {WHAT} [...]`` Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds them to the playlist named ``NAME``. If a playlist by that name doesn't exist it is created. Parameters have the same meaning as for ``find``, except that search is not case sensitive.
mopidy_mpd/protocol/music_db.py
searchaddpl
NickHu/mopidy-mpd
68
python
@protocol.commands.add('searchaddpl') def searchaddpl(context, *args): "\n *musicpd.org, music database section:*\n\n ``searchaddpl {NAME} {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds\n them to the playlist named ``NAME``.\n\n If a playlist by that name doesn't exist it is created.\n\n Parameters have the same meaning as for ``find``, except that search is\n not case sensitive.\n " parameters = list(args) if (not parameters): raise exceptions.MpdArgError('incorrect arguments') playlist_name = parameters.pop(0) try: query = _query_from_mpd_search_parameters(parameters, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() uri = context.lookup_playlist_uri_from_name(playlist_name) playlist = ((uri is not None) and context.core.playlists.lookup(uri).get()) if (not playlist): playlist = context.core.playlists.create(playlist_name).get() tracks = (list(playlist.tracks) + _get_tracks(results)) playlist = playlist.replace(tracks=tracks) context.core.playlists.save(playlist)
@protocol.commands.add('searchaddpl') def searchaddpl(context, *args): "\n *musicpd.org, music database section:*\n\n ``searchaddpl {NAME} {TYPE} {WHAT} [...]``\n\n Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds\n them to the playlist named ``NAME``.\n\n If a playlist by that name doesn't exist it is created.\n\n Parameters have the same meaning as for ``find``, except that search is\n not case sensitive.\n " parameters = list(args) if (not parameters): raise exceptions.MpdArgError('incorrect arguments') playlist_name = parameters.pop(0) try: query = _query_from_mpd_search_parameters(parameters, _SEARCH_MAPPING) except ValueError: return results = context.core.library.search(query).get() uri = context.lookup_playlist_uri_from_name(playlist_name) playlist = ((uri is not None) and context.core.playlists.lookup(uri).get()) if (not playlist): playlist = context.core.playlists.create(playlist_name).get() tracks = (list(playlist.tracks) + _get_tracks(results)) playlist = playlist.replace(tracks=tracks) context.core.playlists.save(playlist)<|docstring|>*musicpd.org, music database section:* ``searchaddpl {NAME} {TYPE} {WHAT} [...]`` Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds them to the playlist named ``NAME``. If a playlist by that name doesn't exist it is created. Parameters have the same meaning as for ``find``, except that search is not case sensitive.<|endoftext|>
50d0de367ee5f8575d31a4cd332087ecd4f229d6a1de559dacd9d7339914550d
@protocol.commands.add('update') def update(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``update [URI]``\n\n Updates the music database: find new files, remove deleted files,\n update modified files.\n\n ``URI`` is a particular directory or song/file to update. If you do\n not specify it, everything is updated.\n\n Prints ``updating_db: JOBID`` where ``JOBID`` is a positive number\n identifying the update job. You can read the current job id in the\n ``status`` response.\n ' return {'updating_db': 0}
*musicpd.org, music database section:* ``update [URI]`` Updates the music database: find new files, remove deleted files, update modified files. ``URI`` is a particular directory or song/file to update. If you do not specify it, everything is updated. Prints ``updating_db: JOBID`` where ``JOBID`` is a positive number identifying the update job. You can read the current job id in the ``status`` response.
mopidy_mpd/protocol/music_db.py
update
NickHu/mopidy-mpd
68
python
@protocol.commands.add('update') def update(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``update [URI]``\n\n Updates the music database: find new files, remove deleted files,\n update modified files.\n\n ``URI`` is a particular directory or song/file to update. If you do\n not specify it, everything is updated.\n\n Prints ``updating_db: JOBID`` where ``JOBID`` is a positive number\n identifying the update job. You can read the current job id in the\n ``status`` response.\n ' return {'updating_db': 0}
@protocol.commands.add('update') def update(context, uri=None): '\n *musicpd.org, music database section:*\n\n ``update [URI]``\n\n Updates the music database: find new files, remove deleted files,\n update modified files.\n\n ``URI`` is a particular directory or song/file to update. If you do\n not specify it, everything is updated.\n\n Prints ``updating_db: JOBID`` where ``JOBID`` is a positive number\n identifying the update job. You can read the current job id in the\n ``status`` response.\n ' return {'updating_db': 0}<|docstring|>*musicpd.org, music database section:* ``update [URI]`` Updates the music database: find new files, remove deleted files, update modified files. ``URI`` is a particular directory or song/file to update. If you do not specify it, everything is updated. Prints ``updating_db: JOBID`` where ``JOBID`` is a positive number identifying the update job. You can read the current job id in the ``status`` response.<|endoftext|>
db44dd0dced2635b27ae40adbebc0cc8656f95a16fca538aa6ee58042b7913ae
def readcomments(context, uri): '\n *musicpd.org, music database section:*\n\n ``readcomments [URI]``\n\n Read "comments" (i.e. key-value pairs) from the file specified by\n "URI". This "URI" can be a path relative to the music directory or a\n URL in the form "file:///foo/bar.ogg".\n\n This command may be used to list metadata of remote files (e.g. URI\n beginning with "http://" or "smb://").\n\n The response consists of lines in the form "KEY: VALUE". Comments with\n suspicious characters (e.g. newlines) are ignored silently.\n\n The meaning of these depends on the codec, and not all decoder plugins\n support it. For example, on Ogg files, this lists the Vorbis comments.\n ' pass
*musicpd.org, music database section:* ``readcomments [URI]`` Read "comments" (i.e. key-value pairs) from the file specified by "URI". This "URI" can be a path relative to the music directory or a URL in the form "file:///foo/bar.ogg". This command may be used to list metadata of remote files (e.g. URI beginning with "http://" or "smb://"). The response consists of lines in the form "KEY: VALUE". Comments with suspicious characters (e.g. newlines) are ignored silently. The meaning of these depends on the codec, and not all decoder plugins support it. For example, on Ogg files, this lists the Vorbis comments.
mopidy_mpd/protocol/music_db.py
readcomments
NickHu/mopidy-mpd
68
python
def readcomments(context, uri): '\n *musicpd.org, music database section:*\n\n ``readcomments [URI]``\n\n Read "comments" (i.e. key-value pairs) from the file specified by\n "URI". This "URI" can be a path relative to the music directory or a\n URL in the form "file:///foo/bar.ogg".\n\n This command may be used to list metadata of remote files (e.g. URI\n beginning with "http://" or "smb://").\n\n The response consists of lines in the form "KEY: VALUE". Comments with\n suspicious characters (e.g. newlines) are ignored silently.\n\n The meaning of these depends on the codec, and not all decoder plugins\n support it. For example, on Ogg files, this lists the Vorbis comments.\n ' pass
def readcomments(context, uri): '\n *musicpd.org, music database section:*\n\n ``readcomments [URI]``\n\n Read "comments" (i.e. key-value pairs) from the file specified by\n "URI". This "URI" can be a path relative to the music directory or a\n URL in the form "file:///foo/bar.ogg".\n\n This command may be used to list metadata of remote files (e.g. URI\n beginning with "http://" or "smb://").\n\n The response consists of lines in the form "KEY: VALUE". Comments with\n suspicious characters (e.g. newlines) are ignored silently.\n\n The meaning of these depends on the codec, and not all decoder plugins\n support it. For example, on Ogg files, this lists the Vorbis comments.\n ' pass<|docstring|>*musicpd.org, music database section:* ``readcomments [URI]`` Read "comments" (i.e. key-value pairs) from the file specified by "URI". This "URI" can be a path relative to the music directory or a URL in the form "file:///foo/bar.ogg". This command may be used to list metadata of remote files (e.g. URI beginning with "http://" or "smb://"). The response consists of lines in the form "KEY: VALUE". Comments with suspicious characters (e.g. newlines) are ignored silently. The meaning of these depends on the codec, and not all decoder plugins support it. For example, on Ogg files, this lists the Vorbis comments.<|endoftext|>
2301bfa7f5a2204897e40f2a6bdf40f16c90977d7183b183664736fa091fce05
def __init__(self, methods): '\n methods : list\n list of tuples, (transform_name, kwargs)\n ' self._pipeline = [] for (method, kwargs) in methods: trans_cls = self.TRANSFORMER_MAP.get(method, None) if (trans_cls is None): raise ValueError('Unknown transformation method: {}'.format(method)) transformer = trans_cls(**kwargs) self._pipeline.append(transformer)
methods : list list of tuples, (transform_name, kwargs)
utensor_cgen/transformer/pipeline.py
__init__
dboyliao/utensor_cgen
1
python
def __init__(self, methods): '\n methods : list\n list of tuples, (transform_name, kwargs)\n ' self._pipeline = [] for (method, kwargs) in methods: trans_cls = self.TRANSFORMER_MAP.get(method, None) if (trans_cls is None): raise ValueError('Unknown transformation method: {}'.format(method)) transformer = trans_cls(**kwargs) self._pipeline.append(transformer)
def __init__(self, methods): '\n methods : list\n list of tuples, (transform_name, kwargs)\n ' self._pipeline = [] for (method, kwargs) in methods: trans_cls = self.TRANSFORMER_MAP.get(method, None) if (trans_cls is None): raise ValueError('Unknown transformation method: {}'.format(method)) transformer = trans_cls(**kwargs) self._pipeline.append(transformer)<|docstring|>methods : list list of tuples, (transform_name, kwargs)<|endoftext|>
b498d4115a85e1a991bf5837bfe641d48098666476e47ef7fdf8d261f4639c0d
def closure(exc): '\n Return a function which will accept any arguments\n but raise the exception when called.\n\n Parameters\n ------------\n exc : Exception\n Will be raised later\n\n Returns\n -------------\n failed : function\n When called will raise `exc`\n ' def failed(*args, **kwargs): raise exc return failed
Return a function which will accept any arguments but raise the exception when called. Parameters ------------ exc : Exception Will be raised later Returns ------------- failed : function When called will raise `exc`
basis/trimesh_new/exceptions.py
closure
liang324/wrs
1,882
python
def closure(exc): '\n Return a function which will accept any arguments\n but raise the exception when called.\n\n Parameters\n ------------\n exc : Exception\n Will be raised later\n\n Returns\n -------------\n failed : function\n When called will raise `exc`\n ' def failed(*args, **kwargs): raise exc return failed
def closure(exc): '\n Return a function which will accept any arguments\n but raise the exception when called.\n\n Parameters\n ------------\n exc : Exception\n Will be raised later\n\n Returns\n -------------\n failed : function\n When called will raise `exc`\n ' def failed(*args, **kwargs): raise exc return failed<|docstring|>Return a function which will accept any arguments but raise the exception when called. Parameters ------------ exc : Exception Will be raised later Returns ------------- failed : function When called will raise `exc`<|endoftext|>
ad3eb4d80e4ac034edd6bcc0005ee39701e8a77fa1c9e6c5182e5ddee2611514
def pairwise(iterable): 's -> (s0,s1), (s1,s2), (s2, s3), ...' (a, b) = tee(iterable) next(b, None) return zip(a, b)
s -> (s0,s1), (s1,s2), (s2, s3), ...
check.py
pairwise
EvanTheB/joint_call_shards
0
python
def pairwise(iterable): (a, b) = tee(iterable) next(b, None) return zip(a, b)
def pairwise(iterable): (a, b) = tee(iterable) next(b, None) return zip(a, b)<|docstring|>s -> (s0,s1), (s1,s2), (s2, s3), ...<|endoftext|>
c1fc7380472410805a40296998532e274b0668c3cb50a64bd1be8ea4a435a892
def save(self, *args, **kwargs): '\n Create a unique slug from title - append an index and increment if it\n already exists.\n ' if (not self.slug): slug = slugify(self) self.slug = unique_slug(self.__class__.objects, 'slug', slug) super(AbstractForm, self).save(*args, **kwargs)
Create a unique slug from title - append an index and increment if it already exists.
forms_builder/forms/models.py
save
moaxey/django-forms-builder
0
python
def save(self, *args, **kwargs): '\n Create a unique slug from title - append an index and increment if it\n already exists.\n ' if (not self.slug): slug = slugify(self) self.slug = unique_slug(self.__class__.objects, 'slug', slug) super(AbstractForm, self).save(*args, **kwargs)
def save(self, *args, **kwargs): '\n Create a unique slug from title - append an index and increment if it\n already exists.\n ' if (not self.slug): slug = slugify(self) self.slug = unique_slug(self.__class__.objects, 'slug', slug) super(AbstractForm, self).save(*args, **kwargs)<|docstring|>Create a unique slug from title - append an index and increment if it already exists.<|endoftext|>
0631ca0dd4de4a9130e229a902fdf37b5355730ab2caf4bb43b4dde31698d18e
def total_entries(self): '\n Called by the admin list view where the queryset is annotated\n with the number of entries.\n ' return self.total_entries
Called by the admin list view where the queryset is annotated with the number of entries.
forms_builder/forms/models.py
total_entries
moaxey/django-forms-builder
0
python
def total_entries(self): '\n Called by the admin list view where the queryset is annotated\n with the number of entries.\n ' return self.total_entries
def total_entries(self): '\n Called by the admin list view where the queryset is annotated\n with the number of entries.\n ' return self.total_entries<|docstring|>Called by the admin list view where the queryset is annotated with the number of entries.<|endoftext|>
230e4c00161e1671c66e9c8f136fa5d5777042c9477d30ca93bb2fe7cee9ba5e
def get_choices(self): '\n Parse a comma separated choice string into a list of choices taking\n into account quoted choices using the ``settings.CHOICES_QUOTE`` and\n ``settings.CHOICES_UNQUOTE`` settings.\n ' choice = '' quoted = False for char in self.choices: if ((not quoted) and (char == settings.CHOICES_QUOTE)): quoted = True elif (quoted and (char == settings.CHOICES_UNQUOTE)): quoted = False elif ((char == ',') and (not quoted)): choice = choice.strip() if choice: (yield (choice, choice)) choice = '' else: choice += char choice = choice.strip() if choice: (yield (choice, choice))
Parse a comma separated choice string into a list of choices taking into account quoted choices using the ``settings.CHOICES_QUOTE`` and ``settings.CHOICES_UNQUOTE`` settings.
forms_builder/forms/models.py
get_choices
moaxey/django-forms-builder
0
python
def get_choices(self): '\n Parse a comma separated choice string into a list of choices taking\n into account quoted choices using the ``settings.CHOICES_QUOTE`` and\n ``settings.CHOICES_UNQUOTE`` settings.\n ' choice = quoted = False for char in self.choices: if ((not quoted) and (char == settings.CHOICES_QUOTE)): quoted = True elif (quoted and (char == settings.CHOICES_UNQUOTE)): quoted = False elif ((char == ',') and (not quoted)): choice = choice.strip() if choice: (yield (choice, choice)) choice = else: choice += char choice = choice.strip() if choice: (yield (choice, choice))
def get_choices(self): '\n Parse a comma separated choice string into a list of choices taking\n into account quoted choices using the ``settings.CHOICES_QUOTE`` and\n ``settings.CHOICES_UNQUOTE`` settings.\n ' choice = quoted = False for char in self.choices: if ((not quoted) and (char == settings.CHOICES_QUOTE)): quoted = True elif (quoted and (char == settings.CHOICES_UNQUOTE)): quoted = False elif ((char == ',') and (not quoted)): choice = choice.strip() if choice: (yield (choice, choice)) choice = else: choice += char choice = choice.strip() if choice: (yield (choice, choice))<|docstring|>Parse a comma separated choice string into a list of choices taking into account quoted choices using the ``settings.CHOICES_QUOTE`` and ``settings.CHOICES_UNQUOTE`` settings.<|endoftext|>
8c2a132124506995e24056090b061361cdd4d245bee86f33504d4e71989d6fa1
def is_a(self, *args): "\n Helper that returns True if the field's type is given in any arg.\n " return (self.field_type in args)
Helper that returns True if the field's type is given in any arg.
forms_builder/forms/models.py
is_a
moaxey/django-forms-builder
0
python
def is_a(self, *args): "\n \n " return (self.field_type in args)
def is_a(self, *args): "\n \n " return (self.field_type in args)<|docstring|>Helper that returns True if the field's type is given in any arg.<|endoftext|>
e5dd028e1773c76e5f92906966c63f2758bd18b5b45e9d6c168f5b29b0ad0a79
def maximise(cost_function, initial_guess, algorithm, dtype=None, **options): "\n Calculates the inputs that maximise a real-valued cost function.\n\n The `cost_function` is considered either a function of real variables (and the real maximiser is returned)\n or as a function of complex variables (and the complex maximiser is returned), depending on `dtype`\n\n The `cost_function` must take a single argument, which can be an arbitrarily nested container structure\n of real or complex (or mixed) scalars and/or arrays.\n e.g. a dict of lists of array, or just a single array or number.\n\n The cost function must be traceable by jax.grad\n\n\n Parameters\n ----------\n cost_function : callable\n The function to be maximised\n\n ``cost_function(x) -> float``\n\n where `x` is an arbitrary pytree of scalars and/or arrays\n initial_guess : pytree\n The initial best guess. `cost_function(initial_guess)` must be valid.\n algorithm : str, optional\n Which algorithm to be uses. currently supported:\n\n - 'L-BFGS' see lbfgs.py\n - 'GD' see gradient_descent.py\n\n defaults to 'L-BFGS'\n dtype : jax.numpy.dtype or pytree, optional\n The datatype for the arguments of `cost_function`.\n Note that this determines if `cost_function` is considered a function of real or of complex variables.\n Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess`\n or None: per default, the dtype of `initial_guess` is used.\n options : dict\n Algorithm specific keyword arguments.\n See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`)\n for details.\n All algorithms accept these generic options:\n\n max_iter : int or None\n Maximum number of iterations. 
Default: algorithm specific.\n Note that the meaning of an iteration, and in particular the number of calls per iteration of\n `cost_function` or its gradient can be different for different algorithms.\n max_fun_evals : int or None\n Maximum number of function evaluations.\n Default: no restriction.\n max_gad_evals : int or None\n Maximum number of gradient evaluations.\n Default: no restriction.\n cost_print_name : str\n A string representation for the name of `cost_function` function-values for logging.\n Default: `Cost`\n display_fun : callable or None\n Function that is called to display convergence info after each iteration.\n or `None`: no display.\n Default: built-in `print` function\n callback : callable or None\n Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k`\n including the initial guess (k=0).\n or `None` (default): No callback.\n This allows, e.g., monitoring of the convergence.\n\n Returns\n -------\n x_optim : pytree\n Same structure as `initial_guess`. The optimal input to `cost_function`, that maximises it\n cost_optim : float\n The optimal value `cost_function(x_optim)`\n info : dict\n A dictionary of convergence info. Keys are algorithm-specific but generically include\n\n converged : bool\n If the algorithm was successful\n reason : str\n A short description of why the algorithm terminated\n\n " return _minimise(cost_function, initial_guess, algorithm, dtype, maximising=True, **options)
Calculates the inputs that maximise a real-valued cost function. The `cost_function` is considered either a function of real variables (and the real maximiser is returned) or as a function of complex variables (and the complex maximiser is returned), depending on `dtype` The `cost_function` must take a single argument, which can be an arbitrarily nested container structure of real or complex (or mixed) scalars and/or arrays. e.g. a dict of lists of array, or just a single array or number. The cost function must be traceable by jax.grad Parameters ---------- cost_function : callable The function to be maximised ``cost_function(x) -> float`` where `x` is an arbitrary pytree of scalars and/or arrays initial_guess : pytree The initial best guess. `cost_function(initial_guess)` must be valid. algorithm : str, optional Which algorithm to be uses. currently supported: - 'L-BFGS' see lbfgs.py - 'GD' see gradient_descent.py defaults to 'L-BFGS' dtype : jax.numpy.dtype or pytree, optional The datatype for the arguments of `cost_function`. Note that this determines if `cost_function` is considered a function of real or of complex variables. Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess` or None: per default, the dtype of `initial_guess` is used. options : dict Algorithm specific keyword arguments. See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`) for details. All algorithms accept these generic options: max_iter : int or None Maximum number of iterations. Default: algorithm specific. Note that the meaning of an iteration, and in particular the number of calls per iteration of `cost_function` or its gradient can be different for different algorithms. max_fun_evals : int or None Maximum number of function evaluations. Default: no restriction. max_gad_evals : int or None Maximum number of gradient evaluations. Default: no restriction. 
cost_print_name : str A string representation for the name of `cost_function` function-values for logging. Default: `Cost` display_fun : callable or None Function that is called to display convergence info after each iteration. or `None`: no display. Default: built-in `print` function callback : callable or None Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k` including the initial guess (k=0). or `None` (default): No callback. This allows, e.g., monitoring of the convergence. Returns ------- x_optim : pytree Same structure as `initial_guess`. The optimal input to `cost_function`, that maximises it cost_optim : float The optimal value `cost_function(x_optim)` info : dict A dictionary of convergence info. Keys are algorithm-specific but generically include converged : bool If the algorithm was successful reason : str A short description of why the algorithm terminated
jax_optimise/main.py
maximise
Jakob-Unfried/msc-legacy
1
python
def maximise(cost_function, initial_guess, algorithm, dtype=None, **options): "\n Calculates the inputs that maximise a real-valued cost function.\n\n The `cost_function` is considered either a function of real variables (and the real maximiser is returned)\n or as a function of complex variables (and the complex maximiser is returned), depending on `dtype`\n\n The `cost_function` must take a single argument, which can be an arbitrarily nested container structure\n of real or complex (or mixed) scalars and/or arrays.\n e.g. a dict of lists of array, or just a single array or number.\n\n The cost function must be traceable by jax.grad\n\n\n Parameters\n ----------\n cost_function : callable\n The function to be maximised\n\n ``cost_function(x) -> float``\n\n where `x` is an arbitrary pytree of scalars and/or arrays\n initial_guess : pytree\n The initial best guess. `cost_function(initial_guess)` must be valid.\n algorithm : str, optional\n Which algorithm to be uses. currently supported:\n\n - 'L-BFGS' see lbfgs.py\n - 'GD' see gradient_descent.py\n\n defaults to 'L-BFGS'\n dtype : jax.numpy.dtype or pytree, optional\n The datatype for the arguments of `cost_function`.\n Note that this determines if `cost_function` is considered a function of real or of complex variables.\n Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess`\n or None: per default, the dtype of `initial_guess` is used.\n options : dict\n Algorithm specific keyword arguments.\n See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`)\n for details.\n All algorithms accept these generic options:\n\n max_iter : int or None\n Maximum number of iterations. 
Default: algorithm specific.\n Note that the meaning of an iteration, and in particular the number of calls per iteration of\n `cost_function` or its gradient can be different for different algorithms.\n max_fun_evals : int or None\n Maximum number of function evaluations.\n Default: no restriction.\n max_gad_evals : int or None\n Maximum number of gradient evaluations.\n Default: no restriction.\n cost_print_name : str\n A string representation for the name of `cost_function` function-values for logging.\n Default: `Cost`\n display_fun : callable or None\n Function that is called to display convergence info after each iteration.\n or `None`: no display.\n Default: built-in `print` function\n callback : callable or None\n Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k`\n including the initial guess (k=0).\n or `None` (default): No callback.\n This allows, e.g., monitoring of the convergence.\n\n Returns\n -------\n x_optim : pytree\n Same structure as `initial_guess`. The optimal input to `cost_function`, that maximises it\n cost_optim : float\n The optimal value `cost_function(x_optim)`\n info : dict\n A dictionary of convergence info. Keys are algorithm-specific but generically include\n\n converged : bool\n If the algorithm was successful\n reason : str\n A short description of why the algorithm terminated\n\n " return _minimise(cost_function, initial_guess, algorithm, dtype, maximising=True, **options)
def maximise(cost_function, initial_guess, algorithm, dtype=None, **options): "\n Calculates the inputs that maximise a real-valued cost function.\n\n The `cost_function` is considered either a function of real variables (and the real maximiser is returned)\n or as a function of complex variables (and the complex maximiser is returned), depending on `dtype`\n\n The `cost_function` must take a single argument, which can be an arbitrarily nested container structure\n of real or complex (or mixed) scalars and/or arrays.\n e.g. a dict of lists of array, or just a single array or number.\n\n The cost function must be traceable by jax.grad\n\n\n Parameters\n ----------\n cost_function : callable\n The function to be maximised\n\n ``cost_function(x) -> float``\n\n where `x` is an arbitrary pytree of scalars and/or arrays\n initial_guess : pytree\n The initial best guess. `cost_function(initial_guess)` must be valid.\n algorithm : str, optional\n Which algorithm to be uses. currently supported:\n\n - 'L-BFGS' see lbfgs.py\n - 'GD' see gradient_descent.py\n\n defaults to 'L-BFGS'\n dtype : jax.numpy.dtype or pytree, optional\n The datatype for the arguments of `cost_function`.\n Note that this determines if `cost_function` is considered a function of real or of complex variables.\n Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess`\n or None: per default, the dtype of `initial_guess` is used.\n options : dict\n Algorithm specific keyword arguments.\n See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`)\n for details.\n All algorithms accept these generic options:\n\n max_iter : int or None\n Maximum number of iterations. 
Default: algorithm specific.\n Note that the meaning of an iteration, and in particular the number of calls per iteration of\n `cost_function` or its gradient can be different for different algorithms.\n max_fun_evals : int or None\n Maximum number of function evaluations.\n Default: no restriction.\n max_gad_evals : int or None\n Maximum number of gradient evaluations.\n Default: no restriction.\n cost_print_name : str\n A string representation for the name of `cost_function` function-values for logging.\n Default: `Cost`\n display_fun : callable or None\n Function that is called to display convergence info after each iteration.\n or `None`: no display.\n Default: built-in `print` function\n callback : callable or None\n Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k`\n including the initial guess (k=0).\n or `None` (default): No callback.\n This allows, e.g., monitoring of the convergence.\n\n Returns\n -------\n x_optim : pytree\n Same structure as `initial_guess`. The optimal input to `cost_function`, that maximises it\n cost_optim : float\n The optimal value `cost_function(x_optim)`\n info : dict\n A dictionary of convergence info. Keys are algorithm-specific but generically include\n\n converged : bool\n If the algorithm was successful\n reason : str\n A short description of why the algorithm terminated\n\n " return _minimise(cost_function, initial_guess, algorithm, dtype, maximising=True, **options)<|docstring|>Calculates the inputs that maximise a real-valued cost function. The `cost_function` is considered either a function of real variables (and the real maximiser is returned) or as a function of complex variables (and the complex maximiser is returned), depending on `dtype` The `cost_function` must take a single argument, which can be an arbitrarily nested container structure of real or complex (or mixed) scalars and/or arrays. e.g. a dict of lists of array, or just a single array or number. 
The cost function must be traceable by jax.grad Parameters ---------- cost_function : callable The function to be maximised ``cost_function(x) -> float`` where `x` is an arbitrary pytree of scalars and/or arrays initial_guess : pytree The initial best guess. `cost_function(initial_guess)` must be valid. algorithm : str, optional Which algorithm to be uses. currently supported: - 'L-BFGS' see lbfgs.py - 'GD' see gradient_descent.py defaults to 'L-BFGS' dtype : jax.numpy.dtype or pytree, optional The datatype for the arguments of `cost_function`. Note that this determines if `cost_function` is considered a function of real or of complex variables. Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess` or None: per default, the dtype of `initial_guess` is used. options : dict Algorithm specific keyword arguments. See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`) for details. All algorithms accept these generic options: max_iter : int or None Maximum number of iterations. Default: algorithm specific. Note that the meaning of an iteration, and in particular the number of calls per iteration of `cost_function` or its gradient can be different for different algorithms. max_fun_evals : int or None Maximum number of function evaluations. Default: no restriction. max_gad_evals : int or None Maximum number of gradient evaluations. Default: no restriction. cost_print_name : str A string representation for the name of `cost_function` function-values for logging. Default: `Cost` display_fun : callable or None Function that is called to display convergence info after each iteration. or `None`: no display. Default: built-in `print` function callback : callable or None Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k` including the initial guess (k=0). or `None` (default): No callback. 
This allows, e.g., monitoring of the convergence. Returns ------- x_optim : pytree Same structure as `initial_guess`. The optimal input to `cost_function`, that maximises it cost_optim : float The optimal value `cost_function(x_optim)` info : dict A dictionary of convergence info. Keys are algorithm-specific but generically include converged : bool If the algorithm was successful reason : str A short description of why the algorithm terminated<|endoftext|>
d4b7de605716e6970476c06e2776ddf30cf6ba09fe92bb05dd46712b4d047cd2
def minimise(cost_function, initial_guess, algorithm, dtype=None, **options): "\n Calculates the inputs that minimise a real-valued cost function.\n\n The `cost_function` is considered either a function of real variables (and the real minimiser is returned)\n or as a function of complex variables (and the complex minimiser is returned), depending on `dtype`\n\n The `cost_function` must take a single argument, which can be an arbitrarily nested container structure\n of real or complex (or mixed) scalars and/or arrays.\n e.g. a dict of lists of array, or just a single array or number.\n\n The cost function must be traceable by jax.grad\n\n\n Parameters\n ----------\n cost_function : callable\n The function to be minimised\n\n ``cost_function(x) -> float``\n\n where `x` is an arbitrary pytree of scalars and/or arrays\n initial_guess : pytree\n The initial best guess. `cost_function(initial_guess)` must be valid.\n algorithm : str, optional\n Which algorithm to be uses. currently supported:\n\n - 'L-BFGS' see lbfgs.py\n - 'GD' see gradient_descent.py\n\n defaults to 'L-BFGS'\n dtype : jax.numpy.dtype or pytree, optional\n The datatype for the arguments of `cost_function`.\n Note that this determines if `cost_function` is considered a function of real or of complex variables.\n Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess`\n or None: per default, the dtype of `initial_guess` is used.\n options : dict\n Algorithm specific keyword arguments.\n See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`)\n for details.\n All algorithms accept these generic options:\n\n max_iter : int or None\n Maximum number of iterations. 
Default: algorithm specific.\n Note that the meaning of an iteration, and in particular the number of calls per iteration of\n `cost_function` or its gradient can be different for different algorithms.\n max_fun_evals : int or None\n Maximum number of function evaluations.\n Default: no restriction.\n max_gad_evals : int or None\n Maximum number of gradient evaluations.\n Default: no restriction.\n cost_print_name : str\n A string representation for the name of cost-function values for logging.\n Default: `Cost`\n display_fun : callable or None\n Function that is called to display convergence info after each iteration.\n or `None`: no display.\n Default: built-in `print` function\n callback : callable or None\n Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k`\n including the initial guess (k=0).\n or `None` (default): No callback.\n\n Returns\n -------\n x_optim : pytree\n Same structure as `initial_guess`. The optimal input to `cost_function`, that minimises it\n cost_optim : float\n The optimal value `cost_function(x_optim)`\n info : dict\n A dictionary of convergence info. Keys are algorithm-specific but generically include\n\n converged : bool\n If the algorithm was successful\n reason : str\n A short description of why the algorithm terminated\n\n " return _minimise(cost_function, initial_guess, algorithm, dtype, maximising=False, **options)
Calculates the inputs that minimise a real-valued cost function. The `cost_function` is considered either a function of real variables (and the real minimiser is returned) or as a function of complex variables (and the complex minimiser is returned), depending on `dtype` The `cost_function` must take a single argument, which can be an arbitrarily nested container structure of real or complex (or mixed) scalars and/or arrays. e.g. a dict of lists of array, or just a single array or number. The cost function must be traceable by jax.grad Parameters ---------- cost_function : callable The function to be minimised ``cost_function(x) -> float`` where `x` is an arbitrary pytree of scalars and/or arrays initial_guess : pytree The initial best guess. `cost_function(initial_guess)` must be valid. algorithm : str, optional Which algorithm to be uses. currently supported: - 'L-BFGS' see lbfgs.py - 'GD' see gradient_descent.py defaults to 'L-BFGS' dtype : jax.numpy.dtype or pytree, optional The datatype for the arguments of `cost_function`. Note that this determines if `cost_function` is considered a function of real or of complex variables. Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess` or None: per default, the dtype of `initial_guess` is used. options : dict Algorithm specific keyword arguments. See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`) for details. All algorithms accept these generic options: max_iter : int or None Maximum number of iterations. Default: algorithm specific. Note that the meaning of an iteration, and in particular the number of calls per iteration of `cost_function` or its gradient can be different for different algorithms. max_fun_evals : int or None Maximum number of function evaluations. Default: no restriction. max_gad_evals : int or None Maximum number of gradient evaluations. Default: no restriction. 
cost_print_name : str A string representation for the name of cost-function values for logging. Default: `Cost` display_fun : callable or None Function that is called to display convergence info after each iteration. or `None`: no display. Default: built-in `print` function callback : callable or None Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k` including the initial guess (k=0). or `None` (default): No callback. Returns ------- x_optim : pytree Same structure as `initial_guess`. The optimal input to `cost_function`, that minimises it cost_optim : float The optimal value `cost_function(x_optim)` info : dict A dictionary of convergence info. Keys are algorithm-specific but generically include converged : bool If the algorithm was successful reason : str A short description of why the algorithm terminated
jax_optimise/main.py
minimise
Jakob-Unfried/msc-legacy
1
python
def minimise(cost_function, initial_guess, algorithm, dtype=None, **options): "\n Calculates the inputs that minimise a real-valued cost function.\n\n The `cost_function` is considered either a function of real variables (and the real minimiser is returned)\n or as a function of complex variables (and the complex minimiser is returned), depending on `dtype`\n\n The `cost_function` must take a single argument, which can be an arbitrarily nested container structure\n of real or complex (or mixed) scalars and/or arrays.\n e.g. a dict of lists of array, or just a single array or number.\n\n The cost function must be traceable by jax.grad\n\n\n Parameters\n ----------\n cost_function : callable\n The function to be minimised\n\n ``cost_function(x) -> float``\n\n where `x` is an arbitrary pytree of scalars and/or arrays\n initial_guess : pytree\n The initial best guess. `cost_function(initial_guess)` must be valid.\n algorithm : str, optional\n Which algorithm to be uses. currently supported:\n\n - 'L-BFGS' see lbfgs.py\n - 'GD' see gradient_descent.py\n\n defaults to 'L-BFGS'\n dtype : jax.numpy.dtype or pytree, optional\n The datatype for the arguments of `cost_function`.\n Note that this determines if `cost_function` is considered a function of real or of complex variables.\n Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess`\n or None: per default, the dtype of `initial_guess` is used.\n options : dict\n Algorithm specific keyword arguments.\n See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`)\n for details.\n All algorithms accept these generic options:\n\n max_iter : int or None\n Maximum number of iterations. 
Default: algorithm specific.\n Note that the meaning of an iteration, and in particular the number of calls per iteration of\n `cost_function` or its gradient can be different for different algorithms.\n max_fun_evals : int or None\n Maximum number of function evaluations.\n Default: no restriction.\n max_gad_evals : int or None\n Maximum number of gradient evaluations.\n Default: no restriction.\n cost_print_name : str\n A string representation for the name of cost-function values for logging.\n Default: `Cost`\n display_fun : callable or None\n Function that is called to display convergence info after each iteration.\n or `None`: no display.\n Default: built-in `print` function\n callback : callable or None\n Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k`\n including the initial guess (k=0).\n or `None` (default): No callback.\n\n Returns\n -------\n x_optim : pytree\n Same structure as `initial_guess`. The optimal input to `cost_function`, that minimises it\n cost_optim : float\n The optimal value `cost_function(x_optim)`\n info : dict\n A dictionary of convergence info. Keys are algorithm-specific but generically include\n\n converged : bool\n If the algorithm was successful\n reason : str\n A short description of why the algorithm terminated\n\n " return _minimise(cost_function, initial_guess, algorithm, dtype, maximising=False, **options)
def minimise(cost_function, initial_guess, algorithm, dtype=None, **options): "\n Calculates the inputs that minimise a real-valued cost function.\n\n The `cost_function` is considered either a function of real variables (and the real minimiser is returned)\n or as a function of complex variables (and the complex minimiser is returned), depending on `dtype`\n\n The `cost_function` must take a single argument, which can be an arbitrarily nested container structure\n of real or complex (or mixed) scalars and/or arrays.\n e.g. a dict of lists of array, or just a single array or number.\n\n The cost function must be traceable by jax.grad\n\n\n Parameters\n ----------\n cost_function : callable\n The function to be minimised\n\n ``cost_function(x) -> float``\n\n where `x` is an arbitrary pytree of scalars and/or arrays\n initial_guess : pytree\n The initial best guess. `cost_function(initial_guess)` must be valid.\n algorithm : str, optional\n Which algorithm to be uses. currently supported:\n\n - 'L-BFGS' see lbfgs.py\n - 'GD' see gradient_descent.py\n\n defaults to 'L-BFGS'\n dtype : jax.numpy.dtype or pytree, optional\n The datatype for the arguments of `cost_function`.\n Note that this determines if `cost_function` is considered a function of real or of complex variables.\n Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess`\n or None: per default, the dtype of `initial_guess` is used.\n options : dict\n Algorithm specific keyword arguments.\n See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`)\n for details.\n All algorithms accept these generic options:\n\n max_iter : int or None\n Maximum number of iterations. 
Default: algorithm specific.\n Note that the meaning of an iteration, and in particular the number of calls per iteration of\n `cost_function` or its gradient can be different for different algorithms.\n max_fun_evals : int or None\n Maximum number of function evaluations.\n Default: no restriction.\n max_gad_evals : int or None\n Maximum number of gradient evaluations.\n Default: no restriction.\n cost_print_name : str\n A string representation for the name of cost-function values for logging.\n Default: `Cost`\n display_fun : callable or None\n Function that is called to display convergence info after each iteration.\n or `None`: no display.\n Default: built-in `print` function\n callback : callable or None\n Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k`\n including the initial guess (k=0).\n or `None` (default): No callback.\n\n Returns\n -------\n x_optim : pytree\n Same structure as `initial_guess`. The optimal input to `cost_function`, that minimises it\n cost_optim : float\n The optimal value `cost_function(x_optim)`\n info : dict\n A dictionary of convergence info. Keys are algorithm-specific but generically include\n\n converged : bool\n If the algorithm was successful\n reason : str\n A short description of why the algorithm terminated\n\n " return _minimise(cost_function, initial_guess, algorithm, dtype, maximising=False, **options)<|docstring|>Calculates the inputs that minimise a real-valued cost function. The `cost_function` is considered either a function of real variables (and the real minimiser is returned) or as a function of complex variables (and the complex minimiser is returned), depending on `dtype` The `cost_function` must take a single argument, which can be an arbitrarily nested container structure of real or complex (or mixed) scalars and/or arrays. e.g. a dict of lists of array, or just a single array or number. 
The cost function must be traceable by jax.grad Parameters ---------- cost_function : callable The function to be minimised ``cost_function(x) -> float`` where `x` is an arbitrary pytree of scalars and/or arrays initial_guess : pytree The initial best guess. `cost_function(initial_guess)` must be valid. algorithm : str, optional Which algorithm to be uses. currently supported: - 'L-BFGS' see lbfgs.py - 'GD' see gradient_descent.py defaults to 'L-BFGS' dtype : jax.numpy.dtype or pytree, optional The datatype for the arguments of `cost_function`. Note that this determines if `cost_function` is considered a function of real or of complex variables. Either a single `jax.numpy.dtype` used for all entries, or a pytree of matching the structure of `initial_guess` or None: per default, the dtype of `initial_guess` is used. options : dict Algorithm specific keyword arguments. See the docstring of the corresponding algorithm-specific function (e.g. `jax_optimise.lbfgs.minimise`) for details. All algorithms accept these generic options: max_iter : int or None Maximum number of iterations. Default: algorithm specific. Note that the meaning of an iteration, and in particular the number of calls per iteration of `cost_function` or its gradient can be different for different algorithms. max_fun_evals : int or None Maximum number of function evaluations. Default: no restriction. max_gad_evals : int or None Maximum number of gradient evaluations. Default: no restriction. cost_print_name : str A string representation for the name of cost-function values for logging. Default: `Cost` display_fun : callable or None Function that is called to display convergence info after each iteration. or `None`: no display. Default: built-in `print` function callback : callable or None Function that is called as `callback(xk, k)` for every iterate `xk`, with iteration number `k` including the initial guess (k=0). or `None` (default): No callback. 
Returns ------- x_optim : pytree Same structure as `initial_guess`. The optimal input to `cost_function`, that minimises it cost_optim : float The optimal value `cost_function(x_optim)` info : dict A dictionary of convergence info. Keys are algorithm-specific but generically include converged : bool If the algorithm was successful reason : str A short description of why the algorithm terminated<|endoftext|>
2bdb4cd37fc6107a5b705e166f78dd544f555c2b728d2e235a19db6a15d62e4e
def _minimise(cost_function, initial_guess, algorithm, dtype=None, maximising=False, **options): '\n Internal version of `minimise`.\n Allows interface for maximisation.\n In that case, the negative of the cost_function is minimised and the sign is readjusted in\n logs and output\n ' if dtype: if (tree_flatten(dtype)[1] == tree_flatten(np.float32)[1]): initial_guess = tree_map((lambda arr: np.asarray(arr, dtype=dtype)), initial_guess) else: initial_guess = tree_multimap((lambda arr, dt: np.asarray(arr, dtype=dt)), initial_guess, dtype) (initial_guess_flat, tree_def) = tree_flatten(initial_guess) if maximising: def cost_function_flat(x_flat): x = tree_unflatten(tree_def, x_flat) return (- cost_function(x)) else: def cost_function_flat(x_flat): x = tree_unflatten(tree_def, x_flat) return cost_function(x) if ('callback' in options): callback = options['callback'] def callback_flat(x_flat, k): x = tree_unflatten(tree_def, x_flat) callback(x, k) options['callback'] = callback_flat if (algorithm == LBFGS): (x_optim_flat, cost_optim, info) = lbfgs.minimise(cost_function_flat, initial_guess_flat, maximising=maximising, **options) elif (algorithm == GD): (x_optim_flat, cost_optim, info) = gradient_descent.minimise(cost_function_flat, initial_guess_flat, maximising=maximising, **options) else: raise ValueError(f'Algorithm Keyword "{algorithm}" is not a valid keyword or the algorithm is not implemented') x_optim = tree_unflatten(tree_def, x_optim_flat) if maximising: cost_optim = (- cost_optim) return (x_optim, cost_optim, info)
Internal version of `minimise`. Allows interface for maximisation. In that case, the negative of the cost_function is minimised and the sign is readjusted in logs and output
jax_optimise/main.py
_minimise
Jakob-Unfried/msc-legacy
1
python
def _minimise(cost_function, initial_guess, algorithm, dtype=None, maximising=False, **options): '\n Internal version of `minimise`.\n Allows interface for maximisation.\n In that case, the negative of the cost_function is minimised and the sign is readjusted in\n logs and output\n ' if dtype: if (tree_flatten(dtype)[1] == tree_flatten(np.float32)[1]): initial_guess = tree_map((lambda arr: np.asarray(arr, dtype=dtype)), initial_guess) else: initial_guess = tree_multimap((lambda arr, dt: np.asarray(arr, dtype=dt)), initial_guess, dtype) (initial_guess_flat, tree_def) = tree_flatten(initial_guess) if maximising: def cost_function_flat(x_flat): x = tree_unflatten(tree_def, x_flat) return (- cost_function(x)) else: def cost_function_flat(x_flat): x = tree_unflatten(tree_def, x_flat) return cost_function(x) if ('callback' in options): callback = options['callback'] def callback_flat(x_flat, k): x = tree_unflatten(tree_def, x_flat) callback(x, k) options['callback'] = callback_flat if (algorithm == LBFGS): (x_optim_flat, cost_optim, info) = lbfgs.minimise(cost_function_flat, initial_guess_flat, maximising=maximising, **options) elif (algorithm == GD): (x_optim_flat, cost_optim, info) = gradient_descent.minimise(cost_function_flat, initial_guess_flat, maximising=maximising, **options) else: raise ValueError(f'Algorithm Keyword "{algorithm}" is not a valid keyword or the algorithm is not implemented') x_optim = tree_unflatten(tree_def, x_optim_flat) if maximising: cost_optim = (- cost_optim) return (x_optim, cost_optim, info)
def _minimise(cost_function, initial_guess, algorithm, dtype=None, maximising=False, **options): '\n Internal version of `minimise`.\n Allows interface for maximisation.\n In that case, the negative of the cost_function is minimised and the sign is readjusted in\n logs and output\n ' if dtype: if (tree_flatten(dtype)[1] == tree_flatten(np.float32)[1]): initial_guess = tree_map((lambda arr: np.asarray(arr, dtype=dtype)), initial_guess) else: initial_guess = tree_multimap((lambda arr, dt: np.asarray(arr, dtype=dt)), initial_guess, dtype) (initial_guess_flat, tree_def) = tree_flatten(initial_guess) if maximising: def cost_function_flat(x_flat): x = tree_unflatten(tree_def, x_flat) return (- cost_function(x)) else: def cost_function_flat(x_flat): x = tree_unflatten(tree_def, x_flat) return cost_function(x) if ('callback' in options): callback = options['callback'] def callback_flat(x_flat, k): x = tree_unflatten(tree_def, x_flat) callback(x, k) options['callback'] = callback_flat if (algorithm == LBFGS): (x_optim_flat, cost_optim, info) = lbfgs.minimise(cost_function_flat, initial_guess_flat, maximising=maximising, **options) elif (algorithm == GD): (x_optim_flat, cost_optim, info) = gradient_descent.minimise(cost_function_flat, initial_guess_flat, maximising=maximising, **options) else: raise ValueError(f'Algorithm Keyword "{algorithm}" is not a valid keyword or the algorithm is not implemented') x_optim = tree_unflatten(tree_def, x_optim_flat) if maximising: cost_optim = (- cost_optim) return (x_optim, cost_optim, info)<|docstring|>Internal version of `minimise`. Allows interface for maximisation. In that case, the negative of the cost_function is minimised and the sign is readjusted in logs and output<|endoftext|>
496438ba2bb52263c913755365fa828b54e1521af519929813d56280492f282d
def save_heatmap(save_folder, base_name, data_array, label=None, tick_labels=None, scale_results=True, ignore_in_scale=np.inf): '\n Save a heatmap of results using np.imshow(). Specify optional colorbar label.\n :param save_folder:\n :param base_name: The base name of the file (sans extension or folder path)\n :param data_array:\n :param label: The label for the colorbar\n :param tick_marks: Tick mark labels (numerical); if applicable\n :param scale_results:\n :param ignore_in_scale:\n ' v_min = (np.min(data_array[(data_array != ignore_in_scale)]) if scale_results else None) v_max = (np.max(data_array[(data_array != ignore_in_scale)]) if scale_results else None) cimg = plt.imshow(data_array, vmin=v_min, vmax=v_max, norm=None) cbar = plt.colorbar(cimg, ticks=tick_labels) if (label is not None): cbar.set_label(label) out_fn = os.path.join(save_folder, (base_name + '.png')) plt.savefig(out_fn) plt.close() logging.debug('Saved heatmap visualization to {}'.format(out_fn))
Save a heatmap of results using np.imshow(). Specify optional colorbar label. :param save_folder: :param base_name: The base name of the file (sans extension or folder path) :param data_array: :param label: The label for the colorbar :param tick_marks: Tick mark labels (numerical); if applicable :param scale_results: :param ignore_in_scale:
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/render_results.py
save_heatmap
lefevre-fraser/openmeta-mms
0
python
def save_heatmap(save_folder, base_name, data_array, label=None, tick_labels=None, scale_results=True, ignore_in_scale=np.inf): '\n Save a heatmap of results using np.imshow(). Specify optional colorbar label.\n :param save_folder:\n :param base_name: The base name of the file (sans extension or folder path)\n :param data_array:\n :param label: The label for the colorbar\n :param tick_marks: Tick mark labels (numerical); if applicable\n :param scale_results:\n :param ignore_in_scale:\n ' v_min = (np.min(data_array[(data_array != ignore_in_scale)]) if scale_results else None) v_max = (np.max(data_array[(data_array != ignore_in_scale)]) if scale_results else None) cimg = plt.imshow(data_array, vmin=v_min, vmax=v_max, norm=None) cbar = plt.colorbar(cimg, ticks=tick_labels) if (label is not None): cbar.set_label(label) out_fn = os.path.join(save_folder, (base_name + '.png')) plt.savefig(out_fn) plt.close() logging.debug('Saved heatmap visualization to {}'.format(out_fn))
def save_heatmap(save_folder, base_name, data_array, label=None, tick_labels=None, scale_results=True, ignore_in_scale=np.inf): '\n Save a heatmap of results using np.imshow(). Specify optional colorbar label.\n :param save_folder:\n :param base_name: The base name of the file (sans extension or folder path)\n :param data_array:\n :param label: The label for the colorbar\n :param tick_marks: Tick mark labels (numerical); if applicable\n :param scale_results:\n :param ignore_in_scale:\n ' v_min = (np.min(data_array[(data_array != ignore_in_scale)]) if scale_results else None) v_max = (np.max(data_array[(data_array != ignore_in_scale)]) if scale_results else None) cimg = plt.imshow(data_array, vmin=v_min, vmax=v_max, norm=None) cbar = plt.colorbar(cimg, ticks=tick_labels) if (label is not None): cbar.set_label(label) out_fn = os.path.join(save_folder, (base_name + '.png')) plt.savefig(out_fn) plt.close() logging.debug('Saved heatmap visualization to {}'.format(out_fn))<|docstring|>Save a heatmap of results using np.imshow(). Specify optional colorbar label. :param save_folder: :param base_name: The base name of the file (sans extension or folder path) :param data_array: :param label: The label for the colorbar :param tick_marks: Tick mark labels (numerical); if applicable :param scale_results: :param ignore_in_scale:<|endoftext|>
ae9c5b39894ae0dd55ec02d0f7ac6df3111fbff3400b1dbcaa9bfc0123ea62fb
def create_parking_spots(n_clicks, destination, accept_distance, cu_instance): '\n This function refreshes the map when the submit button is clicked with\n user input destination address and acceptable distance.\n\n Parameters\n ----------\n n_clicks: integer\n the number of the submit button got clicked.\n\n destination: str\n the user input destination address.\n\n accept_distance: str\n the user input of acceptable walking distance from the\n destination address.\n\n cu_instance: object\n object to call the functions in CoordinatesUtil\n\n Returns\n -------\n JSON\n recommended parking spots to be marked on the scatter mapbox.\n ' if (destination and accept_distance): if (n_clicks > 0): top_spots_on_map = [] (spots, destination_coordinates) = cu_instance.get_parking_spots(destination, accept_distance) if ((not destination_coordinates) or (not spots) or (len(spots) == 0)): return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, 'Input Address is Invalid!') for spot in spots: lats = spot.street_meet_expect_coordinates[0] longs = spot.street_meet_expect_coordinates[1] street_details = ((f'Address: <a href="https://www.google.com/maps/place/{spot.street_lat_mid},{spot.street_lon_mid}" target=_blank>' + spot.street_name) + f'</a> <br />Distance: {round(spot.calculated_distance, 2)} miles<br />Spots Available: {math.floor(spot.spaceavail)}') top_spots_on_map.append({'type': 'scattermapbox', 'lat': lats, 'lon': longs, 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'hovertemplate': f'{street_details}<extra></extra>', 'hoverlabel': {'bgcolor': 'white', 'font_size': 10, 'align': 'left'}, 'showlegend': False, 'visible': True}) destination_address_link = ((f'Address: <a href="https://www.google.com/maps/place/{destination_coordinates[0]},{destination_coordinates[1]}" target=_blank>' + destination) + '</a>') 
top_spots_on_map.append({'type': 'scattermapbox', 'lat': [destination_coordinates[0]], 'lon': [destination_coordinates[1]], 'mode': 'point', 'marker': {'size': 8, 'color': 'red'}, 'hovertemplate': f'{destination_address_link}<extra></extra>', 'hoverlabel': {'bgcolor': 'white', 'font_size': 10}, 'showlegend': False, 'visible': True}) return ({'data': top_spots_on_map, 'layout': layout}, '') else: return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, '') else: return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, '')
This function refreshes the map when the submit button is clicked with user input destination address and acceptable distance. Parameters ---------- n_clicks: integer the number of the submit button got clicked. destination: str the user input destination address. accept_distance: str the user input of acceptable walking distance from the destination address. cu_instance: object object to call the functions in CoordinatesUtil Returns ------- JSON recommended parking spots to be marked on the scatter mapbox.
seattlepark/src/parking_app.py
create_parking_spots
qhsun/seattleparking
1
python
def create_parking_spots(n_clicks, destination, accept_distance, cu_instance): '\n This function refreshes the map when the submit button is clicked with\n user input destination address and acceptable distance.\n\n Parameters\n ----------\n n_clicks: integer\n the number of the submit button got clicked.\n\n destination: str\n the user input destination address.\n\n accept_distance: str\n the user input of acceptable walking distance from the\n destination address.\n\n cu_instance: object\n object to call the functions in CoordinatesUtil\n\n Returns\n -------\n JSON\n recommended parking spots to be marked on the scatter mapbox.\n ' if (destination and accept_distance): if (n_clicks > 0): top_spots_on_map = [] (spots, destination_coordinates) = cu_instance.get_parking_spots(destination, accept_distance) if ((not destination_coordinates) or (not spots) or (len(spots) == 0)): return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, 'Input Address is Invalid!') for spot in spots: lats = spot.street_meet_expect_coordinates[0] longs = spot.street_meet_expect_coordinates[1] street_details = ((f'Address: <a href="https://www.google.com/maps/place/{spot.street_lat_mid},{spot.street_lon_mid}" target=_blank>' + spot.street_name) + f'</a> <br />Distance: {round(spot.calculated_distance, 2)} miles<br />Spots Available: {math.floor(spot.spaceavail)}') top_spots_on_map.append({'type': 'scattermapbox', 'lat': lats, 'lon': longs, 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'hovertemplate': f'{street_details}<extra></extra>', 'hoverlabel': {'bgcolor': 'white', 'font_size': 10, 'align': 'left'}, 'showlegend': False, 'visible': True}) destination_address_link = ((f'Address: <a href="https://www.google.com/maps/place/{destination_coordinates[0]},{destination_coordinates[1]}" target=_blank>' + destination) + '</a>') 
top_spots_on_map.append({'type': 'scattermapbox', 'lat': [destination_coordinates[0]], 'lon': [destination_coordinates[1]], 'mode': 'point', 'marker': {'size': 8, 'color': 'red'}, 'hovertemplate': f'{destination_address_link}<extra></extra>', 'hoverlabel': {'bgcolor': 'white', 'font_size': 10}, 'showlegend': False, 'visible': True}) return ({'data': top_spots_on_map, 'layout': layout}, ) else: return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, ) else: return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, )
def create_parking_spots(n_clicks, destination, accept_distance, cu_instance): '\n This function refreshes the map when the submit button is clicked with\n user input destination address and acceptable distance.\n\n Parameters\n ----------\n n_clicks: integer\n the number of the submit button got clicked.\n\n destination: str\n the user input destination address.\n\n accept_distance: str\n the user input of acceptable walking distance from the\n destination address.\n\n cu_instance: object\n object to call the functions in CoordinatesUtil\n\n Returns\n -------\n JSON\n recommended parking spots to be marked on the scatter mapbox.\n ' if (destination and accept_distance): if (n_clicks > 0): top_spots_on_map = [] (spots, destination_coordinates) = cu_instance.get_parking_spots(destination, accept_distance) if ((not destination_coordinates) or (not spots) or (len(spots) == 0)): return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, 'Input Address is Invalid!') for spot in spots: lats = spot.street_meet_expect_coordinates[0] longs = spot.street_meet_expect_coordinates[1] street_details = ((f'Address: <a href="https://www.google.com/maps/place/{spot.street_lat_mid},{spot.street_lon_mid}" target=_blank>' + spot.street_name) + f'</a> <br />Distance: {round(spot.calculated_distance, 2)} miles<br />Spots Available: {math.floor(spot.spaceavail)}') top_spots_on_map.append({'type': 'scattermapbox', 'lat': lats, 'lon': longs, 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'hovertemplate': f'{street_details}<extra></extra>', 'hoverlabel': {'bgcolor': 'white', 'font_size': 10, 'align': 'left'}, 'showlegend': False, 'visible': True}) destination_address_link = ((f'Address: <a href="https://www.google.com/maps/place/{destination_coordinates[0]},{destination_coordinates[1]}" target=_blank>' + destination) + '</a>') 
top_spots_on_map.append({'type': 'scattermapbox', 'lat': [destination_coordinates[0]], 'lon': [destination_coordinates[1]], 'mode': 'point', 'marker': {'size': 8, 'color': 'red'}, 'hovertemplate': f'{destination_address_link}<extra></extra>', 'hoverlabel': {'bgcolor': 'white', 'font_size': 10}, 'showlegend': False, 'visible': True}) return ({'data': top_spots_on_map, 'layout': layout}, ) else: return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, ) else: return ({'data': [{'type': 'scattermapbox', 'lat': [], 'lon': [], 'hoverinfo': 'text', 'mode': 'lines', 'marker': {'size': 4, 'color': 'green'}, 'visible': True}], 'layout': layout}, )<|docstring|>This function refreshes the map when the submit button is clicked with user input destination address and acceptable distance. Parameters ---------- n_clicks: integer the number of the submit button got clicked. destination: str the user input destination address. accept_distance: str the user input of acceptable walking distance from the destination address. cu_instance: object object to call the functions in CoordinatesUtil Returns ------- JSON recommended parking spots to be marked on the scatter mapbox.<|endoftext|>
08e8ba9fa5e9cc92b58108e4078cc1b71b6dec2905baac34109407b37a61618d
def test_create_valid_user_success(self): 'Test creating user with valid payload is successful' payload = {'email': 'example@example.com', 'password': 'testpass', 'name': 'Test name'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) user = get_user_model().objects.get(**res.data) self.assertTrue(user.check_password(payload['password'])) self.assertNotIn('password', res.data)
Test creating user with valid payload is successful
app/user/tests/test_user_api.py
test_create_valid_user_success
csukel/recipe-app-api
0
python
def test_create_valid_user_success(self): payload = {'email': 'example@example.com', 'password': 'testpass', 'name': 'Test name'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) user = get_user_model().objects.get(**res.data) self.assertTrue(user.check_password(payload['password'])) self.assertNotIn('password', res.data)
def test_create_valid_user_success(self): payload = {'email': 'example@example.com', 'password': 'testpass', 'name': 'Test name'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_201_CREATED) user = get_user_model().objects.get(**res.data) self.assertTrue(user.check_password(payload['password'])) self.assertNotIn('password', res.data)<|docstring|>Test creating user with valid payload is successful<|endoftext|>
390698110dd05c8cf5f5b70b41ca448062b9f4465d5164552f86f23cc280605a
def test_user_exists(self): 'Test create a user that already exists fails' payload = {'email': 'example@example.com', 'password': 'testpass', 'name': 'Test name'} create_user(**payload) res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test create a user that already exists fails
app/user/tests/test_user_api.py
test_user_exists
csukel/recipe-app-api
0
python
def test_user_exists(self): payload = {'email': 'example@example.com', 'password': 'testpass', 'name': 'Test name'} create_user(**payload) res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_exists(self): payload = {'email': 'example@example.com', 'password': 'testpass', 'name': 'Test name'} create_user(**payload) res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Test create a user that already exists fails<|endoftext|>
c3e0cdce649980dc18f7f2dfeca673eaaf714befba16e10debf9c5819dbbd4d7
def test_password_too_short(self): 'Test that the password must be more than 5 characters' payload = {'email': 'example@example.com', 'password': 'pw', 'name': 'Test name'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) user_exists = get_user_model().objects.filter(email=payload['email']).exists() self.assertFalse(user_exists)
Test that the password must be more than 5 characters
app/user/tests/test_user_api.py
test_password_too_short
csukel/recipe-app-api
0
python
def test_password_too_short(self): payload = {'email': 'example@example.com', 'password': 'pw', 'name': 'Test name'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) user_exists = get_user_model().objects.filter(email=payload['email']).exists() self.assertFalse(user_exists)
def test_password_too_short(self): payload = {'email': 'example@example.com', 'password': 'pw', 'name': 'Test name'} res = self.client.post(CREATE_USER_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) user_exists = get_user_model().objects.filter(email=payload['email']).exists() self.assertFalse(user_exists)<|docstring|>Test that the password must be more than 5 characters<|endoftext|>
fb42053f6d9e4f625e25207ac2309521ec7b3970f63c8dabea88d64bf769b039
def test_create_token_for_user(self): 'Test that a token is created for the user' payload = {'email': 'example@example.com', 'password': '12345134', 'name': 'Test name'} create_user(**payload) res = self.client.post(TOKEN_URL, payload) self.assertIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_200_OK)
Test that a token is created for the user
app/user/tests/test_user_api.py
test_create_token_for_user
csukel/recipe-app-api
0
python
def test_create_token_for_user(self): payload = {'email': 'example@example.com', 'password': '12345134', 'name': 'Test name'} create_user(**payload) res = self.client.post(TOKEN_URL, payload) self.assertIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_for_user(self): payload = {'email': 'example@example.com', 'password': '12345134', 'name': 'Test name'} create_user(**payload) res = self.client.post(TOKEN_URL, payload) self.assertIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_200_OK)<|docstring|>Test that a token is created for the user<|endoftext|>
336eb2fb28202f927a3a148eab6986cedf48c7ff263dbd797ad0732d5c5794d0
def test_create_token_invalid_credentials(self): 'Test that token is not created if invalid credentials are give' create_user(email='example@example.com', password='12345134') payload = {'email': 'example@example.com', 'password': '4534dshfdjsfhjk'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test that token is not created if invalid credentials are give
app/user/tests/test_user_api.py
test_create_token_invalid_credentials
csukel/recipe-app-api
0
python
def test_create_token_invalid_credentials(self): create_user(email='example@example.com', password='12345134') payload = {'email': 'example@example.com', 'password': '4534dshfdjsfhjk'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_invalid_credentials(self): create_user(email='example@example.com', password='12345134') payload = {'email': 'example@example.com', 'password': '4534dshfdjsfhjk'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Test that token is not created if invalid credentials are give<|endoftext|>
523fcad9fe0e1d1b4dee65bd496bb38098c18e8f45dcad59904b4db7e060adc2
def test_create_token_no_user(self): "Test that token is not created if user doesn't exist" payload = {'email': 'example@example.com', 'password': '4534dshfdjsfhjk'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test that token is not created if user doesn't exist
app/user/tests/test_user_api.py
test_create_token_no_user
csukel/recipe-app-api
0
python
def test_create_token_no_user(self): payload = {'email': 'example@example.com', 'password': '4534dshfdjsfhjk'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self): payload = {'email': 'example@example.com', 'password': '4534dshfdjsfhjk'} res = self.client.post(TOKEN_URL, payload) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Test that token is not created if user doesn't exist<|endoftext|>
8a88ea42e4be0997f62e198cc99550dc219441658b2c1062a81b62ef62e7e5d3
def test_create_token_missing_field(self): 'Test that email and password are required' res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''}) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test that email and password are required
app/user/tests/test_user_api.py
test_create_token_missing_field
csukel/recipe-app-api
0
python
def test_create_token_missing_field(self): res = self.client.post(TOKEN_URL, {'email': 'one', 'password': }) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self): res = self.client.post(TOKEN_URL, {'email': 'one', 'password': }) self.assertNotIn('token', res.data) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)<|docstring|>Test that email and password are required<|endoftext|>
e3ce014b3b3e72b7cbf71a9fc666067aebaf2e9e8cbf5aeb3009ffd9ebb99c8e
def test_retrieve_user_unauthorized(self): 'Test that authentication is required for users' res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
Test that authentication is required for users
app/user/tests/test_user_api.py
test_retrieve_user_unauthorized
csukel/recipe-app-api
0
python
def test_retrieve_user_unauthorized(self): res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_retrieve_user_unauthorized(self): res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)<|docstring|>Test that authentication is required for users<|endoftext|>
b0becf4a3f3da9ef35c2813de19aebf239f7ebecec94a4d377b3a983449bbdcb
def test_retrieve_profile_success(self): 'Test retrieving profile for logged in user' res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, {'name': self.user.name, 'email': self.user.email})
Test retrieving profile for logged in user
app/user/tests/test_user_api.py
test_retrieve_profile_success
csukel/recipe-app-api
0
python
def test_retrieve_profile_success(self): res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, {'name': self.user.name, 'email': self.user.email})
def test_retrieve_profile_success(self): res = self.client.get(ME_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, {'name': self.user.name, 'email': self.user.email})<|docstring|>Test retrieving profile for logged in user<|endoftext|>
e252950697f0925c0b839b1fcfabc0abe89950ccb0669193c724c25618005e12
def test_post_me_not_allowed(self): 'Test that POST is not allowed on the me url' res = self.client.post(ME_URL, {}) self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
Test that POST is not allowed on the me url
app/user/tests/test_user_api.py
test_post_me_not_allowed
csukel/recipe-app-api
0
python
def test_post_me_not_allowed(self): res = self.client.post(ME_URL, {}) self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_post_me_not_allowed(self): res = self.client.post(ME_URL, {}) self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)<|docstring|>Test that POST is not allowed on the me url<|endoftext|>
6206015a00f0c14b492430788e1ea4cb91846096d8ea1302977e1884af36b1b0
def test_update_user_profile(self): 'Test updating the user profile for authenticated user' payload = {'name': 'Test1', 'password': 'newpassword123'} res = self.client.patch(ME_URL, payload) self.user.refresh_from_db() self.assertEqual(self.user.name, payload['name']) self.assertTrue(self.user.check_password(payload['password'])) self.assertEqual(res.status_code, status.HTTP_200_OK)
Test updating the user profile for authenticated user
app/user/tests/test_user_api.py
test_update_user_profile
csukel/recipe-app-api
0
python
def test_update_user_profile(self): payload = {'name': 'Test1', 'password': 'newpassword123'} res = self.client.patch(ME_URL, payload) self.user.refresh_from_db() self.assertEqual(self.user.name, payload['name']) self.assertTrue(self.user.check_password(payload['password'])) self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_update_user_profile(self): payload = {'name': 'Test1', 'password': 'newpassword123'} res = self.client.patch(ME_URL, payload) self.user.refresh_from_db() self.assertEqual(self.user.name, payload['name']) self.assertTrue(self.user.check_password(payload['password'])) self.assertEqual(res.status_code, status.HTTP_200_OK)<|docstring|>Test updating the user profile for authenticated user<|endoftext|>
29d5ae5d2f0724dabf8d45a483860d905d339bb2b76ed6c884ccd53ab6cba707
def create_data(casepath): 'Create *.data files (and topography & hydrological files) in case folder.\n\n Args:\n casepath [in]: a string indicating the abs path of case folder.\n ' import createtopo import createhydro if (not os.path.isdir(casepath)): print('Error: case folder {} does not exist.'.format(casepath), file=sys.stderr) sys.exit(1) setrunpath = os.path.join(casepath, 'setrun.py') if (not os.path.isfile(setrunpath)): print('Error: case folder {} does not have setrun.py.'.format(casepath), file=sys.stderr) sys.exit(1) sys.path.insert(0, casepath) import setrun pwd = os.getcwd() os.chdir(casepath) rundata = setrun.setrun() rundata.write() os.chdir(pwd) createtopo.check_download_topo(casepath, rundata) createhydro.check_download_hydro(casepath, rundata)
Create *.data files (and topography & hydrological files) in case folder. Args: casepath [in]: a string indicating the abs path of case folder.
utilities/run.py
create_data
psurendrababu/geoclaw-landspill-cases
0
python
def create_data(casepath): 'Create *.data files (and topography & hydrological files) in case folder.\n\n Args:\n casepath [in]: a string indicating the abs path of case folder.\n ' import createtopo import createhydro if (not os.path.isdir(casepath)): print('Error: case folder {} does not exist.'.format(casepath), file=sys.stderr) sys.exit(1) setrunpath = os.path.join(casepath, 'setrun.py') if (not os.path.isfile(setrunpath)): print('Error: case folder {} does not have setrun.py.'.format(casepath), file=sys.stderr) sys.exit(1) sys.path.insert(0, casepath) import setrun pwd = os.getcwd() os.chdir(casepath) rundata = setrun.setrun() rundata.write() os.chdir(pwd) createtopo.check_download_topo(casepath, rundata) createhydro.check_download_hydro(casepath, rundata)
def create_data(casepath): 'Create *.data files (and topography & hydrological files) in case folder.\n\n Args:\n casepath [in]: a string indicating the abs path of case folder.\n ' import createtopo import createhydro if (not os.path.isdir(casepath)): print('Error: case folder {} does not exist.'.format(casepath), file=sys.stderr) sys.exit(1) setrunpath = os.path.join(casepath, 'setrun.py') if (not os.path.isfile(setrunpath)): print('Error: case folder {} does not have setrun.py.'.format(casepath), file=sys.stderr) sys.exit(1) sys.path.insert(0, casepath) import setrun pwd = os.getcwd() os.chdir(casepath) rundata = setrun.setrun() rundata.write() os.chdir(pwd) createtopo.check_download_topo(casepath, rundata) createhydro.check_download_hydro(casepath, rundata)<|docstring|>Create *.data files (and topography & hydrological files) in case folder. Args: casepath [in]: a string indicating the abs path of case folder.<|endoftext|>
cbfd35a8ea90d3e68506898462a003a9b80ab0c49cc377ef87505894af7b1e24
def __init__(self, n=30000): ' Initialize galaxy ' self._inner_excentricity = 0.8 self._outer_excentricity = 1.0 self._center_velocity = 30 self._inner_velocity = 200 self._outer_velocity = 300 self._angular_offset = 0.019 self._core_radius = 6000 self._galaxy_radius = 15000 self._distant_radius = 0 self._star_distribution = 0.45 self._angular_velocity = 1e-06 self._stars_count = n self._dust_count = int((self._stars_count * 0.75)) self._h2_count = 200 dtype = [('theta', float, 1), ('velocity', float, 1), ('angle', float, 1), ('m_a', float, 1), ('m_b', float, 1), ('size', float, 1), ('type', float, 1), ('temperature', float, 1), ('brightness', float, 1), ('position', float, 2), ('planets', list, 1)] n = ((self._stars_count + self._dust_count) + (2 * self._h2_count)) self._particles = np.zeros(n, dtype=dtype) i0 = 0 i1 = (i0 + self._stars_count) self._stars = self._particles[i0:i1] self._stars['size'] = 4 self._stars['type'] = 0 i0 = i1 i1 = (i0 + self._dust_count) self._dust = self._particles[i0:i1] self._dust['size'] = 64 self._dust['type'] = 1 i0 = i1 i1 = (i0 + self._h2_count) self._h2a = self._particles[i0:i1] self._h2a['size'] = 64 self._h2a['type'] = 2 i0 = i1 i1 = (i0 + self._h2_count) self._h2b = self._particles[i0:i1] self._h2b['size'] = 8 self._h2b['type'] = 3
Initialize galaxy
galaxy.py
__init__
Stephenjcl/GalaxyMapGeneration
0
python
def __init__(self, n=30000): ' ' self._inner_excentricity = 0.8 self._outer_excentricity = 1.0 self._center_velocity = 30 self._inner_velocity = 200 self._outer_velocity = 300 self._angular_offset = 0.019 self._core_radius = 6000 self._galaxy_radius = 15000 self._distant_radius = 0 self._star_distribution = 0.45 self._angular_velocity = 1e-06 self._stars_count = n self._dust_count = int((self._stars_count * 0.75)) self._h2_count = 200 dtype = [('theta', float, 1), ('velocity', float, 1), ('angle', float, 1), ('m_a', float, 1), ('m_b', float, 1), ('size', float, 1), ('type', float, 1), ('temperature', float, 1), ('brightness', float, 1), ('position', float, 2), ('planets', list, 1)] n = ((self._stars_count + self._dust_count) + (2 * self._h2_count)) self._particles = np.zeros(n, dtype=dtype) i0 = 0 i1 = (i0 + self._stars_count) self._stars = self._particles[i0:i1] self._stars['size'] = 4 self._stars['type'] = 0 i0 = i1 i1 = (i0 + self._dust_count) self._dust = self._particles[i0:i1] self._dust['size'] = 64 self._dust['type'] = 1 i0 = i1 i1 = (i0 + self._h2_count) self._h2a = self._particles[i0:i1] self._h2a['size'] = 64 self._h2a['type'] = 2 i0 = i1 i1 = (i0 + self._h2_count) self._h2b = self._particles[i0:i1] self._h2b['size'] = 8 self._h2b['type'] = 3
def __init__(self, n=30000): ' ' self._inner_excentricity = 0.8 self._outer_excentricity = 1.0 self._center_velocity = 30 self._inner_velocity = 200 self._outer_velocity = 300 self._angular_offset = 0.019 self._core_radius = 6000 self._galaxy_radius = 15000 self._distant_radius = 0 self._star_distribution = 0.45 self._angular_velocity = 1e-06 self._stars_count = n self._dust_count = int((self._stars_count * 0.75)) self._h2_count = 200 dtype = [('theta', float, 1), ('velocity', float, 1), ('angle', float, 1), ('m_a', float, 1), ('m_b', float, 1), ('size', float, 1), ('type', float, 1), ('temperature', float, 1), ('brightness', float, 1), ('position', float, 2), ('planets', list, 1)] n = ((self._stars_count + self._dust_count) + (2 * self._h2_count)) self._particles = np.zeros(n, dtype=dtype) i0 = 0 i1 = (i0 + self._stars_count) self._stars = self._particles[i0:i1] self._stars['size'] = 4 self._stars['type'] = 0 i0 = i1 i1 = (i0 + self._dust_count) self._dust = self._particles[i0:i1] self._dust['size'] = 64 self._dust['type'] = 1 i0 = i1 i1 = (i0 + self._h2_count) self._h2a = self._particles[i0:i1] self._h2a['size'] = 64 self._h2a['type'] = 2 i0 = i1 i1 = (i0 + self._h2_count) self._h2b = self._particles[i0:i1] self._h2b['size'] = 8 self._h2b['type'] = 3<|docstring|>Initialize galaxy<|endoftext|>
916b5c8774c3d8742a5caa0062774fc218250efe9a943690dc31e1d64dc5374a
def __len__(self): ' Number of particles ' if (self._particles is not None): return len(self._particles) return 0
Number of particles
galaxy.py
__len__
Stephenjcl/GalaxyMapGeneration
0
python
def __len__(self): ' ' if (self._particles is not None): return len(self._particles) return 0
def __len__(self): ' ' if (self._particles is not None): return len(self._particles) return 0<|docstring|>Number of particles<|endoftext|>
eeb7fca0ee37300fac549d49acd3c557f527c50b27c41ceff6cd5fa2503faf63
def __getitem__(self, key): ' x.__getitem__(y) <==> x[y] ' if (self._particles is not None): return self._particles[key] return None
x.__getitem__(y) <==> x[y]
galaxy.py
__getitem__
Stephenjcl/GalaxyMapGeneration
0
python
def __getitem__(self, key): ' ' if (self._particles is not None): return self._particles[key] return None
def __getitem__(self, key): ' ' if (self._particles is not None): return self._particles[key] return None<|docstring|>x.__getitem__(y) <==> x[y]<|endoftext|>
b2ab37ac364a4b6d8828f91de94281892bf6a3072b6cd919507872b0bb15bc4d
def update(self, timestep=100000): ' Update simulation ' self._particles['theta'] += (self._particles['velocity'] * timestep) P = self._particles (a, b) = (P['m_a'], P['m_b']) (theta, beta) = (P['theta'], (- P['angle'])) alpha = ((theta * math.pi) / 180.0) cos_alpha = np.cos(alpha) sin_alpha = np.sin(alpha) cos_beta = np.cos(beta) sin_beta = np.sin(beta) P['position'][(:, 0)] = (((a * cos_alpha) * cos_beta) - ((b * sin_alpha) * sin_beta)) P['position'][(:, 1)] = (((a * cos_alpha) * sin_beta) + ((b * sin_alpha) * cos_beta)) D = np.sqrt(((self._h2a['position'] - self._h2b['position']) ** 2).sum(axis=1)) S = np.maximum(1, (((1000 - D) / 10) - 50)) self._h2a['size'] = S self._h2b['size'] = np.maximum((S / 6), 1)
Update simulation
galaxy.py
update
Stephenjcl/GalaxyMapGeneration
0
python
def update(self, timestep=100000): ' ' self._particles['theta'] += (self._particles['velocity'] * timestep) P = self._particles (a, b) = (P['m_a'], P['m_b']) (theta, beta) = (P['theta'], (- P['angle'])) alpha = ((theta * math.pi) / 180.0) cos_alpha = np.cos(alpha) sin_alpha = np.sin(alpha) cos_beta = np.cos(beta) sin_beta = np.sin(beta) P['position'][(:, 0)] = (((a * cos_alpha) * cos_beta) - ((b * sin_alpha) * sin_beta)) P['position'][(:, 1)] = (((a * cos_alpha) * sin_beta) + ((b * sin_alpha) * cos_beta)) D = np.sqrt(((self._h2a['position'] - self._h2b['position']) ** 2).sum(axis=1)) S = np.maximum(1, (((1000 - D) / 10) - 50)) self._h2a['size'] = S self._h2b['size'] = np.maximum((S / 6), 1)
def update(self, timestep=100000): ' ' self._particles['theta'] += (self._particles['velocity'] * timestep) P = self._particles (a, b) = (P['m_a'], P['m_b']) (theta, beta) = (P['theta'], (- P['angle'])) alpha = ((theta * math.pi) / 180.0) cos_alpha = np.cos(alpha) sin_alpha = np.sin(alpha) cos_beta = np.cos(beta) sin_beta = np.sin(beta) P['position'][(:, 0)] = (((a * cos_alpha) * cos_beta) - ((b * sin_alpha) * sin_beta)) P['position'][(:, 1)] = (((a * cos_alpha) * sin_beta) + ((b * sin_alpha) * cos_beta)) D = np.sqrt(((self._h2a['position'] - self._h2b['position']) ** 2).sum(axis=1)) S = np.maximum(1, (((1000 - D) / 10) - 50)) self._h2a['size'] = S self._h2b['size'] = np.maximum((S / 6), 1)<|docstring|>Update simulation<|endoftext|>
02d10658a2e692ad8a248e7d4aa4615d2267ab0966fe08b2cede457753ed6d12
@staticmethod def parse_resource_usage_header(header): 'parse string like WORKER_TIME-3600=11.7/10000000;DB_QUERY_TIME-21600=4.62/2000 into dict' if (not header): return {} result = {} try: for line in header.split(';'): (right, left) = line.split('=') (metric, time_span) = right.split('-') (used, limit) = left.split('/') result.setdefault(metric, {}).setdefault(time_span, {})[limit] = float(used) except ValueError: return {} return result
parse string like WORKER_TIME-3600=11.7/10000000;DB_QUERY_TIME-21600=4.62/2000 into dict
rtbhouse_sdk/reports_api.py
parse_resource_usage_header
hzegota/rtbhouse-python-sdk
7
python
@staticmethod def parse_resource_usage_header(header): if (not header): return {} result = {} try: for line in header.split(';'): (right, left) = line.split('=') (metric, time_span) = right.split('-') (used, limit) = left.split('/') result.setdefault(metric, {}).setdefault(time_span, {})[limit] = float(used) except ValueError: return {} return result
@staticmethod def parse_resource_usage_header(header): if (not header): return {} result = {} try: for line in header.split(';'): (right, left) = line.split('=') (metric, time_span) = right.split('-') (used, limit) = left.split('/') result.setdefault(metric, {}).setdefault(time_span, {})[limit] = float(used) except ValueError: return {} return result<|docstring|>parse string like WORKER_TIME-3600=11.7/10000000;DB_QUERY_TIME-21600=4.62/2000 into dict<|endoftext|>
72a8eb4ef16ec850ece4a13034c947dbba93713fad72f7a27304f484ec253ca3
def gen_string(min_length: int=8, max_length: int=12) -> str: 'Generate a random string with letter, numbers, and symbols\n Args:\n min_length: minimum length of string\n max_length: maximum length of string\n Returns:\n str Random length string with random characters\n ' allchar = ((string.ascii_letters + string.punctuation) + string.digits) return ''.join((random.choice(allchar) for _ in range(random.randint(min_length, max_length))))
Generate a random string with letter, numbers, and symbols Args: min_length: minimum length of string max_length: maximum length of string Returns: str Random length string with random characters
tests/test_autodict.py
gen_string
WattsUp/AutoDict
0
python
def gen_string(min_length: int=8, max_length: int=12) -> str: 'Generate a random string with letter, numbers, and symbols\n Args:\n min_length: minimum length of string\n max_length: maximum length of string\n Returns:\n str Random length string with random characters\n ' allchar = ((string.ascii_letters + string.punctuation) + string.digits) return .join((random.choice(allchar) for _ in range(random.randint(min_length, max_length))))
def gen_string(min_length: int=8, max_length: int=12) -> str: 'Generate a random string with letter, numbers, and symbols\n Args:\n min_length: minimum length of string\n max_length: maximum length of string\n Returns:\n str Random length string with random characters\n ' allchar = ((string.ascii_letters + string.punctuation) + string.digits) return .join((random.choice(allchar) for _ in range(random.randint(min_length, max_length))))<|docstring|>Generate a random string with letter, numbers, and symbols Args: min_length: minimum length of string max_length: maximum length of string Returns: str Random length string with random characters<|endoftext|>
c2acc6e8d1acdbee959f8c53a58e93fb0c2230d06f8f912388895a49de85d99f
async def guild_create_middleware(self, payload: GatewayDispatch): '\n Middleware for ``on_guild_create``,\n generate the guild class that was created\n\n :param self:\n The current client.\n\n :param payload:\n The data received from the guild creation event.\n\n :return Guild:\n\n ' return ('on_guild_create', [Guild.from_dict(construct_client_dict(self, payload.data))])
Middleware for ``on_guild_create``, generate the guild class that was created :param self: The current client. :param payload: The data received from the guild creation event. :return Guild:
pincer/middleware/guild_create.py
guild_create_middleware
gillesigot/Pincer
0
python
async def guild_create_middleware(self, payload: GatewayDispatch): '\n Middleware for ``on_guild_create``,\n generate the guild class that was created\n\n :param self:\n The current client.\n\n :param payload:\n The data received from the guild creation event.\n\n :return Guild:\n\n ' return ('on_guild_create', [Guild.from_dict(construct_client_dict(self, payload.data))])
async def guild_create_middleware(self, payload: GatewayDispatch): '\n Middleware for ``on_guild_create``,\n generate the guild class that was created\n\n :param self:\n The current client.\n\n :param payload:\n The data received from the guild creation event.\n\n :return Guild:\n\n ' return ('on_guild_create', [Guild.from_dict(construct_client_dict(self, payload.data))])<|docstring|>Middleware for ``on_guild_create``, generate the guild class that was created :param self: The current client. :param payload: The data received from the guild creation event. :return Guild:<|endoftext|>
d0cc4195d5520ee2f0cf6ba5a3e6e1de6685e2fd60cd07b5ed1650861b64459a
def infer_spaces(s, words): 'Uses dynamic programming to infer the location of spaces in a string\n without spaces.' if (not (s[0] == ' ')): s = (' ' + s) s = s.lower().replace(' ', '▁') wordcost = dict(((k, log(((i + 1) * log(len(words))))) for (i, k) in enumerate(words))) maxword = max((len(x) for x in words)) def best_match(i): candidates = enumerate(reversed(cost[max(0, (i - maxword)):i])) return min((((c + wordcost.get(s[((i - k) - 1):i], 1e309)), (k + 1)) for (k, c) in candidates)) cost = [0] for i in range(1, (len(s) + 1)): (c, k) = best_match(i) cost.append(c) out = [] i = len(s) while (i > 0): (c, k) = best_match(i) assert (c == cost[i]) out.append(s[(i - k):i]) i -= k return list(reversed(out))
Uses dynamic programming to infer the location of spaces in a string without spaces.
src/util/bpe.py
infer_spaces
bjerva/cwi18
0
python
def infer_spaces(s, words): 'Uses dynamic programming to infer the location of spaces in a string\n without spaces.' if (not (s[0] == ' ')): s = (' ' + s) s = s.lower().replace(' ', '▁') wordcost = dict(((k, log(((i + 1) * log(len(words))))) for (i, k) in enumerate(words))) maxword = max((len(x) for x in words)) def best_match(i): candidates = enumerate(reversed(cost[max(0, (i - maxword)):i])) return min((((c + wordcost.get(s[((i - k) - 1):i], 1e309)), (k + 1)) for (k, c) in candidates)) cost = [0] for i in range(1, (len(s) + 1)): (c, k) = best_match(i) cost.append(c) out = [] i = len(s) while (i > 0): (c, k) = best_match(i) assert (c == cost[i]) out.append(s[(i - k):i]) i -= k return list(reversed(out))
def infer_spaces(s, words): 'Uses dynamic programming to infer the location of spaces in a string\n without spaces.' if (not (s[0] == ' ')): s = (' ' + s) s = s.lower().replace(' ', '▁') wordcost = dict(((k, log(((i + 1) * log(len(words))))) for (i, k) in enumerate(words))) maxword = max((len(x) for x in words)) def best_match(i): candidates = enumerate(reversed(cost[max(0, (i - maxword)):i])) return min((((c + wordcost.get(s[((i - k) - 1):i], 1e309)), (k + 1)) for (k, c) in candidates)) cost = [0] for i in range(1, (len(s) + 1)): (c, k) = best_match(i) cost.append(c) out = [] i = len(s) while (i > 0): (c, k) = best_match(i) assert (c == cost[i]) out.append(s[(i - k):i]) i -= k return list(reversed(out))<|docstring|>Uses dynamic programming to infer the location of spaces in a string without spaces.<|endoftext|>
255636261fe869889541e8a2bd6e803afcc005c052c785b4705f628c4f60f796
def skip_header(reader: TextIO) -> str: "Skip the header in reader and return the first real piece of data.\n >>> infile = StringIO('Example\\n# Comment\\n# Comment\\nData line\\n')\n >>> skip_header(infile)\n 'Data line\\n'\n " line = reader.readline() line = reader.readline() while line.startswith('#'): line = reader.readline() return line
Skip the header in reader and return the first real piece of data. >>> infile = StringIO('Example\n# Comment\n# Comment\nData line\n') >>> skip_header(infile) 'Data line\n'
chapter-10/exercise04.py
skip_header
krastin/pp-cs3.0
0
python
def skip_header(reader: TextIO) -> str: "Skip the header in reader and return the first real piece of data.\n >>> infile = StringIO('Example\\n# Comment\\n# Comment\\nData line\\n')\n >>> skip_header(infile)\n 'Data line\\n'\n " line = reader.readline() line = reader.readline() while line.startswith('#'): line = reader.readline() return line
def skip_header(reader: TextIO) -> str: "Skip the header in reader and return the first real piece of data.\n >>> infile = StringIO('Example\\n# Comment\\n# Comment\\nData line\\n')\n >>> skip_header(infile)\n 'Data line\\n'\n " line = reader.readline() line = reader.readline() while line.startswith('#'): line = reader.readline() return line<|docstring|>Skip the header in reader and return the first real piece of data. >>> infile = StringIO('Example\n# Comment\n# Comment\nData line\n') >>> skip_header(infile) 'Data line\n'<|endoftext|>
192f0913ade5d98b46243f9868118ee237af2538b081ac57e93a0918f2e6429a
def find_largest(line: str) -> int: "Return the largest value in line, which is a whitespace-delimited string\n of integers that each end with a '.'.\n >>> find_largest('1. 3. 2. 5. 2.')\n 5\n " largest = (- 1) for value in line.split(): v = int(value[:(- 1)]) if (v > largest): largest = v return largest
Return the largest value in line, which is a whitespace-delimited string of integers that each end with a '.'. >>> find_largest('1. 3. 2. 5. 2.') 5
chapter-10/exercise04.py
find_largest
krastin/pp-cs3.0
0
python
def find_largest(line: str) -> int: "Return the largest value in line, which is a whitespace-delimited string\n of integers that each end with a '.'.\n >>> find_largest('1. 3. 2. 5. 2.')\n 5\n " largest = (- 1) for value in line.split(): v = int(value[:(- 1)]) if (v > largest): largest = v return largest
def find_largest(line: str) -> int: "Return the largest value in line, which is a whitespace-delimited string\n of integers that each end with a '.'.\n >>> find_largest('1. 3. 2. 5. 2.')\n 5\n " largest = (- 1) for value in line.split(): v = int(value[:(- 1)]) if (v > largest): largest = v return largest<|docstring|>Return the largest value in line, which is a whitespace-delimited string of integers that each end with a '.'. >>> find_largest('1. 3. 2. 5. 2.') 5<|endoftext|>
eae10311b51dc8f0bd41238b9375d9fd7219534ae44b5d445ff017de67dee401
def process_file(reader: TextIO) -> int: "Read and process reader, which must start with a time_series header.\n Return the largest value after the header. There may be multiple pieces\n of data on each line.\n >>> infile = StringIO('Example\\n 20. 3.\\n 100. 17. 15.\\n')\n >>> process_file(infile)\n 100\n " line = skip_header(reader).strip() largest = find_largest(line) for line in reader: large = find_largest(line) if (large > largest): largest = large return largest
Read and process reader, which must start with a time_series header. Return the largest value after the header. There may be multiple pieces of data on each line. >>> infile = StringIO('Example\n 20. 3.\n 100. 17. 15.\n') >>> process_file(infile) 100
chapter-10/exercise04.py
process_file
krastin/pp-cs3.0
0
python
def process_file(reader: TextIO) -> int: "Read and process reader, which must start with a time_series header.\n Return the largest value after the header. There may be multiple pieces\n of data on each line.\n >>> infile = StringIO('Example\\n 20. 3.\\n 100. 17. 15.\\n')\n >>> process_file(infile)\n 100\n " line = skip_header(reader).strip() largest = find_largest(line) for line in reader: large = find_largest(line) if (large > largest): largest = large return largest
def process_file(reader: TextIO) -> int: "Read and process reader, which must start with a time_series header.\n Return the largest value after the header. There may be multiple pieces\n of data on each line.\n >>> infile = StringIO('Example\\n 20. 3.\\n 100. 17. 15.\\n')\n >>> process_file(infile)\n 100\n " line = skip_header(reader).strip() largest = find_largest(line) for line in reader: large = find_largest(line) if (large > largest): largest = large return largest<|docstring|>Read and process reader, which must start with a time_series header. Return the largest value after the header. There may be multiple pieces of data on each line. >>> infile = StringIO('Example\n 20. 3.\n 100. 17. 15.\n') >>> process_file(infile) 100<|endoftext|>
b726b7c7d330906d9713448fdf57769b4cc1a43a9f6b8d735af4e985efe8f3b1
def __init__(self, temboo_session): '\n Create a new instance of the UnfollowUser Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n ' super(UnfollowUser, self).__init__(temboo_session, '/Library/Tumblr/User/UnfollowUser')
Create a new instance of the UnfollowUser Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied.
temboo/core/Library/Tumblr/User/UnfollowUser.py
__init__
jordanemedlock/psychtruths
7
python
def __init__(self, temboo_session): '\n Create a new instance of the UnfollowUser Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n ' super(UnfollowUser, self).__init__(temboo_session, '/Library/Tumblr/User/UnfollowUser')
def __init__(self, temboo_session): '\n Create a new instance of the UnfollowUser Choreo. A TembooSession object, containing a valid\n set of Temboo credentials, must be supplied.\n ' super(UnfollowUser, self).__init__(temboo_session, '/Library/Tumblr/User/UnfollowUser')<|docstring|>Create a new instance of the UnfollowUser Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied.<|endoftext|>
faa1eb4e6910a605bbc16f4b984696da07cd1f8407a53abf3061f2221f299d6d
def set_URL(self, value): '\n Set the value of the URL input for this Choreo. ((required, string) The URL of the user / blog to follow, without http:. Ex: username.tumblr.com)\n ' super(UnfollowUserInputSet, self)._set_input('URL', value)
Set the value of the URL input for this Choreo. ((required, string) The URL of the user / blog to follow, without http:. Ex: username.tumblr.com)
temboo/core/Library/Tumblr/User/UnfollowUser.py
set_URL
jordanemedlock/psychtruths
7
python
def set_URL(self, value): '\n \n ' super(UnfollowUserInputSet, self)._set_input('URL', value)
def set_URL(self, value): '\n \n ' super(UnfollowUserInputSet, self)._set_input('URL', value)<|docstring|>Set the value of the URL input for this Choreo. ((required, string) The URL of the user / blog to follow, without http:. Ex: username.tumblr.com)<|endoftext|>
90c8e598438068c8c1e015a056c244c82a09ae7b2f743e8afcc62cec50c10db7
def set_APIKey(self, value): '\n Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Tumblr (AKA the OAuth Consumer Key).)\n ' super(UnfollowUserInputSet, self)._set_input('APIKey', value)
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Tumblr (AKA the OAuth Consumer Key).)
temboo/core/Library/Tumblr/User/UnfollowUser.py
set_APIKey
jordanemedlock/psychtruths
7
python
def set_APIKey(self, value): '\n \n ' super(UnfollowUserInputSet, self)._set_input('APIKey', value)
def set_APIKey(self, value): '\n \n ' super(UnfollowUserInputSet, self)._set_input('APIKey', value)<|docstring|>Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Tumblr (AKA the OAuth Consumer Key).)<|endoftext|>