id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8091196 | """render sample command line tool.
This is invoked internally by ``two4two.blender.render``.
"""
import json
import os
from pathlib import Path
import sys
import bpy
# The two4two package is not importable from Blender's bundled Python,
# so we therefore add the package's base directory to sys.path.
blend_dir = os.path.dirname(bpy.data.filepath)
# NOTE(review): blend_dir appears unused below — presumably kept for
# debugging; confirm before removing.
package_base_dir = str(Path(__file__).parents[2])
if package_base_dir not in sys.path:
    sys.path.append(package_base_dir)
from two4two._blender.scene import Scene # noqa: E402
from two4two.scene_parameters import SceneParameters # noqa: E402
def _render_files(param_file: str, save_location: str, save_blender_file: str):
    """Render every scene described in ``param_file`` into ``save_location``.

    ``param_file`` holds one JSON-encoded SceneParameters object per line.
    ``save_blender_file`` is the string ``"True"`` (passed through the CLI)
    when the .blender scene files should be saved next to the renders.
    """
    with open(param_file) as fparam:
        # Iterate the file lazily; each line is one scene description.
        for line in fparam:
            params = SceneParameters.load(json.loads(line))
            scene = Scene(params)
            scene.render(
                os.path.join(save_location, params.filename),
                os.path.join(save_location, params.mask_filename),
            )
            if save_blender_file == "True":
                blender_path = os.path.join(save_location,
                                            f"{params.id}.blender")
                scene.save_blender_file(blender_path)
if __name__ == '__main__':
    try:
        # When the test-suite launches blender, collect coverage data;
        # outside of tests the module is absent and this is a no-op.
        import coverage
        coverage.process_startup()
    except ImportError:
        pass
    # The last three CLI arguments are ours; blender prepends its own.
    param_file, save_location, save_blender_file = sys.argv[-3:]
    _render_files(param_file, save_location, save_blender_file)
| StarcoderdataPython |
6635275 | <reponame>hldh214/libcloud
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VMware vCloud driver.
"""
import copy
import sys
import re
import base64
import os
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import b
from libcloud.utils.py3 import next
from libcloud.utils.py3 import ET
urlparse = urlparse.urlparse
import time
from xml.parsers.expat import ExpatError
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver, NodeLocation
from libcloud.compute.base import NodeSize, NodeImage
"""
From vcloud api "The VirtualQuantity element defines the number of MB
of memory. This should be either 512 or a multiple of 1024 (1 GB)."
"""
VIRTUAL_MEMORY_VALS = [512] + [1024 * i for i in range(1, 9)]
# Default timeout (in seconds) for long running tasks
DEFAULT_TASK_COMPLETION_TIMEOUT = 600
DEFAULT_API_VERSION = '0.8'
"""
Valid vCloud API v1.5 input values.
"""
VIRTUAL_CPU_VALS_1_5 = [i for i in range(1, 9)]
FENCE_MODE_VALS_1_5 = ['bridged', 'isolated', 'natRouted']
IP_MODE_VALS_1_5 = ['POOL', 'DHCP', 'MANUAL', 'NONE']
def fixxpath(root, xpath):
    """Qualify every element of *xpath* with *root*'s XML namespace.

    ElementTree requires fully qualified ``{namespace}tag`` names in its
    xpath expressions, so ``"A/B"`` becomes ``"{ns}A/{ns}B"`` where ``ns``
    is the namespace of *root*'s tag.
    """
    # root.tag looks like '{namespace}tag'; take just the namespace URI.
    # (The tag name after '}' was previously unpacked but never used.)
    namespace = root.tag[1:].split("}", 1)[0]
    return "/".join("{%s}%s" % (namespace, e) for e in xpath.split("/"))
def get_url_path(url):
    """Return only the path component of *url*, ignoring stray whitespace."""
    cleaned = url.strip()
    return urlparse(cleaned).path
class Vdc(object):
    """Virtual datacenter (vDC) representation.

    Capacity attributes (allocation_model, cpu, memory, storage) are
    optional and left as ``None`` when the caller does not supply them.
    """

    def __init__(self, id, name, driver, allocation_model=None, cpu=None,
                 memory=None, storage=None):
        self.id, self.name, self.driver = id, name, driver
        self.allocation_model = allocation_model
        self.cpu, self.memory, self.storage = cpu, memory, storage

    def __repr__(self):
        details = (self.id, self.name, self.driver.name)
        return '<Vdc: id=%s, name=%s, driver=%s ...>' % details
class Capacity(object):
    """CPU, memory or storage capacity of a vDC."""

    def __init__(self, limit, used, units):
        self.limit, self.used, self.units = limit, used, units

    def __repr__(self):
        fields = (self.limit, self.used, self.units)
        return '<Capacity: limit=%s, used=%s, units=%s>' % fields
class ControlAccess(object):
    """Control-access (sharing) settings attached to a node."""

    class AccessLevel(object):
        # The three access levels understood by the vCloud API.
        READ_ONLY = 'ReadOnly'
        CHANGE = 'Change'
        FULL_CONTROL = 'FullControl'

    def __init__(self, node, everyone_access_level, subjects=None):
        self.node = node
        self.everyone_access_level = everyone_access_level
        # Default to a fresh list; never share a mutable default.
        self.subjects = subjects or []

    def __repr__(self):
        return ('<ControlAccess: node=%s, everyone_access_level=%s, '
                'subjects=%s>'
                % (self.node, self.everyone_access_level, self.subjects))
class Subject(object):
    """A user or group subject appearing in control-access settings."""

    def __init__(self, type, name, access_level, id=None):
        self.type, self.name = type, name
        self.access_level, self.id = access_level, id

    def __repr__(self):
        info = (self.type, self.name, self.access_level)
        return '<Subject: type=%s, name=%s, access_level=%s>' % info
class InstantiateVAppXML(object):
    """Builder for the v0.8 ``InstantiateVAppTemplateParams`` XML payload.

    The resulting document describes the vApp to create: its name, the
    source template, optional product properties (password/row/group),
    CPU and memory hardware items, and the network association.
    """

    def __init__(self, name, template, net_href, cpus, memory,
                 password=None, row=None, group=None):
        # NOTE: the default for ``password`` was a corrupted placeholder
        # token in the previous revision; ``None`` (no password property)
        # is the intended default.
        self.name = name
        self.template = template
        self.net_href = net_href
        self.cpus = cpus
        self.memory = memory
        self.password = password
        self.row = row
        self.group = group
        self._build_xmltree()

    def tostring(self):
        """Serialize the assembled tree to an XML byte string."""
        return ET.tostring(self.root)

    def _build_xmltree(self):
        """Assemble the full parameter document under ``self.root``."""
        self.root = self._make_instantiation_root()
        self._add_vapp_template(self.root)
        instantiation_params = ET.SubElement(self.root,
                                             "InstantiationParams")
        # product and virtual hardware
        self._make_product_section(instantiation_params)
        self._make_virtual_hardware(instantiation_params)
        network_config_section = ET.SubElement(instantiation_params,
                                               "NetworkConfigSection")
        network_config = ET.SubElement(network_config_section,
                                       "NetworkConfig")
        self._add_network_association(network_config)

    def _make_instantiation_root(self):
        return ET.Element(
            "InstantiateVAppTemplateParams",
            {'name': self.name,
             'xml:lang': 'en',
             'xmlns': "http://www.vmware.com/vcloud/v0.8",
             'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
        )

    def _add_vapp_template(self, parent):
        return ET.SubElement(
            parent,
            "VAppTemplate",
            {'href': self.template}
        )

    def _make_product_section(self, parent):
        """Emit the ProductSection with any configured ovf properties."""
        prod_section = ET.SubElement(
            parent,
            "ProductSection",
            {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8",
             'xmlns:ovf': "http://schemas.dmtf.org/ovf/envelope/1"}
        )
        if self.password:
            self._add_property(prod_section, 'password', self.password)
        if self.row:
            self._add_property(prod_section, 'row', self.row)
        if self.group:
            self._add_property(prod_section, 'group', self.group)
        return prod_section

    def _add_property(self, parent, ovfkey, ovfvalue):
        return ET.SubElement(
            parent,
            "Property",
            {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1',
             'ovf:key': ovfkey,
             'ovf:value': ovfvalue}
        )

    def _make_virtual_hardware(self, parent):
        """Emit the hardware section with CPU and memory items."""
        vh = ET.SubElement(
            parent,
            "VirtualHardwareSection",
            {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8"}
        )
        self._add_cpu(vh)
        self._add_memory(vh)
        return vh

    def _add_cpu(self, parent):
        # ResourceType 3 is "processor" in the CIM schema used here.
        cpu_item = ET.SubElement(
            parent,
            "Item",
            {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"}
        )
        self._add_instance_id(cpu_item, '1')
        self._add_resource_type(cpu_item, '3')
        self._add_virtual_quantity(cpu_item, self.cpus)
        return cpu_item

    def _add_memory(self, parent):
        # ResourceType 4 is "memory" in the CIM schema used here.
        mem_item = ET.SubElement(
            parent,
            'Item',
            {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"}
        )
        self._add_instance_id(mem_item, '2')
        self._add_resource_type(mem_item, '4')
        self._add_virtual_quantity(mem_item, self.memory)
        return mem_item

    def _add_instance_id(self, parent, id):
        elm = ET.SubElement(
            parent,
            'InstanceID',
            {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData'}
        )
        elm.text = id
        return elm

    def _add_resource_type(self, parent, type):
        elm = ET.SubElement(
            parent,
            'ResourceType',
            {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData'}
        )
        elm.text = type
        return elm

    def _add_virtual_quantity(self, parent, amount):
        elm = ET.SubElement(
            parent,
            'VirtualQuantity',
            {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData'}
        )
        elm.text = amount
        return elm

    def _add_network_association(self, parent):
        return ET.SubElement(
            parent,
            'NetworkAssociation',
            {'href': self.net_href}
        )
class VCloudResponse(XmlResponse):
    """Response wrapper that accepts the vCloud API's success statuses."""

    def success(self):
        ok_statuses = (httplib.OK, httplib.CREATED,
                       httplib.NO_CONTENT, httplib.ACCEPTED)
        return self.status in ok_statuses
class VCloudConnection(ConnectionUserAndKey):
    """
    Connection class for the vCloud driver
    """
    responseCls = VCloudResponse
    # Session cookie captured from the v0.8 login response; None until
    # the first successful login.
    token = None
    host = None
    def request(self, *args, **kwargs):
        """Log in first (if needed), then issue the actual request."""
        self._get_auth_token()
        return super(VCloudConnection, self).request(*args, **kwargs)
    def check_org(self):
        # the only way to get our org is by logging in.
        self._get_auth_token()
    def _get_auth_headers(self):
        """Some providers need different headers than others"""
        return {
            'Authorization': "Basic %s" % base64.b64encode(
                b('%s:%s' % (self.user_id, self.key))).decode('utf-8'),
            'Content-Length': '0',
            'Accept': 'application/*+xml'
        }
    def _get_auth_token(self):
        """Log in against the v0.8 endpoint and cache the session cookie.

        Also resolves ``driver.org`` from the Org link in the login body.
        Raises InvalidCredsError when no session cookie is returned.
        """
        if not self.token:
            self.connection.request(method='POST', url='/api/v0.8/login',
                                    headers=self._get_auth_headers())
            resp = self.connection.getresponse()
            headers = resp.headers
            body = ET.XML(resp.text)
            try:
                self.token = headers['set-cookie']
            except KeyError:
                raise InvalidCredsError()
            # The Org link is the entry point for all later requests.
            self.driver.org = get_url_path(
                body.find(fixxpath(body, 'Org')).get('href')
            )
    def add_default_headers(self, headers):
        """Attach the cached session cookie to every outgoing request."""
        headers['Cookie'] = self.token
        headers['Accept'] = 'application/*+xml'
        return headers
class VCloudNodeDriver(NodeDriver):
    """
    vCloud node driver
    """
    type = Provider.VCLOUD
    name = 'vCloud'
    website = 'http://www.vmware.com/products/vcloud/'
    connectionCls = VCloudConnection
    org = None
    _vdcs = None

    # vApp status code -> libcloud node state (v0.8 API).
    NODE_STATE_MAP = {'0': NodeState.PENDING,
                      '1': NodeState.PENDING,
                      '2': NodeState.PENDING,
                      '3': NodeState.PENDING,
                      '4': NodeState.RUNNING}

    features = {'create_node': ['password']}

    def __new__(cls, key, secret=None, secure=True, host=None, port=None,
                api_version=DEFAULT_API_VERSION, **kwargs):
        # Dispatch to the driver subclass matching the requested API
        # version; instantiating a subclass directly bypasses this.
        if cls is VCloudNodeDriver:
            if api_version == '0.8':
                cls = VCloudNodeDriver
            elif api_version == '1.5':
                cls = VCloud_1_5_NodeDriver
            elif api_version == '5.1':
                cls = VCloud_5_1_NodeDriver
            elif api_version == '5.5':
                cls = VCloud_5_5_NodeDriver
            else:
                raise NotImplementedError(
                    "No VCloudNodeDriver found for API version %s" %
                    (api_version))
        return super(VCloudNodeDriver, cls).__new__(cls)

    @property
    def vdcs(self):
        """
        vCloud virtual data centers (vDCs).

        :return: list of vDC objects
        :rtype: ``list`` of :class:`Vdc`
        """
        if not self._vdcs:
            self.connection.check_org()  # make sure the org is set.
            res = self.connection.request(self.org)
            self._vdcs = [
                self._to_vdc(
                    self.connection.request(get_url_path(i.get('href'))).object
                )
                for i in res.object.findall(fixxpath(res.object, "Link"))
                if i.get('type') == 'application/vnd.vmware.vcloud.vdc+xml'
            ]
        return self._vdcs

    def _to_vdc(self, vdc_elm):
        """Build a minimal :class:`Vdc` from its XML element."""
        return Vdc(vdc_elm.get('href'), vdc_elm.get('name'), self)

    def _get_vdc(self, vdc_name):
        """Return the vDC named ``vdc_name``, or the first vDC when None.

        :raises ValueError: if no vDC with that name exists.
        """
        vdc = None
        if not vdc_name:
            # Return the first organisation VDC found
            vdc = self.vdcs[0]
        else:
            for v in self.vdcs:
                if v.name == vdc_name:
                    vdc = v
        if vdc is None:
            # Interpolate the name into the message; previously it was
            # passed as a second exception argument by mistake.
            raise ValueError('%s virtual data centre could not be found'
                             % vdc_name)
        return vdc

    @property
    def networks(self):
        """All available networks across the organisation's vDCs."""
        networks = []
        for vdc in self.vdcs:
            res = self.connection.request(get_url_path(vdc.id)).object
            networks.extend(
                [network
                 for network in res.findall(
                     fixxpath(res, 'AvailableNetworks/Network')
                 )]
            )
        return networks

    def _to_image(self, image):
        """Convert a vAppTemplate XML element into a NodeImage."""
        image = NodeImage(id=image.get('href'),
                          name=image.get('name'),
                          driver=self.connection.driver)
        return image

    def _to_node(self, elm):
        """Convert a vApp XML element into a Node with its IP addresses."""
        state = self.NODE_STATE_MAP[elm.get('status')]
        name = elm.get('name')
        public_ips = []
        private_ips = []

        # Following code to find private IPs works for Terremark
        connections = elm.findall('%s/%s' % (
            '{http://schemas.dmtf.org/ovf/envelope/1}NetworkConnectionSection',
            fixxpath(elm, 'NetworkConnection'))
        )
        if not connections:
            connections = elm.findall(
                fixxpath(
                    elm,
                    'Children/Vm/NetworkConnectionSection/NetworkConnection'))

        for connection in connections:
            ips = [ip.text
                   for ip
                   in connection.findall(fixxpath(elm, "IpAddress"))]
            if connection.get('Network') == 'Internal':
                private_ips.extend(ips)
            else:
                public_ips.extend(ips)

        node = Node(id=elm.get('href'),
                    name=name,
                    state=state,
                    public_ips=public_ips,
                    private_ips=private_ips,
                    driver=self.connection.driver)
        return node

    def _get_catalog_hrefs(self):
        """Return hrefs of all catalogs linked from the organisation."""
        res = self.connection.request(self.org)
        catalogs = [
            i.get('href')
            for i in res.object.findall(fixxpath(res.object, "Link"))
            if i.get('type') == 'application/vnd.vmware.vcloud.catalog+xml'
        ]
        return catalogs

    def _wait_for_task_completion(self, task_href,
                                  timeout=DEFAULT_TASK_COMPLETION_TIMEOUT):
        """Poll ``task_href`` every 5 seconds until it reports success.

        :raises Exception: on task error, cancellation, or timeout.
        """
        start_time = time.time()
        res = self.connection.request(get_url_path(task_href))
        status = res.object.get('status')
        while status != 'success':
            if status == 'error':
                # Get error reason from the response body
                error_elem = res.object.find(fixxpath(res.object, 'Error'))
                error_msg = "Unknown error"
                if error_elem is not None:
                    error_msg = error_elem.get('message')
                raise Exception("Error status returned by task %s.: %s"
                                % (task_href, error_msg))
            if status == 'canceled':
                raise Exception("Canceled status returned by task %s."
                                % task_href)
            if (time.time() - start_time >= timeout):
                raise Exception("Timeout (%s sec) while waiting for task %s."
                                % (timeout, task_href))
            time.sleep(5)
            res = self.connection.request(get_url_path(task_href))
            status = res.object.get('status')

    def destroy_node(self, node):
        """Power off, undeploy (both best-effort) and delete ``node``."""
        node_path = get_url_path(node.id)
        # blindly poweroff node, it will throw an exception if already off
        try:
            res = self.connection.request('%s/power/action/poweroff'
                                          % node_path,
                                          method='POST')
            self._wait_for_task_completion(res.object.get('href'))
        except Exception:
            pass

        try:
            res = self.connection.request('%s/action/undeploy' % node_path,
                                          method='POST')
            self._wait_for_task_completion(res.object.get('href'))
        except ExpatError:
            # The undeploy response is malformed XML atm.
            # We can remove this when the providers fix the problem.
            pass
        except Exception:
            # Some vendors don't implement undeploy at all yet,
            # so catch this and move on.
            pass

        res = self.connection.request(node_path, method='DELETE')
        return res.status == httplib.ACCEPTED

    def reboot_node(self, node):
        """Issue a power reset against ``node``."""
        res = self.connection.request('%s/power/action/reset'
                                      % get_url_path(node.id),
                                      method='POST')
        return res.status in [httplib.ACCEPTED, httplib.NO_CONTENT]

    def list_nodes(self):
        return self.ex_list_nodes()

    def ex_list_nodes(self, vdcs=None):
        """
        List all nodes across all vDCs. Using 'vdcs' you can specify which
        vDCs should be queried.

        :param vdcs: None, vDC or a list of vDCs to query. If None all vDCs
                     will be queried.
        :type vdcs: :class:`Vdc`

        :rtype: ``list`` of :class:`Node`
        """
        if not vdcs:
            vdcs = self.vdcs
        if not isinstance(vdcs, (list, tuple)):
            vdcs = [vdcs]
        nodes = []
        for vdc in vdcs:
            res = self.connection.request(get_url_path(vdc.id))
            elms = res.object.findall(fixxpath(
                res.object, "ResourceEntities/ResourceEntity")
            )
            vapps = [
                (i.get('name'), i.get('href'))
                for i in elms if
                i.get('type') == 'application/vnd.vmware.vcloud.vApp+xml' and
                i.get('name')
            ]
            for vapp_name, vapp_href in vapps:
                try:
                    res = self.connection.request(
                        get_url_path(vapp_href),
                        headers={'Content-Type':
                                 'application/vnd.vmware.vcloud.vApp+xml'}
                    )
                    nodes.append(self._to_node(res.object))
                except Exception:
                    # The vApp was probably removed since the previous vDC
                    # query, ignore
                    e = sys.exc_info()[1]
                    if not (e.args[0].tag.endswith('Error') and
                            e.args[0].get('minorErrorCode') ==
                            'ACCESS_TO_RESOURCE_IS_FORBIDDEN'):
                        raise
        return nodes

    def _to_size(self, ram):
        """Build a NodeSize for a given amount of RAM (MB)."""
        ns = NodeSize(
            id=None,
            name="%s Ram" % ram,
            ram=ram,
            disk=None,
            bandwidth=None,
            price=None,
            driver=self.connection.driver
        )
        return ns

    def list_sizes(self, location=None):
        """Sizes are fixed by the API's allowed virtual memory values."""
        sizes = [self._to_size(i) for i in VIRTUAL_MEMORY_VALS]
        return sizes

    def _get_catalogitems_hrefs(self, catalog):
        """Given a catalog href returns contained catalog item hrefs"""
        res = self.connection.request(
            get_url_path(catalog),
            headers={
                'Content-Type': 'application/vnd.vmware.vcloud.catalog+xml'
            }
        ).object
        cat_items = res.findall(fixxpath(res, "CatalogItems/CatalogItem"))
        cat_item_hrefs = [i.get('href')
                          for i in cat_items
                          if i.get('type') ==
                          'application/vnd.vmware.vcloud.catalogItem+xml']
        return cat_item_hrefs

    def _get_catalogitem(self, catalog_item):
        """Given a catalog item href returns elementree"""
        res = self.connection.request(
            get_url_path(catalog_item),
            headers={
                'Content-Type': 'application/vnd.vmware.vcloud.catalogItem+xml'
            }
        ).object
        return res

    def list_images(self, location=None):
        """List vApp templates both from the vDCs and from the catalogs."""
        images = []
        for vdc in self.vdcs:
            res = self.connection.request(get_url_path(vdc.id)).object
            res_ents = res.findall(fixxpath(
                res, "ResourceEntities/ResourceEntity")
            )
            images += [
                self._to_image(i)
                for i in res_ents
                if i.get('type') ==
                'application/vnd.vmware.vcloud.vAppTemplate+xml'
            ]

        for catalog in self._get_catalog_hrefs():
            for cat_item in self._get_catalogitems_hrefs(catalog):
                res = self._get_catalogitem(cat_item)
                res_ents = res.findall(fixxpath(res, 'Entity'))
                images += [
                    self._to_image(i)
                    for i in res_ents
                    if i.get('type') ==
                    'application/vnd.vmware.vcloud.vAppTemplate+xml'
                ]

        def idfun(image):
            return image.id

        # Templates can appear both in a vDC and in a catalog; dedupe.
        return self._uniquer(images, idfun)

    def _uniquer(self, seq, idfun=None):
        """Remove duplicates from ``seq``, keeping first-seen order."""
        if idfun is None:
            def idfun(x):
                return x
        seen = {}
        result = []
        for item in seq:
            marker = idfun(item)
            if marker in seen:
                continue
            seen[marker] = 1
            result.append(item)
        return result

    def create_node(self, **kwargs):
        """
        Creates and returns node.

        :keyword ex_network: link to a "Network" e.g.,
                             ``https://services.vcloudexpress...``
        :type ex_network: ``str``

        :keyword ex_vdc: Name of organisation's virtual data
                         center where vApp VMs will be deployed.
        :type ex_vdc: ``str``

        :keyword ex_cpus: number of virtual cpus (limit depends on provider)
        :type ex_cpus: ``int``

        :type ex_row: ``str``

        :type ex_group: ``str``
        """
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        # Some providers don't require a network link
        try:
            network = kwargs.get('ex_network', self.networks[0].get('href'))
        except IndexError:
            network = ''

        # Resolve the root password from the supplied (or generated) auth;
        # the previous revision had corrupted placeholder tokens here.
        auth = self._get_and_check_auth(kwargs.get('auth'))
        password = auth.password

        instantiate_xml = InstantiateVAppXML(
            name=name,
            template=image.id,
            net_href=network,
            cpus=str(kwargs.get('ex_cpus', 1)),
            memory=str(size.ram),
            password=password,
            row=kwargs.get('ex_row', None),
            group=kwargs.get('ex_group', None)
        )
        vdc = self._get_vdc(kwargs.get('ex_vdc', None))

        # Instantiate VM and get identifier.
        content_type = \
            'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
        res = self.connection.request(
            '%s/action/instantiateVAppTemplate' % get_url_path(vdc.id),
            data=instantiate_xml.tostring(),
            method='POST',
            headers={'Content-Type': content_type}
        )
        vapp_path = get_url_path(res.object.get('href'))

        # Deploy the VM from the identifier.
        res = self.connection.request('%s/action/deploy' % vapp_path,
                                      method='POST')
        self._wait_for_task_completion(res.object.get('href'))

        # Power on the VM.
        res = self.connection.request('%s/power/action/powerOn' % vapp_path,
                                      method='POST')

        res = self.connection.request(vapp_path)
        node = self._to_node(res.object)

        if getattr(auth, "generated", False):
            node.extra['password'] = auth.password

        return node
class HostingComConnection(VCloudConnection):
    """vCloud connection subclass for Hosting.com."""

    host = "vcloud.safesecureweb.com"

    def _get_auth_headers(self):
        """hosting.com doesn't follow the standard vCloud authentication API"""
        credentials = b('%s:%s' % (self.user_id, self.key))
        return {
            'Authentication': base64.b64encode(credentials),
            'Content-Length': '0'
        }
class HostingComDriver(VCloudNodeDriver):
    """vCloud node driver for Hosting.com."""
    connectionCls = HostingComConnection
class TerremarkConnection(VCloudConnection):
    """vCloud connection subclass for Terremark."""
    host = "services.vcloudexpress.terremark.com"
class TerremarkDriver(VCloudNodeDriver):
    """vCloud node driver for Terremark."""

    connectionCls = TerremarkConnection

    def list_locations(self):
        """Terremark exposes a single fixed location."""
        return [NodeLocation(0, "Terremark Texas", 'US', self)]
class VCloud_1_5_Connection(VCloudConnection):
    """Connection for the v1.5 session-based authentication scheme."""
    def _get_auth_headers(self):
        """Compatibility for using v1.5 API under vCloud Director 5.1"""
        return {
            'Authorization': "Basic %s" % base64.b64encode(
                b('%s:%s' % (self.user_id, self.key))).decode('utf-8'),
            'Content-Length': '0',
            'Accept': 'application/*+xml;version=1.5'
        }
    def _get_auth_token(self):
        """Create an API session, cache its token and resolve the org URL.

        Raises InvalidCredsError when the login response carries no
        ``x-vcloud-authorization`` header.
        """
        if not self.token:
            # Log In
            self.connection.request(method='POST', url='/api/sessions',
                                    headers=self._get_auth_headers())
            resp = self.connection.getresponse()
            headers = resp.headers
            # Set authorization token
            try:
                self.token = headers['x-vcloud-authorization']
            except KeyError:
                raise InvalidCredsError()
            # Get the URL of the Organization
            body = ET.XML(resp.text)
            self.org_name = body.get('org')
            org_list_url = get_url_path(
                next((link for link in body.findall(fixxpath(body, 'Link'))
                     if link.get('type') ==
                     'application/vnd.vmware.vcloud.orgList+xml')).get('href')
            )
            if self.proxy_url is not None:
                self.connection.set_http_proxy(self.proxy_url)
            # Fetch the org list and pick the entry matching our org name.
            self.connection.request(method='GET', url=org_list_url,
                                    headers=self.add_default_headers({}))
            body = ET.XML(self.connection.getresponse().text)
            self.driver.org = get_url_path(
                next((org for org in body.findall(fixxpath(body, 'Org'))
                     if org.get('name') == self.org_name)).get('href')
            )
    def add_default_headers(self, headers):
        """Attach the API version Accept header and the session token."""
        headers['Accept'] = 'application/*+xml;version=1.5'
        headers['x-vcloud-authorization'] = self.token
        return headers
class VCloud_5_5_Connection(VCloud_1_5_Connection):
    """Connection that negotiates version 5.5 of the vCloud API."""

    def add_default_headers(self, headers):
        """Attach the 5.5 Accept header and the session token."""
        headers.update({
            'Accept': 'application/*+xml;version=5.5',
            'x-vcloud-authorization': self.token,
        })
        return headers
class Instantiate_1_5_VAppXML(object):
    """Builder for the v1.5 ``InstantiateVAppTemplateParams`` payload."""

    def __init__(self, name, template, network, vm_network=None,
                 vm_fence=None):
        self.name = name
        self.template = template
        self.network = network
        self.vm_network = vm_network
        self.vm_fence = vm_fence
        self._build_xmltree()

    def tostring(self):
        """Serialize the assembled tree to an XML byte string."""
        return ET.tostring(self.root)

    def _build_xmltree(self):
        """Assemble the parameter document; network config is optional."""
        self.root = self._make_instantiation_root()
        if self.network is not None:
            inst_params = ET.SubElement(self.root, 'InstantiationParams')
            net_section = ET.SubElement(inst_params, 'NetworkConfigSection')
            ET.SubElement(
                net_section,
                'Info',
                {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1'}
            )
            net_config = ET.SubElement(net_section, 'NetworkConfig')
            self._add_network_association(net_config)
        self._add_vapp_template(self.root)

    def _make_instantiation_root(self):
        attrs = {'name': self.name,
                 'deploy': 'false',
                 'powerOn': 'false',
                 'xml:lang': 'en',
                 'xmlns': 'http://www.vmware.com/vcloud/v1.5',
                 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
        return ET.Element('InstantiateVAppTemplateParams', attrs)

    def _add_vapp_template(self, parent):
        return ET.SubElement(parent, 'Source', {'href': self.template})

    def _add_network_association(self, parent):
        # Use the parent network's own name unless a custom one was given.
        if self.vm_network is None:
            parent.set('networkName', self.network.get('name'))
        else:
            parent.set('networkName', self.vm_network)
        configuration = ET.SubElement(parent, 'Configuration')
        ET.SubElement(configuration, 'ParentNetwork',
                      {'href': self.network.get('href')})
        # Inherit the parent network's fence mode when none was requested.
        if self.vm_fence is None:
            fencemode = self.network.find(
                fixxpath(self.network, 'Configuration/FenceMode')).text
        else:
            fencemode = self.vm_fence
        ET.SubElement(configuration, 'FenceMode').text = fencemode
class VCloud_1_5_NodeDriver(VCloudNodeDriver):
    """Node driver speaking version 1.5 of the vCloud API."""
    connectionCls = VCloud_1_5_Connection
    # vApp status code -> libcloud node state. Based on
    # http://pubs.vmware.com/vcloud-api-1-5/api_prog/
    # GUID-843BE3AD-5EF6-4442-B864-BCAE44A51867.html
    NODE_STATE_MAP = {'-1': NodeState.UNKNOWN,
                      '0': NodeState.PENDING,
                      '1': NodeState.PENDING,
                      '2': NodeState.PENDING,
                      '3': NodeState.PENDING,
                      '4': NodeState.RUNNING,
                      '5': NodeState.RUNNING,
                      '6': NodeState.UNKNOWN,
                      '7': NodeState.UNKNOWN,
                      '8': NodeState.STOPPED,
                      '9': NodeState.UNKNOWN,
                      '10': NodeState.UNKNOWN}
def list_locations(self):
return [NodeLocation(id=self.connection.host,
name=self.connection.host, country="N/A", driver=self)]
def ex_find_node(self, node_name, vdcs=None):
"""
Searches for node across specified vDCs. This is more effective than
querying all nodes to get a single instance.
:param node_name: The name of the node to search for
:type node_name: ``str``
:param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs
will be searched.
:type vdcs: :class:`Vdc`
:return: node instance or None if not found
:rtype: :class:`Node` or ``None``
"""
if not vdcs:
vdcs = self.vdcs
if not getattr(vdcs, '__iter__', False):
vdcs = [vdcs]
for vdc in vdcs:
res = self.connection.request(get_url_path(vdc.id))
xpath = fixxpath(res.object, "ResourceEntities/ResourceEntity")
entity_elems = res.object.findall(xpath)
for entity_elem in entity_elems:
if entity_elem.get('type') == \
'application/vnd.vmware.vcloud.vApp+xml' and \
entity_elem.get('name') == node_name:
path = get_url_path(entity_elem.get('href'))
headers = {'Content-Type':
'application/vnd.vmware.vcloud.vApp+xml'}
res = self.connection.request(path,
headers=headers)
return self._to_node(res.object)
return None
def destroy_node(self, node):
try:
self.ex_undeploy_node(node)
except Exception:
# Some vendors don't implement undeploy at all yet,
# so catch this and move on.
pass
res = self.connection.request(get_url_path(node.id), method='DELETE')
return res.status == httplib.ACCEPTED
def reboot_node(self, node):
res = self.connection.request('%s/power/action/reset'
% get_url_path(node.id),
method='POST')
if res.status in [httplib.ACCEPTED, httplib.NO_CONTENT]:
self._wait_for_task_completion(res.object.get('href'))
return True
else:
return False
def ex_deploy_node(self, node, ex_force_customization=False):
"""
Deploys existing node. Equal to vApp "start" operation.
:param node: The node to be deployed
:type node: :class:`Node`
:param ex_force_customization: Used to specify whether to force
customization on deployment,
if not set default value is False.
:type ex_force_customization: ``bool``
:rtype: :class:`Node`
"""
if ex_force_customization:
vms = self._get_vm_elements(node.id)
for vm in vms:
self._ex_deploy_node_or_vm(vm.get('href'),
ex_force_customization=True)
else:
self._ex_deploy_node_or_vm(node.id)
res = self.connection.request(get_url_path(node.id))
return self._to_node(res.object)
def _ex_deploy_node_or_vm(self, vapp_or_vm_path,
ex_force_customization=False):
data = {'powerOn': 'true',
'forceCustomization': str(ex_force_customization).lower(),
'xmlns': 'http://www.vmware.com/vcloud/v1.5'}
deploy_xml = ET.Element('DeployVAppParams', data)
path = get_url_path(vapp_or_vm_path)
headers = {
'Content-Type':
'application/vnd.vmware.vcloud.deployVAppParams+xml'
}
res = self.connection.request('%s/action/deploy' % path,
data=ET.tostring(deploy_xml),
method='POST',
headers=headers)
self._wait_for_task_completion(res.object.get('href'))
    def ex_undeploy_node(self, node):
        """
        Undeploys existing node. Equal to vApp "stop" operation.

        :param node: The node to be undeployed
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        data = {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}
        undeploy_xml = ET.Element('UndeployVAppParams', data)
        undeploy_power_action_xml = ET.SubElement(undeploy_xml,
                                                  'UndeployPowerAction')
        # Prefer a graceful guest shutdown first.
        undeploy_power_action_xml.text = 'shutdown'
        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.undeployVAppParams+xml'
        }
        try:
            res = self.connection.request(
                '%s/action/undeploy' % get_url_path(node.id),
                data=ET.tostring(undeploy_xml),
                method='POST',
                headers=headers)
            self._wait_for_task_completion(res.object.get('href'))
        except Exception:
            # The graceful shutdown failed; retry the undeploy with a
            # hard powerOff action instead.
            undeploy_power_action_xml.text = 'powerOff'
            res = self.connection.request(
                '%s/action/undeploy' % get_url_path(node.id),
                data=ET.tostring(undeploy_xml),
                method='POST',
                headers=headers)
            self._wait_for_task_completion(res.object.get('href'))
        # Return the node's refreshed state.
        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)
    def ex_power_off_node(self, node):
        """
        Powers off all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered on.

        :param node: The node to be powered off
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'powerOff')
    def ex_power_on_node(self, node):
        """
        Powers on all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered off or suspended.

        :param node: The node to be powered on
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'powerOn')
    def ex_shutdown_node(self, node):
        """
        Shuts down all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered on.

        :param node: The node to be shut down
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'shutdown')
    def ex_suspend_node(self, node):
        """
        Suspends all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered on.

        :param node: The node to be suspended
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'suspend')
def _perform_power_operation(self, node, operation):
res = self.connection.request(
'%s/power/action/%s' % (get_url_path(node.id), operation),
method='POST')
self._wait_for_task_completion(res.object.get('href'))
res = self.connection.request(get_url_path(node.id))
return self._to_node(res.object)
def ex_get_control_access(self, node):
"""
Returns the control access settings for specified node.
:param node: node to get the control access for
:type node: :class:`Node`
:rtype: :class:`ControlAccess`
"""
res = self.connection.request(
'%s/controlAccess' % get_url_path(node.id))
everyone_access_level = None
is_shared_elem = res.object.find(
fixxpath(res.object, "IsSharedToEveryone"))
if is_shared_elem is not None and is_shared_elem.text == 'true':
everyone_access_level = res.object.find(
fixxpath(res.object, "EveryoneAccessLevel")).text
# Parse all subjects
subjects = []
xpath = fixxpath(res.object, "AccessSettings/AccessSetting")
for elem in res.object.findall(xpath):
access_level = elem.find(fixxpath(res.object, "AccessLevel")).text
subject_elem = elem.find(fixxpath(res.object, "Subject"))
if subject_elem.get('type') == \
'application/vnd.vmware.admin.group+xml':
subj_type = 'group'
else:
subj_type = 'user'
path = get_url_path(subject_elem.get('href'))
res = self.connection.request(path)
name = res.object.get('name')
subject = Subject(type=subj_type,
name=name,
access_level=access_level,
id=subject_elem.get('href'))
subjects.append(subject)
return ControlAccess(node, everyone_access_level, subjects)
def ex_set_control_access(self, node, control_access):
"""
Sets control access for the specified node.
:param node: node
:type node: :class:`Node`
:param control_access: control access settings
:type control_access: :class:`ControlAccess`
:rtype: ``None``
"""
xml = ET.Element('ControlAccessParams',
{'xmlns': 'http://www.vmware.com/vcloud/v1.5'})
shared_to_everyone = ET.SubElement(xml, 'IsSharedToEveryone')
if control_access.everyone_access_level:
shared_to_everyone.text = 'true'
everyone_access_level = ET.SubElement(xml, 'EveryoneAccessLevel')
everyone_access_level.text = control_access.everyone_access_level
else:
shared_to_everyone.text = 'false'
# Set subjects
if control_access.subjects:
access_settings_elem = ET.SubElement(xml, 'AccessSettings')
for subject in control_access.subjects:
setting = ET.SubElement(access_settings_elem, 'AccessSetting')
if subject.id:
href = subject.id
else:
res = self.ex_query(type=subject.type, filter='name==' +
subject.name)
if not res:
raise LibcloudError('Specified subject "%s %s" not found '
% (subject.type, subject.name))
href = res[0]['href']
ET.SubElement(setting, 'Subject', {'href': href})
ET.SubElement(setting, 'AccessLevel').text = subject.access_level
headers = {
'Content-Type': 'application/vnd.vmware.vcloud.controlAccess+xml'
}
self.connection.request(
'%s/action/controlAccess' % get_url_path(node.id),
data=ET.tostring(xml),
headers=headers,
method='POST')
def ex_get_metadata(self, node):
    """
    Fetch all metadata entries attached to a vApp.

    :param node: node
    :type node: :class:`Node`

    :return: dictionary mapping metadata keys to metadata values
    :rtype: dictionary mapping ``str`` to ``str``
    """
    response = self.connection.request(
        '%s/metadata' % (get_url_path(node.id)))
    entries = response.object.findall(
        fixxpath(response.object, 'MetadataEntry'))
    # Each MetadataEntry carries one Key/Value pair.
    return {
        entry.findtext(fixxpath(response.object, 'Key')):
            entry.findtext(fixxpath(response.object, 'Value'))
        for entry in entries
    }
def ex_set_metadata_entry(self, node, key, value):
    """
    Set (create or overwrite) one metadata entry on a vApp.

    :param node: node
    :type node: :class:`Node`

    :param key: metadata key to be set
    :type key: ``str``

    :param value: metadata value to be set
    :type value: ``str``

    :rtype: ``None``
    """
    # Build the Metadata/MetadataEntry/{Key,Value} document expected by
    # the vCloud 1.5 metadata API.
    metadata_root = ET.Element(
        'Metadata',
        {'xmlns': "http://www.vmware.com/vcloud/v1.5",
         'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
    )
    metadata_entry = ET.SubElement(metadata_root, 'MetadataEntry')
    ET.SubElement(metadata_entry, 'Key').text = key
    ET.SubElement(metadata_entry, 'Value').text = value

    # POST the document and block until the returned task completes.
    response = self.connection.request(
        '%s/metadata' % get_url_path(node.id),
        data=ET.tostring(metadata_root),
        headers={
            'Content-Type': 'application/vnd.vmware.vcloud.metadata+xml'
        },
        method='POST')
    self._wait_for_task_completion(response.object.get('href'))
def ex_query(self, type, filter=None, page=1, page_size=100, sort_asc=None,
             sort_desc=None):
    """
    Queries vCloud for specified type. See
    http://www.vmware.com/pdf/vcd_15_api_guide.pdf for details. Each
    element of the returned list is a dictionary with all attributes from
    the record.

    :param type: type to query (r.g. user, group, vApp etc.)
    :type type: ``str``

    :param filter: filter expression (see documentation for syntax)
    :type filter: ``str``

    :param page: page number
    :type page: ``int``

    :param page_size: page size
    :type page_size: ``int``

    :param sort_asc: sort in ascending order by specified field
    :type sort_asc: ``str``

    :param sort_desc: sort in descending order by specified field
    :type sort_desc: ``str``

    :rtype: ``list`` of dict
    """
    query_args = {
        'type': type,
        'pageSize': page_size,
        'page': page,
    }
    if sort_asc:
        query_args['sortAsc'] = sort_asc
    if sort_desc:
        query_args['sortDesc'] = sort_desc
    url = '/api/query?' + urlencode(query_args)

    # The filter expression is appended by hand: urlencode would
    # percent-encode '(name==...)' into a form vCloud rejects.
    if filter:
        if not filter.startswith('('):
            filter = '(' + filter + ')'
        url += '&filter=' + filter.replace(' ', '+')

    response = self.connection.request(url)
    records = []
    for elem in response.object:
        if elem.tag.endswith('Link'):
            continue
        record = elem.attrib
        # Expose the record kind (tag local name) as 'type'.
        record['type'] = elem.tag.split('}')[1]
        records.append(record)
    return records
def create_node(self, **kwargs):
    """
    Creates and returns node. If the source image is:
      - vApp template - a new vApp is instantiated from template
      - existing vApp - a new vApp is cloned from the source vApp. Can
                        not clone more vApps is parallel otherwise
                        resource busy error is raised.

    @inherits: :class:`NodeDriver.create_node`

    :keyword    image: OS Image to boot on node. (required). Can be a
                       NodeImage or existing Node that will be cloned.
    :type       image: :class:`NodeImage` or :class:`Node`

    :keyword    ex_network: Organisation's network name for attaching vApp
                            VMs to.
    :type       ex_network: ``str``

    :keyword    ex_vdc: Name of organisation's virtual data center where
                        vApp VMs will be deployed.
    :type       ex_vdc: ``str``

    :keyword    ex_vm_names: list of names to be used as a VM and computer
                             name. The name must be max. 15 characters
                             long and follow the host name requirements.
    :type       ex_vm_names: ``list`` of ``str``

    :keyword    ex_vm_cpu: number of virtual CPUs/cores to allocate for
                           each vApp VM.
    :type       ex_vm_cpu: ``int``

    :keyword    ex_vm_memory: amount of memory in MB to allocate for each
                              vApp VM.
    :type       ex_vm_memory: ``int``

    :keyword    ex_vm_script: full path to file containing guest
                              customisation script for each vApp VM.
                              Useful for creating users & pushing out
                              public SSH keys etc.
    :type       ex_vm_script: ``str``

    :keyword    ex_vm_network: Override default vApp VM network name.
                               Useful for when you've imported an OVF
                               originating from outside of the vCloud.
    :type       ex_vm_network: ``str``

    :keyword    ex_vm_fence: Fence mode for connecting the vApp VM network
                             (ex_vm_network) to the parent
                             organisation network (ex_network).
    :type       ex_vm_fence: ``str``

    :keyword    ex_vm_ipmode: IP address allocation mode for all vApp VM
                              network connections.
    :type       ex_vm_ipmode: ``str``

    :keyword    ex_deploy: set to False if the node shouldn't be deployed
                           (started) after creation
    :type       ex_deploy: ``bool``

    :keyword    ex_force_customization: Used to specify whether to force
                                        customization on deployment,
                                        if not set default value is False.
    :type       ex_force_customization: ``bool``

    :keyword    ex_clone_timeout: timeout in seconds for clone/instantiate
                                  VM operation.
                                  Cloning might be a time consuming
                                  operation especially when linked clones
                                  are disabled or VMs are created on
                                  different datastores.
                                  Overrides the default task completion
                                  value.
    :type       ex_clone_timeout: ``int``

    :keyword    ex_admin_password: set the node admin password explicitly.
    :type       ex_admin_password: ``str``
    """
    name = kwargs['name']
    image = kwargs['image']
    ex_vm_names = kwargs.get('ex_vm_names')
    ex_vm_cpu = kwargs.get('ex_vm_cpu')
    ex_vm_memory = kwargs.get('ex_vm_memory')
    ex_vm_script = kwargs.get('ex_vm_script')
    ex_vm_fence = kwargs.get('ex_vm_fence', None)
    ex_network = kwargs.get('ex_network', None)
    ex_vm_network = kwargs.get('ex_vm_network', None)
    ex_vm_ipmode = kwargs.get('ex_vm_ipmode', None)
    ex_deploy = kwargs.get('ex_deploy', True)
    ex_force_customization = kwargs.get('ex_force_customization', False)
    ex_vdc = kwargs.get('ex_vdc', None)
    ex_clone_timeout = kwargs.get('ex_clone_timeout',
                                  DEFAULT_TASK_COMPLETION_TIMEOUT)
    ex_admin_password = kwargs.get('ex_admin_password', None)

    # Fail fast on invalid arguments before any API request is made.
    self._validate_vm_names(ex_vm_names)
    self._validate_vm_cpu(ex_vm_cpu)
    self._validate_vm_memory(ex_vm_memory)
    self._validate_vm_fence(ex_vm_fence)
    self._validate_vm_ipmode(ex_vm_ipmode)
    ex_vm_script = self._validate_vm_script(ex_vm_script)

    # Some providers don't require a network link
    if ex_network:
        network_href = self._get_network_href(ex_network)
        network_elem = self.connection.request(
            get_url_path(network_href)).object
    else:
        network_elem = None

    vdc = self._get_vdc(ex_vdc)

    # Clone when "image" is an existing Node; otherwise instantiate the
    # vApp template.
    if self._is_node(image):
        vapp_name, vapp_href = self._clone_node(name,
                                                image,
                                                vdc,
                                                ex_clone_timeout)
    else:
        vapp_name, vapp_href = self._instantiate_node(name, image,
                                                      network_elem,
                                                      vdc, ex_vm_network,
                                                      ex_vm_fence,
                                                      ex_clone_timeout)

    # Apply per-VM customizations while the vApp is still undeployed.
    self._change_vm_names(vapp_href, ex_vm_names)
    self._change_vm_cpu(vapp_href, ex_vm_cpu)
    self._change_vm_memory(vapp_href, ex_vm_memory)
    self._change_vm_script(vapp_href, ex_vm_script)
    self._change_vm_ipmode(vapp_href, ex_vm_ipmode)
    if ex_admin_password is not None:
        self.ex_change_vm_admin_password(vapp_href, ex_admin_password)

    # Power on the VM.
    if ex_deploy:
        res = self.connection.request(get_url_path(vapp_href))
        node = self._to_node(res.object)
        # Retry 3 times: when instantiating large number of VMs at the same
        # time some may fail on resource allocation
        retry = 3
        while True:
            try:
                self.ex_deploy_node(node, ex_force_customization)
                break
            except Exception:
                if retry <= 0:
                    raise
                retry -= 1
                time.sleep(10)

    # Re-fetch so the returned Node reflects the final (deployed) state.
    res = self.connection.request(get_url_path(vapp_href))
    node = self._to_node(res.object)
    return node
def _instantiate_node(self, name, image, network_elem, vdc, vm_network,
                      vm_fence, instantiate_timeout):
    """
    Instantiate a new vApp from a vApp template in the given vDC and wait
    for the instantiation task to finish.

    Returns a ``(vapp_name, vapp_href)`` tuple.
    """
    request_body = Instantiate_1_5_VAppXML(
        name=name,
        template=image.id,
        network=network_elem,
        vm_network=vm_network,
        vm_fence=vm_fence
    )

    # POST the InstantiateVAppTemplateParams document; the response is the
    # (not yet ready) vApp entity.
    response = self.connection.request(
        '%s/action/instantiateVAppTemplate' % get_url_path(vdc.id),
        data=request_body.tostring(),
        method='POST',
        headers={
            'Content-Type':
            'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
        }
    )
    vapp_name = response.object.get('name')
    vapp_href = response.object.get('href')

    # Wait for the embedded instantiation task with the caller's timeout.
    task_href = response.object.find(
        fixxpath(response.object, "Tasks/Task")).get('href')
    self._wait_for_task_completion(task_href, instantiate_timeout)

    return vapp_name, vapp_href
def _clone_node(self, name, sourceNode, vdc, clone_timeout):
    # Clone with deploy/powerOn disabled so the network fix-up below can
    # run before the copy is ever started.
    clone_xml = ET.Element(
        "CloneVAppParams",
        {'name': name, 'deploy': 'false', 'powerOn': 'false',
         'xmlns': "http://www.vmware.com/vcloud/v1.5",
         'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
    )
    ET.SubElement(clone_xml,
                  'Description').text = 'Clone of ' + sourceNode.name
    ET.SubElement(clone_xml, 'Source', {'href': sourceNode.id})

    headers = {
        'Content-Type': 'application/vnd.vmware.vcloud.cloneVAppParams+xml'
    }
    res = self.connection.request(
        '%s/action/cloneVApp' % get_url_path(vdc.id),
        data=ET.tostring(clone_xml),
        method='POST',
        headers=headers
    )
    vapp_name = res.object.get('name')
    vapp_href = res.object.get('href')

    # The clone request returns a task; wait with the (possibly
    # caller-supplied) clone timeout.
    task_href = res.object.find(
        fixxpath(res.object, "Tasks/Task")).get('href')
    self._wait_for_task_completion(task_href, clone_timeout)

    res = self.connection.request(get_url_path(vapp_href))

    vms = res.object.findall(fixxpath(res.object, "Children/Vm"))

    # Fix the networking for VMs
    for i, vm in enumerate(vms):
        # Remove network: PUT an empty NetworkConnectionSection first.
        network_xml = ET.Element("NetworkConnectionSection", {
            'ovf:required': 'false',
            'xmlns': "http://www.vmware.com/vcloud/v1.5",
            'xmlns:ovf': 'http://schemas.dmtf.org/ovf/envelope/1'})
        ET.SubElement(network_xml, "ovf:Info").text = \
            'Specifies the available VM network connections'

        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.networkConnectionSection+xml'
        }
        res = self.connection.request(
            '%s/networkConnectionSection' % get_url_path(vm.get('href')),
            data=ET.tostring(network_xml),
            method='PUT',
            headers=headers
        )
        self._wait_for_task_completion(res.object.get('href'))

        # Re-add network: take the section cloned from the source VM,
        # force guest customization and drop the copied IP/MAC so the
        # clone gets fresh addresses on first deploy.
        network_xml = vm.find(fixxpath(vm, 'NetworkConnectionSection'))
        network_conn_xml = network_xml.find(
            fixxpath(network_xml, 'NetworkConnection'))
        network_conn_xml.set('needsCustomization', 'true')
        network_conn_xml.remove(
            network_conn_xml.find(fixxpath(network_xml, 'IpAddress')))
        network_conn_xml.remove(
            network_conn_xml.find(fixxpath(network_xml, 'MACAddress')))

        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.networkConnectionSection+xml'
        }
        res = self.connection.request(
            '%s/networkConnectionSection' % get_url_path(vm.get('href')),
            data=ET.tostring(network_xml),
            method='PUT',
            headers=headers
        )
        self._wait_for_task_completion(res.object.get('href'))

    return vapp_name, vapp_href
def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu):
    """
    Assign ``vm_cpu`` virtual CPUs/cores to the given VM, or to every VM
    attached to the vApp when a vApp ID is supplied.

    Hot-adding a virtual CPU must be enabled for powered-on machines;
    otherwise call this on an undeployed vApp.

    :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If
                            a vApp ID is used here all attached VMs
                            will be modified
    :type vapp_or_vm_id: ``str``

    :keyword vm_cpu: number of virtual CPUs/cores to allocate for
                     specified VMs
    :type vm_cpu: ``int``

    :rtype: ``None``
    """
    # Validate first so no partial modification happens on bad input.
    self._validate_vm_cpu(vm_cpu)
    self._change_vm_cpu(vapp_or_vm_id, vm_cpu)
def ex_set_vm_memory(self, vapp_or_vm_id, vm_memory):
    """
    Assign ``vm_memory`` MB of virtual memory to the given VM, or to every
    VM attached to the vApp when a vApp ID is supplied.

    Hot-change of virtual memory must be enabled for powered-on machines;
    otherwise call this on an undeployed vApp.

    :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If
                            a vApp ID is used here all attached VMs
                            will be modified
    :type vapp_or_vm_id: ``str``

    :keyword vm_memory: virtual memory in MB to allocate for the
                        specified VM or VMs
    :type vm_memory: ``int``

    :rtype: ``None``
    """
    # Validate first so no partial modification happens on bad input.
    self._validate_vm_memory(vm_memory)
    self._change_vm_memory(vapp_or_vm_id, vm_memory)
def ex_add_vm_disk(self, vapp_or_vm_id, vm_disk_size):
    """
    Add one virtual disk to the given VM, or to every VM attached to the
    vApp when a vApp ID is supplied.

    :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a
                            vApp ID is used here all attached VMs
                            will be modified
    :type vapp_or_vm_id: ``str``

    :keyword vm_disk_size: the disk capacity in GB that will be added
                           to the specified VM or VMs
    :type vm_disk_size: ``int``

    :rtype: ``None``
    """
    # Validate first so no partial modification happens on bad input.
    self._validate_vm_disk_size(vm_disk_size)
    self._add_vm_disk(vapp_or_vm_id, vm_disk_size)
@staticmethod
def _validate_vm_names(names):
if names is None:
return
hname_re = re.compile(
'^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9]*)[\-])*([A-Za-z]|[A-Za-z][A-Za-z0-9]*[A-Za-z0-9])$') # NOQA
for name in names:
if len(name) > 15:
raise ValueError(
'The VM name "' + name + '" is too long for the computer '
'name (max 15 chars allowed).')
if not hname_re.match(name):
raise ValueError('The VM name "' + name + '" can not be '
'used. "' + name + '" is not a valid '
'computer name for the VM.')
@staticmethod
def _validate_vm_memory(vm_memory):
if vm_memory is None:
return
elif vm_memory not in VIRTUAL_MEMORY_VALS:
raise ValueError(
'%s is not a valid vApp VM memory value' % vm_memory)
@staticmethod
def _validate_vm_cpu(vm_cpu):
if vm_cpu is None:
return
elif vm_cpu not in VIRTUAL_CPU_VALS_1_5:
raise ValueError('%s is not a valid vApp VM CPU value' % vm_cpu)
@staticmethod
def _validate_vm_disk_size(vm_disk):
if vm_disk is None:
return
elif int(vm_disk) < 0:
raise ValueError('%s is not a valid vApp VM disk space value',
vm_disk)
@staticmethod
def _validate_vm_script(vm_script):
if vm_script is None:
return
# Try to locate the script file
if not os.path.isabs(vm_script):
vm_script = os.path.expanduser(vm_script)
vm_script = os.path.abspath(vm_script)
if not os.path.isfile(vm_script):
raise LibcloudError(
"%s the VM script file does not exist" % vm_script)
try:
open(vm_script).read()
except Exception:
raise
return vm_script
@staticmethod
def _validate_vm_fence(vm_fence):
if vm_fence is None:
return
elif vm_fence not in FENCE_MODE_VALS_1_5:
raise ValueError('%s is not a valid fencing mode value' % vm_fence)
@staticmethod
def _validate_vm_ipmode(vm_ipmode):
if vm_ipmode is None:
return
elif vm_ipmode == 'MANUAL':
raise NotImplementedError(
'MANUAL IP mode: The interface for supplying '
'IPAddress does not exist yet')
elif vm_ipmode not in IP_MODE_VALS_1_5:
raise ValueError(
'%s is not a valid IP address allocation mode value'
% vm_ipmode)
def _change_vm_names(self, vapp_or_vm_id, vm_names):
    # Rename VMs one by one: both the guest "ComputerName" (via the guest
    # customization section) and the vCloud Vm entity name. VMs beyond
    # len(vm_names) keep their current names.
    if vm_names is None:
        return

    vms = self._get_vm_elements(vapp_or_vm_id)
    for i, vm in enumerate(vms):
        if len(vm_names) <= i:
            return

        # Get GuestCustomizationSection
        res = self.connection.request(
            '%s/guestCustomizationSection' % get_url_path(vm.get('href')))

        # Update GuestCustomizationSection
        res.object.find(
            fixxpath(res.object, 'ComputerName')).text = vm_names[i]
        # Remove AdminPassword from customization section; the API
        # rejects a round-tripped section that still carries it.
        admin_pass = res.object.find(fixxpath(res.object, 'AdminPassword'))
        if admin_pass is not None:
            res.object.remove(admin_pass)

        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.guestCustomizationSection+xml'
        }
        res = self.connection.request(
            '%s/guestCustomizationSection' % get_url_path(vm.get('href')),
            data=ET.tostring(res.object),
            method='PUT',
            headers=headers
        )
        self._wait_for_task_completion(res.object.get('href'))

        # Update Vm name
        req_xml = ET.Element("Vm", {
            'name': vm_names[i],
            'xmlns': "http://www.vmware.com/vcloud/v1.5"})
        res = self.connection.request(
            get_url_path(vm.get('href')),
            data=ET.tostring(req_xml),
            method='PUT',
            headers={
                'Content-Type': 'application/vnd.vmware.vcloud.vm+xml'}
        )
        self._wait_for_task_completion(res.object.get('href'))
def _change_vm_cpu(self, vapp_or_vm_id, vm_cpu):
    """Set the virtual CPU count on every VM reachable from the ID."""
    if vm_cpu is None:
        return

    quantity_xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData}VirtualQuantity')
    for vm in self._get_vm_elements(vapp_or_vm_id):
        cpu_url = '%s/virtualHardwareSection/cpu' % get_url_path(
            vm.get('href'))
        # Fetch the CPU RASD item, patch its VirtualQuantity, PUT it back
        # and wait for the resulting task.
        response = self.connection.request(cpu_url)
        response.object.find(quantity_xpath).text = str(vm_cpu)
        response = self.connection.request(
            cpu_url,
            data=ET.tostring(response.object),
            method='PUT',
            headers={
                'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml'
            }
        )
        self._wait_for_task_completion(response.object.get('href'))
def _change_vm_memory(self, vapp_or_vm_id, vm_memory):
    """Set the virtual memory (MB) on every VM reachable from the ID."""
    if vm_memory is None:
        return

    quantity_xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData}VirtualQuantity')
    for vm in self._get_vm_elements(vapp_or_vm_id):
        memory_url = '%s/virtualHardwareSection/memory' % get_url_path(
            vm.get('href'))
        # Fetch the memory RASD item, patch its VirtualQuantity, PUT it
        # back and wait for the resulting task.
        response = self.connection.request(memory_url)
        response.object.find(quantity_xpath).text = str(vm_memory)
        response = self.connection.request(
            memory_url,
            data=ET.tostring(response.object),
            method='PUT',
            headers={
                'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml'
            }
        )
        self._wait_for_task_completion(response.object.get('href'))
def _add_vm_disk(self, vapp_or_vm_id, vm_disk):
    # Append one new virtual disk of vm_disk GB to every VM reachable
    # from the given vApp/VM id, using a copy of an existing disk Item as
    # the template for the new one.
    if vm_disk is None:
        return

    rasd_ns = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
               'CIM_ResourceAllocationSettingData}')

    vms = self._get_vm_elements(vapp_or_vm_id)
    for vm in vms:
        # Get virtualHardwareSection/disks section
        res = self.connection.request(
            '%s/virtualHardwareSection/disks' %
            get_url_path(vm.get('href')))

        existing_ids = []
        new_disk = None
        for item in res.object.findall(fixxpath(res.object, 'Item')):
            # Clean Items from unnecessary stuff
            # NOTE(review): item.remove(elem) while iterating "item" can
            # skip siblings of a removed element -- confirm all
            # AddressOnParent/Parent children are actually stripped.
            for elem in item:
                if elem.tag == '%sInstanceID' % rasd_ns:
                    existing_ids.append(int(elem.text))
                if elem.tag in ['%sAddressOnParent' % rasd_ns,
                                '%sParent' % rasd_ns]:
                    item.remove(elem)
            if item.find('%sHostResource' % rasd_ns) is not None:
                new_disk = item

        # NOTE(review): assumes at least one Item with a HostResource
        # (i.e. an existing disk) is present; otherwise new_disk stays
        # None and the deepcopy/find calls below fail.
        new_disk = copy.deepcopy(new_disk)
        disk_id = max(existing_ids) + 1
        new_disk.find('%sInstanceID' % rasd_ns).text = str(disk_id)
        new_disk.find('%sElementName' %
                      rasd_ns).text = 'Hard Disk ' + str(disk_id)
        # capacity attribute is in MB: GB * 1024.
        new_disk.find('%sHostResource' % rasd_ns).set(
            fixxpath(new_disk, 'capacity'), str(int(vm_disk) * 1024))
        res.object.append(new_disk)

        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.rasditemslist+xml'
        }
        res = self.connection.request(
            '%s/virtualHardwareSection/disks' % get_url_path(
                vm.get('href')),
            data=ET.tostring(res.object),
            method='PUT',
            headers=headers
        )
        self._wait_for_task_completion(res.object.get('href'))
def _change_vm_script(self, vapp_or_vm_id, vm_script):
    """
    Upload a guest customisation script to every VM reachable from the
    given vApp/VM id.

    An unreadable script file aborts quietly (best-effort behaviour
    preserved from the original implementation).
    """
    if vm_script is None:
        return

    vms = self._get_vm_elements(vapp_or_vm_id)
    try:
        # Use a context manager so the file handle is closed (the
        # previous code leaked it).
        with open(vm_script) as script_file:
            script = script_file.read()
    except Exception:
        # Preserve historical best-effort behaviour: bail out silently if
        # the script cannot be read.
        return

    # ElementTree escapes script characters automatically. Escape
    # requirements:
    # http://www.vmware.com/support/vcd/doc/rest-api-doc-1.5-html/types/
    # GuestCustomizationSectionType.html
    for vm in vms:
        # Get GuestCustomizationSection
        res = self.connection.request(
            '%s/guestCustomizationSection' % get_url_path(vm.get('href')))

        # Attempt to update any existing CustomizationScript element
        try:
            res.object.find(
                fixxpath(res.object, 'CustomizationScript')).text = script
        except Exception:
            # CustomizationScript section does not exist, insert it just
            # before ComputerName (element order matters to the API)
            for i, e in enumerate(res.object):
                if e.tag == \
                        '{http://www.vmware.com/vcloud/v1.5}ComputerName':
                    break
            e = ET.Element(
                '{http://www.vmware.com/vcloud/v1.5}CustomizationScript')
            e.text = script
            res.object.insert(i, e)

        # Remove AdminPassword from customization section due to an API
        # quirk
        admin_pass = res.object.find(fixxpath(res.object, 'AdminPassword'))
        if admin_pass is not None:
            res.object.remove(admin_pass)

        # Update VM's GuestCustomizationSection
        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.guestCustomizationSection+xml'
        }
        res = self.connection.request(
            '%s/guestCustomizationSection' % get_url_path(vm.get('href')),
            data=ET.tostring(res.object),
            method='PUT',
            headers=headers
        )
        self._wait_for_task_completion(res.object.get('href'))
def _change_vm_ipmode(self, vapp_or_vm_id, vm_ipmode):
    """Apply an IP allocation mode to every connection of each VM."""
    if vm_ipmode is None:
        return

    for vm in self._get_vm_elements(vapp_or_vm_id):
        section_url = '%s/networkConnectionSection' % get_url_path(
            vm.get('href'))
        response = self.connection.request(section_url)

        # Flip the allocation mode on every NetworkConnection entry.
        for conn in response.object.findall(
                fixxpath(response.object, 'NetworkConnection')):
            conn.find(
                fixxpath(conn, 'IpAddressAllocationMode')).text = vm_ipmode

        response = self.connection.request(
            section_url,
            data=ET.tostring(response.object),
            method='PUT',
            headers={
                'Content-Type':
                'application/vnd.vmware.vcloud.networkConnectionSection+xml'
            }
        )
        self._wait_for_task_completion(response.object.get('href'))
def _update_or_insert_section(self, res, section, prev_section, text):
    """
    Set the text of *section* in the customization document, creating the
    element just before *prev_section* when it does not exist yet
    (element order is significant to the vCloud API).

    :param res: response whose ``object`` is the XML document root
    :param section: tag name (without namespace) to update or insert
    :type section: ``str``
    :param prev_section: tag name the inserted element must precede
    :type prev_section: ``str``
    :param text: text content to assign
    :type text: ``str``
    :return: the (mutated) response
    """
    # Explicit None check instead of the previous broad try/except, which
    # used AttributeError as control flow and masked unrelated errors.
    existing = res.object.find(fixxpath(res.object, section))
    if existing is not None:
        existing.text = text
        return res

    # "section" does not exist: insert it just before "prev_section".
    for i, e in enumerate(res.object):
        tag = '{http://www.vmware.com/vcloud/v1.5}%s' % prev_section
        if e.tag == tag:
            break
    e = ET.Element(
        '{http://www.vmware.com/vcloud/v1.5}%s' % section)
    e.text = text
    res.object.insert(i, e)
    return res
def ex_change_vm_admin_password(self, vapp_or_vm_id, ex_admin_password):
    """
    Changes the admin (or root) password of VM or VMs under the vApp. If
    the vapp_or_vm_id param represents a link to an vApp all VMs that
    are attached to this vApp will be modified.

    :keyword    vapp_or_vm_id: vApp or VM ID that will be modified. If a
                               vApp ID is used here all attached VMs
                               will be modified
    :type       vapp_or_vm_id: ``str``

    :keyword    ex_admin_password: new administrator password to set
    :type       ex_admin_password: ``str``

    :rtype: ``None``
    """
    if ex_admin_password is None:
        return

    vms = self._get_vm_elements(vapp_or_vm_id)
    for vm in vms:
        # Get GuestCustomizationSection
        res = self.connection.request(
            '%s/guestCustomizationSection' % get_url_path(vm.get('href')))

        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.guestCustomizationSection+xml'
        }

        # Fix API quirk.
        # If AdminAutoLogonEnabled==False the guestCustomizationSection
        # must have AdminAutoLogonCount==0, even though
        # it might have AdminAutoLogonCount==1 when requesting it for
        # the first time.
        auto_logon = res.object.find(
            fixxpath(res.object, "AdminAutoLogonEnabled"))
        if auto_logon is not None and auto_logon.text == 'false':
            self._update_or_insert_section(res,
                                           "AdminAutoLogonCount",
                                           "ResetPasswordRequired",
                                           '0')

        # If we are establishing a password we do not want it
        # to be automatically chosen.
        self._update_or_insert_section(res,
                                       'AdminPasswordAuto',
                                       'AdminPassword',
                                       'false')

        # API does not allow to set AdminPassword if
        # AdminPasswordEnabled is not enabled.
        self._update_or_insert_section(res,
                                       'AdminPasswordEnabled',
                                       'AdminPasswordAuto',
                                       'true')

        # Bug fix: this section name had been mangled to a '<PASSWORD>'
        # placeholder. The element carrying the actual password is
        # 'AdminPassword', inserted before 'AdminAutoLogonEnabled' per
        # the GuestCustomizationSectionType element order.
        self._update_or_insert_section(res,
                                       'AdminPassword',
                                       'AdminAutoLogonEnabled',
                                       ex_admin_password)

        res = self.connection.request(
            '%s/guestCustomizationSection' % get_url_path(vm.get('href')),
            data=ET.tostring(res.object),
            method='PUT',
            headers=headers
        )
        self._wait_for_task_completion(res.object.get('href'))
def _get_network_href(self, network_name):
    """Resolve an organisation network name to its href, or raise."""
    # Scan the organisation's Link elements for a matching orgNetwork.
    res = self.connection.request(self.org)
    network_href = None
    for link in res.object.findall(fixxpath(res.object, 'Link')):
        if link.attrib['type'] == \
                'application/vnd.vmware.vcloud.orgNetwork+xml' \
                and link.attrib['name'] == network_name:
            network_href = link.attrib['href']

    if network_href is None:
        raise ValueError(
            '%s is not a valid organisation network name' % network_name)
    return network_href
def _get_vm_elements(self, vapp_or_vm_id):
    """Return the list of Vm XML elements behind a vApp or VM href."""
    res = self.connection.request(get_url_path(vapp_or_vm_id))
    tag = res.object.tag
    if tag.endswith('VApp'):
        # A vApp: collect all child VMs.
        return res.object.findall(fixxpath(res.object, 'Children/Vm'))
    if tag.endswith('Vm'):
        # Already a single VM element.
        return [res.object]
    raise ValueError(
        'Specified ID value is not a valid VApp or Vm identifier.')
def _is_node(self, node_or_image):
    # True when create_node's "image" argument is an existing Node (clone
    # path) rather than a NodeImage (instantiate-from-template path).
    return isinstance(node_or_image, Node)
def _to_node(self, node_elm):
    """Convert a vApp XML element into a :class:`Node`; per-VM details,
    the vDC name and any snapshots are exposed through ``extra``."""
    # Parse snapshots and VMs as extra
    if node_elm.find(fixxpath(node_elm, "SnapshotSection")) is None:
        snapshots = None
    else:
        snapshots = []
        for snapshot_elem in node_elm.findall(
                fixxpath(node_elm, 'SnapshotSection/Snapshot')):
            snapshots.append({
                "created": snapshot_elem.get("created"),
                "poweredOn": snapshot_elem.get("poweredOn"),
                "size": snapshot_elem.get("size"),
            })

    vms = []
    for vm_elem in node_elm.findall(fixxpath(node_elm, 'Children/Vm')):
        public_ips = []
        private_ips = []

        xpath = fixxpath(vm_elem,
                         'NetworkConnectionSection/NetworkConnection')
        for connection in vm_elem.findall(xpath):
            ip = connection.find(fixxpath(connection, "IpAddress"))
            if ip is not None:
                private_ips.append(ip.text)
            external_ip = connection.find(
                fixxpath(connection, "ExternalIpAddress"))
            if external_ip is not None:
                public_ips.append(external_ip.text)
            elif ip is not None:
                # No NAT/external address on this connection: report the
                # internal address as the public one as well.
                public_ips.append(ip.text)

        xpath = ('{http://schemas.dmtf.org/ovf/envelope/1}'
                 'OperatingSystemSection')
        os_type_elem = vm_elem.find(xpath)
        if os_type_elem is not None:
            os_type = os_type_elem.get(
                '{http://www.vmware.com/schema/ovf}osType')
        else:
            os_type = None

        vm = {
            'id': vm_elem.get('href'),
            'name': vm_elem.get('name'),
            'state': self.NODE_STATE_MAP[vm_elem.get('status')],
            'public_ips': public_ips,
            'private_ips': private_ips,
            'os_type': os_type
        }
        vms.append(vm)

    # Take the node IP addresses from all VMs
    public_ips = []
    private_ips = []
    for vm in vms:
        public_ips.extend(vm['public_ips'])
        private_ips.extend(vm['private_ips'])

    # Find vDC
    vdc_id = next(link.get('href') for link
                  in node_elm.findall(fixxpath(node_elm, 'Link'))
                  if link.get('type') ==
                  'application/vnd.vmware.vcloud.vdc+xml')
    vdc = next(vdc for vdc in self.vdcs if vdc.id == vdc_id)

    extra = {'vdc': vdc.name, 'vms': vms}
    if snapshots is not None:
        extra['snapshots'] = snapshots

    node = Node(id=node_elm.get('href'),
                name=node_elm.get('name'),
                state=self.NODE_STATE_MAP[node_elm.get('status')],
                public_ips=public_ips,
                private_ips=private_ips,
                driver=self.connection.driver,
                extra=extra)
    return node
def _to_vdc(self, vdc_elm):
    """Build a :class:`Vdc` from its XML representation."""

    def _capacity(elm):
        # A missing capacity section maps to None rather than zeros.
        if elm is None:
            return None
        return Capacity(
            int(elm.findtext(fixxpath(elm, 'Limit'))),
            int(elm.findtext(fixxpath(elm, 'Used'))),
            elm.findtext(fixxpath(elm, 'Units')))

    return Vdc(
        id=vdc_elm.get('href'),
        name=vdc_elm.get('name'),
        driver=self,
        allocation_model=vdc_elm.findtext(
            fixxpath(vdc_elm, 'AllocationModel')),
        cpu=_capacity(
            vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Cpu'))),
        memory=_capacity(
            vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Memory'))),
        storage=_capacity(
            vdc_elm.find(fixxpath(vdc_elm, 'StorageCapacity'))))
class VCloud_5_1_NodeDriver(VCloud_1_5_NodeDriver):
    """vCloud Director 5.1 driver; relaxes the 1.5 memory whitelist."""

    @staticmethod
    def _validate_vm_memory(vm_memory):
        """Accept any multiple of 4 MB, per the vCD 5.1 requirement."""
        if vm_memory is None:
            return None
        # The vcd 5.1 virtual machine memory size must be a multiple of 4
        # MB
        if vm_memory % 4 != 0:
            raise ValueError(
                '%s is not a valid vApp VM memory value' % (vm_memory))
class VCloud_5_5_NodeDriver(VCloud_5_1_NodeDriver):
    """Use 5.5 Connection class to explicitly set 5.5 for the version in
    Accept headers
    """

    connectionCls = VCloud_5_5_Connection

    def ex_create_snapshot(self, node):
        """
        Creates new snapshot of a virtual machine or of all
        the virtual machines in a vApp. Prior to creation of the new
        snapshots, any existing user created snapshots associated
        with the virtual machines are removed.

        :param node: node
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        # NOTE(review): the snapshot name and description are hard-coded
        # placeholders ('name' / 'Description'); confirm whether callers
        # should be able to control them.
        snapshot_xml = ET.Element(
            "CreateSnapshotParams",
            {'memory': 'true',
             'name': 'name',
             'quiesce': 'true',
             'xmlns': "http://www.vmware.com/vcloud/v1.5",
             'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
        )
        ET.SubElement(snapshot_xml, 'Description').text = 'Description'
        content_type = 'application/vnd.vmware.vcloud.createSnapshotParams+xml'
        headers = {
            'Content-Type': content_type
        }
        return self._perform_snapshot_operation(node,
                                                "createSnapshot",
                                                snapshot_xml,
                                                headers)

    def ex_remove_snapshots(self, node):
        """
        Removes all user created snapshots for a vApp or virtual machine.

        :param node: node
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_snapshot_operation(node,
                                                "removeAllSnapshots",
                                                None,
                                                None)

    def ex_revert_to_snapshot(self, node):
        """
        Reverts a vApp or virtual machine to the current snapshot, if any.

        :param node: node
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_snapshot_operation(node,
                                                "revertToCurrentSnapshot",
                                                None,
                                                None)

    def _perform_snapshot_operation(self, node, operation, xml_data, headers):
        # POST the snapshot action, wait for its task, then re-fetch the
        # vApp so the returned Node reflects the new snapshot state.
        res = self.connection.request(
            '%s/action/%s' % (get_url_path(node.id), operation),
            data=ET.tostring(xml_data) if xml_data is not None else None,
            method='POST',
            headers=headers)
        self._wait_for_task_completion(res.object.get('href'))
        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)

    def ex_acquire_mks_ticket(self, vapp_or_vm_id, vm_num=0):
        """
        Retrieve a mks ticket that you can use to gain access to the console
        of a running VM. If successful, returns a dict with the following
        keys:

          - host: host (or proxy) through which the console connection
                  is made
          - vmx: a reference to the VMX file of the VM for which this
                 ticket was issued
          - ticket: screen ticket to use to authenticate the client
          - port: host port to be used for console access

        :param vapp_or_vm_id: vApp or VM ID you want to connect to.
        :type vapp_or_vm_id: ``str``

        :param vm_num: If a vApp ID is provided, vm_num is position in the
                       vApp VM list of the VM you want to get a screen ticket.
                       Default is 0.
        :type vm_num: ``int``

        :rtype: ``dict``
        """
        vm = self._get_vm_elements(vapp_or_vm_id)[vm_num]
        try:
            res = self.connection.request('%s/screen/action/acquireMksTicket' %
                                          (get_url_path(vm.get('href'))),
                                          method='POST')
            output = {
                "host": res.object.find(fixxpath(res.object, 'Host')).text,
                "vmx": res.object.find(fixxpath(res.object, 'Vmx')).text,
                "ticket": res.object.find(fixxpath(res.object, 'Ticket')).text,
                "port": res.object.find(fixxpath(res.object, 'Port')).text,
            }
            return output
        except Exception:
            # NOTE(review): any failure (HTTP error, missing element in the
            # response) is collapsed into a None return -- confirm callers
            # rely on that rather than on the exception.
            return None
| StarcoderdataPython |
3323754 | <reponame>yoonjieun/blender
import bpy
class RemoveDuplicateMaterial(bpy.types.Operator):
    '''
    replace material with 3 characters at the end to material without number
    ex) 'material.001' --> 'material'
        'material.099' --> 'material'
    '''
    bl_idname = "lazypic.remove_duplicate_material"
    bl_label = "remove_duplicate_material"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Names of every material currently in the blend file.
        known_names = [mat.name for mat in bpy.data.materials]

        # Re-point slots whose material looks like a ".NNN" duplicate at
        # the base material, when that base material exists.
        for obj in bpy.context.scene.objects:
            for slot in obj.material_slots:
                name = slot.material.name
                if not name[-3:].isnumeric():
                    continue
                base_name = name[:-4]
                if base_name in known_names:
                    slot.material = bpy.data.materials[base_name]

        # Purge materials that no longer have any users.
        for material in bpy.data.materials:
            if not material.users:
                bpy.data.materials.remove(material)

        return {'FINISHED'}
| StarcoderdataPython |
9678262 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""srcnn evaluation"""
import argparse
import mindspore as ms
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.config import srcnn_cfg as config
from src.dataset import create_eval_dataset
from src.srcnn import SRCNN
from src.metric import SRCNNpsnr
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="srcnn eval")
    parser.add_argument('--dataset_path', type=str, required=True, help="Dataset, default is None.")
    parser.add_argument('--checkpoint_path', type=str, required=True, help="checkpoint file path")
    # BUG FIX: choices=("GPU") was just the string "GPU", so argparse
    # performed substring membership and accepted values such as "G" or
    # "PU".  A one-element tuple restricts the option to exactly "GPU".
    parser.add_argument('--device_target', type=str, default='GPU', choices=("GPU",),
                        help="Device target, support GPU.")
    args, _ = parser.parse_known_args()

    if args.device_target == "GPU":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target=args.device_target,
                            save_graphs=False)
    else:
        raise ValueError("Unsupported device target.")

    # Build the evaluation pipeline: dataset -> network -> optimizer/loss.
    eval_ds = create_eval_dataset(args.dataset_path)
    net = SRCNN()
    lr = Tensor(config.lr, ms.float32)
    opt = nn.Adam(params=net.trainable_params(), learning_rate=lr, eps=1e-07)
    loss = nn.MSELoss(reduction='mean')

    # Load the trained weights and switch the network to inference mode.
    param_dict = load_checkpoint(args.checkpoint_path)
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # Evaluate PSNR over the whole eval dataset and report it.
    model = Model(net, loss_fn=loss, optimizer=opt, metrics={'PSNR': SRCNNpsnr()})
    res = model.eval(eval_ds, dataset_sink_mode=False)
    print("result ", res)
| StarcoderdataPython |
1687502 | import sys
import pathlib
from .Group import Group
from .Zn import Zn
from .Zn_coprime import Zn_coprime
from .Klein4 import Klein4
from .Sn import Sn
from .Element import Element
from .order import O
# Import helpers module
sys.path.insert(1, pathlib.Path(__file__).parent.absolute().__str__() + '/helpers')
from divisible_by import divisible_by
from is_prime import is_prime
from find_primes_up_to import find_primes_up_to
from prime_factorization import prime_factorization
from phi import phi
from permutation import permute_n, permute
__all__ = ['Group', 'Zn'] | StarcoderdataPython |
1676165 | <filename>server/the_water_project/tags/models.py
from django.db import models
class Tag(models.Model):
    """A short free-form label that other models can reference."""

    # Human-readable tag text (max 25 characters).
    name = models.CharField(max_length=25)
    # FIXME: Should I made the "name" field unique?

    def __str__(self):
        # Display the tag by its text in the admin, shell, etc.
        return self.name
| StarcoderdataPython |
3224680 | #
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
import re
import socket
from cfgm_common import PERMS_RX
from vnc_api.gen.resource_common import VirtualDns
from vnc_cfg_api_server.resources._resource_base import ResourceMixin
class VirtualDnsServer(ResourceMixin, VirtualDns):
    """Validation and lifecycle hooks for virtual-DNS-server resources.

    The ``pre_dbe_*`` classmethods are invoked by the VNC API server
    around create/update/delete of a ``virtual-DNS`` object.  Each
    returns an ``(ok, error[, extra])`` tuple where ``error`` is an
    ``(http_code, message)`` pair on failure.
    """

    @classmethod
    def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
        """Share the new vDNS read/execute with its domain, then validate."""
        # enable domain level sharing for virtual DNS
        domain_uuid = obj_dict.get('parent_uuid')
        if domain_uuid is None:
            # Parent UUID not set on create; resolve the owning domain
            # from the first fq_name component.
            domain_uuid = db_conn.fq_name_to_uuid('domain',
                                                  obj_dict['fq_name'][0:1])
        # Grant read/execute on this vDNS to every tenant in the domain.
        share_item = {
            'tenant': 'domain:%s' % domain_uuid,
            'tenant_access': PERMS_RX
        }
        obj_dict['perms2'].setdefault('share', []).append(share_item)
        return cls.validate_dns_server(obj_dict, db_conn)

    @classmethod
    def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
        """Re-validate the vDNS configuration on every update."""
        return cls.validate_dns_server(obj_dict, db_conn)

    @classmethod
    def pre_dbe_delete(cls, id, obj_dict, db_conn):
        """Refuse deletion while a sibling vDNS forwards to this server."""
        vdns_name = ":".join(obj_dict['fq_name'])
        if 'parent_uuid' in obj_dict:
            ok, read_result = cls.dbe_read(db_conn, 'domain',
                                           obj_dict['parent_uuid'])
            if not ok:
                return ok, read_result, None
            # Scan every vDNS in the same domain for a 'next_virtual_DNS'
            # pointing at the server being deleted.
            virtual_DNSs = read_result.get('virtual_DNSs') or []
            for vdns in virtual_DNSs:
                ok, read_result = cls.dbe_read(db_conn, 'virtual_DNS',
                                               vdns['uuid'])
                if not ok:
                    code, msg = read_result
                    if code == 404:
                        # Sibling vanished concurrently; ignore it.
                        continue
                    return ok, (code, msg), None

                vdns_data = read_result['virtual_DNS_data']
                if 'next_virtual_DNS' in vdns_data:
                    if vdns_data['next_virtual_DNS'] == vdns_name:
                        return (
                            False,
                            (403,
                             "Virtual DNS server is referred"
                             " by other virtual DNS servers"), None)
        return True, "", None

    @classmethod
    def is_valid_dns_name(cls, name):
        """Return True if *name* is a syntactically valid DNS name."""
        if len(name) > 255:
            return False
        if name.endswith("."):  # A single trailing dot is legal
            # strip exactly one dot from the right, if present
            name = name[:-1]
        disallowed = re.compile(r'[^A-Z\d-]', re.IGNORECASE)
        return all(  # Split by labels and verify individually
            (label and len(label) <= 63 and  # length is within proper range
             # no bordering hyphens
             not label.startswith("-") and not label.endswith("-") and
             not disallowed.search(label))  # contains only legal char
            for label in name.split("."))

    @classmethod
    def is_valid_ipv4_address(cls, address):
        """Return True if *address* is a valid dotted-quad IPv4 address."""
        parts = address.split(".")
        if len(parts) != 4:
            return False
        for item in parts:
            try:
                # Each octet must be an integer in [0, 255].
                if not 0 <= int(item) <= 255:
                    return False
            except ValueError:
                return False
        return True

    @classmethod
    def is_valid_ipv6_address(cls, address):
        """Return True if *address* parses as an IPv6 address."""
        try:
            # Delegate syntax checking to the system parser.
            socket.inet_pton(socket.AF_INET6, address)
        except socket.error:
            return False
        return True

    @classmethod
    def validate_dns_server(cls, obj_dict, db_conn):
        """Validate a vDNS object's name and ``virtual_DNS_data`` payload.

        Returns ``(True, "")`` on success, ``(False, (code, msg))`` on
        the first failed check.
        """
        if 'fq_name' in obj_dict:
            # Server name: letters, digits and '-' only, no leading '-'.
            virtual_dns = obj_dict['fq_name'][1]
            disallowed = re.compile(r'[^A-Z\d-]', re.IGNORECASE)
            if disallowed.search(virtual_dns) or virtual_dns.startswith("-"):
                msg = ("Special characters are not allowed in Virtual DNS "
                       "server name")
                return False, (403, msg)

        vdns_data = obj_dict['virtual_DNS_data']
        if not cls.is_valid_dns_name(vdns_data['domain_name']):
            return (
                False,
                (403, "Domain name does not adhere to DNS name requirements"))

        record_order = ["fixed", "random", "round-robin"]
        if not str(vdns_data['record_order']).lower() in record_order:
            return (False, (403, "Invalid value for record order"))

        # TTL must fit in a signed 32-bit integer.
        ttl = vdns_data['default_ttl_seconds']
        if ttl < 0 or ttl > 2147483647:
            return (False, (400, "Invalid value for TTL"))

        if 'next_virtual_DNS' in vdns_data:
            vdns_next = vdns_data['next_virtual_DNS']
            if not vdns_next or vdns_next is None:
                # No forwarder configured; nothing more to check.
                return True, ""
            next_vdns = vdns_data['next_virtual_DNS'].split(":")
            # check that next vdns exists
            try:
                next_vdns_uuid = db_conn.fq_name_to_uuid(
                    'virtual_DNS', next_vdns)
            except Exception:
                # Not a known vDNS fq_name: the forwarder may instead be
                # a plain IPv4 address.
                if not cls.is_valid_ipv4_address(
                        vdns_data['next_virtual_DNS']):
                    return (
                        False,
                        (400,
                         "Invalid Virtual Forwarder(next virtual dns server)"))
                else:
                    return True, ""
            # check that next virtual dns servers arent referring to each other
            # above check doesnt allow during create, but entry could be
            # modified later
            ok, read_result = cls.dbe_read(db_conn, 'virtual_DNS',
                                           next_vdns_uuid)
            if ok:
                next_vdns_data = read_result['virtual_DNS_data']
                if 'next_virtual_DNS' in next_vdns_data:
                    vdns_name = ":".join(obj_dict['fq_name'])
                    if next_vdns_data['next_virtual_DNS'] == vdns_name:
                        return (
                            False,
                            (403,
                             "Cannot have Virtual DNS Servers "
                             "referring to each other"))
        return True, ""
| StarcoderdataPython |
5172056 | """
Methods used to convert/handle raw input data
"""
import os
import json
import h5py
import numpy as np
import csv
import execnet
def init_obj_catalogue(path_to_data):
    """Build the object catalogue from the test/train item-data folders.

    Each class sub-folder under ``test-item-data`` contributes one entry,
    keyed by the lower-cased folder name.  The entry starts from the
    class' JSON metadata (or an empty stub when the file is missing) and
    is annotated with a ``known`` flag (class also present at training
    time) and a 1-based string ``label`` matching the ground-truth format.
    """
    test_root = os.path.join(path_to_data, 'test-item-data')
    train_root = os.path.join(path_to_data, 'train-item-data')
    known_classes = sorted(os.listdir(train_root))
    catalogue = {}
    for index, folder in enumerate(sorted(os.listdir(test_root))):
        cname = folder.lower()
        json_path = os.path.join(test_root, folder, cname + '.json')
        try:
            with open(json_path) as fin:
                node = json.load(fin)  # start from the metadata on disk
            # Known vs novel: was this class seen at training time?
            node['known'] = folder in known_classes
        except FileNotFoundError:
            print("No json file found for object %s" % cname)
            print("Adding empty node")
            # Stub entry; 'Empty' is treated as known at training time.
            node = {"dimensions": [0, 0, 0]}
            node['known'] = True
        # Class labels are 1-based strings, mirroring the ground truth.
        node['label'] = str(index + 1)
        catalogue[cname] = node
    return catalogue
def load_emb_space(args,fname='snapshot-test-results.h5'):
    """
    Load product/test embedding matrices from HDF5 snapshot files.

    Assumes the input are the HDF5 files
    as produced by the baselines provided at
    https://github.com/andyzeng/arc-robot-vision/image-matching

    Returns a 4-tuple ``(known_prod, known_test, novel_prod, novel_test)``
    of little-endian float32 arrays.  The novel pair is ``None`` for the
    imprk-net baseline and the KMi set, which use a single class-aware
    snapshot.

    NOTE(review): the dataset keys ('prodFeat', 'testFeat') and snapshot
    file names are dictated by the external training code -- confirm
    against the checkpoint files actually on disk.
    """
    if args.baseline =="imprk-net" or args.set=='KMi':
        path_to_hdf5 = os.path.join(args.test_res, args.baseline, 'snapshots-with-class',fname)
        tgt_impr = h5py.File(path_to_hdf5, 'r')
        # Single snapshot: no separate "novel" embedding space.
        return np.array(tgt_impr['prodFeat'], dtype='<f4'), np.array(tgt_impr['testFeat'], dtype='<f4'),\
               None, None
    else:
        path_to_hdf5 = args.test_res
        # Two snapshots: one network trained without class supervision
        # (novel objects) and one trained with it (known objects).
        tgt_novel = os.path.join(path_to_hdf5, 'snapshots-no-class', 'results-snapshot-8000.h5') #default folder structure by Zeng et al.
        tgt_known = os.path.join(path_to_hdf5, 'snapshots-with-class', 'results-snapshot-170000.h5')
        nnetf = h5py.File(tgt_novel, 'r')
        knetf = h5py.File(tgt_known, 'r')
        return np.array(knetf['prodFeat'], dtype='<f4'), np.array(knetf['testFeat'], dtype='<f4'), \
               np.array(nnetf['prodFeat'], dtype='<f4'), np.array(nnetf['testFeat'], dtype='<f4')
def load_camera_intrinsics_txt(path_to_intr):
    """Load a camera intrinsics matrix from a tab-separated text file.

    Expects a 3x3 intrinsics matrix as tab-separated txt.  A cell may be
    either a bare number ("525.0") or a "<label> <value>" pair
    ("fx 525.0"), in which case the second space-separated token is used.
    Empty rows and empty cells are skipped.

    :param path_to_intr: path to the intrinsics ``.txt`` file.
    :returns: flat list of the matrix entries as floats, in reading order.
    """
    intrinsics = []
    with open(path_to_intr) as f:
        reader = csv.reader(f, delimiter='\t')
        for row in reader:
            if row == []:
                continue
            for cell in row:
                if cell == '':
                    continue
                try:
                    # "label value" cell: keep the numeric second token.
                    intrinsics.append(float(cell.split(" ")[1]))
                except IndexError:
                    # Bare numeric cell (no space): use it directly.
                    # (The original nested a second try that re-evaluated
                    # the identical failing expression before this
                    # fallback; that retry was dead code and is removed.)
                    intrinsics.append(float(cell))
    return intrinsics
##################################################################
#
# Creating txt ground truth files in the same format as ARC2017
#
##################################################################
def arcify(root_img_path):
    """Generate ARC2017-style image-list and label-list text files.

    For every first-level sub-folder of ``root_img_path`` (e.g.
    ``train-product-imgs``), walks its class sub-folders and writes two
    files directly under ``root_img_path``:

      * ``<folder>.txt``        -- one relative image path per line
      * ``<prefix>-labels.txt`` -- the matching 1-based class label per
        line, where ``<prefix>`` is the folder name up to ``-imgs``.

    Labels come from each class folder's position in the listing
    returned by the first ``os.walk`` step over that folder.
    """
    # Last path component; the listed image paths are relative to it.
    # NOTE(review): '/'-based splitting assumes POSIX-style paths.
    base = root_img_path.split('/')[-1]
    for root, dirs, files in os.walk(root_img_path):
        for name in dirs:
            imgpaths=[]
            imglabels=[]
            first =True
            for sroot, sdirs, sfiles in os.walk(os.path.join(root_img_path,name)):
                if first:
                    # The first walk step lists the class folders; labels
                    # are derived from positions in this list.
                    obj_classes=sdirs
                    first = False
                    continue
                if sfiles:
                    classname = sroot.split('/')[-1]
                    label = obj_classes.index(classname) + 1
                    imgpaths.extend([os.path.join(base,name,classname,f) for f in sfiles])
                    imglabels.extend([str(label) for f in sfiles]) # as many labels an no of files in that subfolder
            lname = name.split("-imgs")[0]
            with open(os.path.join(root_img_path,name+'.txt'), mode='w') as outf, \
                open(os.path.join(root_img_path,lname+'-labels.txt'), mode='w') as outl:
                outf.write('\n'.join(imgpaths))
                outl.write('\n'.join(imglabels))
        break # skip outer for, after first folder level
def create_class_map(path_to_json):
    """
    Assuming arcify was already run locally.

    Builds a {class name: numeric label} map from the generated
    ``train-product-imgs.txt`` / ``train-product-labels.txt`` files and
    saves it as JSON at *path_to_json*.

    Returns ``0`` when the reference txt files are missing (arcify not
    run yet) and ``None`` on success.
    NOTE(review): the 0/None return convention is inconsistent; callers
    must not test the result with ``== 0``-style truthiness.
    """
    base= path_to_json.split("class_to_index.json")[0]
    path_train = os.path.join(base,"train-product-imgs")
    # NOTE(review): class_names is unused, but the listdir call also
    # raises if the train folder is missing -- confirm before removing.
    class_names = os.listdir(path_train)
    class_index={}
    try:
        with open(os.path.join(base,"train-product-imgs.txt")) as fpath, \
            open(os.path.join(base, "train-product-labels.txt")) as flabels:
            fileps= fpath.read().splitlines()
            labels = flabels.read().splitlines()
    except FileNotFoundError:
        print("Run arcify method locally before to generate reference txts")
        return 0
    for pth, label in zip(fileps, labels):
        # Class folder name is the path component right after
        # "train-product-imgs/"; keep the first label seen for it.
        category = pth.split("train-product-imgs/")[1].split("/")[0]
        if category not in class_index.keys():
            class_index[category] = label
    with open(path_to_json, 'w') as fout:
        json.dump(class_index, fout)
    print("Class - numeric index mapping saved locally")
    return None
def call_python_version(Version, Module, Function, ArgumentList):
    """Run ``Module.Function(*ArgumentList)`` under another Python version.

    Spawns a subordinate interpreter (e.g. ``Version='2.7'`` runs
    ``python2.7``) through an execnet gateway, executes the target
    function there and returns its result over the channel.  Arguments
    and the return value must be execnet-serialisable.
    """
    gw = execnet.makegateway("popen//python=python%s" % Version)
    channel = gw.remote_exec("""
        from %s import %s as the_function
        channel.send(the_function(*channel.receive()))
    """ % (Module, Function))
    channel.send(ArgumentList)
    return channel.receive()
def list_depth_filenames(input_path):
    """Collect depth-image paths from the class sub-folders of a directory.

    Looks exactly one level deep: for every immediate sub-folder of
    ``input_path`` (e.g. ``./data/class1``, ``./data/class2``), gathers
    the files whose name contains ``"depth"``.

    Returns the list of matching paths, or ``None`` when nothing matched
    or ``input_path`` does not exist.
    """
    for parent, subdirs, _ in os.walk(input_path):
        matches = []
        for subdir in subdirs:
            folder = os.path.join(parent, subdir)
            for fname in os.listdir(folder):
                if 'depth' in fname:
                    matches.append(os.path.join(folder, fname))
        # Only the first level of the walk is ever inspected.
        return matches if matches else None
    return None  # input_path missing / not a directory
| StarcoderdataPython |
8122606 | from __future__ import unicode_literals
from pepper.framework import AbstractComponent, AbstractBackend
from pepper.framework.component import ContextComponent, TextToSpeechComponent, SpeechRecognitionComponent
from pepper.language import Utterance
from pepper import config
import urllib
import re
from threading import Timer
from typing import Optional
class SubtitlesComponent(AbstractComponent):
    """Mirror spoken and heard text as subtitles on the robot's tablet.

    Hooks into text-to-speech output (:meth:`say`) and speech-recognition
    transcripts (:meth:`on_transcript`), rendering each utterance through
    an external subtitle web page and hiding it after a timeout.
    """

    # Web page that renders the (URL-encoded) subtitle text.
    SUBTITLES_URL = "https://bramkraai.github.io/subtitle?text={}"
    # Seconds a subtitle stays on screen before the tablet is cleared.
    SUBTITLES_TIMEOUT = 15

    def __init__(self, backend):
        # type: (AbstractBackend) -> None
        super(SubtitlesComponent, self).__init__(backend)

        # Timer that hides the tablet page; replaced on every subtitle.
        self._subtitles_timeout_timer = None  # type: Optional[Timer]

        # Subtitles only make sense alongside TTS and speech recognition.
        self.require(SubtitlesComponent, TextToSpeechComponent)
        self.require(SubtitlesComponent, SpeechRecognitionComponent)

    def say(self, text, animation=None, block=False):
        # type: (str, str, bool) -> None
        """Show the robot's own utterance as a subtitle, then speak it."""
        self._show_subtitles('{}:/"{}"'.format(config.NAME, text))
        super(SubtitlesComponent, self).say(text, animation, block)

    def on_transcript(self, hypotheses, audio):
        """Show the best speech-recognition hypothesis as a subtitle."""
        speaker = "Human"

        try:
            # While chatting, attribute the utterance to the known speaker.
            if isinstance(self, ContextComponent) and self.context.chatting:
                speaker = self.context.chat.speaker
        except AttributeError as e:
            # Context/chat not available yet; keep the generic label.
            pass

        self._show_subtitles('{}:/"{}"'.format(speaker, hypotheses[0].transcript))
        super(SubtitlesComponent, self).on_transcript(hypotheses, audio)

    def _show_subtitles(self, text):
        """Display *text* on the tablet and (re)arm the hide timeout."""
        # Stop Timeout Timer if running
        if self._subtitles_timeout_timer: self._subtitles_timeout_timer.cancel()

        # Show Subtitles
        # NOTE(review): strips backslash-delimited tokens and non-ASCII
        # characters before URL-encoding -- presumably to drop TTS markup;
        # confirm against the subtitle page's expectations.
        text_websafe = urllib.quote(''.join([i for i in re.sub(r'\\\\\S+\\\\', "", text) if ord(i) < 128]))
        self.backend.tablet.show(self.SUBTITLES_URL.format(text_websafe))

        # Start Timeout Timer
        self._subtitles_timeout_timer = Timer(self.SUBTITLES_TIMEOUT, self.backend.tablet.hide)
        self._subtitles_timeout_timer.start()
6462999 | import os.path as osp
from setuptools import find_packages, setup
# Runtime dependencies, pinned to the versions the project was tested with.
requirements = ["h5py==2.10.0", "matplotlib==3.3.4", "munch==2.5.0", "open3d==0.9.0", "PyYAML==5.3.1"]

# Standard setuptools packaging metadata for the VRCNet project.
setup(
    name="vrcnet",
    version="1.0.0",
    author="paul007pl",
    packages=find_packages(),
    install_requires=requirements,
)
| StarcoderdataPython |
358734 | <filename>pyplus/autotest/__init__.py
from .manager import Manager
def add(*args, **kwargs):
    """Convenience wrapper: forward all arguments to ``Manager().add``."""
    manager = Manager()
    return manager.add(*args, **kwargs)
| StarcoderdataPython |
6665107 | <reponame>arthurdarcet/motor<filename>motor/core.py<gh_stars>1-10
# Copyright 2011-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
"""Framework-agnostic core of Motor, an asynchronous driver for MongoDB."""
import functools
import sys
import textwrap
import pymongo
import pymongo.auth
import pymongo.common
import pymongo.database
import pymongo.errors
import pymongo.mongo_client
import pymongo.mongo_replica_set_client
import pymongo.son_manipulator
from pymongo.bulk import BulkOperationBuilder
from pymongo.database import Database
from pymongo.change_stream import ChangeStream
from pymongo.collection import Collection
from pymongo.cursor import Cursor, _QUERY_OPTIONS
from pymongo.command_cursor import CommandCursor
from .metaprogramming import (AsyncCommand,
AsyncRead,
AsyncWrite,
coroutine_annotation,
create_class_with_framework,
DelegateMethod,
motor_coroutine,
MotorCursorChainingMethod,
ReadOnlyProperty)
from .motor_common import callback_type_error
from motor.docstrings import *
# Optional SSL support: some Python builds ship without the ssl module.
HAS_SSL = True
try:
    import ssl
except ImportError:
    # Keep the name bound so later references to `ssl` still resolve.
    ssl = None
    HAS_SSL = False

# True on interpreters that support native coroutines (async/await).
PY35 = sys.version_info >= (3, 5)
class AgnosticBase(object):
    """Common base for Motor wrappers around a PyMongo object.

    Stores the wrapped PyMongo object in ``self.delegate`` and forwards
    equality and repr to it.
    """

    def __init__(self, delegate):
        self.delegate = delegate

    def __eq__(self, other):
        # Equal only when both sides are the same wrapper type and both
        # actually carry a delegate; otherwise defer to the other operand.
        same_type = isinstance(other, self.__class__)
        both_wrapped = hasattr(self, 'delegate') and hasattr(other, 'delegate')
        if same_type and both_wrapped:
            return self.delegate == other.delegate
        return NotImplemented

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.delegate)
class AgnosticBaseProperties(AgnosticBase):
    """AgnosticBase plus the read-concern/preference property set."""

    # Read-only pass-throughs to the delegate's corresponding PyMongo
    # properties (descriptor semantics from motor.metaprogramming).
    codec_options = ReadOnlyProperty()
    read_preference = ReadOnlyProperty()
    read_concern = ReadOnlyProperty()
    write_concern = ReadOnlyProperty()
class AgnosticClient(AgnosticBaseProperties):
    """Framework-agnostic wrapper of pymongo's MongoClient.

    The descriptor attributes below are expanded by Motor's
    metaprogramming layer (``create_class_with_framework``) into async
    or pass-through versions of the corresponding MongoClient members.
    """

    __motor_class_name__ = 'MotorClient'
    __delegate_class__ = pymongo.mongo_client.MongoClient

    # Generated wrappers around the MongoClient API: AsyncCommand /
    # AsyncRead become awaitable methods, ReadOnlyProperty /
    # DelegateMethod are plain pass-throughs to the delegate.
    address = ReadOnlyProperty()
    arbiters = ReadOnlyProperty()
    close = DelegateMethod()
    close_cursor = AsyncCommand()
    database_names = AsyncRead()
    drop_database = AsyncCommand().unwrap('MotorDatabase')
    event_listeners = ReadOnlyProperty()
    fsync = AsyncCommand()
    get_database = DelegateMethod(doc=get_database_doc).wrap(Database)
    get_default_database = DelegateMethod().wrap(Database)
    HOST = ReadOnlyProperty()
    is_mongos = ReadOnlyProperty()
    is_primary = ReadOnlyProperty()
    kill_cursors = AsyncCommand()
    list_databases = AsyncRead().wrap(CommandCursor)
    list_database_names = AsyncRead()
    local_threshold_ms = ReadOnlyProperty()
    max_bson_size = ReadOnlyProperty()
    max_idle_time_ms = ReadOnlyProperty()
    max_message_size = ReadOnlyProperty()
    max_pool_size = ReadOnlyProperty()
    max_write_batch_size = ReadOnlyProperty()
    min_pool_size = ReadOnlyProperty()
    nodes = ReadOnlyProperty()
    PORT = ReadOnlyProperty()
    primary = ReadOnlyProperty()
    read_concern = ReadOnlyProperty()
    retry_writes = ReadOnlyProperty()
    secondaries = ReadOnlyProperty()
    server_info = AsyncRead()
    server_selection_timeout = ReadOnlyProperty()
    start_session = AsyncRead(doc=start_session_doc)
    unlock = AsyncCommand()

    def __init__(self, *args, **kwargs):
        """Create a new connection to a single MongoDB instance at *host:port*.

        Takes the same constructor arguments as
        :class:`~pymongo.mongo_client.MongoClient`, as well as:

        :Parameters:
          - `io_loop` (optional): Special :class:`tornado.ioloop.IOLoop`
            instance to use instead of default
        """
        if 'io_loop' in kwargs:
            io_loop = kwargs.pop('io_loop')
        else:
            io_loop = self._framework.get_event_loop()

        # Connect lazily so construction never blocks the event loop.
        kwargs.setdefault('connect', False)
        delegate = self.__delegate_class__(*args, **kwargs)
        # Start the super() chain at AgnosticBase.__init__ (skipping
        # AgnosticBaseProperties) to store the PyMongo delegate.
        super(AgnosticBaseProperties, self).__init__(delegate)
        if io_loop:
            self._framework.check_event_loop(io_loop)
            self.io_loop = io_loop
        else:
            self.io_loop = self._framework.get_event_loop()

    def get_io_loop(self):
        """Return the event loop this client schedules callbacks on."""
        return self.io_loop

    def __getattr__(self, name):
        """Treat unknown attribute access as a database name (``client.db``)."""
        if name.startswith('_'):
            # Underscore names are almost certainly typos, not databases.
            raise AttributeError(
                "%s has no attribute %r. To access the %s"
                " database, use client['%s']." % (
                    self.__class__.__name__, name, name, name))

        return self[name]

    def __getitem__(self, name):
        """Return a MotorDatabase wrapping database *name*."""
        db_class = create_class_with_framework(
            AgnosticDatabase, self._framework, self.__module__)

        return db_class(self, name)

    def wrap(self, obj):
        """Wrap a PyMongo Database or CommandCursor in its Motor class."""
        if obj.__class__ == Database:
            db_class = create_class_with_framework(
                AgnosticDatabase,
                self._framework,
                self.__module__)

            return db_class(self, obj.name, _delegate=obj)
        elif obj.__class__ == CommandCursor:
            command_cursor_class = create_class_with_framework(
                AgnosticCommandCursor,
                self._framework,
                self.__module__)

            return command_cursor_class(obj, self)
class AgnosticDatabase(AgnosticBaseProperties):
    """Framework-agnostic wrapper of pymongo's Database."""

    __motor_class_name__ = 'MotorDatabase'
    __delegate_class__ = Database

    # Generated wrappers around the Database API (see the descriptor
    # semantics in motor.metaprogramming).
    add_user = AsyncCommand()
    authenticate = AsyncCommand()
    collection_names = AsyncRead()
    command = AsyncCommand(doc=cmd_doc)
    create_collection = AsyncCommand().wrap(Collection)
    current_op = AsyncRead()
    dereference = AsyncRead()
    drop_collection = AsyncCommand().unwrap('MotorCollection')
    error = AsyncRead(doc="OBSOLETE")
    eval = AsyncCommand()
    get_collection = DelegateMethod().wrap(Collection)
    last_status = AsyncRead(doc="OBSOLETE")
    list_collection_names = AsyncRead()
    list_collections = AsyncRead()
    logout = AsyncCommand()
    name = ReadOnlyProperty()
    previous_error = AsyncRead(doc="OBSOLETE")
    profiling_info = AsyncRead()
    profiling_level = AsyncRead()
    remove_user = AsyncCommand()
    reset_error_history = AsyncCommand(doc="OBSOLETE")
    set_profiling_level = AsyncCommand()
    validate_collection = AsyncRead().unwrap('MotorCollection')

    # SON-manipulator introspection, delegated read-only.
    incoming_manipulators = ReadOnlyProperty()
    incoming_copying_manipulators = ReadOnlyProperty()
    outgoing_manipulators = ReadOnlyProperty()
    outgoing_copying_manipulators = ReadOnlyProperty()

    def __init__(self, client, name, **kwargs):
        """Wrap (or create) a PyMongo Database owned by *client*."""
        self._client = client
        delegate = kwargs.get('_delegate') or Database(
            client.delegate, name, **kwargs)

        super(self.__class__, self).__init__(delegate)

    @property
    def client(self):
        """This MotorDatabase's :class:`MotorClient`."""
        return self._client

    def __getattr__(self, name):
        """Treat unknown attribute access as a collection name (``db.coll``)."""
        if name.startswith('_'):
            # Underscore names are almost certainly typos, not collections.
            raise AttributeError(
                "%s has no attribute %r. To access the %s"
                " collection, use database['%s']." % (
                    self.__class__.__name__, name, name, name))

        return self[name]

    def __getitem__(self, name):
        """Return a MotorCollection wrapping collection *name*."""
        collection_class = create_class_with_framework(
            AgnosticCollection, self._framework, self.__module__)

        return collection_class(self, name)

    def __call__(self, *args, **kwargs):
        """Raise a helpful error when a database object is called.

        Catches mistakes such as ``client.open_sync()`` (a removed API)
        where attribute access created a MotorDatabase instead of
        invoking a client method.
        """
        database_name = self.delegate.name
        client_class_name = self._client.__class__.__name__
        if database_name == 'open_sync':
            raise TypeError(
                "%s.open_sync() is unnecessary Motor 0.2, "
                "see changelog for details." % client_class_name)

        raise TypeError(
            "MotorDatabase object is not callable. If you meant to "
            "call the '%s' method on a %s object it is "
            "failing because no such method exists." % (
                database_name, client_class_name))

    def wrap(self, collection):
        """Wrap a PyMongo Collection in this database's MotorCollection."""
        # Replace pymongo.collection.Collection with MotorCollection.
        klass = create_class_with_framework(
            AgnosticCollection,
            self._framework,
            self.__module__)

        return klass(self, collection.name, _delegate=collection)

    def add_son_manipulator(self, manipulator):
        """Add a new son manipulator to this database.

        Newly added manipulators will be applied before existing ones.

        :Parameters:
          - `manipulator`: the manipulator to add
        """
        # We override add_son_manipulator to unwrap the AutoReference's
        # database attribute.
        if isinstance(manipulator, pymongo.son_manipulator.AutoReference):
            db = manipulator.database
            db_class = create_class_with_framework(
                AgnosticDatabase,
                self._framework,
                self.__module__)

            if isinstance(db, db_class):
                # db is a MotorDatabase; get the PyMongo Database instance.
                manipulator.database = db.delegate

        self.delegate.add_son_manipulator(manipulator)

    def get_io_loop(self):
        """Delegate to the owning client's event loop."""
        return self._client.get_io_loop()
class AgnosticCollection(AgnosticBaseProperties):
__motor_class_name__ = 'MotorCollection'
__delegate_class__ = Collection
bulk_write = AsyncCommand(doc=bulk_write_doc)
count = AsyncRead()
create_index = AsyncCommand()
create_indexes = AsyncCommand(doc=create_indexes_doc)
delete_many = AsyncCommand(doc=delete_many_doc)
delete_one = AsyncCommand(doc=delete_one_doc)
distinct = AsyncRead()
drop = AsyncCommand(doc=drop_doc)
drop_index = AsyncCommand()
drop_indexes = AsyncCommand()
ensure_index = AsyncCommand()
find_and_modify = AsyncCommand()
find_one = AsyncRead(doc=find_one_doc)
find_one_and_delete = AsyncCommand(doc=find_one_and_delete_doc)
find_one_and_replace = AsyncCommand(doc=find_one_and_replace_doc)
find_one_and_update = AsyncCommand(doc=find_one_and_update_doc)
full_name = ReadOnlyProperty()
group = AsyncRead()
index_information = AsyncRead(doc=index_information_doc)
inline_map_reduce = AsyncRead()
insert = AsyncWrite()
insert_many = AsyncWrite(doc=insert_many_doc)
insert_one = AsyncCommand(doc=insert_one_doc)
map_reduce = AsyncCommand(doc=mr_doc).wrap(Collection)
name = ReadOnlyProperty()
options = AsyncRead()
reindex = AsyncCommand()
remove = AsyncWrite()
rename = AsyncCommand()
replace_one = AsyncCommand(doc=replace_one_doc)
save = AsyncWrite()
update = AsyncWrite(doc=update_doc)
update_many = AsyncCommand(doc=update_many_doc)
update_one = AsyncCommand(doc=update_one_doc)
with_options = DelegateMethod().wrap(Collection)
_async_aggregate = AsyncRead(attr_name='aggregate')
_async_list_indexes = AsyncRead(attr_name='list_indexes')
__parallel_scan = AsyncRead(attr_name='parallel_scan')
def __init__(self, database, name, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
_delegate=None):
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError("First argument to MotorCollection must be "
"MotorDatabase, not %r" % database)
delegate = _delegate or Collection(
database.delegate, name, codec_options=codec_options,
read_preference=read_preference, write_concern=write_concern,
read_concern=read_concern)
super(self.__class__, self).__init__(delegate)
self.database = database
def __getattr__(self, name):
# Dotted collection name, like "foo.bar".
if name.startswith('_'):
full_name = "%s.%s" % (self.name, name)
raise AttributeError(
"%s has no attribute %r. To access the %s"
" collection, use database['%s']." % (
self.__class__.__name__, name, full_name, full_name))
return self[name]
def __getitem__(self, name):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
return collection_class(self.database, self.name + '.' + name)
def __call__(self, *args, **kwargs):
raise TypeError(
"MotorCollection object is not callable. If you meant to "
"call the '%s' method on a MotorCollection object it is "
"failing because no such method exists." %
self.delegate.name)
def find(self, *args, **kwargs):
"""Create a :class:`MotorCursor`. Same parameters as for
PyMongo's :meth:`~pymongo.collection.Collection.find`.
Note that ``find`` does not take a `callback` parameter, nor does
it return a Future, because ``find`` merely creates a
:class:`MotorCursor` without performing any operations on the server.
``MotorCursor`` methods such as :meth:`~MotorCursor.to_list` or
:meth:`~MotorCursor.count` perform actual operations.
"""
if 'callback' in kwargs:
raise pymongo.errors.InvalidOperation(
"Pass a callback to each, to_list, or count, not to find.")
cursor = self.delegate.find(*args, **kwargs)
cursor_class = create_class_with_framework(
AgnosticCursor, self._framework, self.__module__)
return cursor_class(cursor, self)
def aggregate(self, pipeline, **kwargs):
"""Execute an aggregation pipeline on this collection.
The aggregation can be run on a secondary if the client is connected
to a replica set and its ``read_preference`` is not :attr:`PRIMARY`.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline)
while (yield cursor.fetch_next):
doc = cursor.next_object()
print(doc)
In Python 3.5 and newer, aggregation cursors can be iterated elegantly
in native coroutines with `async for`::
async def f():
async for doc in collection.aggregate(pipeline):
print(doc)
.. versionchanged:: 1.0
:meth:`aggregate` now **always** returns a cursor.
.. versionchanged:: 0.5
:meth:`aggregate` now returns a cursor by default,
and the cursor is returned immediately without a ``yield``.
See :ref:`aggregation changes in Motor 0.5 <aggregate_changes_0_5>`.
.. versionchanged:: 0.2
Added cursor support.
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
if kwargs.get('cursor') is False:
kwargs.pop('cursor')
# One-shot aggregation, no cursor. Send command now, return Future.
return self._async_aggregate(pipeline, **kwargs)
else:
if 'callback' in kwargs:
raise pymongo.errors.InvalidOperation(
"Pass a callback to to_list or each, not to aggregate.")
kwargs.setdefault('cursor', {})
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_aggregate, pipeline, **kwargs)
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
session=None):
"""Watch changes on this collection.
Returns a :class:`~MotorChangeStream` cursor which iterates over changes
on this collection. Introduced in MongoDB 3.6.
A change stream continues waiting indefinitely for matching change
events. Code like the following allows a program to cancel the change
stream and exit.
.. code-block:: python3
change_stream = None
async def watch_collection():
global change_stream
# Using the change stream in an "async with" block
# ensures it is canceled promptly if your code breaks
# from the loop or throws an exception.
async with db.collection.watch() as change_stream:
async for change in stream:
print(change)
# Tornado
from tornado.ioloop import IOLoop
def main():
loop = IOLoop.current()
# Start watching collection for changes.
loop.add_callback(watch_collection)
try:
loop.start()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# asyncio
from asyncio import get_event_loop
def main():
loop = get_event_loop()
task = loop.create_task(watch_collection)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if change_stream is not None:
change_stream.close()
# Prevent "Task was destroyed but it is pending!"
loop.run_until_complete(task)
The :class:`~MotorChangeStream` async iterable blocks
until the next change document is returned or an error is raised. If
the :meth:`~MotorChangeStream.next` method encounters
a network error when retrieving a batch from the server, it will
automatically attempt to recreate the cursor such that no change
events are missed. Any error encountered during the resume attempt
indicates there may be an outage and will be raised.
.. code-block:: python3
try:
pipeline = [{'$match': {'operationType': 'insert'}}]
async with db.collection.watch(pipeline) as stream:
async for change in stream:
print(change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument option to pass
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~MotorChangeStream`.
See the :ref:`tornado_change_stream_example`.
.. versionadded:: 1.2
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams.rst
"""
cursor_class = create_class_with_framework(
AgnosticChangeStream, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, pipeline, full_document, resume_after,
max_await_time_ms, batch_size, collation, session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection. ::
async def print_indexes():
for index in await db.test.list_indexes():
print(index)
If the only index is the default index on ``_id``, this might print::
SON([('v', 1), ('key', SON([('_id', 1)])), ('name', '_id_'), ('ns', 'test.test')])
"""
cursor_class = create_class_with_framework(
AgnosticLatentCommandCursor, self._framework, self.__module__)
# Latent cursor that will send initial command on first "async for".
return cursor_class(self, self._async_list_indexes, session=session)
    def parallel_scan(self, num_cursors, **kwargs):
        """Scan this entire collection in parallel.
        Returns a list of up to ``num_cursors`` cursors that can be iterated
        concurrently. As long as the collection is not modified during
        scanning, each document appears once in one of the cursors' result
        sets.
        For example, to process each document in a collection using some
        function ``process_document()``::
            @gen.coroutine
            def process_cursor(cursor):
                while (yield cursor.fetch_next):
                    process_document(cursor.next_object())
            # Get up to 4 cursors.
            cursors = yield collection.parallel_scan(4)
            yield [process_cursor(cursor) for cursor in cursors]
            # All documents have now been processed.
        If ``process_document()`` is a coroutine, do
        ``yield process_document(document)``.
        With a replica set, pass `read_preference` of
        :attr:`~pymongo.read_preference.ReadPreference.SECONDARY_PREFERRED`
        to scan a secondary.
        :Parameters:
          - `num_cursors`: the number of cursors to return
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`, created with
            :meth:`~MotorClient.start_session`.
        .. note:: Requires server version **>= 2.5.5**.
        """
        io_loop = self.get_io_loop()
        # This future is resolved (or failed) later by _scan_callback.
        original_future = self._framework.get_future(io_loop)
        # Return a future, or if user passed a callback chain it to the future.
        callback = kwargs.pop('callback', None)
        retval = self._framework.future_or_callback(original_future,
                                                    callback,
                                                    io_loop)
        # Once we have PyMongo Cursors, wrap in MotorCursors and resolve the
        # future with them, or pass them to the callback.
        # NOTE(review): self.__parallel_scan is name-mangled; the async
        # delegate call it refers to is defined elsewhere in this class.
        self._framework.add_future(
            io_loop,
            self.__parallel_scan(num_cursors, **kwargs),
            self._scan_callback, original_future)
        return retval
def _scan_callback(self, original_future, future):
try:
command_cursors = future.result()
except Exception as exc:
original_future.set_exception(exc)
else:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor, self._framework, self.__module__)
motor_command_cursors = [
command_cursor_class(cursor, self)
for cursor in command_cursors]
original_future.set_result(motor_command_cursors)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is ``False``.
Returns a :class:`~motor.MotorBulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. versionchanged:: 1.0
Added bypass_document_validation support
.. versionadded:: 0.2
"""
bob_class = create_class_with_framework(
AgnosticBulkOperationBuilder, self._framework, self.__module__)
return bob_class(self,
ordered=False,
bypass_document_validation=bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is ``False``.
Returns a :class:`~motor.MotorBulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. versionchanged:: 1.0
Added bypass_document_validation support
.. versionadded:: 0.2
"""
bob_class = create_class_with_framework(
AgnosticBulkOperationBuilder,
self._framework,
self.__module__)
return bob_class(self,
ordered=True,
bypass_document_validation=bypass_document_validation)
def wrap(self, obj):
if obj.__class__ is Collection:
# Replace pymongo.collection.Collection with MotorCollection.
return self.__class__(self.database, obj.name, _delegate=obj)
elif obj.__class__ is Cursor:
return AgnosticCursor(obj, self)
elif obj.__class__ is CommandCursor:
command_cursor_class = create_class_with_framework(
AgnosticCommandCursor,
self._framework,
self.__module__)
return command_cursor_class(obj, self)
elif obj.__class__ is ChangeStream:
change_stream_class = create_class_with_framework(
AgnosticChangeStream,
self._framework,
self.__module__)
return change_stream_class(obj, self)
else:
return obj
    def get_io_loop(self):
        # A collection runs on the same loop as its parent database.
        return self.database.get_io_loop()
class AgnosticBaseCursor(AgnosticBase):
    """Base class for AgnosticCursor and AgnosticCommandCursor"""
    # Delegated attributes: AsyncRead/ReadOnlyProperty/MotorCursorChainingMethod
    # are descriptors that forward to the wrapped PyMongo cursor (per-framework).
    _refresh = AsyncRead()
    address = ReadOnlyProperty()
    cursor_id = ReadOnlyProperty()
    alive = ReadOnlyProperty()
    batch_size = MotorCursorChainingMethod()
    session = ReadOnlyProperty()
    def __init__(self, cursor, collection):
        """Don't construct a cursor yourself, but acquire one from methods like
        :meth:`MotorCollection.find` or :meth:`MotorCollection.aggregate`.
        .. note::
          There is no need to manually close cursors; they are closed
          by the server after being fully iterated
          with :meth:`to_list`, :meth:`each`, or :attr:`fetch_next`, or
          automatically closed by the client when the :class:`MotorCursor` is
          cleaned up by the garbage collector.
        """
        # 'cursor' is a PyMongo Cursor, CommandCursor, or a _LatentCursor.
        super(AgnosticBaseCursor, self).__init__(delegate=cursor)
        self.collection = collection
        # started: first server round trip has been issued.
        self.started = False
        # closed: close() was called explicitly.
        self.closed = False
    # Define the async-iterator protocol only on Python 3.5+, where the
    # "async def" syntax exists; exec keeps older parsers from choking.
    # python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
    if PY35:
        exec(textwrap.dedent("""
        def __aiter__(self):
            return self
        async def __anext__(self):
            # An optimization: skip the "await" if possible.
            if self._buffer_size() or await self.fetch_next:
                return self.next_object()
            raise StopAsyncIteration()
        """), globals(), locals())
    def _get_more(self):
        """Initial query or getMore. Returns a Future."""
        if not self.alive:
            raise pymongo.errors.InvalidOperation(
                "Can't call get_more() on a MotorCursor that has been"
                " exhausted or killed.")
        self.started = True
        return self._refresh()
    @property
    @coroutine_annotation
    def fetch_next(self):
        """A Future used with `gen.coroutine`_ to asynchronously retrieve the
        next document in the result set, fetching a batch of documents from the
        server if necessary. Resolves to ``False`` if there are no more
        documents, otherwise :meth:`next_object` is guaranteed to return a
        document.
        .. _`gen.coroutine`: http://tornadoweb.org/en/stable/gen.html
        .. testsetup:: fetch_next
            MongoClient().test.test_collection.delete_many({})
            collection = MotorClient().test.test_collection
        .. doctest:: fetch_next
            >>> @gen.coroutine
            ... def f():
            ...     yield collection.insert_many([{'_id': i} for i in range(5)])
            ...     cursor = collection.find().sort([('_id', 1)])
            ...     while (yield cursor.fetch_next):
            ...         doc = cursor.next_object()
            ...         sys.stdout.write(str(doc['_id']) + ', ')
            ...     print('done')
            ...
            >>> IOLoop.current().run_sync(f)
            0, 1, 2, 3, 4, done
        While it appears that fetch_next retrieves each document from
        the server individually, the cursor actually fetches documents
        efficiently in `large batches`_.
        In Python 3.5 and newer, cursors can be iterated elegantly and very
        efficiently in native coroutines with `async for`:
        .. doctest:: fetch_next
            >>> async def f():
            ...     async for doc in collection.find():
            ...         sys.stdout.write(str(doc['_id']) + ', ')
            ...     print('done')
            ...
            >>> IOLoop.current().run_sync(f)
            0, 1, 2, 3, 4, done
        .. _`large batches`: https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches
        """
        if not self._buffer_size() and self.alive:
            # Return the Future, which resolves to number of docs fetched or 0.
            return self._get_more()
        elif self._buffer_size():
            # Buffered docs remain: resolve immediately to True.
            future = self._framework.get_future(self.get_io_loop())
            future.set_result(True)
            return future
        else:
            # Dead
            future = self._framework.get_future(self.get_io_loop())
            future.set_result(False)
            return future
    def next_object(self):
        """Get a document from the most recently fetched batch, or ``None``.
        See :attr:`fetch_next`.
        """
        if not self._buffer_size():
            return None
        return next(self.delegate)
    def each(self, callback):
        """Iterates over all the documents for this cursor.
        :meth:`each` returns immediately, and `callback` is executed asynchronously
        for each document. `callback` is passed ``(None, None)`` when iteration
        is complete.
        Cancel iteration early by returning ``False`` from the callback. (Only
        ``False`` cancels iteration: returning ``None`` or 0 does not.)
        .. testsetup:: each
            from tornado.ioloop import IOLoop
            MongoClient().test.test_collection.delete_many({})
            collection = MotorClient().test.test_collection
        .. doctest:: each
            >>> def inserted(result, error):
            ...     if error:
            ...         raise error
            ...     cursor = collection.find().sort([('_id', 1)])
            ...     cursor.each(callback=each)
            ...
            >>> def each(result, error):
            ...     if error:
            ...         raise error
            ...     elif result:
            ...         sys.stdout.write(str(result['_id']) + ', ')
            ...     else:
            ...         # Iteration complete
            ...         IOLoop.current().stop()
            ...         print('done')
            ...
            >>> collection.insert_many(
            ...     [{'_id': i} for i in range(5)], callback=inserted)
            >>> IOLoop.current().start()
            0, 1, 2, 3, 4, done
        .. note:: Unlike other Motor methods, ``each`` requires a callback and
           does not return a Future, so it cannot be used in a coroutine.
           ``async for``, :meth:`to_list`, :attr:`fetch_next` are much easier to
           use.
        :Parameters:
          - `callback`: function taking (document, error)
        """
        if not callable(callback):
            raise callback_type_error
        # Kick off the fetch loop; passing future=None means "no prior fetch".
        self._each_got_more(callback, None)
    def _each_got_more(self, callback, future):
        # One turn of the each() loop: drain the buffer, then either schedule
        # another getMore or signal completion with (None, None).
        if future:
            try:
                future.result()
            except Exception as error:
                callback(None, error)
                return
        while self._buffer_size() > 0:
            doc = next(self.delegate)  # decrements self.buffer_size
            # Quit if callback returns exactly False (not None). Note we
            # don't close the cursor: user may want to resume iteration.
            if callback(doc, None) is False:
                return
            # The callback closed this cursor?
            if self.closed:
                return
        if self.alive and (self.cursor_id or not self.started):
            self._framework.add_future(
                self.get_io_loop(),
                self._get_more(),
                self._each_got_more, callback)
        else:
            # Complete
            self._framework.call_soon(
                self.get_io_loop(),
                functools.partial(callback, None, None))
    @coroutine_annotation
    def to_list(self, length, callback=None):
        """Get a list of documents.
        .. testsetup:: to_list
            MongoClient().test.test_collection.delete_many({})
            from tornado import ioloop
        .. doctest:: to_list
            >>> from motor.motor_tornado import MotorClient
            >>> collection = MotorClient().test.test_collection
            >>>
            >>> @gen.coroutine
            ... def f():
            ...     yield collection.insert_many([{'_id': i} for i in range(4)])
            ...     cursor = collection.find().sort([('_id', 1)])
            ...     docs = yield cursor.to_list(length=2)
            ...     while docs:
            ...         print(docs)
            ...         docs = yield cursor.to_list(length=2)
            ...
            ...     print('done')
            ...
            >>> ioloop.IOLoop.current().run_sync(f)
            [{'_id': 0}, {'_id': 1}]
            [{'_id': 2}, {'_id': 3}]
            done
        :Parameters:
          - `length`: maximum number of documents to return for this call, or
            None
          - `callback` (optional): function taking (documents, error)
        If a callback is passed, returns None, else returns a Future.
        .. versionchanged:: 0.2
           `callback` must be passed as a keyword argument, like
           ``to_list(10, callback=callback)``, and the
           `length` parameter is no longer optional.
        """
        if length is not None:
            if not isinstance(length, int):
                raise TypeError('length must be an int, not %r' % length)
            elif length < 0:
                raise ValueError('length must be non-negative')
        # A tailable cursor may never be exhausted, so to_list would hang.
        if self._query_flags() & _QUERY_OPTIONS['tailable_cursor']:
            raise pymongo.errors.InvalidOperation(
                "Can't call to_list on tailable cursor")
        to_list_future = self._framework.get_future(self.get_io_loop())
        # Run future_or_callback's type checking before we change anything.
        retval = self._framework.future_or_callback(to_list_future,
                                                    callback,
                                                    self.get_io_loop())
        if not self.alive:
            to_list_future.set_result([])
        else:
            the_list = []
            self._framework.add_future(
                self.get_io_loop(),
                self._get_more(),
                self._to_list, length, the_list, to_list_future)
        return retval
    def _to_list(self, length, the_list, to_list_future, get_more_result):
        # get_more_result is the result of self._get_more().
        # to_list_future will be the result of the user's to_list() call.
        try:
            result = get_more_result.result()
            collection = self.collection
            fix_outgoing = collection.database.delegate._fix_outgoing
            if length is None:
                n = result
            else:
                n = min(length, result)
            for _ in range(n):
                the_list.append(fix_outgoing(self._data().popleft(),
                                             collection))
            reached_length = (length is not None and len(the_list) >= length)
            if reached_length or not self.alive:
                to_list_future.set_result(the_list)
            else:
                # Not done yet: fetch the next batch and re-enter this method.
                self._framework.add_future(
                    self.get_io_loop(),
                    self._get_more(),
                    self._to_list, length, the_list, to_list_future)
        except Exception as exc:
            to_list_future.set_exception(exc)
    def get_io_loop(self):
        return self.collection.get_io_loop()
    @motor_coroutine
    def close(self):
        """Explicitly kill this cursor on the server. Call like (in Tornado):
        .. code-block:: python
            yield cursor.close()
        """
        if not self.closed:
            self.closed = True
            yield self._framework.yieldable(self._close())
    def _buffer_size(self):
        # Number of documents already fetched and awaiting next_object().
        return len(self._data())
    # Paper over some differences between PyMongo Cursor and CommandCursor.
    # Subclasses must implement all of the following hooks.
    def _query_flags(self):
        raise NotImplementedError
    def _data(self):
        raise NotImplementedError
    def _clear_cursor_id(self):
        raise NotImplementedError
    def _close_exhaust_cursor(self):
        raise NotImplementedError
    def _killed(self):
        raise NotImplementedError
    @motor_coroutine
    def _close(self):
        raise NotImplementedError()
class AgnosticCursor(AgnosticBaseCursor):
    # Motor wrapper for pymongo.cursor.Cursor. Async reads and chaining
    # methods delegate to the wrapped cursor via descriptors.
    __motor_class_name__ = 'MotorCursor'
    __delegate_class__ = Cursor
    address = ReadOnlyProperty()
    count = AsyncRead()
    collation = MotorCursorChainingMethod()
    distinct = AsyncRead()
    explain = AsyncRead()
    add_option = MotorCursorChainingMethod()
    remove_option = MotorCursorChainingMethod()
    limit = MotorCursorChainingMethod()
    skip = MotorCursorChainingMethod()
    max_scan = MotorCursorChainingMethod()
    sort = MotorCursorChainingMethod(doc=cursor_sort_doc)
    hint = MotorCursorChainingMethod()
    where = MotorCursorChainingMethod()
    max_await_time_ms = MotorCursorChainingMethod()
    max_time_ms = MotorCursorChainingMethod()
    min = MotorCursorChainingMethod()
    max = MotorCursorChainingMethod()
    comment = MotorCursorChainingMethod()
    # Async wrapper for PyMongo's name-mangled private Cursor.__die.
    _Cursor__die = AsyncRead()
    def rewind(self):
        """Rewind this cursor to its unevaluated state."""
        self.delegate.rewind()
        self.started = False
        return self
    def clone(self):
        """Get a clone of this cursor."""
        return self.__class__(self.delegate.clone(), self.collection)
    def __copy__(self):
        return self.__class__(self.delegate.__copy__(), self.collection)
    def __deepcopy__(self, memo):
        return self.__class__(self.delegate.__deepcopy__(memo), self.collection)
    # The hooks below reach into PyMongo Cursor's name-mangled internals.
    def _query_flags(self):
        return self.delegate._Cursor__query_flags
    def _data(self):
        return self.delegate._Cursor__data
    def _clear_cursor_id(self):
        self.delegate._Cursor__id = 0
    def _close_exhaust_cursor(self):
        # If an exhaust cursor is dying without fully iterating its results,
        # it must close the socket. PyMongo's Cursor does this, but we've
        # disabled its cleanup so we must do it ourselves.
        if self.delegate._Cursor__exhaust:
            manager = self.delegate._Cursor__exhaust_mgr
            if manager.sock:
                manager.sock.close()
            manager.close()
    def _killed(self):
        return self.delegate._Cursor__killed
    @motor_coroutine
    def _close(self):
        yield self._framework.yieldable(self._Cursor__die())
class AgnosticCommandCursor(AgnosticBaseCursor):
    # Motor wrapper for pymongo.command_cursor.CommandCursor.
    __motor_class_name__ = 'MotorCommandCursor'
    __delegate_class__ = CommandCursor
    # Async wrapper for PyMongo's name-mangled private CommandCursor.__die.
    _CommandCursor__die = AsyncRead()
    def _query_flags(self):
        # No query flags, so to_list's tailable-cursor check never trips.
        return 0
    def _data(self):
        return self.delegate._CommandCursor__data
    def _clear_cursor_id(self):
        self.delegate._CommandCursor__id = 0
    def _close_exhaust_cursor(self):
        # MongoDB doesn't have exhaust command cursors yet.
        pass
    def _killed(self):
        return self.delegate._CommandCursor__killed
    @motor_coroutine
    def _close(self):
        yield self._framework.yieldable(self._CommandCursor__die())
class _LatentCursor(object):
"""Take the place of a PyMongo CommandCursor until aggregate() begins."""
alive = True
_CommandCursor__data = []
_CommandCursor__id = None
_CommandCursor__killed = False
cursor_id = None
def clone(self):
return _LatentCursor()
def rewind(self):
pass
class AgnosticLatentCommandCursor(AgnosticCommandCursor):
    # A command cursor whose initial server command is deferred until the
    # first fetch; used by aggregate() and list_indexes().
    __motor_class_name__ = 'MotorLatentCommandCursor'
    def __init__(self, collection, start, *args, **kwargs):
        # We're being constructed without yield or await, like:
        #
        #     cursor = collection.aggregate(pipeline)
        #
        # ... so we can't send the "aggregate" command to the server and get
        # a PyMongo CommandCursor back yet. Set self.delegate to a latent
        # cursor until the first yield or await triggers _get_more(), which
        # will execute the callback "start", which gets a PyMongo CommandCursor.
        super(self.__class__, self).__init__(_LatentCursor(), collection)
        # start: callable issuing the deferred command; args/kwargs are
        # stored for that single call and released afterwards.
        self.start = start
        self.args = args
        self.kwargs = kwargs
    def _get_more(self):
        if not self.started:
            self.started = True
            original_future = self._framework.get_future(self.get_io_loop())
            future = self.start(
                *self.args,
                **self.kwargs)
            # Drop references so they can't be invoked (or retained) twice.
            self.start = self.args = self.kwargs = None
            self._framework.add_future(
                self.get_io_loop(),
                future,
                self._on_get_more, original_future)
            return original_future
        # Already started: behave like a normal command cursor.
        return super(self.__class__, self)._get_more()
    def _on_get_more(self, original_future, future):
        try:
            # "result" is a CommandCursor from PyMongo's aggregate().
            self.delegate = future.result()
        except Exception as exc:
            original_future.set_exception(exc)
        else:
            # _get_more is complete.
            original_future.set_result(len(self.delegate._CommandCursor__data))
class AgnosticBulkOperationBuilder(AgnosticBase):
    # Motor wrapper for PyMongo's BulkOperationBuilder: find/insert stay
    # synchronous (they only queue work), execute runs asynchronously.
    __motor_class_name__ = 'MotorBulkOperationBuilder'
    __delegate_class__ = BulkOperationBuilder
    find = DelegateMethod()
    insert = DelegateMethod()
    execute = AsyncCommand()
    def __init__(self, collection, ordered, bypass_document_validation):
        # Pin the builder to its collection's loop before wrapping.
        self.io_loop = collection.get_io_loop()
        delegate = BulkOperationBuilder(collection.delegate,
                                        ordered,
                                        bypass_document_validation)
        super(self.__class__, self).__init__(delegate)
    def get_io_loop(self):
        return self.io_loop
class AgnosticChangeStream(AgnosticBase):
    """A change stream cursor.
    Should not be called directly by application developers. See
    :meth:`~MotorCollection.watch` for example usage.
    .. versionadded: 1.2
    .. mongodoc:: changeStreams
    """
    __delegate_class__ = ChangeStream
    __motor_class_name__ = 'MotorChangeStream'
    _close = AsyncCommand(attr_name='close')
    def __init__(self, collection, pipeline, full_document, resume_after,
                 max_await_time_ms, batch_size, collation, session):
        # delegate stays None until the first next() lazily opens the
        # underlying PyMongo ChangeStream with the stored kwargs.
        super(self.__class__, self).__init__(delegate=None)
        self._collection = collection
        self._kwargs = {'pipeline': pipeline,
                        'full_document': full_document,
                        'resume_after': resume_after,
                        'max_await_time_ms': max_await_time_ms,
                        'batch_size': batch_size,
                        'collation': collation,
                        'session': session}
    def _next(self):
        # This method is run on a thread.
        try:
            if not self.delegate:
                # First call: open the blocking PyMongo change stream.
                self.delegate = self._collection.delegate.watch(**self._kwargs)
            return self.delegate.next()
        except StopIteration:
            # Translate the sync-iterator signal for "async for" consumers.
            raise StopAsyncIteration()
    @coroutine_annotation(callback=False)
    def next(self):
        """Advance the cursor.
        This method blocks until the next change document is returned or an
        unrecoverable error is raised.
        Raises :exc:`StopAsyncIteration` if this change stream is closed.
        You can iterate the change stream by calling
        ``await change_stream.next()`` repeatedly, or with an "async for" loop:
        .. code-block:: python3
          async for change in db.collection.watch():
              print(change)
        """
        # Run the blocking _next on an executor thread to keep the loop free.
        loop = self.get_io_loop()
        return self._framework.run_on_executor(loop, self._next)
    @coroutine_annotation(callback=False)
    def close(self):
        """Close this change stream.
        Stops any "async for" loops using this change stream.
        """
        if self.delegate:
            return self._close()
        # Never started.
        future = self._framework.get_future(self.get_io_loop())
        future.set_result(None)
        return future
    # Async iteration / context-manager protocol, Python 3.5+ only.
    if PY35:
        exec(textwrap.dedent("""
        async def __aiter__(self):
            return self
        __anext__ = next
        async def __aenter__(self):
            return self
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            if self.delegate:
                self.delegate.close()
        """), globals(), locals())
    def get_io_loop(self):
        return self._collection.get_io_loop()
    def __enter__(self):
        # Guard against synchronous "with": only "async with" is valid here.
        raise RuntimeError('Use a change stream in "async with", not "with"')
    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
| StarcoderdataPython |
87919 | <filename>inference/run_inference.py
import gc
import os
from typing import List
import numpy as np
import tifffile
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from inference.tiling import Tiler, TileSlice
from training.val_dataset import normalize_band
class SliceDataset(Dataset):
    """Dataset yielding normalized two-channel (VV, VH) tiles of one scene.

    Each item is a ``(img, coords)`` pair where ``img`` is a float tensor of
    shape (2, H, W) and ``coords`` holds the tile's (row, column, y, x).
    """

    def __init__(self, scene_dir: str, tiler: Tiler) -> None:
        super().__init__()
        self.scene_dir = scene_dir
        self.tiler = tiler
        self.slices = tiler.generate_slices()
        # Both full-scene bands are loaded once; tiles are cropped per item.
        self.vv_full = tifffile.imread(os.path.join(scene_dir, "VV_dB.tif"))
        self.vh_full = tifffile.imread(os.path.join(scene_dir, "VH_dB.tif"))

    def __getitem__(self, index):
        tile = self.slices[index]
        bands = [
            normalize_band(self.tiler.get_crop(self.vv_full, tile)),
            normalize_band(self.tiler.get_crop(self.vh_full, tile)),
        ]
        img = torch.from_numpy(np.stack(bands, axis=0)).float()
        coords = np.array([tile.row, tile.column, tile.y, tile.x])
        return img, coords

    def __len__(self):
        return len(self.slices)
def predict_scene_and_return_mm(models: List[nn.Module], dataset_dir, scene_id: str, use_fp16: bool = False,
                                rotate=False, output_dir=None):
    """Run the model ensemble over one scene tile-by-tile.

    Tiles the scene (3584px tiles, 704px overlap), averages per-model (and
    optionally 180-degree-rotated) predictions, and stitches four full-scene
    prediction maps: vessel, fishing, center and length.

    :param models: ensemble of networks producing dicts with keys
        "vessel_mask", "fishing_mask", "center_mask", "length_mask".
    :param dataset_dir: root directory containing one folder per scene.
    :param scene_id: scene folder name (must hold VV_dB.tif / VH_dB.tif).
    :param use_fp16: enable CUDA autocast for inference.
    :param rotate: also average with 180-degree rotation test-time augmentation.
    :param output_dir: if given, the four maps are additionally saved as .npy.
    :return: dict of full-scene numpy arrays keyed like the model outputs.
    """
    # memmap avoids loading the (large) band just to learn the scene size.
    vh_full = tifffile.memmap(os.path.join(dataset_dir, scene_id, "VH_dB.tif"), mode="r")
    height, width = vh_full.shape
    tiler = Tiler(height, width, 3584, overlap=704)
    # Probability-like maps are stored scaled to 0..255 in uint8 to save RAM.
    vessel_preds = np.zeros_like(vh_full, dtype=np.uint8)
    fishing_preds = np.zeros_like(vh_full, dtype=np.uint8)
    length_preds = np.zeros_like(vh_full, dtype=np.float16)
    center_preds = np.zeros_like(vh_full, dtype=np.uint8)
    slice_dataset = SliceDataset(os.path.join(dataset_dir, scene_id), tiler)
    slice_loader = DataLoader(
        slice_dataset, batch_size=1, shuffle=False, num_workers=8, pin_memory=False
    )
    for batch, slice_vals in tqdm(slice_loader):
        # batch_size is 1, so [0] unwraps the single tile's coordinates.
        slice = TileSlice(*slice_vals[0])
        with torch.no_grad():
            batch = batch.cuda()
            with torch.cuda.amp.autocast(enabled=use_fp16):
                outputs = []
                for model in models:
                    output = model(batch)
                    sigmoid_keys = ["fishing_mask", "vessel_mask"]
                    for k in sigmoid_keys:
                        output[k] = torch.sigmoid(output[k])
                    if rotate:
                        # 180-degree TTA: rotate input, un-rotate output,
                        # then average it into the straight prediction.
                        out180 = model(torch.rot90(batch, 2, dims=(2, 3)))
                        for key in list(output.keys()):
                            val = torch.rot90(out180[key], 2, dims=(2, 3))
                            if key in sigmoid_keys:
                                val = torch.sigmoid(val)
                            output[key] += val
                            output[key] *= 0.5
                    outputs.append(output)
                # Average the ensemble (first 3 channels of each head).
                output = {}
                for k in outputs[0].keys():
                    vs = [o[k][:, :3] for o in outputs]
                    output[k] = sum(vs) / len(models)
        # Convert the tile's maps to compact dtypes and stitch them in.
        vessel_mask = (output["vessel_mask"][0][0] * 255).cpu().numpy().astype(np.uint8)
        fishing_mask = (output["fishing_mask"][0][0] * 255).cpu().numpy().astype(np.uint8)
        center_mask = torch.clamp(output["center_mask"][0][0], 0, 255).cpu().numpy().astype(np.uint8)
        length_mask = output["length_mask"][0][0].cpu().numpy().astype(np.float16)
        tiler.update_crop(vessel_preds, vessel_mask, slice)
        tiler.update_crop(fishing_preds, fishing_mask, slice)
        tiler.update_crop(center_preds, center_mask, slice)
        tiler.update_crop(length_preds, length_mask, slice)
        # tiler.update_crop(conf_preds, conf_mask, slice)
    if output_dir:
        os.makedirs(os.path.join(output_dir, scene_id), exist_ok=True)
        np.save(os.path.join(output_dir, scene_id, "center_preds"), center_preds)
        np.save(os.path.join(output_dir, scene_id, "vessel_preds"), vessel_preds)
        np.save(os.path.join(output_dir, scene_id, "fishing_preds"), fishing_preds)
        np.save(os.path.join(output_dir, scene_id, "length_preds"), length_preds)
    gc.collect()
    return {
        "center_mask": center_preds,
        "vessel_mask": vessel_preds,
        "fishing_mask": fishing_preds,
        "length_mask": length_preds,
    }
| StarcoderdataPython |
1841048 | <filename>waitlist/blueprints/xup/submission.py
import logging
from datetime import datetime
from flask import request, flash, redirect, url_for, render_template, abort
from flask_login import current_user, login_required
import re
from waitlist.blueprints.api.fittings.self import self_remove_fit
from waitlist.data.names import WaitlistNames
from waitlist.data.sse import EntryAddedSSE, send_server_sent_event,\
FitAddedSSE
from waitlist.storage.database import WaitlistGroup, WaitlistEntry, Shipfit,\
TeamspeakDatum, InvType, FitModule, MarketGroup, HistoryEntry, Waitlist,\
ShipCheckCollection
from waitlist.storage.modules import resist_ships, logi_ships
from waitlist.utility.history_utils import create_history_object
from waitlist.utility.fitting_utils import get_fit_format, parse_dna_fitting,\
parse_eft, get_waitlist_type_for_fit
from waitlist.base import db
from . import bp
from flask_babel import gettext, ngettext
from typing import Dict, List, Tuple
from waitlist.utility.constants import location_flags, groups
from waitlist.utility.settings import sget_active_ts_id
from waitlist.utility.config import disable_teamspeak, disable_scruffy_mode
import operator
logger = logging.getLogger(__name__)
@bp.route('/submit', methods=['POST'])
@login_required
def submit():
    """Handle an x-up: parse the submitted fits and queue them for approval.

    Accepts EFT-format fits or in-game chat links (DNA strings), annotates
    fits that need extra skill info (Caldari BS level for resist ships,
    Logistics level for logi ships) in the fit comment, and appends them to
    the group's x-up queue, emitting the matching server-sent events.
    """
    # used for spawning the right SSEs
    _newEntryCreated = False
    _newFits = []
    fittings = request.form['fits']
    group_id = int(request.form['groupID'])
    logger.info("%s submitted %s for group %d", current_user.get_eve_name(), fittings, group_id)
    eve_id = current_user.get_eve_id()
    group = db.session.query(WaitlistGroup).filter(WaitlistGroup.groupID == group_id).one()
    if not group.enabled:
        # xups are disabled atm
        flash(gettext("X-UP is disabled!!!"))
        return redirect(url_for("index"))
    # Persist the "poke me" preference only when it actually changed.
    poke_me = 'pokeMe' in request.form
    if current_user.poke_me != poke_me:
        current_user.poke_me = poke_me
        db.session.commit()
    # check if it is scruffy (free-form "scruffy <type>[,<type>...]" x-up)
    if not disable_scruffy_mode and fittings.lower().startswith("scruffy"):
        # scruffy mode scruffy
        fittings = fittings.lower()
        _, _, ship_type = fittings.rpartition(" ")
        ship_types = []
        # check for , to see if it is a multi value shiptype
        allowed_types = [tpl[0].strip() for tpl in db.session.query(Waitlist.waitlistType).filter((~Waitlist.group.has(WaitlistGroup.queueID == Waitlist.id)) & (Waitlist.groupID == group_id))]
        if "," in ship_type:
            for stype in ship_type.split(","):
                stype = stype.strip()
                if stype in allowed_types:
                    ship_types.append(stype)
        else:
            if ship_type in allowed_types:
                ship_types.append(ship_type)
        # check if shiptype is valid
        if len(ship_types) <= 0:
            flash(gettext("Valid entries are scruffy %(types)s", types=','.join(allowed_types)), 'danger')
            return redirect(url_for('index'))
        queue = group.xuplist
        wl_entry = db.session.query(WaitlistEntry).filter(
            (WaitlistEntry.waitlist_id == queue.id) & (WaitlistEntry.user == eve_id)).first()
        if wl_entry is None:
            wl_entry = WaitlistEntry()
            wl_entry.creation = datetime.utcnow()
            wl_entry.user = eve_id
            queue.entries.append(wl_entry)
            _newEntryCreated = True
        h_entry = create_history_object(current_user.get_eve_id(), "xup")
        for stype in ship_types:
            wl = db.session.query(Waitlist).filter(
                (Waitlist.groupID == group_id) & (Waitlist.waitlistType == stype)
            ).first()
            target_wl_id = None
            if wl is not None:
                target_wl_id = wl.id
            if target_wl_id is None:
                # Fall back to the check collection's default target list.
                target_wl_id = db.session.query(ShipCheckCollection).filter(
                    (ShipCheckCollection.waitlistGroupID == group_id)
                ).one().defaultTargetID
            fit: Shipfit = Shipfit()
            fit.ship_type = 0  # #System >.>
            fit.wl_type = stype
            fit.modules = ':'
            fit.targetWaitlistID = target_wl_id
            wl_entry.fittings.append(fit)
            if not _newEntryCreated:
                _newFits.append(fit)
            h_entry.fittings.append(fit)
        db.session.add(h_entry)
        db.session.commit()
        # Notify clients: one event for a whole new entry, or one per fit.
        if _newEntryCreated:
            event = EntryAddedSSE(wl_entry, group_id, queue.id, True)
            send_server_sent_event(event)
        else:
            for fit in _newFits:
                event = FitAddedSSE(group_id, queue.id, wl_entry.id, fit, True, wl_entry.user)
                send_server_sent_event(event)
        flash(gettext("You were added as %(ship_type)s", ship_type=ship_type),
              "success")
        return redirect(url_for('index') + "?groupId=" + str(group_id))
    # ### END SCRUFFY CODE
    logilvl = request.form['logi']
    if logilvl == "":
        logilvl = "0"
    caldari_bs_lvl = request.form['cbs']
    if caldari_bs_lvl == "":
        caldari_bs_lvl = "0"
    logilvl = int(logilvl)
    caldari_bs_lvl = int(caldari_bs_lvl)
    newbro = request.form.get('newbro', "off")
    # '!=' instead of 'is not': identity comparison against a str literal is
    # implementation-dependent and was only working by accident.
    newbro = (newbro != "off")
    current_user.is_new = newbro
    current_user.cbs_level = caldari_bs_lvl
    current_user.lc_level = logilvl
    logger.debug("Fittings to parse: %s", fittings)
    # lets normalize linebreaks; str.replace() treats the pattern literally,
    # so this must be a regex substitution to have any effect.
    fittings = re.sub(r"[\n\r]+", "\n", fittings)
    fittings = fittings.strip()
    # lets first find out what kind of fitting is used
    end_line_idx = fittings.find('\n') + 1
    first_line = fittings[:end_line_idx]
    format_type = get_fit_format(first_line)
    fits = []
    if format_type == "eft":
        # split fittings up in its fittings (every "[Ship, Name]" header
        # starts a new fit)
        string_fits = []
        fit_iter = re.finditer(r"\[.*,.*\]", fittings)
        s_idx = 0
        first_iter = True
        for fitMatch in fit_iter:
            if not first_iter:
                e_idx = fitMatch.start() - 1
                string_fits.append(fittings[s_idx:e_idx].split('\n'))
            else:
                first_iter = False
            s_idx = fitMatch.start()
        string_fits.append(fittings[s_idx:].split('\n'))
        logger.debug("Split fittings into %d fits", len(string_fits))
        for fit in string_fits:
            try:
                dbfit = parse_eft(fit)
                if dbfit is None:
                    abort(400, "Fit was not parseable.")
                fits.append(dbfit)
            except ValueError:
                abort(400, "Invalid module amounts")
    else:
        # parse chat links
        lines = fittings.split('\n')
        for line in lines:
            fit_iter = re.finditer(
                r"<url=fitting:(\d+):((?:\d+_{0,1};\d+:)+:)>",
                line)
            for fitMatch in fit_iter:
                ship_type = int(fitMatch.group(1))
                dna_fit = fitMatch.group(2)
                fit = Shipfit()
                fit.ship_type = ship_type
                fit.modules = dna_fit
                mod_list = parse_dna_fitting(dna_fit)
                for location_flag, mod_map in enumerate(mod_list):
                    for mod_id in mod_map:
                        mod = mod_map[mod_id]
                        # lets check the value actually exists
                        inv_type = db.session.query(InvType).get(mod[0])
                        if inv_type is None:
                            raise ValueError(
                                'No module with ID=' + str(mod[0]))
                        db_module = FitModule(moduleID=mod[0], amount=mod[1],
                                              locationFlag=location_flag)
                        fit.moduleslist.append(db_module)
                fits.append(fit)
    fit_count = len(fits)
    logger.debug("Parsed %d fits", fit_count)
    if fit_count <= 0:
        flash(ngettext("You submitted one fit to be check by a fleet comp before getting on the waitlist.",
                       "You submitted %(num)d fits to be check by a fleet comp before getting on the waitlist.",
                       fit_count),
              "danger")
        return redirect(url_for('index') + "?groupId=" + str(group_id))
    # Annotate fits that need extra skill information in the fit comment.
    for fit in fits:
        if fit.ship_type in resist_ships:
            if logilvl == 0:
                pass  # TODO ask for caldari bs lvl
            if fit.comment is None:
                fit.comment = "<b>Cal BS: " + str(caldari_bs_lvl) + "</b>"
            else:
                fit.comment += " <b>Cal BS: " + str(caldari_bs_lvl) + "</b>"
        else:
            if fit.ship_type in logi_ships:
                if logilvl == 0:
                    pass  # TODO ask for logi
                if logilvl <= 3:
                    # Low logi skill is highlighted for the fleet comp.
                    comment_string = "<b class=\"bg-danger\">Logi: {0}</b>"
                else:
                    comment_string = "<b>Logi: {0}</b>"
                if fit.comment is None:
                    fit.comment = comment_string.format(logilvl)
                else:
                    fit.comment += " " + comment_string.format(logilvl)
    # get current users id
    eve_id = current_user.get_eve_id()
    fits_ready = []
    # split his fits into types for the different waitlist_entries
    for fit in fits:
        tag, waitlist_id = get_waitlist_type_for_fit(fit, group_id)
        fit.wl_type = tag
        fit.targetWaitlistID = waitlist_id
        fits_ready.append(fit)
    # get the waitlist entries of this user
    queue = group.xuplist
    wl_entry = db.session.query(WaitlistEntry).filter(
        (WaitlistEntry.waitlist_id == queue.id) & (WaitlistEntry.user == eve_id)).first()
    if wl_entry is None:
        wl_entry = WaitlistEntry()
        wl_entry.creation = datetime.utcnow()
        wl_entry.user = eve_id
        queue.entries.append(wl_entry)
        _newEntryCreated = True
    logger.info("%s submitted %s fits to be checked by a fleetcomp", current_user.get_eve_name(), len(fits_ready))
    for fit in fits_ready:
        logger.debug("%s submits %s", current_user.get_eve_name(), fit.get_dna())
        wl_entry.fittings.append(fit)
    h_entry = create_history_object(current_user.get_eve_id(), HistoryEntry.EVENT_XUP, None, fits_ready)
    db.session.add(h_entry)
    db.session.commit()
    # Notify clients: one event for a whole new entry, or one per fit.
    if _newEntryCreated:
        event = EntryAddedSSE(wl_entry, group_id, queue.id, True)
        send_server_sent_event(event)
    else:
        for fit in fits_ready:
            event = FitAddedSSE(group_id, queue.id, wl_entry.id, fit, True, wl_entry.user)
            send_server_sent_event(event)
    flash(ngettext("You submitted one fit to be check by a fleet comp before getting on the waitlist.",
                   "You submitted %(num)d fits to be check by a fleet comp before getting on the waitlist.",
                   fit_count),
          "success")
    return redirect(url_for('index') + "?groupId=" + str(group_id))
@bp.route('/', methods=['GET'])
@login_required
def index():
    """Render the x-up page with the default and all enabled waitlist groups."""
    # noinspection PyPep8
    enabled_groups = db.session.query(WaitlistGroup).filter(WaitlistGroup.enabled == True)
    default_group = enabled_groups.order_by(WaitlistGroup.ordering).first()
    all_groups = enabled_groups.all()
    teamspeak = None
    active_ts_id = sget_active_ts_id()
    if not disable_teamspeak and active_ts_id is not None:
        teamspeak = db.session.query(TeamspeakDatum).get(active_ts_id)
    return render_template("xup.html", newbro=current_user.is_new,
                           group=default_group, groups=all_groups,
                           ts=teamspeak)
@bp.route("/<int:fit_id>", methods=['GET'])
@login_required
def update(fit_id: int):
new_bro: bool = current_user.is_new
# noinspection PyPep8
defaultgroup = db.session.query(WaitlistGroup).filter(WaitlistGroup.enabled == True) \
.order_by(WaitlistGroup.ordering).first()
# noinspection PyPep8
activegroups = db.session.query(WaitlistGroup).filter(WaitlistGroup.enabled == True).all()
ts_settings = None
ts_id = sget_active_ts_id()
if ts_id is not None:
ts_settings = db.session.query(TeamspeakDatum).get(ts_id)
return render_template("xup.html", newbro=new_bro, group=defaultgroup,
groups=activegroups, update=True, oldFitID=fit_id,
ts=ts_settings)
@bp.route("/update", methods=['POST'])
@login_required
def update_submit():
oldfit_id_str = request.form.get('old-fit-id')
try:
old_fit_id = int(oldfit_id_str)
except ValueError:
abort(400, "No valid id for the fit to update given!")
return None
response = submit()
self_remove_fit(old_fit_id)
return response
| StarcoderdataPython |
6675224 | <gh_stars>0
# -*- coding: utf-8 -*-
# Coded by <NAME>
# 2020-12-13
# IDE: Jupyter Notebook
def F(a, b):
    """Return the last digit of ``a ** b`` with 0 reported as 10.

    Mirrors the original contest logic exactly: both operands and the
    final power are shrunk by repeatedly subtracting the leading digit's
    place value (``10 ** (digits - 1)``) rather than by a modulo.
    """
    if a == 1:
        return 1
    elif a > 10:
        # Shrink the base by stripping its leading digit's place value.
        # Note: the loop stops as soon as a <= 10, so a can end up as 10.
        while a > 10:
            a -= 10 ** (len(str(a)) - 1)
        if a == 0 and b != 0:
            return 10
        elif a == 6 and b != 0:
            return 6
        elif a == 5 and b != 0:
            return 5
    if b == 0:
        return 1
    elif b > 100:
        # Same leading-digit reduction for very large exponents.
        while b > 100:
            b -= 10 ** (len(str(b)) - 1)
    res = a ** b
    if res >= 10:
        # Reduce the power itself; >= (not >) here, so 10 collapses to 0.
        while res >= 10:
            res -= 10 ** (len(str(res)) - 1)
    if res == 0:
        return 10
    return res
# Competitive-programming driver: read the number of test cases, then one
# "a b" pair per line, printing F(a, b) for each.
T = int(input())
for i in range(T):
    a, b = map(int, input().split())
    print(F(a, b))
11361416 | <filename>modules/2.79/bpy/types/NodeSocketVectorAcceleration.py<gh_stars>0
NodeSocketVectorAcceleration.links = None
| StarcoderdataPython |
11283852 | <reponame>hide-dog/kaggle_titanic
# ------------------------------------------------
# import
# ------------------------------------------------
import re
# ------------------------------------------------
# read file
# ------------------------------------------------
def rewrite_train_file(inf, of):
    """Normalise a Titanic-style CSV into a purely numeric text file.

    Reads *inf*, strips the quoted passenger-name column from every data
    row, maps the remaining columns onto numeric codes or fallback
    defaults, and writes the result to *of*.  The header row (line 0) is
    copied through unchanged.
    """
    with open(inf) as f:
        lines = f.readlines()
    # Skip the header (index 0); rewrite every data row in place.
    for row in range(1, len(lines)):
        line = lines[row]
        # Remove the quoted name column (greedy match, one quoted field
        # per row is assumed, exactly as in the source data).
        name = re.findall('".+"', line)[0]
        start = line.find(name)
        line = line[:start] + line[start + len(name):]
        fields = line.split(",")
        # Pclass: default to 3rd class when missing.
        if fields[2] == "":
            fields[2] = "3"
        # Name placeholder (column was emptied above).
        fields[3] = "1"
        # Sex: male -> 1, anything else -> 0.
        fields[4] = "1" if fields[4] == "male" else "0"
        # Age: default 30 when missing.
        if fields[5] == "":
            fields[5] = "30"
        # SibSp: default 1 when missing.
        if fields[6] == "":
            fields[6] = "1"
        # Parch: default 0 when missing.
        if fields[7] == "":
            fields[7] = "0"
        # Ticket placeholder.
        fields[8] = "1"
        # Fare: default 20.0 when missing.
        if fields[9] == "":
            fields[9] = "20.0"
        # Cabin placeholder.
        fields[10] = "1"
        # Embarked: S -> 0, C -> 1, Q -> 2, unknown -> 0.
        if "S" in fields[11]:
            fields[11] = "0"
        elif "C" in fields[11]:
            fields[11] = "1"
        elif "Q" in fields[11]:
            fields[11] = "2"
        else:
            fields[11] = "0"
        # Re-join; a trailing newline is always (re-)appended.
        lines[row] = ",".join(fields) + "\n"
    with open(of, "w") as f:
        f.writelines(lines)
# ------------------------------------------------
# main
# ------------------------------------------------
def main():
    """Entry point: rewrite both Kaggle Titanic CSVs into numeric .txt files."""
    # rewrite file
    rewrite_train_file("train.csv", "train.txt")
    # NOTE(review): test.csv has no "Survived" column, so its fields are
    # shifted by one relative to train.csv -- verify the hard-coded column
    # indices in rewrite_train_file are really intended for both layouts.
    rewrite_train_file("test.csv", "test.txt")
#end
if __name__ == "__main__":
main() | StarcoderdataPython |
# Stub module: importing it always fails, pointing users at the supported
# GenericOAuthenticator-based configuration instead.
raise ImportError("AWS Cognito can be configured via GenericOAuthenticator")
| StarcoderdataPython |
1678891 | import os
import re
import shutil
import datetime
import jinja2
_template_pattern = re.compile(r"^.*(\.t)\.[^\.]+$")
def default_template_arguments():
    """Build the baseline context for template rendering.

    Currently exposes a single key, ``now`` -- the local timestamp at
    call time.
    """
    args = {"now": datetime.datetime.now()}
    return args
def apply_template_dir(src_dir, dest_dir, template_params, filter_name=None):
    """Recursively copy *src_dir* to *dest_dir*, rendering template files.

    Files whose name contains a ``.t`` just before the final extension
    (e.g. ``page.t.html``) are rendered with Jinja2 using
    *template_params* and written with the ``.t`` removed; all other
    files are copied verbatim.

    :param src_dir: directory to read from.
    :param dest_dir: directory to write to (created if missing).
    :param template_params: mapping handed to ``jinja2.Template.render``.
    :param filter_name: optional ``f(parent_dir, entry_name) -> bool``;
        entries for which it returns falsy are skipped entirely
        (including whole sub-directories).
    """
    # print(f"D {src_dir} -> {dest_dir}")
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    for item in os.listdir(src_dir):
        if filter_name and not filter_name(src_dir, item):
            continue
        s = os.path.join(src_dir, item)
        if os.path.isdir(s):
            # Recurse into sub-directories with the same parameters.
            d = os.path.join(dest_dir, item)
            apply_template_dir(s, d, template_params, filter_name)
        else:
            match = _template_pattern.match(item)
            if match:
                # Drop the ".t" marker (capture group 1) from the output name.
                i,j = match.span(1)
                d = os.path.join(dest_dir, item[:i] + item[j:])
                with open(s, "r", encoding='utf8') as sf:
                    t = jinja2.Template(sf.read())
                dtext = t.render(template_params)
                with open(d, "w", encoding='utf8') as df:
                    df.write(dtext)
                #print(f"T {s} -> {d}")
            else:
                # Non-template file: plain byte-for-byte copy.
                d = os.path.join(dest_dir, item)
                shutil.copyfile(s,d)
                #print(f"C {s} -> {d}")
| StarcoderdataPython |
9788340 | <reponame>HumbleRetreat/meru
from unittest.mock import create_autospec
import pytest
from meru.handlers import handle_action, register_action_handler
@pytest.mark.asyncio
async def test_dont_handle_unknown_action(dummy_action):
    """An action with no registered handler yields nothing at all."""
    # handle_action returns an async generator; an empty one raises
    # StopAsyncIteration on the first __anext__ call.
    with pytest.raises(StopAsyncIteration):
        await handle_action(dummy_action()).__anext__()
@pytest.mark.xfail
@pytest.mark.asyncio
@pytest.mark.freeze_time
async def test_handle_async_generator_handler(
    dummy_action, dummy_state_cls, mocker, mocked_states
):
    """
    This test will fail, because I have no idea how to create a stub with annotations.
    """
    # Presumably handler registration inspects the parameter annotations
    # (see docstring); create_autospec does not preserve them, hence the
    # xfail marker above.  TODO confirm against register_action_handler.
    mocker.patch("meru.handlers.HANDLERS", {})
    async def dummy_handler(action: dummy_action, state: dummy_state_cls):
        yield dummy_action()
    stub = create_autospec(dummy_handler)
    register_action_handler(stub)
    await handle_action(dummy_action()).__anext__()
    stub.assert_awaited_once_with(
        action=dummy_action(),
        state=mocked_states[dummy_state_cls],
    )
| StarcoderdataPython |
12824304 | import inspect
import pytest
from sqlalchemy.exc import ArgumentError
from sqlalchemy.ext.declarative import declared_attr
def test_name(db):
    """CamelCase class names become snake_case table names unless the
    class sets __tablename__ explicitly."""
    class FOOBar(db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class BazBar(db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class Ham(db.Model):
        __tablename__ = "spam"
        id = db.Column(db.Integer, primary_key=True)
    assert FOOBar.__tablename__ == "foo_bar"
    assert BazBar.__tablename__ == "baz_bar"
    assert Ham.__tablename__ == "spam"
def test_single_name(db):
    """Single table inheritance should not set a new name."""
    class Duck(db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class Mallard(Duck):
        pass
    # No own primary key -> single-table inheritance -> parent's table name.
    assert "__tablename__" not in Mallard.__dict__
    assert Mallard.__tablename__ == "duck"
def test_joined_name(db):
    """Model has a separate primary key; it should set a new name."""
    class Duck(db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class Donald(Duck):
        # Own FK primary key -> joined-table inheritance -> own table name.
        id = db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
    assert Donald.__tablename__ == "donald"
def test_mixin_id(db):
    """Primary key provided by mixin should still allow model to set
    tablename.
    """
    class Base:
        id = db.Column(db.Integer, primary_key=True)
    class Duck(Base, db.Model):
        pass
    # The non-model mixin never gets a table name; the model subclass does.
    assert not hasattr(Base, "__tablename__")
    assert Duck.__tablename__ == "duck"
def test_mixin_attr(db):
    """A declared attr tablename will be used down multiple levels of
    inheritance.
    """
    class Mixin:
        @declared_attr
        def __tablename__(cls):  # noqa: B902
            # Upper-cased class name, re-evaluated per subclass.
            return cls.__name__.upper()
    class Bird(Mixin, db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class Duck(Bird):
        # object reference
        id = db.Column(db.ForeignKey(Bird.id), primary_key=True)
    class Mallard(Duck):
        # string reference
        id = db.Column(db.ForeignKey("DUCK.id"), primary_key=True)
    assert Bird.__tablename__ == "BIRD"
    assert Duck.__tablename__ == "DUCK"
    assert Mallard.__tablename__ == "MALLARD"
def test_abstract_name(db):
    """Abstract model should not set a name. Subclass should set a name."""
    class Base(db.Model):
        __abstract__ = True
        id = db.Column(db.Integer, primary_key=True)
    class Duck(Base):
        pass
    # Abstract base has no table at all, so no generated name.
    assert "__tablename__" not in Base.__dict__
    assert Duck.__tablename__ == "duck"
def test_complex_inheritance(db):
    """Joined table inheritance, but the new primary key is provided by a
    mixin, not directly on the class.
    """
    class Duck(db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class IdMixin:
        @declared_attr
        def id(cls):  # noqa: B902
            # FK primary key supplied indirectly via the mixin.
            return db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
    class RubberDuck(IdMixin, Duck):
        pass
    # Mixin-provided PK still counts as "own primary key" -> own table name.
    assert RubberDuck.__tablename__ == "rubber_duck"
def test_manual_name(db):
    """Setting a manual name prevents generation for the immediate model. A
    name is generated for joined but not single-table inheritance.
    """
    class Duck(db.Model):
        __tablename__ = "DUCK"
        id = db.Column(db.Integer, primary_key=True)
        type = db.Column(db.String)
        __mapper_args__ = {"polymorphic_on": type}
    class Daffy(Duck):
        # Own PK -> joined inheritance -> generated lowercase name.
        id = db.Column(db.Integer, db.ForeignKey(Duck.id), primary_key=True)
        __mapper_args__ = {"polymorphic_identity": "Warner"}
    class Donald(Duck):
        # No own PK -> single-table inheritance -> inherits "DUCK".
        __mapper_args__ = {"polymorphic_identity": "Disney"}
    assert Duck.__tablename__ == "DUCK"
    assert Daffy.__tablename__ == "daffy"
    assert "__tablename__" not in Donald.__dict__
    assert Donald.__tablename__ == "DUCK"
    # polymorphic condition for single-table query
    assert 'WHERE "DUCK".type' in str(Donald.query)
def test_primary_constraint(db):
    """Primary key will be picked up from table args."""
    class Duck(db.Model):
        id = db.Column(db.Integer)
        # PK declared via a table-level constraint instead of on the column.
        __table_args__ = (db.PrimaryKeyConstraint(id),)
    assert Duck.__table__ is not None
    assert Duck.__tablename__ == "duck"
def test_no_access_to_class_property(db):
    """Ensure the implementation doesn't access class properties or declared
    attrs while inspecting the unmapped model.
    """
    class class_property:
        # Minimal descriptor that records access via its wrapped function.
        def __init__(self, f):
            self.f = f
        def __get__(self, instance, owner):
            return self.f(owner)
    class Duck(db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class ns:
        # Plain namespace used as a mutable access-tracking flag holder.
        is_duck = False
        floats = False
    class Witch(Duck):
        @declared_attr
        def is_duck(self):
            # declared attrs will be accessed during mapper configuration,
            # but make sure they're not accessed before that
            info = inspect.getouterframes(inspect.currentframe())[2]
            assert info[3] != "_should_set_tablename"
            ns.is_duck = True
        @class_property
        def floats(self):
            ns.floats = True
    assert ns.is_duck
    assert not ns.floats
def test_metadata_has_table(db):
    """A pre-existing Table with the matching name is reused by the model."""
    user = db.Table("user", db.Column("id", db.Integer, primary_key=True))
    class User(db.Model):
        pass
    assert User.__table__ is user
def test_correct_error_for_no_primary_key(db):
    """Declaring a model without any primary key raises a clear ArgumentError."""
    with pytest.raises(ArgumentError) as info:
        class User(db.Model):
            pass
    assert "could not assemble any primary key" in str(info.value)
def test_single_has_parent_table(db):
    """Single-table inheritance shares the parent's Table object exactly."""
    class Duck(db.Model):
        id = db.Column(db.Integer, primary_key=True)
    class Call(Duck):
        pass
    assert Call.__table__ is Duck.__table__
    assert "__table__" not in Call.__dict__
| StarcoderdataPython |
296527 | from actstream.models import Follow
from rest_framework import serializers
from grandchallenge.notifications.models import Notification
class NotificationSerializer(serializers.ModelSerializer):
    """Serializer exposing only the mutable ``read`` flag of a Notification."""
    class Meta:
        model = Notification
        fields = ("read",)
class FollowSerializer(serializers.ModelSerializer):
    """Serializer exposing a Follow's primary key and its ``flag``."""
    class Meta:
        model = Follow
        fields = ("pk", "flag")
| StarcoderdataPython |
6528899 | <reponame>darwinbeing/deepdriving-tensorflow<filename>python/modules/deep_learning/trainer/__init__.py
from .CFactory import CFactory
from .CTrainer import CTrainer | StarcoderdataPython |
9611644 | from Clusters.models import ClusterDetails
from Clusters.serializers import ClusterDetailsSerializer
from django.http import JsonResponse
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.generics import GenericAPIView
def index(request):
    """Render the landing page with an empty context."""
    return render(request, 'index.html', context={}, status=200)
class ClusterDetailsView(viewsets.ModelViewSet):
    """Standard CRUD endpoints for ClusterDetails via DRF's ModelViewSet."""
    queryset = ClusterDetails.objects.all()
    serializer_class = ClusterDetailsSerializer
class ClustersView(GenericAPIView):
    """Custom bulk-delete / single-update / create endpoint for ClusterDetails.

    ``post`` either bulk-deletes (``action == 'delete'``) or updates a
    single record identified by ``id``; ``create`` inserts a new record.
    """
    # NOTE(review): 'create' is not a standard HTTP method name -- verify
    # how this view is routed so that create() is actually reachable.
    http_method_names = ('post', 'create')
    serializer_class = ClusterDetailsSerializer
    def post(self, request, **kwargs):
        print('ClustersView - post')
        post_data = request.data
        # 'url' is client-side metadata, never persisted.
        post_data.pop('url', None)
        if post_data.get('action') == 'delete':
            selected_ids = post_data.get('selected_ids')
            ClusterDetails.objects.filter(id__in=selected_ids).delete()
        else:
            # NOTE(review): pop('id') raises KeyError if the client omits
            # 'id' -- presumably always sent; confirm against the frontend.
            ClusterDetails.objects.filter(id=post_data.pop('id')).update(**post_data)
        return JsonResponse({'status': 'success'}, safe=False)
    def create(self, request, **kwargs):
        print('ClustersView - create')
        ClusterDetails(
            cluster_name=request.data['cluster_name'],
            nodes_count=request.data['nodes_count']
        ).save()
        return JsonResponse({'status': 'success'}, safe=False)
| StarcoderdataPython |
6571513 | <filename>apps/utils/management/commands/introspect.py<gh_stars>0
from django.core.management.base import AppCommand
from django.db.models import get_models
from django.db.models.fields.related import OneToOneField, ForeignKey
class Command(AppCommand):
    # NOTE(review): Python 2 syntax (print statements) -- this command will
    # not run under Python 3 without conversion.
    help = "Generate search template with all fields for each model in an app"
    def printfield(self, field, accessor):
        # Emit a <dt>/<dd> template pair guarded by the accessor's truthiness.
        print """
    {%% if %s %%}
    <dt>%s</dt>
    <dd>{{ %s }}</dd>
    {%% endif %%}""" % (accessor, field.verbose_name, accessor)
# print '{{ %s }}' % accessor
    def handle_app(self, app, **options):
        # Walk every model in the app once, printing template markup for
        # its own fields and for fields reachable through reverse relations.
        models = get_models(app)
        processed_models = set()
        for model in models:
            if model in processed_models:
                continue
            processed_models.add(model)
            print model
            for field in model._meta.fields:
                fieldaccessor = 'object.%s' % field.name
                self.printfield(field, fieldaccessor)
            for related in model._meta.get_all_related_objects():
                # Mark related models as handled so they are not repeated.
                processed_models.add(related.model)
                accessor = related.get_accessor_name()
                if isinstance(related.field, OneToOneField):
                    # One-to-one: dotted access on the single related object.
                    for field in related.model._meta.fields:
                        fieldaccessor = 'object.%s.%s' % (accessor, field.name)
                        self.printfield(field, fieldaccessor)
                elif isinstance(related.field, ForeignKey):
                    # Reverse FK: iterate the related queryset in the template.
                    print '{%% for related in objects.%s %%}' % accessor
                    for field in related.model._meta.fields:
                        fieldaccessor = 'related.%s' % field.name
                        self.printfield(field, fieldaccessor)
                    print '{% endfor %}'
| StarcoderdataPython |
6588241 | #integer right triangles
""""
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
"""""
import math, time
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a
def project_euler_1(limit):
    """Brute-force Project Euler 39: print the perimeter p <= 1000 with the
    most integer right-triangle solutions {a, b, c}.

    Tries every (a, b) split for every perimeter, so it is extremely slow;
    project_euler_2 below is the fast Euclid's-formula variant.

    Note: *limit* is accepted but unused -- the ranges are hard-coded to 1000.
    """
    res = 0
    cnt = 0
    for k in range(12, 1001):
        n = 0
        # a < k*(1 - sqrt(2)/2) and b < k/2 bound the two legs of the triangle.
        for i in range(1, int(k * (1 - math.sqrt(2) / 2))):
            for j in range(int(k * (1 - math.sqrt(2) / 2)), k // 2):
                if i ** 2 + j ** 2 == (k - i - j) ** 2:
                    n += 1
        if cnt < n:
            res = k
            cnt = n
    print(res)
def project_euler_2(limit):
    """Project Euler 39 via Euclid's formula: print the perimeter <= *limit*
    with the most integer right-triangle solutions.

    Every primitive triple comes from coprime m > n of opposite parity with
    perimeter 2*m*(m + n); all of its multiples up to *limit* are counted.
    Uses the C-accelerated math.gcd instead of the hand-rolled module-level
    gcd (same results, standard library preferred).
    """
    solutions = [0] * (limit + 1)
    # m is bounded by 2*m*(m+1) <= limit  =>  m < sqrt(limit / 2).
    for m in range(2, int((limit // 2) ** 0.5)):
        for n in range(1, m):
            # Primitive-triple condition: m, n coprime and of opposite parity.
            if (m - n) % 2 == 1 and math.gcd(m, n) == 1:
                perimeter = 2 * m * (m + n)
                # Count the primitive triple and every multiple within limit.
                for p in range(perimeter, limit + 1, perimeter):
                    solutions[p] += 1
    print(solutions.index(max(solutions)))
if __name__ == '__main__':
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for benchmarking elapsed time.
    start = time.perf_counter()
    project_euler_2(1000)
    print('Runtime is ', time.perf_counter() - start)
1972555 | """Ampio data models."""
from __future__ import annotations
import base64
from collections import defaultdict
import datetime as dt
from enum import Enum, IntEnum
import logging
from typing import Any, Callable, Dict, List, Optional, Union
import attr
from homeassistant.const import (
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_FRIENDLY_NAME,
CONF_ICON,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.helpers import device_registry
from .const import (
CONF_ALARM_TOPIC,
CONF_ARMED_TOPIC,
CONF_AWAY_ZONES,
CONF_BRIGHTNESS_COMMAND_TOPIC,
CONF_BRIGHTNESS_STATE_TOPIC,
CONF_CLOSING_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_ENTRYTIME_TOPIC,
CONF_EXITTIME10_TOPIC,
CONF_EXITTIME_TOPIC,
CONF_HOME_ZONES,
CONF_OPENING_STATE_TOPIC,
CONF_RAW_TOPIC,
CONF_RGB_COMMAND_TOPIC,
CONF_RGB_STATE_TOPIC,
CONF_STATE_TOPIC,
CONF_TILT_POSITION_TOPIC,
CONF_UNIQUE_ID,
CONF_WHITE_VALUE_COMMAND_TOPIC,
CONF_WHITE_VALUE_STATE_TOPIC,
DOMAIN,
)
from .validators import (
AMPIO_DESCRIPTIONS_SCHEMA,
AMPIO_DEVICES_SCHEMA,
ATTR_A,
ATTR_AU,
ATTR_I,
ATTR_O,
ATTR_D,
ATTR_DATE_PROD,
ATTR_FLAG,
ATTR_MAC,
ATTR_N,
ATTR_NAME,
ATTR_PCB,
ATTR_PROTOCOL,
ATTR_SOFTWARE,
ATTR_T,
ATTR_TYPE,
ATTR_USERMAC,
)
DEVICE_CLASSES = {
"B": "battery",
"BC": "battery_charging",
"C": "cold",
"CO": "connectivity",
"D": "door",
"GD": "garage_door",
"GA": "gas",
"HE": "heat",
"L": "light",
"LO": "lock",
"MI": "moisture",
"M": "motion",
"MV": "moving",
"OC": "occupancy",
"O": "opening",
"P": "plug",
"PW": "power",
"PR": "presence",
"PB": "problem",
"S": "safety",
"SO": "sound",
"V": "vibration",
"W": "window",
# switches
"OU": "outlet",
# sensors
"T": "temperature",
"H": "humidity",
"I": "illuminance",
"SS": "signal_strength",
"PS": "pressure",
"TS": "timestamp",
# covers
"VA": "valve",
"G": "garage",
"BL": "blind",
}
TYPE_CODES = {
44: "MSENS",
3: "MROL-4s",
4: "MPR-8s",
5: "MDIM-8s",
8: "MDOT-4",
10: "MSERV-3s",
11: "MDOT-9",
12: "MRGBu-1",
17: "MLED-1",
22: "MRT-16s",
25: "MCON",
26: "MOC-4",
27: "MDOT-15LCD",
33: "MDOT-2",
34: "METEO-1s",
38: "MRDN-1s",
49: "MWRC",
}
class ModuleCodes(IntEnum):
    """Module codes enum."""
    # Numeric hardware type codes reported by Ampio modules; a symbolic
    # subset of the TYPE_CODES mapping above for code that needs names.
    MLED1 = 17
    MCON = 25
    MDIM8s = 5
    MSENS = 44
    MDOT2 = 33
DOMAIN = "ampio"
_LOGGER = logging.getLogger(__name__)
PublishPayloadType = Union[str, bytes, int, float, None]
@attr.s(slots=True, frozen=True)
class Message:
    """MQTT Message."""
    # Immutable value object describing one received/published MQTT message.
    topic = attr.ib(type=str)  # full topic string
    payload = attr.ib(type=PublishPayloadType)  # raw payload (str/bytes/number/None)
    qos = attr.ib(type=int)  # MQTT quality-of-service level
    retain = attr.ib(type=bool)  # broker retain flag
    subscribed_topic = attr.ib(type=str, default=None)  # subscription that matched, if tracked
    timestamp = attr.ib(type=dt.datetime, default=None)  # receive time, if known
MessageCallbackType = Callable[[Message], None]
def extract_index_from_topic(topic: str) -> Optional[int]:
    """Return the final '/'-separated segment of *topic* as an int.

    Returns None when that segment is not a valid integer literal.
    """
    last_segment = topic.rsplit("/", 1)[-1]
    try:
        return int(last_segment)
    except ValueError:
        return None
@attr.s(slots=True, frozen=True)
class IndexIntData:
    """Index parsed from a topic's last segment plus its integer payload."""
    index = attr.ib(type=int)  # index from the topic's final "/" segment
    value = attr.ib(type=int)  # integer value parsed from the payload
    @classmethod
    def from_msg(cls, msg: Message) -> Optional[IndexIntData]:
        """Create from MQTT message.

        Returns None (after logging an error) when either the topic's
        last segment or the payload cannot be parsed as an integer.
        """
        index = extract_index_from_topic(msg.topic)
        if index is None:
            _LOGGER.error("Unable to extract index from topic: %s", msg.topic)
            return None
        try:
            value = int(msg.payload)
        except ValueError:
            _LOGGER.error("Unable to parse data message tp ind: %s", msg.payload)
            return None
        return cls(index, value)
class ItemTypes(str, Enum):
"""Item type codes."""
# OW = "t"
# # BinaryFlag = "f"
# BinaryInput254 = "i"
# BinaryInput509 = "i"
# BinaryOutput254 = "o"
# BinaryOutput509 = "o"
# AnalogInput254 = "a"
# AnalogInput509 = "a"
# AnalogOutput254 = "au"
# AnalogOutput509 = "au"
Temperature = "t"
BinaryFlag = "f"
BinaryInput = "i"
BinaryOutput = "o"
AnalogInput = "a"
AnalogOutput = "au"
def base64decode(value: str):
    """Decode a base64 string to stripped text.

    Tries UTF-8 first and falls back to cp1254 for payloads that are
    not valid UTF-8.
    """
    raw = base64.b64decode(value)
    try:
        text = raw.decode("utf-8")
    except UnicodeDecodeError:
        text = raw.decode("cp1254")
    return text.strip()
def base64encode(value: str):
    """Encode *value* as UTF-8 and return its base64 representation (bytes)."""
    utf8_bytes = value.encode("utf-8")
    return base64.b64encode(utf8_bytes)
@attr.s()
class ItemName:
"""Name of the ampio module Item (input, output, flag, etc)."""
d = attr.ib(type=str, converter=base64decode) # pylint: disable=invalid-name
name = attr.ib(type=str)
device_class = attr.ib(type=str)
prefix = attr.ib(type=str)
@name.default
def extract_name(self):
"""Compute name."""
parts = self.d.split(":")
if len(parts) > 1:
return "".join(parts[1:])
return self.d
@device_class.default
def extract_device_class(self):
"""Compute device_class."""
parts = self.d.split(":")
if len(parts) > 1:
prefix = parts[0]
return DEVICE_CLASSES.get(prefix)
return None
@prefix.default
def extract_prefix(self):
"""Extrct device type code from name."""
parts = self.d.split(":")
if len(parts) > 1:
return parts[0]
return None
@classmethod
def from_topic_payload(cls, payload: Dict) -> List[ItemName]:
"""Read from topic payload."""
names: Dict[str, Union[int, Dict]] = AMPIO_DESCRIPTIONS_SCHEMA(payload)
result = {}
for name in names[ATTR_D]:
name_data = name[ATTR_D]
name_type = name[ATTR_T]
name_index = name[ATTR_N]
if name_type not in result.keys():
result[name_type] = {}
result[name_type][name_index] = ItemName(name_data)
return result
@attr.s()
class AmpioModuleInfo:
"""Ampio Module Information."""
mac = attr.ib(type=str, converter=str.upper)
user_mac = attr.ib(type=str, converter=str.upper)
code = attr.ib(type=int)
pcb = attr.ib(type=int)
software = attr.ib(type=int)
protocol = attr.ib(type=int)
date_prod = attr.ib(type=str)
i = attr.ib(type=int) # pylint: disable=invalid-name
o = attr.ib(type=int) # pylint: disable=invalid-name
a = attr.ib(type=int) # pylint: disable=invalid-name
au = attr.ib(type=int) # pylint: disable=invalid-name
t = attr.ib(type=int) # pylint: disable=invalid-name
flags = attr.ib(type=int)
name = attr.ib(type=str, converter=base64decode)
names = attr.ib(factory=dict)
configs = attr.ib(factory=dict)
unique_ids = attr.ib(factory=set)
def update_configs(self) -> None:
"""Update the config data for entities."""
self.configs = defaultdict(list) # clean up current configs
self.unique_ids = set()
for index, item in self.names.get(ItemTypes.BinaryFlag, {}).items():
data = AmpioFlagConfig.from_ampio_device(self, item, index + 1)
if data:
self.configs["switch"].append(data.config)
self.unique_ids.add(data.unique_id)
for index, item in self.names.get(ItemTypes.Temperature, {}).items():
data = AmpioTempSensorConfig.from_ampio_device(self, item, index + 1)
if data:
self.configs["sensor"].append(data.config)
self.unique_ids.add(data.unique_id)
@property
def part_number(self) -> str:
"""Return module part number (code)."""
return TYPE_CODES.get(self.code, self.code)
@property
def model(self) -> str:
"""Return model name."""
return f"{self.part_number} [{self.mac.upper()}/{self.user_mac.upper()}]"
def as_hass_device(self) -> Dict[str, Any]:
"""Return info in hass device format."""
return {
"connections": {(device_registry.CONNECTION_NETWORK_MAC, self.user_mac)},
"identifiers": {(DOMAIN, self.user_mac)},
"name": self.name,
"manufacturer": "Ampio",
"model": self.model,
"sw_version": self.software,
"via_device": (DOMAIN, "ampio-mqtt"),
}
@classmethod
def from_topic_payload(cls, payload: dict) -> List[AmpioModuleInfo]:
"""Create a module object from topic payload."""
devices = AMPIO_DEVICES_SCHEMA(payload)
result = []
for device in devices[ATTR_D]:
klass = CLASS_FACTORY.get(device[ATTR_TYPE], AmpioModuleInfo)
result.append(
klass(
device[ATTR_MAC],
device[ATTR_USERMAC],
device[ATTR_TYPE],
device[ATTR_PCB],
device[ATTR_SOFTWARE],
device[ATTR_PROTOCOL],
device[ATTR_DATE_PROD],
device[ATTR_I],
device[ATTR_O],
device[ATTR_A],
device[ATTR_AU],
device[ATTR_T],
device[ATTR_FLAG],
device[ATTR_NAME],
)
)
return result
def get_config_for_component(self, component: str) -> List:
"""Return list of entities for specific component."""
return self.configs.get(component, [])
class MSENSModuleInfo(AmpioModuleInfo):
"""MSENS Ampio module information."""
def update_configs(self) -> None:
super().update_configs()
for ampio_config in (
AmpioTempSensorConfig.from_ampio_device(
self, ItemName(base64encode("T:Temperature"))
),
AmpioHumiditySensorConfig.from_ampio_device(
self, ItemName(base64encode("HU:Humidity"))
),
AmpioPressureSensorConfig.from_ampio_device(
self, ItemName(base64encode("OS:Pressure"))
),
AmpioNoiseSensorConfig.from_ampio_device(
self, ItemName(base64encode("SS:Noise"))
),
AmpioIlluminanceSensorConfig.from_ampio_device(
self, ItemName(base64encode("I:Illuminance"))
),
AmpioAirqualitySensorConfig.from_ampio_device(
self, ItemName(base64encode("Air Quality"))
),
):
if ampio_config:
self.configs["sensor"].append(ampio_config.config)
self.unique_ids.add(ampio_config.unique_id)
class MCONModuleInfo(AmpioModuleInfo):
"""MCON Ampio module information."""
def update_configs(self) -> None:
"""Update config."""
super().update_configs()
if self.software % 100 == 1: # INTEGRA
for index, item in self.names.get(ItemTypes.BinaryInput, {}).items():
data = AmpioBinarySensorExtendedConfig.from_ampio_device(
self, item, index
)
if data:
self.configs["binary_sensor"].append(data.config)
self.unique_ids.add(data.unique_id)
# for index, item in self.names.get(ItemTypes.BinaryInput509, {}).items():
# data = AmpioBinarySensorExtendedConfig.from_ampio_device(
# self, item, index + 255
# )
# if data:
# self.configs["binary_sensor"].append(data.config)
# self.unique_ids.add(data.unique_id)
data = AmpioSatelConfig.from_ampio_device(self)
if data:
self.configs["alarm_control_panel"].append(data.config)
self.unique_ids.add(data.unique_id)
class MLED1ModuleInfo(AmpioModuleInfo):
"""MLED-1 Ampio module information."""
def update_configs(self) -> None:
super().update_configs()
_LOGGER.debug("MLED1: %s", self.names)
for index, item in self.names.get(ItemTypes.AnalogOutput.value, {}).items():
data = AmpioDimmableLightConfig.from_ampio_device(self, item, index)
if data:
self.configs["light"].append(data.config)
self.unique_ids.add(data.unique_id)
class MDIM8sModuleInfo(AmpioModuleInfo):
"""MDIM-8s Ampio module information."""
def update_configs(self) -> None:
super().update_configs()
_LOGGER.debug("MDIM8s: %s", self.names)
for index, item in self.names.get(ItemTypes.BinaryOutput, {}).items():
data = AmpioDimmableLightConfig.from_ampio_device(self, item, index)
if data:
self.configs["light"].append(data.config)
self.unique_ids.add(data.unique_id)
class MOC4ModuleInfo(AmpioModuleInfo):
    """MOC-4 Ampio module information."""
    def update_configs(self) -> None:
        """Register one dimmable light per named binary output."""
        super().update_configs()
        # Fixed copy-pasted log tag: this is the MOC-4 handler, but the
        # original logged "MDIM8s" (copied from MDIM8sModuleInfo above).
        _LOGGER.debug("MOC4: %s", self.names)
        for index, item in self.names.get(ItemTypes.BinaryOutput, {}).items():
            data = AmpioDimmableLightConfig.from_ampio_device(self, item, index)
            if data:
                self.configs["light"].append(data.config)
                self.unique_ids.add(data.unique_id)
class MPR8sModuleInfo(AmpioModuleInfo):
"""MPR-8s Ampio module information."""
def update_configs(self) -> None:
super().update_configs()
for index, item in self.names.get(ItemTypes.BinaryOutput, {}).items():
if item.device_class == "light":
data = AmpioLightConfig.from_ampio_device(self, item, index)
self.configs["light"].append(data.config)
self.unique_ids.add(data.unique_id)
else:
data = AmpioSwitchConfig.from_ampio_device(self, item, index)
self.configs["switch"].append(data.config)
self.unique_ids.add(data.unique_id)
for index, item in self.names.get(ItemTypes.BinaryInput, {}).items():
data = AmpioBinarySensorConfig.from_ampio_device(self, item, index)
if data:
self.configs["binary_sensor"].append(data.config)
self.unique_ids.add(data.unique_id)
class MDOTModuleInfo(AmpioModuleInfo):
"""Generic MDOT Ampio module information class."""
_BUTTONS: int
def update_configs(self) -> None:
"""Generat module configuration."""
super().update_configs()
for index in range(1,
self._BUTTONS + 1
): # regardles of names module has always fixed physical touch buttons
item = self.names.get(ItemTypes.BinaryInput, {}).get(index)
if item is None:
item = ItemName(base64encode(f"{self.name} Button {index}"))
data = AmpioTouchSensorConfig.from_ampio_device(self, item, index)
if data:
self.configs["binary_sensor"].append(data.config)
self.unique_ids.add(data.unique_id)
class MDOT2ModuleInfo(MDOTModuleInfo):
"""MDOT-2 Ampio module information."""
_BUTTONS = 2
class MDOT4ModuleInfo(MDOTModuleInfo):
"""MDOT-4 Ampio module information."""
_BUTTONS = 4
class MDOT9ModuleInfo(MDOTModuleInfo):
"""MDOT-9 Ampio module information."""
_BUTTONS = 9
class MDOT15LCDModuleInfo(MDOTModuleInfo):
"""MDOT-15LCD Ampio module information."""
_BUTTONS = 15
class MRGBu1ModuleInfo(AmpioModuleInfo):
"""MRGB-1u Ampio module information."""
def update_configs(self) -> None:
"""Update module specific configuration."""
super().update_configs()
data = AmpioRGBLightConfig.from_ampio_device(self, None, 1)
if data:
self.configs["light"].append(data.config)
self.unique_ids.add(data.unique_id)
class MSERV3sModuleInfo(AmpioModuleInfo):
"""MSERV-3s Ampio module information."""
def update_configs(self) -> None:
super().update_configs()
for index, item in self.names.get(ItemTypes.BinaryOutput, {}).items():
data = AmpioSwitchConfig.from_ampio_device(self, item, index)
if data:
self.configs["switch"].append(data.config)
self.unique_ids.add(data.unique_id)
for index, item in self.names.get(ItemTypes.BinaryInput, {}).items():
data = AmpioBinarySensorConfig.from_ampio_device(self, item, index)
if data:
self.configs["binary_sensor"].append(data.config)
self.unique_ids.add(data.unique_id)
class MROL4sModuleInfo(AmpioModuleInfo):
"""MROL-4s Ampio module information."""
def update_configs(self) -> None:
super().update_configs()
for index, item in self.names.get(ItemTypes.BinaryOutput, {}).items():
data = AmpioCoverConfig.from_ampio_device(self, item, index)
if data:
self.configs["cover"].append(data.config)
self.unique_ids.add(data.unique_id)
for index, item in self.names.get(ItemTypes.BinaryInput, {}).items():
data = AmpioBinarySensorConfig.from_ampio_device(self, item, index)
if data:
self.configs["binary_sensor"].append(data.config)
self.unique_ids.add(data.unique_id)
CLASS_FACTORY = {
44: MSENSModuleInfo,
25: MCONModuleInfo,
17: MLED1ModuleInfo,
5: MDIM8sModuleInfo,
26: MOC4ModuleInfo,
4: MPR8sModuleInfo,
33: MDOT2ModuleInfo,
8: MDOT4ModuleInfo,
11: MDOT9ModuleInfo,
27: MDOT15LCDModuleInfo,
12: MRGBu1ModuleInfo,
10: MSERV3sModuleInfo,
3: MROL4sModuleInfo,
}
@attr.s
class AmpioConfig:
    """Generic Ampio Config class."""
    # Raw Home Assistant-style config dict describing one entity.
    config = attr.ib(type=dict)
    @property
    def unique_id(self):
        """Return unique_id from config (None if the key is absent)."""
        return self.config.get(CONF_UNIQUE_ID)
class AmpioTempSensorConfig(AmpioConfig):
"""Ampio Temperature Entity Configuration."""
@classmethod
def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
"""Create config from ampio device."""
if not item.name:
name = f"Temperature {ampio_device.name}"
else:
name = item.name
mac = ampio_device.user_mac
config = {
CONF_UNIQUE_ID: f"ampio-{mac}-t{index}",
CONF_NAME: f"ampio-{mac}-t{index}",
CONF_FRIENDLY_NAME: name,
CONF_UNIT_OF_MEASUREMENT: "°C",
CONF_DEVICE_CLASS: "temperature",
CONF_STATE_TOPIC: f"ampio/from/{mac}/state/t/{index}",
CONF_DEVICE: ampio_device.as_hass_device(),
}
return cls(config=config)
class AmpioHumiditySensorConfig(AmpioConfig):
"""Ampio Humidity Entity Configuration."""
@classmethod
def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
"""Create config from ampio device."""
mac = ampio_device.user_mac
if ampio_device.pcb < 3: # MSENS-1
state_topic = f"ampio/from/{mac}/state/au32/0"
else:
state_topic = f"ampio/from/{mac}/state/au16l/1"
name = f"Humidity {ampio_device.name}"
config = {
CONF_UNIQUE_ID: f"ampio-{mac}-h{index}",
CONF_NAME: f"ampio-{mac}-h{index}",
CONF_FRIENDLY_NAME: name,
CONF_UNIT_OF_MEASUREMENT: "%",
CONF_DEVICE_CLASS: "humidity",
CONF_STATE_TOPIC: state_topic,
CONF_DEVICE: ampio_device.as_hass_device(),
}
return cls(config=config)
class AmpioPressureSensorConfig(AmpioConfig):
"""Ampio Pressure Entity Configuration."""
@classmethod
def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
"""Create config from ampio device."""
mac = ampio_device.user_mac
if ampio_device.pcb < 3: # MSENS-1
state_topic = f"ampio/from/{mac}/state/au32/1"
else:
state_topic = f"ampio/from/{mac}/state/au16l/6"
name = f"Pressure {ampio_device.name}"
config = {
CONF_UNIQUE_ID: f"ampio-{mac}-ps{index}",
CONF_NAME: f"ampio-{mac}-ps{index}",
CONF_FRIENDLY_NAME: name,
CONF_DEVICE_CLASS: "pressure",
CONF_STATE_TOPIC: state_topic,
CONF_UNIT_OF_MEASUREMENT: "hPa",
CONF_DEVICE: ampio_device.as_hass_device(),
}
return cls(config=config)
class AmpioNoiseSensorConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio noise sensor."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device, or None when the board lacks the sensor."""
        if ampio_device.pcb < 3:  # MSENS-1 has no noise sensor
            return None
        mac = ampio_device.user_mac
        return cls(
            config={
                CONF_UNIQUE_ID: f"ampio-{mac}-n{index}",
                CONF_NAME: f"ampio-{mac}-n{index}",
                CONF_FRIENDLY_NAME: f"Noise {ampio_device.name}",
                CONF_DEVICE_CLASS: "signal_strength",
                CONF_STATE_TOPIC: f"ampio/from/{mac}/state/au16l/3",
                CONF_UNIT_OF_MEASUREMENT: "dB",
                CONF_DEVICE: ampio_device.as_hass_device(),
            }
        )
class AmpioIlluminanceSensorConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio illuminance sensor."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device, or None when the board lacks the sensor."""
        if ampio_device.pcb < 3:  # MSENS-1 has no illuminance sensor
            return None
        mac = ampio_device.user_mac
        return cls(
            config={
                CONF_UNIQUE_ID: f"ampio-{mac}-i{index}",
                CONF_NAME: f"ampio-{mac}-i{index}",
                CONF_FRIENDLY_NAME: f"Illuminance {ampio_device.name}",
                CONF_DEVICE_CLASS: "illuminance",
                CONF_STATE_TOPIC: f"ampio/from/{mac}/state/au16l/4",
                CONF_UNIT_OF_MEASUREMENT: "lx",
                CONF_DEVICE: ampio_device.as_hass_device(),
            }
        )
class AmpioAirqualitySensorConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio air-quality sensor."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device, or None when the board lacks the sensor."""
        mac = ampio_device.user_mac
        if ampio_device.pcb < 3:  # MSENS-1 has no air-quality sensor
            return None
        return cls(
            config={
                CONF_UNIQUE_ID: f"ampio-{mac}-aq{index}",
                CONF_NAME: f"ampio-{mac}-aq{index}",
                CONF_FRIENDLY_NAME: f"Air Quality {ampio_device.name}",
                CONF_STATE_TOPIC: f"ampio/from/{mac}/state/au16l/5",
                CONF_UNIT_OF_MEASUREMENT: "AQI",
                CONF_DEVICE: ampio_device.as_hass_device(),
            }
        )
class AmpioTouchSensorConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio touch (binary) sensor."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        return cls(
            config={
                CONF_UNIQUE_ID: f"ampio-{mac}-i{index}",
                CONF_NAME: f"ampio-{mac}-i{index}",
                CONF_FRIENDLY_NAME: item.name,
                CONF_STATE_TOPIC: f"ampio/from/{mac}/state/i/{index}",
                CONF_DEVICE: ampio_device.as_hass_device(),
                CONF_DEVICE_CLASS: "opening",
            }
        )
class AmpioBinarySensorExtendedConfig(AmpioConfig):
    """Builds the entity configuration for an extended ('bi') binary sensor."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        entity = {
            CONF_UNIQUE_ID: f"ampio-{mac}-bi{index}",
            CONF_NAME: f"ampio-{mac}-bi{index}",
            CONF_FRIENDLY_NAME: item.name,
            CONF_STATE_TOPIC: f"ampio/from/{mac}/state/bi/{index}",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        # Device class is optional on the item definition.
        if item.device_class:
            entity[CONF_DEVICE_CLASS] = item.device_class
        return cls(config=entity)
class AmpioBinarySensorConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio binary-input sensor."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        entity = {
            CONF_UNIQUE_ID: f"ampio-{mac}-i{index}",
            CONF_NAME: f"ampio-{mac}-i{index}",
            CONF_FRIENDLY_NAME: item.name,
            CONF_STATE_TOPIC: f"ampio/from/{mac}/state/i/{index}",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        # Device class is optional on the item definition.
        if item.device_class:
            entity[CONF_DEVICE_CLASS] = item.device_class
        return cls(config=entity)
class AmpioDimmableLightConfig(AmpioConfig):
    """Builds the entity configuration for a dimmable Ampio light channel."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        entity = {
            CONF_UNIQUE_ID: f"ampio-{mac}-a{index}",
            CONF_NAME: f"ampio-{mac}-a{index}",
            CONF_FRIENDLY_NAME: item.name,
            CONF_STATE_TOPIC: f"ampio/from/{mac}/state/o/{index}",
            CONF_COMMAND_TOPIC: f"ampio/to/{mac}/o/{index}/cmd",
            CONF_BRIGHTNESS_COMMAND_TOPIC: f"ampio/to/{mac}/o/{index}/cmd",
            CONF_BRIGHTNESS_STATE_TOPIC: f"ampio/from/{mac}/state/a/{index}",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        if item.device_class:
            entity[CONF_DEVICE_CLASS] = item.device_class
        # MLED1 modules get a dedicated spotlight icon.
        if ampio_device.code == ModuleCodes.MLED1:
            entity[CONF_ICON] = "mdi:spotlight"
        return cls(config=entity)
class AmpioLightConfig(AmpioConfig):
    """Builds the entity configuration for a plain on/off Ampio light."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        entity = {
            CONF_UNIQUE_ID: f"ampio-{mac}-a{index}",
            CONF_NAME: f"ampio-{mac}-a{index}",
            CONF_FRIENDLY_NAME: item.name,
            CONF_STATE_TOPIC: f"ampio/from/{mac}/state/o/{index}",
            CONF_COMMAND_TOPIC: f"ampio/to/{mac}/o/{index}/cmd",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        if item.device_class:
            entity[CONF_DEVICE_CLASS] = item.device_class
        return cls(config=entity)
class AmpioRGBLightConfig(AmpioConfig):
    """Builds the entity configuration for the Ampio RGBW light."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item=None, index=1):
        """Create config from ampio device.

        NOTE: the caller-supplied *index* is always overridden to 1 below,
        and *item* is unused.
        """
        mac = ampio_device.user_mac
        index = 1
        return cls(
            config={
                CONF_UNIQUE_ID: f"ampio-{mac}-rgbw{index}",
                CONF_NAME: f"ampio-{mac}-rgbw{index}",
                CONF_FRIENDLY_NAME: ampio_device.name or "RGBW",
                CONF_RGB_STATE_TOPIC: f"ampio/from/{mac}/state/rgbw/{index}",
                CONF_RGB_COMMAND_TOPIC: f"ampio/to/{mac}/rgbw/{index}/cmd",
                # White channel rides on analog output 4.
                CONF_WHITE_VALUE_STATE_TOPIC: f"ampio/from/{mac}/state/a/4",
                CONF_WHITE_VALUE_COMMAND_TOPIC: f"ampio/to/{mac}/o/4/cmd",
                CONF_DEVICE: ampio_device.as_hass_device(),
            }
        )
class AmpioSwitchConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio switch output."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        entity = {
            CONF_UNIQUE_ID: f"ampio-{mac}-bo{index}",
            CONF_NAME: f"ampio-{mac}-bo{index}",
            CONF_FRIENDLY_NAME: item.name,
            CONF_STATE_TOPIC: f"ampio/from/{mac}/state/o/{index}",
            CONF_COMMAND_TOPIC: f"ampio/to/{mac}/o/{index}/cmd",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        if item.device_class:
            entity[CONF_DEVICE_CLASS] = item.device_class
            # Heating outputs are shown with a radiator icon.
            if item.device_class == "heat":
                entity[CONF_ICON] = "mdi:radiator"
        return cls(config=entity)
class AmpioFlagConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio flag."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        entity = {
            CONF_UNIQUE_ID: f"ampio-{mac}-f{index}",
            CONF_NAME: f"ampio-{mac}-f{index}",
            CONF_FRIENDLY_NAME: item.name,
            CONF_STATE_TOPIC: f"ampio/from/{mac}/state/f/{index}",
            CONF_COMMAND_TOPIC: f"ampio/to/{mac}/f/{index}/cmd",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        if item.device_class:
            entity[CONF_DEVICE_CLASS] = item.device_class
        # Every flag entity carries the flag icon.
        entity[CONF_ICON] = "mdi:flag"
        return cls(config=entity)
class AmpioCoverConfig(AmpioConfig):
    """Builds the entity configuration for an Ampio cover (shutter/garage/valve)."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo, item: ItemName, index=1):
        """Create config from ampio device."""
        mac = ampio_device.user_mac
        device_class = item.device_class if item.device_class is not None else "shutter"
        icon = "mdi:valve" if device_class == "valve" else None
        entity = {
            CONF_UNIQUE_ID: f"ampio-{mac}-co{index}",
            CONF_NAME: f"ampio-{mac}-co{index}",
            CONF_FRIENDLY_NAME: item.name,
            CONF_STATE_TOPIC: f"ampio/from/{mac}/state/a/{index}",
            # Motor outputs come in (close, open) pairs: 2*index-1 / 2*index.
            CONF_CLOSING_STATE_TOPIC: f"ampio/from/{mac}/state/o/{2 * (index - 1) + 1}",
            CONF_OPENING_STATE_TOPIC: f"ampio/from/{mac}/state/o/{2 * index}",
            CONF_COMMAND_TOPIC: f"ampio/to/{mac}/o/{index}/cmd",
            CONF_RAW_TOPIC: f"ampio/to/{mac}/raw",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        # Garage doors and valves have no tilt channel.
        if device_class not in ["garage", "valve"]:
            entity[CONF_TILT_POSITION_TOPIC] = f"ampio/from/{mac}/state/a/{6 + index}"
        if device_class:
            entity[CONF_DEVICE_CLASS] = device_class
        if icon:
            entity[CONF_ICON] = icon
        return cls(config=entity)
class AmpioSatelConfig(AmpioConfig):
    """Builds the single alarm-panel configuration for an Ampio Satel bridge."""

    @classmethod
    def from_ampio_device(cls, ampio_device: AmpioModuleInfo):
        """Create alarm config from ampio device.

        Zone membership is derived from the analog-output item-name prefix:
        "A" = away only, "H" = home only, "B" = both; no prefix counts as away.
        """
        mac = ampio_device.user_mac
        away = set()
        home = set()
        # (index, ItemName) pairs for every named analog output.
        zone_items = ampio_device.names.get(ItemTypes.AnalogOutput, {}).items()
        for index, item in zone_items:
            if item.prefix in ("A", "B", None):  # Away or Both or Not defined
                away.add(index)
            if item.prefix in ("H", "B"):  # Home or Both
                home.add(index)
        prefix = f"ampio/from/{mac}/state"
        config = {
            CONF_AWAY_ZONES: away,
            CONF_HOME_ZONES: home,
            CONF_UNIQUE_ID: f"ampio-{mac}-alarm",
            CONF_NAME: f"ampio-{mac}-alarm",
            CONF_FRIENDLY_NAME: ampio_device.name,
            CONF_RAW_TOPIC: f"ampio/to/{mac}/raw",
            CONF_ARMED_TOPIC: f"{prefix}/armed/+",
            CONF_ALARM_TOPIC: f"{prefix}/alarm/+",
            CONF_ENTRYTIME_TOPIC: f"{prefix}/entrytime/+",
            CONF_EXITTIME10_TOPIC: f"{prefix}/exittime10/+",
            CONF_EXITTIME_TOPIC: f"{prefix}/exittime/+",
            CONF_DEVICE: ampio_device.as_hass_device(),
        }
        return cls(config=config)
| StarcoderdataPython |
3486389 | import bs4
from urllib.request import urlopen
#import datetime as dt
import pandas as pd
import re
import json
import sqlite3
# Module-wide SQLite connection shared by Price2DB.db_update and DB2DF.make_df.
# Relative path: the database file is created next to the working directory.
con = sqlite3.connect('db/fsdb01.db3')
class Price2DB:
    """Fetches daily OHLCV candles from Naver Finance and stores them in SQLite."""

    def get_daily_price_naver(self, cd, count):
        """Download up to *count* daily candles for stock code *cd* from Naver.

        Returns a DataFrame with columns trade_date, price_open, price_high,
        price_low, price_close, trade_volume plus a constant 'Code' column.
        """
        url = (
            f"https://fchart.stock.naver.com/sise.nhn"
            f"?symbol={cd}&timeframe=day&count={count}&requestType=0"
        )
        source = urlopen(url).read()
        soup = bs4.BeautifulSoup(source, 'lxml')
        daily = []
        for price in soup.find_all('item'):
            # Each <item data="date|open|high|low|close|volume"> is one candle.
            date, p_open, p_high, p_low, p_close, volume = price['data'].split('|')[:6]
            daily.append([
                pd.to_datetime(date).date(),
                float(p_open),
                float(p_high),
                float(p_low),
                float(p_close),
                int(volume),
            ])
        df = pd.DataFrame(
            daily,
            columns=['trade_date', 'price_open', 'price_high', 'price_low',
                     'price_close', 'trade_volume'],
        )
        df['Code'] = cd
        return df

    def make_query(self, df):
        """Build an INSERT-or-REPLACE SQL script for every row of *df*.

        Joins the per-row statements with str.join instead of repeated
        string concatenation (the original loop was quadratic in len(df)).
        Values come from the numeric/date fields parsed above; a
        parameterized executemany would still be preferable in general.
        """
        statements = []
        for i in range(len(df)):
            statements.append(
                'INSERT or REPLACE INTO price_daily (trade_date, code, '
                'price_open, price_high, price_low, price_close, trade_volume) '
                'VALUES ("{}", "{}", {}, {}, {}, {}, {}); '.format(
                    df.iloc[i, 0], df.iloc[i, 6], df.iloc[i, 1], df.iloc[i, 2],
                    df.iloc[i, 3], df.iloc[i, 4], df.iloc[i, 5]
                )
            )
        return ''.join(statements)

    def db_update(self, query_string):
        """Execute *query_string* (a script of statements) on the shared connection."""
        with con:
            cur = con.cursor()
            cur.executescript(query_string)
class DB2DF:
    """Reads daily closing prices back out of SQLite into a DataFrame."""

    def make_df(self, s_cd):
        """Return a DataFrame of closing prices, one column per stock code.

        *s_cd* may be a single code (str) or an iterable of codes.  The
        index holds the trade-date values stored in the table.
        """
        # Normalize the argument to a list of codes (isinstance instead of
        # the original `type(...) == str` so str subclasses also work).
        cds = [s_cd] if isinstance(s_cd, str) else s_cd
        price_df = pd.DataFrame()
        # Parameterized query; hoisted out of the loop since it never changes.
        sql = 'SELECT trade_date, price_close FROM price_daily WHERE code=?'
        for cd in cds:
            with con:
                cur = con.cursor()
                cur.execute(sql, [cd])
                rows = cur.fetchall()
            closes = pd.Series({trade_date: close for trade_date, close in rows},
                               name=cd)
            price_df = pd.concat([price_df, closes], axis=1, sort=True)
        return price_df
9715647 | from django.db import models
from django.urls import reverse
# Create your models here.
class Funcionario(models.Model):
    """Employee ("funcionário") database model."""

    # Basic identification fields: first name, last name, job title, e-mail.
    nome = models.CharField(max_length=25)
    sobrenome = models.CharField(max_length=25)
    cargo = models.CharField(max_length=25)
    email = models.CharField(max_length=30)
    # Visibility flag -- presumably used to hide records without deleting
    # them; confirm against the views that filter on it.
    mostrar = models.BooleanField(default=True)

    class Meta:
        # Explicit table name instead of Django's default "<app>_funcionario".
        db_table = 'funcionarios'

    def __str__(self):
        return self.nome

    def get_absolute_url(self):
        """Reverse the 'atualizar-funcionario' (update) route for this record."""
        return reverse("atualizar-funcionario", kwargs={'funcionario_id':self.id})

    def get_delete_url(self):
        """Reverse the 'deletar-funcionario' (delete) route for this record."""
        return reverse("deletar-funcionario", kwargs={'funcionario_id':self.id})
9745456 | import collections
import itertools
import jinja2
import json
import nbformat
import pathlib
import shutil
import sys
import tempfile
import tqdm
from nbconvert import HTMLExporter, PDFExporter
# Site-wide metadata injected into every rendered template.
ROOT = "cfm"  # site root identifier; substituted for "{{root}}" in rendered notebooks
TITLE = "Computing for mathematics"
DESCRIPTION = "An undergraduate course introducing programming, through Python, to mathematicians."
KEYWORDS = "python, mathematics, jupyter"
AUTHOR = "<NAME>"  # extraction placeholder left as-is
def get_id(path):
    """Return the identifier (URL slug) portion of a notebook file name.

    "01-Probability.ipynb" -> "01"; names without a dash are slugified
    (lower-cased, spaces -> dashes, commas removed).
    """
    stem = path.stem
    separator = stem.find("-")
    if separator != -1:
        return stem[:separator]
    return stem.lower().replace(" ", "-").replace(",", "")
def get_name(path):
    """Return the human-readable name of a notebook file.

    "01-Intro-to-Python.ipynb" -> "Intro to Python"; names without a dash
    are returned unchanged.
    """
    stem = path.stem
    separator = stem.find("-")
    if separator == -1:
        return stem
    return stem[separator:].replace("-", " ").lstrip()
def convert_html(nb_path, tags_to_ignore=None):
    """
    Convert a notebook to html, dropping every cell tagged with any of
    *tags_to_ignore* (defaults to ("solution",)).

    Returns the (body, resources) pair produced by nbconvert's HTMLExporter.
    """
    # Avoid the original mutable-default-argument (`=["solution"]`) pitfall.
    if tags_to_ignore is None:
        tags_to_ignore = ("solution",)
    nb = json.loads(nb_path.read_text())
    # Keep a cell when it has no tags, or none of its tags are ignored.
    nb["cells"] = [
        cell
        for cell in nb["cells"]
        if "tags" not in cell["metadata"]
        or all(tag not in cell["metadata"]["tags"] for tag in tags_to_ignore)
    ]
    temporary_nb = tempfile.NamedTemporaryFile()
    # NOTE: re-opening the file by name works on POSIX only; on Windows the
    # file cannot be opened again while NamedTemporaryFile holds it.
    with open(temporary_nb.name, "w") as f:
        f.write(json.dumps(nb))
    html_exporter = HTMLExporter()
    html_exporter.template_file = "basic"
    return html_exporter.from_file(temporary_nb)
def render_template(template_file, template_vars, searchpath="./templates/"):
    """Render the jinja2 template *template_file* with *template_vars*.

    Templates are looked up on the filesystem under *searchpath*.
    """
    loader = jinja2.FileSystemLoader(searchpath=searchpath)
    environment = jinja2.Environment(loader=loader)
    return environment.get_template(template_file).render(template_vars)
def make_dir(
    path, directory, previous_url=None, next_url=None, chapters=None, **kwargs
):
    """
    Render one notebook into <directory>/<id>/index.html.

    *previous_url*/*next_url* are the slugs used for the prev/next links;
    extra keyword arguments are forwarded to the template.
    """
    path_id = get_id(path)
    p = pathlib.Path(f"./{directory}/{path_id}")
    p.mkdir(exist_ok=True)
    nb, _ = convert_html(path)
    # (Removed the dead `check = False` local present in the original.)
    nb = nb.replace("{{root}}", ROOT)
    html = render_template(
        "content.html",
        {
            "nb": nb,
            "root": ROOT,
            "id": path_id,
            "previous_url": previous_url,
            "next_url": next_url,
            "chapters": chapters,
            **kwargs,
        },
    )
    (p / "index.html").write_text(html)
def make_collection(
    paths,
    directory,
    make_previous_url=True,
    make_next_url=True,
    chapters=None,
    **kwargs,
):
    """Render every notebook in *paths*, wiring circular prev/next links.

    NOTE(review): make_previous_url/make_next_url are accepted but unused,
    matching the original signature.
    """
    total = len(paths)
    for position, filename in enumerate(paths):
        # Neighbours wrap around, so the first chapter links back to the last.
        previous_id = get_id(paths[(position - 1) % total])
        next_id = get_id(paths[(position + 1) % total])
        make_dir(
            pathlib.Path(filename),
            directory=directory,
            previous_url=previous_id,
            next_url=next_id,
            chapters=chapters,
            **kwargs,
        )
# One site chapter: *dir* is the URL slug, *title* the display name,
# *nb* the source notebook path (as a string).
Chapter = collections.namedtuple("chapter", ["dir", "title", "nb"])
if __name__ == "__main__":
    nb_dir = pathlib.Path("nbs")
    chapter_paths = sorted(nb_dir.glob("./chapters/*ipynb"))
    other_paths = list(nb_dir.glob("./other/*ipynb"))
    # Build the navigation index: (slug, title, notebook path) per chapter.
    chapters = []
    for path in tqdm.tqdm(sorted(chapter_paths)):
        chapters.append(Chapter(f"{get_id(path)}", get_name(path), str(path)))
    # Render both notebook collections into their output directories.
    for paths, directory in [
        (chapter_paths, "chapters"),
        (other_paths, "other"),
    ]:
        make_collection(
            paths=paths,
            directory=directory,
            chapters=chapters,
            title=TITLE,
            description=DESCRIPTION,
            keywords=KEYWORDS,
            author=AUTHOR,
        )
    # Finally render the landing page with the full chapter list.
    html = render_template(
        "home.html",
        {
            "chapters": chapters,
            "root": ROOT,
            "title": TITLE,
            "description": DESCRIPTION,
            "keywords": KEYWORDS,
            "author": AUTHOR,
        },
    )
    with open("index.html", "w") as f:
        f.write(html)
| StarcoderdataPython |
11326595 | <filename>book2vec/core.py
import numpy as np
import json
from typing import List, Union
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import logging
logger = logging.getLogger(__name__)
class Book2VecAnalysis:
    """Holds a book embedding matrix and answers nearest-neighbour queries."""

    def __init__(self, file_obj=None):
        """Optionally load a serialized analysis from *file_obj* right away."""
        self.loaded = False
        self.embedding_matrix = None
        self.vocab_size = None
        self.index_to_review = None
        self.index_to_metadata = None
        if file_obj:
            self.load(file_obj)

    def load(self, file_obj):
        """Populate the analysis from a JSON file object."""
        payload = json.load(file_obj)
        self.embedding_matrix = payload["embedding_matrix"]
        self.vocab_size = payload["vocab_size"]
        # JSON object keys are strings; restore the integer book indices.
        self.index_to_review = {
            int(idx): review for idx, review in payload["index_to_review"].items()
        }
        self.index_to_metadata = {
            int(idx): meta for idx, meta in payload["index_to_metadata"].items()
        }
        self.loaded = True

    def get_suggestions(self, idxs: Union[List[int], int]) -> pd.DataFrame:
        """Suggest books similar to the given book ID(s).

        For each query ID the 250 nearest neighbours (by cosine similarity)
        are collected; suggestions are ranked by the mean similarity across
        the query IDs, with the query IDs themselves removed from the result.
        """
        if isinstance(idxs, int):
            idxs = [idxs]
        per_book = {idx: self._get_nearest(idx, 250) for idx in idxs}
        suggestions = pd.concat(per_book, axis=1, join="outer")
        suggestions["cumulative"] = suggestions.mean(axis=1)
        suggestions.sort_values(by="cumulative", ascending=False, inplace=True)
        return suggestions[~suggestions.index.isin(idxs)]

    def _get_nearest(self, idx: int, limit: int = None) -> pd.Series:
        """Cosine similarity of book *idx* against every book.

        :param idx: the book ID (row of the embedding matrix)
        :param limit: when given, only the *limit* most similar books are
            returned, sorted descending; otherwise the raw (unsorted) series
        :return: a Series of similarity values indexed by book ID
        """
        query = np.array(self.embedding_matrix[idx]).reshape((1, -1))
        everything = np.array(self.embedding_matrix)
        scores = pd.Series(cosine_similarity(query, everything).T.flatten())
        if limit:
            return scores.sort_values(ascending=False)[:limit]
        return scores
| StarcoderdataPython |
import hashlib

# Read one line from stdin and print its RIPEMD-160 digest in hex.
# (hashlib.new accepts the initial data directly, so no separate update.)
print(hashlib.new('ripemd160', input().encode('utf-8')).hexdigest())
| StarcoderdataPython |
1911898 | # Copyright (C) 2009 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
# XML reader/writer.
from MAT.DocumentIO import declareDocumentIO, DocumentFileIO, SaveError
from MAT.Document import LoadError
from MAT.Annotation import AnnotationAttributeType, StringAttributeType, FloatAttributeType, \
BooleanAttributeType, IntAttributeType, AttributeValueList, AttributeValueSet
import xml.parsers.expat, xml.sax.saxutils, re, base64
import sys
from MAT import json
# We need to know what the annotations are in the task,
# if we're supposed to filter out the "junk" tags and only translate
# the tags for the task.
# There are going to be a couple special things in here
# now. First, there will be the <_mat:atypes> element, which
# optionally declares all the available annotation types and
# their attributes and types. This is important for recording
# spanless annotations and for recording annotations which have
# annotation-valued attributes (and the attribute for that will be
# _mat:id). So <_mat:atypes> looks like this:
# <_mat:atypes><atype name="..." hasSpan="yes|no"><attr name="..." type="string|attribute"/>...</atype></atypes>
# It will have no whitespace at all, so it doesn't count toward
# the offsets, and it should appear, if it appears at all,
# before the first annotation. If I can, when I render, I'll
# put it at the beginning of the document, but if it's
# an overlay, it'll go immediately before the first annotation.
class _ParserState:
    """Accumulates parser state while reading a MAT XML document.

    Wires expat callbacks that (a) collect the signal text and (b) build
    annotations on *annotDoc*.  When *inputIsOverlay* is true, markup that
    is not a task annotation is copied verbatim into the signal.
    (Python 2 code: uses the old ``raise E, msg`` statement form.)
    """

    def __init__(self, annotDoc, inputIsOverlay, translateAll):
        self.inputIsOverlay = inputIsOverlay
        self.xmlTranslateAll = translateAll
        if self.xmlTranslateAll:
            # The annotDoc will likely come in with an annotation repository.
            # It's not enough to cut it off from the global repository;
            # we have to convince it that the annotation types it ALREADY
            # has are not closed.
            annotDoc.unlockAtypeRepository()
        self.annotDoc = annotDoc
        # The parser expects BYTES, not unicode characters.
        # The documentation does not make this clear at all.
        self.parser = xml.parsers.expat.ParserCreate("UTF-8")
        self.parser.StartElementHandler = self._startElement
        self.parser.EndElementHandler = self._endElement
        self.parser.CharacterDataHandler = self._handleCdata
        self.parser.CommentHandler = self._handleComment
        self.parser.DefaultHandler = self._handleDefault
        self.stack = []  # currently open annotations, innermost first
        self.pos = 0  # current character offset into the accumulated signal
        self.signalChunks = []  # pieces of signal text collected so far
        self._digestingAtypes = False  # inside a <_mat:atypes> element
        self._digestedAtypes = False  # a <_mat:atypes> block has been seen
        self._curAtype = None
        # The annotation attributes must be postponed, because some of
        # them may be annotation-valued.
        self._annotPairs = []
        self._idMap = {}

    def _addSignalChunk(self, chunk):
        # Append *chunk* to the signal and advance the offset counter.
        self.signalChunks.append(chunk)
        self.pos += len(chunk)

    # VERY subtle bug. Because inputIsOverlay counts in BYTES,
    # and because I have to pass a byte string to the parser, I
    # can't store the input as a Unicode string; I have to encode
    # it into UTF-8 first.
    def parse(self, s):
        """Parse the unicode string *s* and populate the annotated document."""
        # Boy, this is hideous. XML doesn't permit CRLF - each
        # XML processor is obligated to convert them to LF. The
        # only way to block that is to replace \r with the
        # appropriate entity - but then the parser barfs on any
        # &#xD; outside the toplevel tag, because it's not actual whitespace.
        # Poo.
        # So the answer is to do the replacement ONLY inside the
        # toplevel tag. That's easy enough.
        if s.find("\r") > -1:
            # Find the first toplevel tag.
            # Exclude ! (comment and doctype) and ? (xmldesc and processing instruction).
            # This will barf if there's nothing in the toplevel tag.
            mStart = re.search("<([^!?][^\s>]*)", s)
            if mStart is not None:
                # Gotta be something in the toplevel tag in order for
                # me to care.
                topStart = mStart.start()
                topEnd = re.search("</" + mStart.group(1) + ">", s).end()
                # NOTE(review): the entity below had been garbled to a literal
                # CR in this copy; "&#xD;" is the reconstruction -- confirm
                # against the upstream source.
                s = s[:topStart] + s[topStart:topEnd].replace("\r", "&#xD;") + s[topEnd:]
        self.input = s.encode('utf-8')
        self.parser.Parse(self.input, True)
        # Now that we're done parsing, we have to update all the
        # attributes for those which have annotation-valued attributes.
        atypeAttrHash = {}
        for newAnnot, attrDict in self._annotPairs:
            atype = newAnnot.atype
            try:
                attrHash = atypeAttrHash[atype]
            except KeyError:
                # Build, once per annotation type, a table mapping attribute
                # name -> string-decoding function.
                attrHash = {}
                for t in atype.attr_list:
                    # Barf.
                    if isinstance(t, AnnotationAttributeType):
                        # The IDs are all strings. We need to look
                        # them up in the _idMap.
                        if t.aggregation == "list":
                            attrHash[t.name] = lambda x: AttributeValueList([self._idMap[v] for v in x.split(",")])
                        elif t.aggregation == "set":
                            attrHash[t.name] = lambda x: AttributeValueSet([self._idMap[v] for v in x.split(",")])
                        else:
                            attrHash[t.name] = lambda x: self._idMap[x]
                    elif isinstance(t, StringAttributeType):
                        if t.aggregation == "set":
                            attrHash[t.name] = lambda x: AttributeValueSet(x.split(","))
                        elif t.aggregation == "list":
                            attrHash[t.name] = lambda x: AttributeValueList(x.split(","))
                        else:
                            attrHash[t.name] = lambda x: x
                    else:
                        if isinstance(t, FloatAttributeType):
                            mapper = float
                        elif isinstance(t, IntAttributeType):
                            mapper = int
                        elif isinstance(t, BooleanAttributeType):
                            mapper = lambda x: x == "yes"
                        # NOTE(review): the aggregation lambdas below close over
                        # *mapper* late; if one atype mixes float/int/boolean
                        # attributes, they will all decode with the LAST mapper
                        # assigned in this loop.
                        if t.aggregation == "set":
                            attrHash[t.name] = lambda x: AttributeValueSet([mapper(v) for v in x.split(",")])
                        elif t.aggregation == "list":
                            attrHash[t.name] = lambda x: AttributeValueList([mapper(v) for v in x.split(",")])
                        else:
                            attrHash[t.name] = mapper
                atypeAttrHash[atype] = attrHash
            for k, v in attrDict.items():
                newAnnot[k] = attrHash[k](v)
        newSignal = "".join(self.signalChunks)
        if self.annotDoc.signal and (self.annotDoc.signal != newSignal):
            raise LoadError, "signal from XML file doesn't match original signal"
        self.annotDoc.signal = newSignal
        if self.inputIsOverlay:
            self.annotDoc.metadata["signal_type"] = "xml"

    def _startElement(self, eltName, attrDict):
        """Expat callback: open an annotation, digest _mat:* declarations,
        or (in overlay mode) copy non-task markup into the signal."""
        if eltName == "_mat:atype":
            if not self._digestingAtypes:
                raise LoadError, "found _mat:atype outside _mat:atypes"
            self._digestAtype(attrDict)
            return
        elif eltName == "_mat:attr":
            if not self._digestingAtypes:
                raise LoadError, "found _mat:attr outside _mat:atypes"
            self._digestAttr(attrDict)
            return
        elif eltName == "_mat:atypes":
            self._digestingAtypes = True
            return
        if (not self.xmlTranslateAll) and (not self.annotDoc.findAnnotationType(eltName, create = False)):
            if self.inputIsOverlay:
                # There doesn't appear to be any way of finding the
                # end of the element in the input from the parser bindings, grrr.
                # So we have to look for ">" in the signal.
                tag = self.input[self.parser.CurrentByteIndex:self.input.find(">", self.parser.CurrentByteIndex) + 1]
                self._addSignalChunk(tag)
            return
        # If the atypes were declared, you don't want to check the attributes.
        t = self.annotDoc.findAnnotationType(eltName, create = not self._digestedAtypes)
        if not self._digestedAtypes:
            for key in attrDict.keys():
                if key != "_mat:id":
                    t.ensureAttribute(key)
        if t.hasSpan:
            # Spanned annotations start and end at the current position; the
            # end is fixed up in _endElement.
            newAnnot = self.annotDoc.createAnnotation(self.pos, self.pos, t, blockAdd = True)
        else:
            newAnnot = self.annotDoc.createSpanlessAnnotation(t, blockAdd = True)
        id = attrDict.get("_mat:id")
        if id is not None:
            newAnnot.setID(id)
            self._idMap[id] = newAnnot
            del attrDict["_mat:id"]
        self._annotPairs.append((newAnnot, attrDict))
        self.stack[0:0] = [newAnnot]

    def _digestAtype(self, attrDict):
        # Record a declared annotation type from <_mat:atype name=... hasSpan=...>.
        self._curAtype = self.annotDoc.findAnnotationType(attrDict["name"], hasSpan = attrDict["hasSpan"] == "yes")

    def _digestAttr(self, attrDict):
        # Record a declared attribute from <_mat:attr name=... type=... aggregation=...>.
        self._curAtype.ensureAttribute(attrDict["name"], aType = attrDict.get("type"),
                                       aggregation = attrDict.get("aggregation"))

    def _endElement(self, eltName):
        """Expat callback: close the innermost open annotation (or pass
        through non-task markup in overlay mode)."""
        if eltName in ("_mat:attr", "_mat:atype"):
            return
        elif eltName == "_mat:atypes":
            self._digestingAtypes = False
            self._digestedAtypes = True
            return
        if (not self.xmlTranslateAll) and (not self.annotDoc.findAnnotationType(eltName, create = False)):
            if self.inputIsOverlay:
                tag = self.input[self.parser.CurrentByteIndex:self.input.find(">", self.parser.CurrentByteIndex) + 1]
                self._addSignalChunk(tag)
            return
        if self.stack[0].atype.lab == eltName:
            if self.stack[0].atype.hasSpan:
                self.stack[0].end = self.pos
            self.annotDoc._addAnnotation(self.stack[0])
            self.stack[0:1] = []
        else:
            raise LoadError, "annotation close doesn't match annotation open"

    # I'm going to do something special with comments: I'm going to
    # dump the document metadata in a comment when I encounter it. I believe
    # the comment can appear after the final tag, so I'll put it at the very
    # end of the file.
    METADATA_PAT = re.compile("^ _mat_metadata_ (.*) $")

    def _handleComment(self, data):
        """Expat callback: decode the base64 metadata comment, or pass other
        comments through in overlay mode."""
        m = self.METADATA_PAT.match(data)
        if m is None:
            if self.inputIsOverlay:
                # Pass through the comment.
                self._addSignalChunk("<!--")
                self._addSignalChunk(data)
                self._addSignalChunk("-->")
        else:
            # We've got the metadata.
            jsonMetadata = base64.b64decode(m.group(1))
            self.annotDoc.metadata = json.loads(jsonMetadata)

    def _handleCdata(self, data):
        # Character data always becomes signal; in overlay mode it must be
        # re-escaped so the rendered signal stays well-formed XML.
        if self.inputIsOverlay:
            data = xml.sax.saxutils.escape(data)
        self._addSignalChunk(data)

    # Pass through anything you find if it's a default.
    def _handleDefault(self, data):
        if self.inputIsOverlay:
            self._addSignalChunk(data)
from MAT.Operation import OpArgument, OptionTemplate
class XMLDocumentIO(DocumentFileIO):
def __init__(self, xml_input_is_overlay = False, xml_translate_all = False, signal_is_xml = False,
xml_output_tag_exclusions = None, xml_output_exclude_metadata = False,
encoding = None, **kw):
# Changing the default encoding.
if encoding is None:
encoding = "utf-8"
DocumentFileIO.__init__(self, encoding = encoding, **kw)
if xml_input_is_overlay:
signal_is_xml = True
self.excludeMetadata = xml_output_exclude_metadata
self.xmlInputIsOverlay = xml_input_is_overlay
self.tagExclusions = None
if xml_output_tag_exclusions is not None:
if type(xml_output_tag_exclusions) is str:
xml_output_tag_exclusions = xml_output_tag_exclusions.split(",")
if xml_output_tag_exclusions == [""]:
xml_output_tag_exclusions = []
self.tagExclusions = dict([(t, True) for t in xml_output_tag_exclusions])
self.signalIsXML = signal_is_xml
self.xmlTranslateAll = xml_translate_all
# This must be implemented by the children. s is a Unicode string.
# annotDoc is an annotated document.
def deserialize(self, s, annotDoc):
# If there's no global annotation type repository, we want xmlTranslateAll to be True.
state = _ParserState(annotDoc, self.xmlInputIsOverlay, self.xmlTranslateAll or (not annotDoc.atypeRepository.globalTypeRepository))
state.parse(s)
def writeToUnicodeString(self, annotDoc):
signalIsXML = self.signalIsXML or \
(annotDoc.metadata.has_key("signal_type") and
annotDoc.metadata["signal_type"] == "xml")
# Get all the annotations. Let's not care about overlap right now,
# since overlap will happen if I'm writing everything out, because
# it'll be nested. So just get the annotations and then
# sort them, and if we ever get a crossing dependency, we'll
# have to check otherwise.
# Split the atypes into spanned and spanless.
spanned = []
spanless = []
# I used to remove the tag exclusions when I collect the
# indices, but I need to do it earlier in order to figure out
# if I need a top element or not.
annots = []
spanlessAnnots = []
for atype in annotDoc.atypeDict.keys():
if self.tagExclusions and self.tagExclusions.has_key(atype.lab):
continue
if atype.hasSpan:
spanned.append(atype.lab)
else:
spanless.append(atype.lab)
if spanned:
annots = annotDoc.getAnnotations(atypes = spanned)
if spanless:
spanlessAnnots = annotDoc.getAnnotations(atypes = spanless)
# We now know they can nest. So let's sort them.
# Sort them first by earliest start, latest end.
annots.sort(self._cmpAnnot)
# Now, I can loop through the annots, and keep a stack,
# and we know when to pop the stack because of
# how the indexes work.
indices = {}
lastAnnot = None
for annot in annots:
if lastAnnot and \
(((lastAnnot.start < annot.start) and (lastAnnot.end < annot.end)) or \
((lastAnnot.start > annot.start) and (lastAnnot.end > annot.end))):
raise SaveError, "crossing dependencies"
try:
indices[annot.start][0].append(annot)
except KeyError:
indices[annot.start] = [[annot], []]
try:
indices[annot.end][1].append(annot)
except KeyError:
indices[annot.end] = [[], [annot]]
indexList = indices.keys()
indexList.sort()
segs = []
# So we need to add a toplevel XML tag if we don't already have one, and if
# we're not adding our own info.
# The signal is not XML, and
# There are no spanless annotations, or
# The maximal annotation starts after the beginning, or
# the maximal annotation ends before the end, or
# there are spanless annots, which will be inserted as zero-length
# annots before the first spanned annotation, or
# we add metadata, which will insert the annotation types
# in a similar position.
addTop = (not signalIsXML) and \
((not annots) or \
(annots[0].start > 0) or \
(annots[0].end < len(annotDoc.signal)) or \
spanlessAnnots or \
(not self.excludeMetadata))
if addTop:
segs.append("<__top>")
pos = 0
atypesInserted = False
for i in indexList:
if pos < i:
seg = annotDoc.signal[pos:i]
if not signalIsXML:
seg = xml.sax.saxutils.escape(seg)
segs.append(seg)
pos = i
[starts, ends] = indices[i]
# Reverse the ends.
ends.reverse()
for endAnnot in ends:
segs.append("</" + endAnnot.atype.lab + ">")
for startAnnot in starts:
if not atypesInserted:
if not self.excludeMetadata:
segs.append(self._formatAtypes(annotDoc))
atypesInserted = True
if spanlessAnnots:
for sAnnot in spanlessAnnots:
segs.append(self._formatAnnot(sAnnot, spanless = True))
segs.append(self._formatAnnot(startAnnot))
if pos < len(annotDoc.signal):
seg = annotDoc.signal[pos:]
if not signalIsXML:
seg = xml.sax.saxutils.escape(seg)
segs.append(seg)
if addTop:
segs.append("</__top>")
if not self.excludeMetadata:
segs.append("<!-- _mat_metadata_ "+ base64.b64encode(json.dumps(annotDoc.metadata)) + " -->")
return "".join(segs)
def _cmpAnnot(self, ann1, ann2):
return cmp(ann1.start, ann2.start) or -cmp(ann1.end, ann2.end)
def _formatAtypes(self, annotDoc):
segs = ["<_mat:atypes>"]
for atype in annotDoc.atypeDict.keys():
segs.append("<_mat:atype name='%s' hasSpan='%s'>" %
(atype.lab, (atype.hasSpan and "yes") or "no"))
for attr in atype.attr_list:
segs.append("<_mat:attr name='%s' type='%s' aggregation='%s'/>" % (attr.name, attr._typename_, attr.aggregation or "none"))
segs.append("</_mat:atype>")
segs.append("</_mat:atypes>")
return "".join(segs)
    def _formatAnnot(self, annot, spanless = False):
        """Render the start tag for a single annotation.

        Attribute values are emitted with xml.sax.saxutils.quoteattr.
        Annotation-valued attributes are rendered as the referenced
        annotation id(s), comma-joined when the attribute aggregates.
        A spanless annotation is emitted as a self-closing tag.
        """
        elts = ["<", annot.atype.lab]
        if annot.attrs:
            # attrs is positional, parallel to the type's attr_list.
            for attr, val in zip(annot.atype.attr_list, annot.attrs):
                if val is not None:
                    # Handle annotations specially.
                    if attr._typename_ == "annotation":
                        if attr.aggregation:
                            v = ",".join([str(a.id) for a in val])
                        else:
                            v = str(val.id)
                    else:
                        v = attr.toStringNonNull(val)
                    elts.append(" " + attr.name + "=" + xml.sax.saxutils.quoteattr(v))
        if annot.id is not None:
            elts.append(" _mat:id=" + xml.sax.saxutils.quoteattr(annot.id))
        if spanless:
            elts.append("/>")
        else:
            elts.append(">")
        return "".join(elts)
# Best to use the OpArgument infrastructure, so we can extract
# arguments cleanly from CGI and cmdline.
inputArgs = OptionTemplate([OpArgument("xml_input_is_overlay",
help = "If specified, the input XML will be treated as a mix of task-relevant annotations and underlying XML, and the extracted signal will be a well-formed XML file"),
OpArgument("xml_translate_all",
help = "If specified, all tags will be converted, whether or not they're found in whatever task is specified")],
heading = "Options for XML input")
outputArgs = OptionTemplate([OpArgument("signal_is_xml",
help = "If specified, the underlying signal will be treated as a well-formed XML file when the output file is rendered. If the input file type is also 'xml-inline', use the --xml_input_is_overlay flag to control this setting instead."),
OpArgument("xml_output_tag_exclusions",
help = "A comma-delimited list of annotation labels to exclude from the XML output",
hasArg = True),
OpArgument("xml_output_exclude_metadata",
help = "Normally, the XML writer saves the document metadata inside an XML comment, so it can be read back in by the XML reader. This flag causes the metadata not to be written.")],
heading = "Options for XML output")
declareDocumentIO("xml-inline", XMLDocumentIO, True, True)
#
# And now, the "fake" version, for when somebody just randomly inserted SGML-ish tags.
#
# ONLY a reader.

class FakeXMLIO(DocumentFileIO):
    """Reader for documents whose signal has ad-hoc SGML-ish tags mixed in.

    The tags are not required to form well-formed XML: attributes use a
    loose name = "value" syntax, unparseable <...> spans are treated as
    plain signal, and the active deserialize (the second one below)
    supports self-closing null tags and nested tags.  Registered as a
    read-only IO.  Python 2 module (old-style raise / print >> syntax).
    """

    def writeToUnicodeString(self, annotDoc):
        # Read-only IO; rendering back out is unsupported.
        raise NotImplementedError

    # In fake xml IO, it's just SGML-ish tags.
    # NOTE(review): this first deserialize unpacks THREE regex groups from
    # TAG_LOCATOR, but the TAG_LOCATOR defined below captures only one.
    # It is shadowed by the second deserialize later in this class, so it
    # is retained dead code.
    def deserialize(self, s, annotDoc):
        signalStrings = []
        startI = 0
        contentLen = 0          # running length of the tag-stripped signal
        for m in self.TAG_LOCATOR.finditer(s):
            startTag, content, endTag = m.groups()
            # Let's do all the bookkeeping first.
            prefix = s[startI:m.start()]
            contentLen += len(prefix)
            signalStrings.append(prefix)
            annotStart = contentLen
            signalStrings.append(content)
            contentLen += len(content)
            annotEnd = contentLen
            startI = m.end()
            # Now, let's take the startTag apart.
            labToks = startTag.split(None, 1)
            if labToks[0] != endTag:
                raise LoadError, ("mismatched start and end tags (%s vs. %s) at %d" % (labToks[0], endTag, m.start()))
            t = annotDoc.findAnnotationType(endTag, create = True)
            attrs = {}
            if len(labToks) > 1:
                # There are attributes. Grab a token (no whitespace, no =), then
                # look for an =, then find the following value.
                attrStr = labToks[1]
                while attrStr:
                    m = self.ATTR_NAME_LOCATOR.match(attrStr)
                    if m is None:
                        raise LoadError, ("ill-formed attribute string '%s'" % labToks[1])
                    attr = m.group(1)
                    t.ensureAttribute(attr)
                    attrStr = attrStr[m.end():]
                    # Now, we need a value.
                    m = self.ATTR_VALUE_LOCATOR.match(attrStr)
                    if m is None:
                        raise LoadError, ("ill-formed attribute string '%s'" % labToks[1])
                    val = m.group(2)
                    # Since this sucks up trailing whitespace, it should
                    # terminate the string when we reach the end of it.
                    attrStr = attrStr[m.end():]
                    attrs[attr] = val
            # OK, now we add the annotation.
            annotDoc.createAnnotation(annotStart, annotEnd, t, attrs)
        signalStrings.append(s[startI:])
        annotDoc.signal = "".join(signalStrings)

    # Matches any <...> span; group 1 is the tag interior.
    TAG_LOCATOR = re.compile("\<([^>]+)\>", re.S)
    ATTR_NAME_LOCATOR = re.compile("^\s*([^\s=]+)\s*=\s*")
    # The escape for the double quote isn't strictly necessary,
    # but it works better with the Python Emacs mode.
    ATTR_VALUE_LOCATOR = re.compile("""^([\"'])(.*?)\\1\s*""")

    # We can't quite do this the way we did the fake XML IO reader, because
    # we have (a) null annotations and (b) nested annotations. So we need
    # to look for the tags, and keep gathering input.
    # Because this is pseudo-XML, there may be <> in the signal. So if
    # we find a tag which we can't parse, assume it's signal.

    def _parseTag(self, startTag):
        """Split a tag interior into (label, attrs dict).

        Returns (None, None) when the attribute portion cannot be parsed,
        which the caller treats as "not a tag, just signal text".
        """
        labToks = startTag.split(None, 1)
        attrs = {}
        if len(labToks) > 1:
            # There are attributes. Grab a token (no whitespace, no =), then
            # look for an =, then find the following value.
            attrStr = labToks[1]
            while attrStr:
                m = self.ATTR_NAME_LOCATOR.match(attrStr)
                if m is None:
                    # Attribute string is ill-formed.
                    return None, None
                attr = m.group(1)
                attrStr = attrStr[m.end():]
                # Now, we need a value.
                m = self.ATTR_VALUE_LOCATOR.match(attrStr)
                if m is None:
                    # Attribute string is ill-formed.
                    return None, None
                val = m.group(2)
                # Since this sucks up trailing whitespace, it should
                # terminate the string when we reach the end of it.
                attrStr = attrStr[m.end():]
                attrs[attr] = val
        return labToks[0], attrs

    # Toplevel.  Overrides the first deserialize above: handles open/close
    # pairs (possibly nested) and self-closing null tags, and falls back
    # to treating unparseable <...> spans as literal signal.
    def deserialize(self, s, annotDoc):
        annotations = []        # [label, attrs, start, end] records
        tagStack = []           # open tags awaiting their close tag
        signalStrings = []
        startI = 0
        contentLen = 0
        NULL, OPEN, CLOSE = 0, 1, 2
        while True:
            m = self.TAG_LOCATOR.search(s, startI)
            if m is None:
                signalStrings.append(s[startI:])
                break
            tagContent = m.group(1)
            # If it starts with a /, it's an end tag.
            # if it ends with a /, it's a null tag.
            if tagContent[-1] == "/":
                tagLiteral = tagContent[:-1]
                tagStatus = NULL
            elif tagContent[0] == "/":
                tagLiteral = tagContent[1:]
                tagStatus = CLOSE
            else:
                tagLiteral = tagContent
                tagStatus = OPEN
            if tagStatus in (NULL, OPEN):
                label, attrs = self._parseTag(tagLiteral)
                if label is None:
                    # Probably actually signal instead. Skip it. Well, don't
                    # skip the WHOLE thing; the pattern may have captured
                    # a real tag too.
                    print >> sys.stderr, "Found bogus tag match '%s'; treating first character as text and trying again." % s[m.start():m.end()].encode('ascii', 'replace')
                    signalStrings.append(s[startI:m.start() + 1])
                    contentLen += ((m.start() + 1) - startI)
                    startI = m.start() + 1
                    continue
            # So now, we know it's a non-bogus tag.
            prefix = s[startI:m.start()]
            contentLen += len(prefix)
            signalStrings.append(prefix)
            annotLoc = contentLen
            startI = m.end()
            if tagStatus is NULL:
                # Zero-length (spanless) annotation.
                annotations.append([label, attrs, annotLoc, annotLoc])
            elif tagStatus is CLOSE:
                # Gotta match what's on the top of the stack.
                tagContent = tagContent[1:]
                if not tagStack:
                    raise IOError, "no start tag for end tag"
                elif tagStack[-1][0] != tagContent:
                    raise IOError, "mismatch start and end tags (%s vs %s)" % (tagStack[-1][0], tagContent)
                else:
                    [label, attrs, startIdx] = tagStack[-1]
                    tagStack[-1:] = []
                    annotations.append([label, attrs, startIdx, annotLoc])
            else:
                label, attrs = self._parseTag(tagContent)
                tagStack.append([label, attrs, annotLoc])
        # OK, now we add the annotations.
        # NOTE(review): `t` here is the label string rather than a type
        # object; presumably createAnnotation accepts either -- confirm.
        for t, attrs, annotStart, annotEnd in annotations:
            annotDoc.createAnnotation(annotStart, annotEnd, t, attrs)
        annotDoc.signal = "".join(signalStrings)

# Register as reader-only ('fake-xml-inline').
declareDocumentIO('fake-xml-inline', FakeXMLIO, True, False)
| StarcoderdataPython |
#!/usr/bin/env python
__author__ = '<NAME>'
import argparse
from RouToolPa.Routines import DrawingRoutines
# Command-line tool: read two columns from a delimited file and draw a
# plot/scatter (with linear- and log-y variants) via DrawingRoutines.
parser = argparse.ArgumentParser()

parser.add_argument("-i", "--input_file", action="store", dest="input_file",
                    help="Input file with data")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix",
                    help="Prefix of output files")
parser.add_argument("-s", "--separator", action="store", dest="separator", default="\t",
                    help="Separator between values in input file. Default - '\\t' ")
parser.add_argument("-a", "--x_column_index", action="store", dest="x_column_index", type=int,
                    help="Index of column with x values. 0-based")
parser.add_argument("-b", "--y_column_index", action="store", dest="y_column_index", type=int,
                    help="Index of column with y values. 0-based")
parser.add_argument("-n", "--min_x", action="store", dest="min_x", type=float,
                    help="Minimum x value to show. Default - not set")
parser.add_argument("-x", "--max_x", action="store", dest="max_x", type=float,
                    help="Maximum x value to show. Default - not set")
parser.add_argument("-q", "--min_y", action="store", dest="min_y", type=float,
                    help="Minimum y value to show. Default - not set")
parser.add_argument("-r", "--max_y", action="store", dest="max_y", type=float,
                    help="Maximum y value to show. Default - not set")
parser.add_argument("-e", "--extensions", action="store", dest="extensions", type=lambda x: x.split(","),
                    default=["png", "svg"],
                    help="Comma-separated list of extensions for histogram files")
parser.add_argument("-l", "--xlabel", action="store", dest="xlabel",
                    help="X label")
parser.add_argument("-y", "--ylabel", action="store", dest="ylabel",
                    help="Y label")
parser.add_argument("-t", "--title", action="store", dest="title",
                    help="Title of histogram")
parser.add_argument("--width", action="store", dest="width", default=6, type=int,
                    help="Figure width. Default: 6")
parser.add_argument("--height", action="store", dest="height", default=6, type=int,
                    help="Figure height. Default: 6")
parser.add_argument("-m", "--markersize", action="store", dest="markersize", default=2, type=int,
                    help="Size of marker. Default: 2")
parser.add_argument("--ylog", action="store", dest="ylogbase", default=10, type=int,
                    help="Log base for figure with logarithmic scale on y axis. Default: 10")
parser.add_argument("--type", action="store", dest="type", default="plot",
                    help="Type of figure. Allowed: plot(default), scatter")
parser.add_argument("-g", "--grid", action="store_true", dest="grid",
                    help="Show grid. Default: False")
args = parser.parse_args()

# All plotting and correlation reporting is delegated to the library call.
DrawingRoutines.draw_plot(args.input_file, args.output_prefix,
                          x_column_index=args.x_column_index, y_column_index=args.y_column_index,
                          separator=args.separator, min_x=args.min_x,
                          max_x=args.max_x, min_y=args.min_y, max_y=args.max_y,
                          extensions=args.extensions, xlabel=args.xlabel, ylabel=args.ylabel,
                          title=args.title, width=args.width, height=args.height,
                          markersize=args.markersize, ylogbase=args.ylogbase, type=args.type,
                          grid=args.grid, correlation=True)

# NOTE: the string literal below is retained dead code -- the previous
# inline matplotlib implementation that draw_plot() replaced.
"""
data = np.loadtxt(args.input_file, comments="#", usecols=(args.x_column_index, args.y_column_index))
plt.figure(1, figsize=(args.width, args.height), dpi=300)
plt.subplot(1, 1, 1)
if args.type == "plot":
plt.plot(data[:, 0], data[:, 1], markersize=args.markersize)
elif args.type == "scatter":
plt.scatter(data[:, 0], data[:, 1], s=args.markersize)
plt.xlim(xmin=args.min_x, xmax=args.max_x)
plt.ylim(ymin=args.min_y, ymax=args.max_y)
if args.xlabel:
plt.xlabel(args.xlabel)
if args.ylabel:
plt.ylabel(args.ylabel)
if args.title:
plt.title(args.title)
if args.grid:
plt.grid()
print("Kendal's tau")
print(stats.kendalltau(data[:, 0], data[:, 1]))
print("Pearson's r")
print(stats.pearsonr(data[:, 0], data[:, 1]))
for ext in args.extensions:
plt.savefig("%s.%s.%s" % (args.output_prefix, args.type, ext))
plt.yscale("log")
for ext in args.extensions:
plt.savefig("%s.%s.ylog%i.%s" % (args.output_prefix, args.type, args.ylogbase, ext))
"""
import re
import pandas as pd
from bs4 import BeautifulSoup
import mechanize
# Python 2 script: augment a Goodreads bestsellers CSV in place with each
# book's page count, scraped from its Goodreads page.
data = pd.read_csv('goodreads_bestsellers.csv')

for rownum, row in data.iterrows():
    print row['url']
    # Fresh browser per row; each URL is fetched independently.
    br = mechanize.Browser()
    r = br.open(row['url'])
    soup = BeautifulSoup(r.read(), 'html.parser')
    # The page count is marked up with itemprop="numberOfPages".
    pages = soup.find(itemprop="numberOfPages").get_text()
    print pages
    data.loc[rownum, 'Pages'] = pages

# Written once after the scrape.  NOTE(review): indentation was lost in
# this copy -- if per-row checkpointing was intended, this belongs inside
# the loop; confirm against the original.
data.to_csv('goodreads_bestsellers.csv', index = False)
| StarcoderdataPython |
# coding: utf-8
# app: mesa de atención
# module: forms
# date: jueves, 14 de junio de 2018 - 08:56
# description: Formulario para la bitácora de ciudadanos rechazados en mac
# pylint: disable=W0613,R0201,R0903
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Div, HTML, Field, Button
from crispy_forms.bootstrap import FormActions
from django import forms
from apps.mesas.models import Registro
class MesaForm(forms.ModelForm):
    """ModelForm for Registro (rejected-citizen log entries at the
    attention desk), laid out on a Bootstrap grid via crispy-forms."""

    def __init__(self, *args, **kwargs):
        super(MesaForm, self).__init__(*args, **kwargs)
        # crispy-forms helper: two field rows plus a save/cancel footer.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Div(
                # autocomplete off on free-text/date inputs.
                Field('modulo', wrapper_class='col-md-2', autocomplete='off'),
                Field('fecha', wrapper_class='col-md-2', autocomplete='off'),
                Field('lugar', wrapper_class='col-md-3'),
                css_class='row'
            ),
            Div(
                Field('sexo', wrapper_class='col-md-2'),
                Field('causa', wrapper_class='col-md-3'),
                Field('observaciones', wrapper_class='col-md-6', rows='3'),
                css_class='row'
            ),
            Div(
                HTML('<hr>'),
                FormActions(
                    Submit('save', 'Guardar cambios'),
                    Button('cancel', 'Cancelar')
                )
            )
        )

    class Meta:
        model = Registro
        fields = '__all__'
| StarcoderdataPython |
# python3 construct.py 0 8 &
# python3 construct.py 1 8 &
# python3 construct.py 2 8 &
# python3 construct.py 3 8 &
# python3 construct.py 4 8 &
# python3 construct.py 5 8 &
# python3 construct.py 6 8 &
# python3 construct.py 7 8 &
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from statistics import mean
def weird_division(n, d):
    """Divide n by d, returning 0 instead of raising when d is zero/falsy."""
    if d:
        return n / d
    return 0
def labeling(price, bp, k):
    """Assign a trend label to each point of `price` using a k-step look-ahead.

    For each index i (the last k points get no label), the slope is the
    relative change from price[i] to the mean of the next k prices.
    Slopes whose magnitude is below the `bp` threshold are labeled 0
    (flat); otherwise the label is the slope's sign (+1.0 / -1.0).
    """
    labels = []
    # price[:-k] is kept deliberately: it also makes k == 0 yield [].
    for idx, current in enumerate(price[:-k]):
        window = price[idx + 1:idx + k + 1]
        slope = (mean(window) - current) / current
        if abs(slope) < bp:
            labels.append(0)
        elif slope == 0:
            # Guarded division: a zero slope past the threshold maps to 0.
            labels.append(0)
        else:
            labels.append(slope / abs(slope))
    return labels
def calc_profit(price, label):
    """Simulate trading the label sequence and total the realized P&L.

    A position in direction label[0] opens at price[0] and is flipped
    whenever the label changes to a *different nonzero* value; each flip
    realizes (exit - entry) * direction.  Returns (total_profit,
    trade_times); trade_times is clamped to a minimum of 1 so callers can
    divide by it safely.
    """
    profit = 0
    trades = 0
    position = label[0]
    entry = price[0]
    for step, signal in enumerate(label[1:], start=1):
        # Hold through flat (0) signals and unchanged signals.
        if signal in (0, position):
            continue
        profit += (price[step] - entry) * position
        if position != 0:
            trades += 1
        position = signal
        entry = price[step]
    return profit, max(trades, 1)
def main():
    """Grid-search labeling hyperparameters over all price series.

    For every price series assigned to this shard, sweep the look-ahead
    horizon k and flat-band threshold bp, and save per-series matrices of
    total profit and trade counts under total_return/ and trade_time/.

    Sharding: invoked as `python construct.py <thread_no> [<thread_num>]`;
    this process handles series with index % thread_num == thread_no.
    """
    prices = np.load('price_201001~06.npy', allow_pickle=True)
    thread_no = int(sys.argv[1])
    thread_num = 8
    if len(sys.argv) > 2:
        thread_num = int(sys.argv[2])
    # lbl = labeling(prices[0], 0.0,10)
    # # print(lbl)
    # print(calc_profit(prices[0], lbl))
    # plot_labeling(prices[0], lbl)
    ### apply thread channel:
    ### usage:
    ### python construct.py 0 8
    return_dir = os.path.join(os.getcwd(), 'total_return/')
    if not os.path.isdir(return_dir):
        os.makedirs(return_dir)
    time_dir = os.path.join(os.getcwd(), 'trade_time/')
    if not os.path.isdir(time_dir):
        os.makedirs(time_dir)
    for i, price in enumerate(prices):
        if i % thread_num != thread_no:
            continue
        total_profit_mat = []
        trade_time_mat = []
        # Rows: k in 10..200 step 10; columns: bp in 0..0.005 step 0.0001.
        for k in range(10, 201, 10):
            total_profit_vec = []
            trade_time_vec = []
            for bp in np.arange(0.0000, 0.0051, 0.0001):
                lbl = labeling(price, bp, k)
                tr, tt = calc_profit(price, lbl)
                # print(tr)
                total_profit_vec.append(tr)
                trade_time_vec.append(tt)
            total_profit_mat.append(total_profit_vec)
            trade_time_mat.append(trade_time_vec)
        np.save('total_return/{}.npy'.format(i), total_profit_mat)
        np.save('trade_time/{}.npy'.format(i), trade_time_mat)
def trans2rect(label):
    """Compress a label sequence into runs of equal values.

    Returns a list of (value, start_index, width) tuples covering every
    element of `label` in order.

    Fixes: the original only flushed a run when the value changed, so the
    trailing run was silently dropped (plot_labeling never shaded the last
    segment); it also raised IndexError on an empty sequence.  The final
    run is now appended after the loop, and [] maps to [].
    """
    if not label:
        return []
    status = label[0]
    position = 0
    width = 1
    rects = []
    for i, l in enumerate(label[1:]):
        if status == l:
            width += 1
        else:
            rects.append((status, position, width))
            status = l
            position = i + 1
            width = 1
    # Flush the trailing run -- the loop above only emits on a change.
    rects.append((status, position, width))
    return rects
def plot_labeling(price, label):
    """Plot the price series with translucent background rectangles per
    labeled run: red = uptrend (1), green = downtrend (-1), grey = flat (0).
    """
    rects = trans2rect(label)
    # ymin is the band's bottom; ymax here is actually the band *height*.
    ymin, ymax = min(price), max(price) - min(price)
    for rect in rects:
        color = (1, 1, 1)
        if rect[0] == 1: color = (1, .2, .2)
        elif rect[0] == -1: color = (.2, 1, .2)
        elif rect[0] == 0: color = (.8, .8, .8)
        plt.gca().add_patch(patches.Rectangle((rect[1], ymin), rect[2], ymax, color=color, alpha=0.5))
    plt.plot(price)
    plt.show()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
matplotlib.use("Agg")
import datetime
import torch
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.multi_env_stocktrading import StockTradingEnv
from finrl.lxcalgorithms.gateway import Gateway
from finrl.model.multi_models import DRLAgent
from finrl.trade.backtest import backtest_stats as BackTestStats
def getEveryDay(begin_date, end_date):
    """Return every calendar date from begin_date to end_date, inclusive
    on both ends, as 'YYYY-MM-DD' strings.  Returns [] when end_date
    precedes begin_date."""
    start = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
    stop = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    span = (stop - start).days
    return [(start + datetime.timedelta(days=offset)).strftime("%Y-%m-%d")
            for offset in range(span + 1)]
def train_one():
    """Train per-stock A2C agents, then run a multi-agent trading evaluation.

    Phase 1 (training window): fetch/engineer data, build a training env,
    and create or incrementally update one A2C agent per stock, held in
    the Gateway (`food`).  Phase 2 (trade window): rebuild the env with a
    turbulence threshold, create agents for any newly listed stocks, run
    Multi_DRL_prediction, and save account values / actions / backtest
    stats to CSV.

    NOTE(review): this copy of the file lost its indentation; the nesting
    below was reconstructed from the control flow and should be confirmed
    against the original.
    """
    print("==============Start Fetching Data===========")
    print('GPU is :', torch.cuda.is_available())
    # The trailing commas create 1-tuples, unpacked on the next lines.
    start_date = config.START_DATE,
    end_date = config.START_TRADE_DATE,
    start_date = start_date[0]
    end_date = end_date[0]
    date_list = getEveryDay(start_date, end_date)
    food = Gateway()  # holds the per-stock agents (food.agents / food.agents_number)
    now = datetime.datetime.now().strftime("%Y%m%d-%Hh%M")
    for i in range(0, 1):
        df = YahooDownloader(
            start_date=start_date,
            end_date=end_date,
            ticker_list=config.DOW_30_TICKER,
        ).lxc_fetch_data()
        print("==============Start Feature Engineering===========")
        fe = FeatureEngineer(
            use_technical_indicator=True,
            tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
            use_turbulence=True,
            user_defined_feature=False,
        )
        processed = fe.preprocess_data(df)
        train = data_split(processed, start_date, end_date)
        stock_dimension = len(train.tic.unique())
        #print("train.tic.unique() is:")
        #print(train.tic.unique())
        print('stock_dimension is:', stock_dimension)
        state_space = (
            1
            + 2 * stock_dimension
            + len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension
        )
        env_kwargs = {
            "hmax": 100,
            "initial_amount": 1000000,
            "buy_cost_pct": 0.001,
            "sell_cost_pct": 0.001,
            "state_space": state_space,
            "stock_dim": stock_dimension,
            "tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
            # "action_space": stock_dimension,
            "action_space": 1,  # one action per agent (one agent per stock)
            "reward_scaling": 1e-4
        }
        e_train_gym = StockTradingEnv(df=train, **env_kwargs)
        env_train, _ = e_train_gym.get_sb_env()
        agent = DRLAgent(env=env_train)
        print("==============Model Training===========")
        print("start training ddpg model")
        ##################################################################################################a2c
        print("start pre_training model")
        multi_number = stock_dimension
        #temp = agent.get_model(model_name="a2c", lxc_stock_number=0)
        #agent.train_model(model=temp, tb_log_name="a2c", total_timesteps=1000)
        #print("e_train_gym.normal_high is:", e_train_gym.normal_high)
        #print("e_train_gym.normal_low is:", e_train_gym.normal_low)
        #e_trade_gym.normal_high = e_train_gym.normal_high
        #e_trade_gym.normal_low = e_train_gym.normal_low
        print("start main_training model")
        for j in range(0, multi_number):
            if(j+1 > food.agents_number):
                # No agent exists for stock j yet: create and train one.
                model_a2c = agent.get_model(model_name="a2c", lxc_stock_number=j, all_stock_number=multi_number)
                model_a2c.all_stock_number = multi_number
                model_a2c.env.reset()
                if(j != 0):
                    # Warm-start from the previously saved agent, fine-tune, save.
                    trained_a2c = agent.get_pre_model(model=model_a2c, tb_log_name="a2c", total_timesteps=1000,
                                                      lxcName="lxcMulti" + str(j - 1))
                    trained_a2c.lxc_stock_number = j
                    trained_a2c.all_stock_number = multi_number
                    trained_a2c = trained_a2c.online_learning(total_timesteps=1000, tb_log_name="a2c")
                    agent.save_pre_model(model=trained_a2c, tb_log_name="a2c", total_timesteps=1000,
                                         lxcName="lxcMulti" + str(j))
                else:
                    # The first agent is trained from scratch (saved as lxcMulti0).
                    trained_a2c = agent.train_lxc_model(
                        # model=model_a2c, tb_log_name="a2c", total_timesteps=80000,lxcType=1,lxcName="lxc2"
                        model=model_a2c, tb_log_name="a2c", total_timesteps=1000, lxcType= None, lxcName="lxcMulti" + str(j)
                    )
                food.agents.append(trained_a2c)
                print(j, "'s model is trained done")
            else:
                # Agent already exists: point it at the new env, learn online.
                print("here!!!")
                food.agents[j].all_stock_number = multi_number
                food.agents[j].env = env_train
                food.agents[j] = food.agents[j].online_learning(total_timesteps=1000, tb_log_name="a2c")
                env_train.reset()
        food.agents_number = multi_number
    # names=["date","open","high","low","close","volume","tic","day",]
    # df = pd.read_csv("./" + config.DATA_SAVE_DIR + "/" + "20210315-07h382" + ".csv",index_col=0)
    print('GPU is :', torch.cuda.is_available())
    #########################################################################
    print("==============Start Trading===========")
    start_date = config.START_TRADE_DATE,
    end_date = config.END_DATE,
    df_account_value = pd.DataFrame()
    df_actions = pd.DataFrame()
    start_date = start_date[0]
    end_date = end_date[0]
    date_list = getEveryDay(start_date, end_date)
    time_step = 30
    #for i in range(0, len(date_list) - time_step, time_step):
    for i in range(0, 1):
        df = YahooDownloader(
            start_date=start_date,
            end_date=end_date,
            ticker_list=config.DOW_30_TICKER,
        ).lxc_fetch_data()
        print("==============Start Feature Engineering===========")
        fe = FeatureEngineer(
            use_technical_indicator=True,
            tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
            use_turbulence=True,
            user_defined_feature=False,
        )
        processed = fe.preprocess_data(df)
        # NOTE: `train` here actually holds the *trade-window* data.
        train = data_split(processed, start_date, end_date)
        stock_dimension = len(train.tic.unique())
        # print("train.tic.unique() is:")
        # print(train.tic.unique())
        print('stock_dimension is:', stock_dimension)
        state_space = (
            1
            + 2 * stock_dimension
            + len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension
        )
        env_kwargs = {
            "hmax": 100,
            "initial_amount": 1000000,
            "buy_cost_pct": 0.001,
            "sell_cost_pct": 0.001,
            "state_space": state_space,
            "stock_dim": stock_dimension,
            "tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
            # "action_space": stock_dimension,
            "action_space": 1,
            "reward_scaling": 1e-4
        }
        # The turbulence threshold enables risk-off behavior in evaluation.
        e_train_gym = StockTradingEnv(df=train, turbulence_threshold=250.0, **env_kwargs)
        env_train, _ = e_train_gym.get_sb_env()
        agent = DRLAgent(env=env_train)
        print("==============Model Training===========")
        print("start training ddpg model")
        ##################################################################################################a2c
        print("start pre_training model")
        multi_number = stock_dimension
        # temp = agent.get_model(model_name="a2c", lxc_stock_number=0)
        # agent.train_model(model=temp, tb_log_name="a2c", total_timesteps=1000)
        # print("e_train_gym.normal_high is:", e_train_gym.normal_high)
        # print("e_train_gym.normal_low is:", e_train_gym.normal_low)
        # e_trade_gym.normal_high = e_train_gym.normal_high
        # e_trade_gym.normal_low = e_train_gym.normal_low
        print("start main_training model")
        ################################# Check whether new stocks have been listed #################################
        for j in range(food.agents_number, multi_number):
            model_a2c = agent.get_model(model_name="a2c", lxc_stock_number=j, all_stock_number=multi_number)
            model_a2c.all_stock_number = multi_number
            model_a2c.env.reset()
            trained_a2c = agent.get_pre_model(model=model_a2c, tb_log_name="a2c", total_timesteps=1000, lxcName="lxcMulti" + str(food.agents_number-1))
            trained_a2c.lxc_stock_number = j
            trained_a2c.all_stock_number = multi_number
            food.agents.append(trained_a2c)
        food.agents_number = multi_number
        ################################# Evaluation #################################
        temp_account_value, temp_actions = DRLAgent.Multi_DRL_prediction(
            model=food.agents, environment=e_train_gym, all_stock_number=multi_number
        )
        #print("temp_account_value is:",temp_account_value)
        #print("temp_actions is:", temp_actions)
        df_account_value = df_account_value.append(temp_account_value)
        df_actions = df_actions.append(temp_actions)
        #print("df_account_value is:", df_account_value)
        #print("df_actions is:", df_actions)
        #exit(0)
        ################################ Retraining (disabled) ######################################
        # The two string literals below are retained dead code.
        '''
for j in range(0, multi_number):
food.agents[j].all_stock_number = multi_number
env_train.reset()
food.agents[j].env = env_train
food.agents[j] = food.agents[j].online_learning(total_timesteps=1000, tb_log_name="a2c")
agent.save_pre_model(model=model_a2c, tb_log_name="a2c", total_timesteps=1000,lxcName="lxcMulti" + str(j))
#env_train.reset()
        '''
        '''
df_account_value, df_actions = DRLAgent.DRL_prediction(
model=multi_A2C, environment=e_trade_gym,lxc_env = e_train_gym
# model=trained_ddpg, environment=e_trade_gym
)
        '''
    print("df_account_value is:", df_account_value)
    print("df_actions is:", df_actions)
    df_account_value.to_csv(
        "./" + config.RESULTS_DIR + "/df_account_value_" + now + ".csv"
    )
    df_actions.to_csv("./" + config.RESULTS_DIR + "/df_actions_" + now + ".csv")
    print("==============Get Backtest all Results===========")
    perf_stats_all = BackTestStats(df_account_value)
    perf_stats_all = pd.DataFrame(perf_stats_all)
    perf_stats_all.to_csv("./" + config.RESULTS_DIR + "/perf_stats_all_" + now + ".csv")
| StarcoderdataPython |
# File: eduFaucet_config-EXAMPLE.py
# Common config for EduFaucet
VERSION = '1.0'
GITHUB = 'https://github.com/bitcoinedu-io/EduFaucet'
# SQLite database file used by the faucet.
DBFILE = 'eduFaucet-db.sqlite3'
# JSON-RPC credentials for the local node (example placeholders).
rpc_user = 'BTE'
rpc_pass = '<PASSWORD>'
URL = 'http://%s:%s@localhost:8908' % (rpc_user, rpc_pass)
chaininfo = {
    'name': 'EduFaucet',
    'unit': 'BTE'
}
# Chain consensus parameters (Bitcoin-style).
params = {
    'SubsidyHalvingInterval': 210000, # blocks, miner reward halving
    'PowTargetTimespan': 1*24*60*60, # sec, retarget time: one day
    'PowTargetSpacing': 10*60, # sec, block time 10 min
    'DifficultyAdjustmentInterval': 144 # blocks, PowTargetTimespan / PowTargetSpacing
}
| StarcoderdataPython |
12828774 |
# Github : https://github.com/adarsh2104
# HR-Profile: https://www.hackerrank.com/adarsh_2104
# Challenge : https://www.hackerrank.com/challenges/s10-quartiles
# Max Score : 30
def find_median(array):
    """Median of a sorted sequence.

    For an even length, the two middle values are averaged with floor
    division, matching the integer output this exercise expects.
    """
    mid = len(array) // 2
    if len(array) % 2:
        return array[mid]
    return (array[mid - 1] + array[mid]) // 2
# Read the count line (value itself unused) and the space-separated data,
# then print Q1, Q2, Q3 of the sorted values.
n = input()
input_array = sorted([int(x) for x in input().split()])
# Lower half: everything strictly before the middle element.
print(find_median(input_array[:len(input_array)//2]))
print(find_median(input_array))
# Upper half starts after the middle element (len//2 + len%2 skips the
# median itself when the length is odd).
print(find_median(input_array[len(input_array) // 2 + len(input_array) % 2:]))
| StarcoderdataPython |
# Repo: renjunxiang/enjoy_myself
import pandas as pd
import os
import numpy as np
# Offline demo: cluster scraped web-novel blurbs.
# DIR = os.path.dirname(__file__)
# Hard-coded project directory (the __file__-relative variant above is
# kept for reference).
DIR = 'D:\\github\\enjoy_myself\\crawler\\crawler_story'

# Rows were scraped into a plain array; restore the column names here.
data = np.load(DIR + '/data/' + '女频言情.npy')
data = pd.DataFrame(data,
                    columns=['title', 'url', 'author', 'size',
                             'update', 'describtions', 'stars', 'classify'])

from TextClustering.TextClustering import TextClustering

# Cluster on column 5: the novel descriptions.
model = TextClustering(texts=list(data.iloc[:, 5]))
model.text_cut(stopwords_path='default')
model.creat_vocab(size=50, window=5, vocab_savepath=DIR + '/model/size50_window5.model')
# model.word2matrix(method='frequency', top=50)
model.word2matrix(method='vector', top=20, similar_n=10)
model.decomposition()
# The label string means "variance ratio of the first two components".
print('前两个成分的特征占比:', model.pca.explained_variance_ratio_[0:2].sum())
model.clustering(X=model.decomposition_data, model_name='KMeans', n_clusters=10)
model.show_decomposition(background=True, show=True, pixel=None, textsize=50)
| StarcoderdataPython |
# GitHub stars: 1-10
import os
# Directory containing this module; the ABI fixture files live beside it.
cur_dir = os.path.dirname(__file__)

# ABIs are loaded once at import time and exposed as module constants.
eosio_token_abi = None
with open(os.path.join(cur_dir, 'data/eosio.token.abi'), 'r') as f:
    eosio_token_abi = f.read()

with open(os.path.join(cur_dir, 'data/eosio.system_eosio.abi'), 'r') as f:
    eosio_system_abi_eosio = f.read()

with open(os.path.join(cur_dir, 'data/eosio.system_uuos.abi'), 'r') as f:
    eosio_system_abi_uuos = f.read()

# The system-contract ABI differs per chain; select by chain name.
eosio_system_abi = dict(
    EOS = eosio_system_abi_eosio,
    UUOS = eosio_system_abi_uuos
)
1713307 |
from mxnet import nd
from mxnet.gluon import nn
from models.pointnet_globalfeat import PointNetfeat_vanilla
class PointNetDenseCls(nn.Block):
    """Per-point classification (segmentation) head over a PointNet
    feature extractor (MXNet Gluon)."""

    def __init__(self, num_points=2500, k=2, routing=None):
        super(PointNetDenseCls, self).__init__()
        self.num_points = num_points
        # k: number of per-point output classes.
        self.k = k
        # global_feat=False: the extractor returns per-point features.
        self.feat = PointNetfeat_vanilla(num_points, global_feat=False, routing=routing)
        # 1x1 convolutions: a shared per-point MLP 512 -> 256 -> 128 -> k.
        self.conv1 = nn.Conv1D(512, 1)
        self.conv2 = nn.Conv1D(256, 1)
        self.conv3 = nn.Conv1D(128, 1)
        self.conv4 = nn.Conv1D(self.k, 1)
        self.bn1 = nn.BatchNorm(in_channels=512)
        self.bn2 = nn.BatchNorm(in_channels=256)
        self.bn3 = nn.BatchNorm(in_channels=128)

    def forward(self, x):
        """Return (per-point class scores, transform from the extractor).

        The final layer has no activation/batchnorm: raw logits are
        returned for the downstream loss.
        """
        x, trans = self.feat(x)
        x = nd.relu(self.bn1(self.conv1(x)))
        x = nd.relu(self.bn2(self.conv2(x)))
        x = nd.relu(self.bn3(self.conv3(x)))
        x = self.conv4(x)
        # Move the class axis last: (batch, k, points) -> (batch, points, k).
        x = nd.transpose(x, (0,2,1))
        return x, trans
from flask import render_template, jsonify
from werkzeug.exceptions import HTTPException
from backend.restplus import api
from backend.ws.devices import ns as devices_namespace
def ndb_wsgi_middleware(wsgi_app, client):
    """Wrap a WSGI callable so that every request is handled inside a
    fresh context obtained from ``client.context()`` (as required by
    Google Cloud NDB)."""
    def wrapped(environ, start_response):
        # Enter a per-request context, then delegate to the wrapped app.
        with client.context():
            response = wsgi_app(environ, start_response)
        return response
    return wrapped
def create_app(app, client):
    """Finish configuring a Flask app: install the NDB middleware, the
    SPA entry route, a JSON error handler, and the REST-Plus API."""
    app.wsgi_app = ndb_wsgi_middleware(app.wsgi_app, client)  # Wrap the app in middleware.

    @app.route('/')
    def vue_client():
        # Serve the built front-end single-page app.
        return render_template('index.html')

    @app.errorhandler(Exception)
    def default_error_handler(e):
        '''Default error handler: every uncaught exception becomes a JSON
        {"error": ...} response with the appropriate status code.'''
        # pass through HTTP errors
        if isinstance(e, HTTPException):
            return jsonify(error=str(e)), getattr(e, 'code', 500)
        return jsonify(error=str(e)), 500

    api.init_app(app)
    api.add_namespace(devices_namespace)
# File: mavenn/__init__.py
"""MAVE-NN software package."""
# The functions imported here are the ONLY "maven.xxx()" functions that
# users are expected to interact with
# To regularize log calculations: TINY is a very small positive constant
# (sqrt of the smallest positive float32), added where needed to keep
# log()/division arguments strictly positive.
import numpy as np
TINY = np.sqrt(np.finfo(np.float32).tiny)
# Primary model class
from mavenn.src.model import Model
# For running functional tests
from mavenn.tests import run_tests
# Examples
from mavenn.src.examples import list_tutorials
from mavenn.src.examples import run_demo
from mavenn.src.examples import load_example_dataset
from mavenn.src.examples import load_example_model
# For loading models
from mavenn.src.utils import load
from mavenn.src.utils import split_dataset
# For visualizing G-P maps
from mavenn.src.visualization import heatmap
from mavenn.src.visualization import heatmap_pairwise
| StarcoderdataPython |
"""
This file is part of pybacnet.
pybacnet is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pybacnet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pybacnet. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Copyright (c) 2013 Building Robotics, Inc.
"""
"""
@author <NAME> <<EMAIL>>
@author <NAME> <<EMAIL>>
"""
import json
import re
import operator
import sys
from twisted.internet import threads, defer
from smap.driver import SmapDriver
from smap.util import periodicSequentialCall, find
from smap import actuate
from pybacnet import bacnet
def _get_class(name):
cmps = name.split('.')
assert len(cmps) > 1
(mod_name, class_name) = ('.'.join(cmps[:-1]), cmps[-1])
if mod_name in sys.modules:
mod = sys.modules[mod_name]
else:
mod = __import__(mod_name, globals(), locals(), [class_name])
return getattr(mod, class_name)
class BACnetDriver(SmapDriver):
    """Driver for polling BACnet points"""

    def setup(self, opts):
        """Initialize the BACnet stack, load the point database, and
        register a timeseries (plus optional actuator) for every
        selected point."""
        bacnet.Init(opts.get('iface', 'eth0'), '47900')
        # Point database: a list of devices, each carrying an 'objs' list.
        with open(opts.get('db'), 'r') as fp:
            self.db = json.load(fp)
        self.rate = int(opts.get('rate', 60))
        # Regex whitelists for device/point names (Python 2: map -> list).
        self.devices = map(re.compile, opts.get('devices', ['.*']))
        self.points = map(re.compile, opts.get('points', ['.*']))
        # Optional hooks given as dotted paths, resolved at setup time.
        self.ffilter = _get_class(opts.get('filter')) if opts.get('filter') else None
        self.pathnamer = _get_class(opts.get('pathnamer')) if opts.get('pathnamer') else None
        self.actuators = _get_class(opts.get('actuators')) if opts.get('actuators') else None
        if self.actuators:
            act_names = [a['name'] for a in self.actuators]
        for (dev, obj, path) in self._iter_points():
            unit = str(obj['unit']).strip()
            if unit.isdigit():
                # Numeric units are BACnet unit codes; translate to a name.
                unit = str(bacnet.type_str(int(unit)))
            self.add_timeseries(path, unit, data_type='double')
            # Add actuators
            if self.actuators and obj['name'] in act_names:
                actuator = find(lambda a: a['name'] == obj['name'], self.actuators)
                setup = {'obj': obj, 'dev': dev}
                if obj['props']['type'] in [bacnet.OBJECT_ANALOG_INPUT,
                                            bacnet.OBJECT_ANALOG_OUTPUT,
                                            bacnet.OBJECT_ANALOG_VALUE]:
                    setup['range'] = actuator['range']
                    setup['application_tag'] = bacnet.BACNET_APPLICATION_TAG_REAL
                    act = ContinuousActuator(**setup)
                    data_type = 'double'
                elif obj['props']['type'] in [bacnet.OBJECT_BINARY_INPUT,
                                              bacnet.OBJECT_BINARY_OUTPUT,
                                              bacnet.OBJECT_BINARY_VALUE]:
                    setup['application_tag'] = bacnet.BACNET_APPLICATION_TAG_ENUMERATED
                    act = BinaryActuator(**setup)
                    data_type = 'long'
                elif obj['props']['type'] in [bacnet.OBJECT_MULTI_STATE_INPUT,
                                              bacnet.OBJECT_MULTI_STATE_OUTPUT,
                                              bacnet.OBJECT_MULTI_STATE_VALUE]:
                    setup['application_tag'] = bacnet.BACNET_APPLICATION_TAG_ENUMERATED
                    setup['states'] = actuator['states']
                    act = DiscreteActuator(**setup)
                    data_type = 'long'
                # NOTE(review): if none of the branches above matched,
                # `act` is unbound (or stale from a prior iteration) here.
                if act:
                    print "adding actuator:", path, unit, act
                    self.add_actuator(path + "_act", unit, act, data_type=data_type, write_limit=5)

    @staticmethod
    def _matches(s, pats):
        # True when any pattern matches at the start of s.
        return len(filter(None, map(lambda p: p.match(s), pats))) > 0

    def get_path(self, dev, obj):
        """Return (dev, obj, path) where path comes from the pathnamer
        hook if configured, else '/<device name>/<object name>'."""
        if self.pathnamer:
            path = str(self.pathnamer(dev['name'], obj['name']))
        else:
            path = str('/' + dev['name'] + '/' + obj['name'])
        return (dev, obj, path)

    def _iter_points(self):
        """Yield (dev, obj, path) for each point selected either by the
        custom filter hook or by the device/point regex lists."""
        for dev in self.db:
            if self.ffilter:
                for obj in dev['objs']:
                    if self.ffilter(dev['name'], obj['name']):
                        yield self.get_path(dev, obj)
            else:
                if not self._matches(dev['name'], self.devices): continue
                # NOTE(review): dev['objs'][1:] skips the first entry --
                # presumably a device-header object; confirm the DB format.
                for obj in dev['objs'][1:]:
                    if not self._matches(obj['name'], self.points): continue
                    yield self.get_path(dev, obj)

    def start(self):
        # Poll every self.rate seconds via sMAP's sequential scheduler.
        self.caller = periodicSequentialCall(self.update)
        self.caller.start(self.rate)

    @defer.inlineCallbacks
    def update(self):
        """One poll cycle: read each point's present value in a worker
        thread and record it; IOError on a read skips that point."""
        for (dev, obj, path) in self._iter_points():
            try:
                val = yield threads.deferToThread(bacnet.read_prop,
                                                  dev['props'],
                                                  obj['props']['type'],
                                                  obj['props']['instance'],
                                                  bacnet.PROP_PRESENT_VALUE,
                                                  -1)
            except IOError:
                pass
            else:
                self._add(path, float(val))
class BACnetActuator(actuate.SmapActuator):
    """Mixin providing BACnet present-value read/write for sMAP actuators."""
    def __init__(self, **opts):
        self.dev = opts['dev']
        self.obj = opts['obj']
        self.priority = 16
        self.application_tag = opts['application_tag']
    def _target(self):
        # (device props, object type, object instance) triple used by every
        # read/write against the BACnet present-value property.
        return (self.dev['props'],
                self.obj['props']['type'],
                self.obj['props']['instance'])
    def get_state(self, request):
        """Read and return the point's current present value."""
        dev_props, obj_type, obj_inst = self._target()
        return bacnet.read_prop(dev_props, obj_type, obj_inst,
                                bacnet.PROP_PRESENT_VALUE, -1)
    def set_state(self, request, state):
        """Write *state* (or clear the override) and return the new value."""
        if 'priority' in request.args:
            self.priority = int(request.args['priority'][0])
        if 'clear' in request.args:
            self.clear()
        else:
            dev_props, obj_type, obj_inst = self._target()
            bacnet.write_prop(dev_props, obj_type, obj_inst,
                              bacnet.PROP_PRESENT_VALUE,
                              self.application_tag,
                              str(state),
                              self.priority)
        return self.get_state(None)
    def clear(self):
        """Relinquish the override by writing a NULL at the current priority."""
        dev_props, obj_type, obj_inst = self._target()
        return bacnet.write_prop(dev_props, obj_type, obj_inst,
                                 bacnet.PROP_PRESENT_VALUE,
                                 bacnet.BACNET_APPLICATION_TAG_NULL,
                                 "",
                                 self.priority)
class ContinuousActuator(BACnetActuator, actuate.ContinuousActuator):
    """BACnet-backed actuator over a continuous (analog) value range."""
    def __init__(self, **opts):
        # Initialise the range-checking base first, then the BACnet plumbing.
        actuate.ContinuousActuator.__init__(self, opts['range'])
        BACnetActuator.__init__(self, **opts)
class BinaryActuator(BACnetActuator, actuate.BinaryActuator):
    """BACnet-backed actuator for binary (on/off) points."""
    def __init__(self, **opts):
        # Initialise the binary base first, then the BACnet plumbing.
        actuate.BinaryActuator.__init__(self)
        BACnetActuator.__init__(self, **opts)
class DiscreteActuator(BACnetActuator, actuate.NStateActuator):
    """BACnet-backed actuator for multi-state (N discrete states) points."""
    def __init__(self, **opts):
        # Initialise the N-state base with the configured states first.
        actuate.NStateActuator.__init__(self, opts['states'])
        BACnetActuator.__init__(self, **opts)
| StarcoderdataPython |
9752742 | # Write a program that reads a string and returns a table of the letters of the alphabet in alphabetical order
# which occur in the string together with the number of times each letter occurs.
# Case should be ignored.
def lettercount(s):
    """Print each letter of the alphabet occurring in *s*, one per line in
    alphabetical order, followed by its number of occurrences.

    Case is ignored; non-alphabetic characters are skipped.

    :param s: input string to tally
    """
    from collections import Counter
    # Lower-case first so case is ignored; keep alphabetic characters only.
    tally = Counter(ch for ch in s.lower() if ch.isalpha())
    # Counter items sorted by letter give the alphabetical table.
    for letter, count in sorted(tally.items()):
        print(letter, count)
# lettercount("The Quick Brown Fox Jumps Over The Lazy Dog") #test case
| StarcoderdataPython |
5151944 | <gh_stars>0
from copy import deepcopy
from typing import List, Union, Optional, Tuple, Dict
from fedot.core.dag.graph import Graph
from fedot.core.dag.graph_operator import GraphOperator
from fedot.core.pipelines.node import Node, PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
class PipelineBuilder:
    """ Builder for incremental construction of directed acyclic Pipelines.
    Semantics:
    - Forward-only & addition-only (can't prepend or delete nodes).
    - Doesn't throw, doesn't fail: methods always have a way to interpret input given current graph state.
    - Is not responsible for the validity of resulting Pipeline (e.g. correct order, valid operations).
    - Builds always new pipelines (on copies of nodes), preserves its state between builds. State doesn't leak outside.
    """
    # An operation is either its name or a (name, parameters-dict) pair.
    OperationType = Union[str, Tuple[str, dict]]
    def __init__(self, *initial_nodes: Optional[Node]):
        """ Create builder with prebuilt nodes as origins of the branches. """
        # 'heads' holds the current tip of every branch; None entries dropped.
        self.heads: List[Node] = list(filter(None, initial_nodes))
    @property
    def _iend(self) -> int:
        # Index one past the last branch, i.e. the current number of branches.
        return len(self.heads)
    def add_node(self, operation_type: Optional[str], branch_idx: int = 0, params: Optional[Dict] = None):
        """ Add single node to pipeline branch of specified index.
        If there are no heads => adds single PrimaryNode.
        If there is single head => adds single SecondaryNode using head as input.
        If there are several heads => adds single SecondaryNode using as input the head indexed by head_idx.
        If input is None => do nothing.
        If branch_idx is out of bounds => appends new PrimaryNode.
        :param operation_type: new operation, possibly None
        :param branch_idx: index of the head to use as input for the new node
        :param params: parameters dictionary for the specific operation
        :return: self
        """
        if not operation_type:
            return self
        params = self._pack_params(operation_type, params)
        if branch_idx < len(self.heads):
            # Extend an existing branch: new node consumes the old tip.
            input_node = self.heads[branch_idx]
            self.heads[branch_idx] = SecondaryNode(operation_type, nodes_from=[input_node], content=params)
        else:
            # Out-of-bounds index: start a fresh branch instead of failing.
            self.heads.append(PrimaryNode(operation_type, content=params))
        return self
    def add_sequence(self, *operation_type: OperationType, branch_idx: int = 0):
        """ Same as .node() but for many operations at once.
        :param operation_type: operations for new nodes, either as an operation name
        or as a tuple of operation name and operation parameters.
        """
        for operation in operation_type:
            operation, params = self._unpack_operation(operation)
            self.add_node(operation, branch_idx, params)
        return self
    def grow_branches(self, *operation_type: Optional[OperationType]):
        """ Add single node to each branch.
        Argument position means index of the branch to grow.
        None operation means don't grow that branch.
        If there are no nodes => creates new branches.
        If number of input nodes is bigger than number of branches => extra operations create new branches.
        :param operation_type: operations for new nodes, either as an operation name
        or as a tuple of operation name and operation parameters.
        :return: self
        """
        for i, operation in enumerate(operation_type):
            operation, params = self._unpack_operation(operation)
            self.add_node(operation, i, params)
        return self
    def add_branch(self, *operation_type: Optional[OperationType], branch_idx: int = 0):
        """ Create branches at the tip of branch with branch_idx.
        None operations are filtered out.
        Number of new branches equals to number of provided operations.
        If there are no heads => will add several primary nodes.
        If there is single head => add several SecondaryNodes using head as the previous.
        If there are several heads => branch head indexed by head_idx.
        If branch_idx is out of bounds => adds PrimaryNodes as new heads at the end.
        If no not-None operations are provided, nothing is changed.
        :param operation_type: operations for new nodes, either as an operation name
        or as a tuple of operation name and operation parameters.
        :param branch_idx: index of the branch for branching its tip
        :return: self
        """
        operations = list(filter(None, operation_type))
        if not operations:
            return self
        if branch_idx < len(self.heads):
            # Replace the selected head with one new head per operation,
            # each consuming the removed head as its single input.
            input_node = self.heads.pop(branch_idx)
            for i, operation in enumerate(operations):
                operation, params = self._unpack_operation(operation)
                self.heads.insert(branch_idx + i, SecondaryNode(operation,
                                                                nodes_from=[input_node],
                                                                content=self._pack_params(operation, params)))
        else:
            # Out-of-bounds index: every operation starts a new primary branch.
            for operation in operations:
                operation, params = self._unpack_operation(operation)
                self.add_node(operation, self._iend, params)
        return self
    def join_branches(self, operation_type: Optional[str], params: Optional[Dict] = None):
        """ Joins all current branches with provided operation as ensemble node.
        If there are no branches => does nothing.
        If there is single branch => adds single SecondaryNode using it as input.
        If there are several branches => adds single SecondaryNode using all heads as inputs.
        :param operation_type: operation to use for joined node
        :param params: parameters dictionary for the specific operation
        :return: self
        """
        if self.heads and operation_type:
            content = self._pack_params(operation_type, params)
            new_head = SecondaryNode(operation_type, nodes_from=self.heads, content=content)
            self.heads = [new_head]
        return self
    def reset(self):
        """ Reset builder state. """
        self.heads = []
    def merge_with(self, following_builder) -> Optional['PipelineBuilder']:
        # Delegates to the module-level merge; see merge_pipeline_builders.
        return merge_pipeline_builders(self, following_builder)
    def to_nodes(self) -> List[Node]:
        """
        Return a deep copy of the current final nodes. Builder state is
        preserved (the builder keeps its own originals).
        :return: list of final nodes, possibly empty.
        """
        return deepcopy(self.heads)
    def to_pipeline(self) -> Optional[Pipeline]:
        """
        Builds new Pipeline from current tips of branches. Preserves builder state.
        :return: Pipeline if there exist nodes, None if there're no nodes.
        """
        return Pipeline(self.to_nodes()) if self.heads else None
    @staticmethod
    def _unpack_operation(operation: Optional[OperationType]) -> Tuple[Optional[str], Optional[Dict]]:
        # Normalise an OperationType into a (name, params) pair.
        if isinstance(operation, str) or operation is None:
            return operation, None
        else:
            return operation
    @staticmethod
    def _pack_params(name: str, params: Optional[dict]) -> Optional[dict]:
        # Node content is only attached when parameters are actually given.
        return {'name': name, 'params': params} if params else None
def merge_pipeline_builders(previous: PipelineBuilder, following: PipelineBuilder) -> Optional[PipelineBuilder]:
    """ Merge two pipeline builders.
    Merging is defined for cases one-to-many and many-to-one nodes,
    i.e. one final node to many initial nodes and many final nodes to one initial node.
    Merging is undefined for the case of many-to-many nodes and None is returned.
    Merging of the builder with itself is well-defined and leads to duplication of the pipeline.
    If one of the builders is empty -- the other one is returned, no merging is performed.
    State of the passed builders is preserved as they were, after merging new builder is returned.
    :return: PipelineBuilder if merging is well-defined, None otherwise.
    """
    if not following.heads:
        return previous
    elif not previous.heads:
        return following
    # Work on copies (to_nodes deep-copies) so neither input builder mutates.
    lhs_nodes_final = previous.to_nodes()
    rhs_tmp_graph = Graph(following.to_nodes())
    # Initial nodes of the right-hand side are those without inputs.
    rhs_nodes_initial = list(filter(lambda node: not node.nodes_from, rhs_tmp_graph.nodes))
    # If merging one-to-one or one-to-many
    if len(lhs_nodes_final) == 1:
        final_node = lhs_nodes_final[0]
        for initial_node in rhs_nodes_initial:
            rhs_tmp_graph.update_node(initial_node,
                                      SecondaryNode(initial_node.operation.operation_type, nodes_from=[final_node]))
    # If merging many-to-one
    elif len(rhs_nodes_initial) == 1:
        initial_node = rhs_nodes_initial[0]
        rhs_tmp_graph.update_node(initial_node,
                                  SecondaryNode(initial_node.operation.operation_type, nodes_from=lhs_nodes_final))
    # Merging is not defined for many-to-many case
    else:
        return None
    # Check that Graph didn't mess up with node types
    if not all(map(lambda n: isinstance(n, Node), rhs_tmp_graph.nodes)):
        raise ValueError("Expected Graph only with nodes of type 'Node'")
    # Need all root_nodes, hence GraphOperator (Pipeline.root_node returns just a single node or throws)
    root_nodes = GraphOperator(rhs_tmp_graph).root_node()
    # root_node() may return a single Node or a collection of them.
    merged_builder = PipelineBuilder(root_nodes) if isinstance(root_nodes, Node) else PipelineBuilder(*root_nodes)
    return merged_builder
| StarcoderdataPython |
12819916 | <gh_stars>10-100
import json
import os
import time
import sys
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset import prepare_dataset
from experiments.utils import construct_passport_kwargs_from_dict
from models.alexnet_passport_private import AlexNetPassportPrivate
from models.resnet_passport_private import ResNetPrivate
from models.layers.passportconv2d import PassportBlock
from models.layers.passportconv2d_private import PassportPrivateBlock
from models.losses.sign_loss import SignLoss
import shutil
class DatasetArgs():
    """Empty namespace used as an argparse-style container for dataset args."""
    pass
def train_maximize(origpassport, fakepassport, model, optimizer, criterion, trainloader, device, type):
    """Run one epoch of the fake-passport maximization attack.

    Optimizes *fakepassport* so that classification loss stays low while the
    distance to *origpassport* grows; depending on the attack *type*, the
    sign loss of any SignLoss modules is also backpropagated.

    NOTE: the parameter name ``type`` shadows the builtin but is kept for
    caller compatibility.

    :param origpassport: list of original passport tensors (fixed)
    :param fakepassport: list of trainable fake passport tensors
    :param model: network exposing ``model(d, ind=1)`` for the private branch
    :param optimizer: optimizer over the fake passports
    :param criterion: classification loss (e.g. cross entropy)
    :param trainloader: iterable of (input, target) batches
    :param device: torch device for the batches
    :param type: attack variant string ('fake2-*', 'fake3-*', or other)
    :return: dict of per-batch averaged metrics plus elapsed 'time' in seconds
    """
    model.train()
    loss_meter = 0
    signloss_meter = 0
    maximizeloss_meter = 0
    mseloss_meter = 0
    csloss_meter = 0
    acc_meter = 0
    signacc_meter = 0
    start_time = time.time()
    mse_criterion = nn.MSELoss()
    cs_criterion = nn.CosineSimilarity()  # cosine similarity
    for k, (d, t) in enumerate(trainloader):
        d = d.to(device)
        t = t.to(device)
        optimizer.zero_grad()
        pred = model(d, ind=1)  # private graph
        loss = criterion(pred, t)
        # Accumulate sign loss/accuracy over all SignLoss modules.
        signloss = torch.tensor(0.).to(device)
        signacc = torch.tensor(0.).to(device)
        count = 0
        for m in model.modules():
            if isinstance(m, SignLoss):
                signloss += m.loss
                signacc += m.acc
                count += 1
        maximizeloss = torch.tensor(0.).to(device)
        mseloss = torch.tensor(0.).to(device)
        csloss = torch.tensor(0.).to(device)
        for l, r in zip(origpassport, fakepassport):
            mse = mse_criterion(l, r)
            cs = cs_criterion(l.view(1, -1), r.view(1, -1)).mean()
            csloss += cs
            mseloss += mse
            # Maximize the passport distance by minimizing the inverse MSE.
            maximizeloss += 1 / mse
        if 'fake2-' in type:
            (loss).backward()  # fake2: only cross-entropy loss is backpropagated
        elif 'fake3-' in type:
            (loss + maximizeloss).backward()  # fake3: sign loss not backpropagated
        else:
            (loss + maximizeloss + signloss).backward()  # fake3_S: sign loss included
        torch.nn.utils.clip_grad_norm_(fakepassport, 2)  # gradient clipping
        optimizer.step()
        acc = (pred.max(dim=1)[1] == t).float().mean()
        loss_meter += loss.item()
        acc_meter += acc.item()
        signloss_meter += signloss.item()
        # Guard against division by zero when the model contains no SignLoss
        # modules (mirrors the handling in test_fake).
        if count:
            signacc_meter += signacc.item() / count
        maximizeloss_meter += maximizeloss.item()
        mseloss_meter += mseloss.item()
        csloss_meter += csloss.item()
        print(f'Batch [{k + 1}/{len(trainloader)}]: '
              f'Loss: {loss_meter / (k + 1):.4f} '
              f'Acc: {acc_meter / (k + 1):.4f} '
              f'Sign Loss: {signloss_meter / (k + 1):.4f} '
              f'Sign Acc: {signacc_meter / (k + 1):.4f} '
              f'MSE Loss: {mseloss_meter / (k + 1):.4f} '
              f'Maximize Dist: {maximizeloss_meter / (k + 1):.4f} '
              f'CS: {csloss_meter / (k + 1):.4f} ({time.time() - start_time:.2f}s)',
              end='\r')
    print()
    loss_meter /= len(trainloader)
    acc_meter /= len(trainloader)
    signloss_meter /= len(trainloader)
    signacc_meter /= len(trainloader)
    maximizeloss_meter /= len(trainloader)
    mseloss_meter /= len(trainloader)
    csloss_meter /= len(trainloader)
    return {'loss': loss_meter,
            'signloss': signloss_meter,
            'acc': acc_meter,
            'signacc': signacc_meter,
            'maximizeloss': maximizeloss_meter,
            'mseloss': mseloss_meter,
            'csloss': csloss_meter,
            # Fixed sign: report positive elapsed seconds
            # (was 'start_time - time.time()', inconsistent with test_fake).
            'time': time.time() - start_time}
def test_fake(model, criterion, valloader, device):
    """Evaluate *model* on *valloader* using the private pass (``ind=1``).

    Accumulates classification loss/accuracy plus the sign loss/accuracy of
    any SignLoss modules inside *model*, and returns per-batch averages.

    :param model: network exposing ``model(d, ind=1)`` for the private branch
    :param criterion: classification loss (e.g. cross entropy)
    :param valloader: iterable of (input, target) batches
    :param device: torch device for the batches
    :return: dict with keys 'loss', 'signloss', 'acc', 'signacc', 'time'
    """
    model.eval()
    loss_meter = 0
    signloss_meter = 0
    acc_meter = 0
    signacc_meter = 0
    start_time = time.time()
    with torch.no_grad():
        for k, (d, t) in enumerate(valloader):
            d = d.to(device)
            t = t.to(device)
            pred = model(d, ind=1)  # private graph
            loss = criterion(pred, t)
            # Accumulate sign loss/accuracy over all SignLoss modules.
            signloss = torch.tensor(0.).to(device)
            signacc = torch.tensor(0.).to(device)
            count = 0
            for m in model.modules():
                if isinstance(m, SignLoss):
                    signloss += m.get_loss()
                    signacc += m.get_acc()
                    count += 1
            acc = (pred.max(dim=1)[1] == t).float().mean()
            loss_meter += loss.item()
            acc_meter += acc.item()
            signloss_meter += signloss.item()
            # Explicit guard instead of the previous bare `except: pass`:
            # a model without SignLoss modules contributes no sign accuracy.
            if count:
                signacc_meter += signacc.item() / count
            print(f'Batch [{k + 1}/{len(valloader)}]: '
                  f'Loss: {loss_meter / (k + 1):.4f} '
                  f'Acc: {acc_meter / (k + 1):.4f} '
                  f'Sign Loss: {signloss_meter / (k + 1):.4f} '
                  f'Sign Acc: {signacc_meter / (k + 1):.4f} ({time.time() - start_time:.2f}s)',
                  end='\r')
    print()
    loss_meter /= len(valloader)
    acc_meter /= len(valloader)
    signloss_meter /= len(valloader)
    signacc_meter /= len(valloader)
    return {'loss': loss_meter,
            'signloss': signloss_meter,
            'acc': acc_meter,
            'signacc': signacc_meter,
            'time': time.time() - start_time}
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='fake attack 3: create another passport maximized from current passport')
    # parser.add_argument('--rep', default=1, type=int, help='training id')
    parser.add_argument('--rep', default=1, type=str, help='training comment')
    parser.add_argument('--arch', default='resnet18', choices=['alexnet', 'resnet18'])
    parser.add_argument('--dataset', default='cifar100', choices=['cifar10', 'cifar100'])
    parser.add_argument('--flipperc', default=0, type=float,
                        help='flip percentange 0~1')
    parser.add_argument('--scheme', default=1, choices=[1, 2, 3], type=int)
    parser.add_argument('--loadpath', default='', help='path to model to be attacked')
    parser.add_argument('--passport-config', default='', help='path to passport config')
    args = parser.parse_args()
    # NOTE(review): the two lines below override whatever the user passed on
    # the command line with hard-coded values (scheme 3 and an absolute,
    # machine-specific checkpoint path) -- confirm this is intentional.
    args.scheme = 3
    args.loadpath = "/data-x/g12/zhangjie/DeepIPR/baseline/resnet18_cifar100_v3_all/"
    # print(args.loadpath.split('/')[-2])
    # sys.exit(0)
    print(args.loadpath)
| StarcoderdataPython |
5010111 | # -*- coding: utf-8 -*-
# Импорт библиотек
import logging
import os.path
import sys
import time
import click
import pandas as pd
from datetime import datetime, timedelta
from terminaltables import AsciiTable
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cointrader import db, STRATEGIES
from cointrader.config import Config, get_path_to_config
from cointrader.exchange import Poloniex, Market
from cointrader.bot import init_db, get_bot, create_bot, Active, Bots_list
# Создание лога
logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG,
filename=u'cointrader.log')
log = logging.getLogger(__name__)
class Context(object):
    """Shared CLI state passed between click commands.

    Holds the exchange handle (set by the top-level command group) and
    produces authentication nonces on demand.
    """
    def __init__(self):
        # Populated by the top-level group before any subcommand runs.
        self.exchange = None
    @property
    def nonce(self):
        """
        Returns a nonce
        Used in authentication
        """
        return int((time.time() + 0.5) * 1000 * 1050)
# Создание пустого декоратора
pass_context = click.make_pass_decorator(Context, ensure=True)
# Создание группы команд
@click.group()
@pass_context
def main(ctx):
    """Console script for cointrader on the Poloniex exchange
    :param ctx:
    """
    # Prepare the database schema and load the user configuration.
    init_db()
    # NOTE(review): the file handle opened here is never explicitly closed.
    config = Config(open(get_path_to_config(), "r"))
    # Exchange handle shared with all subcommands via the click context.
    ctx.exchange = Poloniex(config, ctx.nonce)
# Добавляем команды
@click.command()
@click.argument("market")
@click.option("--resolution", help="Resolution of the chart which is used for trend analysis", default="30m")
@click.option("--automatic", help="Start cointrader in automatic mode.", is_flag=True)
@click.option("--strategy", help="Stratgegy used for trading.", default="trend", type=click.Choice(STRATEGIES.keys()))
@click.option("--verbose", help="Вывод на экран логируемых сообщений.", is_flag=False)
@click.option("--percent", help="Процент торговли от всей суммы.", is_flag=False)
@click.option("--best", help="", is_flag=False)
@click.option("--searchpoint", help="", is_flag=False)
@click.option("--btc", help="trading value of BTC", default=0.0, type=float)
@click.option("--update_profit", help="Start circle updates profit list for bots", is_flag=False)
# @click.option("--best_pass_nth", help="", default="0")
@pass_context
def start(ctx, market, resolution, automatic, strategy, verbose, percent, best, searchpoint, btc, update_profit):
    """Start a new bot on the given market and the given amount of BTC"""
    # Build the market on which the bot will operate
    # First check if the given market is a valid market. If not exit
    # here with a error message.
    # If the market is valid create a real market instance of and
    # instance for backtests depending on the user input.
    market = set_market(ctx, market, backtrade=True)
    # Check if the given resolution is supported
    if not ctx.exchange.is_valid_resolution(resolution):
        valid_resolutions = ", ".join(ctx.exchange.resolutions.keys())
        click.echo("Resolution {} is not supported.\n"
                   "Please choose one of the following: {}".format(resolution,
                                                                   valid_resolutions))
        sys.exit(1)
    # Initialise a strategy.
    strategy = STRATEGIES[strategy]()
    # Backtest window: the last 1.5 days (see set_start_end).
    start, end = set_start_end()
    # Scan the exchange and backtest candidate pairs to pick the best one.
    best_pair, best_testing_market = find_best_pair(automatic, ctx, end, market, percent, resolution, start, strategy,
                                                    verbose, searchpoint, btc, update_profit)
    trade_to_minus = False
    # if int(best_pass_nth) == 0:
    if not best_testing_market:
        # No profitable candidates were found in the backtest pass.
        trade_to_minus = True
    elif best_pair is not None:
        if best and not is_active(best_pair["market"]):
            # --best: trade the single most profitable, not-yet-active pair.
            print("\nВыбрана пара: %s, заработок: %f" % (best_pair["market"]._name, best_pair["profit"]))
            best_pair["market"]._backtrade = False
            bot = get_bot(best_pair["market"], strategy, resolution, start, end, verbose, percent, automatic,
                          memory_only=False, btc=btc)
            # trade_to_minus = bot.start(backtest=False, automatic=automatic)
            trade_to_minus = bot.start(backtest=False, automatic=True)
            delete_bot(bot)
        elif best_testing_market[-1]["profit"] > 1 and not best and not is_active(best_testing_market[-1]["market"]):
            # Without --best: trade the last candidate if its profit exceeds 1.
            best_testing_market[-1]["market"]._backtrade = False
            bot = get_bot(best_testing_market[-1]["market"], strategy, resolution, start, end, verbose, percent, automatic,
                          memory_only=False, btc=btc)
            trade_to_minus = bot.start(backtest=False, automatic=automatic)
            delete_bot(bot)
        if trade_to_minus:
            print("На данной паре заработок отсутсвует.")
    # Keep searching/trading in a loop when --best was requested or the
    # previous trade ended without profit.
    if best:
        to_do = True
    elif trade_to_minus:
        to_do = True
    else:
        to_do = False
    if to_do:
        while to_do:
            if trade_to_minus:
                # Rescan for fresh candidates before the next trading round.
                best_pair, best_testing_market = find_best_pair(automatic, ctx, end, market, percent, resolution, start,
                                                                strategy,
                                                                verbose, searchpoint, btc=btc)
            for item in best_testing_market:
                item["market"]._backtrade = False
                if not is_active(item["market"]):
                    print("\nВыбрана пара: %s, заработок: %f" % (item["market"]._name, item["profit"]))
                    bot = get_bot(item["market"], strategy, resolution, start, end, verbose, percent, automatic,
                                  memory_only=False, btc=btc)
                    # bot.start returns whether trading ended without profit;
                    # a falsy result ends the outer while loop.
                    to_do = bot.start(backtest=False, automatic=automatic)
                    delete_bot(bot)
            trade_to_minus = True
def delete_bot(bot):
    """Remove *bot* and all of its activity/trade rows from the database."""
    try:
        for active in bot.activity:
            db.delete(active)
        for current_trade in bot.trades:
            db.delete(current_trade)
        db.delete(bot)
        db.commit()
        del bot
    finally:
        # NOTE(review): this finally-block is a no-op -- exceptions from the
        # deletes still propagate. Possibly `except` was intended; confirm.
        pass
def is_active(market):
    """Return True if an Active row already exists for *market*'s pair."""
    match = db.query(Active).filter(Active.currency == market._name).first()
    return match is not None
def find_best_pair(automatic, ctx, end, market, percent, resolution, start, strategy, verbose, searchpoint, btc,
                   update_profit=False):
    """Scan the exchange for trending pairs and backtest each candidate.

    Polls the exchange until at least one pair passes the volume/change
    filter, then backtests the candidates (plus *market* itself) and collects
    those with profit > 1 into a list sorted by profit (descending).

    :param market: base market whose exchange is scanned
    :param update_profit: if True, use a looser filter, backtest every
        candidate and persist the resulting profit list
    :return: tuple (best_pair, best_testing_market); best_pair is the most
        profitable entry or None, best_testing_market a (possibly empty) list
        of {"market": ..., "profit": ...} dicts
    """
    from operator import itemgetter
    best_pair = None
    best_testing_market = []
    to_do = True
    while to_do:
        test_markets = []
        bot = get_bot(market, strategy, resolution, start, end, verbose, percent, automatic, memory_only=False, btc=btc)
        df = pd.DataFrame.from_dict(bot._market._exchange.markets, orient='index')
        if len(df):
            # Poll until the filter yields at least one candidate pair.
            while not test_markets:
                if update_profit:
                    mask = (df['volume'] > 50)
                else:
                    mask = (df['volume'] > 50) & (df['change'] > -15) & (df['change'] < 3)
                test_markets = print_top_trends(ctx, df, market, backtrade=False, searchpoint=searchpoint, mask=mask)
                if not test_markets:
                    time.sleep(1)
                    if update_profit:
                        print("Ищу пары по условию: (df['volume'] > 50) & (df['change'] > -15) & (df['change'] < 15)")
                    else:
                        print("Ищу пары по условию: (df['volume'] > 50) & (df['change'] > -15) & (df['change'] < 3)")
            delete_bot(bot)
            # Always backtest the originally requested market as well.
            test_markets.append(set_market(ctx, market._name, backtrade=True))
            index = 0
            for current_market in test_markets:
                # Without update_profit, limit the scan to the first 8 pairs.
                if index > 7 and not update_profit:
                    break
                bot = create_bot(current_market, strategy, resolution, start, end, verbose, percent, automatic=True,
                                 btc=btc)
                for trade in bot.trades:
                    try:
                        # Best-effort cleanup of stale trades (keep the first).
                        if trade != bot.trades[0]:
                            db.delete(trade)
                    except Exception:
                        pass
                if bot.spread > 0.5:
                    print(
                        "Валюта {} имеет порог покупки {:.2f}%, будет пропущена.".format(bot._market.currency, bot.spread))
                    continue
                bot.start(backtest=True, automatic=True)
                delete_bot(bot)
                if bot.profit > 1 and bot.trend != 'Рынок ВВЕРХ':
                    best_testing_market.append({"market": bot._market, "profit": bot.profit})
                    if not is_active(bot._market) and not update_profit:
                        break
                index += 1
            best_testing_market = sorted(best_testing_market, key=itemgetter('profit'), reverse=True)
            best_pair = best_markets_print(best_testing_market)
            if update_profit and best_testing_market:
                bots_list = Bots_list('list =' + str(best_testing_market))
                # NOTE(review): exec() on a model instance looks suspicious --
                # confirm the intended behaviour of Bots_list here.
                exec(bots_list)
                db.add(bots_list)
                # Fixed: `db.commit` without parentheses never committed.
                db.commit()
            to_do = False
        else:
            to_do = False
    # Returns (None, []) when the exchange reported no markets at all
    # (previously this path could raise NameError / return None implicitly).
    return best_pair, best_testing_market
def best_markets_print(best_testing_market):
    """Print a table of the profitable pairs and return the best one.

    :param best_testing_market: list of {"market": Market, "profit": float}
    :return: the entry with the highest positive profit, or None
    """
    best_pair = None
    out = [["ПАРА", "ЗАРАБОТОК"]]
    for item in best_testing_market:
        if item["profit"] > 0:
            # Track the most profitable entry seen so far
            # (fixed: identity check `is None` instead of `== None`).
            if best_pair is None or item["profit"] > best_pair["profit"]:
                best_pair = item
            out.append([item["market"]._name, round(item["profit"], 2)])
    table = AsciiTable(out).table
    print("\n".join(["\nПрибыльные пары:", table]))
    return best_pair
def print_top_trends(ctx, df, market, backtrade, searchpoint, mask):
    """Print the pairs matching *mask* sorted by change and return them as
    backtest Market objects (excluding *market* itself).

    NOTE(review): the *backtrade* and *searchpoint* parameters are not used
    in this body -- candidates are always created with backtrade=True.

    :param df: DataFrame of exchange markets (index = pair name)
    :param mask: boolean filter over *df* rows
    :return: list of Market objects for the matching pairs
    """
    # Normalise column dtypes before filtering/sorting.
    df['change'] = df.change.astype(float)
    df['volume'] = df.volume.astype(float)
    df_filtered = df[mask]
    df_filtered = df_filtered.sort_values(by=['change'], ascending=True)
    test_markets = []
    out = [["ПАРА", "ОБЪЕМ", "ИЗМЕНЕНИЯ"]]
    for current_market, row in df_filtered.iterrows():
        volume, change = (row['volume'], row['change'])
        values = []
        values.append(current_market)
        values.append(volume)
        values.append(str(change) + "%")
        out.append(values)
        # Skip the base market itself; it is handled by the caller.
        if current_market != market._name:
            test_markets.append(set_market(ctx, current_market, backtrade=True))
    table = AsciiTable(out).table
    print("\n".join(["\nРастущий тренд:", table]))
    return test_markets
def set_market(ctx, market, backtrade):
    """Validate *market* against the exchange and wrap it in a Market.

    Exits the process with an error message when the pair is unknown.
    """
    if not ctx.exchange.is_valid_market(market):
        click.echo("Market {} is not available".format(market))
        sys.exit(1)
    return Market(ctx.exchange, market, backTrade=backtrade)
def set_start_end():
    """Return the (start, end) time window used for backtesting.

    The window always covers the last 1.5 days ending now.  The previous
    version wrapped this in a dead ``try/except ValueError`` -- none of the
    datetime operations here can raise ValueError.

    :return: tuple (start, end) of naive local datetimes with
        end - start == timedelta(days=1.5)
    """
    end = datetime.now()
    start = end - timedelta(days=1.5)
    return start, end
# Register the `start` subcommand with the top-level click group.
main.add_command(start)
# Script entry point
if __name__ == "__main__":
    main()
| StarcoderdataPython |
110388 | <gh_stars>100-1000
"""An implementation of the Python Database API Specification v2.0
using Teradata ODBC."""
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import atexit
import collections
import ctypes
import platform
import re
import sys
import threading
from . import util, datatypes
from .api import * # @UnusedWildImport # noqa
logger = logging.getLogger(__name__)
# ODBC Constants
SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC2, SQL_OV_ODBC3 = 200, 2, 3
SQL_ATTR_QUERY_TIMEOUT, SQL_ATTR_AUTOCOMMIT = 0, 102
SQL_NULL_HANDLE, SQL_HANDLE_ENV, SQL_HANDLE_DBC, SQL_HANDLE_STMT = 0, 1, 2, 3
SQL_SUCCESS, SQL_SUCCESS_WITH_INFO = 0, 1,
SQL_ERROR, SQL_INVALID_HANDLE = -1, -2
SQL_NEED_DATA, SQL_NO_DATA = 99, 100
SQL_CLOSE, SQL_UNBIND, SQL_RESET_PARAMS = 0, 2, 3
SQL_PARAM_TYPE_UNKNOWN = 0
SQL_PARAM_INPUT, SQL_PARAM_INPUT_OUTPUT, SQL_PARAM_OUTPUT = 1, 2, 4
SQL_ATTR_PARAM_BIND_TYPE = 18
SQL_ATTR_ROWS_FETCHED_PTR, SQL_ATTR_ROW_STATUS_PTR = 26, 25
SQL_ATTR_ROW_ARRAY_SIZE = 27
SQL_ATTR_PARAMS_PROCESSED_PTR, SQL_ATTR_PARAM_STATUS_PTR = 21, 20
SQL_ATTR_PARAMSET_SIZE = 22
SQL_PARAM_BIND_BY_COLUMN = 0
SQL_NULL_DATA, SQL_NTS = -1, -3
SQL_IS_POINTER, SQL_IS_UINTEGER, SQL_IS_INTEGER = -4, -5, -6
SQL_FETCH_NEXT, SQL_FETCH_FIRST, SQL_FETCH_LAST = 1, 2, 4
SQL_SIGNED_OFFSET = -20
SQL_C_BINARY, SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY = -2, -2, -3, -4
SQL_C_WCHAR, SQL_WCHAR, SQL_WVARCHAR, SQL_WLONGVARCHAR = -8, -8, -9, -10
SQL_C_SBIGINT = -5 + SQL_SIGNED_OFFSET
SQL_FLOAT = 6
SQL_C_FLOAT = SQL_REAL = 7
SQL_C_DOUBLE = SQL_DOUBLE = 8
SQL_DESC_TYPE_NAME = 14
SQL_COMMIT, SQL_ROLLBACK = 0, 1
SQL_STATE_DATA_TRUNCATED = '01004'
SQL_STATE_CONNECTION_NOT_OPEN = '08003'
SQL_STATE_INVALID_TRANSACTION_STATE = '25000'
SQLLEN = ctypes.c_ssize_t
SQLULEN = ctypes.c_size_t
SQLUSMALLINT = ctypes.c_ushort
SQLSMALLINT = ctypes.c_short
SQLINTEGER = ctypes.c_int
SQLFLOAT = ctypes.c_float
SQLDOUBLE = ctypes.c_double
SQLBYTE = ctypes.c_ubyte
SQLCHAR = ctypes.c_char
SQLWCHAR = ctypes.c_wchar
SQLRETURN = SQLSMALLINT
SQLPOINTER = ctypes.c_void_p
SQLHANDLE = ctypes.c_void_p
ADDR = ctypes.byref
PTR = ctypes.POINTER
ERROR_BUFFER_SIZE = 2 ** 10
SMALL_BUFFER_SIZE = 2 ** 12
LARGE_BUFFER_SIZE = 2 ** 20
TRUE = 1
FALSE = 0
odbc = None
hEnv = None
drivers = None
lock = threading.Lock()
pyVer = sys.version_info[0]
osType = platform.system()
# The amount of seconds to wait when submitting non-user defined SQL (e.g.
# set query bands, etc).
QUERY_TIMEOUT = 120
# On Python 3 the built-in `unicode` type no longer exists; alias it to str
# so the rest of the module can use a single name.
if pyVer > 2:
    unicode = str  # @ReservedAssignment
# Define OS specific methods for handling buffers and strings.
# On macOS/Windows the driver manager exchanges wide (UTF-16) strings, so
# ctypes unicode buffers are used directly; on Unix/Linux byte buffers with
# UTF-8 encoding are used instead (see the else-branch below).
if osType == "Darwin" or osType == "Windows" or osType.startswith('CYGWIN'):
    # Mac OSx and Windows
    def _createBuffer(l):
        # Buffer of *l* wide characters.
        return ctypes.create_unicode_buffer(l)
    def _inputStr(s, l=None):
        # Convert *s* (or its str() form) into a wide-character buffer.
        if s is None:
            return None
        return ctypes.create_unicode_buffer(
            (s if util.isString(s) else str(s)), l)
    def _outputStr(s):
        # Extract the Python string from a wide-character buffer.
        return s.value
    def _convertParam(s):
        # Parameters are passed as native (wide) strings on these platforms.
        if s is None:
            return None
        return s if util.isString(s) else str(s)
else:
    # Unix/Linux
    # Multiply by 3 as one UTF-16 character can require 3 UTF-8 bytes.
    def _createBuffer(l):
        return ctypes.create_string_buffer(l * 3)
    def _inputStr(s, l=None):
        # Encode to UTF-8 bytes for the driver manager.
        if s is None:
            return None
        return ctypes.create_string_buffer(
            (s if util.isString(s) else str(s)).encode('utf8'), l)
    def _outputStr(s):
        # Decode up to the first NUL terminator back into a Python string.
        return unicode(s.raw.partition(b'\00')[0], 'utf8')
    def _convertParam(s):
        if s is None:
            return None
        return (s if util.isString(s) else str(s)).encode('utf8')
    # On Unix the "wide char" ODBC type is handled as single bytes (UTF-8).
    SQLWCHAR = ctypes.c_char
connections = []
def cleanupConnections():
    """Close any connections still open at interpreter exit."""
    if not connections:
        return
    logger.warn(
        "%s open connections found on exit, attempting to close...",
        len(connections))
    # Iterate over a copy: closing a connection removes it from the list.
    for conn in list(connections):
        conn.close()
def getDiagnosticInfo(handle, handleType=SQL_HANDLE_STMT):
    """Gets diagnostic information associated with ODBC calls, particularly
    when errors occur.

    Iterates SQLGetDiagRecW over all diagnostic records for *handle* and
    returns a list of (sqlState, message, nativeErrorCode) tuples.
    """
    info = []
    infoNumber = 1
    while True:
        sqlState = _createBuffer(6)
        nativeError = SQLINTEGER()
        messageBuffer = _createBuffer(ERROR_BUFFER_SIZE)
        messageLength = SQLSMALLINT()
        rc = odbc.SQLGetDiagRecW(handleType, handle, infoNumber, sqlState,
                                 ADDR(nativeError), messageBuffer,
                                 len(messageBuffer), ADDR(messageLength))
        if rc == SQL_SUCCESS_WITH_INFO and \
                messageLength.value > ctypes.sizeof(messageBuffer):
            # Resize buffer to fit entire message.
            # Retry the SAME record (infoNumber unchanged) with a big buffer.
            messageBuffer = _createBuffer(messageLength.value)
            continue
        if rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO:
            info.append(
                (_outputStr(sqlState), _outputStr(messageBuffer),
                 abs(nativeError.value)))
            infoNumber += 1
        elif rc == SQL_NO_DATA:
            # No more diagnostic records.
            return info
        elif rc == SQL_INVALID_HANDLE:
            raise InterfaceError(
                'SQL_INVALID_HANDLE',
                "Invalid handle passed to SQLGetDiagRecW.")
        elif rc == SQL_ERROR:
            # SQL_ERROR after at least one record: treat as end of records.
            if infoNumber > 1:
                return info
            raise InterfaceError(
                "SQL_ERROR", "SQL_ERROR returned from SQLGetDiagRecW.")
        else:
            raise InterfaceError(
                "UNKNOWN_RETURN_CODE",
                "SQLGetDiagRecW returned an unknown return code: %s", rc)
def checkStatus(rc, hEnv=SQL_NULL_HANDLE, hDbc=SQL_NULL_HANDLE,
                hStmt=SQL_NULL_HANDLE, method="Method", ignore=None):
    """ Check return status code and log any information or error messages.
    If error is returned, raise exception.

    The most specific handle supplied (statement, then connection, then
    environment) is used to pull diagnostic records.  Returns the list of
    SQLSTATE codes collected, which callers may inspect (e.g. close()).
    *ignore* is an optional collection of SQLSTATEs that must not raise.
    """
    sqlState = []
    logger.trace("%s returned status code %s", method, rc)
    if rc not in (SQL_SUCCESS, SQL_NO_DATA):
        # Prefer the narrowest handle available for diagnostics.
        if hStmt != SQL_NULL_HANDLE:
            info = getDiagnosticInfo(hStmt, SQL_HANDLE_STMT)
        elif hDbc != SQL_NULL_HANDLE:
            info = getDiagnosticInfo(hDbc, SQL_HANDLE_DBC)
        else:
            info = getDiagnosticInfo(hEnv, SQL_HANDLE_ENV)
        for i in info:
            sqlState.append(i[0])
            if rc == SQL_SUCCESS_WITH_INFO:
                # Informational only; log and keep going.
                logger.debug(
                    u"{} succeeded with info: [{}] {}".format(method,
                                                              i[0], i[1]))
            elif not ignore or i[0] not in ignore:
                logger.debug((u"{} returned non-successful error code "
                              u"{}: [{}] {}").format(method, rc, i[0], i[1]))
                msg = ", ".join(map(lambda m: m[1], info))
                # A message with no letters (or an "I" state) usually means
                # the driver manager itself could not produce diagnostics —
                # point the user at the likely configuration problem.
                if re.search(r'[^0-9\s]', msg) is None or i[0] == 'I':
                    msg = msg + (". Check that the ODBC driver is installed "
                                 "and the ODBCINI or ODBCINST environment "
                                 "variables are correctly set.")
                raise DatabaseError(i[2], u"[{}] {}".format(i[0], msg), i[0])
            else:
                logger.debug(
                    u"Ignoring return of {} from {}: [{}] {}".format(rc,
                                                                     method,
                                                                     i[0],
                                                                     i[1]))
                # Breaking here because this error is ignored and info could
                # contain older error messages.
                # E.g. if error was SQL_STATE_CONNECTION_NOT_OPEN, the next
                # error would be the original connection error.
                break
        if not info:
            logger.info(
                "No information associated with return code %s from %s",
                rc, method)
    return sqlState
def prototype(func, *args):
    """Attach an ODBC signature (SQLRETURN result, given argtypes) to *func*."""
    # ctypes uses these attributes to marshal arguments and the result.
    func.restype = SQLRETURN
    func.argtypes = args
def initFunctionPrototypes():
    """Initialize function prototypes for ODBC calls.

    Declares the argument/return types of every ODBC entry point this
    module calls so ctypes marshals values correctly on all platforms.
    """
    prototype(odbc.SQLAllocHandle, SQLSMALLINT, SQLHANDLE, PTR(SQLHANDLE))
    prototype(odbc.SQLGetDiagRecW, SQLSMALLINT, SQLHANDLE, SQLSMALLINT,
              PTR(SQLWCHAR), PTR(SQLINTEGER), PTR(SQLWCHAR), SQLSMALLINT,
              PTR(SQLSMALLINT))
    prototype(odbc.SQLSetEnvAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLDriverConnectW, SQLHANDLE, SQLHANDLE,
              PTR(SQLWCHAR), SQLSMALLINT, PTR(SQLWCHAR), SQLSMALLINT,
              PTR(SQLSMALLINT), SQLUSMALLINT)
    prototype(odbc.SQLFreeHandle, SQLSMALLINT, SQLHANDLE)
    prototype(odbc.SQLExecDirectW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
    prototype(odbc.SQLNumResultCols, SQLHANDLE, PTR(SQLSMALLINT))
    prototype(odbc.SQLDescribeColW, SQLHANDLE, SQLUSMALLINT, PTR(SQLWCHAR),
              SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLSMALLINT), PTR(SQLULEN),
              PTR(SQLSMALLINT), PTR(SQLSMALLINT))
    prototype(odbc.SQLColAttributeW, SQLHANDLE, SQLUSMALLINT,
              SQLUSMALLINT, SQLPOINTER, SQLSMALLINT, PTR(SQLSMALLINT),
              PTR(SQLLEN))
    prototype(odbc.SQLFetch, SQLHANDLE)
    prototype(odbc.SQLGetData, SQLHANDLE, SQLUSMALLINT,
              SQLSMALLINT, SQLPOINTER, SQLLEN, PTR(SQLLEN))
    prototype(odbc.SQLFreeStmt, SQLHANDLE, SQLUSMALLINT)
    prototype(odbc.SQLPrepareW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
    prototype(odbc.SQLNumParams, SQLHANDLE, PTR(SQLSMALLINT))
    prototype(odbc.SQLDescribeParam, SQLHANDLE, SQLUSMALLINT, PTR(
        SQLSMALLINT), PTR(SQLULEN), PTR(SQLSMALLINT), PTR(SQLSMALLINT))
    prototype(odbc.SQLBindParameter, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT,
              SQLSMALLINT, SQLSMALLINT, SQLULEN, SQLSMALLINT, SQLPOINTER,
              SQLLEN, PTR(SQLLEN))
    prototype(odbc.SQLExecute, SQLHANDLE)
    prototype(odbc.SQLSetStmtAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLMoreResults, SQLHANDLE)
    prototype(odbc.SQLDisconnect, SQLHANDLE)
    prototype(odbc.SQLSetConnectAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLEndTran, SQLSMALLINT, SQLHANDLE, SQLSMALLINT)
    prototype(odbc.SQLRowCount, SQLHANDLE, PTR(SQLLEN))
    prototype(odbc.SQLBindCol, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT,
              SQLPOINTER, SQLLEN, PTR(SQLLEN))
    prototype(odbc.SQLDrivers, SQLHANDLE, SQLUSMALLINT, PTR(SQLCHAR),
              SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLCHAR), SQLSMALLINT,
              PTR(SQLSMALLINT))
def initOdbcLibrary(odbcLibPath=None):
    """Initialize the ODBC Library.

    Loads the platform's ODBC driver-manager shared library exactly once
    and caches it in the module-level ``odbc`` global.  An explicit
    *odbcLibPath* overrides the per-platform default.
    """
    global odbc
    if odbc is None:
        if osType == "Windows":
            odbc = ctypes.windll.odbc32
        else:
            if not odbcLibPath:
                # If MAC OSx
                if osType == "Darwin":
                    odbcLibPath = "libiodbc.dylib"
                elif osType.startswith("CYGWIN"):
                    odbcLibPath = "odbc32.dll"
                else:
                    odbcLibPath = 'libodbc.so'
            logger.info("Loading ODBC Library: %s", odbcLibPath)
            odbc = ctypes.cdll.LoadLibrary(odbcLibPath)
def initDriverList():
    """Populate the module-level ``drivers`` list with installed ODBC
    driver names by iterating SQLDrivers (first/next)."""
    global drivers
    if drivers is None:
        drivers = []
        description = ctypes.create_string_buffer(SMALL_BUFFER_SIZE)
        descriptionLength = SQLSMALLINT()
        attributesLength = SQLSMALLINT()
        rc = SQL_SUCCESS
        direction = SQL_FETCH_FIRST
        while True:
            # NOTE(review): attributesLength is passed without ADDR();
            # ctypes auto-byrefs instances when the argtype is a POINTER,
            # so this works, but ADDR(attributesLength) would be clearer —
            # confirm intended.
            rc = odbc.SQLDrivers(hEnv, direction, description,
                                 len(description), ADDR(descriptionLength),
                                 None, 0, attributesLength)
            checkStatus(rc, hEnv=hEnv)
            if rc == SQL_NO_DATA:
                break
            drivers.append(description.value.decode("utf-8"))
            direction = SQL_FETCH_NEXT
        logger.info("Available drivers: {}".format(", ".join(drivers)))
def initOdbcEnv():
    """Initialize ODBC environment handle.

    Allocates the process-wide environment handle once and registers
    atexit cleanup (connections are closed before the environment is
    freed thanks to LIFO atexit ordering).
    """
    global hEnv
    if hEnv is None:
        hEnv = SQLPOINTER()
        rc = odbc.SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, ADDR(hEnv))
        checkStatus(rc, hEnv=hEnv)
        atexit.register(cleanupOdbcEnv)
        atexit.register(cleanupConnections)
        # Set the ODBC environment's compatibility level to ODBC 3.0
        rc = odbc.SQLSetEnvAttr(hEnv, SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC3, 0)
        checkStatus(rc, hEnv=hEnv)
def cleanupOdbcEnv():
    """Release the process-wide ODBC environment handle, if allocated."""
    if not hEnv:
        return
    odbc.SQLFreeHandle(SQL_HANDLE_ENV, hEnv)
def init(odbcLibPath=None):
    """Perform one-time ODBC initialization (library, prototypes,
    environment handle, driver list).

    Serialized with the module lock so concurrent connection attempts do
    not race on initialization.  Using ``with lock:`` also fixes a subtle
    defect in the old acquire-inside-try form, where a failed acquire()
    still triggered release() in the finally block.
    """
    with lock:
        initOdbcLibrary(odbcLibPath)
        initFunctionPrototypes()
        initOdbcEnv()
        initDriverList()
def determineDriver(dbType, driver):
    """Resolve the ODBC driver name to use for *dbType*.

    An explicitly requested *driver* is validated against the installed
    drivers; otherwise the last installed driver whose name contains
    *dbType* is selected.
    """
    if driver is not None:
        if driver not in drivers:
            raise InterfaceError(
                "DRIVER_NOT_FOUND",
                "No driver found with name '{}'. "
                " Available drivers: {}".format(driver, ",".join(drivers)))
        return driver
    matches = [name for name in drivers if dbType in name]
    if not matches:
        raise InterfaceError(
            "DRIVER_NOT_FOUND",
            "No driver found for '{}'. "
            "Available drivers: {}".format(dbType, ",".join(drivers)))
    selected = matches[-1]
    if len(matches) > 1:
        logger.warning(
            "More than one driver found "
            "for '{}'. Using '{}'."
            " Specify the 'driver' option to "
            "select a specific driver.".format(dbType, selected))
    return selected
class OdbcConnection:
    """Represents a Connection to Teradata using ODBC."""

    def __init__(self, dbType="Teradata", system=None,
                 username=None, password=None, autoCommit=False,
                 transactionMode=None, queryBands=None, odbcLibPath=None,
                 dataTypeConverter=datatypes.DefaultDataTypeConverter(),
                 driver=None, **kwargs):
        """Creates an ODBC connection.

        Any extra keyword arguments are appended verbatim to the ODBC
        connect string (e.g. ``dsn=...``).  Note the converter default is
        evaluated once at import time and therefore shared by all
        connections that do not pass their own.
        """
        self.hDbc = SQLPOINTER()
        self.cursorCount = 0
        self.sessionno = 0
        self.cursors = []
        self.dbType = dbType
        self.converter = dataTypeConverter
        # One-time module initialization (library, env handle, drivers).
        init(odbcLibPath)
        # Build connect string; DRIVER is only needed when no DSN is given.
        extraParams = set(k.lower() for k in kwargs)
        connectParams = collections.OrderedDict()
        if "dsn" not in extraParams:
            connectParams["DRIVER"] = determineDriver(dbType, driver)
        if system:
            connectParams["DBCNAME"] = system
        if username:
            connectParams["UID"] = username
        if password:
            connectParams["PWD"] = password
        if transactionMode:
            connectParams["SESSIONMODE"] = "Teradata" \
                if transactionMode == "TERA" else transactionMode
        connectParams.update(kwargs)
        connectString = u";".join(u"{}={}".format(key, value)
                                  for key, value in connectParams.items())
        rc = odbc.SQLAllocHandle(SQL_HANDLE_DBC, hEnv, ADDR(self.hDbc))
        checkStatus(rc, hEnv=hEnv, method="SQLAllocHandle")
        # Create connection; mask the password when logging.
        logger.debug("Creating connection using ODBC ConnectString: %s",
                     re.sub("PWD=.*?(;|$)", "PWD=XXX;", connectString))
        with lock:
            rc = odbc.SQLDriverConnectW(self.hDbc, 0, _inputStr(connectString),
                                        SQL_NTS, None, 0, None, 0)
        try:
            checkStatus(rc, hDbc=self.hDbc, method="SQLDriverConnectW")
        except BaseException:
            # Connect failed: free the handle before re-raising.
            rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc)
            self.hDbc = None
            raise
        connections.append(self)
        # Setup autocommit, query bands, etc.
        try:
            logger.debug("Setting AUTOCOMMIT to %s",
                         "True" if util.booleanValue(autoCommit) else "False")
            rc = odbc.SQLSetConnectAttr(
                self.hDbc, SQL_ATTR_AUTOCOMMIT,
                TRUE if util.booleanValue(autoCommit) else FALSE, 0)
            checkStatus(
                rc, hDbc=self.hDbc,
                method="SQLSetConnectAttr - SQL_ATTR_AUTOCOMMIT")
            if dbType == "Teradata":
                with self.cursor() as c:
                    self.sessionno = c.execute(
                        "SELECT SESSION",
                        queryTimeout=QUERY_TIMEOUT).fetchone()[0]
                    logger.debug("SELECT SESSION returned %s", self.sessionno)
                    if queryBands:
                        c.execute(u"SET QUERY_BAND = '{};' FOR SESSION".format(
                            u";".join(u"{}={}".format(util.toUnicode(k),
                                                      util.toUnicode(v))
                                      for k, v in queryBands.items())),
                            queryTimeout=QUERY_TIMEOUT)
                self.commit()
                logger.debug("Created session %s.", self.sessionno)
        except Exception:
            self.close()
            raise

    def close(self):
        """Closes an ODBC Connection."""
        if self.hDbc:
            if self.sessionno:
                logger.debug("Closing session %s...", self.sessionno)
            # Close cursors over a copy; cursor.close() mutates the list.
            for cursor in list(self.cursors):
                cursor.close()
            rc = odbc.SQLDisconnect(self.hDbc)
            sqlState = checkStatus(
                rc, hDbc=self.hDbc, method="SQLDisconnect",
                ignore=[SQL_STATE_CONNECTION_NOT_OPEN,
                        SQL_STATE_INVALID_TRANSACTION_STATE])
            if SQL_STATE_INVALID_TRANSACTION_STATE in sqlState:
                # A transaction is still open: roll it back and retry.
                logger.warning("Rolling back open transaction for session %s "
                               "so it can be closed.", self.sessionno)
                rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
                checkStatus(
                    rc, hDbc=self.hDbc,
                    method="SQLEndTran - SQL_ROLLBACK - Disconnect")
                rc = odbc.SQLDisconnect(self.hDbc)
                checkStatus(rc, hDbc=self.hDbc, method="SQLDisconnect")
            rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc)
            if rc != SQL_INVALID_HANDLE:
                checkStatus(rc, hDbc=self.hDbc, method="SQLFreeHandle")
            connections.remove(self)
            self.hDbc = None
            if self.sessionno:
                logger.debug("Session %s closed.", self.sessionno)

    def commit(self):
        """Commits a transaction."""
        logger.debug("Committing transaction...")
        rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_COMMIT)
        checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_COMMIT")

    def rollback(self):
        """Rolls back a transaction."""
        logger.debug("Rolling back transaction...")
        rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
        checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_ROLLBACK")

    def cursor(self):
        """Returns a new cursor on this connection."""
        cursor = OdbcCursor(
            self, self.dbType, self.converter, self.cursorCount)
        self.cursorCount += 1
        return cursor

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, value, traceback):
        self.close()

    def __repr__(self):
        return "OdbcConnection(sessionno={})".format(self.sessionno)
connect = OdbcConnection
class OdbcCursor(util.Cursor):
    """Represents an ODBC Cursor."""

    def __init__(self, connection, dbType, converter, num):
        util.Cursor.__init__(self, connection, dbType, converter)
        self.num = num
        self.moreResults = None
        if num > 0:
            logger.debug(
                "Creating cursor %s for session %s.", self.num,
                self.connection.sessionno)
        self.hStmt = SQLPOINTER()
        rc = odbc.SQLAllocHandle(
            SQL_HANDLE_STMT, connection.hDbc, ADDR(self.hStmt))
        checkStatus(rc, hStmt=self.hStmt)
        connection.cursors.append(self)

    def callproc(self, procname, params, queryTimeout=0):
        """Call a stored procedure with one '?' marker per parameter."""
        self._checkClosed()
        markers = ", ".join("?" * len(params))
        query = "CALL {} ({})".format(procname, markers)
        logger.debug("Executing Procedure: %s", query)
        self.execute(query, params, queryTimeout=queryTimeout)
        return util.OutParams(params, self.dbType, self.converter)

    def close(self):
        """Free the statement handle and detach from the connection."""
        if self.hStmt:
            if self.num > 0:
                logger.debug(
                    "Closing cursor %s for session %s.", self.num,
                    self.connection.sessionno)
            rc = odbc.SQLFreeHandle(SQL_HANDLE_STMT, self.hStmt)
            checkStatus(rc, hStmt=self.hStmt)
            self.connection.cursors.remove(self)
            self.hStmt = None

    def _setQueryTimeout(self, queryTimeout):
        rc = odbc.SQLSetStmtAttr(
            self.hStmt, SQL_ATTR_QUERY_TIMEOUT, SQLPOINTER(queryTimeout),
            SQL_IS_UINTEGER)
        # Fixed diagnostic label (was "SQLSetStmtStmtAttr").
        checkStatus(
            rc, hStmt=self.hStmt,
            method="SQLSetStmtAttr - SQL_ATTR_QUERY_TIMEOUT")

    def execute(self, query, params=None, queryTimeout=0):
        """Execute a query, optionally with a single parameter set."""
        self._checkClosed()
        if params:
            # Parameterized execution goes through prepare/bind/execute.
            self.executemany(query, [params, ], queryTimeout)
        else:
            if self.connection.sessionno:
                logger.debug(
                    "Executing query on session %s using SQLExecDirectW: %s",
                    self.connection.sessionno, query)
            self._free()
            self._setQueryTimeout(queryTimeout)
            rc = odbc.SQLExecDirectW(
                self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
            checkStatus(rc, hStmt=self.hStmt, method="SQLExecDirectW")
            self._handleResults()
        return self

    def executemany(self, query, params, batch=False, queryTimeout=0):
        """Execute a prepared query once per parameter set."""
        self._checkClosed()
        self._free()
        # Prepare the query
        rc = odbc.SQLPrepareW(
            self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
        checkStatus(rc, hStmt=self.hStmt, method="SQLPrepare")
        self._setQueryTimeout(queryTimeout)
        # Get the number of parameters in the SQL statement.
        numParams = SQLSMALLINT()
        rc = odbc.SQLNumParams(self.hStmt, ADDR(numParams))
        checkStatus(rc, hStmt=self.hStmt, method="SQLNumParams")
        numParams = numParams.value
        # Ask the driver for each parameter's SQL data type.
        dataTypes = []
        for paramNum in range(0, numParams):
            dataType = SQLSMALLINT()
            parameterSize = SQLULEN()
            decimalDigits = SQLSMALLINT()
            nullable = SQLSMALLINT()
            rc = odbc.SQLDescribeParam(
                self.hStmt, paramNum + 1, ADDR(dataType), ADDR(parameterSize),
                ADDR(decimalDigits), ADDR(nullable))
            checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeParams")
            dataTypes.append(dataType.value)
        if batch:
            logger.debug(
                "Executing query on session %s using batched SQLExecute: %s",
                self.connection.sessionno, query)
            self._executeManyBatch(params, numParams, dataTypes)
        else:
            logger.debug(
                "Executing query on session %s using SQLExecute: %s",
                self.connection.sessionno, query)
            rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMSET_SIZE, 1, 0)
            checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
            paramSetNum = 0
            for p in params:
                paramSetNum += 1
                logger.trace("ParamSet %s: %s", paramSetNum, p)
                if len(p) != numParams:
                    raise InterfaceError(
                        "PARAMS_MISMATCH", "The number of supplied parameters "
                        "({}) does not match the expected number of "
                        "parameters ({}).".format(len(p), numParams))
                paramArray = []
                lengthArray = []
                for paramNum in range(0, numParams):
                    val = p[paramNum]
                    inputOutputType = _getInputOutputType(val)
                    valueType, paramType = _getParamValueType(
                        dataTypes[paramNum])
                    param, length, null = _getParamValue(val, valueType, False)
                    # Keep a reference so ctypes buffers stay alive until
                    # SQLExecute.
                    paramArray.append(param)
                    if param is not None:
                        if valueType == SQL_C_BINARY:
                            bufSize = SQLLEN(length)
                            lengthArray.append(SQLLEN(length))
                            columnSize = SQLULEN(length)
                        elif valueType == SQL_C_DOUBLE:
                            bufSize = SQLLEN(length)
                            lengthArray.append(SQLLEN(length))
                            columnSize = SQLULEN(length)
                            param = ADDR(param)
                        else:
                            bufSize = SQLLEN(ctypes.sizeof(param))
                            lengthArray.append(SQLLEN(SQL_NTS))
                            columnSize = SQLULEN(length)
                        if null:
                            # Handle INOUT parameter with NULL input value.
                            lengthArray.pop(-1)
                            lengthArray.append(SQLLEN(SQL_NULL_DATA))
                    else:
                        bufSize = SQLLEN(0)
                        columnSize = SQLULEN(0)
                        lengthArray.append(SQLLEN(SQL_NULL_DATA))
                    logger.trace("Binding parameter %s...", paramNum + 1)
                    rc = odbc.SQLBindParameter(
                        self.hStmt, paramNum + 1, inputOutputType, valueType,
                        paramType, columnSize, 0, param, bufSize,
                        ADDR(lengthArray[paramNum]))
                    checkStatus(
                        rc, hStmt=self.hStmt, method="SQLBindParameter")
                logger.debug("Executing prepared statement.")
                rc = odbc.SQLExecute(self.hStmt)
                # Propagate actual output sizes back to OUT parameters
                # before raising on any execution error.
                for paramNum in range(0, numParams):
                    val = p[paramNum]
                    if isinstance(val, OutParam):
                        val.size = lengthArray[paramNum].value
                checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")
        self._handleResults()
        return self

    def _executeManyBatch(self, params, numParams, dataTypes):
        """Bind all parameter sets column-wise and execute once."""
        # Get the number of parameter sets.
        paramSetSize = len(params)
        # Use column-wise binding for the parameter arrays.
        rc = odbc.SQLSetStmtAttr(
            self.hStmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_PARAM_BIND_BY_COLUMN, 0)
        checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
        # Specify the number of elements in each parameter array.
        rc = odbc.SQLSetStmtAttr(
            self.hStmt, SQL_ATTR_PARAMSET_SIZE, paramSetSize, 0)
        checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
        # Bind the parameters.
        paramArrays = []
        lengthArrays = []
        paramSetNum = 0
        debugEnabled = logger.isEnabledFor(logging.DEBUG)
        # Validate every parameter set up front.
        for p in params:
            paramSetNum += 1
            if debugEnabled:
                logger.debug("ParamSet %s: %s", paramSetNum, p)
            if len(p) != numParams:
                raise InterfaceError(
                    "PARAMS_MISMATCH", "The number of supplied parameters "
                    "({}) does not match the expected number of parameters "
                    "({}).".format(len(p), numParams))
        for paramNum in range(0, numParams):
            # Collect this column's converted values across all sets.
            p = []
            valueType, paramType = _getParamValueType(dataTypes[paramNum])
            maxLen = 0
            for paramSetNum in range(0, paramSetSize):
                param, length, null = _getParamValue(  # @UnusedVariable
                    params[paramSetNum][paramNum], valueType, True)
                if length > maxLen:
                    maxLen = length
                p.append(param)
            if debugEnabled:
                logger.debug("Max length for parameter %s is %s.",
                             paramNum + 1, maxLen)
            if valueType == SQL_C_BINARY:
                valueSize = SQLLEN(maxLen)
                paramArrays.append((SQLBYTE * (paramSetSize * maxLen))())
            elif valueType == SQL_C_DOUBLE:
                valueSize = SQLLEN(maxLen)
                paramArrays.append((SQLDOUBLE * paramSetSize)())
            else:
                # Reserve one extra character for the NUL terminator.
                maxLen += 1
                valueSize = SQLLEN(ctypes.sizeof(SQLWCHAR) * maxLen)
                paramArrays.append(_createBuffer(paramSetSize * maxLen))
            lengthArrays.append((SQLLEN * paramSetSize)())
            for paramSetNum in range(0, paramSetSize):
                index = paramSetNum * maxLen
                if p[paramSetNum] is not None:
                    if valueType == SQL_C_DOUBLE:
                        paramArrays[paramNum][paramSetNum] = p[paramSetNum]
                    else:
                        # Copy bytes/chars into the flat column buffer.
                        for c in p[paramSetNum]:
                            paramArrays[paramNum][index] = c
                            index += 1
                        if valueType == SQL_C_BINARY:
                            lengthArrays[paramNum][
                                paramSetNum] = len(p[paramSetNum])
                        else:
                            lengthArrays[paramNum][
                                paramSetNum] = SQLLEN(SQL_NTS)
                            paramArrays[paramNum][
                                index] = _convertParam("\x00")[0]
                else:
                    lengthArrays[paramNum][paramSetNum] = SQLLEN(SQL_NULL_DATA)
                    if valueType == SQL_C_WCHAR:
                        paramArrays[paramNum][index] = _convertParam("\x00")[0]
            if debugEnabled:
                logger.debug("Binding parameter %s...", paramNum + 1)
            rc = odbc.SQLBindParameter(self.hStmt, paramNum + 1,
                                       SQL_PARAM_INPUT, valueType, paramType,
                                       SQLULEN(maxLen), 0,
                                       paramArrays[paramNum], valueSize,
                                       lengthArrays[paramNum])
            checkStatus(rc, hStmt=self.hStmt, method="SQLBindParameter")
        # Execute the SQL statement.
        if debugEnabled:
            logger.debug("Executing prepared statement.")
        rc = odbc.SQLExecute(self.hStmt)
        checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")

    def _handleResults(self):
        """Refresh cursor metadata for the current result set."""
        # Reset cursor attributes.
        self.description = None
        self.rowcount = -1
        self.rownumber = None
        self.columns = {}
        self.types = []
        self.moreResults = None
        # Get column count in result set.
        columnCount = SQLSMALLINT()
        rc = odbc.SQLNumResultCols(self.hStmt, ADDR(columnCount))
        checkStatus(rc, hStmt=self.hStmt, method="SQLNumResultCols")
        rowCount = SQLLEN()
        rc = odbc.SQLRowCount(self.hStmt, ADDR(rowCount))
        checkStatus(rc, hStmt=self.hStmt, method="SQLRowCount")
        self.rowcount = rowCount.value
        # Get column meta data and create row iterator.
        if columnCount.value > 0:
            self.description = []
            nameBuf = _createBuffer(SMALL_BUFFER_SIZE)
            nameLength = SQLSMALLINT()
            dataType = SQLSMALLINT()
            columnSize = SQLULEN()
            decimalDigits = SQLSMALLINT()
            nullable = SQLSMALLINT()
            for col in range(0, columnCount.value):
                rc = odbc.SQLDescribeColW(
                    self.hStmt, col + 1, nameBuf, len(nameBuf),
                    ADDR(nameLength), ADDR(dataType), ADDR(columnSize),
                    ADDR(decimalDigits), ADDR(nullable))
                checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeColW")
                columnName = _outputStr(nameBuf)
                # BUG FIX: capture the return code of SQLColAttributeW —
                # previously the stale rc from SQLDescribeColW was checked,
                # so SQLColAttributeW failures went undetected.
                rc = odbc.SQLColAttributeW(
                    self.hStmt, col + 1, SQL_DESC_TYPE_NAME, ADDR(nameBuf),
                    len(nameBuf), None, None)
                checkStatus(rc, hStmt=self.hStmt, method="SQLColAttributeW")
                typeName = _outputStr(nameBuf)
                typeCode = self.converter.convertType(self.dbType, typeName)
                self.columns[columnName.lower()] = col
                self.types.append((typeName, typeCode, dataType.value))
                self.description.append((
                    columnName, typeCode, None, columnSize.value,
                    decimalDigits.value, None, nullable.value))
            self.iterator = rowIterator(self)

    def nextset(self):
        """Advance to the next result set; returns True if one exists."""
        self._checkClosed()
        if self.moreResults is None:
            self._checkForMoreResults()
        if self.moreResults:
            self._handleResults()
            return True

    def _checkForMoreResults(self):
        rc = odbc.SQLMoreResults(self.hStmt)
        checkStatus(rc, hStmt=self.hStmt, method="SQLMoreResults")
        self.moreResults = rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO
        return self.moreResults

    def _free(self):
        """Close any open cursor state and unbind all parameters."""
        rc = odbc.SQLFreeStmt(self.hStmt, SQL_CLOSE)
        checkStatus(rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_CLOSE")
        rc = odbc.SQLFreeStmt(self.hStmt, SQL_RESET_PARAMS)
        checkStatus(
            rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_RESET_PARAMS")

    def _checkClosed(self):
        if not self.hStmt:
            raise InterfaceError("CURSOR_CLOSED",
                                 "Operations cannot be performed on a "
                                 "closed cursor.")
def _convertLineFeeds(query):
    """Join the query's lines with carriage returns for the ODBC driver."""
    lines = util.linesplit(query)
    return "\r".join(lines)
def _getInputOutputType(val):
    """Map a parameter wrapper to its ODBC input/output direction constant."""
    # InOutParam is tested before OutParam to preserve precedence.
    if isinstance(val, InOutParam):
        return SQL_PARAM_INPUT_OUTPUT
    if isinstance(val, OutParam):
        return SQL_PARAM_OUTPUT
    return SQL_PARAM_INPUT
def _getParamValueType(dataType):
    """Choose the C value type and SQL parameter type used to bind a value.

    Defaults to wide-character binding; binary and floating-point SQL
    types get their native C representations.
    """
    if dataType in (SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY):
        return SQL_C_BINARY, dataType
    if dataType == SQL_WLONGVARCHAR:
        return SQL_C_WCHAR, SQL_WLONGVARCHAR
    if dataType in (SQL_FLOAT, SQL_DOUBLE, SQL_REAL):
        return SQL_C_DOUBLE, SQL_DOUBLE
    return SQL_C_WCHAR, SQL_WVARCHAR
def _getParamBufferSize(val):
return SMALL_BUFFER_SIZE if val.size is None else val.size
def _getParamValue(val, valueType, batch):
    """Convert a Python value into a ctypes object suitable for binding.

    Returns ``(param, length, null)`` where *param* is the ctypes buffer
    (or None for NULL input), *length* its byte/char length, and *null*
    is True for an INOUT parameter whose input value is NULL.  For OUT
    and INOUT parameters a value-extraction callback is registered via
    ``setValueFunc``.  *batch* selects the raw (column-wise) form used by
    _executeManyBatch instead of a directly bindable buffer.
    """
    length = 0
    null = False
    if val is None:
        param = None
    elif valueType == SQL_C_BINARY:
        ba = val
        if isinstance(val, InOutParam):
            ba = val.inValue
            if val.inValue is None:
                null = True
                ba = bytearray(_getParamBufferSize(val))
        elif isinstance(val, OutParam):
            ba = bytearray(_getParamBufferSize(val))
        if ba is not None and not isinstance(ba, bytearray):
            # NOTE(review): every other InterfaceError in this module is
            # raised with (code, message); this one passes a single
            # argument — confirm whether a code like "INVALID_PARAMETER"
            # was intended.
            raise InterfaceError("Expected bytearray for BINARY parameter.")
        length = len(ba)
        if batch:
            param = ba
        else:
            # Wrap the bytearray without copying so the driver writes
            # back into it for OUT parameters.
            byteArr = SQLBYTE * length
            param = byteArr.from_buffer(ba)
        if isinstance(val, OutParam):
            val.setValueFunc(lambda: ba[:val.size])
    elif valueType == SQL_C_DOUBLE:
        f = val
        if isinstance(val, InOutParam):
            f = val.inValue
            if f is None:
                null = True
                f = float(0)
        elif isinstance(val, OutParam):
            f = float(0)
        param = SQLDOUBLE(f if not util.isString(f) else float(f))
        length = ctypes.sizeof(param)
        if isinstance(val, OutParam):
            val.setValueFunc(lambda: param.value)
    else:
        # Character data (bound as wide characters).
        if batch:
            param = _convertParam(val)
            length = len(param)
        elif isinstance(val, InOutParam):
            length = _getParamBufferSize(val)
            if val.inValue is not None:
                param = _inputStr(val.inValue, length)
            else:
                param = _createBuffer(length)
                null = True
            val.setValueFunc(lambda: _outputStr(param))
        elif isinstance(val, OutParam):
            length = _getParamBufferSize(val)
            param = _createBuffer(length)
            val.setValueFunc(lambda: _outputStr(param))
        else:
            param = _inputStr(val)
            length = len(param)
    return param, length, null
def _getFetchSize(cursor):
    """Gets the fetch size associated with the cursor.

    Drops to single-row fetching when any column is a LOB, since LOB
    values must be read with SQLGetData one row at a time.
    """
    hasLob = any(columnType[2] in (SQL_LONGVARBINARY, SQL_WLONGVARCHAR)
                 for columnType in cursor.types)
    return 1 if hasLob else cursor.fetchSize
def _getBufSize(cursor, colIndex):
    """Return the per-row fetch buffer size for column *colIndex* (1-based).

    Starts from the column's reported size (+1 for a terminator) and
    overrides it per type family.  The elif chain order is significant:
    e.g. "TIMESTAMP" must be tested before "TIME" because of the shared
    prefix.
    """
    bufSize = cursor.description[colIndex - 1][3] + 1
    dataType = cursor.types[colIndex - 1][0]
    if dataType in datatypes.BINARY_TYPES:
        # Binary columns use the reported size as-is.
        pass
    elif dataType in datatypes.FLOAT_TYPES:
        bufSize = ctypes.sizeof(ctypes.c_double)
    elif dataType in datatypes.INT_TYPES:
        bufSize = 30
    elif cursor.types[colIndex - 1][2] in (SQL_WCHAR, SQL_WVARCHAR,
                                           SQL_WLONGVARCHAR):
        # Wide-character columns: the reported size already fits.
        pass
    elif dataType.startswith("DATE"):
        bufSize = 20
    elif dataType.startswith("TIMESTAMP"):
        bufSize = 40
    elif dataType.startswith("TIME"):
        bufSize = 30
    elif dataType.startswith("INTERVAL"):
        bufSize = 80
    elif dataType.startswith("PERIOD"):
        bufSize = 80
    elif dataType.startswith("DECIMAL"):
        bufSize = 42
    else:
        # Unknown type: fall back to a generous buffer.
        bufSize = 2 ** 16 + 1
    return bufSize
def _setupColumnBuffers(cursor, buffers, bufSizes, dataTypes, indicators,
                        lastFetchSize):
    """Sets up the column buffers for retrieving multiple rows of a result set
    at a time.

    Appends one buffer, size, C data type and indicator array per column
    to the caller-owned lists and binds non-LOB columns with SQLBindCol.
    Returns the fetch size used, which the caller passes back on the next
    invocation so unchanged buffers can be reused.

    NOTE(review): when fetchSize differs from lastFetchSize the lists are
    appended to without being cleared; in practice rowIterator creates
    fresh lists per result set and fetchSize is stable within one result
    set — confirm.
    """
    fetchSize = _getFetchSize(cursor)
    # If the fetchSize hasn't changed since the last time setupBuffers
    # was called, then we can reuse the previous buffers.
    if fetchSize != lastFetchSize:
        logger.debug("FETCH_SIZE: %s" % fetchSize)
        rc = odbc.SQLSetStmtAttr(
            cursor.hStmt, SQL_ATTR_ROW_ARRAY_SIZE, fetchSize, 0)
        checkStatus(rc, hStmt=cursor.hStmt,
                    method="SQLSetStmtAttr - SQL_ATTR_ROW_ARRAY_SIZE")
        for col in range(1, len(cursor.description) + 1):
            dataType = SQL_C_WCHAR
            buffer = None
            bufSize = _getBufSize(cursor, col)
            lob = False
            if cursor.types[col - 1][2] == SQL_LONGVARBINARY:
                # LOBs are fetched via SQLGetData, not bound.
                lob = True
                bufSize = LARGE_BUFFER_SIZE
                buffer = (ctypes.c_byte * bufSize)()
                dataType = SQL_LONGVARBINARY
            elif cursor.types[col - 1][2] == SQL_WLONGVARCHAR:
                lob = True
                buffer = _createBuffer(LARGE_BUFFER_SIZE)
                bufSize = ctypes.sizeof(buffer)
                dataType = SQL_WLONGVARCHAR
            elif cursor.description[col - 1][1] == BINARY:
                dataType = SQL_C_BINARY
                buffer = (ctypes.c_byte * bufSize * fetchSize)()
            elif cursor.types[col - 1][0] in datatypes.FLOAT_TYPES:
                dataType = SQL_C_DOUBLE
                buffer = (ctypes.c_double * fetchSize)()
            else:
                # Wide-character buffer holding fetchSize rows.
                buffer = _createBuffer(bufSize * fetchSize)
                bufSize = int(ctypes.sizeof(buffer) / fetchSize)
            dataTypes.append(dataType)
            buffers.append(buffer)
            bufSizes.append(bufSize)
            logger.debug("Buffer size for column %s: %s", col, bufSize)
            indicators.append((SQLLEN * fetchSize)())
            if not lob:
                rc = odbc.SQLBindCol(cursor.hStmt, col, dataType, buffer,
                                     bufSize, indicators[col - 1])
                checkStatus(rc, hStmt=cursor.hStmt, method="SQLBindCol")
    return fetchSize
def _getLobData(cursor, colIndex, buf, binary):
    """ Get LOB Data.

    Reads a BLOB/CLOB value for the current row via SQLGetData, issuing
    follow-up calls when the first read reports truncation.  Returns a
    bytearray (binary) or text string, or None for SQL NULL.
    """
    length = SQLLEN()
    dataType = SQL_C_WCHAR
    bufSize = ctypes.sizeof(buf)
    if binary:
        dataType = SQL_C_BINARY
    rc = odbc.SQLGetData(
        cursor.hStmt, colIndex, dataType, buf, bufSize, ADDR(length))
    sqlState = checkStatus(rc, hStmt=cursor.hStmt, method="SQLGetData")
    val = None
    if length.value != SQL_NULL_DATA:
        if SQL_STATE_DATA_TRUNCATED in sqlState:
            # length.value now holds the full data size; read the rest.
            logger.debug(
                "Data truncated. Calling SQLGetData to get next part "
                "of data for column %s of size %s.",
                colIndex, length.value)
            if dataType == SQL_C_BINARY:
                val = bytearray(length.value)
                # NOTE(review): buf elements are signed c_byte; slice
                # assignment into a bytearray requires 0..255 — confirm
                # bytes >= 0x80 are handled upstream.
                val[0:bufSize] = buf
                newBufSize = len(val) - bufSize
                # Write the remainder directly into the tail of val.
                newBuffer = (ctypes.c_byte * newBufSize).from_buffer(
                    val, bufSize)
                rc = odbc.SQLGetData(
                    cursor.hStmt, colIndex, dataType, newBuffer,
                    newBufSize, ADDR(length))
                checkStatus(
                    rc, hStmt=cursor.hStmt, method="SQLGetData2")
            else:
                # Character LOB: accumulate chunks until no truncation.
                val = [_outputStr(buf), ]
                while SQL_STATE_DATA_TRUNCATED in sqlState:
                    rc = odbc.SQLGetData(
                        cursor.hStmt, colIndex, dataType, buf, bufSize,
                        ADDR(length))
                    sqlState = checkStatus(
                        rc, hStmt=cursor.hStmt, method="SQLGetData2")
                    val.append(_outputStr(buf))
                val = "".join(val)
        else:
            # Entire value fit in the first read.
            if dataType == SQL_C_BINARY:
                val = bytearray(
                    (ctypes.c_byte * length.value).from_buffer(buf))
            else:
                val = _outputStr(buf)
    return val
def _getRow(cursor, buffers, bufSizes, dataTypes, indicators, rowIndex):
    """Reads a row of data from the fetched input buffers. If the column
    type is a BLOB or CLOB, then that data is obtained via calls to
    SQLGetData.

    *rowIndex* selects the row within the block-fetched column buffers;
    the per-column indicator arrays hold each value's length (or
    SQL_NULL_DATA).  Returns the row as a list of Python values.
    """
    row = []
    for col in range(1, len(cursor.description) + 1):
        val = None
        buf = buffers[col - 1]
        bufSize = bufSizes[col - 1]
        dataType = dataTypes[col - 1]
        length = indicators[col - 1][rowIndex]
        if length != SQL_NULL_DATA:
            if dataType == SQL_C_BINARY:
                # Copy this row's slice of the column buffer.
                val = bytearray((ctypes.c_byte * length).from_buffer(
                    buf, bufSize * rowIndex))
            elif dataType == SQL_C_DOUBLE:
                val = ctypes.c_double.from_buffer(buf,
                                                  bufSize * rowIndex).value
            elif dataType == SQL_WLONGVARCHAR:
                val = _getLobData(cursor, col, buf, False)
            elif dataType == SQL_LONGVARBINARY:
                val = _getLobData(cursor, col, buf, True)
            else:
                # Decode this row's slice of the wide-character buffer.
                chLen = (int)(bufSize / ctypes.sizeof(SQLWCHAR))
                chBuf = (SQLWCHAR * chLen)
                val = _outputStr(chBuf.from_buffer(buf,
                                                   bufSize * rowIndex))
        row.append(val)
    return row
def rowIterator(cursor):
    """Generator yielding the rows of *cursor*'s current result set.

    Binds column buffers once, block-fetches rows (fetch size 1 when LOB
    columns are present) and yields one list per row.  When the result
    set is exhausted the statement is freed unless another result set is
    pending (nextset() then rebuilds the iterator).
    """
    buffers = []
    bufSizes = []
    dataTypes = []
    indicators = []
    rowCount = SQLULEN()
    lastFetchSize = None
    # Ask the driver to report how many rows each SQLFetch returned.
    rc = odbc.SQLSetStmtAttr(
        cursor.hStmt, SQL_ATTR_ROWS_FETCHED_PTR, ADDR(rowCount), 0)
    checkStatus(rc, hStmt=cursor.hStmt,
                method="SQLSetStmtAttr - SQL_ATTR_ROWS_FETCHED_PTR")
    while cursor.description is not None:
        lastFetchSize = _setupColumnBuffers(cursor, buffers, bufSizes,
                                            dataTypes, indicators,
                                            lastFetchSize)
        rc = odbc.SQLFetch(cursor.hStmt)
        checkStatus(rc, hStmt=cursor.hStmt, method="SQLFetch")
        if rc == SQL_NO_DATA:
            break
        for rowIndex in range(0, rowCount.value):
            yield _getRow(cursor, buffers, bufSizes, dataTypes,
                          indicators, rowIndex)
    if not cursor._checkForMoreResults():
        cursor._free()
| StarcoderdataPython |
1727044 | from pathlib import Path
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import ScriptedLoadableModule, ScriptedLoadableModuleWidget
from slicer.util import VTKObservationMixin
from EPISURGBase import EPISURGBaseLogic # pylint: disable=import-error
EPISURG_URL = 'https://s3-eu-west-1.amazonaws.com/pstorage-ucl-2748466690/26153588/EPISURG.zip'
EPISURG_CHECKSUM = 'MD5:5ec5831a2c6fbfdc8489ba2910a6504b'
class EPISURGBrowser(ScriptedLoadableModule):
    """Slicer module descriptor for the EPISURG dataset browser.

    Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def __init__(self, parent):
        super().__init__(parent)
        # Metadata shown in Slicer's module selector and help panel.
        self.parent.title = "EPISURG Browser"
        self.parent.categories = ["EPISURG"]
        self.parent.dependencies = []
        self.parent.helpText = """
This module can be used to download and visualize the <a href="https://doi.org/10.5522/04/9996158.v1">EPISURG dataset</a>.
See more information in the <a href="https://github.com/fepegar/resseg-ijcars">paper repository</a>.
"""
        self.parent.acknowledgementText = """This file was developed by <NAME> (University College London and King's College London).
"""
class EPISURGBrowserWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent=None):
    """
    Called when the user opens the module the first time and the widget is initialized.
    """
    ScriptedLoadableModuleWidget.__init__(self, parent)
    VTKObservationMixin.__init__(self)  # needed for parameter node observation
    # Populated lazily: logic in setup(), subjects when the user loads them.
    self.logic = None
    self.subjects = None
def setup(self):
    """Build the module GUI and register scene observers."""
    ScriptedLoadableModuleWidget.setup(self)
    self.logic = EPISURGBrowserLogic()
    self.makeGUI()
    # Expose the widget for interactive debugging in the Slicer console.
    slicer.episurgBrowser = self
    self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
def makeGUI(self):
    """Create the two collapsible sections of the module panel:
    dataset-directory selection and subject navigation."""
    # --- Dataset directory section ---
    self.setDirButton = ctk.ctkCollapsibleButton()
    self.setDirButton.text = 'Select dataset directory'
    setDirLayout = qt.QFormLayout(self.setDirButton)
    self.layout.addWidget(self.setDirButton)
    self.datasetDirEdit = ctk.ctkPathLineEdit()
    # Restrict the path picker to directories.
    self.datasetDirEdit.filters = ctk.ctkPathLineEdit.Dirs
    setDirLayout.addRow('EPISURG directory: ', self.datasetDirEdit)
    self.datasetDirButton = qt.QPushButton('Load subjects')
    self.datasetDirButton.clicked.connect(self.onLoadSubjectsButton)
    setDirLayout.addWidget(self.datasetDirButton)
    self.downloadDatasetButton = qt.QPushButton('Download dataset')
    self.downloadDatasetButton.clicked.connect(self.onDownloadDatasetButton)
    setDirLayout.addWidget(self.downloadDatasetButton)
    # --- Subject selection section (enabled once subjects are loaded) ---
    self.subjectsButton = ctk.ctkCollapsibleButton()
    self.subjectsButton.text = 'Select subject to load'
    self.subjectsButton.setEnabled(False)
    subjectsLayout = qt.QFormLayout(self.subjectsButton)
    self.layout.addWidget(self.subjectsButton)
    self.subjectsComboBox = qt.QComboBox()
    self.subjectsComboBox.addItem('Select subject ID')
    self.subjectsComboBox.currentIndexChanged.connect(self.onSubjectsComboBox)
    subjectsLayout.addWidget(self.subjectsComboBox)
    self.previousSubjectPushButton = qt.QPushButton('Previous')
    self.previousSubjectPushButton.clicked.connect(self.onPreviousSubjectButton)
    subjectsLayout.addWidget(self.previousSubjectPushButton)
    self.nextSubjectPushButton = qt.QPushButton('Next')
    self.nextSubjectPushButton.clicked.connect(self.onNextSubjectButton)
    subjectsLayout.addWidget(self.nextSubjectPushButton)
    # Push all sections to the top of the panel.
    self.layout.addStretch()
def getCurrentPath(self):
    """Return the dataset directory entered in the path widget as an absolute Path.

    Raises:
        ValueError: if the path field is empty (an error dialog is shown first).
    """
    text = self.datasetDirEdit.currentPath
    if not text:
        # The original used an f-string with no placeholder and then a bare
        # ``raise`` with no active exception, which produced an unrelated
        # RuntimeError; raise an explicit, meaningful error instead.
        message = 'Please enter a directory in the widget'
        slicer.util.errorDisplay(message)
        raise ValueError(message)
    return Path(text).expanduser().absolute()
def getSubjectsDict(self):
    """Scan the dataset directory and return ``{subject name: Subject}``.

    Raises:
        ValueError: if the directory layout is not the expected
            ``<EPISURG>/subjects/sub-*`` structure (an error dialog is shown
            first in every failure case).
    """
    datasetDir = self.getCurrentPath()
    if not datasetDir.is_dir():
        message = f'{datasetDir} is not a directory'
        slicer.util.errorDisplay(message)
        # Each bare ``raise`` below/here originally had no active exception
        # and would have produced a RuntimeError; raise explicitly instead.
        raise ValueError(message)
    subjectsDir = datasetDir / 'subjects'
    if not subjectsDir.is_dir():
        message = f'"subjects" directory not found in EPISURG directory "{datasetDir}"'
        slicer.util.errorDisplay(message)
        raise ValueError(message)
    pattern = 'sub-*'
    # glob order is arbitrary; sort for stable browsing order.
    # (``sorted(list(...))`` simplified to ``sorted(...)``.)
    subjectsDirs = sorted(subjectsDir.glob(pattern))
    if not subjectsDirs:
        message = f'No directories found in {subjectsDir} with pattern {pattern}'
        slicer.util.errorDisplay(message)
        raise ValueError(message)
    return {d.name: Subject(d) for d in subjectsDirs}
def cleanupSubjects(self):
    """Drop the node references held by every loaded subject."""
    for loadedSubject in self.subjects.values():
        loadedSubject.cleanup()
def cleanup(self):
    """Called when the application closes and the module widget is destroyed."""
    # Detach the VTK observers added in setup() so the widget can be released.
    self.removeObservers()
# Slots
def onLoadSubjectsButton(self):
    """Populate the subject combo box from the currently selected directory."""
    self.datasetDirEdit.addCurrentPathToHistory()
    self.subjects = self.getSubjectsDict()
    # Block signals while repopulating so onSubjectsComboBox is not fired
    # for the intermediate index changes.
    self.subjectsComboBox.blockSignals(True)
    self.subjectsComboBox.addItems(list(self.subjects.keys()))
    self.subjectsComboBox.setCurrentIndex(0)
    self.subjectsComboBox.blockSignals(False)
    # Lock the directory section and open the browsing section.
    self.setDirButton.setEnabled(False)
    self.subjectsButton.setEnabled(True)
def onDownloadDatasetButton(self):
    """Download and extract the EPISURG archive, then load the subjects."""
    episurgDir = self.getCurrentPath()
    subjectsDir = episurgDir / 'subjects'
    # Skip the download when the dataset already looks extracted.
    if not subjectsDir.is_dir():
        outputDir = episurgDir.parent
        outputDir.mkdir(exist_ok=True, parents=True)
        archiveFilePath = outputDir / 'episurg.zip'
        try:
            # Busy cursor for the duration of the (blocking) download.
            qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
            success = slicer.util.downloadAndExtractArchive(
                EPISURG_URL,
                str(archiveFilePath),
                str(outputDir),
                checksum=EPISURG_CHECKSUM,
            )
            if success:
                archiveFilePath.unlink()  # remove the zip once extracted
            else:
                slicer.util.errorDisplay('Error downloading the dataset')
        except Exception as e:
            slicer.util.errorDisplay(str(e))
        finally:
            qt.QApplication.restoreOverrideCursor()
    self.onLoadSubjectsButton()
def onSceneStartClose(self, caller, event):
    """Called just before the scene is closed."""
    # Clear stale node references; the nodes themselves go away with the scene.
    self.cleanupSubjects()
def onPreviousSubjectButton(self):
    """Step the subject combo box back by one entry."""
    # NOTE(review): no clamping -- stepping back from index 0 presumably
    # clears the selection; confirm that is intended.
    self.subjectsComboBox.currentIndex -= 1
def onNextSubjectButton(self):
    """Step the subject combo box forward by one entry."""
    # NOTE(review): no clamping at the last entry -- confirm the out-of-range
    # index is handled acceptably by the combo box.
    self.subjectsComboBox.currentIndex += 1
def onSubjectsComboBox(self):
    """Load the subject selected in the combo box into the scene."""
    try:
        subject = self.subjects[self.subjectsComboBox.currentText]
    except KeyError:
        # The placeholder entry ('Select subject ID') is not a real subject.
        return
    try:
        # Close whatever is currently loaded before loading the new subject.
        self.cleanupSubjects()
        self.logic.closeScene()
        subject.load()
        self.logic.jumpToFirstSegment(subject.rater1SegNode)
    except Exception as e:
        slicer.util.errorDisplay(f'Failed to load subject: {e}')
        import traceback
        traceback.print_exc()
class EPISURGBrowserLogic(EPISURGBaseLogic):
    """Logic for the EPISURG browser; inherits all behavior from EPISURGBaseLogic."""
    # The previous explicit __init__ only called super().__init__() and was
    # therefore redundant; the inherited constructor is used directly.
class Subject:
    """One EPISURG subject: pre/post-op T1 MRIs plus up to three rater segmentations."""

    # Display colors (RGB, 0-255) assigned to the three raters' segmentations.
    RED = 222, 53, 7
    GREEN = 116, 191, 23
    BLUE = 24, 80, 201

    def __init__(self, subjectDir):
        self.dir = Path(subjectDir)
        self.id = self.dir.name  # directory name doubles as the subject ID
        # File paths follow the EPISURG naming convention seen below.
        self.t1PrePath = self.dir / 'preop' / f'{self.id}_preop-t1mri-1.nii.gz'
        self.t1PostDir = self.dir / 'postop'
        self.t1PostPath = self.t1PostDir / f'{self.id}_postop-t1mri-1.nii.gz'
        self.rater1SegPath = self.t1PostDir / f'{self.id}_postop-seg-1.nii.gz'
        self.rater2SegPath = self.t1PostDir / f'{self.id}_postop-seg-2.nii.gz'
        self.rater3SegPath = self.t1PostDir / f'{self.id}_postop-seg-3.nii.gz'
        self.cleanup()  # start with all node references cleared

    def __repr__(self):
        return f'Subject("{self.id}")'

    def cleanup(self):
        """Drop all MRML node references (does not remove nodes from the scene)."""
        self.t1PreNode = None
        self.t1PostNode = None
        self.rater1SegNode = None
        self.rater2SegNode = None
        self.rater3SegNode = None

    def loadVolumeIfPresent(self, path):
        """Load *path* as a volume node, or return None if the file is missing."""
        if not path.is_file(): return
        return slicer.util.loadVolume(str(path))

    def loadSegmentationIfPresent(self, path):
        """Load *path* as a segmentation node with a smoothed 3D surface, or None."""
        if not path.is_file(): return
        segmentationNode = slicer.util.loadSegmentation(str(path))
        segmentation = segmentationNode.GetSegmentation()
        rule = slicer.vtkBinaryLabelmapToClosedSurfaceConversionRule
        # Smooth the binary labelmap before building the closed surface.
        segmentation.SetConversionParameter(rule.GetSmoothingFactorParameterName(), "0.2")
        segmentationNode.CreateClosedSurfaceRepresentation()
        displayNode = segmentationNode.GetDisplayNode()
        displayNode.SetVisibility2DFill(False)  # outline only in slice views
        displayNode.SetAllSegmentsVisibility(True)
        displayNode.SetOpacity(0.75)
        return segmentationNode

    def load(self):
        """Load all available images and segmentations, then display them."""
        self.t1PreNode = self.loadVolumeIfPresent(self.t1PrePath)
        self.t1PostNode = self.loadVolumeIfPresent(self.t1PostPath)
        self.rater1SegNode = self.loadSegmentationIfPresent(self.rater1SegPath)
        self.rater2SegNode = self.loadSegmentationIfPresent(self.rater2SegPath)
        self.rater3SegNode = self.loadSegmentationIfPresent(self.rater3SegPath)
        self.show()

    def show(self):
        """Show post-op as background, pre-op as foreground, one color per rater."""
        slicer.util.setSliceViewerLayers(background=self.t1PostNode, foreground=self.t1PreNode)
        self.showSegment(self.rater1SegNode, self.GREEN)
        self.showSegment(self.rater2SegNode, self.BLUE)
        self.showSegment(self.rater3SegNode, self.RED)

    def showSegment(self, segmentationNode, color):
        """Rename and color the single segment of *segmentationNode* (skips None)."""
        if segmentationNode is None: return
        color = [c / 255 for c in color]  # convert 0-255 ints to 0-1 floats
        segment = segmentationNode.GetSegmentation().GetSegment('Segment_1')
        segment.SetName('Resection cavity')
        segment.SetColor(color)
| StarcoderdataPython |
11280173 | <filename>app_sme12/forms.py
from django import forms
from django.forms import ModelForm
from app_backend.models import Province, Amphur, Tumbol, BusinessModel, BusinessGroup, BusinessType
from app_sme12.models import Employment, Revenue, AuthorizeCapital, ProfitPrevious, Promote, FormSiteVisit, FormInterview
class RegistrationForm(forms.Form):
    """Registration form for the 12th SME competition.

    Field labels are Thai-facing UI strings; the groups below cover the
    enterprise, its owner, registration details, a contact person, the
    business classification, and competition-specific metadata.
    """
    # --- Enterprise information ---
    regis_code = forms.CharField(label='เลขสมัคร', max_length=20)
    ent_name = forms.CharField(label='ชื่อสถานประกอบการ', max_length=200)
    ent_establish_date = forms.DateField(label='วันที่จัดตั้งกิจการ')
    ent_sme_code = forms.CharField(
        label='รหัสสมาชิก สสว.', max_length=50, required=False)
    ent_sme_connext_code = forms.CharField(
        label='รหัสสมาชิก SME CONNEXT', max_length=50, required=False)
    ent_address_no = forms.CharField(label='เลขที่', max_length=100)
    ent_mu = forms.CharField(label='หมู่', max_length=100, required=False)
    ent_soi = forms.CharField(label='ซอย', max_length=100, required=False)
    ent_street = forms.CharField(label='ถนน', max_length=100, required=False)
    ent_province = forms.ModelChoiceField(
        label='จังหวัด', queryset=Province.objects.all(), empty_label='-- โปรดเลือก --')
    ent_amphur = forms.ModelChoiceField(
        label='อำเภอ', queryset=Amphur.objects.all(), empty_label='-- โปรดเลือก --')
    ent_tumbol = forms.ModelChoiceField(label='ตำบล', queryset=Tumbol.objects.all(
        ), empty_label='---------', required=False)
    ent_postcode = forms.CharField(label='รหัสไปรษณีย์', max_length=200)
    ent_tel = forms.CharField(label='โทรศัพท์', max_length=200)
    ent_fax = forms.CharField(label='โทรสาร', max_length=200, required=False)
    ent_email = forms.EmailField(label='อีเมล')
    ent_website = forms.CharField(
        label='เว็บไซต์', max_length=200, required=False)
    # --- Owner information ---
    owner_name = forms.CharField(label='ชื่อ-นามสกุล', max_length=200)
    owner_card_id = forms.CharField(
        label='เลขที่บัตรประจำตัวประชาชน', max_length=200)
    owner_address_no = forms.CharField(label='เลขที่', max_length=100)
    owner_mu = forms.CharField(label='หมู่', max_length=100, required=False)
    owner_soi = forms.CharField(label='ซอย', max_length=100, required=False)
    owner_street = forms.CharField(label='ถนน', max_length=100, required=False)
    owner_province = forms.ModelChoiceField(
        label='จังหวัด', queryset=Province.objects.all(), empty_label='-- โปรดเลือก --')
    owner_amphur = forms.ModelChoiceField(
        label='อำเภอ', queryset=Amphur.objects.all(), empty_label='-- โปรดเลือก --')
    owner_tumbol = forms.ModelChoiceField(
        label='ตำบล', queryset=Tumbol.objects.all(), empty_label='---------', required=False)
    owner_postcode = forms.CharField(label='รหัสไปรษณีย์', max_length=200)
    owner_tel = forms.CharField(label='โทรศัพท์', max_length=200)
    owner_fax = forms.CharField(label='โทรสาร', max_length=200, required=False)
    owner_mobile = forms.CharField(label='โทรศัพท์มือถือ', max_length=200)
    owner_email = forms.EmailField(label='อีเมล')
    # --- Business registration details ---
    business_model = forms.ModelChoiceField(label='รูปแบบการจัดตั้งกิจการ', queryset=BusinessModel.objects.all(
        ).order_by('model_group'), empty_label='-- โปรดเลือก --', required=False)
    bus_model_etc1 = forms.CharField(
        label='นิติบุลคลอื่นๆ', max_length=200, required=False)
    bus_model_etc2 = forms.CharField(
        label='ได้รับการจดทะเบียนอื่นๆ', max_length=200, required=False)
    juristic_id = forms.CharField(
        label='เลขทะเบียนนิติบุคคล', max_length=20, required=False)
    card_id = forms.CharField(
        label='เลขที่บัตรประชาชน', max_length=20, required=False)
    commercial_regis_number = forms.CharField(
        label='เลขทะเบียนพาณิชย์', max_length=20, required=False)
    regis_number = forms.CharField(
        label='เลขทะเบียน', max_length=20, required=False)
    # --- Contact person ---
    contact_name = forms.CharField(label='ชื่อ-นามสกุล', max_length=100)
    contact_position = forms.CharField(label='ตำแหน่ง', max_length=100)
    contact_tel = forms.CharField(label='โทรศัพท์มือถือ', max_length=30)
    contact_email = forms.EmailField(label='อีเมล')
    # --- Business classification ---
    business_type = forms.ModelChoiceField(
        label='ประเภทกิจการ', queryset=BusinessType.objects.all(), empty_label='-- โปรดเลือก --')
    business_group = forms.ModelChoiceField(
        label='ประเภทกลุ่มธุรกิจ', queryset=BusinessGroup.objects.all(), empty_label='-- โปรดเลือก --')
    # business_group_etc = forms.CharField(label='กลุ่มธุรกิจอื่นๆ', max_length=100, required=False)
    product_info = forms.CharField(
        label='ระบุสินค้า ยี่ห้อ และบริการโดยละเอียด', widget=forms.Textarea, required=False)
    material = forms.CharField(
        label='วัตถุดิบในการผลิต', widget=forms.Textarea, required=False)
    otop = forms.BooleanField(
        label='มีการผลิตสินค้า OTOP หรือไม่', required=False)
    # --- Competition-specific metadata (size, revenue, promotion channel) ---
    f_employment = forms.ModelChoiceField(
        label='การจ้างงาน', queryset=Employment.objects.all(), empty_label='-- โปรดเลือก --')
    f_employment_etc = forms.CharField(
        label='จำนวนจ้าง(คน)', max_length=10, required=False)
    f_revenue = forms.ModelChoiceField(
        label='รายได้ต่อปี', queryset=Revenue.objects.all(), empty_label='-- โปรดเลือก --')
    f_revenue_etc = forms.CharField(
        label='รายได้(บาท)', max_length=20, required=False)
    f_cap = forms.ModelChoiceField(label='ทุนจดทะเบียน', queryset=AuthorizeCapital.objects.all(
        ).order_by('code'), empty_label='-- โปรดเลือก --')
    f_profit = forms.ModelChoiceField(label='กำไร(ณ สิ้นปีก่อนหน้า)', queryset=ProfitPrevious.objects.all(
        ).order_by('code'), empty_label='-- โปรดเลือก --')
    f_promote = forms.ModelMultipleChoiceField(
        label='รับข่าวสารจากช่องทาง', queryset=Promote.objects.filter(active=True))
    f_promote_etc = forms.CharField(
        label='รับข่าวสารจากช่องทางอื่นๆ', max_length=100, required=False)
    # f_join_choice = forms.IntegerField(label='เลือกอบรมหรือประกวด', required=False)
    # f_training_course = forms.CharField(label='หลักสูตรอบรม', max_length=100, required=False)
    # f_trainee1_name = forms.CharField(label='ชื่อผู้เข้าอบรมคนที่1', max_length=100, required=False)
    # f_trainee1_id_card = forms.CharField(label='เลขบัตรประชาชนผู้เข้าอบรมคนที่1', max_length=100, required=False)
    # f_trainee1_mobile = forms.CharField(label='เบอร์มิอถือผู้เข้าอบรมคนที่1', max_length=100, required=False)
    # f_trainee1_email = forms.EmailField(label='อีเมลผู้เข้าอบรมคนที่1', required=False)
    # f_trainee2_name = forms.CharField(label='ชื่อผู้เข้าอบรมคนที่2', max_length=100, required=False)
    # f_trainee2_id_card = forms.CharField(label='เลขบัตรประชาชนผู้เข้าอบรมคนที่2', max_length=100, required=False)
    # f_trainee2_mobile = forms.CharField(label='เบอร์มิอถือผู้เข้าอบรมคนที่2', max_length=100, required=False)
    # f_trainee2_email = forms.EmailField(label='อีเมลผู้เข้าอบรมคนที่2', required=False)
    # f_agreement = forms.BooleanField(label='ยินยอมข้อตกลง', required=False)
    # f_tsic_no = forms.CharField(label='เลข tsic', max_length=20, required=False)
class SitevisitForm(ModelForm):
    """Enterprise scoring form for the site visit."""
    class Meta:
        model = FormSiteVisit
        # NOTE(review): ``fields = '__all__'`` exposes every model field;
        # consider an explicit field list if any field should not be editable.
        fields = '__all__'
class InterviewForm(ModelForm):
    """Enterprise scoring form for the interview round."""
    class Meta:
        model = FormInterview
        # NOTE(review): ``fields = '__all__'`` exposes every model field;
        # consider an explicit field list if any field should not be editable.
        fields = '__all__'
172541 | """Initialize cms app"""
default_app_config = "cms.apps.CMSConfig"
| StarcoderdataPython |
143662 | <filename>PSCP/analysis-scripts/tossconfigurationsFunc.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 10:17:49 2015
@author: nps5kd
"""
from __future__ import print_function
import sys
import optparse
import numpy as np
import math
import matplotlib # for making plots, version 'matplotlib-1.1.0-1'; errors may pop up when using earlier versions
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties as FP
import pdb
def readgro(filename, na, nm, nam):
    """Read a (possibly multi-frame) .gro trajectory file.

    Args:
        filename: path to the .gro file.
        na: total number of atoms per frame.
        nm: number of molecules per frame (may arrive as a float upstream).
        nam: number of atoms per molecule.

    Returns:
        acoord: array of shape (ngro, nm, 3, nam) with atom coordinates.
        boxv: array of shape (3, ngro) with the box vector of each frame.
        ngro: integer number of frames in the file.
    """
    with open(filename, 'r') as grofile:
        lines = grofile.readlines()
    # Each frame occupies na + 3 lines (title, atom count, na atom lines, box
    # vector). Integer division is required here: this file predates Python 3,
    # where the original ``/`` yields a float and breaks np.zeros/range below.
    ngro = len(lines) // (na + 3)
    line = 2  # first atom line of the first frame
    boxv = np.zeros([3, ngro])
    acoord = np.zeros((ngro, int(nm), 3, int(nam)))
    for gro in range(ngro):
        for mol in range(int(nm)):
            for atom in range(int(nam)):
                # Columns 3-5 of a .gro atom line are the x/y/z coordinates.
                acoord[gro, mol, 0, atom] = float(lines[line].split()[3])
                acoord[gro, mol, 1, atom] = float(lines[line].split()[4])
                acoord[gro, mol, 2, atom] = float(lines[line].split()[5])
                line += 1
        # ``np.float`` was removed from NumPy 1.24; the builtin float is the
        # documented replacement.
        boxv[:, gro] = np.array(lines[line].split()).astype(float)
        line += 3  # skip box line plus the next frame's title and count lines
    return acoord, boxv, ngro
def find_centroids(acoord, ngro, boxv, nm):
    """Per-frame molecular centroids plus frame-averaged "ideal" centroids.

    Returns (all_centroids, ideal_centroids) with shapes
    (ngro, nm, 3, 1) and (nm, 3, 1). ``boxv`` is accepted for interface
    compatibility but not used.
    """
    frame_centroids = np.zeros([ngro, nm, 3, 1])
    reference_centroids = np.zeros([nm, 3, 1])
    # Centroid of each molecule in each frame: mean over its atoms (last axis).
    for frame in range(int(ngro)):
        for mol in range(int(nm)):
            frame_centroids[frame, mol, :, 0] = np.average(acoord[frame, mol, :, :], axis=1)
    # Average over all frames; for now the ideal centroid is just that mean.
    mean_centroids = np.average(frame_centroids, axis=0)
    for mol in range(int(nm)):
        reference_centroids[mol, :, 0] = mean_centroids[mol, :, 0].copy()
    return frame_centroids, reference_centroids
def rmscalc(coord, coordPrime, gronum, nm, nam):
    """Per-atom displacement between two coordinate sets for frame *gronum*.

    Args:
        coord, coordPrime: arrays of shape (ngro, nm, 3, nam).
        gronum: frame index to compare.
        nm, nam: molecule/atom counts, kept for interface compatibility;
            the input array shapes already encode them.

    Returns:
        Array of shape (nm, nam): Euclidean distance between each atom's
        position in ``coord`` and ``coordPrime`` for the chosen frame.
    """
    # Vectorized replacement of the original triple Python loop. The
    # intermediate abs() was redundant: |d|^2 == d^2.
    delta = coord[gronum] - coordPrime[gronum]
    return np.sqrt(np.sum(delta ** 2, axis=1))
def writegrofile(output, lines, numframe, nm, nam, coord, boxv):
    """Write *numframe* frames of *coord* to *output* in .gro-like format.

    Atom names are recycled from the input file's lines; the same box
    vector block is written after every frame.
    """
    outgrofile = open(output, 'w')
    # Write output .gro file
    # tacount is the total atom count
    # mcount is the total molecule count
    x = np.zeros(3)
    na = nm * nam
    molname = 'BNZ'  # residue name is hard-coded to benzene
    for gro in range(int(numframe)):
        # outgrofile.write('Converted from ' + sys.argv[1] + ' by ' + sys.argv[2]+', molecule ' + str(gro) + '\n')
        outgrofile.write('Frame number' + str(gro) + '\n')
        outgrofile.write(' ' + str(na) + '\n')
        tacount = 0
        mcount = 0
        for mol in range(int(nm)):
            for atom in range(int(nam)):
                # mcount is the current molecule tracker
                mcount = mol + 1
                tacount += 1
                xstr = []
                for d in range(3):
                    # x[d] = round(acoordBprime[mol,d,atom],8)
                    x[d] = round(coord[gro, mol, d, atom], 8)
                    xstr.append("%13.8f" % (x[d]))
                # Atom name is taken from the matching line of the source file
                # (lines[tacount + 1], column 1).
                line = str(mcount).rjust(5) + molname + lines[tacount + 1].split()[1].rjust(7) + \
                    str(tacount).rjust(5) + xstr[0] + xstr[1] + xstr[2] + '\n'
                outgrofile.write(line)
        # NOTE(review): this indexes boxv as a 1-D, length-3 vector, while
        # readgro produces a (3, ngro) array -- with multiple frames
        # round(boxv[d], 8) would receive a whole row; confirm callers pass
        # a single frame's box vector here.
        boxvline = ''
        for d in range(len(boxv)):
            bline = ("%13.8f" % (round(boxv[d], 8)))
            boxvline += bline
        outgrofile.write(boxvline+'\n')  # make sure the .gro ends on a newline
    outgrofile.close()
def normalcompare(gro1, gro2):
    """Angle (radians) between the ring-plane normals of the first 4 molecules.

    For each of molecules 0-3, the plane is spanned by atoms 0, 4 and 8;
    the returned array holds the angle between the two configurations'
    plane normals.
    """
    angles = np.zeros([4, ])
    for mol in range(4):
        # Three ring carbons (atom indices 0, 4, 8) span each molecular plane.
        a0, a4, a8 = gro1[mol, :, 0], gro1[mol, :, 4], gro1[mol, :, 8]
        b0, b4, b8 = gro2[mol, :, 0], gro2[mol, :, 4], gro2[mol, :, 8]
        normal_a = np.cross(a4 - a0, a8 - a0)
        normal_b = np.cross(b4 - b0, b8 - b0)
        cos_theta = np.dot(normal_a, normal_b) / (np.linalg.norm(normal_a) * np.linalg.norm(normal_b))
        angles[mol] = np.arccos(cos_theta)
    return angles
# MAIN BODY
def tossconfigs(inputname, outputname, restraint):
    """Drop trajectory frames whose molecules have flipped relative to *restraint*.

    Reads a multi-frame .gro trajectory (*inputname*) and a single-frame
    reference (*restraint*), compares the ring-plane normals of the first
    four molecules in each frame against the reference, removes frames where
    any of those normals deviates by 45-135 degrees, and writes the surviving
    frames to *outputname*. Returns the array of deleted frame indices.
    """
    file = open(inputname, 'r')
    lines = file.readlines()
    #lines = filter(None, (line.rstrip() for line in file))
    file.close()
    nam = 12  # atoms per molecule (benzene)
    nm = 72
    # Read in the total number of atoms in the system (should be the only item on the second line of the .gro file)
    print(lines[1])
    if len(lines[1].split()) == 1:
        # na is the number of total atoms
        na = int(lines[1].split()[0])
        # nm is the number of total molecules
        # NOTE(review): this relies on Python-2 integer division semantics
        # (see the ``print_function`` import at file top); under Python 3
        # ``na / nam`` is a float -- confirm downstream int() conversions cover it.
        nm = na / nam
    else:
        sys.exit('Unexpected .gro file format')
    coord, boxv, ngro = readgro(inputname, na, nm, nam)
    coord2, boxv, ngro2 = readgro(restraint, na, nm, nam)
    cshape = np.shape(coord)
    # icoord replicates the first restraint frame across every trajectory frame.
    icoord = np.zeros(cshape)
    for x in range(0, ngro):
        icoord[x, :, :, :] = coord2[0, :, :, :]
    #todel1= []
    todel2 = []
    allcent, idealcent = find_centroids(coord, ngro, boxv, nm)
    # Shift both coordinate sets to each frame's centroid frame of reference.
    for x in range(ngro):
        for mol in range(nam):
            coord[x, :, :, mol] = coord[x, :, :, mol] - allcent[x, :, :, 0]
            icoord[x, :, :, mol] = icoord[x, :, :, mol] - allcent[0, :, :, 0]
    # Plane-normal deviation (degrees) of molecules 0-3, per frame.
    maxangles = np.zeros([ngro, 4])
    for x in range(ngro):
        maxangles[x, :] = normalcompare(coord[x, :, :, :], icoord[x, :, :, :]) * (180 / np.pi)
    # Inert debug block kept from the original (histogram inspection of the
    # four angle distributions); it is a bare string literal and never runs.
    """
    fig=plt.figure(0)
    plt.hist(maxangles[:ngro,0],40,alpha=0.3)
    plt.show()
    pdb.set_trace()
    plt.hist(maxangles[:ngro,1],40,alpha=0.3)
    plt.show()
    pdb.set_trace()
    plt.hist(maxangles[:ngro,2],40,alpha=0.3)
    plt.show()
    pdb.set_trace()
    plt.hist(maxangles[:ngro,3],40,alpha=0.3)
    plt.show()
    """
    # Flag frames where any molecule's plane deviates between 45 and 135 deg.
    for x in range(ngro):
        for y in range(4):
            if maxangles[x, y] < 135 and maxangles[x, y] > 45:
                if x not in todel2:
                    todel2 = np.append(todel2, x)
    # Undo the centroid shift before writing coordinates back out.
    for x in range(ngro):
        for mol in range(nam):
            coord[x, :, :, mol] = coord[x, :, :, mol] + allcent[x, :, :, 0]
            icoord[x, :, :, mol] = icoord[x, :, :, mol] + allcent[0, :, :, 0]
    newgro = np.delete(coord, todel2, 0)
    frames = np.shape(newgro)[0]
    writegrofile(outputname, lines, frames, nm, nam, newgro, boxv)
    print(todel2)
    return todel2
| StarcoderdataPython |
8110247 | <filename>directives.py
# Define a new directive `code-block` (aliased as `sourcecode`) that uses the
# `pygments` source highlighter to render code in color.
#
# Incorporates code from the `Pygments`_ documentation for `Using Pygments in
# ReST documents`_ and `Octopress`_.
#
# .. _Pygments: http://pygments.org/
# .. _Using Pygments in ReST documents: http://pygments.org/docs/rstdirective/
# .. _Octopress: http://octopress.org/
from pygments.lexers import get_lexer_by_name, TextLexer
from pygments import highlight
from docutils.parsers.rst import directives, Directive
from docutils import nodes
from pygments.formatters import HtmlFormatter
import re
import os
import hashlib
import __main__
# Absolute path to pygments cache dir, resolved relative to the entry-point
# script (two levels up from __main__.__file__).
PYGMENTS_CACHE_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__main__.__file__), '../../.pygments-cache'))
# Ensure cache dir exists
if not os.path.exists(PYGMENTS_CACHE_DIR):
    os.makedirs(PYGMENTS_CACHE_DIR)
class Pygments(Directive):
    """Source code syntax highlighting.

    Highlights the directive content with Pygments and wraps it in an
    Octopress-style ``<figure class="code">`` block with an optional
    caption/link. Rendered HTML is cached on disk, keyed by lexer name
    and an MD5 digest of the content.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    string_opts = ['title', 'url', 'caption']
    option_spec = dict([(key, directives.unchanged) for key in string_opts])
    has_content = True

    def run(self):
        self.assert_has_content()
        try:
            lexer_name = self.arguments[0]
            lexer = get_lexer_by_name(lexer_name)
        except ValueError:
            # no lexer found - use the text one instead of an exception
            lexer_name = 'text'
            lexer = TextLexer()
        formatter = HtmlFormatter()
        # Cache key: lexer name plus a digest of the UTF-8 encoded content.
        content_text = u'\n'.join(self.content).encode('utf-8')
        cache_file_name = '%s-%s.html' % (lexer_name,
                                          hashlib.md5(content_text).hexdigest())
        cached_path = os.path.join(PYGMENTS_CACHE_DIR, cache_file_name)
        # Use the cached render when available, otherwise highlight and cache.
        # (The original kept the read-branch file handle open without ever
        # closing it -- a descriptor leak; ``with`` closes both branches.)
        if os.path.exists(cached_path):
            with open(cached_path, 'r', encoding='utf-8') as cache_file:
                parsed = cache_file.read()
        else:
            parsed = highlight(content_text, lexer, formatter)
            with open(cached_path, 'w', encoding='utf-8') as cache_file:
                cache_file.write(parsed)
        # Strip pre tag and everything outside it.
        # (Raw string: the original "\/" was an invalid escape sequence.)
        pres = re.compile(r"<pre>(.+)</pre>", re.S)
        stripped = pres.search(parsed).group(1)
        # Add wrapper with optional caption and link
        code = '<figure class="code">'
        if self.options:
            caption = ('<span>%s</span>' %
                       self.options['caption']) if 'caption' in self.options else ''
            title = self.options['title'] if 'title' in self.options else 'link'
            link = ('<a href="%s">%s</a>' %
                    (self.options['url'], title)) if 'url' in self.options else ''
            if caption or link:
                code += '<figcaption>%s %s</figcaption>' % (caption, link)
        code += f'<pre class="hll">{stripped}</pre></figure>'
        return [nodes.raw('', code, format='html')]
# Register the directive under both names; `sourcecode` is an alias of
# `code-block`.
directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments)
| StarcoderdataPython |
4895142 | <gh_stars>0
import re
import numpy as np
import networkx as nx
from shapely.geometry.polygon import LinearRing
import drawSvg as draw
#some helper functions
def normal_length1(vector, side):
    """Unit normal of *vector*, flipped by *side* (+1/-1).

    Points "usually outside of the molecule", per the original intent.
    """
    unit = vector / np.linalg.norm(vector)
    # Rotate the unit vector by 90 degrees; *side* picks the rotation sense.
    return np.array([-side * unit[1], side * unit[0]])
def calc_degree(vector1, vector2):
    """Angle in degrees between two vectors (dot-product cosine formula)."""
    denominator = np.linalg.norm(vector1) * np.linalg.norm(vector2)
    cos_alpha = np.dot(vector1, vector2) / denominator
    return np.arccos(cos_alpha) * 180 / np.pi
def deg_to_rad(deg):
    """Convert an angle from degrees to radians."""
    return deg*np.pi/180
#all other main functions
def comprehend_string(string, orient):
    """
    string: kernel string
    orient: on default 1 which is clockwise orientation, but -1 is accepted as ccw

    Cuts up the kernel string with a regex and saves the info into an array.
    Orientation is saved for each domain. By checking which bracket is paired
    with which, pairedness is saved into a dictionary. A list of lists is
    created which will hold the domain types and will be filled in later.

    Returns (dom_raw_list, struct_info_array, paired_dict, name_final_lol).
    """
    #separate domains/count
    pattern = re.compile('@?-?[0-9]* ?[a-z|A-Z|0-9|\*|\)]+[ \(]? ?\+?|\) ?\+?')
    dom_raw_list = pattern.findall(string)
    dom_count = len(dom_raw_list)
    #create empty data structures
    struct_info_array = np.zeros((dom_count, 2), dtype = 'int') #3 cols rn, sb not needed here
    #empty angles indicated by -999 (col 0 = angle, col 1 = orientation)
    for row in range(dom_count):
        struct_info_array[row, 0] = -999
        struct_info_array[row, 1] = orient
    name_list1 = []
    #loop through the raw domains, extract angle annotations and names
    for dom_i in range(len(dom_raw_list)):
        dom = dom_raw_list[dom_i]
        dom = dom.strip()
        #fill table
        if re.search("\s", dom):
            dom_parts = re.split("\s", dom)
            for segm in dom_parts:
                if segm[0] == "@": #col0 angle
                    segm = segm.lstrip("@")
                    struct_info_array[dom_i, 0] = int(segm) #col0 to write to
                    #struct_info_array[dom_i, 1] = np.sign(int(segm)) #col1 to write to
                #elif segm is "+": #col2 strand break
                #    struct_info_array[dom_i, 2] = 1 #col2 to write to
                elif segm != "+": #else:
                    name_list1.append(segm) #only keep the name part further
        else:
            name_list1.append(dom)
    #pairedness dictionary: match each '(' domain with its closing ')' domain
    #by bracket counting
    paired_dict = {}
    for dom_i2 in range(len(dom_raw_list)):
        if re.search("\(", dom_raw_list[dom_i2]):
            bracket_count = 1
            for next_dom in range(dom_i2 + 1, len(dom_raw_list)):
                if re.search("\(", dom_raw_list[next_dom]):
                    bracket_count += 1
                elif re.search("\)", dom_raw_list[next_dom]):
                    bracket_count -= 1
                if bracket_count == 0:
                    paired_dict[dom_i2] = next_dom
                    break
    #name list of list / finding hairpin loops
    #paireds in dict, hairpin loop known, others are unpaired -> substructure from these?
    name_final_lol = []
    for dom_i3 in range(len(name_list1)):
        #paireds
        if re.search("\(", name_list1[dom_i3]):
            typ = "paired"
            name = name_list1[dom_i3].strip("(+ ")
        #correcting closing bracket name: derive it from the opening partner,
        #toggling the complement marker '*'
        elif re.search("\)", name_list1[dom_i3]):
            typ = "paired"
            index_of_pair = list(paired_dict.keys())[list(paired_dict.values()).index(dom_i3)]
            if re.search("\*", name_list1[index_of_pair]):
                name = name_list1[index_of_pair].strip("\(").strip("\*")
            else:
                name = name_list1[index_of_pair].strip("\(") + "*"
        #all others are unpaireds
        else:
            typ = "unpaired"
            name = name_list1[dom_i3].strip("+ ")
        name_final_lol.append([name, typ])
    return dom_raw_list, struct_info_array, paired_dict, name_final_lol
def process_length_input(struct_info_array, length_input_dict):
    """Append a length column to ``struct_info_array``.

    ``length_input_dict`` maps a domain index to its length; domains not
    listed get length 0. Returns a new array with the extra column.
    """
    n_doms = len(struct_info_array[:, 1])
    lengths = [0] * n_doms
    for dom_index, dom_length in length_input_dict.items():
        lengths[dom_index] = dom_length
    length_col = np.array(lengths).reshape((n_doms, 1))
    return np.append(struct_info_array, length_col, axis=1)
def create_skeleton(dom_raw_list, paired_dict, name_final_lol):
    """Build the skeleton graph of the structure.

    Red edges connect consecutive domains along the backbone (unless a
    strand break '+' intervenes); green edges connect paired domains.
    """
    skeleton = nx.Graph()
    for idx in range(len(name_final_lol) - 1):
        if "+" not in dom_raw_list[idx]:
            skeleton.add_edge(idx, idx + 1, color='r')
    for opening, closing in paired_dict.items():
        skeleton.add_edge(opening, closing, color='g')
    return skeleton
def find_type_of_unpaired(G, paired_dict, name_final_lol):
    """Classify unpaired domains by the cycles they sit in.

    Mutates ``name_final_lol`` in place, relabeling unpaired entries as
    "hairpin loop", "bulgeloop", "multiloop", "unhinged"/"unhinged sequence"
    or "ambiguous". Returns (name_final_lol, multiloop_list, cycle_list_G).
    """
    #index of ununpaireds - these don't need to be checked
    paired_node_list = []
    for node in paired_dict:
        paired_node_list.append(node)
        paired_node_list.append(paired_dict[node])
    #get unpaired too
    unpaired_node_list = []
    for node_index in range(len(name_final_lol)):
        if node_index not in paired_node_list:
            unpaired_node_list.append(node_index)
    #cycles of the skeleton graph
    cycle_list_G = nx.cycle_basis(G) #neat!
    multiloop_list = []
    #look through cycles, where 3cycles are hairpin, 5c. are bulge and longer ones nest multiloops
    for cycle in cycle_list_G:
        if len(cycle) == 3: #always hairpin
            for item in cycle:
                if item in unpaired_node_list:
                    name_final_lol[item][1] = "hairpin loop" #name altering
                    unpaired_node_list.remove(item) #trim unpaired list from known items
        # NOTE(review): ``&`` is bitwise and binds tighter than the
        # comparisons, so this parses as the chained comparison
        # len(cycle) > (len(cycle) % 2) == 1, i.e. "odd-length cycle";
        # ``and`` with parentheses was presumably intended -- confirm.
        elif len(cycle) > 3 & len(cycle) % 2 == 1: #current bulge def
            unpaired_here = []
            for item in cycle:
                if item in unpaired_node_list:
                    unpaired_here.append(item)
            if len(unpaired_here) == 1:
                name_final_lol[unpaired_here[0]][1] = "bulgeloop" #name altering
                unpaired_node_list.remove(unpaired_here[0]) #trim unpaired list from known items
        #find multiloops, the corresponding domains are their own list item in multiloop_list
        if len(cycle) >= 6:
            curr_multiloop = []
            for index in range(len(cycle)):
                if cycle[index] in unpaired_node_list:
                    if cycle[index-3] in unpaired_node_list:
                        connections = [n for n in G.edges.data(nbunch=cycle[index-1])]
                        for edge in connections:
                            #only need to check paired edge between these2
                            if edge[1] == cycle[index-2] and edge[2]['color'] == 'g':
                                name_final_lol[cycle[index-3]][1] = "multiloop"
                                name_final_lol[cycle[index]][1] = "multiloop" #altering names
                                if cycle[index] not in curr_multiloop:
                                    curr_multiloop.append(cycle[index])
                                if cycle[index-3] not in curr_multiloop:
                                    curr_multiloop.append(cycle[index-3])
            if len(curr_multiloop) > 0:
                multiloop_list.append(curr_multiloop)
                for dom in curr_multiloop:
                    unpaired_node_list.remove(dom) #trim unpaired list from known items
    #unhinged sequence: walk outward from degree-1 unpaired nodes
    for node_index in unpaired_node_list:
        #first neighbors
        direct_neigh = [n for n in G.neighbors(node_index)] #neighbors of current node
        #start from unhinged and go until all neighbors are unpaired
        if len(direct_neigh) == 1:
            name_final_lol[node_index][1] = "unhinged"
            next_neigh = direct_neigh
            before = [node_index]
            while len(next_neigh) == 1 and next_neigh[0] in unpaired_node_list:
                name_final_lol[next_neigh[0]][1] = "unhinged sequence"
                before.append(next_neigh[0])
                next_neigh = [n for n in G.neighbors(next_neigh[0])]
                next_neigh.remove(before[-2])
            for step in before:
                unpaired_node_list.remove(step) #bug with this line, cuts out 20 below, could implement it a bit diff
    #remaining is unknown but probably linear
    for remain in unpaired_node_list:
        name_final_lol[remain][1] = "ambiguous"
    return name_final_lol, multiloop_list, cycle_list_G
def resolve_domains_in_cycles(struct_info_array, paired_dict, skeleton_graph):
"""
This function handles a lot of stuff, separated in inner functions. We take cycles and assume their geometry
with trigonometric rules and fill in the information from the string.
"""
cycle_list_G = nx.cycle_basis(skeleton_graph)
paired_node_list = []
for node in paired_dict:
paired_node_list.append(node)
paired_node_list.append(paired_dict[node])
def give_pair_of_domain(dom_index, paired_dict):
if dom_index in paired_dict:
return paired_dict[dom_index]
elif dom_index in list(paired_dict.values()):
index_of_pair = list(paired_dict.keys())[list(paired_dict.values()).index(dom_index)]
return index_of_pair
else:
raise ValueError('not actually paired')
def initiate_polygon_table(cycle, struct_info_array, paired_dict, paired_node_list):
"""
Create table for said polygon, the angles for the polygon depend only on relative angles from the string
"""
cycle.sort()
polygon_array = np.zeros((int(polygon_sides), 3), dtype='float')
paired_dist = 10 #could be changed
curr_row = 0
for cyc_index in range(len(cycle)):
input_ang = struct_info_array[cycle[cyc_index], 0] #rel. angle
side = struct_info_array[cycle[cyc_index], 1]
if cycle[cyc_index] not in paired_node_list: #at the start of an unpaired there is always a PA
polygon_array[curr_row, 0] = cycle[cyc_index] #remember dom name to write back to
polygon_array[curr_row, 2] = struct_info_array[cycle[cyc_index], 2] #dom length
curr_row += 1
if cycle[cyc_index-1] not in paired_node_list: #unpaired-unpaired
if input_ang == -999:
poly_ang = 0
else:
poly_ang = 180 + input_ang*side
polygon_array[curr_row, 1] = poly_ang
else: #unpaired - paired(prev)
if input_ang == -999:
poly_ang = 0
else:
poly_ang = 90 + input_ang*side
polygon_array[curr_row, 1] = poly_ang
else:
pair_of_curr = give_pair_of_domain(cycle[cyc_index], paired_dict)
if cycle[cyc_index - 1] not in paired_node_list: #paired-unpaired
if input_ang == -999:
poly_ang = 0
else:
poly_ang = 90 + input_ang*side
polygon_array[curr_row, 0] = cycle[cyc_index] #remember dom name to write back to
polygon_array[curr_row, 1] = poly_ang
polygon_array[curr_row, 2] = paired_dist #dom length
curr_row += 1 #update row in polygon_array
elif cycle[cyc_index - 1] in paired_node_list and cycle[cyc_index - 1] != pair_of_curr: #paired-paired
if input_ang == -999:
poly_ang = 0
else:
poly_ang = np.abs(input_ang)
polygon_array[curr_row, 0] = cycle[cyc_index] #remember dom name to write back to
polygon_array[curr_row, 1] = poly_ang
polygon_array[curr_row, 2] = paired_dist #dom length
curr_row += 1 #update row in polygon_array
return polygon_array
def indicate_polygon_case(polygon_array):
"""
Based on arbitrary rules involving properties already in the polygon table, this function determines,
in which class the current loop belongs, hence with which algorithm we want to solve it.
"""
angles_not_given = 0
length_not_given = 0
case1 = False
for row in range(len(polygon_array[:,0])):
if polygon_array[row-1, 2] == 0 and polygon_array[row-1, 1] == 0 and polygon_array[row, 1] == 0:
case1 = True
if polygon_array[row, 2] == 0:
length_not_given += 1
if polygon_array[row, 1] == 0:
angles_not_given += 1
#case1
if case1 == True:
return "case 1"
else:
if length_not_given >= 2: #parallelness needs to be solved in case2calc function
return "case 2"
elif length_not_given == 1:
if angles_not_given >= 2:
return "case 3"
else:
return "case 1" #1l 0-1a
else: #all lengths known - like crossroads!
return "case 4"
def case1calc(polygon_array):
    """
    Calculates points of a vector polygon where at least 1 edge length is unknown, we start
    drawing from (0, 0) with the next domain after this choosen 'loose edge'. We assume
    angles and length for every cell other than this loose end. Calculate the vertices
    and calculate the last edge length and two last angles.

    NOTE(review): ``start`` is only assigned when a loose edge (prev length and
    both adjacent angles unknown) exists; this appears guaranteed by
    indicate_polygon_case() returning "case 1" before this is called — confirm.
    """
    def resort_list_to_start_by_spec_value(dom_index, list_with_value):
        # Cyclically rotate list_with_value so that it begins at dom_index.
        index = list_with_value.index(dom_index)
        sorted_list = []
        for i in range(len(list_with_value)):
            sorted_list.append(list_with_value[index-len(list_with_value)+i])
        return sorted_list
    n_poly = len(polygon_array[:,0])
    given_ang_list = []
    # Locate the loose-edge start row and collect all known angles.
    for row in range(n_poly):
        # row-1 wraps around to the last row via numpy negative indexing.
        if polygon_array[row-1, 2] == 0 and polygon_array[row-1, 1] == 0 and polygon_array[row, 1] == 0:
            start = row
        if polygon_array[row, 1] != 0:
            given_ang_list.append(polygon_array[row, 1])
    #distribute remaining angles
    # interior angle sum of an n-gon is (n - 2) * 180 degrees
    poly_angles_sum = (n_poly - 2) * 180
    remaining_average = (poly_angles_sum - sum(given_ang_list))/(n_poly - len(given_ang_list))
    for row in range(n_poly):
        if polygon_array[row, 1] == 0:
            polygon_array[row, 1] = remaining_average #set all unknown angles for this mean value
    #get length/angle mean ratio
    known_len_angle_sum = []
    for row in range(n_poly):
        if polygon_array[row-1, 2] != 0: #non-null length
            # edge length weighted by the sum of its two adjacent angles
            len_angle_sum_multip_here = (polygon_array[row-1, 1] + polygon_array[row, 1]) * polygon_array[row-1, 2]
            known_len_angle_sum.append(len_angle_sum_multip_here)
    len_angle_ratio = np.mean(known_len_angle_sum)
    #fill length col by using len_angle_ratio
    for row in range(n_poly):
        if polygon_array[row-1, 2] == 0: #null length! :)
            new_len = len_angle_ratio / (polygon_array[row-1, 1] + polygon_array[row, 1])
            polygon_array[row-1, 2] = new_len
    #get drawing order by resorting index list
    index_list = [n for n in range(n_poly)]
    resorted_list = resort_list_to_start_by_spec_value(start, index_list)
    resorted_list.pop() #remove last item which is the 'loose end' domain
    #the table is filled and we can start calculating coords at domain 'start'
    coords = np.array([[0, 0]])
    for index_in_list in range(len(resorted_list)):
        ang_here = polygon_array[resorted_list[index_in_list], 1]
        len_here = polygon_array[resorted_list[index_in_list], 2]
        if index_in_list == 0:
            # first edge drawn along the positive x axis from the origin
            curr_coords = np.array([[len_here, 0]])
            abs_ang = 0
        else:
            abs_ang += 180-ang_here #abs angle is the outside angle of the polygon, adding them up
            curr_coords = coords[-1] + np.array([[np.cos(deg_to_rad(abs_ang)) * len_here,
                                                  np.sin(deg_to_rad(abs_ang)) * len_here]])
        coords = np.append(coords, curr_coords, axis=0)
    #the last prev_coords we get and [0, 0] adds the last side of the polygon
    len_last = np.linalg.norm(coords[-1])
    polygon_array[start-1, 2] = len_last
    #angle at start-1
    vector1 = coords[-2] - coords[-1]
    vector2 = coords[0] - coords[-1]
    angle = calc_degree(vector1, vector2)
    polygon_array[start-1, 1] = angle
    #angle at start
    vector1 = coords[-1] - coords[0]
    vector2 = coords[1] - coords[0]
    angle = calc_degree(vector1, vector2)
    polygon_array[start, 1] = angle
    return polygon_array
def feed_polyarr_to_structarray(struct_info_array, polygon_array, cycle, paired_node_list):
    """Write calculated relative angles (and, for unpaired domains, lengths)
    from the solved polygon table back into the structure-info array (SIA).

    The relative-angle formula depends on whether the current domain and its
    predecessor in the cycle are paired or unpaired.
    """
    for row in range(len(polygon_array[:, 0])):
        dom = int(polygon_array[row, 0])
        angle = polygon_array[row, 1]
        length = polygon_array[row, 2]
        side = struct_info_array[dom, 1]
        prev_dom = cycle[cycle.index(dom) - 1]
        prev_paired = prev_dom in paired_node_list
        if dom not in paired_node_list:
            # unpaired domains also take their solved length from the polygon
            struct_info_array[dom, 2] = length
            # unpaired-unpaired subtracts 180, unpaired after paired subtracts 90
            offset = 90 if prev_paired else 180
            struct_info_array[dom, 0] = (angle - offset) / side
        elif not prev_paired:
            # paired domain following an unpaired one
            struct_info_array[dom, 0] = (angle - 90) / side
        else:
            # paired-paired (only the relevant ones were put in the table)
            struct_info_array[dom, 0] = angle * side
    return struct_info_array
#MAIN LOOP
for cycle in cycle_list_G:
#treat hairpin and paired into paired separately ~ no polygon there
polygon_sides = 0
for item in cycle:
if item in paired_node_list:
polygon_sides += 0.5
else:
polygon_sides += 1
if polygon_sides == 2: #special cases
if len(cycle) == 3: #hairpin, nothing to do here
pass
elif len(cycle) == 4: #paired into paired, can fill in two 0 angles
cycle.sort() #the domains will be after this always the secomd and fourth in cycle
struct_info_array[cycle[1], 0] = 0
struct_info_array[cycle[3], 0] = 0
elif polygon_sides >= 3: #has a polygon
polygon_array = initiate_polygon_table(cycle, struct_info_array, paired_dict, paired_node_list)
if indicate_polygon_case(polygon_array) == "case 1":
polygon_array = case1calc(polygon_array)
elif indicate_polygon_case(polygon_array) == "case 2":
pass
elif indicate_polygon_case(polygon_array) == "case 3":
pass
elif indicate_polygon_case(polygon_array) == "case 4":
pass
#add calculated info back to SIA
struct_info_array = feed_polyarr_to_structarray(struct_info_array, polygon_array, cycle, paired_node_list)
return struct_info_array
#determine order of traversal (order of domains to build up)
def determine_contstruct_order(skeleton_graph, paired_dict):
    """
    Determine the order in which domains are built up (drawn).

    Starting from domain 0, each visited node first pulls in its pair
    partner, then the members of its (largest) cycle, then its remaining
    graph neighbors.  Returns the list of domain indices in traversal order.

    NOTE: the main ``for node in traverse_order`` loop deliberately iterates
    over the list while it is being extended — Python's list iterator picks
    up the inserted/appended items on later iterations.
    """
    cycle_list_G = nx.cycle_basis(skeleton_graph)
    # flat list containing both members of every pair
    paired_node_list = []
    for node in paired_dict:
        paired_node_list.append(node)
        paired_node_list.append(paired_dict[node])
    def find_cycle_where_index_belongs(dom_index, cycle_list_G): #now finds largest cycle
        found = []
        for cycle in cycle_list_G:
            if dom_index in cycle and len(cycle) > len(found):
                found = cycle
        return found
    def resort_list_to_start_by_spec_value(dom_index, list_with_value):
        # Cyclically rotate list_with_value so that it begins at dom_index.
        index = list_with_value.index(dom_index)
        sorted_list = []
        for i in range(len(list_with_value)):
            sorted_list.append(list_with_value[index-len(list_with_value)+i])
        return sorted_list
    def give_pair_of_domain(dom_index, paired_dict):
        # Look up the pair partner regardless of which side of the dict it is on.
        if dom_index in paired_dict:
            return paired_dict[dom_index]
        elif dom_index in list(paired_dict.values()):
            index_of_pair = list(paired_dict.keys())[list(paired_dict.values()).index(dom_index)]
            return index_of_pair
        else:
            raise ValueError('not actually paired')
    #
    traverse_order = [0]
    for node in traverse_order:
        current_in_travord = traverse_order.index(node)
        counter = 0 #helps with insert index
        #pair primary
        if node in paired_node_list:
            pair_first = give_pair_of_domain(node, paired_dict) #pair first!
            if pair_first not in traverse_order:
                traverse_order.insert(current_in_travord + 1, pair_first)
                counter += 1
        #cycle secondary
        its_cycle = find_cycle_where_index_belongs(node, cycle_list_G)
        if len(its_cycle) != 0: #if in cycle
            its_cycle.sort() #sort it first, bugfix
            #resort so current is at first place
            resorted_cycle = resort_list_to_start_by_spec_value(node, its_cycle)
            for item in resorted_cycle:
                if item not in traverse_order:
                    traverse_order.insert(current_in_travord + counter + 1, item)
                    counter += 1
        #other neighbor tertiary
        neighbors = skeleton_graph.neighbors(node)
        for neighbor in neighbors: #other neighbor third!
            if neighbor not in traverse_order:
                traverse_order.append(neighbor)
    return traverse_order
def stepwise_buildup(struct_info_array2, paired_dict, traverse_order, skeleton_graph):
    """
    Taking the more-or-less filled SIA, we go along the traversing/drawing order, assume lengths and angles for
    unpaired, out-of-loop domains and loop starters.

    :param struct_info_array2: SIA with columns (rel. angle, side, length);
        an absolute-angle column is appended locally as column 3.
    :param paired_dict: mapping of one pair member to the other
    :param traverse_order: drawing order from determine_contstruct_order()
    :param skeleton_graph: domain connectivity graph ('r'-colored edges are
        backbone connections)
    :return: (n_domains, 4) array of segment endpoints (x0, y0, x1, y1)
    """
    paired_dist = 10  # fixed gap (drawing units) between the strands of a pair
    dom_count = len(traverse_order)
    # one row per domain: x0, y0, x1, y1 of the domain's line segment
    coordinate_array = np.zeros((dom_count, 4))
    #add a new col for absolute angle values
    struct_info_array2 = np.append(struct_info_array2, [[0] for i in range(len(struct_info_array2[:,0]))], axis=1)
    #average over given lengths to get a default unpaired length
    n_given_len = 0
    sum_length = 0
    for length in struct_info_array2[:,2]:
        if length != 0:
            n_given_len += 1
            sum_length += length
    if n_given_len == 0:
        default_length = 30
    else:
        default_length = sum_length/n_given_len
    #paired list as before
    paired_node_list = []
    for node in paired_dict:
        paired_node_list.append(node)
        paired_node_list.append(paired_dict[node])
    def get_red_neigh(domain_index, skeleton_graph):
        # Neighbors connected by 'r'-colored (backbone) edges only.
        connections = [n for n in skeleton_graph.edges.data(nbunch=domain_index)]
        red_neigh = []
        for edge in connections:
            # BUGFIX: compare string values with '==' instead of identity
            # ('is'), which only worked by virtue of CPython string interning.
            if edge[2]['color'] == 'r':
                red_neigh.append(edge[1])
        return red_neigh
    def give_pair_of_domain(dom_index, paired_dict):
        # Look up the pair partner regardless of which side of the dict it is on.
        if dom_index in paired_dict:
            return paired_dict[dom_index]
        elif dom_index in list(paired_dict.values()):
            index_of_pair = list(paired_dict.keys())[list(paired_dict.values()).index(dom_index)]
            return index_of_pair
        else:
            raise ValueError('not actually paired')
    def get_preferred_angle(domain_index, paired_node_list, skeleton_graph, traverse_order): #would be better with name_final
        # Default relative angle (degrees) chosen by the paired/unpaired status
        # of the domain and the backbone neighbor it attaches to.
        neighbors = get_red_neigh(domain_index, skeleton_graph)
        if min(neighbors) == domain_index-1: #connected to prev dom
            if traverse_order.index(min(neighbors)) < traverse_order.index(domain_index):
                if min(neighbors) in paired_node_list and domain_index in paired_node_list:
                    pref_angle = 60
                elif min(neighbors) in paired_node_list and domain_index not in paired_node_list:
                    pref_angle = 90
                elif min(neighbors) not in paired_node_list and domain_index in paired_node_list:
                    pref_angle = 90
                elif min(neighbors) not in paired_node_list and domain_index not in paired_node_list:
                    pref_angle = 0
            elif traverse_order.index(min(neighbors)) > traverse_order.index(domain_index): #if backwards (SB case)
                if max(neighbors) in paired_node_list and domain_index in paired_node_list:
                    pref_angle = 60
                elif max(neighbors) in paired_node_list and domain_index not in paired_node_list:
                    pref_angle = 45
                elif max(neighbors) not in paired_node_list and domain_index in paired_node_list:
                    pref_angle = 45
                elif max(neighbors) not in paired_node_list and domain_index not in paired_node_list:
                    pref_angle = 180
        else: #not connected to prev dom, so only to next dom
            if min(neighbors) in paired_node_list and domain_index in paired_node_list:
                pref_angle = 60
            elif min(neighbors) in paired_node_list and domain_index not in paired_node_list:
                pref_angle = 45
            elif min(neighbors) not in paired_node_list and domain_index in paired_node_list:
                pref_angle = 45
            elif min(neighbors) not in paired_node_list and domain_index not in paired_node_list:
                pref_angle = 180
        return pref_angle
    def calc_coords_of_paired2(domain_index, struct_info_array2, coordinate_array):
        # Second strand of a pair: offset the already-drawn partner segment by
        # paired_dist along the inward normal and reverse its direction.
        side = struct_info_array2[domain_index, 1]
        x0p, y0p, x1p, y1p = coordinate_array[give_pair_of_domain(domain_index, paired_dict)]
        pair1vec = np.array([x1p - x0p, y1p - y0p])
        #direction of normal_vec has to be opposite of the default normal_vec, as that points outside and this inside
        x0, y0 = np.array([x1p, y1p]) + normal_length1(pair1vec, side) * paired_dist *-1
        x1, y1 = np.array([x0, y0]) + pair1vec * -1
        return x0, y0, x1, y1
    def calculate_other_two_coords(x, y, abs_angle, length):
        # Endpoint of a segment of given length from (x, y) at abs_angle degrees.
        abs_angle_rad = deg_to_rad(abs_angle)
        next_x = x + np.cos(abs_angle_rad) * length
        next_y = y + np.sin(abs_angle_rad) * length
        return next_x, next_y
    def take_opposite_angle(angle):
        # Flip an angle by 180 degrees, keeping it within (-180, 180].
        if angle > 0:
            angle -= 180
        else:
            angle += 180
        return angle
    #MAIN LOOP
    for domain_index in traverse_order:
        written_rows = traverse_order[0 : traverse_order.index(domain_index)] #rows before current in TO
        if domain_index == 0: #0th dom
            # -999 marks "no angle given"; the first domain defaults to 0 degrees
            if struct_info_array2[0, 0] == -999:
                abs_angle = 0
            else:
                abs_angle = struct_info_array2[0, 0]
            struct_info_array2[0, 3] = abs_angle #the absolute angle summation starts here
            if struct_info_array2[0, 2] == 0:
                length_here = default_length
            else:
                length_here = struct_info_array2[0, 2]
            x1,y1 = calculate_other_two_coords(0, 0, abs_angle, length_here)
            coordinate_array[0, 2] = x1
            coordinate_array[0, 3] = y1
        else: #all other doms
            #when drawing the second pair, just offset the first
            if domain_index in paired_node_list and give_pair_of_domain(domain_index, paired_dict) in written_rows:
                x0, y0, x1, y1 = calc_coords_of_paired2(domain_index, struct_info_array2, coordinate_array)
                coordinate_array[domain_index, 0] = x0
                coordinate_array[domain_index, 1] = y0
                coordinate_array[domain_index, 2] = x1
                coordinate_array[domain_index, 3] = y1
                abs_of_pair = struct_info_array2[give_pair_of_domain(domain_index, paired_dict), 3]
                abs_here = take_opposite_angle(abs_of_pair)
                struct_info_array2[domain_index, 3] = abs_here
            #others: unpaireds, first pairs
            else:
                side = struct_info_array2[domain_index, 1]
                neighbors = get_red_neigh(domain_index, skeleton_graph)
                #rel_angle cases
                if struct_info_array2[domain_index, 0] == -999:
                    prefer = get_preferred_angle(domain_index, paired_node_list, skeleton_graph, traverse_order)
                    rel_angle = side * prefer
                else:
                    rel_angle = struct_info_array2[domain_index, 0]
                #dom length cases
                if struct_info_array2[domain_index, 2] == 0:
                    length_here = default_length
                else:
                    length_here = struct_info_array2[domain_index, 2]
                #for doms > 0 and previous dom didn't have strand break, x0, y0 is the same as x1, y1 of prev dom
                if len(neighbors) == 1:
                    if neighbors[0] < domain_index:
                        # attach forwards: start where the earlier neighbor ended
                        coordinate_array[domain_index, 0] = coordinate_array[neighbors[0], 2]
                        coordinate_array[domain_index, 1] = coordinate_array[neighbors[0], 3]
                        abs_angle = struct_info_array2[neighbors[0], 3] + rel_angle
                        struct_info_array2[domain_index, 3] = abs_angle #fill out abs angle field
                        x0, y0 = coordinate_array[domain_index, 0], coordinate_array[domain_index, 1]
                        x1, y1 = calculate_other_two_coords(x0, y0, abs_angle, length_here)
                        coordinate_array[domain_index, 2] = x1
                        coordinate_array[domain_index, 3] = y1
                    elif neighbors[0] > domain_index:
                        # attach backwards: end where the later neighbor starts
                        coordinate_array[domain_index, 2] = coordinate_array[neighbors[0], 0]
                        coordinate_array[domain_index, 3] = coordinate_array[neighbors[0], 1]
                        abs_angle = take_opposite_angle(struct_info_array2[neighbors[0], 3]) - rel_angle
                        struct_info_array2[domain_index, 3] = abs_angle #fill out abs angle field
                        x1, y1 = coordinate_array[domain_index, 2], coordinate_array[domain_index, 3] #other way around
                        x0, y0 = calculate_other_two_coords(x1, y1, abs_angle, length_here)
                        coordinate_array[domain_index, 0] = x0
                        coordinate_array[domain_index, 1] = y0
                if len(neighbors) == 2:
                    # first pass: anchor endpoints that touch already-drawn neighbors
                    for neigh in neighbors:
                        if neigh < domain_index:
                            if neigh in written_rows:
                                coordinate_array[domain_index, 0] = coordinate_array[domain_index - 1, 2]
                                coordinate_array[domain_index, 1] = coordinate_array[domain_index - 1, 3]
                        if neigh > domain_index:
                            if neigh in written_rows:
                                coordinate_array[domain_index, 2] = coordinate_array[domain_index + 1, 0]
                                coordinate_array[domain_index, 3] = coordinate_array[domain_index + 1, 1]
                    # second pass: compute the endpoint on the not-yet-drawn side
                    for neigh in neighbors:
                        if neigh < domain_index:
                            if neigh not in written_rows:
                                # NOTE(review): indexes domain_index + 1 rather
                                # than the written neighbor itself — confirm
                                # this invariant (neighbors are dom-1/dom+1).
                                abs_angle = take_opposite_angle(struct_info_array2[domain_index + 1, 3]) - rel_angle #take the abs angle of the written neighbor
                                struct_info_array2[domain_index, 3] = abs_angle #fill out abs angle field
                                x1, y1 = coordinate_array[domain_index, 2], coordinate_array[domain_index, 3]
                                x0, y0 = calculate_other_two_coords(x1, y1, abs_angle, length_here)
                                coordinate_array[domain_index, 0] = x0
                                coordinate_array[domain_index, 1] = y0
                        if neigh > domain_index:
                            if neigh not in written_rows:
                                abs_angle = struct_info_array2[neighbors[0], 3] + rel_angle
                                struct_info_array2[domain_index, 3] = abs_angle #fill out abs angle field
                                x0, y0 = coordinate_array[domain_index, 0], coordinate_array[domain_index, 1]
                                x1, y1 = calculate_other_two_coords(x0, y0, abs_angle, length_here)
                                coordinate_array[domain_index, 2] = x1
                                coordinate_array[domain_index, 3] = y1
    return coordinate_array
def create_color_list(name_final_lol, paired_dict, multiloop_list, palette):
    """
    color list for each domain where: pairs are same color, multiloops are same color,
    neighbor domains never same color

    :param name_final_lol: per-domain [name, domtype] lists; colors are
        appended in place (index 2: line color; index 3, pairs only: the
        faded background color drawn between the two strands).
    :param paired_dict: mapping of one pair member to the other
    :param multiloop_list: list of multiloop member-index lists
    :param palette: one of "IBM", "Wong", "Magma", "Plasma"
    :return: the mutated name_final_lol
    :raises ValueError: for an unknown palette (previously only printed a
        message and then crashed with NameError on the undefined color lists)
    """
    # BUGFIX: string comparisons below used 'is', which relies on CPython
    # string interning for correctness; '==' is the proper value comparison.
    if palette == "IBM":
        colors = ["#648FFF", "#785EF0", "#DC267F", "#FE6100", "#FFB000"]
        background_cols = ["#BACCFB", "#CAC1F3", "#DC9BBB", "#F1C2A6", "#F9E5B9"] #more 'faded' color for rectangle between pairs
    elif palette == "Wong":
        colors = ["#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"]
        background_cols = ["#BDBDBD", "#EAC267", "#A5CCE2", "#77A79A", "#E8E4A7", "#6595B1", "#D49E73", "#C7A8BA"]
    elif palette == "Magma":
        colors = ["#fcfdbf", "#fe9f6d", "#de4968", "#8c2981", "#3b0f70", "#000004"]
        background_cols = ["#FFFFE8", "#F7C9B1", "#DCA6B1", "#BBA0B8", "#AA93C5", "#A9A9A9"]
    elif palette == "Plasma":
        colors = ["#f0f921", "#fdb42f", "#ed7953", "#cc4778", "#9c179e", "#5c01a6", "#0d0887"]
        background_cols = ["#EDF194", "#F9DCA8", "#E8AE9B", "#CC91A6", "#AE7FAF", "#9C78B9", "#7E7CC1"]
    else:
        raise ValueError("Palette not yet available")
    def give_pair_of_domain(dom_index, paired_dict):
        # Look up the pair partner regardless of which side of the dict it is on.
        if dom_index in paired_dict:
            return paired_dict[dom_index]
        elif dom_index in list(paired_dict.values()):
            index_of_pair = list(paired_dict.keys())[list(paired_dict.values()).index(dom_index)]
            return index_of_pair
        else:
            raise ValueError('not actually paired')
    curr_color_i = 0
    for dom_index in range(len(name_final_lol)):
        color_i = curr_color_i % len(colors)  # cycle through the palette
        if len(name_final_lol[dom_index]) == 2:  # not colored yet
            name, domtype = name_final_lol[dom_index]
            if domtype == "paired":
                pair_index = give_pair_of_domain(dom_index, paired_dict)
                #index2 is line color
                name_final_lol[dom_index].append(colors[color_i])
                name_final_lol[pair_index].append(colors[color_i])
                #index3 is only for paired, the between pair background color
                name_final_lol[dom_index].append(background_cols[color_i])
                name_final_lol[pair_index].append(background_cols[color_i])
                curr_color_i += 1
            elif domtype == "multiloop":
                # All members of the loop get the same color; later members are
                # then skipped by the len == 2 check above.
                # NOTE(review): curr_color_i is not advanced here, so the next
                # colored domain reuses this color — confirm intended.
                for loop in multiloop_list:
                    if dom_index in loop:
                        for loop_index in loop:
                            name_final_lol[loop_index].append(colors[color_i])
            else:
                name_final_lol[dom_index].append(colors[color_i])
                curr_color_i += 1
    return name_final_lol
def lin_domain(x0, y0, x1, y1, side, name, color):
    """Build the SVG line (5' -> 3') and side label for a linear domain.

    Returns the (line, label) pair of draw objects; the label is offset from
    the segment midpoint along the normal of the domain's own side.
    """
    offset_len = 7
    text_size = 7
    line = draw.Line(x0, y0, x1, y1, stroke=color)
    direction = np.array([x1 - x0, y1 - y0])
    # anchor the label on the correct side, offset from the midpoint
    anchor = np.array([x0, y0]) + direction / 2 + normal_length1(direction, side) * offset_len
    label = draw.Text(name, text_size, anchor[0], anchor[1], fill=color)
    return line, label
def hairpin_loop(x0, y0, x1, y1, side, name, color):
    """Build the SVG arc and label for a hairpin loop spanning the gap from
    (x0, y0) to (x1, y1), bulging out towards the domain's ``side``.

    Returns the (arc, label) pair of draw objects.
    """
    offset_len = 7
    text_size = 7
    #calculate Arc arguments
    r = np.sqrt((x1-x0)**2 + (y1-y0)**2) #rn radius is the distance between paired regions, will change
    #vector is pointing from point 0 -> point 1
    vector = np.array([x1 - x0, y1 - y0])
    vec_len = np.linalg.norm(vector)
    unit_normal = normal_length1(vector, side)
    #calculate center and label anchor with r and vector
    # distance from chord midpoint to the arc center (Pythagoras)
    diag = np.sqrt(r**2 - (vec_len/2)**2)
    cx, cy = np.array([x0,y0])+ vector/2 + diag*unit_normal
    x_text, y_text = np.array([x0,y0])+ vector/2 + (diag+r+offset_len)*unit_normal
    vec_center_0 = np.array([x0-cx,y0-cy])
    vec_center_1 = np.array([x1-cx,y1-cy])
    #correction for 0 angle
    # sign() of a zero y-component would zero the degree below; force +1
    if np.sign(vec_center_0[1]) == 0:
        which_side0 = 1
    else:
        which_side0 = np.sign(vec_center_0[1])
    if np.sign(vec_center_1[1]) == 0:
        which_side1 = 1
    else:
        which_side1 = np.sign(vec_center_1[1])
    #bit unclear about start/end degrees, but works
    if side == -1:
        startdeg = calc_degree(vec_center_0, np.array([1,0]))*which_side0
        enddeg = calc_degree(vec_center_1, np.array([1,0]))*which_side1
    elif side == 1:
        enddeg = calc_degree(vec_center_0, np.array([1,0]))*which_side0
        startdeg = calc_degree(vec_center_1, np.array([1,0]))*which_side1
    #draw
    p = draw.Arc(cx, cy, r, startdeg, enddeg,
                 stroke=color, stroke_width=0.9, fill="none") #stroke_width will be also important
    #LABEL - if without name, no label is added
    t = draw.Text(name, text_size, x_text, y_text, fill=color) #label
    return p, t
def bulgeloop(x0, y0, x1, y1, side, name, color):
    """Build the SVG arc and label for a bulge loop: a semicircular arc whose
    diameter is the gap between (x0, y0) and (x1, y1).

    Same construction as hairpin_loop() except the radius is half the gap and
    the center sits on the chord midpoint.  Returns the (arc, label) pair.
    """
    offset_len = 7
    text_size = 7
    #calculate Arc arguments
    r = (np.sqrt((x1 - x0)**2 + (y1 - y0)**2))/2 #rn radius is the distance between paired regions, will change
    #vector is pointing from point 0 -> point 1
    vector = np.array([x1 - x0, y1 - y0])
    vec_len = np.linalg.norm(vector)
    unit_normal = normal_length1(vector, side)
    #calculate center and label anchor with r and vector
    cx, cy = np.array([x0, y0])+ vector/2
    x_text, y_text = np.array([x0, y0])+ vector/2 + (r + offset_len) * unit_normal
    vec_center_0 = np.array([x0 - cx, y0 - cy])
    vec_center_1 = np.array([x1 - cx, y1 - cy])
    #correction for 0 angle
    # sign() of a zero y-component would zero the degree below; force +1
    if np.sign(vec_center_0[1]) == 0:
        which_side0 = 1
    else:
        which_side0 = np.sign(vec_center_0[1])
    if np.sign(vec_center_1[1]) == 0:
        which_side1 = 1
    else:
        which_side1 = np.sign(vec_center_1[1])
    #bit unclear about start/end degrees, but works
    if side == -1:
        startdeg = calc_degree(vec_center_0, np.array([1,0])) * which_side0
        enddeg = calc_degree(vec_center_1, np.array([1,0])) * which_side1
    elif side == 1:
        enddeg = calc_degree(vec_center_0, np.array([1,0])) * which_side0
        startdeg = calc_degree(vec_center_1, np.array([1,0])) * which_side1
    #draw
    p = draw.Arc(cx, cy, r, startdeg, enddeg,
                 stroke=color, stroke_width=0.9, fill="none") #stroke_width will be also important
    #LABEL - if without name, no label is added
    t = draw.Text(name, text_size, x_text, y_text, fill=color) #label
    return p, t
def multiloop(d, ml, coordinate_array, struct_info_array2, name_final_lol):
    """Append the arcs and labels of one multiloop to drawing ``d``.

    Each member domain of the loop ``ml`` becomes an arc; arc centers lie on
    the line from the segment midpoint towards the centroid of all loop
    endpoints.  Returns the (mutated) drawing.
    """
    offset_len = 7
    text_size = 7
    side = struct_info_array2[ml[0], 1]
    colors = [name_final_lol[i][2] for i in ml]
    names = [name_final_lol[i][0] for i in ml]
    #taken from https://stackoverflow.com/a/47198877
    def find_center(p_ex1, p_ex2, centroid):
        # Project *centroid* onto the (infinite) line through p_ex1 and p_ex2.
        x1, y1 = p_ex1
        x2, y2 = p_ex2
        x3, y3 = centroid
        dx, dy = x2 - x1, y2 - y1
        det = dx * dx + dy * dy
        a = (dy * (y3 - y1) + dx * (x3 - x1))/det
        return x1 + a * dx, y1 + a * dy
    #put points into shapely, get centroid
    coords = []
    for dom in ml:
        point0 = (coordinate_array[dom, 0], coordinate_array[dom, 1])
        point1 = (coordinate_array[dom, 2], coordinate_array[dom, 3])
        coords.append(point0)
        coords.append(point1)
    centroid = LinearRing(coords).centroid
    # NOTE(review): converts a shapely Point to an array; newer shapely
    # versions deprecate np.asarray on geometries — confirm version pinning.
    centroid = np.asarray(centroid)
    #get centerpoint for each arc
    for dom_i in range(len(ml)):
        dom = ml[dom_i]
        x0 = coordinate_array[dom, 0]
        y0 = coordinate_array[dom, 1]
        x1 = coordinate_array[dom, 2]
        y1 = coordinate_array[dom, 3]
        #vector is pointing from point 0 -> point 1
        vector = np.array([x1 - x0, y1 - y0])
        unit_normal = normal_length1(vector, side) #this should go to the other side as the hairpin center!
        p_ex1 = np.array([x0, y0]) + vector/2
        p_ex2 = np.array([x0, y0]) + vector/2 + unit_normal * -1
        cx, cy = find_center(p_ex1, p_ex2, centroid)
        radius = np.linalg.norm(np.array([x1 - cx, y1 - cy]))
        x_text, y_text = np.array([cx, cy]) + unit_normal * (radius + offset_len)
        vec_center_0 = np.array([x0 - cx, y0 - cy])
        vec_center_1 = np.array([x1 - cx, y1 - cy])
        #correction for 0 angle
        # sign() of a zero y-component would zero the degree below; force +1
        if np.sign(vec_center_0[1]) == 0:
            which_side0 = 1
        else:
            which_side0 = np.sign(vec_center_0[1])
        if np.sign(vec_center_1[1]) == 0:
            which_side1 = 1
        else:
            which_side1 = np.sign(vec_center_1[1])
        #bit unclear about start/end degrees, but works
        if side == -1:
            startdeg = calc_degree(vec_center_0, np.array([1,0])) * which_side0
            enddeg = calc_degree(vec_center_1, np.array([1,0])) * which_side1
        elif side == 1:
            enddeg = calc_degree(vec_center_0, np.array([1,0])) * which_side0
            startdeg = calc_degree(vec_center_1, np.array([1,0])) * which_side1
        #append to draw object
        m = draw.Arc(cx, cy, radius, startdeg, enddeg,
                     stroke=colors[dom_i], stroke_width=0.9, fill="none") #stroke_width will be also important
        #LABEL - if without name, no label is added
        t = draw.Text(names[dom_i], text_size, x_text, y_text, fill=colors[dom_i]) #label
        d.append(m)
        d.append(t)
    return d
def draw_image_from_coords(coordinate_array, struct_info_array2, name_final_lol, multiloop_list, paired_dict):
    """Assemble the final SVG drawing from the per-domain coordinates.

    Draws 5'/3' end markers, one element per non-multiloop domain, then the
    multiloop arcs, and finally the background rectangles between pairs.

    :return: the drawSvg Drawing object
    """
    def get_canvas_size(coordinate_array):
        # Bounding box of all segment endpoints plus a fixed label margin.
        x_coords = list(coordinate_array[:,0]) + list(coordinate_array[:,2])
        y_coords = list(coordinate_array[:,1]) + list(coordinate_array[:,3])
        x_len = (max(x_coords) - min(x_coords)) + 65 #canvas larger than max range
        y_len = (max(y_coords) - min(y_coords)) + 65
        origo = [min(x_coords) - 30, min(y_coords) - 30] #origin shifted to leave room for labels
        canvas = [int(x_len), int(y_len)]
        return canvas, origo
    #draw process
    SCALE_METRIC = 3  # pixel scale of the rendered SVG
    CANVAS_SIZE, ORIGIN = get_canvas_size(coordinate_array)
    #image instancing
    d = draw.Drawing(CANVAS_SIZE[0], CANVAS_SIZE[1], origin=ORIGIN, displayInline=False)
    d.setPixelScale(SCALE_METRIC)
    for row in range(len(coordinate_array[:,0])):
        x0 = coordinate_array[row, 0]
        y0 = coordinate_array[row, 1]
        x1 = coordinate_array[row, 2]
        y1 = coordinate_array[row, 3]
        side = struct_info_array2[row, 1]
        name = name_final_lol[row][0]
        color = name_final_lol[row][2]
        #endings indicated
        if row == 0:
            # 5' marker just behind the start of the first domain
            dom1vec = np.array([x0 - x1, y0 - y1])
            vec5prime = dom1vec/np.linalg.norm(dom1vec) * 10
            t5 = draw.Text("5'", 6, vec5prime[0], vec5prime[1], fill='black')
            d.append(t5)
        elif row == len(coordinate_array[:,0])-1:
            # 3' marker just beyond the end of the last domain
            dom_last_vec = np.array([x1 - x0, y1 - y0])
            vec3prime = np.array([x1,y1]) + dom_last_vec/np.linalg.norm(dom_last_vec) * 7
            t3 = draw.Text("3'", 6, vec3prime[0], vec3prime[1], fill='black')
            d.append(t3)
        #some dom types have special functions
        # BUGFIX: p/t are reset each iteration and only appended when this row
        # produced an element; previously a "multiloop" row re-appended (or, on
        # the first row, referenced before assignment) the prior row's p and t.
        p = t = None
        if name_final_lol[row][1] == "hairpin loop":
            p, t = hairpin_loop(x0, y0, x1, y1, side, name, color)
        elif name_final_lol[row][1] == "bulgeloop":
            p, t = bulgeloop(x0, y0, x1, y1, side, name, color)
        elif name_final_lol[row][1] != "multiloop":
            p, t = lin_domain(x0, y0, x1, y1, side, name, color)
        if p is not None:
            d.append(p)
            d.append(t)
    #multiloops done together
    for ml in multiloop_list:
        d = multiloop(d, ml, coordinate_array, struct_info_array2, name_final_lol)
    #rectangle between paired doms
    for pair1 in paired_dict:
        pair2 = paired_dict[pair1]
        color = name_final_lol[pair1][3]  # faded background color of the pair
        x0, y0, x1, y1 = coordinate_array[pair1]
        x0p, y0p, x1p, y1p = coordinate_array[pair2]
        r = draw.Lines(x0, y0, x1, y1,
                       x0p, y0p, x1p, y1p,
                       close = False, fill = color)
        d.append(r)
    return d
def domain_visualization(string, length_input_dict=None, palette="IBM", filename=None, orient=1):
    """
    Run the full pipeline — parse the structure string, resolve cycles,
    compute coordinates, assign colors — and render the drawing.

    :param string: domain-level structure string to visualize
    :param length_input_dict: optional per-domain length overrides
        (BUGFIX: was a mutable default argument ``{}``)
    :param palette: color palette name ("IBM", "Wong", "Magma", "Plasma")
    :param filename: if given, the drawing is also saved as SVG to this path
    :param orient: orientation flag forwarded to the string parser
    :return: the drawSvg Drawing object
    """
    if length_input_dict is None:
        length_input_dict = {}
    dom_raw_list, struct_info_array, paired_dict, name_final_lol = comprehend_string(string, orient)
    struct_info_array = process_length_input(struct_info_array, length_input_dict)
    skeleton_graph = create_skeleton(dom_raw_list, paired_dict, name_final_lol)
    name_final_lol, multiloop_list, cycle_list_G = find_type_of_unpaired(skeleton_graph, paired_dict, name_final_lol)
    #if no cycles skip cycle resolve:
    if len(cycle_list_G) == 0:
        struct_info_array2 = struct_info_array
    else:
        struct_info_array2 = resolve_domains_in_cycles(struct_info_array, paired_dict, skeleton_graph)
    traverse_order = determine_contstruct_order(skeleton_graph, paired_dict)
    coordinate_array = stepwise_buildup(struct_info_array2, paired_dict, traverse_order, skeleton_graph)
    name_final_lol = create_color_list(name_final_lol, paired_dict, multiloop_list, palette)
    d = draw_image_from_coords(coordinate_array, struct_info_array2, name_final_lol, multiloop_list, paired_dict)
    if filename is not None:
        d.saveSvg(filename)
    return d
3313574 | # -*- coding: utf-8 -*-
"""
lantz.drivers.newport.fsm300
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implementation of FSM300 using NI DAQ controller
Author: <NAME>
Date: 9/27/2016
"""
from lantz import Driver
from lantz.driver import Feat, DictFeat, Action
from lantz.drivers.ni.daqmx import AnalogOutputTask, VoltageOutputChannel
from lantz import Q_
import time
import numpy as np
def enforce_units(val, units):
    """Return *val* as a quantity expressed in *units*.

    Existing quantities are converted; bare numbers are wrapped in a new
    quantity with the given units.
    """
    if isinstance(val, Q_):
        return val.to(units)
    return Q_(val, units)
def enforce_point_units(point, units='um'):
    """Return the (x, y) point with both coordinates coerced to *units*."""
    x, y = point[0], point[1]
    return enforce_units(x, units), enforce_units(y, units)
class FSM300(Driver):
    """Newport FSM-300 fast steering mirror driven through two NI-DAQ analog
    output channels.

    Positions are (x, y) quantities in micrometres, converted to channel
    voltages via the per-axis calibration factors in ``cal``.  Moves are
    smoothed with a versine voltage ramp; ``line_scan`` synchronizes an
    acquisition task (counter or analog input) to a linear sweep.
    """

    def __init__(self, x_ao_ch, y_ao_ch,
                 ao_smooth_rate=Q_('10 kHz'), ao_smooth_steps=Q_('1000 1/V'),
                 limits=((Q_(-10, 'V'), Q_(10, 'V')), (Q_(-10, 'V'), Q_(10, 'V'))),
                 cal=(Q_(10, 'um/V'), Q_(7.6, 'um/V'))):
        """
        :param x_ao_ch: physical AO channel for the x axis (e.g. 'Dev1/ao0')
        :param y_ao_ch: physical AO channel for the y axis
        :param ao_smooth_rate: sample clock rate of the smoothing ramp
        :param ao_smooth_steps: ramp resolution in steps per volt of excursion
        :param limits: ((x_min, x_max), (y_min, y_max)) output voltage limits
        :param cal: (x, y) position-per-volt calibration factors
        """
        # DAQ channel limits as plain float magnitudes in volts.
        self.x_limits_mag = tuple(float(val / Q_('1 V')) for val in limits[0])
        self.y_limits_mag = tuple(float(val / Q_('1 V')) for val in limits[1])
        self.ao_smooth_rate = ao_smooth_rate
        self.ao_smooth_steps = ao_smooth_steps
        self.cal = cal
        # Last commanded position; assumed (0, 0) at startup.
        self._position = (Q_('0 um'), Q_('0 um'))
        super().__init__()
        self.x_ao_ch = x_ao_ch
        self.y_ao_ch = y_ao_ch

    def initialize(self):
        """Create the AO task with one voltage channel per axis."""
        self.task = AnalogOutputTask('fsm300')
        VoltageOutputChannel(self.x_ao_ch, name='fsm_x', min_max=self.x_limits_mag, units='volts', task=self.task)
        VoltageOutputChannel(self.y_ao_ch, name='fsm_y', min_max=self.y_limits_mag, units='volts', task=self.task)
        super().initialize()

    def finalize(self):
        """Release the DAQ task."""
        self.task.clear()
        super().finalize()

    def ao_smooth_func(self, init_point, final_point):
        """Versine-shaped voltage ramp between two (x, y) positions.

        :return: (steps, 2) array of [x, y] voltages in volts; the step count
            scales with the larger per-axis voltage excursion.
        """
        init_x, init_y = init_point
        final_x, final_y = final_point
        # convert positions to voltages using the per-axis calibration
        init_x_voltage, final_x_voltage = init_x / self.cal[0], final_x / self.cal[0]
        init_y_voltage, final_y_voltage = init_y / self.cal[1], final_y / self.cal[1]
        diff_x_voltage = final_x_voltage - init_x_voltage
        diff_y_voltage = final_y_voltage - init_y_voltage
        diff_voltage = max(abs(diff_x_voltage), abs(diff_y_voltage))
        steps = int(np.ceil(diff_voltage * self.ao_smooth_steps))
        init = np.array([val.to('V').magnitude for val in [init_x_voltage, init_y_voltage]])
        diff = np.array([val.to('V').magnitude for val in [diff_x_voltage, diff_y_voltage]])
        # versine profile: smooth 0 -> 1 ramp with zero endpoint slope
        versine_steps = (1.0 - np.cos(np.linspace(0.0, np.pi, steps))) / 2.0
        step_voltages = np.outer(np.ones(steps), init) + np.outer(versine_steps, diff)
        return step_voltages

    def ao_linear_func(self, init_point, final_point, steps):
        """Linear voltage ramp between two (x, y) positions.

        :param steps: number of samples in the ramp
        :return: (steps, 2) array of [x, y] voltages in volts
        """
        init_x, init_y = init_point
        final_x, final_y = final_point
        init_x_voltage, final_x_voltage = init_x / self.cal[0], final_x / self.cal[0]
        init_y_voltage, final_y_voltage = init_y / self.cal[1], final_y / self.cal[1]
        diff_x_voltage = final_x_voltage - init_x_voltage
        diff_y_voltage = final_y_voltage - init_y_voltage
        init = np.array([val.to('V').magnitude for val in [init_x_voltage, init_y_voltage]])
        diff = np.array([val.to('V').magnitude for val in [diff_x_voltage, diff_y_voltage]])
        linear_steps = np.linspace(0.0, 1.0, steps)
        step_voltages = np.outer(np.ones(steps), init) + np.outer(linear_steps, diff)
        return step_voltages

    @Feat()
    def abs_position(self):
        """Current mirror position as an (x, y) pair of quantities."""
        return self._position

    @abs_position.setter
    def abs_position(self, point):
        point = enforce_point_units(point)
        step_voltages = self.ao_smooth_func(self._position, point)
        if step_voltages.size:
            steps = step_voltages.shape[0]
            clock_config = {
                'source': 'OnboardClock',
                'rate': self.ao_smooth_rate.to('Hz').magnitude,
                'sample_mode': 'finite',
                'samples_per_channel': steps,
            }
            self.task.configure_timing_sample_clock(**clock_config)
            self.task.configure_trigger_disable_start()
            task_config = {
                'data': step_voltages,
                'auto_start': False,
                'timeout': Q_(0, 's'),
                'group_by': 'scan',
            }
            self.task.write(**task_config)
            self.task.start()
            # block until the ramp has played out before reporting the move done
            time.sleep((steps / self.ao_smooth_rate).to('s').magnitude)
            self.task.stop()
        self._position = point

    @Action()
    def line_scan(self, init_point, final_point, steps, acq_task, acq_rate=Q_('20 kHz'), pts_per_pos=100):
        """Sweep linearly from *init_point* to *final_point* while acquiring.

        :param steps: number of scan positions
        :param acq_task: counter-input ('CI') or analog-input ('AI') task
        :param acq_rate: shared AO/acquisition sample clock rate
        :param pts_per_pos: samples averaged per scan position
        :return: 1-D array of length *steps* — count rates in Hz for a CI
            task, mean voltages for an AI task
        """
        init_point = enforce_point_units(init_point)
        final_point = enforce_point_units(final_point)
        # generous read timeout: 1.5x the nominal scan duration
        timeout = enforce_units(1.5 * (pts_per_pos * steps / acq_rate), units='s')
        # AO smooth move to initial point
        self.abs_position = init_point
        step_voltages = self.ao_linear_func(init_point, final_point, steps)
        if acq_task.IO_TYPE == 'CI':
            chs = list(acq_task.channels.keys())
            if not chs:
                raise ValueError('acquisition task must have at least one channel')
            ch = chs[0]
            dev = ch.split('/')[0]
            # one extra sample per position: cumulative counts are differenced below
            step_voltages = np.repeat(step_voltages, pts_per_pos + 1, axis=0)
            clock_config = {
                'rate': acq_rate.to('Hz').magnitude,
                'sample_mode': 'finite',
                'samples_per_channel': len(step_voltages),
            }
            self.task.configure_timing_sample_clock(**clock_config)
            clock_config = {
                'source': '/{}/ao/SampleClock'.format(dev),
                'rate': acq_rate.to('Hz').magnitude,
                'sample_mode': 'finite',
                'samples_per_channel': len(step_voltages),
            }
            acq_task.configure_timing_sample_clock(**clock_config)
            task_config = {
                'data': step_voltages,
                'auto_start': False,
                'timeout': Q_('0 s'),
                'group_by': 'scan',
            }
            self.task.write(**task_config)
            # BUGFIX: the device prefix was dropped — the original
            # 'ao/StartTrigger'.format(dev) had no placeholder, leaving a
            # dangling format argument; the terminal must be fully qualified
            # like the '/{}/ao/SampleClock' source above.
            acq_task.arm_start_trigger_source = '/{}/ao/StartTrigger'.format(dev)
            acq_task.arm_start_trigger_type = 'digital_edge'
            acq_task.start()
            self.task.start()
            scanned = acq_task.read(samples_per_channel=len(step_voltages), timeout=timeout)
            acq_task.stop()
            self.task.stop()
            scanned = scanned.reshape((steps, pts_per_pos + 1))
            # difference cumulative counts, average per position, scale to Hz
            averaged = np.diff(scanned).mean(axis=1)
            return averaged * acq_rate.to('Hz').magnitude
        elif acq_task.IO_TYPE == 'AI':
            step_voltages = np.repeat(step_voltages, pts_per_pos, axis=0)
            clock_config = {
                'source': 'OnboardClock',
                'rate': acq_rate.to('Hz').magnitude,
                'sample_mode': 'finite',
                'samples_per_channel': len(step_voltages),
            }
            self.task.configure_timing_sample_clock(**clock_config)
            acq_task.configure_timing_sample_clock(**clock_config)
            task_config = {
                'data': step_voltages,
                'auto_start': False,
                'timeout': Q_('0 s'),
                'group_by': 'scan',
            }
            self.task.write(**task_config)
            # AO starts when the AI task starts so samples line up
            self.task.configure_trigger_digital_edge_start('ai/StartTrigger')
            self.task.start()
            acq_task.start()
            scanned = acq_task.read(samples_per_channel=len(step_voltages), timeout=timeout)
            acq_task.stop()
            self.task.stop()
            scanned = scanned.reshape((steps, pts_per_pos))
            averaged = scanned.mean(axis=1)
            return averaged
        else:
            # unsupported acquisition task type: silently do nothing (kept
            # from the original behavior)
            pass
| StarcoderdataPython |
11290418 | <filename>CurationEngine/src/createNewEventRule.py
import traceback
import os
import json
import logging
import boto3
import botocore
from boto3.dynamodb.types import TypeDeserializer
logger = logging.getLogger()
class CreateNewEventRuleException(Exception):
    """Wrapper type raised by lambda_handler for any rule-maintenance failure."""
    pass
# Subclass of boto's TypeDeserializer for DynamoDB to adjust
# for DynamoDB Stream format.
class StreamTypeDeserializer(TypeDeserializer):
    """TypeDeserializer adjusted for the DynamoDB *Stream* wire format."""

    def _deserialize_n(self, value):
        # Streams deliver numbers as strings; boto's default yields Decimal,
        # this override returns float instead.
        # NOTE(review): float can lose precision for large/exact DynamoDB
        # numbers -- confirm that is acceptable for rule configuration data.
        return float(value)

    def _deserialize_b(self, value):
        return value  # Already in Base64
def lambda_handler(event, context):
    '''
    lambda_handler Top level lambda handler ensuring all exceptions
    are caught and logged.
    :param event: AWS Lambda uses this to pass in event data.
    :type event: Python type - Dict / list / int / string / float / None
    :param context: AWS Lambda uses this to pass in runtime information.
    :type context: LambdaContext
    :return: The event object passed into the method
    :rtype: Python type - Dict / list / int / string / float / None
    :raises CreateNewEventRuleException: On any error or exception
    '''
    try:
        return create_new_event_rule(event, context)
    except CreateNewEventRuleException:
        # Already the module's wrapper type -- propagate unchanged.
        raise
    except Exception as e:
        # Log the traceback, then normalize to the single exception type
        # callers (and the Lambda runtime) are expected to see.
        traceback.print_exc()
        raise CreateNewEventRuleException(e)
def create_new_event_rule(event, context):
    """
    create_new_event_rule Creates a new event rule and event target in
    event bridge to be used in the accelerated data pipelines.
    :param event: AWS Lambda uses this to pass in event data.
    :type event: Python type - Dict / list / int / string / float / None
    :param context: AWS Lambda uses this to pass in runtime information.
    :type context: LambdaContext
    :return: The event object passed into the method
    :rtype: Python type - Dict / list / int / string / float / None
    """
    ddb_deserializer = StreamTypeDeserializer()
    records = event['Records']
    # ARN of the Lambda that EventBridge should invoke on schedule.
    start_curation_process_function_arn = os.environ['START_CURATION_PROCESS_FUNCTION_ARN']
    for record in records:
        ddb = record['dynamodb']
        # Get the event type and curation type for the record
        event_name = record['eventName'].upper() # INSERT, MODIFY, REMOVE
        if (event_name == 'INSERT') or (event_name == 'MODIFY'):
            if 'NewImage' not in ddb:
                logger.warning(
                    'Cannot process stream if it does not contain NewImage')
                continue
            # Wrap in {'M': ...} so the deserializer treats the image as a map.
            doc_fields = ddb_deserializer.deserialize({'M': ddb['NewImage']})
            curation_type = doc_fields['curationType']
            print(f'Creating or modifying event for curationType {curation_type}')
            put_rule(curation_type, doc_fields['cronExpression'])
            put_target(curation_type, start_curation_process_function_arn)
        elif event_name == 'REMOVE':
            # REMOVE records carry only the key attributes, not a NewImage.
            doc_fields = ddb_deserializer.deserialize({'M': ddb['Keys']})
            curation_type = doc_fields['curationType']
            print(f'Removing event for curationType {curation_type}')
            # Targets must be removed before the rule itself can be deleted.
            remove_targets(curation_type)
            delete_rule(curation_type)
    return 'Success'
def put_rule(curation_type, schedule_expression):
    """Create or update the EventBridge schedule rule for *curation_type*.

    :param curation_type: Identifier of the curation configuration.
    :param schedule_expression: Cron/rate expression for the rule.
    :return: The PutRule API response (contains the rule ARN).
    """
    client = boto3.client('events')
    # Return the API response instead of binding it to an unused variable
    # (the original assigned `response` and discarded it).
    return client.put_rule(
        Name=f'{curation_type}-scheduled-curation',
        ScheduleExpression=schedule_expression,
        State='ENABLED',
        Description=f'Event rule for curation type {curation_type}'
    )
def delete_rule(curation_type):
    """Delete the EventBridge rule for *curation_type*.

    Callers must remove the rule's targets first (see remove_targets);
    EventBridge refuses to delete a rule that still has targets.

    :return: The DeleteRule API response.
    """
    client = boto3.client('events')
    # Return the response rather than discarding it in an unused variable.
    return client.delete_rule(
        Name=f'{curation_type}-scheduled-curation'
    )
def put_target(curation_type, function_arn):
    """Attach the start-curation Lambda as the target of the schedule rule.

    :param curation_type: Identifier of the curation configuration.
    :param function_arn: ARN of the Lambda to invoke on schedule.
    :return: The PutTargets API response.
    """
    client = boto3.client('events')
    # Renamed from `input`, which shadowed the builtin input().
    target_input = {"curationType": curation_type}
    # Return the response rather than discarding it in an unused variable.
    return client.put_targets(
        Rule=f'{curation_type}-scheduled-curation',
        Targets=[
            {
                'Id': f'{curation_type}-event-target',
                'Arn': function_arn,
                'Input': json.dumps(target_input)
            }
        ]
    )
def remove_targets(curation_type):
    """Detach the Lambda target from the rule (prerequisite to delete_rule).

    :return: The RemoveTargets API response.
    """
    client = boto3.client('events')
    # Return the response rather than discarding it in an unused variable.
    return client.remove_targets(
        Rule=f'{curation_type}-scheduled-curation',
        Ids=[
            f'{curation_type}-event-target',
        ]
    )
6562089 | print("Hello World")
print("My name is <NAME>.")
print("Testing")
print(" /|")
print(" / |")
print(" / |")
print("/___|")
age = ("14")
print("My name is <NAME> and I'm " + age + "-years-old.")
name = ("<NAME>")
print("I'm " + name + " and I live in California")
location = ("California")
year = ("2005")
print("I'm " + name + " and was born in " + year + ". I live currently live in " + location + ".")
print(name.upper())
print(name.lower())
print(name.isupper())
print(name[0])
print(name[1])
print(name[2])
print(name[3])
print(name[4])
print(name[5])
print(name[6])
print(name[7])
print(name + " was born in 2005.")
print(name)
print(name.index("V"))
print(name.index("i"))
print(name.index("c"))
print(name.index("t"))
print(name.index("o"))
print(name.index("r"))
print(name.index("y"))
print(name.index(" "))
print(name.index("T"))
print(name.index("a"))
print(name.index("m"))
print(name.index("a"))
print(name.index("n"))
print(name.index("g"))
my_num = -5
print(abs(my_num))
print(pow(3, 5))
print(min(3, 5))
print(max(3, 5))
print(round(3.14156))
print(round(3.7))
from math import*
print(max(3.7, pow(3.7, 20)))
print(floor(3.7))
print(ceil(3.7))
print(sqrt(36))
print(sqrt (pow(36, 6)))
friends = ["Kevin", "Karen", "Jim"]
friends[1] = "Mike"
print(friends[1:])
lucky_numbers = [4,8,15,16,23,42]
friends1 = ["Kevin", "Karen", "Jim", "Oscar", "Toby"]
friends1.append("Creed")
friends1.insert(1, "Kelly")
friends1.reverse()
print(friends1)
print(friends1.index("Karen"))
friends2 = friends1.copy()
print(friends2)
coordinates = [(4, 5), (6, 7), (80, 34)]
coordinates[1] = 10
print(coordinates[0])
def say_hi(name, age):
    """Print a one-line greeting naming the person and their age.

    The original output was garbled ("Hello Mikeyou are are 35."): the
    separator after the name was missing and "you are" was duplicated.
    """
    print(f"Hello {name}, you are {age}.")
# Function calls defined above, then interactive input() exercises.
print("Top")
say_hi("Mike", 35)
say_hi("Steve", 70)
print("Bottom")
# Mad-libs style prompts.
color = input("Enter a color: ")
plural_noun = input("Enter a plural noun: ")
celebrity = input("Enter a celebrity: ")
print("Roses are " +color)
print(plural_noun+ " are blue")
print("I love " +celebrity)
name = input("Enter your name: ")
age = input("Enter your age: ")
print("Hello " + name + "! You're " + age + "!")
# Integer adder.
num1 = input("Enter a number: ")
num2 = input("Enter another a number: ")
result = int(num1) + int(num2)
print(result)
# Float adder.
num1 = input("Enter a number with a decimal: ")
num2 = input("Enter another a number with a decimal: ")
result = float(num1) + float(num2)
print(result)
| StarcoderdataPython |
348517 | <filename>gcal_notifier/main.py
#!/usr/bin/env python
from datetime import datetime
from typing import Any, Dict, Tuple
from gcal_notifier.cli import cli
from gcal_notifier.config_reader import init_config
from gcal_notifier.event_getter import SimpleGCalendarGetter
from gcal_notifier.event_loader import load_saved_events
from gcal_notifier.event_printer import SimpleGCalendarPrinter
from gcal_notifier.event_reminder import SimpleGCalendarNotifier
from gcal_notifier.event_saver import save_events
from gcal_notifier.globals import CACHE
from gcal_notifier.utils import define_period
def run_getter(
    general_params: Dict[str, Any],
    calendar_params: Dict[str, Any],
) -> None:
    """Run SimpleGCalendarGetter with user configs and cache the events.

    Fetches the next day's events (consumed by the notifier) and the
    current month's events (consumed by the printer), saving each set
    under CACHE.

    Args:
        general_params (Dict[str, Any]): General params
        calendar_params (Dict[str, Any]): Calendar params
    """
    getter = SimpleGCalendarGetter(general_params, calendar_params)
    # The original duplicated this load/save cycle verbatim for both
    # consumers; only the period and the cache file differ.
    for period_name, cache_file in (
        ("day", "events_notify.json"),
        ("month", "events_print.json"),
    ):
        getter.load_calendars(define_period(period_name))
        getter.load_events()
        save_events(getter.events, file_path=CACHE / cache_file)
def run_notifier(
    general_params: Dict[str, Any], calendar_params: Dict[str, Any]
) -> None:
    """Run SimpleGCalendarNotifier with user configs.

    Args:
        general_params (Dict[str, Any]): General params
        calendar_params (Dict[str, Any]): Calendar params
    """
    # Notifications operate on the day-scoped cache written by run_getter.
    saved_events = load_saved_events(CACHE / "events_notify.json")
    notifier = SimpleGCalendarNotifier(
        saved_events, general_params, calendar_params
    )
    notifier.search_reminders()
def run_printer(
    general_params: Dict[str, Any],
    calendar_params: Dict[str, Any],
    period: Tuple[datetime, datetime],
    format: str = "day",
) -> None:
    """Run SimpleGCalendarPrinter with user configs.

    Args:
        general_params (Dict[str, Any]): General params
        calendar_params (Dict[str, Any]): Calendar params
        period (Tuple[datetime, datetime]): Start/end datetimes bounding
            the events to print.
        format (str): Format to use when printing events
    """
    # Printing operates on the month-scoped cache written by run_getter.
    saved_events = load_saved_events(CACHE / "events_print.json")
    printer = SimpleGCalendarPrinter(
        saved_events, general_params, calendar_params, period, format=format
    )
    printer.print_events(format)
def gcal_notifier() -> None:
    """CLI entry point: parse arguments and dispatch the chosen subcommand."""
    args = cli()
    general_cfg, calendar_cfg = init_config()
    command = args.command
    if command == "get":
        run_getter(general_cfg, calendar_cfg)
    elif command == "notify":
        run_notifier(general_cfg, calendar_cfg)
    elif command == "print":
        # The printer needs both the resolved period bounds and its name.
        run_printer(general_cfg, calendar_cfg,
                    define_period(args.period), args.period)
| StarcoderdataPython |
11324685 | <reponame>danielsnider/ecosystem-project-website-template
import logging
import socket
from skyline.io.connection import Connection, ConnectionState
from skyline.exceptions import NoConnectionError
logger = logging.getLogger(__name__)
class ConnectionManager:
    """Tracks open Connections keyed by (host, port) address.

    Each entry maps an address to a (Connection, ConnectionState) pair.
    """

    def __init__(self, message_handler, closed_handler):
        # Handlers are forwarded to every Connection this manager creates.
        self._connections = {}
        self._message_handler = message_handler
        self._closed_handler = closed_handler

    def register_connection(self, opened_socket, address):
        """Wrap an already-open socket in a Connection and start it."""
        self._connections[address] = (
            Connection(
                opened_socket,
                address,
                self._message_handler,
                self._closed_handler,
            ),
            ConnectionState(),
        )
        # Index 0 of the tuple is the Connection itself.
        self._connections[address][0].start()

    def remove_connection(self, address):
        """Stop and forget the connection for *address*.

        Raises NoConnectionError if no such connection exists.
        """
        connection, state = self.get_connection_tuple(address)
        connection.stop()
        state.connected = False
        del self._connections[address]
        logger.debug("Removed connection to (%s:%d).", *address)

    def get_connection(self, address):
        """Return the Connection for *address*."""
        return self.get_connection_tuple(address)[0]

    def get_connection_state(self, address):
        """Return the ConnectionState for *address*."""
        return self.get_connection_tuple(address)[1]

    def get_connection_tuple(self, address):
        """Return the (Connection, ConnectionState) pair for *address*.

        Raises NoConnectionError when the address is unknown.
        """
        if address not in self._connections:
            host, port = address
            raise NoConnectionError(
                "Connection to ({}:{}) does not exist.".format(host, port))
        return self._connections[address]

    def broadcast(self, string_message):
        """Send *string_message* on every registered connection."""
        for _, (connection, _) in self._connections.items():
            connection.write_string_message(string_message)

    def connect_to(self, host, port):
        """Open a new TCP connection to (host, port) and register it."""
        new_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        new_socket.connect((host, port))
        self.register_connection(new_socket, (host, port))

    def stop(self):
        """Stop every connection and clear the registry."""
        for _, (connection, state) in self._connections.items():
            connection.stop()
            state.connected = False
        self._connections.clear()
| StarcoderdataPython |
391840 | <reponame>logic-and-learning-lab/Popper-experiments
import json
import inspect
class ExperimentResult:
    """Outcome of a single trial of one system on one problem."""

    def __init__(self, problem_name, system_name, trial, solution,
                 total_exec_time, conf_matrix, extra_stats):
        # Attribute names deliberately mirror the __init__ parameter names:
        # the JSON encoder below serializes objects by that correspondence.
        self.problem_name = problem_name
        self.system_name = system_name
        self.trial = trial
        self.solution = solution
        self.total_exec_time = total_exec_time
        self.conf_matrix = conf_matrix
        self.extra_stats = extra_stats

    def __str__(self):
        template = '{}__{} :: {:.2f}s :: {}'
        return template.format(self.problem_name, self.system_name,
                               self.total_exec_time, self.accuracy)

    @property
    def accuracy(self):
        """Accuracy from a (tp, fn, tn, fp) matrix; 0.5 when none recorded."""
        if not self.conf_matrix:
            return .5
        tp, fn, tn, fp = self.conf_matrix
        return (tp + tn) / (tp + fn + tn + fp)
# TODO(Brad): I copied all this from my Popper code and it's still a pretty lazy way to do JSON.
# Marker key that tags an encoded object with its class name.
TYPE = '__typ__'
WRITABLE_CLASSES = {ExperimentResult}
NAME_TO_CLASS = {clz.__name__:clz for clz in WRITABLE_CLASSES}


class ResultEncoder(json.JSONEncoder):
    """JSON encoder that serializes registered classes via their __init__ args."""

    def default(self, obj):
        if obj.__class__ in WRITABLE_CLASSES:
            # Keep only attributes whose names match __init__ parameters so
            # decode_result can rebuild the object with **kwargs.
            init_vars = inspect.getfullargspec(obj.__init__)[0]
            all_vars = vars(obj)
            final_dict = {key:all_vars[key] for key in init_vars if key in all_vars}
            final_dict[TYPE] = obj.__class__.__name__
            return final_dict
        else:
            return super().default(obj)


def write_result(file, out):
    """Dump *out* (which may contain registered objects) as JSON to *file*."""
    with open(file, "w") as f:
        f.write(json.dumps(out, cls=ResultEncoder))


# To use: stats = json.loads(s, object_hook=decode_stats)
# NOTE(review): the usage comment above says "decode_stats" but the hook
# defined here is decode_result -- confirm which name callers use.
def decode_result(dct):
    """json object_hook: rebuild a registered class instance from a tagged dict."""
    if TYPE in dct:
        clazz = NAME_TO_CLASS[dct[TYPE]]
        init_vars = inspect.getfullargspec(clazz.__init__)[0]
        final_dict = {key:dct[key] for key in init_vars if key in dct}
        return clazz(**final_dict)
    return dct
170399 | import json
import logging
import os
from unittest import mock
from elastic.cobalt_strike_extractor.extractor import CSBeaconExtractor
logger = logging.getLogger()
def test_transform_beacon(shared_datadir):
    """transform() output must equal the pre-recorded expected documents."""
    with mock.patch.dict(
        os.environ,
        {
            # Disable real Elasticsearch I/O so the extractor runs offline.
            "INPUT_ELASTICSEARCH_ENABLED": "False",
            "OUTPUT_ELASTICSEARCH_ENABLED": "False",
            "OUTPUT_CONSOLE_ENABLED": "True",
        },
    ):
        _extractor = CSBeaconExtractor()
        # Fixture files: raw input docs and their expected transformed form.
        _docs = (shared_datadir / "sample_raw_docs.ndjson").read_text().strip()
        _control_docs = (
            (shared_datadir / "sample_transformed_docs.ndjson").read_text().strip()
        )
        _data = []
        for item in _docs.split("\n"):
            _data.append(json.loads(item))
        _control_data = []
        for item in _control_docs.split("\n"):
            _control_data.append(json.loads(item))
        # transform() returns an iterable; materialize it for comparison.
        _results = _extractor.transform(_data)
        _results = list(_results)
        assert len(_results) == len(_control_data)
        _zip = zip(_results, _control_data)
        for item1, item2 in _zip:
            assert item1 == item2
| StarcoderdataPython |
6542315 | <reponame>Mithzyl/Master-college-selecting-api<gh_stars>0
# Generated by Django 3.1.5 on 2021-02-05 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Colleges table (name, province, 211/985 flags)."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Colleges',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('province', models.CharField(max_length=10)),
                # Flags for China's "Project 211" / "Project 985" universities.
                ('is_211', models.BooleanField(default=False)),
                ('is_985', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'College',
                'verbose_name_plural': 'Colleges',
            },
        ),
    ]
| StarcoderdataPython |
4942850 | <reponame>hughpyle/GW-BASIC<filename>conv/z80conv/conv.py<gh_stars>10-100
#!/usr/bin/python
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Licensed under GPLv2.
import sys
import traceback
from .lexer import Lexer
from .parser import Parser
from .transformer import Transformer
from .writer import PasmoWriter
def main():
    """Drive the conversion pipeline: stdin -> lexer -> parser -> transformer -> Pasmo writer."""
    lexer = Lexer(sys.stdin)
    parser = Parser(lexer)
    transformer = Transformer(parser)
    writer = PasmoWriter(transformer)
    try:
        # The writer pulls lazily through the whole pipeline, one line at a time.
        for line in writer.lines():
            print(line)
    except SyntaxError as e:
        # Report the error and its traceback on stdout alongside the output.
        print("Exception: %s" % e)
        traceback.print_exc(file=sys.stdout)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8120412 | from django.db import models
from django.db.models import DO_NOTHING
from django.urls import reverse
from apps.courses.models import Course, LessonScript
from apps.users.models import Student, Teacher
# TODO: consider group archiving (original note: "Archwizacja grupy?")
# from django.core.exceptions import ValidationError
class Group(models.Model):
    """A recurring class group: a teacher, students, and a weekly time slot."""

    # SET_NULL keeps the group if the teacher account is removed.
    teacher = models.ForeignKey(
        Teacher, on_delete=models.SET_NULL, blank=True, null=True
    )
    students = models.ManyToManyField(Student, blank=True)
    course = models.ForeignKey(Course, on_delete=models.SET_NULL, blank=True, null=True)
    # school
    MONDAY = 1
    TUESDAY = 2
    WEDNESDAY = 3
    THURSDAY = 4
    FRIDAY = 5
    SATURDAY = 6
    WEEK_DAY_CHOICES = (
        (MONDAY, "Monday"),
        (TUESDAY, "Tuesday"),
        (WEDNESDAY, "Wednesday"),
        (THURSDAY, "Thursday"),
        (FRIDAY, "Friday"),
        (SATURDAY, "Saturday"),
    )
    week_day = models.SmallIntegerField(choices=WEEK_DAY_CHOICES)
    start_time = models.TimeField()
    end_time = models.TimeField()

    class Meta:
        ordering = ("week_day",)

    def get_absolute_url(self):
        """Canonical detail-page URL for this group."""
        return reverse("groups:group-detail", kwargs={"pk": self.id})

    def __str__(self):
        return f"{self.get_week_day_display()} / {self.course} / {self.start_time} - {self.end_time} "
class Lesson(models.Model):
    """One numbered meeting of a Group, optionally tied to a lesson script."""

    group = models.ForeignKey(Group, on_delete=models.CASCADE, unique=False)
    # NOTE(review): default=None on a non-nullable integer field will fail
    # at the database level if no number is supplied -- confirm intent.
    number = models.SmallIntegerField(default=None)
    teacher = models.ForeignKey(
        Teacher, on_delete=models.SET_NULL, blank=True, null=True, unique=False
    )
    lesson_script = models.ForeignKey(
        LessonScript, on_delete=models.SET_NULL, blank=True, null=True
    )
    date = models.DateField(blank=True, null=True)
    start_time = models.TimeField(blank=True, null=True)
    end_time = models.TimeField(blank=True, null=True)
    is_started = models.BooleanField(default=False)
    is_ended = models.BooleanField(default=False)

    class Meta:
        ordering = ("number",)

    def __str__(self):
        return f"{self.group} / {self.number}"

    def get_absolute_url(self):
        """Detail URL; lessons are addressed within their parent group."""
        group_id = self.group.pk
        return reverse(
            "groups:lesson-detail", kwargs={"pk": self.pk, "group_id": group_id}
        )
    # TODO: add time validation (original note: "dodać walidacje czasu")
    # def clean(self):
    #     if self.start_date >= self.end_date:
    #         raise ValidationError("End date cannot be before start date")
class StudentPresence(models.Model):
    """Attendance record for one student at one lesson."""

    # DO_NOTHING: presence rows are kept even if the student row goes away.
    student = models.ForeignKey(
        Student,
        on_delete=DO_NOTHING,
    )
    lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE)
    is_present = models.BooleanField(default=False)
| StarcoderdataPython |
# Read n and print its factorial as "n x n-1 x ... x 1 = result".
n1 = int(input('Digite um número para calcular o fatorial dele:\n'))
c = n1
mult = 1
while c > 0:
    print('{}'.format(c), end='')
    mult = mult * c
    c -= 1
    # 'x' between factors; '=' after the final factor (when c reaches 0).
    print('x' if c >= 1 else '=', end='')
# NOTE(review): for n1 == 0 the loop never runs, so only "1" is printed
# without any "=" -- confirm whether that formatting is acceptable.
print(mult)
| StarcoderdataPython |
6406549 | from flask import render_template, request
import os
import nltk
import re
import pandas as pd
import numpy as np
import csv
from sentipt.sentipt import SentimentIntensityAnalyzer
import plotly.express as px
def init_app(app):
    """Register the /process route on the given Flask app."""
    @app.route("/process")
    def result():
        # Load the crawled texts queued in query.csv, then delete the queue.
        training_set = []
        with open('./query.csv') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                training_set.append((row['text']))
            os.remove('./query.csv')
        s = SentimentIntensityAnalyzer()
        def classificate(training_set):
            # One row of polarity scores per text.
            df = pd.DataFrame(columns=["neg", "neu", "pos", "compound"])
            for i in range(len(training_set)):
                e = s.polarity_scores(training_set[i])
                df = df.append(e, ignore_index=True)
            return df
        data = classificate(training_set)
        labels = ["Positivos","Negativos"] # ,"Neutros"]
        cmap = pd.Series(["blue","red"])
        #neutro = data["neu"].astype(np.float16).sum()
        positivo = data["pos"].astype(np.float16).sum()
        negativo = data["neg"].astype(np.float16).sum()
        values = [positivo, negativo]#, neutro]
        # Donut chart of total positive vs negative sentiment mass,
        # written as a static HTML page for the result template to embed.
        fig = px.pie(data, values=values, names=labels, color=cmap, width=800, height=800, hole=.3)
        fig.write_html('tcrawler/templates/plot.html', auto_open=False)
        return render_template("result.html")
| StarcoderdataPython |
8033752 | <filename>chatette/modifiers/__init__.py
"""
Module `chatette.modifiers`
Contains everything that is related to modifiers
(at the moment only their representation and not
generation behavior).
"""
| StarcoderdataPython |
8124454 | <filename>mundo-1/usando-modulos/ex018.py
from math import sin, cos, tan, radians
# Angle is read in degrees and converted to radians for the math functions.
# NOTE(review): int() rejects fractional angles -- float() may be intended.
ang = int(input('Insira um ângulo aqui: '))
print('Funções trigonométricas resultantes', end=' --> ')
print('Seno: {:.2f}, cosseno: {:.2f} e tangente {:.2f}'.format(sin(radians(ang)), cos(radians(ang)), tan(radians(ang))))
9675052 |
def foo(a, b):
    """Return the difference a - b shifted by the fixed offset 2 * 42 (= 84)."""
    offset = 2 * 42
    return a - b + offset
| StarcoderdataPython |
9694986 | #! /usr/bin/env python3
# Copyright (C) 2017 <NAME>. All Rights Reserved.
import unittest
from pyparsing import *
def get_grammar(debug=False):
    """Build the pyparsing grammar for K-framework terms.

    Parse actions convert matched text straight into Python values:
    lists, sets, dicts, ints, bools, the "$eot" marker, and identifiers.

    NOTE(review): the `debug` parameter is currently unused.
    """
    grammar = Forward()
    identifier = Word( alphas )
    # "false"/"true" literals become Python booleans.
    bools = Word("false").setParseAction(lambda s,l,t: False) \
        | Word("true").setParseAction(lambda s,l,t: True)
    integer = Word( nums ).setParseAction(lambda s,l,t: int(t[0]))
    special = "$eot"
    lp = Suppress(Literal("("))
    rp = Suppress(Literal(")"))
    # ListItem(x)... / .List  ->  Python list
    ListItem = Literal("ListItem") + lp + grammar + rp
    ListItem = ListItem.setParseAction(lambda s,l,t: [t[1:len(t)]])
    ListEmpty = Literal(".List")
    ListEmpty = ListEmpty.setParseAction(lambda s,l,t: [])
    List = (ListEmpty | OneOrMore(ListItem))
    List = List.setParseAction(lambda s,l,t: [[i for sublist in t for i in sublist]])
    # SetItem(x)... / .Set  ->  Python set
    SetItem = Literal("SetItem") + lp + grammar + rp
    SetItem = SetItem.setParseAction(lambda s,l,t: set(t[1:len(t)]))
    SetEmpty = Literal(".Set")
    SetEmpty = SetEmpty.setParseAction(lambda s,l,t: set())
    Set = (SetEmpty | OneOrMore(SetItem))
    Set = Set.setParseAction(lambda s,l,t: {i for Set in t for i in Set})
    # key |-> value ... / .Map  ->  Python dict
    MapEmpty = Literal(".Map")
    MapEmpty = MapEmpty.setParseAction(lambda s,l,t: {})
    MapArrow = Literal("|->")
    MapKey = integer | identifier
    MapItem = MapKey + MapArrow + grammar
    MapItem = MapItem.setParseAction(lambda s,l,t: {t[0]:t[2]})
    Map = (MapEmpty | OneOrMore(MapItem))
    Map = Map.setParseAction(lambda s, l, t: {k: v for d in t for k, v in d.items()})
    # Tie the recursion together; parenthesized terms recurse into grammar.
    grammar << (List | Set | Map | integer | bools | special | identifier | lp + grammar + rp )
    return grammar
def translate(kstr, grammar=None):
    """Parse a K-framework term string into the corresponding Python value.

    Args:
        kstr: The K term text to parse.
        grammar: Optional pre-built grammar; a fresh one is created when
            omitted.

    Returns:
        The first parsed value (list/set/dict/int/bool/str).
    """
    if grammar is None:
        # `is None` instead of the original `== None`: pyparsing elements
        # overload comparison operators, making equality with None
        # unreliable (and PEP 8 E711 forbids `== None` regardless).
        grammar = get_grammar()
    return grammar.parseString(kstr).asList()[0]
class test(unittest.TestCase):
    """Round-trip checks for the K-term grammar via translate()."""

    def test_list(self):
        kstr = "ListItem(1)"
        self.assertEqual(translate(kstr), [1])
        kstr = "ListItem(1) ListItem(2)"
        self.assertEqual(translate(kstr), [1, 2])
        kstr = "ListItem(ListItem(ListItem ( ListItem ( 42 ) ListItem ( .Map ) ListItem ( .Set ) )))"
        self.assertEqual(translate(kstr), [[[[42, {}, set()]]]])
        # NOTE(review): the string below has one more ')' than '(';
        # parseString without parseAll ignores trailing text, so the
        # assertion still passes -- confirm the imbalance is intentional.
        kstr = "ListItem(ListItem ( ListItem ( 42 ) ListItem ( .Map ) ListItem ( .Set ) )))"
        self.assertEqual(translate(kstr), [[[42, {}, set()]]])
        pass

    def test_map(self):
        # Nested maps under identifier keys.
        kstr = "x |-> ( 2 |-> 0 6 |-> 42 10 |-> 0 ) y |-> ( 9 |-> 24 3 |-> 0)"
        self.assertEqual(translate(kstr),
                         {'x': {2: 0, 6: 42, 10: 0}, 'y': {9: 24, 3: 0}})
        pass

    def test_set(self):
        # Sets as map values.
        kstr = "x |-> ( SetItem ( 2 ) SetItem ( 6 ) SetItem ( 10 ) ) y |-> ( SetItem ( 3 ) SetItem ( 9 ) )"
        self.assertEqual(translate(kstr), {'x' : {2, 6, 10}, 'y' : {3, 9}})
        pass

    def test_eot(self):
        # "$eot" passes through as a plain string inside nested lists.
        kstr = "ListItem ( ListItem ( $eot ) ListItem ( .Map ) ListItem ( .Set ) ) ListItem ( ListItem ( 10 ) ListItem ( .Map ) ListItem ( .Set ) )"
        self.assertEqual(translate(kstr), [['$eot', {}, set()], [10, {}, set()]])
        pass

    def test_bools(self):
        self.assertEqual(translate("false"), False)
        self.assertEqual(translate("true"), True)
        pass


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
1966810 | <filename>ccut/tests/dimension_test.py
from ..main.dimension import DimensionVector
def test():
    """Spot-checks for DimensionVector: power scaling, addition, and
    abbreviation parsing/normalization."""
    # raise_to_power multiplies every exponent.
    assert DimensionVector().set_dimensions("M1.1").raise_to_power(2).get_abbr() == 'M2.2'
    assert DimensionVector().set_dimensions("M1.1 L-1.2").raise_to_power(2).get_abbr() == 'M2.2 L-2.4'
    assert DimensionVector().set_dimensions("M").raise_to_power(1).get_abbr() == 'M'
    assert DimensionVector().set_dimensions("M L T").raise_to_power(2).get_abbr() == 'M2 L2 T2'
    # '+' adds exponents component-wise across the full 11-slot vector.
    assert str((DimensionVector().set_dimensions("M L2 D2") + DimensionVector().set_dimensions(
        "L3 Te-1 D-3")).get_dimension_vector()) == "[M 1.0, L 5.0, T 0, I 0, Te -1.0, N 0, J 0, D -1.0, C 0, B 0, U 0]"
    assert str(DimensionVector().set_dimensions(
        "M L2 Te-1").get_dimension_vector()) == "[M 1.0, L 2.0, T 0, I 0, Te -1.0, N 0, J 0, D 0, C 0, B 0, U 0]"
    # get_abbr normalizes ordering and drops trailing ".0" from exponents.
    assert DimensionVector().set_dimensions("M").get_abbr() == "M"
    assert DimensionVector().set_dimensions("L2").get_abbr() == "L2"
    assert DimensionVector().set_dimensions("L2 T-2").get_abbr() == "L2 T-2"
    assert DimensionVector().set_dimensions("T-2 L2").get_abbr() == "L2 T-2"
    assert DimensionVector().set_dimensions("T-2.0 L2.1").get_abbr() == "L2.1 T-2"
    assert DimensionVector().set_dimensions("C D J N Te I T L M").get_abbr() == "M L T I Te N J D C"
    assert DimensionVector().set_dimensions("T2.0 L2.1").get_abbr() == "L2.1 T2"
    assert DimensionVector().set_dimensions("Te3 T3").get_abbr() == "T3 Te3"
| StarcoderdataPython |
1912517 | """Sample script to demonstrate usage of the DataAcquisitionClient."""
def main():
    """Creates a sample client that reads data from a TCP server
    (see demo/server.py). Data is written to a rawdata.csv file, as well as a
    buffer.db sqlite3 database. These files are written in whichever directory
    the script was run.
    The client can be stopped with a Keyboard Interrupt (Ctl-C)."""
    import time
    import sys
    from psychopy import clock
    # Allow the script to be run from the bci root, acquisition dir, or
    # demo dir.
    sys.path.append('.')
    sys.path.append('..')
    sys.path.append('../..')
    from bcipy.acquisition.client import DataAcquisitionClient
    from bcipy.acquisition.devices import supported_device
    from bcipy.acquisition.protocols.dsi.dsi_connector import DsiConnector
    # Start the server with the command:
    # python bcipy/acquisition/datastream/tcp_server.py --name DSI --port 9000
    device_spec = supported_device('DSI')
    connection_params = {'host': '127.0.0.1', 'port': 9000}
    connector = DsiConnector(connection_params=connection_params,
                             device_spec=device_spec)
    # Use default processor (FileWriter), buffer, and clock.
    client = DataAcquisitionClient(connector=connector, clock=clock.Clock())
    try:
        client.start_acquisition()
        print("\nCollecting data for 3s... (Interrupt [Ctl-C] to stop)\n")
        # NOTE(review): this loop always breaks after one iteration, so the
        # `while True` is effectively a single 3s collection window.
        while True:
            time.sleep(3)
            print(f"Number of samples: {client.get_data_len()}")
            client.stop_acquisition()
            client.cleanup()
            break
    except IOError as e:
        print(f'{e.strerror}; make sure you started the server.')
    except KeyboardInterrupt:
        # Still shut the client down cleanly on Ctl-C.
        print("Keyboard Interrupt")
        client.stop_acquisition()
        client.cleanup()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6564178 | <gh_stars>0
from django.conf import settings
from django_pgpy.defaults import get_default_restorers
# Settings override with a callable fallback from defaults.
DJANGO_PGPY_DEFAULT_RESTORERS = getattr(settings, 'DJANGO_PGPY_DEFAULT_RESTORERS', get_default_restorers)
# Symmetric key length (32 -- presumably bytes, i.e. AES-256; confirm in usage).
DJANGO_PGPY_AES_KEY_LENGTH = 32
# RSA key size (2048 -- presumably bits; confirm in key-generation code).
DJANGO_PGPY_RSA_KEY_LENGTH = 2048
93679 | <reponame>elwoodxblues/saleor<gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename-era migration (userprofile -> account): set verbose names and
    adjust User.addresses / User.email field options."""

    dependencies = [("account", "0008_auto_20161115_1011")]
    # This migration supersedes the old userprofile-app version.
    replaces = [("userprofile", "0009_auto_20170206_0407")]
    operations = [
        migrations.AlterModelOptions(
            name="address",
            options={"verbose_name": "address", "verbose_name_plural": "addresses"},
        ),
        migrations.AlterModelOptions(
            name="user",
            options={"verbose_name": "user", "verbose_name_plural": "users"},
        ),
        migrations.AlterField(
            model_name="user",
            name="addresses",
            field=models.ManyToManyField(
                blank=True, to="account.Address", verbose_name="addresses"
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="email",
            field=models.EmailField(max_length=254, unique=True, verbose_name="email"),
        ),
    ]
| StarcoderdataPython |
# Read two integers from one input line and report whether either is a
# multiple of the other.
x = [int(i) for i in input().split()]
# NOTE(review): a 0 in either position raises ZeroDivisionError on the
# corresponding modulo -- confirm inputs are expected to be non-zero.
if x[0] % x[1] == 0 or x[1] % x[0] == 0:
    print("Sao Multiplos")
else:
    print("Nao sao Multiplos")
| StarcoderdataPython |
11288887 | <reponame>carlio/pep8
#: E401
import os, sys
#: Okay
import os
import sys
from subprocess import Popen, PIPE
from myclass import MyClass
from foo.bar.yourclass import YourClass
import myclass
import foo.bar.yourclass
#: E402
__all__ = ['abc']
import foo
#: Okay
try:
import foo
except:
pass
else:
print('imported foo')
finally:
print('made attempt to import foo')
import bar
#: E402
VERSION = '1.2.3'
import foo
#: E402
import foo
a = 1
import bar
| StarcoderdataPython |
3491014 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import webob
from nova import context
from nova import flags
from nova import test
from nova.api import ec2
from nova.auth import manager
FLAGS = flags.FLAGS
class FakeControllerClass(object):
    """Stand-in controller; only its class name matters for role lookup."""
    pass


class FakeApiRequest(object):
    """Minimal ec2 request double carrying a fake controller and an action."""

    def __init__(self, action):
        self.action = action
        self.controller = FakeControllerClass()
class AccessTestCase(test.TestCase):
    """Exercises the ec2.Authorizer middleware against users holding
    different cloud-wide and per-project roles."""

    def _env_for(self, ctxt, action):
        # Minimal WSGI environ carrying the auth context and a fake request.
        env = {}
        env['ec2.context'] = ctxt
        env['ec2.request'] = FakeApiRequest(action)
        return env

    def setUp(self):
        super(AccessTestCase, self).setUp()
        um = manager.AuthManager()
        self.context = context.get_admin_context()
        # Make test users
        self.testadmin = um.create_user('testadmin')
        self.testpmsys = um.create_user('testpmsys')
        self.testnet = um.create_user('testnet')
        self.testsys = um.create_user('testsys')
        # Assign some rules
        um.add_role('testadmin', 'cloudadmin')
        um.add_role('testpmsys', 'sysadmin')
        um.add_role('testnet', 'netadmin')
        um.add_role('testsys', 'sysadmin')
        # Make a test project
        self.project = um.create_project('testproj',
                                         'testpmsys',
                                         'a test project',
                                         ['testpmsys', 'testnet', 'testsys'])
        # Per-project roles (distinct from the cloud-wide roles above).
        self.project.add_role(self.testnet, 'netadmin')
        self.project.add_role(self.testsys, 'sysadmin')
        #user is set in each test

        def noopWSGIApp(environ, start_response):
            # Downstream app that always succeeds; the Authorizer under
            # test decides whether a request ever reaches it.
            start_response('200 OK', [])
            return ['']
        self.mw = ec2.Authorizer(noopWSGIApp)
        # Map each fake action name to the roles allowed to invoke it.
        self.mw.action_roles = {'FakeControllerClass': {
            '_allow_all': ['all'],
            '_allow_none': [],
            '_allow_project_manager': ['projectmanager'],
            '_allow_sys_and_net': ['sysadmin', 'netadmin'],
            '_allow_sysadmin': ['sysadmin']}}

    def tearDown(self):
        um = manager.AuthManager()
        # Delete the test project
        um.delete_project('testproj')
        # Delete the test user
        um.delete_user('testadmin')
        um.delete_user('testpmsys')
        um.delete_user('testnet')
        um.delete_user('testsys')
        super(AccessTestCase, self).tearDown()

    def response_status(self, user, methodName):
        # Run one request through the middleware and report the HTTP status.
        ctxt = context.RequestContext(user, self.project)
        environ = self._env_for(ctxt, methodName)
        req = webob.Request.blank('/', environ)
        resp = req.get_response(self.mw)
        return resp.status_int

    def shouldAllow(self, user, methodName):
        self.assertEqual(200, self.response_status(user, methodName))

    def shouldDeny(self, user, methodName):
        self.assertEqual(401, self.response_status(user, methodName))

    def test_001_allow_all(self):
        users = [self.testadmin, self.testpmsys, self.testnet, self.testsys]
        for user in users:
            self.shouldAllow(user, '_allow_all')

    def test_002_allow_none(self):
        # cloudadmin bypasses role checks; everyone else is denied.
        self.shouldAllow(self.testadmin, '_allow_none')
        users = [self.testpmsys, self.testnet, self.testsys]
        for user in users:
            self.shouldDeny(user, '_allow_none')

    def test_003_allow_project_manager(self):
        for user in [self.testadmin, self.testpmsys]:
            self.shouldAllow(user, '_allow_project_manager')
        for user in [self.testnet, self.testsys]:
            self.shouldDeny(user, '_allow_project_manager')

    def test_004_allow_sys_and_net(self):
        for user in [self.testadmin, self.testnet, self.testsys]:
            self.shouldAllow(user, '_allow_sys_and_net')
        # denied because it doesn't have the per project sysadmin
        for user in [self.testpmsys]:
            self.shouldDeny(user, '_allow_sys_and_net')
# Run the suite directly when executed as a script.
if __name__ == "__main__":
    # TODO: Implement use_fake as an option
    unittest.main()
| StarcoderdataPython |
281550 | <reponame>Nova-Noir/novabot_v2
from nonebot import get_driver
global_config = get_driver().config
# nickname: str = list(global_config.nickname)[-1] # Yet this will return a randomly one.
# Hard-coded nickname used instead of picking from the configured set above.
nickname: str = "伊蕾娜"
| StarcoderdataPython |
1972267 | from datek_jaipur.domain.compound_types.goods import GoodsType
from datek_jaipur.errors import EventValidationError
class GoodsBoughtValidationError(EventValidationError):
    """Base class for validation failures of a goods-bought event."""
    pass


class TooMuchCardsInHandError(GoodsBoughtValidationError):
    """Presumably raised when a purchase would exceed the hand limit -- name only; confirm in event logic."""
    pass


class CardNotOnDeckError(GoodsBoughtValidationError):
    """Raised for a goods type that is not available; keeps the type for inspection."""

    def __init__(self, goods_type: GoodsType):
        # Stored so handlers/tests can see which goods type was requested.
        self.goods_type = goods_type
| StarcoderdataPython |
3285979 | <gh_stars>1-10
"""Rest API for Demo."""
from chalice import Chalice
from chalice import Response
from demo_dao import DemoDao
from util.logger_utility import LoggerUtility
APP = Chalice(app_name='ramit-test')
APP.debug = True
@APP.route('/info', methods=['POST'])
def info():
    """Info on user."""
    # Set log level
    LoggerUtility.set_level()
    try:
        request = APP.current_request
        demo_dao = DemoDao(request.json_body)
        response = demo_dao.info()
        LoggerUtility.log_info('API Invoke sucessfully!')
        return Response(response, status_code=200)
    except UserNotFoundException as user_not_found_exception:
        # NOTE(review): UserNotFoundException is never imported in this
        # module, so if any exception reaches here this clause itself raises
        # NameError -- confirm the missing import (likely from demo_dao).
        body = {'Code': '404 - UserNotFound', 'Message': str(user_not_found_exception)}
        return Response(body, status_code=404)
    except Exception as exception:
        body = {'Code': '500- InternalServerError', 'Message': str(exception)}
        return Response(body, status_code=500)
@APP.route('/favorite-shows', methods=['GET'])
def favorite_shows():
    """GET /favorite-shows: return the favorite shows built by DemoDao."""
    # Configure logging verbosity before handling the request.
    LoggerUtility.set_level()
    try:
        dao = DemoDao(APP.current_request.json_body)
        return Response(dao.favorite_shows(), status_code=200)
    except Exception as exception:
        error_body = {'Code': '500- InternalServerError', 'Message': str(exception)}
        return Response(error_body, status_code=500)
4928948 | import logging
from multiprocessing import Manager
import pytest
from tx.readable_log import getLogger, format_message
from tx.parallex.objectstore import PlasmaStore, SimpleStore
logger = getLogger(__name__, logging.INFO)
@pytest.fixture
def manager():
    """Provide a multiprocessing Manager, torn down when the test finishes."""
    with Manager() as mgr:
        yield mgr
@pytest.fixture(params=[lambda manager: PlasmaStore(manager, 100000), SimpleStore])
def object_store(manager, request):
    """Yield an initialized object store (Plasma-backed or in-memory).

    Fix: the original used a bare ``except`` that only shut the store down on
    failure (leaking it on success) and silently swallowed the error. A
    ``finally`` guarantees shutdown on every path and lets exceptions propagate.
    """
    store = request.param(manager)
    try:
        store.init()
        yield store
    finally:
        store.shutdown()
3332082 | from github import Github
import argparse
import os
import requests
import json
import re
from difflib import get_close_matches
from urllib.parse import urlparse
def setup_args():
    """Build and evaluate the command-line argument parser for this script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--token", help="A GitHub token for the repo")
    return arg_parser.parse_args()
def fuzzy_review_comment_check(comment_body):
    """Return True if the comment starts with an accepted etiquette prefix.

    A fuzzy match (difflib, cutoff 0.8) against the module-level ``pref_list``
    is used so minor typos still count.

    Fixes: the original returned None (falsy) when the first word was the
    bot's own "⚠️" marker, causing already-flagged comments to be re-flagged
    on every run; it also raised IndexError on an empty comment body.
    """
    words = comment_body.split()
    if not words:
        return False  # empty comment: nothing to check
    first_word = words[0]
    if first_word == "⚠️":
        return True  # already flagged by a previous run of this bot
    first_word = re.sub('[^A-Za-z0-9]+', '', first_word)  # strip punctuation
    return bool(get_close_matches(first_word, pref_list, cutoff=0.8))
def parse_review_comment(data, github):
    """Scan top-level review comments and flag those violating etiquette."""
    for comment in data:
        if "in_reply_to_id" in comment:
            continue  # replies are exempt; only top-level comments are checked
        body = comment["body"]
        if not fuzzy_review_comment_check(body):
            review_comment_edit(comment["id"], github, body)
def review_comment_edit(id, github, body):
    """PATCH review comment *id*, prepending a warning marker and a link to
    the PR-etiquette guide.

    Fix: dropped the debug ``print(headers)``, which leaked the bearer token
    into the CI log; also removed the unused ``resp`` local.
    """
    url = "https://api.github.com/repos/{}/pulls/comments/{}".format(
        repository_name, id
    )
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": "Bearer " + str(github),
    }
    payload = {
        "body": "⚠️ " + str(body)
        + " \n\n⚠️ [PR Comment Etiquette](https://github.com/HomeXLabs/reviewington/blob/main/docs/pr_etiquette.md) not followed on above comment ⚠️"
    }
    requests.patch(url=url, headers=headers, data=json.dumps(payload))
def main():
    """Entry point: pull every review comment on the current PR and flag the
    top-level comments that do not follow the etiquette prefixes."""
    global repository_name
    global pref_list
    global pr
    # Context provided by the GitHub Actions runtime environment.
    job_name = os.environ["GITHUB_WORKFLOW"]
    repository_name = os.environ["GITHUB_REPOSITORY"]
    git_ref = os.environ["GITHUB_REF"]
    # Command-line arguments (GitHub token).
    cli_args = setup_args()
    token = cli_args.token
    # Accepted comment prefixes, plus the marker this bot itself inserts.
    pref_list = [
        "Change",
        "Question",
        "Concern",
        "Discussion",
        "Praise",
        "Suggestion",
        "Nitpick",
        "Guide",
        "⚠️ ",
    ]
    pr = git_ref.split("/")[2]
    comments_url = "https://api.github.com/repos/{}/pulls/{}/comments".format(
        repository_name, pr
    )
    request_headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": "Bearer " + str(token),
    }
    response = requests.get(url=comments_url, headers=request_headers)
    comments = response.json()
    parse_review_comment(comments, token)
# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
9658639 | <gh_stars>10-100
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from .field import *
from .char import *
from .float import *
from .integer import *
from .boolean import *
from .text import *
from .selection import *
from .date import *
from .datetime import *
from .time import *
from .binary import *
from .many2one import *
from .one2many import *
from .many2many import *
from .reference import *
from .file import *
from .json import *
from .reference import *
from .decimal import *
| StarcoderdataPython |
5194504 | import numpy as np
from scipy.signal import stft
SOUND_SPEED = 340 # [m/s]
# Steering vectors
def compute_steering_vectors_single_frequency(array_geometry, frequency, theta_grid, phi_grid):
    """Far-field steering vectors for one frequency over a (theta, phi) grid.

    array_geometry: 3 sequences of per-mic x/y/z coordinates [m].
    theta_grid, phi_grid: angles in degrees.
    Returns a complex array of shape (n_thetas, n_phis, n_mics) holding
    exp(-j k r.u) per direction/mic.
    """
    # wave number
    k = 2*np.pi*frequency/SOUND_SPEED
    n_mics = len(array_geometry[0])
    theta_grid = theta_grid * np.pi/180 # [degree] to [radian]
    phi_grid = phi_grid * np.pi/180 # [degree] to [radian]
    # Unit direction components on the angular grid (spherical -> Cartesian).
    u = np.sin(theta_grid.reshape(-1, 1)).dot(np.cos(phi_grid).reshape(1, -1))
    v = np.sin(theta_grid.reshape(-1, 1)).dot(np.sin(phi_grid).reshape(1, -1))
    w = np.tile(np.cos(theta_grid.reshape(-1, 1)), (1, phi_grid.shape[0]))
    # Projection of each mic position onto each direction.
    x = u.reshape(u.shape[0], u.shape[1], 1)*array_geometry[0].reshape(1, 1, n_mics)
    y = v.reshape(v.shape[0], v.shape[1], 1)*array_geometry[1].reshape(1, 1, n_mics)
    z = w.reshape(w.shape[0], w.shape[1], 1)*array_geometry[2].reshape(1, 1, n_mics)
    return np.exp( -1j*k*(x + y + z))
def compute_steering_vectors(array_geometry, sampling_frequency, n_fft, theta_grid, phi_grid):
    """Steering vectors for every FFT bin over a (theta, phi) angular grid.

    Returns a complex64 array of shape (n_fft, n_thetas, n_phis, n_mics).
    """
    n_thetas = len(theta_grid)
    n_phis = len(phi_grid)
    n_mics = len(array_geometry[0])
    steering_vectors = np.zeros((n_fft, n_thetas, n_phis, n_mics), dtype=np.complex64)
    for i_fft in range(n_fft):
        # Bin index maps linearly onto [0, fs/2).
        frequency = (i_fft / n_fft) * (sampling_frequency/2)
        steering_vectors[i_fft] = compute_steering_vectors_single_frequency(array_geometry, frequency, theta_grid, phi_grid)
    return steering_vectors
def compute_sinr_2(source_tf_multichannel, interference_tf_multichannel):
    """Signal-to-interference ratio in dB from multichannel TF frames.

    Both inputs have shape (n_fft_bins, n_mics, n_frames); per-bin powers are
    the traces of the sample covariance matrices X X^H.

    Fix: the original referenced undefined names ``source_stft_multichannel``
    and ``interference_stft_multichannel`` instead of the parameters, raising
    NameError on every call.
    """
    source_power = 0
    interference_power = 0
    n_fft_bins = source_tf_multichannel.shape[0]
    for i_f in range(n_fft_bins):
        source_power += np.trace(source_tf_multichannel[i_f].dot(
            source_tf_multichannel[i_f].transpose().conjugate()))
        interference_power += np.trace(interference_tf_multichannel[i_f].dot(
            interference_tf_multichannel[i_f].transpose().conjugate()))
    return 10*np.log10(np.abs(source_power/interference_power))
def compute_sinr(source_tf_multichannel, interference_tf_multichannel, weights=None):
    """Signal-to-interference ratio in dB, optionally through a beamformer.

    With `weights` (per-bin vectors), powers are w^H (X X^H) w; without, the
    raw channel powers (covariance traces) are compared.
    """
    n_fft_bins, n_mics, _ = source_tf_multichannel.shape
    source_power = 0
    interference_power = 0
    if weights is not None:
        for i_f in range(n_fft_bins):
            # NOTE(review): the source term reshapes weights to a column while
            # the interference term uses the 1-D vector directly; the scalars
            # agree but the accumulated shapes differ ((1,1) vs scalar) —
            # confirm this asymmetry is intentional.
            source_power += weights[i_f].reshape(n_mics, 1).transpose().conjugate().dot(
                source_tf_multichannel[i_f].dot(
                    source_tf_multichannel[i_f].transpose().conjugate())).dot(
                        weights[i_f].reshape(n_mics, 1))
            interference_power += weights[i_f].transpose().conjugate().dot(
                interference_tf_multichannel[i_f].dot(
                    interference_tf_multichannel[i_f].transpose().conjugate())).dot(
                        weights[i_f])
    else:
        for i_f in range(n_fft_bins):
            source_power += np.trace(source_tf_multichannel[i_f].dot(source_tf_multichannel[i_f].transpose().conjugate()))
            interference_power += np.trace(interference_tf_multichannel[i_f].dot(interference_tf_multichannel[i_f].transpose().conjugate()))
    return 10*np.log10(np.abs(source_power/interference_power))
def compute_mvdr_tf_beamformers(source_steering_vectors, tf_frames_multichannel, diagonal_loading_param=1):
    """Per-bin MVDR beamformers w = R^-1 v / (v^H R^-1 v).

    R is the diagonally-loaded sample covariance of the TF frames for the bin.
    Returns complex64 of shape (n_fft_bins, n_mics).
    """
    n_fft_bins, n_mics = source_steering_vectors.shape
    beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
    # NOTE(review): shape[1] indexes the mic axis of (n_fft, n_mics, n_frames)
    # input, not the frame count — preserved as-is; confirm intent.
    n_frames = tf_frames_multichannel.shape[1]
    for k in range(n_fft_bins):
        frames = tf_frames_multichannel[k]
        v = source_steering_vectors[k, :]
        R = 1./n_frames * frames.dot(frames.transpose().conjugate()) \
            + diagonal_loading_param*np.identity(n_mics, dtype=np.complex64)
        invR = np.linalg.inv(R)
        gain = v.transpose().conjugate().dot(invR).dot(v)
        beamformers[k] = invR.dot(v) / gain
    return beamformers
def compute_mvndr_tf_beamformers(source_steering_vectors, tf_frames_multichannel, regularization_param=1):
    """Minimum-variance near-distortionless response (MVNDR) beamformers.

    Solves w = argmin w^H R w + lambda |v_s^H w - 1|^2 per frequency bin,
    whose closed form is w = lambda (X X^H + I + lambda v_s v_s^H)^-1 v_s.

    Fix: the rank-1 penalty was computed as the *element-wise* product
    v * conj(v) (a real vector broadcast across rows, yielding a
    non-Hermitian matrix); it must be the outer product v v^H.
    """
    n_fft_bins, n_mics = source_steering_vectors.shape
    mvndr_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
    for i_fft_bin in range(n_fft_bins):
        v = source_steering_vectors[i_fft_bin, :]
        # Sample covariance + identity loading + rank-1 distortion penalty.
        R = tf_frames_multichannel[i_fft_bin].dot(tf_frames_multichannel[i_fft_bin].transpose().conjugate())\
            + np.identity(n_mics)\
            + regularization_param*np.outer(v, v.conjugate())
        invR = np.linalg.inv(R)
        mvndr_tf_beamformers[i_fft_bin] = regularization_param*invR.dot(v)
    return mvndr_tf_beamformers
def compute_lcmv_tf_beamformers(steering_vectors, tf_frames_multichannel, constraint_vector):
    """Linearly-constrained minimum-variance beamformers per frequency bin.

    w = R^-1 V (V^H R^-1 V)^-1 c, where V stacks the constraint steering
    vectors and c holds the desired response per constraint.
    """
    n_fft_bins, n_mics, n_steering_vectors = steering_vectors.shape
    lcmv_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
    for i_fft_bin in range(n_fft_bins):
        # NOTE(review): len() of a (n_mics, n_frames) slice is n_mics, not the
        # frame count — confirm the intended averaging constant.
        n_samples = len(tf_frames_multichannel[i_fft_bin])
        R = 1./n_samples * (tf_frames_multichannel[i_fft_bin].dot(
            tf_frames_multichannel[i_fft_bin].transpose().conjugate()) \
            + np.identity(n_mics) )
        invR = np.linalg.inv(R)
        normalization_matrix = steering_vectors[i_fft_bin].transpose().conjugate().dot(
            invR).dot(steering_vectors[i_fft_bin])
        # Shrinkage toward a scaled identity keeps the matrix invertible.
        normalization_matrix = (1 - 1e-3)*normalization_matrix \
            + 1e-3*np.trace(normalization_matrix)/n_steering_vectors * 1*np.identity(n_steering_vectors)
        inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
        lcmv_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors[i_fft_bin]).dot(
            inverse_normalization_matrix).dot(constraint_vector)
    return lcmv_tf_beamformers
def compute_null_controlling_tf_beamformers(source_steering_vectors, null_steering_vectors, tf_frames_multichannel,
                                            null_constraint_threshold, eigenvalue_percentage_threshold=0.99):
    """LCMV-style beamformers: unit gain on the source direction, response
    pushed toward `null_constraint_threshold` over the null directions.

    The null steering vectors are first compressed to the dominant
    eigenvectors of their correlation matrix (keeping
    `eigenvalue_percentage_threshold` of the absolute spectral mass).
    """
    n_fft_bins, n_mics, n_null_steering_vectors = null_steering_vectors.shape
    nc_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
    for i_fft_bin in range(n_fft_bins):
        null_steering_correlation_matrix = null_steering_vectors[i_fft_bin].dot(
            null_steering_vectors[i_fft_bin].transpose().conjugate())
        # eigh returns eigenvalues in ascending order; keep the largest ones
        # covering the requested fraction of the spectrum.
        eigenvalues, eigenvectors = np.linalg.eigh(null_steering_correlation_matrix)
        running_sums = np.cumsum(np.abs(eigenvalues[-1::-1]))
        cutoff_index = np.searchsorted(running_sums,
                                       eigenvalue_percentage_threshold * running_sums[-1])
        eigenvectors = eigenvectors[:, len(eigenvalues)-cutoff_index-1:]
        # First column: source constraint; remaining: compressed null set.
        steering_vectors = np.hstack((source_steering_vectors[i_fft_bin].reshape(-1, 1), eigenvectors))
        # NOTE(review): len() of a (n_mics, n_frames) slice is n_mics — confirm
        # the intended averaging constant.
        n_samples = len(tf_frames_multichannel[i_fft_bin])
        R = 1./n_samples * (tf_frames_multichannel[i_fft_bin].dot(
            tf_frames_multichannel[i_fft_bin].transpose().conjugate()) \
            + np.identity(n_mics) )
        invR = np.linalg.inv(R)
        normalization_matrix = steering_vectors.transpose().conjugate().dot(
            invR).dot(steering_vectors)
        """ Regularization for dealing with ill-conditionaed normalization matrix
        Ref: <NAME>, <NAME>, "Source reconstruction of broadband EEG/MEG data using
        the frequency-adaptive broadband (FAB) beamformer", bioRxiv
        Equation (12) in https://www.biorxiv.org/content/biorxiv/early/2018/12/20/502690.full.pdf
        """
        normalization_matrix = (1 - 1e-3)*normalization_matrix \
            + 1e-3*np.trace(normalization_matrix)/steering_vectors.shape[1] * 10*np.identity(steering_vectors.shape[1])
        inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
        constraint_vector = null_constraint_threshold*np.ones(steering_vectors.shape[1])
        constraint_vector[0] = 1
        nc_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors).dot(
            inverse_normalization_matrix).dot(constraint_vector)
    return nc_tf_beamformers
def compute_null_controlling_tf_beamformers_2(source_steering_vectors, null_steering_vectors, tf_sample_covariance_batch,
                                              null_constraint_threshold, eigenvalue_percentage_threshold=0.99, diagonal_loading_param=1):
    """Variant of compute_null_controlling_tf_beamformers that takes a batch
    of precomputed per-bin sample covariance matrices instead of raw frames.
    """
    n_fft_bins, n_mics, n_null_steering_vectors = null_steering_vectors.shape
    nc_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
    for i_fft_bin in range(n_fft_bins):
        null_steering_correlation_matrix = null_steering_vectors[i_fft_bin].dot(
            null_steering_vectors[i_fft_bin].transpose().conjugate())
        # Keep the dominant eigenvectors covering the requested spectral mass.
        eigenvalues, eigenvectors = np.linalg.eigh(null_steering_correlation_matrix)
        running_sums = np.cumsum(np.abs(eigenvalues[-1::-1]))
        cutoff_index = np.searchsorted(running_sums,
                                       eigenvalue_percentage_threshold * running_sums[-1])
        eigenvectors = eigenvectors[:, len(eigenvalues)-cutoff_index-1:]
        steering_vectors = np.hstack((source_steering_vectors[i_fft_bin].reshape(-1, 1), eigenvectors))
        # Batch-averaged covariance with diagonal loading.
        R = np.sum(tf_sample_covariance_batch[:, i_fft_bin, :, :], axis=0) / len(tf_sample_covariance_batch) + diagonal_loading_param*np.identity(n_mics)
        invR = np.linalg.inv(R)
        normalization_matrix = steering_vectors.transpose().conjugate().dot(
            invR).dot(steering_vectors)
        """ Regularization for dealing with ill-conditionaed normalization matrix
        Ref: <NAME>, <NAME>, "Source reconstruction of broadband EEG/MEG data using
        the frequency-adaptive broadband (FAB) beamformer", bioRxiv
        Equation (12) in https://www.biorxiv.org/content/biorxiv/early/2018/12/20/502690.full.pdf
        """
        normalization_matrix = (1 - 1e-3)*normalization_matrix \
            + 1e-3*np.trace(normalization_matrix)/steering_vectors.shape[1] * 10*np.identity(steering_vectors.shape[1])
        inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
        constraint_vector = null_constraint_threshold*np.ones(steering_vectors.shape[1])
        constraint_vector[0] = 1
        nc_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors).dot(
            inverse_normalization_matrix).dot(constraint_vector)
    return nc_tf_beamformers
def compute_null_controlling_minibatch_tf_beamformers(source_steering_vectors,
                                                      null_steering_vectors, tf_frames_multichannel_batch,
                                                      null_constraint_threshold, eigenvalue_percentage_threshold=0.99):
    """Null-controlling beamformers averaged over a minibatch of TF frames.

    Same constraint construction as compute_null_controlling_tf_beamformers,
    but R is the mean of per-item covariances plus a fixed diagonal load.
    """
    n_fft_bins, n_mics, n_null_steering_vectors = null_steering_vectors.shape
    nc_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
    for i_fft_bin in range(n_fft_bins):
        null_steering_correlation_matrix = null_steering_vectors[i_fft_bin].dot(
            null_steering_vectors[i_fft_bin].transpose().conjugate())
        # Keep the dominant eigenvectors covering the requested spectral mass.
        eigenvalues, eigenvectors = np.linalg.eigh(null_steering_correlation_matrix)
        running_sums = np.cumsum(np.abs(eigenvalues[-1::-1]))
        cutoff_index = np.searchsorted(running_sums,
                                       eigenvalue_percentage_threshold * running_sums[-1])
        eigenvectors = eigenvectors[:, len(eigenvalues)-cutoff_index-1:]
        steering_vectors = np.hstack((source_steering_vectors[i_fft_bin].reshape(-1, 1), eigenvectors))
        R = np.zeros((n_mics, n_mics), dtype=np.complex64)
        for tf_frames_multichannel in tf_frames_multichannel_batch:
            # NOTE(review): len() of a (n_mics, n_frames) slice is n_mics —
            # confirm the intended normalizer.
            n_samples = len(tf_frames_multichannel[i_fft_bin])
            R += 1./n_samples * (tf_frames_multichannel[i_fft_bin].dot(
                tf_frames_multichannel[i_fft_bin].transpose().conjugate()))
        R = R / len(tf_frames_multichannel_batch)
        R += 20*np.identity(n_mics) # To prevent singularity of R
        invR = np.linalg.inv(R)
        normalization_matrix = steering_vectors.transpose().conjugate().dot(
            invR).dot(steering_vectors)
        """ Regularization for dealing with ill-conditionaed normalization matrix
        Ref: <NAME>, <NAME>, "Source reconstruction of broadband EEG/MEG data using
        the frequency-adaptive broadband (FAB) beamformer", bioRxiv
        Equation (12) in https://www.biorxiv.org/content/biorxiv/early/2018/12/20/502690.full.pdf
        """
        normalization_matrix = (1 - 1e-3)*normalization_matrix \
            + 1e-3*np.trace(normalization_matrix)/steering_vectors.shape[1] * 10*np.identity(steering_vectors.shape[1])
        inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
        constraint_vector = null_constraint_threshold*np.ones(steering_vectors.shape[1])
        constraint_vector[0] = 1
        nc_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors).dot(
            inverse_normalization_matrix).dot(constraint_vector)
    return nc_tf_beamformers
def simulate_multichannel_tf(array_geometry, signal, theta, phi, sampling_frequency, stft_params):
    """Simulate the multichannel STFT of a far-field source at (theta, phi).

    The single-channel STFT is scaled by the per-bin array steering vector.
    Returns shape (n_fft_bins, n_mics, n_frames).
    """
    n_mics = len(array_geometry[0])
    n_samples_per_frame = stft_params["n_samples_per_frame"]
    n_fft_bins = stft_params["n_fft_bins"]
    hop_size = stft_params["hop_size"]
    stft_window = stft_params["window"]
    steering_vector = ( compute_steering_vectors(array_geometry, sampling_frequency, n_fft_bins, theta, phi) )[:, 0, 0, :]
    _, _, tf_frames = stft(signal.reshape(-1), fs=sampling_frequency, window=stft_window,
                           nperseg=n_samples_per_frame, noverlap=n_samples_per_frame-hop_size,
                           nfft=n_samples_per_frame, padded=True)
    # Drop the last frequency row and the first/last (edge-padded) frames.
    tf_frames = tf_frames[:-1, 1:-1]
    # Broadcast: (n_fft, n_mics, 1) * (n_fft, 1, n_frames).
    tf_frames_multichannel = steering_vector.reshape(n_fft_bins, n_mics, 1)\
        * tf_frames.reshape(tf_frames.shape[0], 1, tf_frames.shape[1])
    return tf_frames_multichannel
def simulate_multichannel_tf_circular(array_geometry, signal, azimuth, sampling_frequency, stft_params):
    """Circular-array variant of simulate_multichannel_tf (azimuth only).

    Returns shape (n_fft_bins, n_mics, n_frames).
    """
    n_mics = len(array_geometry[0])
    n_samples_per_frame = stft_params["n_samples_per_frame"]
    n_fft_bins = stft_params["n_fft_bins"]
    hop_size = stft_params["hop_size"]
    stft_window = stft_params["window"]
    steering_vector = ( compute_steering_vectors_circular(array_geometry, sampling_frequency, stft_params, azimuth) )[:, 0, :]
    _, _, tf_frames = stft(signal.reshape(-1), fs=sampling_frequency, window=stft_window,
                           nperseg=n_samples_per_frame, noverlap=n_samples_per_frame-hop_size,
                           nfft=n_samples_per_frame, padded=True)
    # Drop the last frequency row and the first/last (edge-padded) frames.
    tf_frames = tf_frames[:-1, 1:-1]
    tf_frames_multichannel = steering_vector.reshape(n_fft_bins, n_mics, 1)\
        * tf_frames.reshape(tf_frames.shape[0], 1, tf_frames.shape[1])
    return tf_frames_multichannel
def check_distortless_constraint(weight, source_steering_vector, tolerance=1e-9):
    """Assert the distortionless constraint |w^H v| == 1 holds within tolerance.

    Fix: the original tested ``np.abs(x) - 1 < tolerance``, which passes for
    any response magnitude below 1 (e.g. a completely nulled source); the
    deviation from 1 must itself be wrapped in abs().
    """
    response = weight.transpose().conjugate().dot(source_steering_vector)
    assert np.abs(np.abs(response) - 1) < tolerance
def compute_steering_vectors_circular(array_geometry, sampling_frequency, stft_params, azimuth_grid):
    """Steering vectors for a planar (x, y) array over an azimuth grid.

    Per-mic delays (in samples) come from each mic's polar position relative
    to the array center. Returns complex64 of shape
    (n_fft_bins, n_azimuths, n_mics).
    """
    n_mics = len(array_geometry[0])
    n_azimuths = len(azimuth_grid)
    delay = np.zeros((n_azimuths, n_mics), dtype=np.float32)
    n_samples_per_frame = stft_params["n_samples_per_frame"]
    n_fft_bins = stft_params["n_fft_bins"]
    for m in range(n_mics):
        pos_x = array_geometry[0][m]
        pos_y = array_geometry[1][m]
        # Mic position in polar form.
        radius = np.sqrt(pos_x*pos_x + pos_y*pos_y)
        mic_azimuth = np.arctan2(pos_y, pos_x)
        for k in range(n_azimuths):
            azimuth = 2 * np.pi * azimuth_grid[k]/360  # degrees -> radians
            # Delay (samples) of mic m for a plane wave from this azimuth.
            delay[k][m] = - radius * np.cos(mic_azimuth - azimuth) * sampling_frequency / SOUND_SPEED
    steering_vectors = np.zeros((n_fft_bins, n_azimuths, n_mics), dtype=np.complex64)
    for i_fft_bin in range(n_fft_bins):
        v = 2 * np.pi * (i_fft_bin / n_samples_per_frame) * delay;
        steering_vectors[i_fft_bin] = np.cos(v) - np.sin(v) * 1j  # e^{-j v}
    return steering_vectors
def compute_minimum_variance_tf_beamformers(source_steering_vectors, tf_frames_multichannel=None, diagonal_loading_param=1):
    """MVDR-style beamformers with an optional data term.

    Without TF frames, R reduces to the scaled identity and the result is the
    normalized steering vector v / (v^H v).
    """
    n_fft_bins, n_mics = source_steering_vectors.shape
    beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
    for k in range(n_fft_bins):
        v = source_steering_vectors[k]
        # Diagonally loaded covariance; identity-only when no data supplied.
        R = diagonal_loading_param*np.identity(n_mics, dtype=np.complex64)
        if tf_frames_multichannel is not None:
            frames = tf_frames_multichannel[k]
            # NOTE(review): shape[1] is the mic axis for (n_fft, n_mics,
            # n_frames) input — preserved as-is; confirm intended normalizer.
            n_frames = tf_frames_multichannel.shape[1]
            R += 1./n_frames * frames.dot(frames.transpose().conjugate())
        invR = np.linalg.inv(R)
        gain = v.transpose().conjugate().dot(invR).dot(v)
        beamformers[k] = invR.dot(v) / gain
    return beamformers
5189612 | <reponame>kloper/pato
# -*- python -*-
"""@file
@brief I2C-serial transport for pato
Copyright (c) 2014-2015 <NAME> <<EMAIL>>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
from __future__ import absolute_import
import serial
import time
from util.protocol import ProtocolException
from bridge import Bridge as BridgeProtocol
from bridge.protocol import Cmd
class Bridge(object):
    """
    @brief Communication transport using Pato's UART-TWI bridge

    A simple transport that allows python code running on PC to talk
    with Pato via UART, while Pato is compiled with TWI (I2C) interface
    only.

    This requires python pyserial package to be installed.

    The main purpose of this transport is automatic E2D testing of various
    features.
    """
    def __init__(self, slave_addr, timeout=0.3, serial_timeout=10,
                 *args, **kwargs):
        """
        @brief Constructor
        @param[in] slave_addr TWI address of target Pato device
        @param[in] timeout timeout in seconds (measured with time.monotonic())
               for the query sequence to be completed.
        @param[in] serial_timeout timeout value for pyserial communication.
        @param[in] args extra arguments for pyserial
        @param[in] kwargs extra keyword arguments for pyserial
        """
        self.serial = serial.Serial(timeout=serial_timeout, *args, **kwargs)
        self.slave_addr = slave_addr
        self.bridge = BridgeProtocol(self.serial)
        self.timeout = timeout

    def query(self, request):
        """
        @brief Generic query (request/reply) method for UART-TWI bridge.

        Schedule TWI Write of request packet supplied as a parameter. The write
        is performed without stop condition. Then, TWI Read for a
        single reply packet is scheduled.

        If send and/or receive return unexpected result, the function will
        retry both send and receive pair until the timeout specified in the
        constructor is reached. If during the timeout period send and receive
        didn't succeed, @ref ProtocolException is thrown.

        @param[in] request regular list of bytes representing packet to be sent
               via the bridge.
        @returns Received reply packet
        @throws ProtocolException upon send or receive timeout
        """
        now = time.monotonic()
        elapsed = now
        # Retry until the monotonic deadline passes.
        while elapsed - now < self.timeout:
            (send_status, send_remaining) = \
                self.bridge.execute(Cmd.TWI_MASTER_SEND,
                                    self.slave_addr,
                                    request,
                                    0)
            (recv_status, recv_remaining, reply) = \
                self.bridge.execute(
                    Cmd.TWI_MASTER_RECV,
                    self.slave_addr,
                    5, 1, 1)
            elapsed = time.monotonic()
            # Nonzero remaining byte counts indicate an incomplete transfer:
            # log the statuses and retry.
            if send_remaining + recv_remaining != 0:
                print("send_remaining: {} status {:02x}".\
                      format(send_remaining, send_status))
                print("recv_remaining: {} status {:02x}".\
                      format(recv_remaining, recv_status))
                continue
            return reply
        raise ProtocolException("Failed to query")

    def close(self):
        """
        @brief Close serial line to bridge
        """
        self.serial.close()
| StarcoderdataPython |
11245112 | <filename>chapter2/2_1_2.py
# Calculating the classification result using sample dataset
import kNN
if __name__ == "__main__":
    # Build the toy dataset and classify the point (0, 0) with k = 3.
    group, labels = kNN.createDataSet()
    print(group)
    print(labels)
    result = kNN.classify0([0, 0], group, labels, 3)
    print(result)
| StarcoderdataPython |
3264786 | <reponame>kiteco/intellij-plugin
from definition import myFunction
<caret>myFunction()
| StarcoderdataPython |
8063079 | #!/bin/python3
# Our first python module
from gpiozero import Robot
import time
# Two-motor robot: left motor on GPIO pins 7/8, right motor on 9/10.
robby = Robot(left=(7,8), right=(9,10))
# Drive backward for 20 seconds, then stop the motors.
robby.backward()
time.sleep(20)
robby.stop()
109704 | # A simple simulator for SHA+XRAM
from mmio import mmiodev, NOP, RD, WR
import sha as SHAFunc
def as_chars(s, n):
    """Decompose integer *s* into its *n* least-significant bytes,
    little-endian, returned as a list of 1-character strings.

    Fix: use range() instead of the Python-2-only xrange() so the helper
    also runs under Python 3 (range exists in both).
    """
    b = []
    for _ in range(n):
        byte = s & 0xff
        s >>= 8
        b.append(byte)
    return [chr(i) for i in b]
def to_num(s, n):
    """Inverse of as_chars: assemble the first *n* one-character strings of
    *s* into a little-endian integer.

    Fix: use range() instead of the Python-2-only xrange().
    """
    num = 0
    for i in range(n):
        num |= (ord(s[i]) << (i * 8))
    return num
class SHA(mmiodev):
    """Memory-mapped SHA accelerator model: a small FSM that reads 64-byte
    blocks from XRAM, hashes them, and writes the 20-byte digest back."""
    # FSM states.
    SHA_IDLE = 0
    SHA_RD = 1
    SHA_OP1 = 2
    SHA_OP2 = 3
    SHA_WR = 4

    def __init__(self):
        mmiodev.__init__(self)
        # Register map: (name, address, size in bytes).
        self.addReg('sha_start', 0xfe00, 1, readonly=True)
        self.addReg('sha_state', 0xfe01, 1, readonly=True)
        self.addReg('sha_rdaddr', 0xfe02, 2)
        self.addReg('sha_wraddr', 0xfe04, 2)
        self.addReg('sha_len', 0xfe06, 2)
        self.bytes_read = 0
        self.rd_data = [0] * 64  # NOTE(review): simulate() replaces this with a packed int
        self.hs_data = [0] * 20  # NOTE(review): likewise becomes an int digest
        self.sha = SHAFunc.new()

    # Create easy access properties.
    sha_state = property(lambda s: s.getRegI('sha_state'), lambda s, v: s.setRegI('sha_state', v))
    sha_rdaddr = property(lambda s: s.getRegI('sha_rdaddr'), lambda s, v: s.setRegI('sha_rdaddr', v))
    sha_wraddr = property(lambda s: s.getRegI('sha_wraddr'), lambda s, v: s.setRegI('sha_wraddr', v))
    sha_len = property(lambda s: s.getRegI('sha_len'), lambda s, v: s.setRegI('sha_len', v))

    def simulate(self, s_in):
        """Advance the model one cycle from the state dict s_in.

        Returns the updated state dict (see s_dict) plus 'dataout' holding
        the value of any MMIO read performed this cycle.
        """
        cmd = s_in['cmd']
        cmdaddr = s_in['cmdaddr']
        cmddata = s_in['cmddata']
        # Load architectural state from the caller.
        self.sha_state = s_in['sha_state']
        self.sha_rdaddr = s_in['sha_rdaddr']
        self.sha_wraddr = s_in['sha_wraddr']
        self.sha_len = s_in['sha_len']
        self.bytes_read = s_in['sha_bytes_read']
        self.rd_data = s_in['sha_rd_data']
        self.hs_data = s_in['sha_hs_data']
        self.xram = s_in['XRAM']
        # default dataout.
        dataout = 0;
        # execute command.
        started = False
        if cmd == RD:
            # MMIO read: return immediately without advancing the FSM.
            found, data = self.read(cmdaddr)
            if found:
                dataout = data
            s_out = self.s_dict()
            s_out['dataout'] = dataout
            return s_out
        elif cmd == WR and self.sha_state == self.SHA_IDLE:
            if cmdaddr == 0xfe00:
                # Writing 1 to sha_start kicks off a new hash operation.
                if cmddata == 1:
                    self.sha_state = self.SHA_RD
                    self.bytes_read = 0
                    started = True
            else:
                self.write(cmdaddr, cmddata)
        # do the operations.
        if not started and self.sha_state == self.SHA_RD:
            # Gather a 64-byte block from XRAM into one big little-endian int.
            self.rd_data = 0
            for i in xrange(64):
                addr = (self.sha_rdaddr + self.bytes_read + 63 - i) & 0xffff
                byte = self.xram[addr]
                self.rd_data |= byte << (i*8)
            self.bytes_read = self.bytes_read + 64
            self.sha_state = self.SHA_OP1
        elif not started and self.sha_state == self.SHA_OP1:
            # One extra cycle of latency before the hash update.
            self.sha_state = self.SHA_OP2
        elif not started and self.sha_state == self.SHA_OP2:
            if self.bytes_read < self.sha_len: # Need more blk
                self.sha_state = self.SHA_RD
            else:
                self.sha_state = self.SHA_WR
            # Feed the block into the hash and snapshot the running digest.
            bytes_in = bytes(''.join(as_chars(self.rd_data, 64)))
            self.sha.update(bytes_in)
            res = self.sha.digest()
            self.hs_data = to_num(res, 20)
        elif not started and self.sha_state == self.SHA_WR:
            # Write the 20-byte digest back to XRAM, then go idle.
            for i in xrange(20):
                addr = (self.sha_wraddr + 19 - i) & 0xffff
                byte = (self.hs_data >> (i*8)) & 0xff
                self.xram[addr] = byte
            self.sha_state = self.SHA_IDLE
        s_out = self.s_dict()
        s_out['dataout'] = dataout
        return s_out

    def s_dict(self):
        """Snapshot of the model's architectural state as a dict."""
        return {
            'sha_state' : self.sha_state,
            'sha_rdaddr' : self.sha_rdaddr,
            'sha_wraddr' : self.sha_wraddr,
            'sha_len' : self.sha_len,
            'sha_bytes_read': self.bytes_read,
            'sha_rd_data' : self.rd_data,
            'sha_hs_data' : self.hs_data,
            'XRAM' : self.xram
        }
def testSHA():
    """Smoke test: write then read back the sha_len register via MMIO."""
    sha = SHA()
    sha.sha_state = 0
    assert sha.sha_state == 0
    # Write 128 to sha_len (0xfe06).
    s_in = sha.s_dict()
    s_in['cmd'] = WR
    s_in['cmdaddr'] = 0xfe06
    s_in['cmddata'] = 128
    s_out = sha.simulate(s_in)
    assert s_out['sha_len'] == 128
    # Read it back; cmddata is ignored for reads.
    s_in = sha.s_dict()
    s_in['cmd'] = RD
    s_in['cmdaddr'] = 0xfe06
    s_in['cmddata'] = 3
    s_out = sha.simulate(s_in)
    assert s_out['dataout'] == 128
# Run the self-test when executed directly.
if __name__ == '__main__':
    testSHA()
4979343 | # Copyright 2021 Seek Thermal Inc.
#
# Original author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SeekCameraVersion:
    """Version of the seekcamera-python library.

    Versions follow the [MAJOR].[MINOR].[PATCH] scheme.

    Attributes
    ----------
    MAJOR: int
        Major version number of the library.
    MINOR: int
        Minor version number of the library.
    PATCH: int
        Patch version number of the library.
    """

    MAJOR = 1
    MINOR = 1
    PATCH = 1

    def __str__(self):
        return f"{self.MAJOR}.{self.MINOR}.{self.PATCH}"

    def __repr__(self):
        return f"SeekCameraVersion({self.MAJOR}, {self.MINOR}, {self.PATCH})"
| StarcoderdataPython |
205661 | <gh_stars>1-10
import cv2
import depthai
import numpy as np
_conf_threshold = 0.5
def get_cv_rotated_rect(bbox, angle):
    """Convert an axis-aligned bbox plus an angle in radians to an OpenCV
    RotatedRect tuple: ((center_x, center_y), (width, height), angle_deg)."""
    x0, y0, x1, y1 = bbox
    rect_w = abs(x0 - x1)
    rect_h = abs(y0 - y1)
    center = (x0 + rect_w * 0.5, y0 + rect_h * 0.5)
    return (center, (rect_w, rect_h), np.rad2deg(angle))
def rotated_Rectangle(bbox, angle):
    """Corner points of `bbox` rotated by `angle` (radians) about its center.

    Returns a 4x2 int array of corner coordinates (e.g. for cv2.polylines).
    Note: intermediate int() truncations happen at several stages, matching
    the pixel-grid behavior the callers expect.
    """
    X0, Y0, X1, Y1 = bbox
    width = abs(X0 - X1)
    height = abs(Y0 - Y1)
    # Box center, truncated to the pixel grid.
    x = int(X0 + width * 0.5)
    y = int(Y0 + height * 0.5)
    # Axis-aligned corners before rotation.
    pt1_1 = (int(x + width / 2), int(y + height / 2))
    pt2_1 = (int(x + width / 2), int(y - height / 2))
    pt3_1 = (int(x - width / 2), int(y - height / 2))
    pt4_1 = (int(x - width / 2), int(y + height / 2))
    # Homogeneous 2D rotation about the center (x, y).
    t = np.array([[np.cos(angle), -np.sin(angle), x - x * np.cos(angle) + y * np.sin(angle)],
                  [np.sin(angle), np.cos(angle), y - x * np.sin(angle) - y * np.cos(angle)],
                  [0, 0, 1]])
    tmp_pt1_1 = np.array([[pt1_1[0]], [pt1_1[1]], [1]])
    tmp_pt1_2 = np.dot(t, tmp_pt1_1)
    pt1_2 = (int(tmp_pt1_2[0][0]), int(tmp_pt1_2[1][0]))
    tmp_pt2_1 = np.array([[pt2_1[0]], [pt2_1[1]], [1]])
    tmp_pt2_2 = np.dot(t, tmp_pt2_1)
    pt2_2 = (int(tmp_pt2_2[0][0]), int(tmp_pt2_2[1][0]))
    tmp_pt3_1 = np.array([[pt3_1[0]], [pt3_1[1]], [1]])
    tmp_pt3_2 = np.dot(t, tmp_pt3_1)
    pt3_2 = (int(tmp_pt3_2[0][0]), int(tmp_pt3_2[1][0]))
    tmp_pt4_1 = np.array([[pt4_1[0]], [pt4_1[1]], [1]])
    tmp_pt4_2 = np.dot(t, tmp_pt4_1)
    pt4_2 = (int(tmp_pt4_2[0][0]), int(tmp_pt4_2[1][0]))
    points = np.array([pt1_2, pt2_2, pt3_2, pt4_2])
    return points
def non_max_suppression(boxes, probs=None, angles=None, overlapThresh=0.3):
    """Greedy non-maximum suppression over axis-aligned boxes.

    Candidates are ranked by `probs` (falling back to the bottom-left
    y-coordinate) and suppressed when their overlap ratio with a kept box
    exceeds `overlapThresh`. Returns the surviving boxes (as ints) and their
    associated angles.
    """
    # Nothing to suppress.
    if len(boxes) == 0:
        return [], []
    # Floats avoid integer division in the overlap ratio below.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Ascending sort: the best candidate sits at the end of `order`.
    order = np.argsort(probs if probs is not None else y2)
    keep = []
    while len(order) > 0:
        last = len(order) - 1
        current = order[last]
        keep.append(current)
        rest = order[:last]
        # Intersection of the current box with every remaining candidate.
        xx1 = np.maximum(x1[current], x1[rest])
        yy1 = np.maximum(y1[current], y1[rest])
        xx2 = np.minimum(x2[current], x2[rest])
        yy2 = np.minimum(y2[current], y2[rest])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[rest]
        # Drop the current index plus everything it suppresses.
        order = np.delete(order, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
    return boxes[keep].astype("int"), angles[keep]
def decode_predictions(scores, geometry1, geometry2, conf_threshold=None):
    """Decode EAST text-detector score/geometry maps into candidate boxes.

    Args:
        scores: text probability map, shape ``(1, 1, rows, cols)``.
        geometry1: per-cell distances to the box's four edges, shape
            ``(1, 4, rows, cols)``.
        geometry2: per-cell rotation angle map, shape ``(1, 1, rows, cols)``.
        conf_threshold: minimum score for a cell to yield a box. Defaults
            to the module-level ``_conf_threshold`` setting, preserving the
            original behaviour for existing callers.

    Returns:
        Tuple ``(rects, confidences, angles)`` of parallel lists; each rect
        is an axis-aligned ``(startX, startY, endX, endY)`` box.
    """
    if conf_threshold is None:
        # fall back to the module-wide setting, as the original code did
        conf_threshold = _conf_threshold
    # grab the number of rows and columns from the scores volume, then
    # initialize our set of bounding box rectangles and corresponding
    # confidence scores
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []
    angles = []
    # loop over the number of rows
    for y in range(0, numRows):
        # extract the scores (probabilities), followed by the
        # geometrical data used to derive potential bounding box
        # coordinates that surround text
        scoresData = scores[0, 0, y]
        xData0 = geometry1[0, 0, y]
        xData1 = geometry1[0, 1, y]
        xData2 = geometry1[0, 2, y]
        xData3 = geometry1[0, 3, y]
        anglesData = geometry2[0, 0, y]
        # loop over the number of columns
        for x in range(0, numCols):
            # skip cells whose score does not have sufficient probability
            if scoresData[x] < conf_threshold:
                continue
            # compute the offset factor as our resulting feature
            # maps will be 4x smaller than the input image
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            # extract the rotation angle for the prediction and
            # then compute the sin and cosine
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # use the geometry volume to derive the width and height
            # of the bounding box
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]
            # compute both the starting and ending (x, y)-coordinates
            # for the text prediction bounding box
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)
            # record the bounding box, its score, and its rotation angle
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])
            angles.append(angle)
    # return a tuple of the bounding boxes and associated confidences
    return (rects, confidences, angles)
def decode_east(nnet_packet, **kwargs):
    """Turn a raw EAST network packet into NMS-filtered boxes and angles."""
    score_map = nnet_packet.get_tensor(0)
    geometry_map_1 = nnet_packet.get_tensor(1)
    geometry_map_2 = nnet_packet.get_tensor(2)
    rects, confidences, rotations = decode_predictions(
        score_map, geometry_map_1, geometry_map_2)
    kept_boxes, kept_angles = non_max_suppression(
        np.array(rects), probs=confidences, angles=np.array(rotations))
    return (kept_boxes, kept_angles)
def show_east(boxesangles, frame, **kwargs):
    """Draw a rotated blue rectangle on ``frame`` for each detected text box."""
    detected_boxes = boxesangles[0]
    detected_angles = boxesangles[1]
    for (x0, y0, x1, y1), angle in zip(detected_boxes, detected_angles):
        box_width = abs(x0 - x1)
        box_height = abs(y0 - y1)
        # rotation is about the box center, with the angle sign flipped
        # to match OpenCV's rotated-rect convention
        center = (int(x0 + box_width * 0.5), int(y0 + box_height * 0.5))
        rotated_rect = (center, ((x1 - x0), (y1 - y0)), angle * (-1))
        outline = rotated_Rectangle(frame, rotated_rect, color=(255, 0, 0), thickness=1)
        cv2.polylines(frame, [outline], isClosed=True, color=(255, 0, 0),
                      thickness=1, lineType=cv2.LINE_8)
    return frame
def order_points(pts):
    """Order four (x, y) points as top-left, top-right, bottom-right, bottom-left."""
    ordered = np.zeros((4, 2), dtype="float32")
    # top-left has the smallest x+y sum, bottom-right the largest
    coord_sums = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(coord_sums)]
    ordered[2] = pts[np.argmax(coord_sums)]
    # top-right has the smallest y-x difference, bottom-left the largest
    coord_diffs = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(coord_diffs)]
    ordered[3] = pts[np.argmax(coord_diffs)]
    return ordered
def four_point_transform(image, pts):
    """Perspective-warp the quadrilateral ``pts`` in ``image`` to a flat rectangle.

    The output size is taken from the longer of each pair of opposing edges.
    """
    corners = order_points(pts)
    (top_left, top_right, bottom_right, bottom_left) = corners
    # output width: the longer of the bottom and top edges
    width_bottom = np.sqrt(((bottom_right[0] - bottom_left[0]) ** 2) + ((bottom_right[1] - bottom_left[1]) ** 2))
    width_top = np.sqrt(((top_right[0] - top_left[0]) ** 2) + ((top_right[1] - top_left[1]) ** 2))
    out_width = max(int(width_bottom), int(width_top))
    # output height: the longer of the right and left edges
    height_right = np.sqrt(((top_right[0] - bottom_right[0]) ** 2) + ((top_right[1] - bottom_right[1]) ** 2))
    height_left = np.sqrt(((top_left[0] - bottom_left[0]) ** 2) + ((top_left[1] - bottom_left[1]) ** 2))
    out_height = max(int(height_right), int(height_left))
    destination = np.array(
        [[0, 0],
         [out_width - 1, 0],
         [out_width - 1, out_height - 1],
         [0, out_height - 1]], dtype="float32")
    transform = cv2.getPerspectiveTransform(corners, destination)
    return cv2.warpPerspective(image, transform, (out_width, out_height))
| StarcoderdataPython |
3213723 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename = __init__
# author=KGerring
# date = 4/15/21
# project poetryproj
# docs root
"""
poetryproj
"""
from __future__ import annotations
import os
import sys
# Public API: every callable defined in this module plus any UPPER_CASE
# constant, excluding dunder names. Computed dynamically so new definitions
# are exported without touching this list.
# (The original code first assigned ``__all__ = []`` and immediately
# overwrote it; that dead assignment has been removed.)
__all__ = sorted(
    getattr(value, "__name__", name)
    for name, value in list(globals().items())
    if (
        ((callable(value) and getattr(value, "__module__", "") == __name__) or name.isupper())
        and not str(getattr(value, "__name__", name)).startswith("__")
    )
)
if __name__ == "__main__":
    # Running this package module directly just reports its file location.
    print(__file__)
| StarcoderdataPython |
5023143 | #!/usr/bin/env python
"""
Unit test suite for the betr regression test manager
"""
from __future__ import print_function
import logging
import os
import sys
import unittest
if sys.version_info[0] == 2: # pragma: no coverage
from ConfigParser import SafeConfigParser as config_parser
else:
from configparser import ConfigParser as config_parser
from rtest_betr import main
class BeTR_suite(unittest.TestCase):
    """Unit tests for the betr regression test manager."""

    # scratch log file created by setUp and deleted by tearDown
    _LOG_FILENAME = 'dummy.testlog'

    def setUp(self):
        """Route log output for this test into a throwaway file."""
        logging.basicConfig(filename=self._LOG_FILENAME,
                            filemode='w',
                            level=logging.INFO,
                            format='%(message)s')
        logging.info('mtest {0} unit test log.'.format(__name__))

    def tearDown(self):
        """Flush logging and discard the scratch log file."""
        logging.shutdown()
        if os.path.isfile(self._LOG_FILENAME):
            os.remove(self._LOG_FILENAME)  # pragma: no coverage

    # ------------------------------------------------------
    def test_example_default(self):
        """Working example test that always passes (default value)."""
        self.assertEqual(True, True)
if __name__ == '__main__':
    # Pass buffer=True to unittest.main() to capture stdout/stderr per test:
    # unittest.main(buffer=True)
    unittest.main()  # pragma: no coverage
| StarcoderdataPython |
6598196 | <filename>devilry/apps/core/devilry_core_mommy_factories.py<gh_stars>0
from django.conf import settings
from model_mommy import mommy
def examiner(group=None, shortname=None, fullname=None, automatic_anonymous_id=None):
    """
    Creates an Examiner using ``mommy.make('core.Examiner', ...)``.

    Args:
        group: The AssignmentGroup to add the examiner to (optional).
        shortname: The ``shortname`` of the user (optional).
        fullname: The ``fullname`` of the user (optional).
        automatic_anonymous_id: The ``automatic_anonymous_id`` of the RelatedExaminer (optional).

    Returns:
        Examiner: The created examiner.
    """
    # Only forward attributes the caller actually supplied, so mommy's
    # defaults apply to everything else.
    user_attributes = {
        key: value
        for key, value in (('shortname', shortname), ('fullname', fullname))
        if value
    }
    relatedexaminer_attributes = {}
    if automatic_anonymous_id:
        relatedexaminer_attributes['automatic_anonymous_id'] = automatic_anonymous_id
    user = mommy.make(settings.AUTH_USER_MODEL, **user_attributes)
    relatedexaminer = mommy.make('core.RelatedExaminer',
                                 user=user,
                                 **relatedexaminer_attributes)
    return mommy.make('core.Examiner',
                      assignmentgroup=group,
                      relatedexaminer=relatedexaminer)
def candidate(group=None, shortname=None, fullname=None,
              automatic_anonymous_id=None, relatedstudents_candidate_id=None,
              candidates_candidate_id=None):
    """
    Creates a Candidate using ``mommy.make('core.Candidate', ...)``.

    Args:
        group: The AssignmentGroup to add the candidate to (optional).
        shortname: The ``shortname`` of the user (optional).
        fullname: The ``fullname`` of the user (optional).
        automatic_anonymous_id: The ``automatic_anonymous_id`` of the RelatedStudent (optional).
        relatedstudents_candidate_id: The ``candidate_id`` of the RelatedStudent (optional).
        candidates_candidate_id: The ``candidate_id`` of the Candidate (optional).

    Returns:
        Candidate: The created candidate.
    """
    user_kwargs = {}
    if shortname:
        user_kwargs['shortname'] = shortname
    if fullname:
        user_kwargs['fullname'] = fullname
    relatedstudent_kwargs = {}
    if automatic_anonymous_id:
        relatedstudent_kwargs['automatic_anonymous_id'] = automatic_anonymous_id
    if relatedstudents_candidate_id:
        relatedstudent_kwargs['candidate_id'] = relatedstudents_candidate_id
    candidate_kwargs = {}
    if candidates_candidate_id:
        candidate_kwargs['candidate_id'] = candidates_candidate_id
    return mommy.make(
        'core.Candidate',
        assignment_group=group,
        relatedstudent=mommy.make('core.RelatedStudent',
                                  user=mommy.make(settings.AUTH_USER_MODEL,
                                                  **user_kwargs),
                                  **relatedstudent_kwargs),
        # Bug fix: candidate_kwargs was built but never forwarded, so the
        # ``candidates_candidate_id`` argument was silently ignored.
        **candidate_kwargs)
| StarcoderdataPython |
def convert2meter(s, input_unit="in"):
    """Convert a measurement to metric units.

    Inches and feet convert to meters; cubic feet convert to cubic meters.
    An unrecognised ``input_unit`` prints an error message and returns
    None (behaviour preserved from the original implementation).
    """
    conversion_factors = {
        "in": 0.0254,      # inches -> meters
        "ft": 0.3048,      # feet -> meters
        "cft": 0.0283168,  # cubic feet -> cubic meters
    }
    factor = conversion_factors.get(input_unit)
    if factor is None:
        print("Error: Input unit is unknown.")
        return None
    return s * factor
## Apply function
# Map each tree measurement column to the unit it was recorded in, then
# convert every column of the raw data to metric units.
# NOTE(review): relies on ``trees_raw`` being defined earlier in the file
# (presumably a pandas DataFrame of the classic "trees" dataset) -- confirm
# upstream before reusing this snippet.
measurement_unit = {"Girth":"in",
                    "Height":"ft",
                    "Volume":"cft"}
trees = trees_raw.copy()
for feature in ["Girth", "Height", "Volume"]:
    # look up the source unit per column so convert2meter applies the
    # right conversion factor element-wise
    trees[feature] = trees_raw[feature].apply(lambda x: convert2meter(s=x,
                                                                      input_unit=measurement_unit.get(feature)))
| StarcoderdataPython |
9717025 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from numpy.testing import assert_, run_module_suite
from qutip import ssesolve, destroy, coherent, mesolve, parallel_map
def _run_ssesolve_test(method, measurement_shape_tail):
    """Shared body for the three ssesolve regression tests.

    Runs ``ssesolve`` on a damped cavity with the given stochastic
    ``method``, compares the trajectory-averaged expectation values against
    the deterministic ``mesolve`` reference, and checks the shape of the
    stored measurement records.

    Args:
        method: ssesolve integration method ('photocurrent', 'homodyne'
            or 'heterodyne').
        measurement_shape_tail: expected trailing dimensions of each
            measurement record after the ``(n_times, n_sc_ops)`` axes
            (``(2,)`` for heterodyne, empty otherwise).
    """
    tol = 0.01
    N = 4
    gamma = 0.25
    ntraj = 25
    nsubsteps = 100
    a = destroy(N)
    H = a.dag() * a
    psi0 = coherent(N, 0.5)
    sc_ops = [np.sqrt(gamma) * a]
    e_ops = [a.dag() * a, a + a.dag(), (-1j) * (a - a.dag())]
    times = np.linspace(0, 2.5, 50)
    res_ref = mesolve(H, psi0, times, sc_ops, e_ops)
    res = ssesolve(H, psi0, times, sc_ops, e_ops,
                   ntraj=ntraj, nsubsteps=nsubsteps,
                   method=method, store_measurement=True,
                   map_func=parallel_map)
    # stochastic averages should agree with the deterministic reference
    assert_(all([np.mean(abs(res.expect[idx] - res_ref.expect[idx])) < tol
                 for idx in range(len(e_ops))]))
    # one measurement record per trajectory, with the expected shape
    assert_(len(res.measurement) == ntraj)
    expected_shape = (len(times), len(sc_ops)) + measurement_shape_tail
    assert_(all([m.shape == expected_shape for m in res.measurement]))


def test_ssesolve_photocurrent():
    "Stochastic: ssesolve: photo-current"
    _run_ssesolve_test('photocurrent', ())


def test_ssesolve_homodyne():
    "Stochastic: ssesolve: homodyne"
    _run_ssesolve_test('homodyne', ())


def test_ssesolve_heterodyne():
    "Stochastic: ssesolve: heterodyne"
    # heterodyne records two quadratures per collapse operator
    _run_ssesolve_test('heterodyne', (2,))
if __name__ == "__main__":
    # numpy.testing runner: executes all test_* functions in this module
    run_module_suite()
| StarcoderdataPython |
11348996 | ##
# File: testGitUtils.py
# Author: jdw
# Date: 18-Jul-2021
#
# Updates:
#
##
"""
Test utilities
"""
__docformat__ = "google en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import os.path
import time
import random
import string
import unittest
import logging
#
from rcsb.utils.io import __version__
from rcsb.utils.io.FileUtil import FileUtil
from rcsb.utils.io.GitUtil import GitUtil
# Directory of this test file and the repository root three levels up.
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))
# Console logging for the test run at INFO verbosity.
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class GitUtilTests(unittest.TestCase):
    """Exercises GitUtil clone/add/commit/pull/push against a test repository."""

    def setUp(self):
        # scratch area for cloned repositories
        self.__workPath = os.path.join(HERE, "test-output")
        self.__startTime = time.time()
        logger.debug("Running tests on version %s", __version__)
        logger.debug("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))

    def tearDown(self):
        endTime = time.time()
        logger.debug("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)

    def testGitOps(self):
        """Test case - clone a test repo, modify it, then add/commit/pull.

        Push is only attempted when a git access token is configured;
        ``gitToken`` is left as None here, so the push branch is skipped.
        """
        try:
            branch = "test-branch"
            repositoryPath = "rcsb/py-rcsb_exdb_assets_stash_test.git"
            localRepositoryPath = os.path.join(self.__workPath, "test-stash")
            fU = FileUtil()
            # start from a clean checkout
            fU.remove(localRepositoryPath)
            gitToken = None
            gU = GitUtil(gitToken)
            ok = gU.clone(repositoryPath, localRepositoryPath, branch=branch)
            self.assertTrue(ok)
            logger.info("status %r", gU.status(localRepositoryPath))
            #
            # Bug fix: the return values of pull/push were previously
            # discarded, so each following assertTrue(ok) re-checked the
            # stale result of an earlier call. (Assumes pull/push return a
            # success flag like clone/addAll/commit -- TODO confirm in
            # GitUtil.)
            ok = gU.pull(localRepositoryPath, branch=branch)
            self.assertTrue(ok)
            logger.info("status %r", gU.status(localRepositoryPath))
            #
            # add a file with random content so there is something to commit
            testPath = os.path.join(localRepositoryPath, "stash", "TESTFILE.txt")
            self.__makeFile(testPath, 10)
            #
            ok = gU.addAll(localRepositoryPath, branch=branch)
            self.assertTrue(ok)
            logger.info("status %r", gU.status(localRepositoryPath, branch=branch))
            #
            ok = gU.commit(localRepositoryPath, branch=branch)
            self.assertTrue(ok)
            logger.info("status %r", gU.status(localRepositoryPath, branch=branch))
            #
            if gitToken:
                ok = gU.push(localRepositoryPath, branch=branch)
                self.assertTrue(ok)
                logger.info("status %r", gU.status(localRepositoryPath, branch=branch))
            #
            ok = gU.pull(localRepositoryPath, branch=branch)
            self.assertTrue(ok)
            logger.info("status %r", gU.status(localRepositoryPath, branch=branch))
            #
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()

    def __makeFile(self, fp, count):
        """Write ``count`` lines of random uppercase/digit text to ``fp``."""
        sLen = 50
        with open(fp, "w") as ofh:
            for ii in range(count):
                tS = "".join(random.choices(string.ascii_uppercase + string.digits, k=sLen))
                ofh.write("%d %s\n" % (ii, tS))
def suiteGitTests():
    """Assemble the suite of GitUtil operation tests."""
    suite = unittest.TestSuite()
    suite.addTest(GitUtilTests("testGitOps"))
    return suite
if __name__ == "__main__":
    # Build and run the suite; verbosity=2 prints one line per test.
    mySuite = suiteGitTests()
    unittest.TextTestRunner(verbosity=2).run(mySuite)
    #
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.