text
stringlengths 2
999k
|
|---|
"""redwing URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout
from auth.views import register
from auth.forms import LoginForm
urlpatterns = [
    # App URLconfs mounted at the site root; Django matches them in order.
    url(r'^', include('article.urls')),
    url(r'^', include('category.urls')),
    url(r'^', include('feedback.urls')),
    url(r'^admin/', admin.site.urls),
    # Built-in auth login view with a custom form; users who are already
    # authenticated get redirected away from the login page.
    url(r'^accounts/login/$', login,
        {'authentication_form': LoginForm,
         'redirect_authenticated_user': True},
        name='login'),
    # Logout sends the user back to the site root.
    url(r'^accounts/logout/$', logout,
        {'next_page': '/'},
        name='logout'),
    url(r'^accounts/register/$', register,
        name='register'),
]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-self-use,too-many-arguments,too-many-lines
from __future__ import print_function
import getpass
import json
import os
import re
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from six.moves.urllib.request import urlopen # noqa, pylint: disable=import-error,unused-import
from azure.mgmt.compute.models import (DataDisk,
VirtualMachineScaleSet,
VirtualMachineCaptureParameters,
VirtualMachineScaleSetExtension,
VirtualMachineScaleSetExtensionProfile)
from azure.mgmt.compute.models.compute_management_client_enums import DiskCreateOptionTypes
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.arm import parse_resource_id, resource_id
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
from azure.cli.core._util import CLIError
import azure.cli.core.azlogging as azlogging
from ._vm_utils import read_content_if_is_file, load_json
from ._vm_diagnostics_templates import get_default_diag_config
from ._actions import (load_images_from_aliases_doc,
load_extension_images_thru_services,
load_images_thru_services)
from ._client_factory import _compute_client_factory
logger = azlogging.get_az_logger(__name__)
def get_vm(resource_group_name, vm_name, expand=None):
    """Fetch a Virtual Machine model; *expand* may request extra views
    such as 'instanceView'."""
    compute_client = _compute_client_factory()
    return compute_client.virtual_machines.get(
        resource_group_name, vm_name, expand=expand)
def set_vm(instance, lro_operation=None):
    """Persist an updated Virtual Machine model back to Azure.

    :param instance: the (possibly modified) VM model to write back
    :param lro_operation: optional callable used to poll the operation;
        a plain LongRunningOperation is used when omitted
    """
    instance.resources = None  # workaround: https://github.com/Azure/autorest/issues/934
    resource_group, name = _parse_rg_name(instance.id)
    poller = _compute_client_factory().virtual_machines.create_or_update(
        resource_group_name=resource_group,
        vm_name=name,
        parameters=instance)
    operation = lro_operation or LongRunningOperation()
    return operation(poller)
def _parse_rg_name(strid):
    """Extract a (resource group, resource name) tuple from a full ARM ID."""
    id_parts = parse_resource_id(strid)
    return id_parts['resource_group'], id_parts['name']
# Use the same name by portal, so people can update from both cli and portal
# (VM doesn't allow multiple handlers for the same extension)
_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess'

# Extension type names for credential reset (access) and diagnostics, per OS.
_LINUX_ACCESS_EXT = 'VMAccessForLinux'
_WINDOWS_ACCESS_EXT = 'VMAccessAgent'
_LINUX_DIAG_EXT = 'LinuxDiagnostic'
_WINDOWS_DIAG_EXT = 'IaaSDiagnostics'

# Default publisher and "major.minor" type-handler version for each managed
# extension; consumed by _get_access_extension_upgrade_info and the
# diagnostics commands below.
extension_mappings = {
    _LINUX_ACCESS_EXT: {
        'version': '1.4',
        'publisher': 'Microsoft.OSTCExtensions'
    },
    _WINDOWS_ACCESS_EXT: {
        'version': '2.0',
        'publisher': 'Microsoft.Compute'
    },
    _LINUX_DIAG_EXT: {
        'version': '2.3',
        'publisher': 'Microsoft.OSTCExtensions'
    },
    _WINDOWS_DIAG_EXT: {
        'version': '1.5',
        'publisher': 'Microsoft.Azure.Diagnostics'
    }
}
def _get_access_extension_upgrade_info(extensions, name):
    """Work out (publisher, version, auto_upgrade) for the access extension.

    Starts from the defaults in ``extension_mappings``; when the VM already
    carries the extension, an older install triggers auto-upgrade and a newer
    install keeps its own version.
    """
    publisher = extension_mappings[name]['publisher']
    version = extension_mappings[name]['version']
    auto_upgrade = None
    existing = next((e for e in extensions if e.name == name), None) if extensions else None
    if existing:
        # pylint: disable=no-name-in-module,import-error
        from distutils.version import LooseVersion
        installed = LooseVersion(existing.type_handler_version)
        if installed < LooseVersion(version):
            auto_upgrade = True
        elif installed > LooseVersion(version):
            version = existing.type_handler_version
    return publisher, version, auto_upgrade
def _get_storage_management_client():
    """Create a storage management client for the current subscription."""
    from azure.mgmt.storage import StorageManagementClient
    return get_mgmt_service_client(StorageManagementClient)
def _trim_away_build_number(version):
# workaround a known issue: the version must only contain "major.minor", even though
# "extension image list" gives more detail
return '.'.join(version.split('.')[0:2])
# Hide extension information from output as the info is not correct and unhelpful; also
# commands using it mean to hide the extension concept from users.
class ExtensionUpdateLongRunningOperation(LongRunningOperation):  # pylint: disable=too-few-public-methods
    """A LongRunningOperation that waits for completion but returns nothing."""

    def __call__(self, poller):
        super(ExtensionUpdateLongRunningOperation, self).__call__(poller)
        # That said, we suppress the output. Operation failures will still
        # be caught through the base class
        return None
def list_vm(resource_group_name=None, show_details=False):
    """List Virtual Machines, across the subscription or within one group.

    :param resource_group_name: restrict the listing to this group when given
    :param show_details: also resolve network/power-state details per VM
    """
    client = _compute_client_factory()
    if resource_group_name:
        vms = client.virtual_machines.list(resource_group_name=resource_group_name)
    else:
        vms = client.virtual_machines.list_all()
    if not show_details:
        return list(vms)
    return [get_vm_details(_parse_rg_name(vm.id)[0], vm.name) for vm in vms]
def show_vm(resource_group_name, vm_name, show_details=False):
    """Show a single VM, optionally with network/power-state details."""
    getter = get_vm_details if show_details else get_vm
    return getter(resource_group_name, vm_name)
def get_vm_details(resource_group_name, vm_name):
    """Return a VM model augmented with power state, IPs, FQDNs and MACs.

    Fetches the instance view, then walks every attached NIC (and any public
    IP on its configurations) and flattens the collected values onto the
    returned object as comma-separated strings.
    """
    from azure.mgmt.network import NetworkManagementClient
    result = get_instance_view(resource_group_name, vm_name)
    network_client = get_mgmt_service_client(NetworkManagementClient)
    public_ips = []
    fqdns = []
    private_ips = []
    mac_addresses = []
    # pylint: disable=line-too-long,no-member
    for nic_ref in result.network_profile.network_interfaces:
        # NIC references only carry an ID; fetch the full NIC resource.
        # NOTE(review): assumes the NIC lives in the VM's resource group — confirm.
        nic = network_client.network_interfaces.get(resource_group_name, nic_ref.id.split('/')[-1])
        mac_addresses.append(nic.mac_address)
        for ip_configuration in nic.ip_configurations:
            private_ips.append(ip_configuration.private_ip_address)
            if ip_configuration.public_ip_address:
                public_ip_info = network_client.public_ip_addresses.get(resource_group_name,
                                                                        ip_configuration.public_ip_address.id.split('/')[-1])
                if public_ip_info.ip_address:
                    public_ips.append(public_ip_info.ip_address)
                if public_ip_info.dns_settings:
                    fqdns.append(public_ip_info.dns_settings.fqdn)
    # Power state comes from the instance-view statuses prefixed 'PowerState/'.
    setattr(result, 'power_state', ','.join([s.display_status for s in result.instance_view.statuses if s.code.startswith('PowerState/')]))
    setattr(result, 'public_ips', ','.join(public_ips))
    setattr(result, 'fqdns', ','.join(fqdns))
    setattr(result, 'private_ips', ','.join(private_ips))
    setattr(result, 'mac_addresses', ','.join(mac_addresses))
    del result.instance_view  # we don't need other instance_view info as people won't care
    return result
def list_vm_images(image_location=None, publisher_name=None, offer=None, sku=None,
                   all=False):  # pylint: disable=redefined-builtin
    """List VM images, each annotated with its URN.

    :param str image_location: Image location
    :param str publisher_name: Image publisher name
    :param str offer: Image offer name
    :param str sku: Image sku name
    :param bool all: query the live Azure service instead of the offline
        alias document
    """
    if all:
        images = load_images_thru_services(publisher_name, offer, sku, image_location)
    else:
        logger.warning(
            'You are viewing an offline list of images, use --all to retrieve an up-to-date list')
        images = load_images_from_aliases_doc(publisher_name, offer, sku)
    for image in images:
        image['urn'] = ':'.join([image['publisher'], image['offer'],
                                 image['sku'], image['version']])
    return images
def list_vm_extension_images(
        image_location=None, publisher_name=None, name=None, version=None, latest=False):
    """List VM extension images.

    :param str image_location: Image location
    :param str publisher_name: Image publisher name
    :param str name: Image name
    :param str version: Image version
    :param bool latest: show only the latest version
    """
    images = load_extension_images_thru_services(
        publisher_name, name, version, image_location, latest)
    return images
def list_ip_addresses(resource_group_name=None, vm_name=None):
    ''' Get IP addresses from one or more Virtual Machines
    :param str resource_group_name:Name of resource group.
    :param str vm_name:Name of virtual machine.
    '''
    from azure.mgmt.network import NetworkManagementClient
    # We start by getting NICs as they are the smack in the middle of all data that we
    # want to collect for a VM (as long as we don't need any info on the VM than what
    # is available in the Id, we don't need to make any calls to the compute RP)
    #
    # Since there is no guarantee that a NIC is in the same resource group as a given
    # Virtual Machine, we can't constrain the lookup to only a single group...
    network_client = get_mgmt_service_client(NetworkManagementClient)
    nics = network_client.network_interfaces.list_all()
    public_ip_addresses = network_client.public_ip_addresses.list_all()
    # Index public IPs by resource ID so each NIC lookup below is O(1).
    ip_address_lookup = {pip.id: pip for pip in list(public_ip_addresses)}
    result = []
    # Only NICs attached to a VM (virtual_machine set) are of interest.
    for nic in [n for n in list(nics) if n.virtual_machine]:
        nic_resource_group, nic_vm_name = _parse_rg_name(nic.virtual_machine.id)
        # If provided, make sure that resource group name and vm name match the NIC we are
        # looking at before adding it to the result...
        same_resource_group_name = resource_group_name is None or \
            resource_group_name.lower() == nic_resource_group.lower()
        same_vm_name = vm_name is None or \
            vm_name.lower() == nic_vm_name.lower()
        if same_resource_group_name and same_vm_name:
            network_info = {
                'privateIpAddresses': [],
                'publicIpAddresses': []
            }
            for ip_configuration in nic.ip_configurations:
                network_info['privateIpAddresses'].append(ip_configuration.private_ip_address)
                if ip_configuration.public_ip_address:
                    public_ip_address = ip_address_lookup[ip_configuration.public_ip_address.id]
                    network_info['publicIpAddresses'].append({
                        'id': public_ip_address.id,
                        'name': public_ip_address.name,
                        'ipAddress': public_ip_address.ip_address,
                        'ipAllocationMethod': public_ip_address.public_ip_allocation_method
                    })
            result.append({
                'virtualMachine': {
                    'resourceGroup': nic_resource_group,
                    'name': nic_vm_name,
                    'network': network_info
                }
            })
    return result
def attach_new_disk(resource_group_name, vm_name, vhd, lun=None,
                    disk_name=None, disk_size=1023, caching=None):
    """Create a brand-new (empty) data disk and attach it to an existing VM."""
    return _attach_disk(resource_group_name, vm_name, vhd, DiskCreateOptionTypes.empty,
                        lun=lun, disk_name=disk_name, caching=caching, disk_size=disk_size)
def attach_existing_disk(resource_group_name, vm_name, vhd, lun=None, disk_name=None, caching=None):
    """Attach an already-existing VHD as a data disk on an existing VM."""
    return _attach_disk(resource_group_name, vm_name, vhd, DiskCreateOptionTypes.attach,
                        lun=lun, disk_name=disk_name, caching=caching)
def _attach_disk(resource_group_name, vm_name, vhd, create_option, lun=None,
                 disk_name=None, caching=None, disk_size=None):
    """Shared worker for attach_new_disk / attach_existing_disk.

    Defaults the disk name to the VHD file name and the LUN to the first
    free slot, then appends the disk and persists the VM.
    """
    vm = get_vm(resource_group_name, vm_name)
    if disk_name is None:
        # derive the name from the VHD blob name, extension stripped
        disk_name = os.path.splitext(vhd.uri.split('/')[-1])[0]
    # pylint: disable=no-member
    if lun is None:
        lun = _get_disk_lun(vm.storage_profile.data_disks)
    new_disk = DataDisk(lun=lun, vhd=vhd, name=disk_name,
                        create_option=create_option,
                        caching=caching, disk_size_gb=disk_size)
    if vm.storage_profile.data_disks is None:
        vm.storage_profile.data_disks = []
    vm.storage_profile.data_disks.append(new_disk)  # pylint: disable=no-member
    return set_vm(vm)
def detach_disk(resource_group_name, vm_name, disk_name):
    """Detach the named data disk from a Virtual Machine.

    :raises CLIError: when no attached disk carries *disk_name*.
    """
    vm = get_vm(resource_group_name, vm_name)
    vm.resources = None  # workaround: https://github.com/Azure/autorest/issues/934
    wanted = disk_name.lower()
    try:
        # pylint: disable=no-member
        to_remove = next(d for d in vm.storage_profile.data_disks
                         if d.name.lower() == wanted)
        vm.storage_profile.data_disks.remove(to_remove)  # pylint: disable=no-member
    except (StopIteration, AttributeError):
        raise CLIError("No disk with the name '{}' found".format(disk_name))
    return set_vm(vm)
def _get_disk_lun(data_disks):
# start from 0, search for unused int for lun
if data_disks:
existing_luns = sorted([d.lun for d in data_disks])
for i in range(len(existing_luns)): # pylint: disable=consider-using-enumerate
if existing_luns[i] != i:
return i
return len(existing_luns)
else:
return 0
def resize_vm(resource_group_name, vm_name, size):
    """Change a VM's hardware size.

    :param str size: a size name such as Standard_A4 or Standard_F4s
    """
    instance = get_vm(resource_group_name, vm_name)
    instance.hardware_profile.vm_size = size  # pylint: disable=no-member
    return set_vm(instance)
def get_instance_view(resource_group_name, vm_name):
    """Fetch the VM with its runtime instance view expanded."""
    return get_vm(resource_group_name, vm_name, expand='instanceView')
def list_disks(resource_group_name, vm_name):
    """List the data disks attached to a Virtual Machine."""
    storage_profile = get_vm(resource_group_name, vm_name).storage_profile
    return storage_profile.data_disks  # pylint: disable=no-member
def capture_vm(resource_group_name, vm_name, vhd_name_prefix,
               storage_container='vhds', overwrite=True):
    '''Captures the VM by copying virtual hard disks of the VM and outputs a
    template that can be used to create similar VMs.
    :param str vhd_name_prefix: the VHD name prefix specify for the VM disks
    :param str storage_container: the storage account container name to save the disks
    :param str overwrite: overwrite the existing disk file
    '''
    client = _compute_client_factory()
    parameter = VirtualMachineCaptureParameters(vhd_name_prefix, storage_container, overwrite)
    poller = client.virtual_machines.capture(resource_group_name, vm_name, parameter)
    result = LongRunningOperation()(poller)
    # The capture operation's output is the generated template; print it as
    # JSON on stdout instead of returning it.
    print(json.dumps(result.output, indent=2))  # pylint: disable=no-member
def reset_windows_admin(
        resource_group_name, vm_name, username, password):
    '''Update the password.
    You can only change the password. Adding a new user is not supported.
    '''
    vm = get_vm(resource_group_name, vm_name, 'instanceView')
    client = _compute_client_factory()
    from azure.mgmt.compute.models import VirtualMachineExtension
    extension_name = _WINDOWS_ACCESS_EXT
    # Reuse (and possibly upgrade) any VMAccess extension already on the VM.
    publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
        vm.resources, extension_name)
    # Password travels in protected_settings; username in plain settings.
    ext = VirtualMachineExtension(vm.location,  # pylint: disable=no-member
                                  publisher=publisher,
                                  virtual_machine_extension_type=extension_name,
                                  protected_settings={'Password': password},
                                  type_handler_version=version,
                                  settings={'UserName': username},
                                  auto_upgrade_minor_version=auto_upgrade)
    poller = client.virtual_machine_extensions.create_or_update(resource_group_name, vm_name,
                                                                _ACCESS_EXT_HANDLER_NAME, ext)
    return ExtensionUpdateLongRunningOperation('resetting admin', 'done')(poller)
def set_linux_user(
        resource_group_name, vm_name, username, password=None, ssh_key_value=None):
    """Create or update a user credential via the Linux VMAccess extension.

    :param username: user name
    :param password: user password
    :param ssh_key_value: SSH public key content or a path to the key file
    """
    protected_settings = {'username': username}
    if password:
        protected_settings['password'] = password
    elif not ssh_key_value:
        # neither credential supplied: default to the user's public SSH key
        ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
    if ssh_key_value:
        protected_settings['ssh_key'] = read_content_if_is_file(ssh_key_value)
    poller = _update_linux_access_extension(resource_group_name, vm_name,
                                            protected_settings)
    return ExtensionUpdateLongRunningOperation('setting user', 'done')(poller)
def delete_linux_user(
        resource_group_name, vm_name, username):
    """Remove a user account via the Linux VMAccess extension."""
    poller = _update_linux_access_extension(
        resource_group_name, vm_name, {'remove_user': username})
    return ExtensionUpdateLongRunningOperation('deleting user', 'done')(poller)
def reset_linux_ssh(resource_group_name, vm_name):
    """Reset the SSH configuration via the Linux VMAccess extension."""
    poller = _update_linux_access_extension(
        resource_group_name, vm_name, {'reset_ssh': True})
    return ExtensionUpdateLongRunningOperation('resetting SSH', 'done')(poller)
def _update_linux_access_extension(resource_group_name, vm_name, protected_settings):
    """Install/update the Linux VMAccess extension with *protected_settings*
    and return the create_or_update poller.

    Shared by set_linux_user / delete_linux_user / reset_linux_ssh.
    """
    vm = get_vm(resource_group_name, vm_name, 'instanceView')
    client = _compute_client_factory()
    from azure.mgmt.compute.models import VirtualMachineExtension
    extension_name = _LINUX_ACCESS_EXT
    # Reuse (and possibly upgrade) any VMAccess extension already on the VM.
    publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
        vm.resources, extension_name)
    ext = VirtualMachineExtension(vm.location,  # pylint: disable=no-member
                                  publisher=publisher,
                                  virtual_machine_extension_type=extension_name,
                                  protected_settings=protected_settings,
                                  type_handler_version=version,
                                  settings={},
                                  auto_upgrade_minor_version=auto_upgrade)
    poller = client.virtual_machine_extensions.create_or_update(resource_group_name, vm_name,
                                                                _ACCESS_EXT_HANDLER_NAME, ext)
    return poller
def disable_boot_diagnostics(resource_group_name, vm_name):
    """Turn off boot diagnostics on a VM; a no-op when not enabled."""
    vm = get_vm(resource_group_name, vm_name)
    profile = vm.diagnostics_profile
    enabled = profile and profile.boot_diagnostics and profile.boot_diagnostics.enabled
    if not enabled:
        return
    vm.resources = None  # workaround: https://github.com/Azure/autorest/issues/934
    profile.boot_diagnostics.enabled = False
    profile.boot_diagnostics.storage_uri = None
    set_vm(vm, ExtensionUpdateLongRunningOperation('disabling boot diagnostics', 'done'))
def enable_boot_diagnostics(resource_group_name, vm_name, storage):
    '''Enable boot diagnostics
    :param storage: a storage account name or a uri like
    https://your_storage_account_name.blob.core.windows.net/
    '''
    vm = get_vm(resource_group_name, vm_name)
    if urlparse(storage).scheme:
        # a full endpoint URI was supplied directly
        storage_uri = storage
    else:
        # a bare account name: resolve it to the account's blob endpoint
        storage_mgmt_client = _get_storage_management_client()
        storage_accounts = storage_mgmt_client.storage_accounts.list()
        storage_account = next((a for a in list(storage_accounts)
                                if a.name.lower() == storage.lower()), None)
        if storage_account is None:
            # fixed typo in user-facing message ("does't" -> "doesn't")
            raise CLIError("{} doesn't exist.".format(storage))
        storage_uri = storage_account.primary_endpoints.blob
    # no-op when boot diagnostics is already enabled against the same URI
    if (vm.diagnostics_profile and
            vm.diagnostics_profile.boot_diagnostics and
            vm.diagnostics_profile.boot_diagnostics.enabled and
            vm.diagnostics_profile.boot_diagnostics.storage_uri and
            vm.diagnostics_profile.boot_diagnostics.storage_uri.lower() == storage_uri.lower()):
        return
    from azure.mgmt.compute.models import DiagnosticsProfile, BootDiagnostics
    boot_diag = BootDiagnostics(True, storage_uri)
    if vm.diagnostics_profile is None:
        vm.diagnostics_profile = DiagnosticsProfile(boot_diag)
    else:
        vm.diagnostics_profile.boot_diagnostics = boot_diag
    vm.resources = None  # workaround: https://github.com/Azure/autorest/issues/934
    set_vm(vm, ExtensionUpdateLongRunningOperation('enabling boot diagnostics', 'done'))
def get_boot_log(resource_group_name, vm_name):
    """Stream a VM's boot-diagnostics serial console log to stdout.

    Finds the storage account hosting the log blob by matching the blob URI
    against each account's primary blob endpoint, retrieves an account key,
    and downloads the blob to the console.

    :raises CLIError: when boot diagnostics is not enabled, no log blob is
        available, or the hosting storage account cannot be found.
    """
    import sys
    from azure.cli.core._profile import CLOUD
    from azure.storage.blob import BlockBlobService
    client = _compute_client_factory()
    virtual_machine = client.virtual_machines.get(
        resource_group_name,
        vm_name,
        expand='instanceView')
    # pylint: disable=no-member
    if (not virtual_machine.instance_view.boot_diagnostics or
            not virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri):
        raise CLIError('Please enable boot diagnostics.')
    blob_uri = virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri
    # Find storage account for diagnostics
    storage_mgmt_client = _get_storage_management_client()
    if not blob_uri:
        raise CLIError('No console log available')
    try:
        storage_accounts = storage_mgmt_client.storage_accounts.list()
        matching_storage_account = (a for a in list(storage_accounts)
                                    if blob_uri.startswith(a.primary_endpoints.blob))
        storage_account = next(matching_storage_account)
    except StopIteration:
        # fixed typo in user-facing message ("accont" -> "account")
        raise CLIError('Failed to find storage account for console log file')
    # The account's resource group may differ from the VM's; pull it from the ID.
    regex = r'/subscriptions/[^/]+/resourceGroups/(?P<rg>[^/]+)/.+'
    match = re.search(regex, storage_account.id, re.I)
    rg = match.group('rg')
    # Get account key
    keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name)
    # Extract container and blob name from url...
    container, blob = urlparse(blob_uri).path.split('/')[-2:]
    storage_client = get_data_service_client(
        BlockBlobService,
        storage_account.name,
        keys.keys[0].value,
        endpoint_suffix=CLOUD.suffixes.storage_endpoint)  # pylint: disable=no-member

    class StreamWriter(object):  # pylint: disable=too-few-public-methods
        """Minimal file-like sink that decodes bytes before writing."""

        def __init__(self, out):
            self.out = out

        def write(self, str_or_bytes):
            if isinstance(str_or_bytes, bytes):
                self.out.write(str_or_bytes.decode())
            else:
                self.out.write(str_or_bytes)

    # our streamwriter not seekable, so no parallel.
    storage_client.get_blob_to_stream(container, blob, StreamWriter(sys.stdout), max_connections=1)
def list_extensions(resource_group_name, vm_name):
    """List the extension resources attached to a Virtual Machine."""
    extension_type = 'Microsoft.Compute/virtualMachines/extensions'
    vm = get_vm(resource_group_name, vm_name)
    return [resource for resource in vm.resources if resource.type == extension_type]
def set_extension(
        resource_group_name, vm_name, vm_extension_name, publisher,
        version=None, settings=None,
        protected_settings=None, no_auto_upgrade=False):
    '''create/update extensions for a VM in a resource group. You can use
    'extension image list' to get extension details
    :param vm_extension_name: the name of the extension
    :param publisher: the name of extension publisher
    :param version: the version of extension. The latest is used when omitted.
    :param settings: extension settings in json format. A json file path is also accepted
    :param protected_settings: protected settings in json format for sensitive information like
    credentials. A json file path is also accepted.
    :param no_auto_upgrade: by doing this, extension system will not pick the highest minor version
    for the specified version number, and will not auto update to the latest build/revision number
    on any VM updates in future.
    '''
    vm = get_vm(resource_group_name, vm_name)
    client = _compute_client_factory()
    from azure.mgmt.compute.models import VirtualMachineExtension
    protected_settings = load_json(protected_settings) if protected_settings else {}
    settings = load_json(settings) if settings else None
    # pylint: disable=no-member
    # resolve missing version to the latest and trim to "major.minor"
    version = _normalize_extension_version(publisher, vm_extension_name, version, vm.location)
    ext = VirtualMachineExtension(vm.location,
                                  publisher=publisher,
                                  virtual_machine_extension_type=vm_extension_name,
                                  protected_settings=protected_settings,
                                  type_handler_version=version,
                                  settings=settings,
                                  auto_upgrade_minor_version=(not no_auto_upgrade))
    return client.virtual_machine_extensions.create_or_update(
        resource_group_name, vm_name, vm_extension_name, ext)
def set_vmss_extension(
        resource_group_name, vmss_name, extension_name, publisher,
        version=None, settings=None,
        protected_settings=None, no_auto_upgrade=False):
    '''create/update extensions for a VMSS in a resource group. You can use
    'extension image list' to get extension details
    :param extension_name: the name of the extension
    :param publisher: the name of extension publisher
    :param version: the version of extension. The latest is used when omitted.
    :param settings: public settings or a file path with such contents
    :param protected_settings: protected settings or a file path with such contents
    :param no_auto_upgrade: by doing this, extension system will not pick the highest minor version
    for the specified version number, and will not auto update to the latest build/revision number
    on any scale set updates in future.
    '''
    client = _compute_client_factory()
    vmss = client.virtual_machine_scale_sets.get(resource_group_name,
                                                 vmss_name)
    protected_settings = load_json(protected_settings) if protected_settings else {}
    settings = load_json(settings) if settings else None
    # pylint: disable=no-member
    # resolve missing version to the latest and trim to "major.minor"
    version = _normalize_extension_version(publisher, extension_name, version, vmss.location)
    ext = VirtualMachineScaleSetExtension(name=extension_name,
                                          publisher=publisher,
                                          type=extension_name,
                                          protected_settings=protected_settings,
                                          type_handler_version=version,
                                          settings=settings,
                                          auto_upgrade_minor_version=(not no_auto_upgrade))
    # scale sets without any extension yet need an empty profile created first
    if not vmss.virtual_machine_profile.extension_profile:
        vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile([])
    vmss.virtual_machine_profile.extension_profile.extensions.append(ext)
    return client.virtual_machine_scale_sets.create_or_update(resource_group_name,
                                                              vmss_name,
                                                              vmss)
def _normalize_extension_version(publisher, vm_extension_name, version, location):
    """Resolve a usable extension version: look up the latest when none is
    given, then trim it to the 'major.minor' form the service expects.

    :raises CLIError: when no published version can be found for the extension.
    """
    if not version:
        result = load_extension_images_thru_services(publisher, vm_extension_name,
                                                     None, location, show_latest=True)
        if not result:
            raise CLIError('Failed to find the latest version for the extension "{}"'
                           .format(vm_extension_name))
        # with 'show_latest' enabled, we will only get one result.
        version = result[0]['version']
    version = _trim_away_build_number(version)
    return version
def set_diagnostics_extension(
        resource_group_name, vm_name, settings, protected_settings=None, version=None,
        no_auto_upgrade=False):
    """Enable the diagnostics extension on a virtual machine, picking the
    Linux or Windows variant from the VM's OS profile."""
    vm = get_vm(resource_group_name, vm_name)
    # pylint: disable=no-member
    ext_name = (_LINUX_DIAG_EXT
                if _detect_os_type_for_diagnostics_ext(vm.os_profile)
                else _WINDOWS_DIAG_EXT)
    mapping = extension_mappings[ext_name]
    return set_extension(resource_group_name, vm_name, ext_name,
                         mapping['publisher'],
                         version or mapping['version'],
                         settings,
                         protected_settings,
                         no_auto_upgrade)
def set_vmss_diagnostics_extension(
        resource_group_name, vmss_name, settings, protected_settings=None, version=None,
        no_auto_upgrade=False):
    """Enable the diagnostics extension on a scale set, picking the Linux or
    Windows variant from the scale set's OS profile."""
    client = _compute_client_factory()
    vmss = client.virtual_machine_scale_sets.get(resource_group_name,
                                                 vmss_name)
    # pylint: disable=no-member
    ext_name = (_LINUX_DIAG_EXT
                if _detect_os_type_for_diagnostics_ext(vmss.virtual_machine_profile.os_profile)
                else _WINDOWS_DIAG_EXT)
    mapping = extension_mappings[ext_name]
    return set_vmss_extension(resource_group_name, vmss_name, ext_name,
                              mapping['publisher'],
                              version or mapping['version'],
                              settings,
                              protected_settings,
                              no_auto_upgrade)
# Same logic also applies on vmss
def _detect_os_type_for_diagnostics_ext(os_profile):
is_linux_os = bool(os_profile.linux_configuration)
is_windows_os = bool(os_profile.windows_configuration)
if not is_linux_os and not is_windows_os:
raise CLIError('Diagnostics extension can only be installed on Linux or Windows VM')
return is_linux_os
def get_vmss_extension(resource_group_name, vmss_name, extension_name):
    """Return the named extension from a scale set, or None when absent."""
    client = _compute_client_factory()
    vmss = client.virtual_machine_scale_sets.get(resource_group_name,
                                                 vmss_name)
    ext_profile = vmss.virtual_machine_profile.extension_profile  # pylint: disable=no-member
    if not ext_profile:
        return
    for ext in ext_profile.extensions:
        if ext.name == extension_name:
            return ext
    return None
def list_vmss_extensions(resource_group_name, vmss_name):
    """List extensions on a scale set; None when it has no extension profile."""
    client = _compute_client_factory()
    vmss = client.virtual_machine_scale_sets.get(resource_group_name,
                                                 vmss_name)
    ext_profile = vmss.virtual_machine_profile.extension_profile  # pylint: disable=no-member
    if not ext_profile:
        return None
    return ext_profile.extensions
def delete_vmss_extension(resource_group_name, vmss_name, extension_name):
    """Remove the named extension from a scale set and persist the change.

    :raises CLIError: when the scale set has no extensions or the named one
        is not present.
    """
    client = _compute_client_factory()
    vmss = client.virtual_machine_scale_sets.get(resource_group_name,
                                                 vmss_name)
    ext_profile = vmss.virtual_machine_profile.extension_profile  # pylint: disable=no-member
    if not ext_profile:
        raise CLIError('Scale set has no extensions to delete')
    remaining = [e for e in ext_profile.extensions if e.name != extension_name]
    if len(remaining) == len(ext_profile.extensions):
        raise CLIError('Extension {} not found'.format(extension_name))
    ext_profile.extensions = remaining
    return client.virtual_machine_scale_sets.create_or_update(resource_group_name,
                                                              vmss_name,
                                                              vmss)
def _get_private_config(resource_group_name, storage_account):
    """Build the diagnostics 'private config' dict carrying the storage
    account name and its primary key."""
    storage_client = _get_storage_management_client()
    # pylint: disable=no-member
    account_keys = storage_client.storage_accounts.list_keys(
        resource_group_name, storage_account).keys
    return {
        'storageAccountName': storage_account,
        'storageAccountKey': account_keys[0].value
    }
def show_default_diagnostics_configuration(is_windows_os=False):
    """Return the default diagnostics config defining what data is collected."""
    return get_default_diag_config(is_windows_os)
def vm_show_nic(resource_group_name, vm_name, nic):
    """Show details of a NIC attached to a VM, matched by resource ID.

    :raises CLIError: when *nic* is not attached to the VM.
    """
    vm = get_vm(resource_group_name, vm_name)
    target = nic.lower()
    # pylint: disable=no-member
    found = next((n for n in vm.network_profile.network_interfaces
                  if n.id.lower() == target), None)
    if not found:
        raise CLIError("NIC '{}' not found on VM '{}'".format(nic, vm_name))
    from azure.mgmt.network import NetworkManagementClient
    network_client = get_mgmt_service_client(NetworkManagementClient)
    nic_name = parse_resource_id(found.id)['name']
    return network_client.network_interfaces.get(resource_group_name, nic_name)
def vm_list_nics(resource_group_name, vm_name):
    """List NIC references attached to a virtual machine."""
    network_profile = get_vm(resource_group_name, vm_name).network_profile
    return network_profile.network_interfaces  # pylint: disable=no-member
def vm_add_nics(resource_group_name, vm_name, nics, primary_nic=None):
    """Add NIC configurations to a VM, keeping the ones already attached.

    :param nics: NIC resource IDs or names (names assumed in the same group)
    :param primary_nic: name or id of the primary NIC; the first NIC in the
        combined list becomes primary when omitted
    """
    vm = get_vm(resource_group_name, vm_name)
    combined = _get_existing_nics(vm) + _build_nic_list(nics)
    return _update_vm_nics(vm, combined, primary_nic)
def vm_remove_nics(resource_group_name, vm_name, nics, primary_nic=None):
    """Remove NIC configurations from a VM.

    :param nics: NIC resource IDs or names (names assumed in the same group)
    :param primary_nic: name or id of the primary NIC; the first surviving
        NIC becomes primary when omitted
    """
    vm = get_vm(resource_group_name, vm_name)
    doomed_ids = {n.id.lower() for n in _build_nic_list(nics)}
    survivors = [n for n in _get_existing_nics(vm) if n.id.lower() not in doomed_ids]
    return _update_vm_nics(vm, survivors, primary_nic)
def vm_set_nics(resource_group_name, vm_name, nics, primary_nic=None):
    """Replace all NIC configurations on a VM with the given list.

    :param nics: NIC resource IDs or names (names assumed in the same group)
    :param primary_nic: name or id of the primary NIC; the first NIC in the
        list becomes primary when omitted
    """
    vm = get_vm(resource_group_name, vm_name)
    return _update_vm_nics(vm, _build_nic_list(nics), primary_nic)
# pylint: disable=no-member
def vm_open_port(resource_group_name, vm_name, network_security_group_name=None,
                 apply_to_subnet=False):
    """ Opens a VM to all inbound traffic and protocols by adding a security rule to the network
    security group (NSG) that is attached to the VM's network interface (NIC) or subnet. The
    existing NSG will be used or a new one will be created. The rule name is 'open-port-cmd' and
    will overwrite an existing rule with this name. For multi-NIC VMs, or for more fine
    grained control, use the appropriate network commands directly (nsg rule create, etc).
    """
    from azure.mgmt.network import NetworkManagementClient
    network = get_mgmt_service_client(NetworkManagementClient)

    vm = get_vm(resource_group_name, vm_name)
    location = vm.location
    # Only single-NIC VMs are supported; anything else is ambiguous.
    nic_ids = list(vm.network_profile.network_interfaces)
    if len(nic_ids) > 1:
        raise CLIError('Multiple NICs is not supported for this command. Create rules on the NSG '
                       'directly.')
    elif not nic_ids:
        raise CLIError("No NIC associated with VM '{}'".format(vm_name))

    # get existing NSG or create a new one
    # The NIC name is the last path segment of its resource id.
    nic = network.network_interfaces.get(resource_group_name, os.path.split(nic_ids[0].id)[1])
    if not apply_to_subnet:
        nsg = nic.network_security_group
    else:
        # Resolve the subnet from the NIC's first IP configuration.
        subnet_id = parse_resource_id(nic.ip_configurations[0].subnet.id)
        subnet = network.subnets.get(resource_group_name,
                                     subnet_id['name'],
                                     subnet_id['child_name'])
        nsg = subnet.network_security_group
    if not nsg:
        from azure.mgmt.network.models import NetworkSecurityGroup
        # NOTE(review): network_security_group_name may be None here when the
        # caller did not supply one — confirm upstream validation guarantees it.
        nsg = LongRunningOperation('Creating network security group')(
            network.network_security_groups.create_or_update(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=NetworkSecurityGroup(location=location)
            )
        )
    # update the NSG with the new rule to allow inbound traffic
    from azure.mgmt.network.models import SecurityRule
    # Allow-all rule: any protocol, any port, any source/destination, priority 900.
    rule = SecurityRule(protocol='*', access='allow', direction='inbound', name='open-port-cmd',
                        source_port_range='*', destination_port_range='*', priority=900,
                        source_address_prefix='*', destination_address_prefix='*')
    nsg_name = nsg.name or os.path.split(nsg.id)[1]
    LongRunningOperation('Adding security rule')(
        network.security_rules.create_or_update(
            resource_group_name, nsg_name, 'open-port-cmd', rule)
    )
    # update the NIC or subnet
    if not apply_to_subnet:
        nic.network_security_group = nsg
        return LongRunningOperation('Updating NIC')(
            network.network_interfaces.create_or_update(
                resource_group_name, nic.name, nic)
        )
    else:
        subnet.network_security_group = nsg
        return LongRunningOperation('Updating subnet')(
            network.subnets.create_or_update(
                resource_group_name=resource_group_name,
                virtual_network_name=subnet_id['name'],
                subnet_name=subnet_id['child_name'],
                subnet_parameters=subnet
            )
        )
def _build_nic_list(nic_ids):
    """Resolve NIC ids/names into NetworkInterfaceReference objects.

    Every reference is created as non-primary; _update_vm_nics decides which
    NIC ends up primary. Returns [] for a falsy input.
    """
    from azure.mgmt.network import NetworkManagementClient
    from azure.mgmt.compute.models import NetworkInterfaceReference

    if not nic_ids:
        return []
    # pylint: disable=no-member
    network_client = get_mgmt_service_client(NetworkManagementClient)
    references = []
    for nic_id in nic_ids:
        rg, name = _parse_rg_name(nic_id)
        nic = network_client.network_interfaces.get(rg, name)
        references.append(NetworkInterfaceReference(nic.id, False))
    return references
def _get_existing_nics(vm):
network_profile = getattr(vm, 'network_profile', None)
nics = []
if network_profile is not None:
nics = network_profile.network_interfaces or []
return nics
def _update_vm_nics(vm, nics, primary_nic):
    """Write the NIC list onto the VM, ensure exactly one NIC is primary, and
    persist the change through set_vm.

    :param vm: VM model to mutate
    :param nics: full list of NetworkInterfaceReference to attach
    :param primary_nic: name or resource id of the NIC to mark primary; when
        falsy, an already-primary NIC is kept, else the first NIC is promoted
    :raises CLIError: when the requested primary NIC is missing or ambiguous
    """
    from azure.mgmt.compute.models import NetworkProfile

    if primary_nic:
        try:
            _, primary_nic_name = _parse_rg_name(primary_nic)
        except IndexError:
            # primary_nic was a bare name rather than a full resource id.
            primary_nic_name = primary_nic
        # Case-insensitive match on the NIC name extracted from each id.
        matched = [n for n in nics if _parse_rg_name(n.id)[1].lower() == primary_nic_name.lower()]

        if not matched:
            raise CLIError('Primary Nic {} is not found'.format(primary_nic))

        if len(matched) > 1:
            raise CLIError('Duplicate Nic entries with name {}'.format(primary_nic))

        # Demote everything, then promote the single match.
        for n in nics:
            n.primary = False
        matched[0].primary = True
    elif nics:
        # No explicit primary requested: keep an existing one, else pick the first.
        if not [n for n in nics if n.primary]:
            nics[0].primary = True

    network_profile = getattr(vm, 'network_profile', None)
    if network_profile is None:
        vm.network_profile = NetworkProfile(nics)
    else:
        network_profile.network_interfaces = nics

    # set_vm persists the VM; return the stored NIC list for display.
    return set_vm(vm).network_profile.network_interfaces
def scale_vmss(resource_group_name, vm_scale_set_name, new_capacity):
    """Change the number of VMs in a virtual machine scale set.

    :param int new_capacity: desired instance count; a no-op (returns None)
        when the scale set already has that capacity
    """
    client = _compute_client_factory()
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
    # pylint: disable=no-member
    if vmss.sku.capacity == new_capacity:
        return None
    vmss.sku.capacity = new_capacity
    # Only the SKU needs to be sent; build a minimal update model.
    updated = VirtualMachineScaleSet(vmss.location, sku=vmss.sku)
    return client.virtual_machine_scale_sets.create_or_update(resource_group_name,
                                                              vm_scale_set_name,
                                                              updated)
def update_vmss_instances(resource_group_name, vm_scale_set_name, instance_ids):
    """Upgrade the given VM instances to the scale set's latest model."""
    scale_set_ops = _compute_client_factory().virtual_machine_scale_sets
    return scale_set_ops.update_instances(resource_group_name, vm_scale_set_name, instance_ids)
def get_vmss_instance_view(resource_group_name, vm_scale_set_name, instance_id=None):
    """Get the instance view of a scale set or of its VM instances.

    :param str instance_id: a VM instance id, or '*' to list the view of every
        VM in the scale set; omit for the scale-set-level view
    """
    client = _compute_client_factory()
    if not instance_id:
        return client.virtual_machine_scale_sets.get_instance_view(resource_group_name,
                                                                   vm_scale_set_name)
    if instance_id == '*':
        # Wildcard: enumerate per-VM views.
        return client.virtual_machine_scale_set_vms.list(resource_group_name, vm_scale_set_name)
    return client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name,
                                                                  vm_scale_set_name,
                                                                  instance_id)
def show_vmss(resource_group_name, vm_scale_set_name, instance_id=None):
    """Show a scale set, or one of its VM instances.

    :param str instance_id: VM instance id; omit to show the scale set itself
    """
    client = _compute_client_factory()
    if not instance_id:
        return client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
    return client.virtual_machine_scale_set_vms.get(resource_group_name,
                                                    vm_scale_set_name,
                                                    instance_id)
def list_vmss(resource_group_name=None):
    """List scale sets, optionally scoped to a single resource group."""
    scale_set_ops = _compute_client_factory().virtual_machine_scale_sets
    if resource_group_name:
        return scale_set_ops.list(resource_group_name)
    return scale_set_ops.list_all()
def deallocate_vmss(resource_group_name, vm_scale_set_name, instance_ids=None):
    """Deallocate virtual machines in a scale set.

    A single instance id is routed to the per-VM operation; otherwise the
    scale-set-level operation handles the (possibly empty) id list.
    """
    client = _compute_client_factory()
    if instance_ids and len(instance_ids) == 1:
        return client.virtual_machine_scale_set_vms.deallocate(
            resource_group_name, vm_scale_set_name, instance_ids[0])
    return client.virtual_machine_scale_sets.deallocate(
        resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def delete_vmss_instances(resource_group_name, vm_scale_set_name, instance_ids):
    """Delete the given virtual machines from a scale set."""
    client = _compute_client_factory()
    if len(instance_ids) != 1:
        return client.virtual_machine_scale_sets.delete_instances(
            resource_group_name, vm_scale_set_name, instance_ids)
    # A single instance id uses the per-VM delete operation.
    return client.virtual_machine_scale_set_vms.delete(
        resource_group_name, vm_scale_set_name, instance_ids[0])
def stop_vmss(resource_group_name, vm_scale_set_name, instance_ids=None):
    """Power off (stop) virtual machines in a scale set."""
    client = _compute_client_factory()
    if instance_ids and len(instance_ids) == 1:
        # A single instance id targets the per-VM operation.
        return client.virtual_machine_scale_set_vms.power_off(
            resource_group_name, vm_scale_set_name, instance_ids[0])
    return client.virtual_machine_scale_sets.power_off(
        resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def reimage_vmss(resource_group_name, vm_scale_set_name, instance_id=None):
    """Reimage virtual machines in a scale set.

    :param str instance_id: VM instance id; omit to reimage every instance
    """
    client = _compute_client_factory()
    if not instance_id:
        return client.virtual_machine_scale_sets.reimage(resource_group_name,
                                                         vm_scale_set_name)
    return client.virtual_machine_scale_set_vms.reimage(resource_group_name,
                                                        vm_scale_set_name,
                                                        instance_id)
def restart_vmss(resource_group_name, vm_scale_set_name, instance_ids=None):
    """Restart virtual machines in a scale set."""
    client = _compute_client_factory()
    if instance_ids and len(instance_ids) == 1:
        # A single instance id targets the per-VM operation.
        return client.virtual_machine_scale_set_vms.restart(
            resource_group_name, vm_scale_set_name, instance_ids[0])
    return client.virtual_machine_scale_sets.restart(
        resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def start_vmss(resource_group_name, vm_scale_set_name, instance_ids=None):
    """Start virtual machines in a scale set."""
    client = _compute_client_factory()
    if instance_ids and len(instance_ids) == 1:
        # A single instance id targets the per-VM operation.
        return client.virtual_machine_scale_set_vms.start(
            resource_group_name, vm_scale_set_name, instance_ids[0])
    return client.virtual_machine_scale_sets.start(
        resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def availset_get(resource_group_name, name):
    """Fetch a single availability set."""
    client = _compute_client_factory()
    return client.availability_sets.get(resource_group_name, name)
def availset_set(**kwargs):
    """Create or update an availability set; kwargs pass straight to the SDK."""
    client = _compute_client_factory()
    return client.availability_sets.create_or_update(**kwargs)
def vmss_get(resource_group_name, name):
    """Fetch a single virtual machine scale set."""
    client = _compute_client_factory()
    return client.virtual_machine_scale_sets.get(resource_group_name, name)
def vmss_set(**kwargs):
    """Create or update a scale set; kwargs pass straight to the SDK."""
    client = _compute_client_factory()
    return client.virtual_machine_scale_sets.create_or_update(**kwargs)
def update_acs(resource_group_name, container_service_name, new_agent_count):
    """Set the agent count of a container service's first agent pool."""
    client = _compute_client_factory()
    acs = client.container_services.get(resource_group_name, container_service_name)
    # Only the first agent pool is scalable through this command.
    acs.agent_pool_profiles[0].count = new_agent_count
    return client.container_services.create_or_update(
        resource_group_name, container_service_name, acs)
def list_container_services(client, resource_group_name=None):
    """List container services, optionally filtered by resource group."""
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
# pylint: disable=too-many-locals, unused-argument, too-many-statements
def create_vm(vm_name, resource_group_name, image,
              size='Standard_DS1', location=None, tags=None, no_wait=False,
              authentication_type=None, admin_password=None, admin_username=getpass.getuser(),
              ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
              availability_set=None,
              nics=None, nsg=None, nsg_rule=None,
              private_ip_address=None,
              public_ip_address=None, public_ip_address_allocation='dynamic',
              public_ip_address_dns_name=None,
              os_disk_name=None, os_type=None, storage_account=None,
              storage_caching='ReadWrite', storage_container_name='vhds', storage_sku='Premium_LRS',
              vnet_name=None, vnet_address_prefix='10.0.0.0/16',
              subnet=None, subnet_address_prefix='10.0.0.0/24', storage_profile=None,
              os_publisher=None, os_offer=None, os_sku=None, os_version=None,
              storage_account_type=None, vnet_type=None, nsg_type=None, public_ip_type=None,
              nic_type=None, validate=False):
    """Create a virtual machine by composing an ARM template and deploying it.

    The *_type parameters ('new' vs. existing) decide which supporting
    resources (storage account, NIC, VNet, NSG, public IP) are generated into
    the template; existing resources are referenced by id instead.

    NOTE(review): the admin_username default calls getpass.getuser() at module
    import time, not per call — confirm this is intentional.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.cli.command_modules.vm._vm_utils import random_string
    from azure.cli.command_modules.vm._template_builder import (
        ArmTemplateBuilder, build_vm_resource, build_storage_account_resource, build_nic_resource,
        build_vnet_resource, build_nsg_resource, build_public_ip_resource,
        build_output_deployment_resource, build_deployment_resource, StorageProfile)

    from azure.cli.core._profile import CLOUD
    from azure.mgmt.resource.resources import ResourceManagementClient
    from azure.mgmt.resource.resources.models import DeploymentProperties, TemplateLink

    # Base id prefix for all Microsoft.Network resources in this subscription/RG.
    network_id_template = resource_id(
        subscription=get_subscription_id(), resource_group=resource_group_name,
        namespace='Microsoft.Network')

    # determine final defaults and calculated values
    tags = tags or {}
    os_disk_name = os_disk_name or 'osdisk_{}'.format(random_string(10))

    # Build up the ARM template
    master_template = ArmTemplateBuilder()

    vm_dependencies = []
    if storage_account_type == 'new':
        storage_account = storage_account or 'vhdstorage{}'.format(
            random_string(14, force_lower=True))
        vm_dependencies.append('Microsoft.Storage/storageAccounts/{}'.format(storage_account))
        master_template.add_resource(build_storage_account_resource(storage_account, location,
                                                                    tags, storage_sku))

    nic_name = None
    if nic_type == 'new':
        nic_name = '{}VMNic'.format(vm_name)
        vm_dependencies.append('Microsoft.Network/networkInterfaces/{}'.format(nic_name))

        nic_dependencies = []
        if vnet_type == 'new':
            vnet_name = vnet_name or '{}VNET'.format(vm_name)
            subnet = subnet or '{}Subnet'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
            master_template.add_resource(build_vnet_resource(
                vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix))

        if nsg_type == 'new':
            # Default inbound rule depends on the OS: RDP for Windows, SSH otherwise.
            nsg_rule_type = 'rdp' if os_type.lower() == 'windows' else 'ssh'
            nsg = nsg or '{}NSG'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg))
            master_template.add_resource(build_nsg_resource(nsg, location, tags, nsg_rule_type))

        if public_ip_type == 'new':
            public_ip_address = public_ip_address or '{}PublicIP'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(
                public_ip_address))
            master_template.add_resource(build_public_ip_resource(public_ip_address, location,
                                                                  tags,
                                                                  public_ip_address_allocation,
                                                                  public_ip_address_dns_name))

        subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(
            network_id_template, vnet_name, subnet)

        nsg_id = '{}/networkSecurityGroups/{}'.format(network_id_template, nsg) if nsg else None

        public_ip_address_id = \
            '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address) \
            if public_ip_address else None

        # The VM references exactly this one (new) NIC.
        nics = [
            {'id': '{}/networkInterfaces/{}'.format(network_id_template, nic_name)}
        ]

        nic_resource = build_nic_resource(
            nic_name, location, tags, vm_name, subnet_id, private_ip_address, nsg_id,
            public_ip_address_id)
        nic_resource['dependsOn'] = nic_dependencies
        master_template.add_resource(nic_resource)
    else:
        # Using an existing NIC
        # An existing NIC already carries its own network wiring, so these
        # parameters would silently conflict — reject them.
        invalid_parameters = [nsg, public_ip_address, subnet, vnet_name]
        if any(invalid_parameters):
            raise CLIError('When specifying an existing NIC, do not specify NSG, '
                           'public IP, VNet or subnet.')

    if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]:
        # storage_account may be a full resource id; keep only the name part.
        storage_account_name = storage_account.rsplit('/', 1)
        storage_account_name = storage_account_name[1] if \
            len(storage_account_name) > 1 else storage_account_name[0]
        os_vhd_uri = 'https://{}.blob.{}/{}/{}.vhd'.format(
            storage_account_name, CLOUD.suffixes.storage_endpoint, storage_container_name,
            os_disk_name)
    # NOTE(review): for other storage profiles os_vhd_uri is never assigned but
    # is passed to build_vm_resource below — confirm those profiles are
    # unreachable here or that a default is set upstream.

    vm_resource = build_vm_resource(
        vm_name, location, tags, size, storage_profile, nics, admin_username, availability_set,
        admin_password, ssh_key_value, ssh_dest_key_path, image, os_disk_name,
        os_type, storage_caching, os_publisher, os_offer, os_sku, os_version,
        os_vhd_uri)
    vm_resource['dependsOn'] = vm_dependencies
    master_template.add_resource(vm_resource)

    template = master_template.build()

    # deploy ARM template
    deployment_name = 'vm_deploy_' + random_string(32)
    client = get_mgmt_service_client(ResourceManagementClient).deployments
    properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
    if validate:
        # Validation only — nothing is created.
        return client.validate(resource_group_name, deployment_name, properties, raw=no_wait)

    # creates the VM deployment
    if no_wait:
        return client.create_or_update(
            resource_group_name, deployment_name, properties, raw=no_wait)
    else:
        LongRunningOperation()(client.create_or_update(
            resource_group_name, deployment_name, properties, raw=no_wait))
    return get_vm_details(resource_group_name, vm_name)
# pylint: disable=too-many-locals, too-many-statements
def create_vmss(vmss_name, resource_group_name, image,
                disable_overprovision=False, instance_count=2,
                location=None, tags=None, upgrade_policy_mode='manual', validate=False,
                admin_username=getpass.getuser(), admin_password=None, authentication_type=None,
                vm_sku="Standard_D1_v2", no_wait=False,
                ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
                load_balancer=None, backend_pool_name=None, nat_pool_name=None, backend_port=None,
                public_ip_address=None, public_ip_address_allocation='dynamic',
                public_ip_address_dns_name=None,
                storage_caching='ReadOnly',
                storage_container_name='vhds', storage_sku='Standard_LRS',
                os_type=None, os_disk_name='osdiskimage',
                vnet_name=None, vnet_address_prefix='10.0.0.0/16',
                subnet=None, subnet_address_prefix='10.0.0.0/24',
                os_offer=None, os_publisher=None, os_sku=None, os_version=None,
                load_balancer_type=None, vnet_type=None, public_ip_type=None, storage_profile=None):
    """Create a virtual machine scale set by composing an ARM template and
    deploying it.

    The *_type parameters ('new' vs. existing) decide which supporting
    resources (VNet, load balancer, public IP) are generated into the template.

    NOTE(review): the admin_username default calls getpass.getuser() at module
    import time, not per call — confirm this is intentional.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.cli.command_modules.vm._vm_utils import random_string
    from azure.cli.command_modules.vm._template_builder import (
        ArmTemplateBuilder, StorageProfile, build_vmss_resource, build_storage_account_resource,
        build_vnet_resource, build_public_ip_resource, build_load_balancer_resource,
        build_output_deployment_resource, build_deployment_resource,
        build_load_balancer_inbound_nat_rules_resource, build_vmss_storage_account_pool_resource)

    from azure.cli.core._profile import CLOUD
    from azure.mgmt.resource.resources import ResourceManagementClient
    from azure.mgmt.resource.resources.models import DeploymentProperties, TemplateLink

    # Base id prefix for all Microsoft.Network resources in this subscription/RG.
    network_id_template = resource_id(
        subscription=get_subscription_id(), resource_group=resource_group_name,
        namespace='Microsoft.Network')

    # determine final defaults and calculated values
    tags = tags or {}
    os_disk_name = os_disk_name or 'osdisk_{}'.format(random_string(10))

    # Build up the ARM template
    master_template = ArmTemplateBuilder()

    vmss_dependencies = []
    if vnet_type == 'new':
        vnet_name = vnet_name or '{}VNET'.format(vmss_name)
        subnet = subnet or '{}Subnet'.format(vmss_name)
        vmss_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
        master_template.add_resource(build_vnet_resource(
            vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix))
    subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)

    if load_balancer_type == 'new':
        load_balancer = load_balancer or '{}LB'.format(vmss_name)
        vmss_dependencies.append('Microsoft.Network/loadBalancers/{}'.format(load_balancer))

        lb_dependencies = []
        if public_ip_type == 'new':
            public_ip_address = public_ip_address or '{}PublicIP'.format(load_balancer)
            lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))  # pylint: disable=line-too-long
            master_template.add_resource(build_public_ip_resource(public_ip_address, location,
                                                                  tags,
                                                                  public_ip_address_allocation,
                                                                  public_ip_address_dns_name))
        public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address) if public_ip_address else None  # pylint: disable=line-too-long

        # calculate default names if not provided
        backend_pool_name = backend_pool_name or '{}BEPool'.format(load_balancer)
        nat_pool_name = nat_pool_name or '{}NatPool'.format(load_balancer)
        if not backend_port:
            # Default NAT target: RDP for Windows, SSH for everything else.
            backend_port = 3389 if os_type == 'windows' else 22
        master_template.add_resource(build_load_balancer_inbound_nat_rules_resource(
            load_balancer, location, backend_port, instance_count, 'loadBalancerFrontEnd'))
        lb_resource = build_load_balancer_resource(
            load_balancer, location, tags, backend_pool_name, nat_pool_name, backend_port,
            'loadBalancerFrontEnd', public_ip_address_id, subnet_id)
        lb_resource['dependsOn'] = lb_dependencies
        master_template.add_resource(lb_resource)

    if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]:
        # A loop resource creates the pool of storage accounts for the VHDs.
        master_template.add_resource(build_vmss_storage_account_pool_resource(
            'storageLoop', location, tags, storage_sku))
        vmss_dependencies.append('storageLoop')
    else:
        raise CLIError('Unsupported storage profile.')

    # Derive a short, unique naming prefix from the scale set name.
    scrubbed_name = vmss_name.replace('-', '').lower()[:5]
    naming_prefix = '{}{}'.format(scrubbed_name,
                                  random_string(9 - len(scrubbed_name), force_lower=True))

    backend_address_pool_id = '{}/loadBalancers/{}/backendAddressPools/{}'.format(
        network_id_template, load_balancer, backend_pool_name) if load_balancer_type else None
    inbound_nat_pool_id = '{}/loadBalancers/{}/inboundNatPools/{}'.format(
        network_id_template, load_balancer, nat_pool_name) if load_balancer_type == 'new' else None

    ip_config_name = '{}IPConfig'.format(naming_prefix)
    nic_name = '{}Nic'.format(naming_prefix)
    vmss_resource = build_vmss_resource(vmss_name, naming_prefix, location, tags,
                                        not disable_overprovision, upgrade_policy_mode,
                                        vm_sku, instance_count,
                                        ip_config_name, nic_name, subnet_id, admin_username,
                                        authentication_type, storage_profile,
                                        os_disk_name, storage_caching, os_type,
                                        image, admin_password,
                                        ssh_key_value, ssh_dest_key_path,
                                        os_publisher, os_offer, os_sku, os_version,
                                        backend_address_pool_id, inbound_nat_pool_id)
    vmss_resource['dependsOn'] = vmss_dependencies

    master_template.add_resource(vmss_resource)
    # Five storage accounts back the VHD containers of the scale set.
    master_template.add_variable('storageAccountNames', [
        '{}{}'.format(naming_prefix, x) for x in range(5)
    ])
    master_template.add_variable('vhdContainers', [
        "[concat('https://', variables('storageAccountNames')[{}], '.blob.{}/{}')]".format(
            x, CLOUD.suffixes.storage_endpoint, storage_container_name) for x in range(5)
    ])
    master_template.add_output('VMSS', vmss_name, 'Microsoft.Compute', 'virtualMachineScaleSets',
                               output_type='object')

    template = master_template.build()

    # deploy ARM template
    deployment_name = 'vmss_deploy_' + random_string(32)
    client = get_mgmt_service_client(ResourceManagementClient).deployments
    properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
    if validate:
        # Validation only — nothing is created.
        return client.validate(resource_group_name, deployment_name, properties, raw=no_wait)

    # creates the VMSS deployment
    return client.create_or_update(resource_group_name, deployment_name, properties, raw=no_wait)
|
from import_export import resources
from import_export.admin import ImportExportModelAdmin
# from import_export.admin import ImportExportActionModelAdmin
from django.contrib import admin
from .models import TimePost, Client, Project
class TimePostResource(resources.ModelResource):
    """Import/export resource declaring which TimePost fields are exchanged."""

    class Meta:
        model = TimePost
        # Double-underscore entries follow relations (user, client, project).
        fields = (
            'user__username',
            'date',
            'time_spent',
            'notes',
            'expenses',
            'expense_notes',
            'miles',
            'miles_notes',
            'client__name',
            'project__name',
        )
# export_order = ('id', 'price', 'author', 'name')
# admin.site.register(TimePost)
@admin.register(TimePost)
class TimePostAdmin(ImportExportModelAdmin):
    """Admin for TimePost with import/export support via TimePostResource."""

    resource_class = TimePostResource
# Client and Project use the default ModelAdmin.
admin.site.register(Client)
admin.site.register(Project)
|
import asyncio
import json
import os
import sys
import multiprocessing
import webbrowser
import requests
import requests.cookies
import logging as log
import subprocess
import time
import re
from typing import Union, List, Dict
from galaxy.api.consts import LocalGameState, Platform
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Achievement, Game, LicenseInfo, LocalGame, GameTime, LicenseType
from galaxy.api.errors import (
AuthenticationRequired, BackendTimeout, BackendNotAvailable, BackendError,
NetworkError, UnknownError, InvalidCredentials, UnknownBackendResponse
)
from version import __version__ as version
from process import ProcessProvider
from local_client_base import ClientNotInstalledError
from local_client import LocalClient
from backend import BackendClient, AccessTokenExpired
from definitions import Blizzard, DataclassJSONEncoder, BlizzardGame, ClassicGame
from consts import SYSTEM
from consts import Platform as pf
from http_client import AuthenticatedHttpClient
class BNetPlugin(Plugin):
def __init__(self, reader, writer, token):
super().__init__(Platform.Battlenet, version, reader, writer, token)
self.local_client = LocalClient(self._update_statuses)
self.authentication_client = AuthenticatedHttpClient(self)
self.backend_client = BackendClient(self, self.authentication_client)
self.watched_running_games = set()
self.local_games_called = False
    async def _notify_about_game_stop(self, game, starting_timeout):
        """Watch a just-launched game's processes and flip its state back to
        Installed once it exits.

        :param game: installed-game wrapper exposing ``info.uid``,
            ``_processes`` and a blocking ``wait_until_game_stops``
        :param starting_timeout: seconds to wait before sampling processes,
            giving the game time to spawn them
        """
        if not self.local_games_called:
            # Galaxy only accepts status updates after get_local_games ran.
            return
        id_to_watch = game.info.uid

        if id_to_watch in self.watched_running_games:
            log.debug(f'Game {id_to_watch} is already watched. Skipping')
            return

        try:
            self.watched_running_games.add(id_to_watch)
            await asyncio.sleep(starting_timeout)
            ProcessProvider().update_games_processes([game])
            log.info(f'Setuping process watcher for {game._processes}')
            loop = asyncio.get_event_loop()
            # wait_until_game_stops blocks, so run it off the event loop.
            await loop.run_in_executor(None, game.wait_until_game_stops)
        finally:
            # Always clear Running and stop watching, even if watching failed.
            self.update_local_game_status(LocalGame(id_to_watch, LocalGameState.Installed))
            self.watched_running_games.remove(id_to_watch)
    def _update_statuses(self, refreshed_games, previous_games):
        """Diff two snapshots of locally-known games and push the resulting
        state changes to Galaxy.

        :param refreshed_games: current {blizzard_id: game} snapshot
        :param previous_games: previous {blizzard_id: game} snapshot
        """
        if not self.local_games_called:
            # Galaxy only accepts status updates after get_local_games ran.
            return
        for blizz_id, refr in refreshed_games.items():
            prev = previous_games.get(blizz_id, None)

            if prev is None:
                # Newly seen game: fully installed, or installation just began.
                if refr.playable:
                    log.debug('Detected playable game')
                    state = LocalGameState.Installed
                else:
                    log.debug('Detected installation begin')
                    state = LocalGameState.None_
            elif refr.playable and not prev.playable:
                log.debug('Detected playable game')
                state = LocalGameState.Installed
            elif refr.last_played != prev.last_played:
                # last_played moved -> the game was started since the last scan;
                # begin watching its processes to detect the stop.
                log.debug('Detected launched game')
                state = LocalGameState.Installed | LocalGameState.Running
                asyncio.create_task(self._notify_about_game_stop(refr, 5))
            else:
                # No relevant change for this game.
                continue

            log.info(f'Changing game {blizz_id} state to {state}')
            self.update_local_game_status(LocalGame(blizz_id, state))

        # Games present before but missing now were uninstalled.
        for blizz_id, prev in previous_games.items():
            refr = refreshed_games.get(blizz_id, None)
            if refr is None:
                log.debug('Detected uninstalled game')
                state = LocalGameState.None_
                self.update_local_game_status(LocalGame(blizz_id, state))
def log_out(self):
if self.backend_client:
asyncio.create_task(self.authentication_client.shutdown())
self.authentication_client.user_details = None
async def open_battlenet_browser(self):
url = self.authentication_client.blizzard_battlenet_download_url
log.info(f'Opening battle.net website: {url}')
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, lambda x: webbrowser.open(x, autoraise=True), url)
async def install_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game and os.access(installed_game.install_path, os.F_OK):
log.warning("Received install command on an already installed game")
return await self.launch_game(game_id)
if game_id in [classic.uid for classic in Blizzard.CLASSIC_GAMES]:
if SYSTEM == pf.WINDOWS:
platform = 'windows'
elif SYSTEM == pf.MACOS:
platform = 'macos'
webbrowser.open(f"https://www.blizzard.com/download/confirmation?platform={platform}&locale=enUS&version=LIVE&id={game_id}")
return
try:
self.local_client.refresh()
log.info(f'Installing game of id {game_id}')
self.local_client.install_game(game_id)
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except Exception as e:
log.exception(f"Installing game {game_id} failed: {e}")
def _open_battlenet_at_id(self, game_id):
try:
self.local_client.refresh()
self.local_client.open_battlenet(game_id)
except Exception as e:
log.exception(f"Opening battlenet client on specific game_id {game_id} failed {e}")
try:
self.local_client.open_battlenet()
except Exception as e:
log.exception(f"Opening battlenet client failed {e}")
    async def uninstall_game(self, game_id):
        """Uninstall a game, delegating to the platform-appropriate mechanism.

        On macOS (and for WoW Classic) the Battle.net client is opened instead
        of uninstalling directly; on Windows the Battle.net uninstaller is run.

        :param game_id: Blizzard uid of the game to remove
        :raises AuthenticationRequired: when there is no authenticated session
        """
        if not self.authentication_client.is_authenticated():
            raise AuthenticationRequired()

        if game_id == 'wow_classic':
            # attempting to uninstall classic wow through protocol gives you a message that the game cannot
            # be uninstalled through protocol and you should use battle.net
            return self._open_battlenet_at_id(game_id)

        if SYSTEM == pf.MACOS:
            # No programmatic uninstall on macOS; hand off to the client UI.
            self._open_battlenet_at_id(game_id)
        else:
            try:
                installed_game = self.local_client.get_installed_games().get(game_id, None)

                if installed_game is None or not os.access(installed_game.install_path, os.F_OK):
                    # Not actually installed: just sync the state with Galaxy.
                    log.error(f'Cannot uninstall {game_id}')
                    self.update_local_game_status(LocalGame(game_id, LocalGameState.None_))
                    return

                if not isinstance(installed_game.info, ClassicGame):
                    if self.local_client.uninstaller is None:
                        raise FileNotFoundError('Uninstaller not found')

                    uninstall_tag = installed_game.uninstall_tag
                    client_lang = self.local_client.config_parser.locale_language
                    self.local_client.uninstaller.uninstall_game(installed_game, uninstall_tag, client_lang)

            except Exception as e:
                log.exception(f'Uninstalling game {game_id} failed: {e}')
    async def launch_game(self, game_id):
        """Launch an installed game and start watching for its termination.

        Classic games are started directly from their executable/bundle; other
        games are launched through the Battle.net client. A missing game is
        routed to install_game instead.

        :param game_id: Blizzard uid of the game to launch
        """
        if not self.local_games_called:
            # Make sure the installed-games cache has been populated first.
            await self.get_local_games()
        try:
            game = self.local_client.get_installed_games().get(game_id, None)
            if game is None:
                log.error(f'Launching game that is not installed: {game_id}')
                return await self.install_game(game_id)

            if isinstance(game.info, ClassicGame):
                # Classic games bypass the Battle.net client entirely.
                log.info(f'Launching game of id: {game_id}, {game} at path {os.path.join(game.install_path, game.info.exe)}')
                if SYSTEM == pf.WINDOWS:
                    subprocess.Popen(os.path.join(game.install_path, game.info.exe))
                elif SYSTEM == pf.MACOS:
                    if not game.info.bundle_id:
                        log.warning(f"{game.name} has no bundle id, help by providing us bundle id of this game")
                    subprocess.Popen(['open', '-b', game.info.bundle_id])

                self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
                asyncio.create_task(self._notify_about_game_stop(game, 6))
                return

            self.local_client.refresh()
            log.info(f'Launching game of id: {game_id}, {game}')

            await self.local_client.launch_game(game, wait_sec=60)

            self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
            # Hide the Battle.net window once the game is running.
            self.local_client.close_window()
            asyncio.create_task(self._notify_about_game_stop(game, 3))

        except ClientNotInstalledError as e:
            log.warning(e)
            await self.open_battlenet_browser()
        except TimeoutError as e:
            # launch_game gave up waiting for the game process to appear.
            log.warning(str(e))
        except Exception as e:
            log.exception(f"Launching game {game_id} failed: {e}")
    async def authenticate(self, stored_credentials=None):
        """Authenticate against Battle.net.

        With stored credentials, the saved access token is validated and user
        details fetched; without them (or when validation fails), an
        interactive browser login is requested from Galaxy.

        :raises InvalidCredentials: when the stored token is rejected for any
            reason other than a transient backend/network failure
        """
        try:
            if stored_credentials:
                auth_data = self.authentication_client.process_stored_credentials(stored_credentials)
                try:
                    await self.authentication_client.create_session()
                    await self.backend_client.refresh_cookies()
                    auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
                except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
                    # Transient/backend problems propagate as-is so Galaxy can retry.
                    raise e
                except Exception:
                    raise InvalidCredentials()
                if self.authentication_client.validate_auth_status(auth_status):
                    self.authentication_client.user_details = await self.backend_client.get_user_info()
                    return self.authentication_client.parse_user_details()
            else:
                # No stored credentials: ask Galaxy to open the login page.
                return self.authentication_client.authenticate_using_login()
        except Exception as e:
            raise e
    async def pass_login_credentials(self, step, credentials, cookies):
        """Handle the redirect Galaxy captured from the interactive login.

        :param step: login step index supplied by Galaxy (unused here)
        :param credentials: dict containing the final redirect ``end_uri``
        :param cookies: browser cookies captured during the login flow
        :raises InvalidCredentials: when session creation or the token check fails
        """
        if "logout&app=oauth" in credentials['end_uri']:
            # 2fa expired, repeat authentication
            return self.authentication_client.authenticate_using_login()

        if self.authentication_client.attempted_to_set_battle_tag:
            # The user was just sent through battletag creation; finish that flow.
            self.authentication_client.user_details = await self.backend_client.get_user_info()
            return self.authentication_client.parse_auth_after_setting_battletag()

        cookie_jar = self.authentication_client.parse_cookies(cookies)
        auth_data = await self.authentication_client.get_auth_data_login(cookie_jar, credentials)

        try:
            await self.authentication_client.create_session()
            await self.backend_client.refresh_cookies()
        except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
            # Transient/backend problems propagate as-is so Galaxy can retry.
            raise e
        except Exception:
            raise InvalidCredentials()

        auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
        if not ("authorities" in auth_status and "IS_AUTHENTICATED_FULLY" in auth_status["authorities"]):
            raise InvalidCredentials()

        self.authentication_client.user_details = await self.backend_client.get_user_info()
        self.authentication_client.set_credentials()
        return self.authentication_client.parse_battletag()
    async def get_owned_games(self):
        """Return all owned Battle.net and Classic games as Galaxy ``Game`` objects.

        :raises AuthenticationRequired: when no session is established.
        """
        if not self.authentication_client.is_authenticated():
            raise AuthenticationRequired()
        def _parse_battlenet_games(standard_games: dict, cn: bool) -> Dict[BlizzardGame, LicenseType]:
            """Map the backend's ``gameAccounts`` payload to games and license types."""
            # Backend account-status string -> Galaxy license type.
            licenses = {
                None: LicenseType.Unknown,
                "Trial": LicenseType.OtherUserLicense,
                "Good": LicenseType.SinglePurchase,
                "Inactive": LicenseType.SinglePurchase,
                "Banned": LicenseType.SinglePurchase,
                "Free": LicenseType.FreeToPlay
            }
            games = {}
            for standard_game in standard_games["gameAccounts"]:
                title_id = standard_game['titleId']
                try:
                    game = Blizzard.game_by_title_id(title_id, cn)
                except KeyError:
                    log.warning(f"Skipping unknown game with titleId: {title_id}")
                else:
                    games[game] = licenses[standard_game.get("gameAccountStatus")]
            # Add wow classic if retail wow is present in owned games
            wow_license = games.get(Blizzard['wow'])
            if wow_license is not None:
                games[Blizzard['wow_classic']] = wow_license
            return games
        def _parse_classic_games(classic_games: dict) -> Dict[ClassicGame, LicenseType]:
            """Match the backend's localized classic-game names against known titles."""
            games = {}
            for classic_game in classic_games["classicGames"]:
                # Normalize non-breaking spaces before comparing names.
                sanitized_name = classic_game["localizedGameName"].replace(u'\xa0', ' ')
                for cg in Blizzard.CLASSIC_GAMES:
                    if cg.name == sanitized_name:
                        games[cg] = LicenseType.SinglePurchase
                        break
                else:  # for/else: no known classic game matched this name
                    log.warning(f"Skipping unknown classic game with name: {sanitized_name}")
            return games
        cn = self.authentication_client.region == 'cn'
        battlenet_games = _parse_battlenet_games(await self.backend_client.get_owned_games(), cn)
        classic_games = _parse_classic_games(await self.backend_client.get_owned_classic_games())
        owned_games: Dict[BlizzardGame, LicenseType] = {**battlenet_games, **classic_games}
        # Free-to-try titles are always listed, unless already owned with a real license.
        for game in Blizzard.try_for_free_games(cn):
            if game not in owned_games:
                owned_games[game] = LicenseType.FreeToPlay
        return [
            Game(game.uid, game.name, None, LicenseInfo(license_type))
            for game, license_type in owned_games.items()
        ]
    async def get_local_games(self):
        """Report installed/running games to Galaxy.

        Waits up to ~2 seconds for the local client's install database to finish
        parsing, then translates its state into ``LocalGame`` entries.
        """
        timeout = time.time() + 2
        try:
            translated_installed_games = []
            # Poll until parsing completes or the 2-second budget is spent.
            while not self.local_client.games_finished_parsing():
                await asyncio.sleep(0.1)
                if time.time() >= timeout:
                    break
            running_games = self.local_client.get_running_games()
            installed_games = self.local_client.get_installed_games()
            log.info(f"Installed games {installed_games.items()}")
            log.info(f"Running games {running_games}")
            for uid, game in installed_games.items():
                if game.playable:
                    state = LocalGameState.Installed
                    if uid in running_games:
                        state |= LocalGameState.Running
                else:
                    # Present on disk but not launchable (e.g. still downloading).
                    state = LocalGameState.None_
                translated_installed_games.append(LocalGame(uid, state))
            self.local_client.installed_games_cache = installed_games
            return translated_installed_games
        except Exception as e:
            log.exception(f"failed to get local games: {str(e)}")
            raise
        finally:
            # Other parts of the plugin gate behavior on this flag.
            self.local_games_called = True
async def get_game_time(self, game_id, context):
total_time = None
last_played_time = None
blizzard_game = Blizzard[game_id]
if blizzard_game.name == "Overwatch":
total_time = await self._get_overwatch_time()
log.debug(f"Gametime for Overwatch is {total_time} minutes.")
for config_info in self.local_client.config_parser.games:
if config_info.uid == blizzard_game.uid:
if config_info.last_played is not None:
last_played_time = int(config_info.last_played)
break
return GameTime(game_id, total_time, last_played_time)
async def _get_overwatch_time(self) -> Union[None, int]:
log.debug("Fetching playtime for Overwatch...")
player_data = await self.backend_client.get_ow_player_data()
if 'message' in player_data: # user not found... unfortunately no 404 status code is returned :/
log.error('No Overwatch profile found.')
return None
if player_data['private'] == True:
log.info('Unable to get data as Overwatch profile is private.')
return None
qp_time = player_data['playtime']['quickplay']
if qp_time is None: # user has not played quick play
return 0
if qp_time.count(':') == 1: # minutes and seconds
match = re.search('(?:(?P<m>\\d+):)(?P<s>\\d+)', qp_time)
if match:
return int(match.group('m'))
elif qp_time.count(':') == 2: # hours, minutes and seconds
match = re.search('(?:(?P<h>\\d+):)(?P<m>\\d+)', qp_time)
if match:
return int(match.group('h')) * 60 + int(match.group('m'))
raise UnknownBackendResponse(f'Unknown Overwatch API playtime format: {qp_time}')
    async def _get_wow_achievements(self):
        """Collect completed WoW achievements across all of the user's characters.

        Fetches each character's achievements concurrently; duplicate achievement
        ids across characters are reported once (first character wins).
        Backend/auth failures are logged and whatever was collected so far is
        returned; per-character timeouts/connection errors are re-raised.
        """
        achievements = []
        try:
            characters_data = await self.backend_client.get_wow_character_data()
            characters_data = characters_data["characters"]
            # One request per character, run concurrently; individual failures are
            # returned as exception objects instead of aborting the gather.
            wow_character_data = await asyncio.gather(
                *[
                    self.backend_client.get_wow_character_achievements(character["realm"], character["name"])
                    for character in characters_data
                ],
                return_exceptions=True,
            )
            for data in wow_character_data:
                if isinstance(data, requests.Timeout) or isinstance(data, requests.ConnectionError):
                    raise data
            # Pair each completed achievement id with its completion timestamp (ms).
            wow_achievement_data = [
                list(
                    zip(
                        data["achievements"]["achievementsCompleted"],
                        data["achievements"]["achievementsCompletedTimestamp"],
                    )
                )
                for data in wow_character_data
                if type(data) is dict
            ]
            already_in = set()
            for char_ach in wow_achievement_data:
                for ach in char_ach:
                    if ach[0] not in already_in:
                        # Timestamps are in milliseconds; Galaxy expects seconds.
                        achievements.append(Achievement(achievement_id=ach[0], unlock_time=int(ach[1] / 1000)))
                        already_in.add(ach[0])
        except (AccessTokenExpired, BackendError) as e:
            log.exception(str(e))
        # NOTE(review): writes a debug dump into the working directory on every
        # call — presumably a development aid; confirm it is intentional.
        with open('wow.json', 'w') as f:
            f.write(json.dumps(achievements, cls=DataclassJSONEncoder))
        return achievements
    async def _get_sc2_achievements(self):
        """Fetch completed StarCraft II achievements for the user's (single) profile.

        NOTE(review): the ``assert`` below fails for users with multiple SC2
        accounts (see TODO) and is stripped under ``python -O`` — confirm whether
        a proper error should be raised instead.
        """
        account_data = await self.backend_client.get_sc2_player_data(self.authentication_client.user_details["id"])
        # TODO what if more sc2 accounts?
        assert len(account_data) == 1
        account_data = account_data[0]
        profile_data = await self.backend_client.get_sc2_profile_data(
            account_data["regionId"], account_data["realmId"],
            account_data["profileId"]
        )
        # Keep only achievements flagged complete; completionDate is already epoch seconds.
        sc2_achievement_data = [
            Achievement(achievement_id=achievement["achievementId"], unlock_time=achievement["completionDate"])
            for achievement in profile_data["earnedAchievements"]
            if achievement["isComplete"]
        ]
        # NOTE(review): debug dump into the working directory on every call.
        with open('sc2.json', 'w') as f:
            f.write(json.dumps(sc2_achievement_data, cls=DataclassJSONEncoder))
        return sc2_achievement_data
# async def get_unlocked_achievements(self, game_id):
# if not self.website_client.is_authenticated():
# raise AuthenticationRequired()
# try:
# if game_id == "21298":
# return await self._get_sc2_achievements()
# elif game_id == "5730135":
# return await self._get_wow_achievements()
# else:
# return []
# except requests.Timeout:
# raise BackendTimeout()
# except requests.ConnectionError:
# raise NetworkError()
# except Exception as e:
# log.exception(str(e))
# return []
async def launch_platform_client(self):
if self.local_client.is_running():
log.info("Launch platform client called but client is already running")
return
self.local_client.open_battlenet()
await self.local_client.prevent_battlenet_from_showing()
    async def shutdown_platform_client(self):
        """Ask the local Battle.net client to shut down."""
        await self.local_client.shutdown_platform_client()
    async def shutdown(self):
        """Plugin teardown: close the authentication client's HTTP session."""
        log.info("Plugin shutdown.")
        await self.authentication_client.shutdown()
def main():
    """Entry point: register the plugin with the Galaxy SDK and run its event loop."""
    multiprocessing.freeze_support()  # required for frozen (e.g. PyInstaller) Windows builds
    create_and_run_plugin(BNetPlugin, sys.argv)
if __name__ == "__main__":
    main()
|
import functools
from django_countries.serializers import CountryFieldMixin
from rest_framework import serializers
from rest_framework.reverse import reverse
from standards.models import Jurisdiction, UserProfile
from standards.models import ControlledVocabulary, Term
from standards.models import TermRelation
from standards.models import StandardsDocument, StandardNode
from standards.models import StandardsCrosswalk, StandardNodeRelation
from standards.models import ContentCollection, ContentNode, ContentNodeRelation
from standards.models import ContentCorrelation, ContentStandardRelation
# ROC HYPERLINK FIELDS
################################################################################
class MultiKeyHyperlinkField(serializers.HyperlinkedRelatedField):
    """
    Used to create and parse ROC resources hyperlinks that have multiple keys.
    Subclasses must define ``view_name``, ``queryset``, ``url_kwargs_mapping``
    (used by ``get_url``), and ``lookup_kwargs_mapping`` (used by ``get_object``).
    """
    def rgetattr(self, obj, attrpath):
        """
        A fancy version of ``getattr`` that allows getting dot-separated nested attributes
        like ``jurisdiction.id`` used in ``MultiKeyHyperlinkField`` mapping dicts.
        This code is inspired by solution in https://stackoverflow.com/a/31174427.
        """
        return functools.reduce(getattr, [obj] + attrpath.split('.'))
    def get_url(self, obj, view_name, request, format):
        """Serialize ``obj`` into its detail URL using ``url_kwargs_mapping``."""
        url_kwargs = dict(
            (urlparam, self.rgetattr(obj, attrpath))
            for urlparam, attrpath in self.url_kwargs_mapping.items()
        )
        if "format" in request.GET:
            # This is a hack to avoid ?format=api appended to URIs by preserve_builtin_query_params
            # github.com/encode/django-rest-framework/blob/master/rest_framework/reverse.py#L12-L29
            request.GET._mutable = True
            del request.GET["format"]
            request.GET._mutable = False
        return reverse(view_name, kwargs=url_kwargs, request=request)
    def get_object(self, view_name, view_args, view_kwargs):
        """Resolve an incoming URL back to a model instance via ``lookup_kwargs_mapping``."""
        lookup_kwargs = dict(
            (kwarg, view_kwargs[url_kwarg])
            for kwarg, url_kwarg in self.lookup_kwargs_mapping.items()
        )
        return self.get_queryset().get(**lookup_kwargs)
    def use_pk_only_optimization(self):
        # get_url needs full objects to resolve nested attributes, so the
        # PK-only shortcut must be disabled; via
        # https://github.com/django-json-api/django-rest-framework-json-api/issues/489#issuecomment-428002360
        return False
class JurisdictionScopedHyperlinkField(MultiKeyHyperlinkField):
    """Base for models addressed as /<jurisdiction_name>/*/<pk> with a direct ``jurisdiction`` FK."""
    # /<jurisdiction_name>/*/<pk>
    url_kwargs_mapping = {
        "jurisdiction_name": "jurisdiction.name",
        "pk": "id",
    }
    lookup_kwargs_mapping = {
        "jurisdiction__name": "jurisdiction_name",
        "id": "pk",
    }
# JURISDICTION
class JurisdictionHyperlinkField(MultiKeyHyperlinkField):
    """Hyperlink to a Jurisdiction: /<name> (the jurisdiction namespace root)."""
    # /<name> == Jurisdiction namespace root
    view_name = 'jurisdiction-detail'
    queryset = Jurisdiction.objects.all()
    url_kwargs_mapping = {"name": "name"}
    lookup_kwargs_mapping = {"name": "name"}
# VOCABULARIES AND TERMS
class ControlledVocabularyHyperlinkField(MultiKeyHyperlinkField):
    """Hyperlink to a ControlledVocabulary: /<jurisdiction_name>/terms/<name>."""
    # /<jurisdiction__name>/terms/<name>
    view_name = 'jurisdiction-vocabulary-detail'
    queryset = ControlledVocabulary.objects.all()
    url_kwargs_mapping = {
        "jurisdiction_name": "jurisdiction.name",
        "name": "name",
    }
    lookup_kwargs_mapping = {
        "jurisdiction__name": "jurisdiction_name",
        "name": "name",
    }
class TermHyperlinkField(MultiKeyHyperlinkField):
    """Hyperlink to a Term: /<jurisdiction_name>/terms/<vocabulary_name>/<path>."""
    # /<jurisdiction_name>/terms/<vocabulary_name>/<path>
    view_name = 'jurisdiction-vocabulary-term-detail'
    queryset = Term.objects.all()
    url_kwargs_mapping = {
        "jurisdiction_name": "vocabulary.jurisdiction.name",
        "vocabulary_name": "vocabulary.name",
        "path": "path",
    }
    lookup_kwargs_mapping = {
        "vocabulary__jurisdiction__name": "jurisdiction_name",
        "vocabulary__name": "vocabulary_name",
        "path": "path",
    }
class TermRelationHyperlinkField(JurisdictionScopedHyperlinkField):
    """Hyperlink to a TermRelation: /<jurisdiction_name>/termrels/<pk>."""
    # /<jurisdiction_name>/termrels/<pk>
    view_name = 'jurisdiction-termrelation-detail'
    queryset = TermRelation.objects.all()
# CURRICULUM STANDARDS
class StandardsDocumentHyperlinkHyperlinkField(JurisdictionScopedHyperlinkField):
    """Hyperlink to a StandardsDocument: /<jurisdiction_name>/documents/<pk>.

    NOTE: the doubled "Hyperlink" in the class name is kept for backward
    compatibility — it is referenced by serializers below.
    """
    # /<jurisdiction_name>/documents/<pk>
    view_name = 'jurisdiction-document-detail'
    queryset = StandardsDocument.objects.all()
class StandardNodeHyperlinkField(MultiKeyHyperlinkField):
    """Hyperlink to a StandardNode (jurisdiction reached via its document)."""
    # /<jurisdiction_name>/standardnodes/<pk>
    view_name = 'jurisdiction-standardnode-detail'
    queryset = StandardNode.objects.all()
    url_kwargs_mapping = {
        "jurisdiction_name": "document.jurisdiction.name",
        "pk": "id",
    }
    lookup_kwargs_mapping = {
        "document__jurisdiction__name": "jurisdiction_name",
        "id": "pk",
    }
# CROSSWALKS
class StandardsCrowsswalkHyperlinkField(JurisdictionScopedHyperlinkField):
    """Hyperlink to a StandardsCrosswalk: /<jurisdiction_name>/standardscrosswalks/<pk>.

    NOTE: "Crowsswalk" is a typo kept for backward compatibility — the name is
    referenced by serializers below.
    """
    # /<jurisdiction_name>/standardscrosswalks/<pk>
    view_name = 'jurisdiction-standardscrosswalk-detail'
    queryset = StandardsCrosswalk.objects.all()
class StandardNodeRelationHyperlinkField(MultiKeyHyperlinkField):
    """Hyperlink to a StandardNodeRelation (jurisdiction reached via its crosswalk)."""
    # /<jurisdiction_name>/standardnoderels/<pk>
    view_name = 'jurisdiction-standardnoderel-detail'
    queryset = StandardNodeRelation.objects.all()
    url_kwargs_mapping = {
        "jurisdiction_name": "crosswalk.jurisdiction.name",
        "pk": "id",
    }
    lookup_kwargs_mapping = {
        "crosswalk__jurisdiction__name": "jurisdiction_name",
        "id": "pk",
    }
# CONTENT
class ContentCollectionHyperlinkField(JurisdictionScopedHyperlinkField):
    """Hyperlink to a ContentCollection: /<jurisdiction_name>/contentcollections/<pk>."""
    # /<jurisdiction_name>/contentcollections/<pk>
    view_name = 'jurisdiction-contentcollection-detail'
    queryset = ContentCollection.objects.all()
class ContentNodeHyperlinkField(MultiKeyHyperlinkField):
    """Hyperlink to a ContentNode (jurisdiction reached via its collection)."""
    # /<jurisdiction_name>/contentnodes/<pk>
    view_name = 'jurisdiction-contentnode-detail'
    queryset = ContentNode.objects.all()
    url_kwargs_mapping = {
        "jurisdiction_name": "collection.jurisdiction.name",
        "pk": "id",
    }
    lookup_kwargs_mapping = {
        "collection__jurisdiction__name": "jurisdiction_name",
        "id": "pk",
    }
class ContentNodeRelationHyperlinkField(JurisdictionScopedHyperlinkField):
    """Hyperlink to a ContentNodeRelation: /<jurisdiction_name>/contentnoderels/<pk>."""
    # /<jurisdiction_name>/contentnoderels/<pk>
    view_name = 'jurisdiction-contentnoderel-detail'
    queryset = ContentNodeRelation.objects.all()
# CONTENT CORRELATIONS
class ContentCorrelationHyperlinkField(JurisdictionScopedHyperlinkField):
    """Hyperlink to a ContentCorrelation: /<jurisdiction_name>/contentcorrelations/<pk>."""
    # /<jurisdiction_name>/contentcorrelations/<pk>
    view_name = 'jurisdiction-contentcorrelation-detail'
    queryset = ContentCorrelation.objects.all()
class ContentStandardRelationHyperlinkField(MultiKeyHyperlinkField):
    """Hyperlink to a ContentStandardRelation (jurisdiction reached via its correlation)."""
    # /<jurisdiction_name>/contentstandardrels/<pk>
    view_name = 'jurisdiction-contentstandardrel-detail'
    queryset = ContentStandardRelation.objects.all()
    url_kwargs_mapping = {
        "jurisdiction_name": "correlation.jurisdiction.name",
        "pk": "id",
    }
    lookup_kwargs_mapping = {
        "correlation__jurisdiction__name": "jurisdiction_name",
        "id": "pk",
    }
# JURISDICTION
################################################################################
class JurisdictionSerializer(serializers.ModelSerializer):
    """Serializer for Jurisdiction, exposing hyperlinks to all related resources."""
    vocabularies = ControlledVocabularyHyperlinkField(many=True, required=False)
    documents = serializers.SerializerMethodField()
    crosswalks = serializers.SerializerMethodField()
    contentcollections = serializers.SerializerMethodField()
    contentcorrelations = serializers.SerializerMethodField()

    class Meta:
        model = Jurisdiction
        fields = [
            # "id", # internal identifiers; need not be exposed to users
            "uri",
            "name",
            "display_name",
            "country",
            "language",
            "alt_name",
            "notes",
            "vocabularies",
            "documents",
            "crosswalks",
            "contentcollections",
            "contentcorrelations",
        ]

    # The following four are method fields because the serializers for these
    # models are only defined later in this source file. They all build the same
    # /<jurisdiction_name>/.../<pk> detail URL, so they share one helper.
    def _reverse_related(self, view_name, related_objs):
        """Return jurisdiction-scoped detail URLs for each object in *related_objs*."""
        return [
            reverse(
                view_name,
                kwargs={"jurisdiction_name": related.jurisdiction.name, "pk": related.id},
                request=self.context["request"],
            )
            for related in related_objs
        ]

    def get_documents(self, obj):
        return self._reverse_related("jurisdiction-document-detail", obj.documents.all())

    def get_crosswalks(self, obj):
        return self._reverse_related("jurisdiction-standardscrosswalk-detail", obj.crosswalks.all())

    def get_contentcollections(self, obj):
        return self._reverse_related("jurisdiction-contentcollection-detail", obj.contentcollections.all())

    def get_contentcorrelations(self, obj):
        return self._reverse_related("jurisdiction-contentcorrelation-detail", obj.contentcorrelations.all())
# VOCABULARIES, TERMS, and TERM RELATIONS
################################################################################
class ControlledVocabularySerializer(serializers.ModelSerializer):
    """Serializer for ControlledVocabulary with hyperlinks to its jurisdiction and terms."""
    jurisdiction = JurisdictionHyperlinkField(required=True)
    terms = TermHyperlinkField(many=True, required=False)
    class Meta:
        model = ControlledVocabulary
        fields = [
            # "id", # internal identifiers; need not be exposed to users
            "jurisdiction",
            "uri",
            "name",
            "label",
            "alt_label",
            "hidden_label",
            "description",
            "language",
            "source",
            "notes",
            "date_created",
            "date_modified",
            "extra_fields",
            "creator",
            "terms",
        ]
class TermSerializer(serializers.ModelSerializer):
    """Serializer for Term; the jurisdiction link is derived through the vocabulary."""
    jurisdiction = JurisdictionHyperlinkField(source='vocabulary.jurisdiction', required=True)
    vocabulary = ControlledVocabularyHyperlinkField(required=True)
    class Meta:
        model = Term
        fields = [
            # "id", # internal identifiers; need not be exposed to users
            "jurisdiction",
            "vocabulary",
            "uri",
            "path",
            "label",
            "alt_label",
            "hidden_label",
            "notation",
            "definition",
            "notes",
            "language",
            "sort_order",
            "date_created",
            "date_modified",
            "extra_fields",
        ]
class TermRelationSerializer(serializers.ModelSerializer):
    """Serializer for TermRelation; target is optional (an external target_uri may be used)."""
    jurisdiction = JurisdictionHyperlinkField(required=True)
    source = TermHyperlinkField(required=True)
    target = TermHyperlinkField(required=False)
    class Meta:
        model = TermRelation
        fields = [
            "id",
            "uri",
            "jurisdiction",
            "source",
            "target_uri",
            "target",
            "kind",
            "notes",
            "date_created",
            "date_modified",
            "extra_fields",
        ]
# STANDARDS
################################################################################
class StandardsDocumentSerializer(serializers.ModelSerializer):
    """Serializer for StandardsDocument; children are links to the root node's children."""
    root_node_id = serializers.SerializerMethodField()
    jurisdiction = JurisdictionHyperlinkField(required=True)
    children = StandardNodeHyperlinkField(source='root.children', many=True)
    subjects = TermHyperlinkField(many=True)
    education_levels = TermHyperlinkField(many=True)
    license = TermHyperlinkField()
    class Meta:
        model = StandardsDocument
        fields = '__all__'
    def get_root_node_id(self, obj):
        """Return the id of the document's level-0 (root) node, or None when absent."""
        try:
            return StandardNode.objects.get(level=0, document_id=obj.id).id
        except StandardNode.DoesNotExist:
            return None
class FullStandardsDocumentSerializer(StandardsDocumentSerializer):
    """
    Full standard document serialization recursive traversal of standard nodes.
    """
    children = serializers.SerializerMethodField()
    def get_children(self, obj):
        """Serialize the root node's children recursively instead of as hyperlinks."""
        return [
            FullStandardNodeSerializer(node, context=self.context).data
            for node in obj.root.children.all()
        ]
class StandardNodeSerializer(serializers.ModelSerializer):
    """Serializer for StandardNode; jurisdiction is derived through the owning document."""
    uri = serializers.SerializerMethodField()
    jurisdiction = JurisdictionHyperlinkField(source='document.jurisdiction', required=False) # check this...
    document = StandardsDocumentHyperlinkHyperlinkField(required=True)
    parent = StandardNodeHyperlinkField()
    kind = TermHyperlinkField()
    subjects = TermHyperlinkField(many=True)
    education_levels = TermHyperlinkField(many=True)
    concept_terms = TermHyperlinkField(many=True)
    children = StandardNodeHyperlinkField(many=True)
    class Meta:
        model = StandardNode
        fields = '__all__'
    def get_uri(self, obj):
        """Return the node's canonical URI (model property)."""
        return obj.uri
class FullStandardNodeSerializer(StandardNodeSerializer):
    """
    Recursive variant of ``StandardNodeSerializer`` to use for ``/full`` action.
    """
    children = serializers.SerializerMethodField()
    def get_children(self, obj):
        """Serialize children recursively (depth-first) instead of as hyperlinks."""
        return [
            FullStandardNodeSerializer(node, context=self.context).data
            for node in obj.children.all()
        ]
# STANDARDS CROSSWALKS
################################################################################
class StandardsCrosswalkSerializer(serializers.ModelSerializer):
    """Serializer for StandardsCrosswalk with hyperlinks to its node relations."""
    jurisdiction = JurisdictionHyperlinkField(required=True)
    license = TermHyperlinkField()
    subjects = TermHyperlinkField(many=True)
    education_levels = TermHyperlinkField(many=True)
    relations = StandardNodeRelationHyperlinkField(many=True)
    class Meta:
        model = StandardsCrosswalk
        fields = '__all__'
class StandardNodeRelationSerializer(serializers.ModelSerializer):
    """Serializer for StandardNodeRelation; jurisdiction is derived through the crosswalk."""
    jurisdiction = JurisdictionHyperlinkField(source='crosswalk.jurisdiction', required=False)
    crosswalk = StandardsCrowsswalkHyperlinkField(required=True)
    source = StandardNodeHyperlinkField(style={'base_template': 'input.html'})
    kind = TermHyperlinkField()
    target = StandardNodeHyperlinkField(style={'base_template': 'input.html'})
    class Meta:
        model = StandardNodeRelation
        fields = '__all__'
# CONTENT
################################################################################
class ContentCollectionSerializer(CountryFieldMixin, serializers.ModelSerializer):
    """Serializer for ContentCollection; children are links to the root node's children."""
    uri = serializers.SerializerMethodField()
    jurisdiction = JurisdictionHyperlinkField(required=True)
    license = TermHyperlinkField()
    subjects = TermHyperlinkField(many=True)
    education_levels = TermHyperlinkField(many=True)
    children = ContentNodeHyperlinkField(source='root.children', many=True)
    class Meta:
        model = ContentCollection
        fields = '__all__'
    def get_uri(self, obj):
        """Return the collection's canonical URI (model property)."""
        return obj.uri
class FullContentCollectionSerializer(ContentCollectionSerializer):
    """
    Full content collection serialization recursive traversal of content nodes.
    """
    children = serializers.SerializerMethodField()
    def get_children(self, obj):
        """Serialize the root node's children recursively instead of as hyperlinks."""
        return [
            FullContentNodeSerializer(node, context=self.context).data
            for node in obj.root.children.all()
        ]
class ContentNodeSerializer(serializers.ModelSerializer):
    """Serializer for ContentNode; jurisdiction is derived through the owning collection."""
    uri = serializers.SerializerMethodField()
    # FIX: ContentNode is scoped by its collection (see ContentNodeHyperlinkField,
    # which maps jurisdiction_name -> "collection.jurisdiction.name"); the previous
    # source='document.jurisdiction' referenced an attribute ContentNode does not have.
    jurisdiction = JurisdictionHyperlinkField(source='collection.jurisdiction', required=False)
    collection = ContentCollectionHyperlinkField(required=True)
    parent = ContentNodeHyperlinkField()
    kind = TermHyperlinkField()
    subjects = TermHyperlinkField(many=True)
    education_levels = TermHyperlinkField(many=True)
    concept_terms = TermHyperlinkField(many=True)
    license = TermHyperlinkField()
    children = ContentNodeHyperlinkField(many=True)

    class Meta:
        model = ContentNode
        fields = '__all__'

    def get_uri(self, obj):
        """Return the node's canonical URI (model property)."""
        return obj.uri
class FullContentNodeSerializer(ContentNodeSerializer):
    """
    Recursive variant of ``ContentNodeSerializer`` to use for ``/full`` action.
    """
    children = serializers.SerializerMethodField()
    def get_children(self, obj):
        """Serialize children recursively (depth-first) instead of as hyperlinks."""
        return [
            FullContentNodeSerializer(node, context=self.context).data
            for node in obj.children.all()
        ]
class ContentNodeRelationSerializer(serializers.ModelSerializer):
    """Serializer for ContentNodeRelation (source -> kind -> target content nodes)."""
    jurisdiction = JurisdictionHyperlinkField(required=True)
    source = ContentNodeHyperlinkField(style={'base_template': 'input.html'})
    kind = TermHyperlinkField()
    target = ContentNodeHyperlinkField(style={'base_template': 'input.html'})
    class Meta:
        model = ContentNodeRelation
        fields = '__all__'
# CONTENT CORRELATIONS
################################################################################
class ContentCorrelationSerializer(serializers.ModelSerializer):
    """Serializer for ContentCorrelation with hyperlinks to its content-standard relations."""
    jurisdiction = JurisdictionHyperlinkField(required=True)
    license = TermHyperlinkField()
    subjects = TermHyperlinkField(many=True)
    education_levels = TermHyperlinkField(many=True)
    relations = ContentStandardRelationHyperlinkField(many=True)
    class Meta:
        model = ContentCorrelation
        fields = '__all__'
class ContentStandardRelationSerializer(serializers.ModelSerializer):
    """Serializer for ContentStandardRelation; jurisdiction is derived through the correlation."""
    jurisdiction = JurisdictionHyperlinkField(source='correlation.jurisdiction', required=False)
    correlation = ContentCorrelationHyperlinkField(required=True)
    contentnode = ContentNodeHyperlinkField(style={'base_template': 'input.html'})
    kind = TermHyperlinkField()
    standardnode = StandardNodeHyperlinkField(style={'base_template': 'input.html'})
    class Meta:
        model = ContentStandardRelation
        fields = '__all__'
|
from collections.abc import MutableMapping
from dask.utils import stringify
from .utils import log_errors
class PublishExtension:
    """An extension for the scheduler to manage named, published datasets.

    Registers the following handlers on the scheduler:
    * publish_list
    * publish_put
    * publish_get
    * publish_delete
    """

    def __init__(self, scheduler):
        self.scheduler = scheduler
        self.datasets = dict()  # name -> {"data": ..., "keys": [...]}
        handlers = {
            "publish_list": self.list,
            "publish_put": self.put,
            "publish_get": self.get,
            "publish_delete": self.delete,
        }
        self.scheduler.handlers.update(handlers)
        self.scheduler.extensions["publish"] = self

    def put(self, keys=None, data=None, name=None, override=False, client=None):
        """Publish a dataset under *name*, pinning *keys* so they stay in memory.

        :raises KeyError: when *name* already exists and *override* is false.
        """
        with log_errors():
            if not override and name in self.datasets:
                raise KeyError("Dataset %s already exists" % name)
            # Register interest under a synthetic client id so the scheduler
            # does not release the published keys.
            self.scheduler.client_desires_keys(keys, f"published-{stringify(name)}")
            self.datasets[name] = {"data": data, "keys": keys}
            return {"status": "OK", "name": name}

    def delete(self, name=None):
        """Unpublish *name* and release its keys; unknown names are a no-op."""
        with log_errors():
            out = self.datasets.pop(name, {"keys": []})
            self.scheduler.client_releases_keys(
                out["keys"], f"published-{stringify(name)}"
            )

    def list(self, *args):
        """Return all published dataset names, sorted by string representation."""
        with log_errors():
            # sorted() already returns a list — the old list(sorted(...)) wrapper
            # was redundant (flake8-comprehensions C413).
            return sorted(self.datasets.keys(), key=str)

    def get(self, name=None, client=None):
        """Return the stored record for *name* ({"data": ..., "keys": ...}) or None."""
        with log_errors():
            return self.datasets.get(name, None)
class Datasets(MutableMapping):
    """A dict-like wrapper around :class:`Client` dataset methods.

    Mutating operations and iteration are rejected with a helpful TypeError when
    the client is asynchronous, since the underlying calls return coroutines.

    Parameters
    ----------
    client : distributed.client.Client
    """

    __slots__ = ("_client",)

    def __init__(self, client):
        self._client = client
    def __getitem__(self, key):
        # When client is asynchronous, it returns a coroutine
        return self._client.get_dataset(key)
    def __setitem__(self, key, value):
        """Publish *value* under *key* (synchronous clients only)."""
        if self._client.asynchronous:
            # 'await obj[key] = value' is not supported by Python as of 3.8
            raise TypeError(
                "Can't use 'client.datasets[name] = value' when client is "
                "asynchronous; please use 'client.publish_dataset(name=value)' instead"
            )
        self._client.publish_dataset(value, name=key)
    def __delitem__(self, key):
        """Unpublish *key* (synchronous clients only)."""
        if self._client.asynchronous:
            # 'await del obj[key]' is not supported by Python as of 3.8
            raise TypeError(
                "Can't use 'del client.datasets[name]' when client is asynchronous; "
                "please use 'client.unpublish_dataset(name)' instead"
            )
        return self._client.unpublish_dataset(key)
    def __iter__(self):
        """Iterate dataset names (synchronous clients; use ``async for`` otherwise)."""
        if self._client.asynchronous:
            raise TypeError(
                "Can't invoke iter() or 'for' on client.datasets when client is "
                "asynchronous; use 'async for' instead"
            )
        yield from self._client.list_datasets()
    def __aiter__(self):
        """Asynchronously iterate dataset names (asynchronous clients only)."""
        if not self._client.asynchronous:
            raise TypeError(
                "Can't invoke 'async for' on client.datasets when client is "
                "synchronous; use iter() or 'for' instead"
            )
        async def _():
            for key in await self._client.list_datasets():
                yield key
        return _()
    def __len__(self):
        """Return the number of published datasets (synchronous clients only)."""
        if self._client.asynchronous:
            # 'await len(obj)' is not supported by Python as of 3.8
            raise TypeError(
                "Can't use 'len(client.datasets)' when client is asynchronous; "
                "please use 'len(await client.list_datasets())' instead"
            )
        return len(self._client.list_datasets())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 16:29:33 2019
@author: jlee
"""
import time
start_time = time.time()
import numpy as np
import glob, os
import g0_init_cfg as ic
# ----- Importing IRAF from the root directory ----- #
# IRAF must be imported from its login directory; temporarily chdir there.
current_dir = os.getcwd()
os.chdir(ic.dir_iraf)
from pyraf import iraf
from pyraf.iraf import gemini, gmos
os.chdir(current_dir)
iraf.chdir(current_dir)
# Reset task parameters so stale values from earlier runs do not leak in.
iraf.unlearn('gfreduce')
iraf.unlearn('gswavelength')
# ---------- Wavelength solution ---------- #
# Exactly one flat frame is expected for the standard star.
flat = np.loadtxt(ic.lst_flat, dtype=str)
if (flat.size > 1):
    raise ValueError("Please check if there is only one flat image for the standard star.")
flat0 = flat.item(0)
# Extract the arc
# Exactly one arc frame is expected for the standard star.
arc = np.loadtxt(ic.lst_arc, dtype=str)
if (arc.size > 1):
    raise ValueError("Please check if there is only one arc image for the standard star.")
arc0 = arc.item(0)
# Remove products of any previous run (g*, rg*, erg* prefixed images).
iraf.imdelete('g@'+ic.lst_arc)
iraf.imdelete('rg@'+ic.lst_arc)
iraf.imdelete('erg@'+ic.lst_arc)
# Extract the arc spectrum using the fiber traces from the reduced flat.
iraf.gfreduce(arc0, rawpath=ic.rawdir, fl_extract='yes', recenter='no',
              trace='no', reference='erg'+flat0, fl_bias='no',
              fl_over='yes', fl_trim='yes', mdffile=ic.nmdf, mdfdir='./',
              slits=ic.cslit, fl_fluxcal='no', fl_gscrrej='no',
              fl_wavtran='no', fl_skysub='no', fl_inter='no')
# ----- Measure the wavelength solution ----- #
iraf.sleep(10.0)
iraf.gswavelength('erg'+arc0, fl_inter='yes',
                  nlost=10, ntarget=15, threshold=25,
                  coordlis='gmos$data/GCALcuar.dat')
'''
----- Interactive task after gswavelength -----
Examine identifications interactively? (Enter)
(IRAF graphics of spectrum displaying...)
"The spectrum window"
- "w" + "e" (left bottom) + "e" (right top) : zoom-in
- "w" + "a" : zoom-out
- "d" : delete the line
- "m" : mark the line
- "f" : jump to the parabola window
- "q" : quitting the interactive task
"The parabola window"
- "d" : jump to the spectrum window
- "f" : fit the line again
- "q" : return to the spectrum window
For the two-slit mode, you have to do the manual check twice.
Fit dispersion function interactively? (no|yes|NO|YES) ('NO'): Enter
Output file : erg[ARC].fits, database/aperg[ARC]_[1,2], database/iderg[ARC]_[1,2]
'''
# Printing the running time
print('--- %.4f seconds ---' %(time.time()-start_time))
|
__author__ = 'sibirrer'
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
from lenstronomy.LensModel.Profiles.cored_density import CoredDensity
from lenstronomy.LensModel.Profiles.cored_density_2 import CoredDensity2
from lenstronomy.LensModel.Profiles.cored_density_exp import CoredDensityExp
from lenstronomy.LensModel.Profiles.convergence import Convergence
__all__ = ['CoredDensityMST']
class CoredDensityMST(LensProfileBase):
    """
    approximate mass-sheet transform of a density core. This routine takes the parameters of the density core and
    subtracts a mass=sheet that approximates the cored profile in it's center to counter-act (in approximation) this
    model. This allows for better sampling of the mass-sheet transformed quantities that do not have strong covariances.
    Attention!!! The interpretation of the result is that the mass sheet as 'CONVERGENCE' that is present needs to be
    subtracted in post-processing.
    """
    param_names = ['lambda_approx', 'r_core', 'center_x', 'center_y']
    lower_limit_default = {'lambda_approx': -1, 'r_core': 0, 'center_x': -100, 'center_y': -100}
    upper_limit_default = {'lambda_approx': 10, 'r_core': 100, 'center_x': 100, 'center_y': 100}

    def __init__(self, profile_type='CORED_DENSITY'):
        # Dispatch table instead of an if/elif chain; unknown keys raise the
        # same ValueError as before.
        profile_classes = {
            'CORED_DENSITY': CoredDensity,
            'CORED_DENSITY_2': CoredDensity2,
            'CORED_DENSITY_EXP': CoredDensityExp,
        }
        if profile_type not in profile_classes:
            raise ValueError('profile_type %s not supported for CoredDensityMST instance.' % profile_type)
        self._profile = profile_classes[profile_type]()
        self._convergence = Convergence()
        super(CoredDensityMST, self).__init__()

    def function(self, x, y, lambda_approx, r_core, center_x=0, center_y=0):
        """
        lensing potential of approximate mass-sheet correction

        :param x: x-coordinate
        :param y: y-coordinate
        :param lambda_approx: approximate mass sheet transform
        :param r_core: core radius of the cored density profile
        :param center_x: x-center of the profile
        :param center_y: y-center of the profile
        :return: lensing potential correction
        """
        kappa_sheet = 1 - lambda_approx
        pot_core = self._profile.function(x, y, kappa_sheet, r_core, center_x, center_y)
        pot_sheet = self._convergence.function(x, y, kappa_sheet, center_x, center_y)
        return pot_core - pot_sheet

    def derivatives(self, x, y, lambda_approx, r_core, center_x=0, center_y=0):
        """
        deflection angles of approximate mass-sheet correction

        :param x: x-coordinate
        :param y: y-coordinate
        :param lambda_approx: approximate mass sheet transform
        :param r_core: core radius of the cored density profile
        :param center_x: x-center of the profile
        :param center_y: y-center of the profile
        :return: alpha_x, alpha_y
        """
        kappa_sheet = 1 - lambda_approx
        ax_core, ay_core = self._profile.derivatives(x, y, kappa_sheet, r_core, center_x, center_y)
        ax_sheet, ay_sheet = self._convergence.derivatives(x, y, kappa_sheet, center_x, center_y)
        return ax_core - ax_sheet, ay_core - ay_sheet

    def hessian(self, x, y, lambda_approx, r_core, center_x=0, center_y=0):
        """
        Hessian terms of approximate mass-sheet correction

        :param x: x-coordinate
        :param y: y-coordinate
        :param lambda_approx: approximate mass sheet transform
        :param r_core: core radius of the cored density profile
        :param center_x: x-center of the profile
        :param center_y: y-center of the profile
        :return: df/dxx, df/dyy, df/dxy
        """
        kappa_sheet = 1 - lambda_approx
        fxx_core, fyy_core, fxy_core = self._profile.hessian(x, y, kappa_sheet, r_core, center_x, center_y)
        fxx_sheet, fyy_sheet, fxy_sheet = self._convergence.hessian(x, y, kappa_sheet, center_x, center_y)
        return fxx_core - fxx_sheet, fyy_core - fyy_sheet, fxy_core - fxy_sheet
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI network_access_time_date_conditions API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.0.0', reason='version does not match')
def is_valid_get_all_network_access_time_conditions(json_schema_validate, obj):
    """Return True when *obj* looks like a valid get-all REST response.

    Asserts the usual response-wrapper attributes exist, then validates the
    JSON body against the endpoint's schema.
    """
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_ab916b19789c59b79dddbc2d0a3c57fc_v3_0_0').validate(obj.response)
    return True
def get_all_network_access_time_conditions(api):
    """Invoke the get-all endpoint and hand back the raw response object."""
    return api.network_access_time_date_conditions.get_all_network_access_time_conditions()
@pytest.mark.network_access_time_date_conditions
def test_get_all_network_access_time_conditions(api, validator):
    """Happy path: the live response must satisfy the schema validator."""
    try:
        response = get_all_network_access_time_conditions(api)
        assert is_valid_get_all_network_access_time_conditions(validator, response)
    except Exception as original_e:
        # A failure is acceptable only as a schema or request error.
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def get_all_network_access_time_conditions_default(api):
    """Invoke the get-all endpoint with default (no) arguments."""
    return api.network_access_time_date_conditions.get_all_network_access_time_conditions()
@pytest.mark.network_access_time_date_conditions
def test_get_all_network_access_time_conditions_default(api, validator):
    """Default-argument call: schema/request/type errors are acceptable."""
    try:
        response = get_all_network_access_time_conditions_default(api)
        assert is_valid_get_all_network_access_time_conditions(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_create_network_access_time_condition(json_schema_validate, obj):
    """Return True when *obj* looks like a valid create-condition response.

    Asserts the usual response-wrapper attributes exist, then validates the
    JSON body against the endpoint's schema.
    """
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_784b314d32b258a1b53c5c84cf84d396_v3_0_0').validate(obj.response)
    return True
def create_network_access_time_condition(api):
    """Exercise the create endpoint with a fully populated sample payload."""
    request_kwargs = dict(
        active_validation=False,
        attribute_id='string',
        attribute_name='string',
        attribute_value='string',
        children=[{'conditionType': 'string', 'isNegate': True}],
        condition_type='string',
        dates_range={'startDate': 'string', 'endDate': 'string'},
        dates_range_exception={'startDate': 'string', 'endDate': 'string'},
        description='string',
        dictionary_name='string',
        dictionary_value='string',
        hours_range={'startTime': 'string', 'endTime': 'string'},
        hours_range_exception={'startTime': 'string', 'endTime': 'string'},
        id='string',
        is_negate=True,
        name='string',
        operator='string',
        payload=None,
        week_days=['string'],
        week_days_exception=['string'],
    )
    return api.network_access_time_date_conditions.create_network_access_time_condition(**request_kwargs)
@pytest.mark.network_access_time_date_conditions
def test_create_network_access_time_condition(api, validator):
    """Happy path: creating a condition must yield a schema-valid response."""
    try:
        response = create_network_access_time_condition(api)
        assert is_valid_create_network_access_time_condition(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def create_network_access_time_condition_default(api):
    """Exercise the create endpoint with every optional argument left as None."""
    optional_fields = (
        'attribute_id', 'attribute_name', 'attribute_value', 'children',
        'condition_type', 'dates_range', 'dates_range_exception', 'description',
        'dictionary_name', 'dictionary_value', 'hours_range',
        'hours_range_exception', 'id', 'is_negate', 'name', 'operator',
        'payload', 'week_days', 'week_days_exception',
    )
    request_kwargs = {field: None for field in optional_fields}
    request_kwargs['active_validation'] = False
    return api.network_access_time_date_conditions.create_network_access_time_condition(**request_kwargs)
@pytest.mark.network_access_time_date_conditions
def test_create_network_access_time_condition_default(api, validator):
    """Default-argument create: schema/request/type errors are acceptable."""
    try:
        response = create_network_access_time_condition_default(api)
        assert is_valid_create_network_access_time_condition(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_get_network_access_time_condition_by_id(json_schema_validate, obj):
    """Return True when *obj* looks like a valid get-by-id response.

    Asserts the usual response-wrapper attributes exist, then validates the
    JSON body against the endpoint's schema.
    """
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_c941303330bc5615b3eb8d4d2702b874_v3_0_0').validate(obj.response)
    return True
def get_network_access_time_condition_by_id(api):
    """Fetch a single time/date condition by its (placeholder) id."""
    return api.network_access_time_date_conditions.get_network_access_time_condition_by_id(id='string')
@pytest.mark.network_access_time_date_conditions
def test_get_network_access_time_condition_by_id(api, validator):
    """Happy path: fetching by id must yield a schema-valid response."""
    try:
        response = get_network_access_time_condition_by_id(api)
        assert is_valid_get_network_access_time_condition_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def get_network_access_time_condition_by_id_default(api):
    """Fetch by id using only the required argument (same call as above)."""
    return api.network_access_time_date_conditions.get_network_access_time_condition_by_id(id='string')
@pytest.mark.network_access_time_date_conditions
def test_get_network_access_time_condition_by_id_default(api, validator):
    """Default-argument fetch: schema/request/type errors are acceptable."""
    try:
        response = get_network_access_time_condition_by_id_default(api)
        assert is_valid_get_network_access_time_condition_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_update_network_access_time_condition_by_id(json_schema_validate, obj):
    """Return True when *obj* looks like a valid update-by-id response.

    Asserts the usual response-wrapper attributes exist, then validates the
    JSON body against the endpoint's schema.
    """
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_11232a518d5655f69e8687c9c98740c6_v3_0_0').validate(obj.response)
    return True
def update_network_access_time_condition_by_id(api):
    """Exercise the update endpoint with a fully populated sample payload."""
    request_kwargs = dict(
        active_validation=False,
        attribute_id='string',
        attribute_name='string',
        attribute_value='string',
        children=[{'conditionType': 'string', 'isNegate': True}],
        condition_type='string',
        dates_range={'startDate': 'string', 'endDate': 'string'},
        dates_range_exception={'startDate': 'string', 'endDate': 'string'},
        description='string',
        dictionary_name='string',
        dictionary_value='string',
        hours_range={'startTime': 'string', 'endTime': 'string'},
        hours_range_exception={'startTime': 'string', 'endTime': 'string'},
        id='string',
        is_negate=True,
        name='string',
        operator='string',
        payload=None,
        week_days=['string'],
        week_days_exception=['string'],
    )
    return api.network_access_time_date_conditions.update_network_access_time_condition_by_id(**request_kwargs)
@pytest.mark.network_access_time_date_conditions
def test_update_network_access_time_condition_by_id(api, validator):
    """Happy path: updating by id must yield a schema-valid response."""
    try:
        response = update_network_access_time_condition_by_id(api)
        assert is_valid_update_network_access_time_condition_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def update_network_access_time_condition_by_id_default(api):
    """Exercise the update endpoint with only the id set; all optionals None."""
    optional_fields = (
        'attribute_id', 'attribute_name', 'attribute_value', 'children',
        'condition_type', 'dates_range', 'dates_range_exception', 'description',
        'dictionary_name', 'dictionary_value', 'hours_range',
        'hours_range_exception', 'is_negate', 'name', 'operator', 'payload',
        'week_days', 'week_days_exception',
    )
    request_kwargs = {field: None for field in optional_fields}
    request_kwargs['active_validation'] = False
    request_kwargs['id'] = 'string'
    return api.network_access_time_date_conditions.update_network_access_time_condition_by_id(**request_kwargs)
@pytest.mark.network_access_time_date_conditions
def test_update_network_access_time_condition_by_id_default(api, validator):
    """Default-argument update: schema/request/type errors are acceptable."""
    try:
        response = update_network_access_time_condition_by_id_default(api)
        assert is_valid_update_network_access_time_condition_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
def is_valid_delete_network_access_time_condition_by_id(json_schema_validate, obj):
    """Return True when *obj* looks like a valid delete-by-id response.

    Asserts the usual response-wrapper attributes exist, then validates the
    JSON body against the endpoint's schema.
    """
    if not obj:
        return False
    for attribute in ('headers', 'content', 'text', 'response'):
        assert hasattr(obj, attribute)
    json_schema_validate('jsd_e2a697abfe2058d3adc7ad9922f5a5d6_v3_0_0').validate(obj.response)
    return True
def delete_network_access_time_condition_by_id(api):
    """Delete a single time/date condition by its (placeholder) id."""
    return api.network_access_time_date_conditions.delete_network_access_time_condition_by_id(id='string')
@pytest.mark.network_access_time_date_conditions
def test_delete_network_access_time_condition_by_id(api, validator):
    """Happy path: deleting by id must yield a schema-valid response."""
    try:
        response = delete_network_access_time_condition_by_id(api)
        assert is_valid_delete_network_access_time_condition_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest)):
            print(original_e)
            raise original_e
def delete_network_access_time_condition_by_id_default(api):
    """Delete by id using only the required argument (same call as above)."""
    return api.network_access_time_date_conditions.delete_network_access_time_condition_by_id(id='string')
@pytest.mark.network_access_time_date_conditions
def test_delete_network_access_time_condition_by_id_default(api, validator):
    """Default-argument delete: schema/request/type errors are acceptable."""
    try:
        response = delete_network_access_time_condition_by_id_default(api)
        assert is_valid_delete_network_access_time_condition_by_id(validator, response)
    except Exception as original_e:
        with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
            raise original_e
|
#-*- coding: utf-8 -*-
import six
import base64
import select
import logging
from hashlib import sha1
from wsgiref import util
import django
from django.core.wsgi import get_wsgi_application
from django.core.servers.basehttp import WSGIServer, ServerHandler as _ServerHandler, WSGIRequestHandler as _WSGIRequestHandler
from django.conf import settings
from django.core.management.commands import runserver
try:
from django.utils.six.moves import socketserver
except ModuleNotFoundError as e:
import socketserver
from django.utils.encoding import force_str
from ws4redis.websocket import WebSocket
from ws4redis.wsgi_server import WebsocketWSGIServer, HandshakeError, UpgradeRequiredError
# wsgiref strips hop-by-hop headers from responses; replacing its predicate
# with an always-False membership test lets 'Upgrade'/'Connection' through,
# which the websocket handshake response below requires.
util._hoppish = {}.__contains__
logger = logging.getLogger('django.request')
class ServerHandler(_ServerHandler):
    # Respond as HTTP/1.1 (overrides the wsgiref default of HTTP/1.0),
    # which is required for the protocol-upgrade handshake.
    http_version = str("1.1")
class WSGIRequestHandler(_WSGIRequestHandler):
    """Request handler that speaks HTTP/1.1 and uses the local ServerHandler."""
    protocol_version = 'HTTP/1.1'

    def handle(self):
        """Handle multiple requests if necessary."""
        self.close_connection = 1
        self.handle_one_request()

    def handle_one_request(self):
        """Copy of WSGIRequestHandler.handle(), but with different ServerHandler."""
        # Fix: the original carried a second, duplicated docstring-string
        # statement here (dead code); it has been removed.
        self.raw_requestline = self.rfile.readline(65537)
        if len(self.raw_requestline) > 65536:
            # Request line too long: answer 414 without attempting to parse.
            self.requestline = ''
            self.request_version = ''
            self.command = ''
            self.send_error(414)
            return
        if not self.parse_request():  # An error code has been sent, just exit
            return
        handler = ServerHandler(
            self.rfile, self.wfile, self.get_stderr(), self.get_environ()
        )
        handler.request_handler = self  # backpointer for logging
        handler.run(self.server.get_app())
class WebsocketRunServer(WebsocketWSGIServer):
    # RFC 6455 magic GUID used to derive Sec-WebSocket-Accept from the client key.
    WS_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
    # Accepted Sec-WebSocket-Version values, newest first.
    WS_VERSIONS = ('13', '8', '7')
    protocol_version = "HTTP/1.1"

    def upgrade_websocket(self, environ, start_response):
        """
        Attempt to upgrade the socket environ['wsgi.input'] into a websocket enabled connection.

        Raises UpgradeRequiredError when no websocket version header is present,
        and HandshakeError for unsupported versions or a missing/invalid key.
        """
        websocket_version = environ.get('HTTP_SEC_WEBSOCKET_VERSION', '')
        if not websocket_version:
            raise UpgradeRequiredError
        elif websocket_version not in self.WS_VERSIONS:
            raise HandshakeError('Unsupported WebSocket Version: {0}'.format(websocket_version))
        key = environ.get('HTTP_SEC_WEBSOCKET_KEY', '').strip()
        if not key:
            raise HandshakeError('Sec-WebSocket-Key header is missing/empty')
        try:
            key_len = len(base64.b64decode(key))
        except TypeError:
            raise HandshakeError('Invalid key: {0}'.format(key))
        if key_len != 16:
            # 5.2.1 (3)
            raise HandshakeError('Invalid key: {0}'.format(key))
        # Accept token per RFC 6455: base64(sha1(key + GUID)).
        sec_ws_accept = base64.b64encode(sha1(six.b(key) + self.WS_GUID).digest())
        if six.PY3:
            sec_ws_accept = sec_ws_accept.decode('ascii')
        headers = [
            ('Upgrade', 'websocket'),
            ('Connection', 'Upgrade'),
            ('Sec-WebSocket-Accept', sec_ws_accept),
            ('Sec-WebSocket-Version', str(websocket_version))
        ]
        # Echo the requested subprotocol back to the client, if any.
        if environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL') is not None:
            headers.append(('Sec-WebSocket-Protocol', environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')))
        logger.debug('WebSocket request accepted, switching protocols')
        start_response(force_str('101 Switching Protocols'), headers)
        # Flush the 101 response before handing the raw stream to WebSocket.
        six.get_method_self(start_response).finish_content()
        # NOTE(review): newer Django wraps wsgi.input in a LimitedStream-like
        # object exposing `.stream`; version cutoff (2,1,5) taken as-is — confirm.
        if (django.VERSION[:3] >= (2,1,5)):
            wsgi_input = environ['wsgi.input'].stream
        else:
            wsgi_input = environ['wsgi.input']
        return WebSocket(wsgi_input)

    def select(self, rlist, wlist, xlist, timeout=None):
        # Thin delegation to select.select (overridable hook).
        return select.select(rlist, wlist, xlist, timeout)
def run(addr, port, wsgi_handler, ipv6=False, threading=False, **kwargs):
    """
    Function to monkey patch the internal Django command: manage.py runserver.

    Builds a threading WSGI server and serves *wsgi_handler* forever.
    Raises if threading is disabled, since websockets need it.
    """
    logger.info('Websocket support is enabled')
    if not threading:
        raise Exception("Django's Websocket server must run with threading enabled")
    threaded_server_cls = type(
        'WSGIServer',
        (socketserver.ThreadingMixIn, WSGIServer),
        {'daemon_threads': True},
    )
    server = threaded_server_cls((addr, port), WSGIRequestHandler, ipv6=ipv6)
    server.set_app(wsgi_handler)
    server.serve_forever()
# Monkey patch Django's runserver command so it serves websockets as well.
runserver.run = run
# Build both apps once at import time; `application` below dispatches between them.
_django_app = get_wsgi_application()
_websocket_app = WebsocketRunServer()
_websocket_url = getattr(settings, 'WEBSOCKET_URL')
def application(environ, start_response):
    """WSGI entry point: route websocket-prefixed paths to the websocket
    app, everything else to the regular Django app."""
    request_path = environ.get('PATH_INFO')
    if _websocket_url and request_path.startswith(_websocket_url):
        return _websocket_app(environ, start_response)
    return _django_app(environ, start_response)
|
import pathlib
from typing import Callable, Dict
import os
import logging
from snorkel.classification import cross_entropy_with_probs
import torch
from torch import Tensor
from torch.optim import SGD
from torch.optim.optimizer import Optimizer
from knodle.trainer.utils.utils import check_and_return_device, set_seed
logger = logging.getLogger(__name__)
class TrainerConfig:
    """Default, minimum-sufficient configuration shared by all Trainers."""

    def __init__(
            self,
            criterion: Callable[[Tensor, Tensor], float] = cross_entropy_with_probs,
            batch_size: int = 32,
            optimizer: Optimizer = None,
            lr: float = 0.01,
            output_classes: int = 2,
            class_weights: Tensor = None,
            epochs: int = 3,
            seed: int = None,
            grad_clipping: int = None,
            device: str = None,
            caching_folder: str = os.path.join(pathlib.Path().absolute(), "cache"),
            caching_suffix: str = "",
            saved_models_dir: str = None
    ):
        """
        A default and minimum sufficient configuration of a Trainer instance.
        :param criterion: a usual PyTorch criterion; computes a gradient according to a given loss function
        :param batch_size: a usual PyTorch batch_size; the number of training examples utilized in one training iteration
        :param optimizer: a usual PyTorch optimizer; which is used to solve optimization problems by minimizing the
        function
        :param lr: a usual PyTorch learning rate; tuning parameter in an optimization algorithm that determines the step
        size at each iteration while moving toward a minimum of a loss function
        :param output_classes: the number of classes used in classification
        :param class_weights: introduce the weight of each class. By default, all classes have the same weights 1.0.
        :param epochs: the number of epochs the classification model will be trained
        :param seed: the desired seed for generating random numbers
        :param grad_clipping: if set to True, gradient norm of an iterable of parameters will be clipped
        :param device: what device the model will be trained on (CPU/CUDA)
        :param caching_folder: a path to the folder where cache will be saved (default: root/cache)
        :param caching_suffix: a specific index that could be added to the caching file name (e.g. in WSCrossWeigh for
        sample weights calculated in different iterations and stored in different files.)
        :param saved_models_dir: a path to the folder where trained models will be stored. If None, the trained models
        won't be stored.
        """
        self.seed = seed
        # Only seed the RNGs when a seed was explicitly requested.
        if self.seed is not None:
            set_seed(seed)
        self.caching_suffix = caching_suffix
        self.caching_folder = caching_folder
        os.makedirs(self.caching_folder, exist_ok=True)
        logger.info(f"The cache will be saved to {self.caching_folder} folder")
        # create directory where saved models will be stored
        if saved_models_dir:
            self.saved_models_dir = saved_models_dir
            os.makedirs(self.saved_models_dir, exist_ok=True)
        else:
            # Fall back to the cache folder when no explicit model dir is given.
            self.saved_models_dir = caching_folder
        logger.info(f"The trained models will be saved to the {self.saved_models_dir} directory.")
        self.criterion = criterion
        self.lr = lr
        self.batch_size = batch_size
        self.output_classes = output_classes
        self.grad_clipping = grad_clipping
        # An explicit device string wins; otherwise auto-detect (CUDA vs CPU).
        self.device = torch.device(device) if device is not None else check_and_return_device()
        logger.info(f"Model will be trained on {self.device}")
        if epochs <= 0:
            raise ValueError("Epochs needs to be positive")
        self.epochs = epochs
        if optimizer is None:
            logger.info(f"Defaulting to SGD optimizer as none specified in the config.")
            self.optimizer = SGD
        else:
            self.optimizer = optimizer
        if class_weights is None:
            # Uniform weighting when nothing was provided.
            self.class_weights = torch.tensor([1.0] * self.output_classes)
        else:
            if len(class_weights) != self.output_classes:
                raise Exception("Wrong class sample_weights initialisation!")
            self.class_weights = class_weights
class BaseTrainerConfig(TrainerConfig):
    def __init__(
            self,
            filter_non_labelled: bool = True,
            other_class_id: int = None,
            evaluate_with_other_class: bool = False,
            ids2labels: Dict = None,
            max_rules: int = None,
            min_coverage: float = None,
            drop_rules: bool = False,
            **kwargs
    ):
        """
        Additionally provided parameters needed for handling the cases where there are data samples with no rule
        matched (filtering OR introducing the other class + training & evaluation with other class).
        :param filter_non_labelled: if True, the samples with no rule matched will be filtered out from the dataset
        :param other_class_id: id of the negative class; if set, the samples with no rule matched will be assigned to it
        :param evaluate_with_other_class: if set to True, the evaluation will be done with respect to the negative class
        (for more details please see knodle/evaluation/other_class_metrics.py file)
        :param ids2labels: dictionary {label id: label}, which is needed to perform evaluation with the negative class
        :param max_rules: if set, the maximal number of rules to keep
        :param min_coverage: if set, the minimal coverage a rule must reach to be kept
        :param drop_rules: whether rules should be dropped (used together with max_rules/min_coverage)
        """
        super().__init__(**kwargs)
        self.filter_non_labelled = filter_non_labelled
        self.other_class_id = other_class_id
        self.evaluate_with_other_class = evaluate_with_other_class
        self.ids2labels = ids2labels
        # Filtering unmatched samples and routing them to a negative class are
        # mutually exclusive strategies.
        if self.other_class_id is not None and self.filter_non_labelled:
            raise ValueError("You can either filter samples with no weak labels or add them to 'other_class_id'")
        logger.debug(f"{self.evaluate_with_other_class} and {self.ids2labels}")
        if self.evaluate_with_other_class and self.ids2labels is None:
            # check if the selected evaluation type is valid
            # Fix: warn via the module logger (was logging.warning on the root logger).
            logger.warning(
                "Labels to label ids correspondence is needed to make other_class specific evaluation. Since it is "
                "absent now, the standard sklearn metrics will be calculated instead."
            )
            self.evaluate_with_other_class = False
        self.max_rules = max_rules
        self.min_coverage = min_coverage
        self.drop_rules = drop_rules
|
import unittest
import urllib2
from flask.ext.testing import LiveServerTestCase, TestCase
from tmb import app as tmbapp, db
from tmb.models import User
class TestTMB(TestCase):
    """Tests for tmb user accounts, backed by a throwaway SQLite database."""

    def setUp(self):
        # Create all tables before each test.
        db.create_all()
        # Fix: super() must receive THIS class, not its parent.
        # super(TestCase, self) starts the MRO lookup *above* TestCase and
        # silently skips TestCase's own setUp.
        super(TestTMB, self).setUp()

    def tearDown(self):
        # Drop all data so tests stay independent.
        db.session.remove()
        db.drop_all()
        # Fix: same super() bug as in setUp.
        super(TestTMB, self).tearDown()

    def create_app(self):
        """Flask-Testing hook: return the application configured for testing."""
        app = tmbapp
        app.config['TESTING'] = True
        app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///../test.db"
        return app

    def test_create_account(self):
        """User.get_or_create must be idempotent per steam id."""
        steamid = "au9a0ou9ea0"
        # First call creates the user.
        User.get_or_create(steamid)
        db.session.commit()
        self.assertEqual(User.query.count(), 1)
        # A repeated call with the same id must not create a duplicate.
        User.get_or_create(steamid)
        db.session.commit()
        self.assertEqual(User.query.count(), 1)
        # A different id creates a second user.
        User.get_or_create("ah9oe0uh")
        db.session.commit()
        self.assertEqual(User.query.count(), 2)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
import pytest
import os
import math
import torch
@pytest.fixture
def image_size():
    """Side length (pixels) of the square test image."""
    return 256
@pytest.fixture
def strides():
    """Downsampling strides, one per feature level."""
    return [8, 16, 32, 64]
@pytest.fixture
def sample(image_size):
    """Ground truth for one image: (boxes / image_size, class labels).

    Boxes carry four coordinates each, scaled into [0, 1] by image_size
    (presumably [x1, y1, x2, y2] corner format — confirm against consumer).
    """
    return (
        torch.tensor([
            [5, 11, 200, 210],
            [149, 40, 227, 121],
            [38, 118, 119, 180],
            [190, 187, 230, 232]], dtype=torch.float32) / image_size,
        torch.tensor([1, 1, 2, 2], dtype=torch.float32))
@pytest.fixture
def output_sample():
    """Load pre-serialized model outputs stored next to this test file."""
    d = os.path.dirname(os.path.abspath(__file__))
    filename = "map_output_sample.pt"
    r = torch.load(os.path.join(d, filename))
    # Detach from autograd so the fixture is read-only test data.
    for c in r:
        c.requires_grad_(False)
    return r
@pytest.fixture
def targets(sample):
    """Wrap the single `sample` into batched lists: ([boxes], [labels])."""
    return ([sample[0]], [sample[1]])
@pytest.fixture
def expected_joint_map_8x8(image_size):
    """Hand-computed joint target map on an 8x8 grid for `sample`.

    Final tensor is channels-first (permute(2, 0, 1)) with channels in the
    order: [l, t, r, b] regression distances (normalized by image_size),
    centerness, bg mask, fg1 mask, fg2 mask. The fg channels presumably
    correspond to the two class ids in `sample` — confirm against consumer.
    """
    # Distances (pixels) from each positive cell to the box's left edge.
    l = torch.tensor([
        [0] * 8,
        [0] * 8,
        [0] * 5 + [11., 0, 75.],
        [0] * 3 + [91., 123., 11., 0, 75.],
        [0] * 3 + [91., 123.] + [0] * 3,
        [0] * 8,
        [0] * 8,
        [0] * 8
    ]).unsqueeze(dim=-1)
    # Distances to the top edge.
    t = torch.tensor([
        [0] * 8,
        [0] * 8,
        [0] * 5 + [24., 0, 24.],
        [0] * 3 + [85., 85., 56, 0, 56.],
        [0] * 3 + [117., 117.] + [0] * 3,
        [0] * 8,
        [0] * 8,
        [0] * 8
    ]).unsqueeze(dim=-1)
    # Distances to the right edge.
    r = torch.tensor([
        [0] * 8,
        [0] * 8,
        [0] * 5 + [67., 0, 3.],
        [0] * 3 + [104., 72., 67., 0., 3.],
        [0] * 3 + [104., 72.] + [0] * 3,
        [0] * 8,
        [0] * 8,
        [0] * 8
    ]).unsqueeze(dim=-1)
    # Distances to the bottom edge.
    b = torch.tensor([
        [0] * 8,
        [0] * 8,
        [0] * 5 + [57., 0, 57.],
        [0] * 3 + [114., 114., 25., 0, 25.],
        [0] * 3 + [82., 82.] + [0] * 3,
        [0] * 8,
        [0] * 8,
        [0] * 8
    ]).unsqueeze(dim=-1)
    # Stack the four distance planes and normalize to [0, 1].
    reg = torch.cat([l, t, r, b], dim=-1)
    reg /= image_size
    # Background mask: 1 where no box covers the cell.
    bg = torch.tensor([
        [1.] * 8,
        [1.] + [0.] * 6 + [1.],
        [1.] + [0.] * 7,
        [1.] + [0.] * 7,
        [1.] + [0.] * 6 + [1.],
        [1.] + [0.] * 6 + [1.],
        [1.] + [0.] * 7,
        [1.] * 6 + [0.] * 2
    ]).unsqueeze(dim=-1)
    # Foreground mask for the first class channel.
    fg1 = torch.tensor([
        [0.] * 8,
        [0.] * 8,
        [0.] * 5 + [1., 0., 1.],
        [0.] * 3 + [1., 1.] + [1., 0., 1.],
        [0.] * 3 + [1., 1.] + [0.] * 3,
        [0.] * 8,
        [0.] * 8,
        [0.] * 8
    ]).unsqueeze(dim=-1)
    # Second foreground channel is empty for this sample.
    fg2 = torch.zeros([8, 8, 1])
    # Centerness per positive cell: sqrt(min/max horizontal * min/max vertical).
    centerness = torch.tensor([
        [0] * 8,
        [0] * 8,
        [0] * 5 +
        [
            math.sqrt(11. / 67. * 24. / 57.), 0.,
            math.sqrt(3. / 75. * 24. / 57.)],
        [0] * 3 +
        [
            math.sqrt(91. / 104. * 85. / 114.),
            math.sqrt(72. / 123. * 85. / 114.),
            math.sqrt(11. / 67. * 25. / 56.), 0.,
            math.sqrt(3. / 75. * 25. / 56.)],
        [0] * 3 +
        [
            math.sqrt(91. / 104. * 82. / 117.),
            math.sqrt(72. / 123. * 82. / 117.)] +
        [0] * 3,
        [0] * 8,
        [0] * 8,
        [0] * 8
    ]).unsqueeze(dim=-1)
    # Concatenate all channels last, then move channels to the front.
    result = torch.cat([reg, centerness, bg, fg1, fg2], dim=-1)
    result = result.permute(2, 0, 1)
    return result
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Logging utility tooling."""
import codecs
import copy
import json
import logging
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import Dict, Sequence, Text, Union, cast # noqa pylint: disable=unused-import
except ImportError: # pragma: no cover
cast = lambda typ, val: val # noqa pylint: disable=invalid-name
# We only actually need the other imports when running the mypy checks
# Public API of this module.
__all__ = ("setup_logger", "LOGGER_NAME")
# Maps a verbosity count (0..MAX_LOGGING_LEVEL) to a stdlib logging level.
LOGGING_LEVELS = {0: logging.CRITICAL, 1: logging.INFO, 2: logging.DEBUG}  # type: Dict[int, int]
LOGGER_NAME = "aws_encryption_sdk_cli"  # type: str
FORMAT_STRING = "%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s"  # type: str
# Highest verbosity value that maps to a distinct level.
MAX_LOGGING_LEVEL = 2  # type: int
# Placeholder written over redacted KMS plaintext values.
_REDACTED = "<**-redacted-**>"  # type: str
class _KMSKeyRedactingFormatter(logging.Formatter):
    """Log formatter that redacts ``Plaintext`` values from KMS request and response bodies."""

    def __to_str(self, value):  # pylint: disable=no-self-use
        # type: (Union[Text, str, bytes]) -> Text
        """Converts bytes or str to str.
        :param value: Value to convert
        :type value: bytes or str
        :rtype: str
        """
        if isinstance(value, bytes):
            return codecs.decode(value, "utf-8")
        return value

    def __is_kms_encrypt_request(self, record):  # pylint: disable=no-self-use
        # type: (logging.LogRecord) -> bool
        """Determine if a record contains a kms:Encrypt request.
        :param record: Logging record to filter
        :type record: logging.LogRecord
        :rtype: bool
        """
        try:
            # Match on the botocore endpoint logger, its request message, and
            # the KMS Encrypt target header; any mismatch raises and means "no".
            return all(
                (
                    record.name == "botocore.endpoint",
                    record.msg.startswith("Making request"),
                    cast(tuple, record.args)[-1]["headers"]["X-Amz-Target"] == "TrentService.Encrypt",
                )
            )
        except Exception:  # pylint: disable=broad-except
            # Broad on purpose: record shapes vary; anything unexpected is not a match.
            return False

    def __redact_encrypt_request(self, record):
        # type: (logging.LogRecord) -> None
        """Redact the ``Plaintext`` value from a kms:Encrypt request.
        :param record: Logging record to filter
        :type record: logging.LogRecord
        """
        try:
            parsed_body = json.loads(self.__to_str(cast(tuple, record.args)[-1]["body"]))
            parsed_body["Plaintext"] = _REDACTED
            cast(tuple, record.args)[-1]["body"] = json.dumps(parsed_body, sort_keys=True)
        except Exception:  # pylint: disable=broad-except
            # Best effort: if the body cannot be parsed, leave the record untouched.
            return

    def __is_kms_response_with_plaintext(self, record):  # pylint: disable=no-self-use
        # type: (logging.LogRecord) -> bool
        """Determine if a record contains a KMS response with plaintext.
        :param record: Logging record to filter
        :type record: logging.LogRecord
        :rtype: bool
        """
        try:
            return all(
                (
                    record.name == "botocore.parsers",
                    record.msg.startswith("Response body:"),
                    b"KeyId" in cast(tuple, record.args)[0],
                    b"Plaintext" in cast(tuple, record.args)[0],
                )
            )
        except Exception:  # pylint: disable=broad-except
            return False

    def __redact_key_from_response(self, record):
        # type: (logging.LogRecord) -> None
        """Redact the ``Plaintext`` value from a KMS response body.
        :param record: Logging record to filter
        :type record: logging.LogRecord
        """
        try:
            parsed_body = json.loads(self.__to_str(cast(tuple, record.args)[0]))
            parsed_body["Plaintext"] = _REDACTED
            # record.args must stay a tuple; rebuild it with the redacted body first.
            new_args = (json.dumps(parsed_body, sort_keys=True),) + cast(tuple, record.args)[1:]
            record.args = new_args
        except Exception:  # pylint: disable=broad-except
            return

    def __redact_record(self, record):
        # type: (logging.LogRecord) -> logging.LogRecord
        """Redact any values from a record, as necessary.
        :param record: Logging record to filter
        :type record: logging.LogRecord
        """
        # Work on a deep copy so other handlers still see the original record.
        _record = copy.deepcopy(record)
        if self.__is_kms_encrypt_request(_record):
            self.__redact_encrypt_request(_record)
        elif self.__is_kms_response_with_plaintext(_record):
            self.__redact_key_from_response(_record)
        return _record

    def format(self, record):
        # type: (logging.LogRecord) -> str
        """Format the specified record as text, redacting plaintext KMS data keys if found.
        :param record: Logging record to filter
        :type record: logging.LogRecord
        """
        _record = self.__redact_record(record)
        return super(_KMSKeyRedactingFormatter, self).format(_record)
class _BlacklistFilter(logging.Filter): # pylint: disable=too-few-public-methods
"""Logging filter that allows blacklisting of certain logger names.
:param str *args: logger names to ignore
"""
def __init__(self, *args):
# type: (Union[Text, str]) -> None
"""Creates internal blacklist."""
super(_BlacklistFilter, self).__init__()
self.__blacklist = args
def filter(self, record):
# type: (logging.LogRecord) -> bool
"""Determines whether to filter record.
:param record: Logging record to filter
:type record: logging.LogRecord
:rtype: bool
"""
return record.name not in self.__blacklist
def _logging_levels(verbosity, quiet):
    # type: (int, bool) -> Sequence[int]
    """Determine the local and root logging levels for the requested verbosity.

    :param int verbosity: Requested level of verbosity
    :param bool quiet: Suppresses all logging when true
    :returns: local and root logging levels
    :rtype: tuple of int
    """
    if quiet:
        return logging.CRITICAL, logging.CRITICAL
    if verbosity is None or verbosity < 1:
        return logging.WARNING, logging.CRITICAL
    # Verbosity first raises the local logger, then spills over to the root logger.
    local = min(verbosity, MAX_LOGGING_LEVEL)
    root = min(verbosity - local, MAX_LOGGING_LEVEL)
    return LOGGING_LEVELS[local], LOGGING_LEVELS[root]
def setup_logger(verbosity, quiet):
    # type: (int, bool) -> None
    """Set up the CLI logger and the root logger with redacting stream handlers.

    :param int verbosity: Requested level of verbosity
    :param bool quiet: Suppresses all logging when true
    """
    local_level, root_level = _logging_levels(verbosity, quiet)
    formatter = _KMSKeyRedactingFormatter(FORMAT_STRING)

    def _attach_stream_handler(target_logger, level, record_filter=None):
        # One formatter-equipped handler per logger; optional blacklist filter.
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        if record_filter is not None:
            handler.addFilter(record_filter)
        target_logger.setLevel(level)
        target_logger.addHandler(handler)

    _attach_stream_handler(logging.getLogger(LOGGER_NAME), local_level)
    # The root handler ignores our own logger so records are not emitted twice.
    _attach_stream_handler(logging.getLogger(), root_level, _BlacklistFilter(LOGGER_NAME))
|
def chain(*iters):
    """Lazily yield every item from each iterable, in order (cf. itertools.chain)."""
    for iterable in iters:
        yield from iterable
|
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.test import Test
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.services.kafka.version import LATEST_0_8_2, TRUNK
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.console_consumer import ConsoleConsumer, is_int
class ClientCompatibilityTest(Test):
    """Documents client/broker version incompatibilities between 0.8.X and 0.9.X."""

    def __init__(self, test_context):
        super(ClientCompatibilityTest, self).__init__(test_context=test_context)

    def setUp(self):
        """Start a 1-node ZK and a 3-node 0.8.2 Kafka cluster with one test topic."""
        self.topic = "test_topic"
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=LATEST_0_8_2, topics={self.topic: {
                                                                    "partitions": 3,
                                                                    "replication-factor": 3,
                                                                    "min.insync.replicas": 2}})
        self.zk.start()
        self.kafka.start()

        # Producer and consumer
        self.producer_throughput = 10000
        self.num_producers = 1
        self.num_consumers = 1

    def test_producer_back_compatibility(self):
        """Run 0.9.X java producer against 0.8.X brokers.
        This test documents the fact that java producer v0.9.0.0 and later won't run against 0.8.X brokers
        the broker responds to a V1 produce request with a V0 fetch response; the client then tries to parse this V0
        produce response as a V1 produce response, resulting in a BufferUnderflowException
        """
        self.producer = VerifiableProducer(
            self.test_context, self.num_producers, self.kafka, self.topic, max_messages=100,
            throughput=self.producer_throughput, version=TRUNK)

        node = self.producer.nodes[0]
        try:
            self.producer.start()
            self.producer.wait()
            raise Exception("0.9.X java producer should not run successfully against 0.8.X broker")
        # Fix: narrowed from a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt. The producer failure is expected here.
        except Exception:
            # Expected
            pass
        finally:
            self.producer.kill_node(node, clean_shutdown=False)

        # Confirm the failure mode is the documented BufferUnderflowException.
        self.logger.info("Grepping producer log for expected error type")
        node.account.ssh("egrep -m 1 %s %s" % ("\"org\.apache\.kafka\.common\.protocol\.types\.SchemaException.*throttle_time_ms.*: java\.nio\.BufferUnderflowException\"", self.producer.LOG_FILE), allow_fail=False)

    def test_consumer_back_compatibility(self):
        """Run the scala 0.8.X consumer against an 0.9.X cluster.
        Expect 0.8.X scala consumer to fail with buffer underflow. This error is the same as when an 0.9.X producer
        is run against an 0.8.X broker: the broker responds to a V1 fetch request with a V0 fetch response; the
        client then tries to parse this V0 fetch response as a V1 fetch response, resulting in a BufferUnderflowException
        """
        num_messages = 10
        self.producer = VerifiableProducer(
            self.test_context, self.num_producers, self.kafka, self.topic, max_messages=num_messages,
            throughput=self.producer_throughput, version=LATEST_0_8_2)

        # New (0.9.X) consumer should fail; old (0.8.X) consumer should succeed.
        self.consumer = ConsoleConsumer(
            self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-09X",
            consumer_timeout_ms=10000, message_validator=is_int, version=TRUNK)
        self.old_consumer = ConsoleConsumer(
            self.test_context, self.num_consumers, self.kafka, self.topic, group_id="consumer-08X",
            consumer_timeout_ms=10000, message_validator=is_int, version=LATEST_0_8_2)

        self.producer.run()
        self.consumer.run()
        self.old_consumer.run()

        consumed = len(self.consumer.messages_consumed[1])
        old_consumed = len(self.old_consumer.messages_consumed[1])
        assert old_consumed == num_messages, "Expected 0.8.X scala consumer to consume %d, but only got %d" % (num_messages, old_consumed)
        assert consumed == 0, "Expected 0.9.X scala consumer to fail to consume any messages, but got %d" % consumed

        self.logger.info("Grepping consumer log for expected error type")
        node = self.consumer.nodes[0]
        node.account.ssh("egrep -m 1 %s %s" % ("\"java\.nio\.BufferUnderflowException\"", self.consumer.LOG_FILE), allow_fail=False)
|
import os
from django.test import TestCase
from django.core.files import File as DjangoFile
from filer.models.foldermodels import Folder
from filer.models.imagemodels import Image
from filer.models.clipboardmodels import Clipboard
from filer.admin.clipboardadmin import UploadImageFileForm
from filer.tests.helpers import (create_superuser, create_folder_structure,
create_image, create_clipboard_item)
from filer import settings as filer_settings
class FilerApiTests(TestCase):
    """Integration tests for filer's folder, image, clipboard and
    public/private storage APIs."""

    def setUp(self):
        self.superuser = create_superuser()
        self.client.login(username='admin', password='secret')
        self.img = create_image()
        self.image_name = 'test_file.jpg'
        self.filename = os.path.join(os.path.dirname(__file__),
                                     self.image_name)
        self.img.save(self.filename, 'JPEG')

    def tearDown(self):
        self.client.logout()
        os.remove(self.filename)
        # Delete through the model so filer removes the stored files too.
        for img in Image.objects.all():
            img.delete()

    def create_filer_image(self):
        """Create and persist a filer ``Image`` owned by the superuser."""
        # 'rb': the JPEG payload is binary, so it must not go through text
        # decoding; the context manager also avoids leaking the handle.
        with open(self.filename, 'rb') as fh:
            django_file = DjangoFile(fh, name=self.image_name)
            image = Image.objects.create(owner=self.superuser,
                                         original_filename=self.image_name,
                                         file=django_file)
        return image

    def test_create_folder_structure(self):
        create_folder_structure(depth=3, sibling=2, parent=None)
        self.assertEqual(Folder.objects.count(), 26)

    def test_create_and_delete_image(self):
        self.assertEqual(Image.objects.count(), 0)
        image = self.create_filer_image()
        image.save()
        self.assertEqual(Image.objects.count(), 1)
        image = Image.objects.all()[0]
        image.delete()
        self.assertEqual(Image.objects.count(), 0)

    def test_upload_image_form(self):
        self.assertEqual(Image.objects.count(), 0)
        with open(self.filename, 'rb') as fh:
            upload = DjangoFile(fh, name=self.image_name)
            upload_image_form = UploadImageFileForm(
                {'original_filename': self.image_name,
                 'owner': self.superuser.pk},
                {'file': upload})
            # BUG FIX: form validity used to be an `if` guard, so an invalid
            # form made the test pass without asserting anything.
            self.assertTrue(upload_image_form.is_valid(),
                            upload_image_form.errors)
            upload_image_form.save()
        self.assertEqual(Image.objects.count(), 1)

    def test_create_clipboard_item(self):
        image = self.create_filer_image()
        image.save()
        # Get the clipboard of the current user
        clipboard_item = create_clipboard_item(user=self.superuser,
                                               file=image)
        clipboard_item.save()
        self.assertEqual(Clipboard.objects.count(), 1)

    def test_create_icons(self):
        image = self.create_filer_image()
        image.save()
        icons = image.icons
        file_basename = os.path.basename(image.file.path)
        self.assertEqual(len(icons), 3)
        # One thumbnail per configured icon size, named by its geometry.
        for size in ('32', '48', '64'):
            self.assertEqual(
                os.path.basename(icons[size]),
                file_basename + u'.%sx%s_q85_crop_upscale.jpg' % (size, size))

    def test_file_upload_public_destination(self):
        """An image with ``is_public == True`` lands in the public root."""
        image = self.create_filer_image()
        image.is_public = True
        image.save()
        self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT))

    def test_file_upload_private_destination(self):
        """An image with ``is_public == False`` lands in the private root."""
        image = self.create_filer_image()
        image.is_public = False
        image.save()
        self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT))

    def test_file_move_location(self):
        """``_move_file`` relocates content between the private and public
        storage locations (and vice versa)."""
        image = self.create_filer_image()
        image.is_public = False
        image.save()
        self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT))
        image._move_file(filer_settings.FILER_PRIVATEMEDIA_PREFIX,
                         filer_settings.FILER_PUBLICMEDIA_PREFIX)
        image.save()
        self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT))

    def test_file_change_upload_to_destination(self):
        """Toggling ``is_public`` on an existing file physically moves it
        between the private and public directories on save."""
        with open(self.filename, 'rb') as fh:
            django_file = DjangoFile(fh, name=self.image_name)
            image = Image.objects.create(owner=self.superuser,
                                         is_public=False,
                                         original_filename=self.image_name,
                                         file=django_file)
        image.save()
        self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT))
        image.is_public = True
        image.save()
        self.assertTrue(image.file.path.startswith(filer_settings.FILER_PUBLICMEDIA_ROOT))
        self.assertEqual(len(image.icons), 3)
        image.is_public = False
        image.save()
        self.assertTrue(image.file.path.startswith(filer_settings.FILER_PRIVATEMEDIA_ROOT))
        self.assertEqual(len(image.icons), 3)
|
from .bits import Reader
from .iab_tcf import base64_decode
class ConsentV1:
    """A fully-parsed IAB TCF v1.1 consent.

    All fields are decoded eagerly, in wire order, from the bit-packed
    payload.

    :param consent: The consent to process in bytes.
    """

    def __init__(self, consent: bytes):
        reader = Reader(consent)
        self._reader: Reader = reader
        # Fixed-width header fields.
        self.version = reader.read_int(6)
        self.created = reader.read_time()
        self.last_updated = reader.read_time()
        self.cmp_id = reader.read_int(12)
        self.cmp_version = reader.read_int(12)
        self.consent_screen = reader.read_int(6)
        self.consent_language = reader.read_string(2)
        self.vendor_list_version = reader.read_int(12)
        self.purposes_allowed = reader.read_bitfield(24)
        self.max_vendor_id = reader.read_int(16)
        self.is_range_encoding = reader.read_bool()
        # Vendor consents are encoded either as a default value plus a list
        # of exception ranges, or as a plain bitfield over all vendor ids.
        if self.is_range_encoding:
            self.default_consent = reader.read_bool()
            self.num_entries = reader.read_int(12)
            self.range_entries = reader.read_range(self.num_entries)
        else:
            self.consented_vendors = reader.read_bitfield(self.max_vendor_id)

    def is_purpose_allowed(self, id: int) -> bool:
        """Checks if a purpose is allowed or not.

        :param id: Purpose id to check if it's allowed or not.
        """
        purposes = self.purposes_allowed
        return purposes[id] if id in purposes else False

    def is_vendor_allowed(self, id: int) -> bool:
        """Checks if a vendor is allowed or not.

        :param id: Vendor id to check if it's allowed or not.
        """
        if self.is_range_encoding:
            # Ranges invert the default; anything outside gets the default.
            if any(start <= id <= end for start, end in self.range_entries):
                return not self.default_consent
            return self.default_consent
        vendors = self.consented_vendors
        return vendors[id] if id in vendors else False
def decode_v1(consent: str):
    """Decode a base64-encoded v1.1 consent string.

    :param consent: base64 encoded consent string.
    """
    raw = base64_decode(consent)
    return ConsentV1(raw)
|
# -*- coding: utf-8 -*-
from ThymeBoost.trend_models.trend_base_class import TrendBaseModel
import numpy as np
import pandas as pd
class EwmModel(TrendBaseModel):
    """Exponentially weighted moving average trend estimator for the
    ThymeBoost boosting loop.

    Forecasts are flat continuations of the last fitted value.
    """
    model = 'ewm'

    def __init__(self):
        self.model_params = None
        self.fitted = None
        # ROBUSTNESS: initialized empty so __str__ does not raise
        # AttributeError when called before fit().
        self.kwargs = {}

    def __str__(self):
        # Shows None for the alpha until fit() has stored the kwargs.
        return f'{self.model}({self.kwargs.get("ewm_alpha")})'

    def fit(self, y, **kwargs):
        """
        Fit the trend component in the boosting loop for an ewm model.

        Parameters
        ----------
        y : array-like
            The series (current boosting-round residuals) to smooth.
        **kwargs
            Must contain 'ewm_alpha' (pandas ewm smoothing factor) and
            'bias' (value subtracted before smoothing and added back).

        Returns
        -------
        np.ndarray
            Fitted trend values, same length as y.
        """
        self.kwargs = kwargs
        alpha = kwargs['ewm_alpha']
        bias = kwargs['bias']
        # Smooth the bias-adjusted series, then restore the bias.
        debiased = pd.Series(y - bias)
        self.fitted = np.array(debiased.ewm(alpha=alpha).mean()) + bias
        # The flat forecast level is the final fitted value.
        self.model_params = self.fitted[-1]
        return self.fitted

    def predict(self, forecast_horizon, model_params):
        """Repeat the last fitted value across the forecast horizon."""
        return np.tile(model_params, forecast_horizon)
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
'''
Test rollbacks on post-Heartwood chains.
'''
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bitcoind_processes,
connect_nodes_bi,
initialize_chain,
nuparams,
start_node,
start_nodes,
BLOSSOM_BRANCH_ID,
HEARTWOOD_BRANCH_ID,
CANOPY_BRANCH_ID,
)
import logging
import time
# Shared -nuparam sets: every node activates Blossom at height 205 and
# Heartwood at 210; only HAS_CANOPY also schedules Canopy at height 220.
# '-nurejectoldversions=false' keeps old-protocol-version peers connectable.
HAS_CANOPY = [nuparams(BLOSSOM_BRANCH_ID, 205), nuparams(HEARTWOOD_BRANCH_ID, 210), nuparams(CANOPY_BRANCH_ID, 220), '-nurejectoldversions=false']
NO_CANOPY = [nuparams(BLOSSOM_BRANCH_ID, 205), nuparams(HEARTWOOD_BRANCH_ID, 210), '-nurejectoldversions=false']
class PostHeartwoodRollbackTest(BitcoinTestFramework):
    """Test rollbacks on post-Heartwood chains: nodes upgraded to Canopy
    late must reorg onto the same tip as nodes that activated it on time."""

    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain(self.options.tmpdir)

    def setup_nodes(self):
        # Nodes 0/1 know about Canopy from the start; nodes 2/3 do not.
        return start_nodes(4, self.options.tmpdir, extra_args=[
            HAS_CANOPY,
            HAS_CANOPY,
            NO_CANOPY,
            NO_CANOPY
        ])

    def run_test(self):
        # Generate shared state beyond Heartwood activation
        print("Generating shared state beyond Heartwood activation")
        logging.info("Generating initial blocks.")
        self.nodes[0].generate(15)
        self.sync_all()

        # Split network at block 215 (after Heartwood, before Canopy)
        print("Splitting network at block 215 (after Heartwood, before Canopy)")
        self.split_network()

        # Activate Canopy on node 0
        print("Activating Canopy on node 0")
        self.nodes[0].generate(5)
        self.sync_all()

        # Mine past Canopy activation height on node 2
        print("Mining past Canopy activation height on node 2 ")
        self.nodes[2].generate(20)
        self.sync_all()

        # Upgrade node 2 and 3 to Canopy
        print("Upgrading nodes 2 and 3 to Canopy")
        self.nodes[2].stop()
        bitcoind_processes[2].wait()
        self.nodes[2] = start_node(2, self.options.tmpdir, extra_args=HAS_CANOPY)
        self.nodes[3].stop()
        bitcoind_processes[3].wait()
        self.nodes[3] = start_node(3, self.options.tmpdir, extra_args=HAS_CANOPY)

        # Join network
        print("Joining network")
        # (if we used self.sync_all() here and there was a bug, the test would hang)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,1,3)
        connect_nodes_bi(self.nodes,2,3)
        time.sleep(5)

        # Both halves of the network must converge on the same chain.
        node0_blockcount = self.nodes[0].getblockcount()
        node2_blockcount = self.nodes[2].getblockcount()
        assert_equal(node0_blockcount, node2_blockcount, "node 0 blockcount: " + str(node0_blockcount) + ", node 2 blockcount: " + str(node2_blockcount))

        node0_bestblockhash = self.nodes[0].getbestblockhash()
        node2_bestblockhash = self.nodes[2].getbestblockhash()
        # BUG FIX: the failure message used to print node 2's *blockcount*
        # where its bestblockhash belongs.
        assert_equal(node0_bestblockhash, node2_bestblockhash, "node 0 bestblockhash: " + str(node0_bestblockhash) + ", node 2 bestblockhash: " + str(node2_bestblockhash))
# Standard RPC-test entry point: BitcoinTestFramework.main() parses the
# command line, sets up the chain and nodes, and invokes run_test().
if __name__ == '__main__':
    PostHeartwoodRollbackTest().main()
|
"""Helps you output colourised code snippets in ReportLab documents.
Platypus has an 'XPreformatted' flowable for handling preformatted
text, with variations in fonts and colors. If Pygments is installed,
calling 'pygments2xpre' will return content suitable for display in
an XPreformatted object. If it's not installed, you won't get colours.
For a list of available lexers see http://pygments.org/docs/
"""
__all__ = ('pygments2xpre',)
from reportlab.lib.utils import asBytes, getBytesIO, getStringIO, asUnicode, isUnicode
import re
def _2xpre(s,styles):
"Helper to transform Pygments HTML output to ReportLab markup"
s = s.replace('<div class="highlight">','')
s = s.replace('</div>','')
s = s.replace('<pre>','')
s = s.replace('</pre>','')
for k,c in styles+[('p','#000000'),('n','#000000'),('err','#000000')]:
s = s.replace('<span class="%s">' % k,'<span color="%s">' % c)
s = re.sub(r'<span class="%s\s+.*">'% k,'<span color="%s">' % c,s)
s = re.sub(r'<span class=".*">','<span color="#0f0f0f">',s)
return s
def pygments2xpre(s, language="python"):
    """Return markup suitable for XPreformatted.

    When Pygments is not installed the source is returned unchanged
    (i.e. you get no colours).
    """
    try:
        from pygments import highlight
        from pygments.formatters import HtmlFormatter
    except ImportError:
        return s
    from pygments.lexers import get_lexer_by_name

    buf = getStringIO()
    lexer = get_lexer_by_name(language)
    formatter = HtmlFormatter()
    highlight(s, lexer, formatter, buf)

    # Map each Pygments CSS class to its foreground colour.
    styles = []
    for cls, (style, ttype, level) in formatter.class2style.items():
        if cls and style and style.startswith('color:'):
            colour = style.split(';')[0].split(':')[1].strip()
            styles.append((cls, colour))
    return _2xpre(buf.getvalue(), styles)
def convertSourceFiles(filenames):
    """Render each named source file, colourised, into pygments2xpre.pdf.

    Helper function - makes a minimal PDF document with one titled,
    syntax-highlighted section per file.
    """
    from reportlab.platypus import Paragraph, SimpleDocTemplate, XPreformatted
    from reportlab.lib.styles import getSampleStyleSheet
    # Fetch the stylesheet once instead of twice.
    stylesheet = getSampleStyleSheet()
    styT = stylesheet["Title"]
    styC = stylesheet["Code"]
    doc = SimpleDocTemplate("pygments2xpre.pdf")
    story = []
    for filename in filenames:
        story.append(Paragraph(filename, style=styT))
        # BUG FIX: the file handle used to be leaked (open().read()).
        with open(filename, 'r') as src_file:
            src = src_file.read()
        story.append(XPreformatted(pygments2xpre(src), style=styC))
    doc.build(story)
    print('saved pygments2xpre.pdf')
# Minimal CLI: colourise each source file given on the command line into
# a single PDF (see convertSourceFiles).
if __name__=='__main__':
    import sys
    filenames = sys.argv[1:]
    if not filenames:
        print('usage: pygments2xpre.py file1.py [file2.py] [...]')
        sys.exit(0)
    convertSourceFiles(filenames)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
"""Decoder self-attention layer definition."""
import paddle
from paddle import nn
from paddlespeech.t2s.modules.layer_norm import LayerNorm
class DecoderLayer(nn.Layer):
    """Single decoder layer module.

    Applies, in order: masked self-attention over the target sequence,
    source attention over the encoder memory, and a position-wise
    feed-forward block, each with residual connections and layer norm
    (pre-norm or post-norm depending on ``normalize_before``).

    Parameters
    ----------
    size : int
        Input dimension.
    self_attn : paddle.nn.Layer
        Self-attention module instance.
        `MultiHeadedAttention` instance can be used as the argument.
    src_attn : paddle.nn.Layer
        Source-attention module instance (attends over the encoder memory).
        `MultiHeadedAttention` instance can be used as the argument.
    feed_forward : paddle.nn.Layer
        Feed-forward module instance.
        `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance can be used as the argument.
    dropout_rate : float
        Dropout rate.
    normalize_before : bool
        Whether to use layer_norm before the first block.
    concat_after : bool
        Whether to concat attention layer's input and output.
        if True, additional linear will be applied.
        i.e. x -> x + linear(concat(x, att(x)))
        if False, no additional linear will be applied. i.e. x -> x + att(x)
    """

    def __init__(
            self,
            size,
            self_attn,
            src_attn,
            feed_forward,
            dropout_rate,
            normalize_before=True,
            concat_after=False, ):
        """Construct a DecoderLayer object."""
        super().__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        # One LayerNorm per sub-block: self-attn, src-attn, feed-forward.
        self.norm1 = LayerNorm(size)
        self.norm2 = LayerNorm(size)
        self.norm3 = LayerNorm(size)
        self.dropout = nn.Dropout(dropout_rate)
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            # Projects concat(input, attention output) back down to `size`.
            self.concat_linear1 = nn.Linear(size + size, size)
            self.concat_linear2 = nn.Linear(size + size, size)

    def forward(self, tgt, tgt_mask, memory, memory_mask, cache=None):
        """Compute decoded features.

        Parameters
        ----------
        tgt : paddle.Tensor
            Input tensor (#batch, maxlen_out, size).
        tgt_mask : paddle.Tensor
            Mask for input tensor (#batch, maxlen_out).
        memory : paddle.Tensor
            Encoded memory, float32 (#batch, maxlen_in, size).
        memory_mask : paddle.Tensor
            Encoded memory mask (#batch, maxlen_in).
        cache : List[paddle.Tensor]
            List of cached tensors.
            Each tensor shape should be (#batch, maxlen_out - 1, size).

        Returns
        ----------
        paddle.Tensor
            Output tensor(#batch, maxlen_out, size).
        paddle.Tensor
            Mask for output tensor (#batch, maxlen_out).
        paddle.Tensor
            Encoded memory (#batch, maxlen_in, size).
        paddle.Tensor
            Encoded memory mask (#batch, maxlen_in).
        """
        # --- Sub-block 1: masked self-attention over the target ---
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)
        if cache is None:
            tgt_q = tgt
            tgt_q_mask = tgt_mask
        else:
            # compute only the last frame query keeping dim: max_time_out -> 1
            assert cache.shape == [
                tgt.shape[0],
                tgt.shape[1] - 1,
                self.size,
            ], f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
            tgt_q = tgt[:, -1:, :]
            residual = residual[:, -1:, :]
            tgt_q_mask = None
            if tgt_mask is not None:
                # Round-trip through int64 for the slice, then back to bool
                # (presumably because slicing bool tensors is not supported
                # in this paddle version — TODO confirm).
                tgt_mask = paddle.cast(tgt_mask, dtype="int64")
                tgt_q_mask = tgt_mask[:, -1:, :]
                tgt_q_mask = paddle.cast(tgt_q_mask, dtype="bool")
        if self.concat_after:
            tgt_concat = paddle.concat(
                (tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)), axis=-1)
            x = residual + self.concat_linear1(tgt_concat)
        else:
            x = residual + self.dropout(
                self.self_attn(tgt_q, tgt, tgt, tgt_q_mask))
        if not self.normalize_before:
            x = self.norm1(x)
        # --- Sub-block 2: source attention over the encoder memory ---
        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        if self.concat_after:
            x_concat = paddle.concat(
                (x, self.src_attn(x, memory, memory, memory_mask)), axis=-1)
            x = residual + self.concat_linear2(x_concat)
        else:
            x = residual + self.dropout(
                self.src_attn(x, memory, memory, memory_mask))
        if not self.normalize_before:
            x = self.norm2(x)
        # --- Sub-block 3: position-wise feed-forward ---
        residual = x
        if self.normalize_before:
            x = self.norm3(x)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)
        # Re-attach the cached prefix so callers get the full sequence back.
        if cache is not None:
            x = paddle.concat([cache, x], axis=1)
        return x, tgt_mask, memory, memory_mask
|
"""PBR renderer for Python.
Author: Matthew Matl
"""
import sys
import numpy as np
import PIL
from .constants import (RenderFlags, TextAlign, GLTF, BufFlags, TexFlags,
ProgramFlags, DEFAULT_Z_FAR, DEFAULT_Z_NEAR,
SHADOW_TEX_SZ, MAX_N_LIGHTS)
from .shader_program import ShaderProgramCache
from .material import MetallicRoughnessMaterial, SpecularGlossinessMaterial
from .light import PointLight, SpotLight, DirectionalLight
from .font import FontCache
from .utils import format_color_vector
from OpenGL.GL import *
class Renderer(object):
"""Class for handling all rendering operations on a scene.
Note
----
This renderer relies on the existence of an OpenGL context and
does not create one on its own.
Parameters
----------
viewport_width : int
Width of the viewport in pixels.
viewport_height : int
Width of the viewport height in pixels.
point_size : float, optional
Size of points in pixels. Defaults to 1.0.
"""
    def __init__(self, viewport_width, viewport_height, point_size=1.0):
        self.dpscale = 1
        # Scaling needed on retina displays
        if sys.platform == 'darwin':
            self.dpscale = 2
        # These assignments go through the property setters below, which
        # multiply by dpscale.
        self.viewport_width = viewport_width
        self.viewport_height = viewport_height
        self.point_size = point_size
        # Optional framebuffer for offscreen renders (allocated lazily;
        # the *_ms variants hold the multisampled attachments)
        self._main_fb = None
        self._main_cb = None
        self._main_db = None
        self._main_fb_ms = None
        self._main_cb_ms = None
        self._main_db_ms = None
        self._main_fb_dims = (None, None)
        self._shadow_fb = None
        # Clip-range defaults; updated after each render() for use by the
        # depth-buffer readback.
        self._latest_znear = DEFAULT_Z_NEAR
        self._latest_zfar = DEFAULT_Z_FAR
        # Shader Program Cache
        self._program_cache = ShaderProgramCache()
        self._font_cache = FontCache()
        # Resources currently registered with the GL context, tracked so
        # delete() can free them.
        self._meshes = set()
        self._mesh_textures = set()
        self._shadow_textures = set()
        self._texture_alloc_idx = 0
@property
def viewport_width(self):
"""int : The width of the main viewport, in pixels.
"""
return self._viewport_width
@viewport_width.setter
def viewport_width(self, value):
self._viewport_width = self.dpscale * value
@property
def viewport_height(self):
"""int : The height of the main viewport, in pixels.
"""
return self._viewport_height
@viewport_height.setter
def viewport_height(self, value):
self._viewport_height = self.dpscale * value
@property
def point_size(self):
"""float : The size of screen-space points, in pixels.
"""
return self._point_size
@point_size.setter
def point_size(self, value):
self._point_size = float(value)
    def render(self, scene, flags, seg_node_map=None):
        """Render a scene with the given set of flags.

        Parameters
        ----------
        scene : :class:`Scene`
            A scene to render.
        flags : int
            A specification from :class:`.RenderFlags`.
        seg_node_map : dict
            A map from :class:`.Node` objects to (3,) colors for each.
            If specified along with flags set to :attr:`.RenderFlags.SEG`,
            the color image will be a segmentation image.

        Returns
        -------
        color_im : (h, w, 3) uint8 or (h, w, 4) uint8
            If :attr:`RenderFlags.OFFSCREEN` is set, the color buffer. This is
            normally an RGB buffer, but if :attr:`.RenderFlags.RGBA` is set,
            the buffer will be a full RGBA buffer.
        depth_im : (h, w) float32
            If :attr:`RenderFlags.OFFSCREEN` is set, the depth buffer
            in linear units.
        """
        # Update context with meshes and textures
        self._update_context(scene, flags)
        # Render necessary shadow maps. Skipped entirely for depth-only and
        # segmentation renders, which don't use lighting.
        if not bool(flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG):
            for ln in scene.light_nodes:
                # A shadow pass runs only when the light's type matches an
                # enabled SHADOWS_* flag.
                take_pass = False
                if (isinstance(ln.light, DirectionalLight) and
                        bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)):
                    take_pass = True
                elif (isinstance(ln.light, SpotLight) and
                        bool(flags & RenderFlags.SHADOWS_SPOT)):
                    take_pass = True
                elif (isinstance(ln.light, PointLight) and
                        bool(flags & RenderFlags.SHADOWS_POINT)):
                    take_pass = True
                if take_pass:
                    self._shadow_mapping_pass(scene, ln, flags)
        # Make forward pass
        retval = self._forward_pass(scene, flags, seg_node_map=seg_node_map)
        # If necessary, make normals pass
        if flags & (RenderFlags.VERTEX_NORMALS | RenderFlags.FACE_NORMALS):
            self._normals_pass(scene, flags)
        # Update camera settings for retrieving depth buffers
        self._latest_znear = scene.main_camera_node.camera.znear
        self._latest_zfar = scene.main_camera_node.camera.zfar
        return retval
    def render_text(self, text, x, y, font_name='OpenSans-Regular',
                    font_pt=40, color=None, scale=1.0,
                    align=TextAlign.BOTTOM_LEFT):
        """Render text into the current viewport.

        Note
        ----
        This cannot be done into an offscreen buffer.

        Parameters
        ----------
        text : str
            The text to render.
        x : int
            Horizontal pixel location of text.
        y : int
            Vertical pixel location of text.
        font_name : str
            Name of font, from the ``pyrender/fonts`` folder, or
            a path to a ``.ttf`` file.
        font_pt : int
            Height of the text, in font points.
        color : (4,) float
            The color of the text. Default is black.
        scale : int
            Scaling factor for text.
        align : int
            One of the :class:`TextAlign` options which specifies where the
            ``x`` and ``y`` parameters lie on the text. For example,
            :attr:`TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate
            the position of the bottom-left corner of the textbox.
        """
        # Caller coordinates/sizes are in logical pixels; convert to
        # device pixels (retina scaling).
        x *= self.dpscale
        y *= self.dpscale
        font_pt *= self.dpscale
        if color is None:
            color = np.array([0.0, 0.0, 0.0, 1.0])
        else:
            color = format_color_vector(color, 4)
        # Set up viewport for render
        self._configure_forward_pass_viewport(0)
        # Load font
        font = self._font_cache.get_font(font_name, font_pt)
        if not font._in_context():
            font._add_to_context()
        # Load program
        program = self._get_text_program()
        program._bind()
        # Set uniforms: an orthographic projection mapping pixel coordinates
        # onto the [-1, 1] clip-space square.
        p = np.eye(4)
        p[0,0] = 2.0 / self.viewport_width
        p[0,3] = -1.0
        p[1,1] = 2.0 / self.viewport_height
        p[1,3] = -1.0
        program.set_uniform('projection', p)
        program.set_uniform('text_color', color)
        # Draw text
        font.render_string(text, x, y, scale, align)
    def read_color_buf(self):
        """Read and return the current viewport's color buffer.

        Alpha cannot be computed for an on-screen buffer.

        Returns
        -------
        color_im : (h, w, 3) uint8
            The color buffer in RGB byte format.
        """
        # Extract color image from frame buffer (the on-screen front buffer,
        # not an offscreen FBO)
        width, height = self.viewport_width, self.viewport_height
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0)
        glReadBuffer(GL_FRONT)
        color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)
        # Re-format them into numpy arrays
        color_im = np.frombuffer(color_buf, dtype=np.uint8)
        color_im = color_im.reshape((height, width, 3))
        # GL rows start at the bottom; flip to image convention (top row first).
        color_im = np.flip(color_im, axis=0)
        # Resize for macos if needed
        if sys.platform == 'darwin':
            color_im = self._resize_image(color_im, True)
        return color_im
    def read_depth_buf(self):
        """Read and return the current viewport's depth buffer.

        Returns
        -------
        depth_im : (h, w) float32
            The depth buffer in linear units.
        """
        width, height = self.viewport_width, self.viewport_height
        glBindFramebuffer(GL_READ_FRAMEBUFFER, 0)
        glReadBuffer(GL_FRONT)
        depth_buf = glReadPixels(
            0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT
        )
        depth_im = np.frombuffer(depth_buf, dtype=np.float32)
        depth_im = depth_im.reshape((height, width))
        # GL rows start at the bottom; flip to image convention.
        depth_im = np.flip(depth_im, axis=0)
        # Depth exactly 1.0 is the far-plane clear value, i.e. "no geometry".
        inf_inds = (depth_im == 1.0)
        # Map [0, 1] window depth back to NDC [-1, 1] ...
        depth_im = 2.0 * depth_im - 1.0
        z_near, z_far = self._latest_znear, self._latest_zfar
        noninf = np.logical_not(inf_inds)
        # ... then invert the projection to linear eye-space depth.
        # z_far is None for an infinite far plane.
        if z_far is None:
            depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf])
        else:
            depth_im[noninf] = ((2.0 * z_near * z_far) /
                                (z_far + z_near - depth_im[noninf] *
                                (z_far - z_near)))
        # Empty pixels read as 0 depth.
        depth_im[inf_inds] = 0.0
        # Resize for macos if needed
        if sys.platform == 'darwin':
            depth_im = self._resize_image(depth_im)
        return depth_im
    def delete(self):
        """Free all allocated OpenGL resources.

        Safe to call more than once: the tracking sets are reset after each
        pass so already-freed resources are not freed twice.
        """
        # Free shaders
        self._program_cache.clear()
        # Free fonts
        self._font_cache.clear()
        # Free meshes
        for mesh in self._meshes:
            for p in mesh.primitives:
                p.delete()
        # Free textures
        for mesh_texture in self._mesh_textures:
            mesh_texture.delete()
        for shadow_texture in self._shadow_textures:
            shadow_texture.delete()
        # Reset tracking state so a later render can re-register resources.
        self._meshes = set()
        self._mesh_textures = set()
        self._shadow_textures = set()
        self._texture_alloc_idx = 0
        # Free framebuffers
        self._delete_main_framebuffer()
        self._delete_shadow_framebuffer()
    def __del__(self):
        """Best-effort resource cleanup on garbage collection.

        Errors are swallowed deliberately: the GL context may already have
        been torn down by the time the destructor runs.
        """
        try:
            self.delete()
        except Exception:
            pass
###########################################################################
# Rendering passes
###########################################################################
    def _forward_pass(self, scene, flags, seg_node_map=None):
        """Main color/depth/segmentation pass over all visible meshes."""
        # Set up viewport for render
        self._configure_forward_pass_viewport(flags)
        # Clear it. Segmentation renders clear to black so unmapped pixels
        # read as "no object"; normal renders use the scene background.
        if bool(flags & RenderFlags.SEG):
            glClearColor(0.0, 0.0, 0.0, 1.0)
            if seg_node_map is None:
                seg_node_map = {}
        else:
            glClearColor(*scene.bg_color)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # MSAA would blend segmentation colors across object edges, so it is
        # disabled for SEG renders.
        if not bool(flags & RenderFlags.SEG):
            glEnable(GL_MULTISAMPLE)
        else:
            glDisable(GL_MULTISAMPLE)
        # Set up camera matrices
        V, P = self._get_camera_matrices(scene)
        program = None
        # Now, render each object in sorted order
        for node in self._sorted_mesh_nodes(scene):
            mesh = node.mesh
            # Skip the mesh if it's not visible
            if not mesh.is_visible:
                continue
            # If SEG, set color. Nodes absent from seg_node_map are skipped
            # entirely; scalar colors are broadcast to (3,).
            if bool(flags & RenderFlags.SEG):
                if node not in seg_node_map:
                    continue
                color = seg_node_map[node]
                if not isinstance(color, (list, tuple, np.ndarray)):
                    color = np.repeat(color, 3)
                else:
                    color = np.asanyarray(color)
                color = color / 255.0
            for primitive in mesh.primitives:
                # First, get and bind the appropriate program
                program = self._get_primitive_program(
                    primitive, flags, ProgramFlags.USE_MATERIAL
                )
                program._bind()
                # Set the camera uniforms
                program.set_uniform('V', V)
                program.set_uniform('P', P)
                program.set_uniform(
                    'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3]
                )
                if bool(flags & RenderFlags.SEG):
                    program.set_uniform('color', color)
                # Next, bind the lighting (not needed for depth-only, flat,
                # or segmentation shading)
                if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.FLAT or
                        flags & RenderFlags.SEG):
                    self._bind_lighting(scene, program, node, flags)
                # Finally, bind and draw the primitive
                self._bind_and_draw_primitive(
                    primitive=primitive,
                    pose=scene.get_pose(node),
                    program=program,
                    flags=flags
                )
                self._reset_active_textures()
        # Unbind the shader and flush the output
        if program is not None:
            program._unbind()
        glFlush()
        # If doing offscreen render, copy result from framebuffer and return
        if flags & RenderFlags.OFFSCREEN:
            return self._read_main_framebuffer(scene, flags)
        else:
            return
def _shadow_mapping_pass(self, scene, light_node, flags):
light = light_node.light
# Set up viewport for render
self._configure_shadow_mapping_viewport(light, flags)
# Set up camera matrices
V, P = self._get_light_cam_matrices(scene, light_node, flags)
# Now, render each object in sorted order
for node in self._sorted_mesh_nodes(scene):
mesh = node.mesh
# Skip the mesh if it's not visible
if not mesh.is_visible:
continue
for primitive in mesh.primitives:
# First, get and bind the appropriate program
program = self._get_primitive_program(
primitive, flags, ProgramFlags.NONE
)
program._bind()
# Set the camera uniforms
program.set_uniform('V', V)
program.set_uniform('P', P)
program.set_uniform(
'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3]
)
# Finally, bind and draw the primitive
self._bind_and_draw_primitive(
primitive=primitive,
pose=scene.get_pose(node),
program=program,
flags=RenderFlags.DEPTH_ONLY
)
self._reset_active_textures()
# Unbind the shader and flush the output
if program is not None:
program._unbind()
glFlush()
    def _normals_pass(self, scene, flags):
        """Overlay pass drawing vertex/face normal vectors for debugging."""
        # Set up viewport for render
        self._configure_forward_pass_viewport(flags)
        program = None
        # Set up camera matrices
        V, P = self._get_camera_matrices(scene)
        # Now, render each object in sorted order
        for node in self._sorted_mesh_nodes(scene):
            mesh = node.mesh
            # Skip the mesh if it's not visible
            if not mesh.is_visible:
                continue
            for primitive in mesh.primitives:
                # Skip objects that don't have normals
                if not primitive.buf_flags & BufFlags.NORMAL:
                    continue
                # First, get and bind the appropriate program, selecting
                # vertex- and/or face-normal geometry generation.
                pf = ProgramFlags.NONE
                if flags & RenderFlags.VERTEX_NORMALS:
                    pf = pf | ProgramFlags.VERTEX_NORMALS
                if flags & RenderFlags.FACE_NORMALS:
                    pf = pf | ProgramFlags.FACE_NORMALS
                program = self._get_primitive_program(primitive, flags, pf)
                program._bind()
                # Set the camera uniforms
                program.set_uniform('V', V)
                program.set_uniform('P', P)
                # Normal-arrow length is proportional to the primitive size.
                program.set_uniform('normal_magnitude', 0.05 * primitive.scale)
                program.set_uniform(
                    'normal_color', np.array([0.1, 0.1, 1.0, 1.0])
                )
                # Finally, bind and draw the primitive
                self._bind_and_draw_primitive(
                    primitive=primitive,
                    pose=scene.get_pose(node),
                    program=program,
                    flags=RenderFlags.DEPTH_ONLY
                )
                self._reset_active_textures()
        # Unbind the shader and flush the output
        if program is not None:
            program._unbind()
        glFlush()
###########################################################################
# Handlers for binding uniforms and drawing primitives
###########################################################################
    def _bind_and_draw_primitive(self, primitive, pose, program, flags):
        """Bind one primitive's pose, material and GL state, then draw it.

        Parameters
        ----------
        primitive : the primitive to draw (already uploaded to the context).
        pose : (4,4) model matrix placing the primitive in the world.
        program : the already-bound shader program to receive uniforms.
        flags : RenderFlags; DEPTH_ONLY or SEG skips all material binding.
        """
        # Set model pose matrix
        program.set_uniform('M', pose)
        # Bind mesh buffers
        primitive._bind()
        # Bind mesh material (skipped entirely for depth/segmentation passes)
        if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG):
            material = primitive.material
            # Bind textures
            tf = material.tex_flags
            if tf & TexFlags.NORMAL:
                self._bind_texture(material.normalTexture,
                                   'material.normal_texture', program)
            if tf & TexFlags.OCCLUSION:
                self._bind_texture(material.occlusionTexture,
                                   'material.occlusion_texture', program)
            if tf & TexFlags.EMISSIVE:
                self._bind_texture(material.emissiveTexture,
                                   'material.emissive_texture', program)
            if tf & TexFlags.BASE_COLOR:
                self._bind_texture(material.baseColorTexture,
                                   'material.base_color_texture', program)
            if tf & TexFlags.METALLIC_ROUGHNESS:
                self._bind_texture(material.metallicRoughnessTexture,
                                   'material.metallic_roughness_texture',
                                   program)
            if tf & TexFlags.DIFFUSE:
                self._bind_texture(material.diffuseTexture,
                                   'material.diffuse_texture', program)
            if tf & TexFlags.SPECULAR_GLOSSINESS:
                self._bind_texture(material.specularGlossinessTexture,
                                   'material.specular_glossiness_texture',
                                   program)
            # Bind other uniforms (factor uniforms depend on workflow type)
            b = 'material.{}'
            program.set_uniform(b.format('emissive_factor'),
                                material.emissiveFactor)
            if isinstance(material, MetallicRoughnessMaterial):
                program.set_uniform(b.format('base_color_factor'),
                                    material.baseColorFactor)
                program.set_uniform(b.format('metallic_factor'),
                                    material.metallicFactor)
                program.set_uniform(b.format('roughness_factor'),
                                    material.roughnessFactor)
            elif isinstance(material, SpecularGlossinessMaterial):
                program.set_uniform(b.format('diffuse_factor'),
                                    material.diffuseFactor)
                program.set_uniform(b.format('specular_factor'),
                                    material.specularFactor)
                program.set_uniform(b.format('glossiness_factor'),
                                    material.glossinessFactor)
            # Set blending options; non-BLEND modes write color unmodified
            if material.alphaMode == 'BLEND':
                glEnable(GL_BLEND)
                glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
            else:
                glEnable(GL_BLEND)
                glBlendFunc(GL_ONE, GL_ZERO)
            # Set wireframe mode (FLIP_WIREFRAME inverts the material setting)
            wf = material.wireframe
            if flags & RenderFlags.FLIP_WIREFRAME:
                wf = not wf
            if (flags & RenderFlags.ALL_WIREFRAME) or wf:
                glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            else:
                glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
            # Set culling mode
            if material.doubleSided or flags & RenderFlags.SKIP_CULL_FACES:
                glDisable(GL_CULL_FACE)
            else:
                glEnable(GL_CULL_FACE)
                glCullFace(GL_BACK)
        else:
            # Depth/segmentation pass: fixed opaque, back-face-culled state
            glEnable(GL_CULL_FACE)
            glEnable(GL_BLEND)
            glCullFace(GL_BACK)
            glBlendFunc(GL_ONE, GL_ZERO)
            glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        # Set point size if needed
        glDisable(GL_PROGRAM_POINT_SIZE)
        if primitive.mode == GLTF.POINTS:
            glEnable(GL_PROGRAM_POINT_SIZE)
            glPointSize(self.point_size)
        # Render mesh; instanced draw covers per-instance poses if present
        n_instances = 1
        if primitive.poses is not None:
            n_instances = len(primitive.poses)
        if primitive.indices is not None:
            glDrawElementsInstanced(
                primitive.mode, primitive.indices.size, GL_UNSIGNED_INT,
                ctypes.c_void_p(0), n_instances
            )
        else:
            glDrawArraysInstanced(
                primitive.mode, 0, len(primitive.positions), n_instances
            )
        # Unbind mesh buffers
        primitive._unbind()
    def _bind_lighting(self, scene, program, node, flags):
        """Bind all lighting uniform values for a scene.

        Per-type light counts are clamped to what the texture-unit budget
        allows (see _compute_max_n_lights).  When any light type exceeds
        its cap, lights are bound in order of distance to *node* so the
        nearest ones win.  Shadow maps and light matrices are bound when
        the corresponding SHADOWS_* flag is set.
        """
        max_n_lights = self._compute_max_n_lights(flags)
        n_d = min(len(scene.directional_light_nodes), max_n_lights[0])
        n_s = min(len(scene.spot_light_nodes), max_n_lights[1])
        n_p = min(len(scene.point_light_nodes), max_n_lights[2])
        program.set_uniform('ambient_light', scene.ambient_light)
        program.set_uniform('n_directional_lights', n_d)
        program.set_uniform('n_spot_lights', n_s)
        program.set_uniform('n_point_lights', n_p)
        # Per-type slot counters into the shader's light arrays
        plc = 0
        slc = 0
        dlc = 0
        light_nodes = scene.light_nodes
        if (len(scene.directional_light_nodes) > max_n_lights[0] or
                len(scene.spot_light_nodes) > max_n_lights[1] or
                len(scene.point_light_nodes) > max_n_lights[2]):
            light_nodes = self._sorted_nodes_by_distance(
                scene, scene.light_nodes, node
            )
        for n in light_nodes:
            light = n.light
            pose = scene.get_pose(n)
            position = pose[:3,3]
            # Lights shine along the node's -z axis
            direction = -pose[:3,2]
            if isinstance(light, PointLight):
                if plc == max_n_lights[2]:
                    continue
                b = 'point_lights[{}].'.format(plc)
                plc += 1
                shadow = bool(flags & RenderFlags.SHADOWS_POINT)
                program.set_uniform(b + 'position', position)
            elif isinstance(light, SpotLight):
                if slc == max_n_lights[1]:
                    continue
                b = 'spot_lights[{}].'.format(slc)
                slc += 1
                shadow = bool(flags & RenderFlags.SHADOWS_SPOT)
                # Scale/offset terms for smooth falloff between the inner
                # and outer cone angles (clamped to avoid division by ~0)
                las = 1.0 / max(0.001, np.cos(light.innerConeAngle) -
                                np.cos(light.outerConeAngle))
                lao = -np.cos(light.outerConeAngle) * las
                program.set_uniform(b + 'direction', direction)
                program.set_uniform(b + 'position', position)
                program.set_uniform(b + 'light_angle_scale', las)
                program.set_uniform(b + 'light_angle_offset', lao)
            else:
                if dlc == max_n_lights[0]:
                    continue
                b = 'directional_lights[{}].'.format(dlc)
                dlc += 1
                shadow = bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)
                program.set_uniform(b + 'direction', direction)
            program.set_uniform(b + 'color', light.color)
            program.set_uniform(b + 'intensity', light.intensity)
            # if light.range is not None:
            #     program.set_uniform(b + 'range', light.range)
            # else:
            #     program.set_uniform(b + 'range', 0)
            if shadow:
                self._bind_texture(light.shadow_texture,
                                   b + 'shadow_map', program)
                if not isinstance(light, PointLight):
                    V, P = self._get_light_cam_matrices(scene, n, flags)
                    program.set_uniform(b + 'light_matrix', P.dot(V))
                else:
                    raise NotImplementedError(
                        'Point light shadows not implemented'
                    )
def _sorted_mesh_nodes(self, scene):
cam_loc = scene.get_pose(scene.main_camera_node)[:3,3]
solid_nodes = []
trans_nodes = []
for node in scene.mesh_nodes:
mesh = node.mesh
if mesh.is_transparent:
trans_nodes.append(node)
else:
solid_nodes.append(node)
# TODO BETTER SORTING METHOD
trans_nodes.sort(
key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc)
)
solid_nodes.sort(
key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc)
)
return solid_nodes + trans_nodes
def _sorted_nodes_by_distance(self, scene, nodes, compare_node):
nodes = list(nodes)
compare_posn = scene.get_pose(compare_node)[:3,3]
nodes.sort(key=lambda n: np.linalg.norm(
scene.get_pose(n)[:3,3] - compare_posn)
)
return nodes
###########################################################################
# Context Management
###########################################################################
    def _update_context(self, scene, flags):
        """Synchronize GPU-side resources with *scene*.

        Uploads newly-added meshes and textures to the GL context, frees
        resources that are no longer referenced, and (re)creates shadow
        textures for lights whose shadow type is enabled in *flags*.
        """
        # Update meshes
        scene_meshes = scene.meshes
        # Add new meshes to context
        for mesh in scene_meshes - self._meshes:
            for p in mesh.primitives:
                p._add_to_context()
        # Remove old meshes from context
        for mesh in self._meshes - scene_meshes:
            for p in mesh.primitives:
                p.delete()
        self._meshes = scene_meshes.copy()
        # Update mesh textures
        mesh_textures = set()
        for m in scene_meshes:
            for p in m.primitives:
                mesh_textures |= p.material.textures
        # Add new textures to context
        for texture in mesh_textures - self._mesh_textures:
            texture._add_to_context()
        # Remove old textures from context
        for texture in self._mesh_textures - mesh_textures:
            texture.delete()
        self._mesh_textures = mesh_textures.copy()
        # Collect the set of shadow textures that should exist this frame
        shadow_textures = set()
        for l in scene.lights:
            # Create if needed
            active = False
            if (isinstance(l, DirectionalLight) and
                    flags & RenderFlags.SHADOWS_DIRECTIONAL):
                active = True
            elif (isinstance(l, PointLight) and
                    flags & RenderFlags.SHADOWS_POINT):
                active = True
            elif isinstance(l, SpotLight) and flags & RenderFlags.SHADOWS_SPOT:
                active = True
            if active and l.shadow_texture is None:
                l._generate_shadow_texture()
            if l.shadow_texture is not None:
                shadow_textures.add(l.shadow_texture)
        # Add new textures to context
        for texture in shadow_textures - self._shadow_textures:
            texture._add_to_context()
        # Remove old textures from context
        for texture in self._shadow_textures - shadow_textures:
            texture.delete()
        self._shadow_textures = shadow_textures.copy()
###########################################################################
# Texture Management
###########################################################################
    def _bind_texture(self, texture, uniform_name, program):
        """Bind *texture* to the next free texture unit and point the
        sampler uniform *uniform_name* at that unit.
        """
        tex_id = self._get_next_active_texture()
        glActiveTexture(GL_TEXTURE0 + tex_id)
        texture._bind()
        program.set_uniform(uniform_name, tex_id)
def _get_next_active_texture(self):
val = self._texture_alloc_idx
self._texture_alloc_idx += 1
return val
    def _reset_active_textures(self):
        # Restart texture-unit allocation from unit 0 for the next draw.
        self._texture_alloc_idx = 0
###########################################################################
# Camera Matrix Management
###########################################################################
def _get_camera_matrices(self, scene):
main_camera_node = scene.main_camera_node
if main_camera_node is None:
raise ValueError('Cannot render scene without a camera')
P = main_camera_node.camera.get_projection_matrix(
width=self.viewport_width, height=self.viewport_height
)
pose = scene.get_pose(main_camera_node)
V = np.linalg.inv(pose) # V maps from world to camera
return V, P
    def _get_light_cam_matrices(self, scene, light_node, flags):
        """Compute view/projection matrices for a light's shadow camera."""
        light = light_node.light
        # Copy: the pose is mutated below for directional lights
        pose = scene.get_pose(light_node).copy()
        s = scene.scale
        camera = light._get_shadow_camera(s)
        P = camera.get_projection_matrix()
        if isinstance(light, DirectionalLight):
            # Directional lights have no meaningful position; place the
            # shadow camera back from the scene centroid along the light
            # direction, at a distance of the scene scale.
            direction = -pose[:3,2]
            c = scene.centroid
            loc = c - direction * s
            pose[:3,3] = loc
        V = np.linalg.inv(pose)  # V maps from world to camera
        return V, P
###########################################################################
# Shader Program Management
###########################################################################
def _get_text_program(self):
program = self._program_cache.get_program(
vertex_shader='text.vert',
fragment_shader='text.frag'
)
if not program._in_context():
program._add_to_context()
return program
    def _compute_max_n_lights(self, flags):
        """Compute the per-type light caps [directional, spot, point].

        Each cap starts at MAX_N_LIGHTS.  When any SHADOWS_* flag is set,
        the GPU's free texture units are divided evenly among the enabled
        shadow types (each shadowed light needs one unit for its map),
        with any remainder given to directional lights.
        """
        max_n_lights = [MAX_N_LIGHTS, MAX_N_LIGHTS, MAX_N_LIGHTS]
        n_tex_units = glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS)
        # Reserved texture units: 6
        #   Normal Map
        #   Occlusion Map
        #   Emissive Map
        #   Base Color or Diffuse Map
        #   MR or SG Map
        #   Environment cubemap
        n_reserved_textures = 6
        n_available_textures = n_tex_units - n_reserved_textures
        # Distribute textures evenly among lights with shadows, with
        # a preference for directional lights
        n_shadow_types = 0
        if flags & RenderFlags.SHADOWS_DIRECTIONAL:
            n_shadow_types += 1
        if flags & RenderFlags.SHADOWS_SPOT:
            n_shadow_types += 1
        if flags & RenderFlags.SHADOWS_POINT:
            n_shadow_types += 1
        if n_shadow_types > 0:
            tex_per_light = n_available_textures // n_shadow_types
            if flags & RenderFlags.SHADOWS_DIRECTIONAL:
                # Directional lights also absorb the division remainder
                max_n_lights[0] = (
                    tex_per_light +
                    (n_available_textures - tex_per_light * n_shadow_types)
                )
            if flags & RenderFlags.SHADOWS_SPOT:
                max_n_lights[1] = tex_per_light
            if flags & RenderFlags.SHADOWS_POINT:
                max_n_lights[2] = tex_per_light
        return max_n_lights
def _get_primitive_program(self, primitive, flags, program_flags):
vertex_shader = None
fragment_shader = None
geometry_shader = None
defines = {}
if (bool(program_flags & ProgramFlags.USE_MATERIAL) and
not flags & RenderFlags.DEPTH_ONLY and
not flags & RenderFlags.FLAT and
not flags & RenderFlags.SEG):
vertex_shader = 'mesh.vert'
fragment_shader = 'mesh.frag'
elif bool(program_flags & (ProgramFlags.VERTEX_NORMALS |
ProgramFlags.FACE_NORMALS)):
vertex_shader = 'vertex_normals.vert'
if primitive.mode == GLTF.POINTS:
geometry_shader = 'vertex_normals_pc.geom'
else:
geometry_shader = 'vertex_normals.geom'
fragment_shader = 'vertex_normals.frag'
elif flags & RenderFlags.FLAT:
vertex_shader = 'flat.vert'
fragment_shader = 'flat.frag'
elif flags & RenderFlags.SEG:
vertex_shader = 'segmentation.vert'
fragment_shader = 'segmentation.frag'
else:
vertex_shader = 'mesh_depth.vert'
fragment_shader = 'mesh_depth.frag'
# Set up vertex buffer DEFINES
bf = primitive.buf_flags
buf_idx = 1
if bf & BufFlags.NORMAL:
defines['NORMAL_LOC'] = buf_idx
buf_idx += 1
if bf & BufFlags.TANGENT:
defines['TANGENT_LOC'] = buf_idx
buf_idx += 1
if bf & BufFlags.TEXCOORD_0:
defines['TEXCOORD_0_LOC'] = buf_idx
buf_idx += 1
if bf & BufFlags.TEXCOORD_1:
defines['TEXCOORD_1_LOC'] = buf_idx
buf_idx += 1
if bf & BufFlags.COLOR_0:
defines['COLOR_0_LOC'] = buf_idx
buf_idx += 1
if bf & BufFlags.JOINTS_0:
defines['JOINTS_0_LOC'] = buf_idx
buf_idx += 1
if bf & BufFlags.WEIGHTS_0:
defines['WEIGHTS_0_LOC'] = buf_idx
buf_idx += 1
defines['INST_M_LOC'] = buf_idx
# Set up shadow mapping defines
if flags & RenderFlags.SHADOWS_DIRECTIONAL:
defines['DIRECTIONAL_LIGHT_SHADOWS'] = 1
if flags & RenderFlags.SHADOWS_SPOT:
defines['SPOT_LIGHT_SHADOWS'] = 1
if flags & RenderFlags.SHADOWS_POINT:
defines['POINT_LIGHT_SHADOWS'] = 1
max_n_lights = self._compute_max_n_lights(flags)
defines['MAX_DIRECTIONAL_LIGHTS'] = max_n_lights[0]
defines['MAX_SPOT_LIGHTS'] = max_n_lights[1]
defines['MAX_POINT_LIGHTS'] = max_n_lights[2]
# Set up vertex normal defines
if program_flags & ProgramFlags.VERTEX_NORMALS:
defines['VERTEX_NORMALS'] = 1
if program_flags & ProgramFlags.FACE_NORMALS:
defines['FACE_NORMALS'] = 1
# Set up material texture defines
if bool(program_flags & ProgramFlags.USE_MATERIAL):
tf = primitive.material.tex_flags
if tf & TexFlags.NORMAL:
defines['HAS_NORMAL_TEX'] = 1
if tf & TexFlags.OCCLUSION:
defines['HAS_OCCLUSION_TEX'] = 1
if tf & TexFlags.EMISSIVE:
defines['HAS_EMISSIVE_TEX'] = 1
if tf & TexFlags.BASE_COLOR:
defines['HAS_BASE_COLOR_TEX'] = 1
if tf & TexFlags.METALLIC_ROUGHNESS:
defines['HAS_METALLIC_ROUGHNESS_TEX'] = 1
if tf & TexFlags.DIFFUSE:
defines['HAS_DIFFUSE_TEX'] = 1
if tf & TexFlags.SPECULAR_GLOSSINESS:
defines['HAS_SPECULAR_GLOSSINESS_TEX'] = 1
if isinstance(primitive.material, MetallicRoughnessMaterial):
defines['USE_METALLIC_MATERIAL'] = 1
elif isinstance(material, SpecularGlossinessMaterial):
defines['USE_GLOSSY_MATERIAL'] = 1
program = self._program_cache.get_program(
vertex_shader=vertex_shader,
fragment_shader=fragment_shader,
geometry_shader=geometry_shader,
defines=defines
)
if not program._in_context():
program._add_to_context()
return program
###########################################################################
# Viewport Management
###########################################################################
    def _configure_forward_pass_viewport(self, flags):
        """Bind the correct draw framebuffer and set viewport/depth state
        for a forward render pass.
        """
        # If using offscreen render, bind main framebuffer
        if flags & RenderFlags.OFFSCREEN:
            self._configure_main_framebuffer()
            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms)
        else:
            # Render straight into the window-system framebuffer
            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)
        glViewport(0, 0, self.viewport_width, self.viewport_height)
        glEnable(GL_DEPTH_TEST)
        glDepthMask(GL_TRUE)
        glDepthFunc(GL_LESS)
        glDepthRange(0.0, 1.0)
    def _configure_shadow_mapping_viewport(self, light, flags):
        """Bind the shadow framebuffer with *light*'s depth texture
        attached and configure GL state for a depth-only shadow pass.
        """
        self._configure_shadow_framebuffer()
        glBindFramebuffer(GL_FRAMEBUFFER, self._shadow_fb)
        light.shadow_texture._bind()
        light.shadow_texture._bind_as_depth_attachment()
        glActiveTexture(GL_TEXTURE0)
        light.shadow_texture._bind()
        # Depth-only pass: no color buffers are written or read
        glDrawBuffer(GL_NONE)
        glReadBuffer(GL_NONE)
        glClear(GL_DEPTH_BUFFER_BIT)
        glViewport(0, 0, SHADOW_TEX_SZ, SHADOW_TEX_SZ)
        glEnable(GL_DEPTH_TEST)
        glDepthMask(GL_TRUE)
        glDepthFunc(GL_LESS)
        glDepthRange(0.0, 1.0)
        # No face culling or blending during the depth pass
        glDisable(GL_CULL_FACE)
        glDisable(GL_BLEND)
###########################################################################
# Framebuffer Management
###########################################################################
    def _configure_shadow_framebuffer(self):
        # Lazily allocate the framebuffer used for shadow-map rendering.
        if self._shadow_fb is None:
            self._shadow_fb = glGenFramebuffers(1)
def _delete_shadow_framebuffer(self):
if self._shadow_fb is not None:
glDeleteFramebuffers(1, [self._shadow_fb])
def _configure_main_framebuffer(self):
# If mismatch with prior framebuffer, delete it
if (self._main_fb is not None and
self.viewport_width != self._main_fb_dims[0] or
self.viewport_height != self._main_fb_dims[1]):
self._delete_main_framebuffer()
# If framebuffer doesn't exist, create it
if self._main_fb is None:
# Generate standard buffer
self._main_cb, self._main_db = glGenRenderbuffers(2)
glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb)
glRenderbufferStorage(
GL_RENDERBUFFER, GL_RGBA,
self.viewport_width, self.viewport_height
)
glBindRenderbuffer(GL_RENDERBUFFER, self._main_db)
glRenderbufferStorage(
GL_RENDERBUFFER, GL_DEPTH_COMPONENT24,
self.viewport_width, self.viewport_height
)
self._main_fb = glGenFramebuffers(1)
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb)
glFramebufferRenderbuffer(
GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER, self._main_cb
)
glFramebufferRenderbuffer(
GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER, self._main_db
)
# Generate multisample buffer
self._main_cb_ms, self._main_db_ms = glGenRenderbuffers(2)
glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb_ms)
glRenderbufferStorageMultisample(
GL_RENDERBUFFER, 4, GL_RGBA,
self.viewport_width, self.viewport_height
)
glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms)
glRenderbufferStorageMultisample(
GL_RENDERBUFFER, 4, GL_DEPTH_COMPONENT24,
self.viewport_width, self.viewport_height
)
self._main_fb_ms = glGenFramebuffers(1)
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms)
glFramebufferRenderbuffer(
GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_RENDERBUFFER, self._main_cb_ms
)
glFramebufferRenderbuffer(
GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER, self._main_db_ms
)
self._main_fb_dims = (self.viewport_width, self.viewport_height)
    def _delete_main_framebuffer(self):
        """Delete both offscreen FBOs and their renderbuffers, resetting
        every handle so the buffers can be lazily recreated.
        """
        if self._main_fb is not None:
            glDeleteFramebuffers(2, [self._main_fb, self._main_fb_ms])
        if self._main_cb is not None:
            glDeleteRenderbuffers(2, [self._main_cb, self._main_cb_ms])
        if self._main_db is not None:
            glDeleteRenderbuffers(2, [self._main_db, self._main_db_ms])
        self._main_fb = None
        self._main_cb = None
        self._main_db = None
        self._main_fb_ms = None
        self._main_cb_ms = None
        self._main_db_ms = None
        self._main_fb_dims = (None, None)
    def _read_main_framebuffer(self, scene, flags):
        """Resolve the multisample framebuffer and read back depth (and,
        unless DEPTH_ONLY, color) as numpy images.

        Returns depth_im alone when DEPTH_ONLY is set, else
        (color_im, depth_im).  Depth values are linearized to eye-space
        distances; far-plane (cleared) pixels read back as 0.0.
        """
        width, height = self._main_fb_dims[0], self._main_fb_dims[1]
        # Bind framebuffer and blit (resolve) the MSAA buffers into the
        # single-sample framebuffer so they can be read with glReadPixels
        glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb_ms)
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb)
        glBlitFramebuffer(
            0, 0, width, height, 0, 0, width, height,
            GL_COLOR_BUFFER_BIT, GL_LINEAR
        )
        glBlitFramebuffer(
            0, 0, width, height, 0, 0, width, height,
            GL_DEPTH_BUFFER_BIT, GL_NEAREST
        )
        glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb)
        # Read depth
        depth_buf = glReadPixels(
            0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT
        )
        depth_im = np.frombuffer(depth_buf, dtype=np.float32)
        depth_im = depth_im.reshape((height, width))
        # GL rows start at the bottom; flip to top-row-first convention
        depth_im = np.flip(depth_im, axis=0)
        # Depth exactly 1.0 is the cleared/far value -> "no geometry"
        inf_inds = (depth_im == 1.0)
        depth_im = 2.0 * depth_im - 1.0
        z_near = scene.main_camera_node.camera.znear
        z_far = scene.main_camera_node.camera.zfar
        noninf = np.logical_not(inf_inds)
        # Invert the projection's depth mapping to recover linear eye-space
        # depth (special-cased for an infinite far plane)
        if z_far is None:
            depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf])
        else:
            depth_im[noninf] = ((2.0 * z_near * z_far) /
                                (z_far + z_near - depth_im[noninf] *
                                 (z_far - z_near)))
        depth_im[inf_inds] = 0.0
        # Resize for macos if needed
        if sys.platform == 'darwin':
            depth_im = self._resize_image(depth_im)
        if flags & RenderFlags.DEPTH_ONLY:
            return depth_im
        # Read color
        if flags & RenderFlags.RGBA:
            color_buf = glReadPixels(
                0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE
            )
            color_im = np.frombuffer(color_buf, dtype=np.uint8)
            color_im = color_im.reshape((height, width, 4))
        else:
            color_buf = glReadPixels(
                0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE
            )
            color_im = np.frombuffer(color_buf, dtype=np.uint8)
            color_im = color_im.reshape((height, width, 3))
        color_im = np.flip(color_im, axis=0)
        # Resize for macos if needed
        if sys.platform == 'darwin':
            color_im = self._resize_image(color_im, True)
        return color_im, depth_im
    def _resize_image(self, value, antialias=False):
        """If needed, rescale the render for MacOS.

        Divides the image size by the display pixel scale
        (``self.dpscale``), using bilinear resampling when *antialias* is
        True (color) and nearest-neighbor otherwise (depth, where
        interpolating would fabricate depth values).
        """
        img = PIL.Image.fromarray(value)
        resample = PIL.Image.NEAREST
        if antialias:
            resample = PIL.Image.BILINEAR
        size = (self.viewport_width // self.dpscale,
                self.viewport_height // self.dpscale)
        img = img.resize(size, resample=resample)
        return np.array(img)
###########################################################################
# Shadowmap Debugging
###########################################################################
    def _forward_pass_no_reset(self, scene, flags):
        """Forward-render the scene without reconfiguring the viewport or
        framebuffer; used by _render_light_shadowmaps, which sets its own
        viewport before calling this.
        """
        # Set up camera matrices
        V, P = self._get_camera_matrices(scene)
        # Now, render each object in sorted order
        for node in self._sorted_mesh_nodes(scene):
            mesh = node.mesh
            # Skip the mesh if it's not visible
            if not mesh.is_visible:
                continue
            for primitive in mesh.primitives:
                # First, get and bind the appropriate program
                program = self._get_primitive_program(
                    primitive, flags, ProgramFlags.USE_MATERIAL
                )
                program._bind()
                # Set the camera uniforms
                program.set_uniform('V', V)
                program.set_uniform('P', P)
                program.set_uniform(
                    'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3]
                )
                # Next, bind the lighting (not needed for depth/flat passes)
                if not flags & RenderFlags.DEPTH_ONLY and not flags & RenderFlags.FLAT:
                    self._bind_lighting(scene, program, node, flags)
                # Finally, bind and draw the primitive
                self._bind_and_draw_primitive(
                    primitive=primitive,
                    pose=scene.get_pose(node),
                    program=program,
                    flags=flags
                )
                self._reset_active_textures()
        # Unbind the shader and flush the output
        if program is not None:
            program._unbind()
        glFlush()
    def _render_light_shadowmaps(self, scene, light_nodes, flags, tile=False):
        """Debug-render the given lights' shadow maps to the default
        framebuffer.

        With ``tile=True`` each shadow map is drawn into its own viewport
        tile and the final tile shows a normal forward render; otherwise
        each shadow map is drawn full-screen in turn.

        NOTE(review): ``viewport_dims`` only has keys for layouts of up to
        4 tiles (so at most 3 light nodes when tiling), and ``i`` is
        unbound after the loop if ``light_nodes`` is empty — confirm
        callers guarantee 1-3 nodes.
        """
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)
        glClearColor(*scene.bg_color)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)
        glDepthMask(GL_TRUE)
        glDepthFunc(GL_LESS)
        glDepthRange(0.0, 1.0)
        w = self.viewport_width
        h = self.viewport_height
        num_nodes = len(light_nodes)
        # (tile index, total tiles) -> [x0, y0, x1, y1] viewport rectangle
        viewport_dims = {
            (0, 2): [0, h // 2, w // 2, h],
            (1, 2): [w // 2, h // 2, w, h],
            (0, 3): [0, h // 2, w // 2, h],
            (1, 3): [w // 2, h // 2, w, h],
            (2, 3): [0, 0, w // 2, h // 2],
            (0, 4): [0, h // 2, w // 2, h],
            (1, 4): [w // 2, h // 2, w, h],
            (2, 4): [0, 0, w // 2, h // 2],
            (3, 4): [w // 2, 0, w, h // 2]
        }
        if tile:
            for i, ln in enumerate(light_nodes):
                light = ln.light
                if light.shadow_texture is None:
                    raise ValueError('Light does not have a shadow texture')
                glViewport(*viewport_dims[(i, num_nodes + 1)])
                program = self._get_debug_quad_program()
                program._bind()
                self._bind_texture(light.shadow_texture, 'depthMap', program)
                self._render_debug_quad()
                self._reset_active_textures()
                glFlush()
            # The remaining tile shows the normal forward render
            i += 1
            glViewport(*viewport_dims[(i, num_nodes + 1)])
            self._forward_pass_no_reset(scene, flags)
        else:
            for i, ln in enumerate(light_nodes):
                light = ln.light
                if light.shadow_texture is None:
                    raise ValueError('Light does not have a shadow texture')
                glViewport(0, 0, self.viewport_width, self.viewport_height)
                program = self._get_debug_quad_program()
                program._bind()
                self._bind_texture(light.shadow_texture, 'depthMap', program)
                self._render_debug_quad()
                self._reset_active_textures()
                glFlush()
        return
def _get_debug_quad_program(self):
program = self._program_cache.get_program(
vertex_shader='debug_quad.vert',
fragment_shader='debug_quad.frag'
)
if not program._in_context():
program._add_to_context()
return program
    def _render_debug_quad(self):
        """Draw a 6-vertex (two-triangle) full-screen quad.

        A throwaway VAO with no attribute buffers is bound, presumably
        because the debug shader synthesizes positions itself (e.g. from
        gl_VertexID) — the VAO exists only because core profile requires
        one to be bound when drawing.  TODO confirm against the shader.
        """
        x = glGenVertexArrays(1)
        glBindVertexArray(x)
        glDrawArrays(GL_TRIANGLES, 0, 6)
        glBindVertexArray(0)
        glDeleteVertexArrays(1, [x])
|
import numpy as np
import numpy.matlib
from matplotlib import cm
from tfc.utils import MakePlot
# Import the model from the auxiliary folder
import sys
sys.path.append("aux")
from Navier_Stokes_DeepTFC_aux import myModel
# Set CPU as available physical device
#import tensorflow as tf
#my_devices = tf.config.experimental.list_physical_devices(device_type='CPU')
#tf.config.experimental.set_visible_devices(devices= my_devices, device_type='CPU')
# Constants:
H = 30  # NOTE(review): presumably the network width passed to myModel -- confirm
varType = "float64"
L = 15.  # channel length (x extent)
# NOTE: `tf` here is the final time; it would shadow the tensorflow alias
# if the commented import above were ever restored.
tf = 3.
rho = 1.  # fluid density (presumed from the Navier-Stokes context)
mu = 1.   # dynamic viscosity (used in the analytic profile below)
h = 1.    # channel height (y spans [-h/2, h/2])
P = -5.   # pressure-gradient constant (appears in the Poiseuille formula later)
# Train the class
model = myModel(rho,h,L,P,mu,H)
# Random collocation points sampled over [0,L] x [-h/2,h/2] x [0,tf]
inputs = np.random.rand(2000,3)*np.array([[L,h,tf]])+np.array([[0.,-h/2.,0.]])
inputs = np.array(inputs,dtype=varType)
outputs = np.zeros((inputs.shape[0],1),dtype=varType)
#model.trainBfgs(inputs,outputs,maxIter=1500)
model.trainBfgs(inputs,outputs,maxIter=500)
# Calculate u and v and plot for different times
n = 100
# Build an n x n evaluation grid; the same grid is stacked once per time slice
X = np.matlib.repmat(np.reshape(np.linspace(0,L,num=n),(n,1)),n,1).flatten()
Y = np.reshape(np.matlib.repmat(np.reshape(np.linspace(-h/2.,h/2.,num=n),(n,1)),1,n),(n**2,1)).flatten()
xTest = np.zeros((3,n**2*3))
xTest[0,:] = np.hstack([X,]*3)
xTest[1,:] = np.hstack([Y,]*3)
xTest[2,:] = np.hstack([np.ones(n**2)*0.01,np.ones(n**2)*0.1,np.ones(n**2)*tf])
xTest = np.array(xTest.T,dtype=varType)
p = []; U = [];
vals = [0.01,0.1,tf]
u,v = model.call(xTest)
u = u.numpy(); v = v.numpy()
# Extract the u field for each requested time slice
for k in range(len(vals)):
    p.append(MakePlot(r'$x (m)$',r'$y (m)$'))
    ind = np.where(np.round(xTest[:,2],12)==np.round(vals[k],12))
    U.append(np.reshape(u[ind],(n,n)))
# NOTE: `ind` leaks from the last loop iteration; all slices share the grid,
# so Xm/Ym are the same for every time value.
Xm = np.reshape(xTest[:,0][ind],(n,n))
Ym = np.reshape(xTest[:,1][ind],(n,n))
# Shared color scale across all contour plots
dark = np.block(U)
vMin = np.min(dark)
vMax = np.max(dark)
def MakeContourPlot(Xm,Ym,Um):
    """Create a filled contour plot of Um over the (Xm, Ym) grid.

    Uses the module-level vMin/vMax so every plot shares one color scale.
    NOTE(review): the local `p` shadows the module-level list `p` of plots.
    """
    p = MakePlot(r'$x$ (m)',r'$y$ (m)')
    C = p.ax[0].contourf(Xm,Ym,Um,vmin=vMin,vmax=vMax,cmap=cm.gist_rainbow)
    cbar = p.fig.colorbar(C)
    return p
# Render, show, and save one contour plot per time slice
plots = [MakeContourPlot(Xm,Ym,U[0]),MakeContourPlot(Xm,Ym,U[1]),MakeContourPlot(Xm,Ym,U[2])]
for k,j in enumerate(plots):
    j.FullScreen()
    j.show()
    j.save('DeepTFC'+str(k),fileType='png')
# U error: compare against the analytic plane-Poiseuille profile at the
# channel outlet (x == L) at the final time (t == tf)
ind = np.where(xTest[:,2]==tf)
ind2 = np.where(xTest[:,0][ind] == L)
uEnd = u[ind][ind2].flatten()
y = xTest[:,1][ind][ind2]
# Analytic steady-state solution: u(y) = P*(4y^2 - h^2) / (8*mu)
uTrue = P*(4.*y**2-h**2)/(8.*mu)
uErr = np.abs(uEnd-uTrue)
print("Max u error at the end: "+str(np.max(uErr)))
print("Mean u error at the end: "+str(np.mean(uErr)))
# V error: the analytic transverse velocity is identically zero
vEnd = v[ind][ind2].flatten()
vTrue = np.zeros_like(vEnd)
vErr = np.abs(vEnd-vTrue)
print("Max v error at the end: "+str(np.max(vErr)))
print("Mean v error at the end: "+str(np.mean(vErr)))
|
import os
import click
from flask_migrate import Migrate
from app import create_app, db
from app.models import User, Role
# Fetch FLASK_CONFIG from the .flaskenv file, falling back to 'default':
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
migrate = Migrate(app, db)
# Create any missing database tables at import time
with app.app_context():
    db.create_all()
@app.shell_context_processor
def make_shell_context():
    """Expose the database handle and core models in `flask shell`."""
    return {'db': db, 'User': User, 'Role': Role}
@app.cli.command()
@click.argument('test_names', nargs=-1)
def test(test_names):
    """Run the unit tests."""
    import unittest
    loader = unittest.TestLoader()
    # Run only the named tests when given, else discover everything.
    suite = (loader.loadTestsFromNames(test_names) if test_names
             else loader.discover('tests'))
    unittest.TextTestRunner(verbosity=2).run(suite)
|
from __future__ import print_function, absolute_import
import numpy as np
from typing import List, Set
class FemSelection(object):
    """A mutable selection of FEM entity ids.

    Ids are plain ints whose leading digits (``id // 100000000``) encode
    the entity category.  Backed by a set for O(1) membership tests.
    """

    def __init__(self):
        self._data = set()  # type: Set[int]

    def clear(self):
        """Remove every id from the selection."""
        self._data.clear()

    def set_data1(self, data):
        # type: (int)->None
        """Replace the selection with the single id *data*."""
        self._data.clear()
        self._data.add(data)

    def set_data2(self, data):
        # type: (List[int])->None
        """Replace the selection with the ids in *data*."""
        self._data.clear()
        self._data.update(set(data))

    def set_data3(self, data):
        # type: (Set[int])->None
        """Replace the selection with the id set *data*."""
        self._data.clear()
        self._data.update(data)

    def set_data4(self, data, data_size):
        # type: (List[int], int)->None
        """Replace the selection with the first *data_size* ids of *data*."""
        self._data.clear()
        self._data.update(set(data[:data_size]))

    def add_data1(self, data):
        # type: (int)->None
        """Add a single id."""
        self._data.add(data)

    def add_data2(self, data):
        # type: (List[int])->None
        """Add a list of ids."""
        self._data.update(set(data))

    def add_data3(self, data):
        # type: (Set[int])->None
        """Add a set of ids."""
        self._data.update(data)

    def add_data4(self, data):
        # type: (FemSelection)->None
        """Add every id from another FemSelection."""
        self._data.update(data._data)

    def remove_data1(self, data):
        # type: (int)->None
        """Remove a single id (no-op when absent)."""
        self._data.discard(data)

    def remove_data2(self, data):
        # type: (List[int])->None
        # NOTE(review): symmetric difference ADDS ids from *data* that are
        # not currently selected; for pure removal this would be
        # difference_update. Preserved as-is -- confirm intended semantics.
        self._data.symmetric_difference_update(set(data))

    def remove_data3(self, data):
        # type: (Set[int])->None
        # NOTE(review): same symmetric-difference caveat as remove_data2.
        self._data.symmetric_difference_update(data)

    def intersect1(self, data):
        # type: (int)->None
        # BUGFIX: was `insersection_update` (typo) -> AttributeError.
        self._data.intersection_update(set([data]))

    def intersect2(self, data):
        # type: (List[int])->None
        # BUGFIX: was `insersection_update` (typo) -> AttributeError.
        self._data.intersection_update(set(data))

    def intersect3(self, data):
        # type: (Set[int])->None
        # BUGFIX: was `insersection_update` (typo) -> AttributeError.
        self._data.intersection_update(data)

    def contains(self, data):
        # type: (int)->bool
        """Return True when *data* is in the selection."""
        return data in self._data

    def condense(self):
        # type: ()->List[List[int]]
        """Compress the sorted id list into run-length-encoded triplets.

        Each run is encoded as [first, last, step], with -1 in the step
        slot for a consecutive run and last == -1 for a single id.  A new
        sub-list is started whenever the category (id // 100000000)
        changes.

        NOTE(review): a run that is reset on the final element is not
        emitted (e.g. the last id of [1, 3, 5] is dropped) -- confirm
        against the implementation this was ported from.
        """
        vec = self.to_vector()
        if vec.size <= 2:
            return [vec.tolist()]
        first = vec[0]
        second = 0
        count = 0
        old_category = int(first / 100000000)
        category = 0
        offset = 0
        results = []
        tmp = []
        for i in range(1, vec.size):
            count += 1
            second = vec[i]
            category = int(second / 100000000)
            if category != old_category:
                # Category boundary: start a fresh sub-list and a new run
                results.append([])
                first = second
                second = 0
                count = 0
                offset = 0
                old_category = category
                continue
            if offset == 0:
                offset = second - first
            if second - first != count:
                # The arithmetic run broke at vec[i-1]; flush it
                tmp_ = vec[i-1]
                if first != tmp_:
                    if tmp_ - first == offset:
                        tmp.extend([first, -1, -1])
                        tmp.extend([tmp_, -1, -1])
                    else:
                        offset_ = -1
                        if offset > 1:
                            offset_ = offset
                        tmp.extend([first, tmp_, offset_])
                else:
                    # BUGFIX: was tmp.append([first, -1, -1]) which nested
                    # a list inside the flat int encoding (every sibling
                    # branch uses extend); flatten it.
                    tmp.extend([first, -1, -1])
                first = second
                count = 0
        if count != 0:
            # Flush the final still-open run
            if first != second:
                if second - first == offset:
                    tmp.extend([first, -1, -1])
                    tmp.extend([second, -1, -1])
                else:
                    offset_ = -1
                    if offset > 1:
                        offset_ = offset
                    tmp.extend([first, second, offset_])
        sz = len(tmp)
        # Drop an exactly-duplicated trailing triplet
        if sz > 3 and tmp[sz-1] == tmp[sz-4] and tmp[sz-2] == tmp[sz-5] and tmp[sz-3] == tmp[sz-6]:
            tmp = tmp[:sz-3]
            sz = len(tmp)
        if sz > 0:
            results.append(tmp)
        return results

    def raw_pointer(self):
        # type: ()->FemSelection
        """Return this object itself (pointer-style accessor)."""
        return self

    def to_vector(self):
        # type: ()->np.ndarray
        """Return the selected ids as a sorted numpy array."""
        return np.array(sorted(self._data))

    def selection_by_category(self, category):
        # type: (int)->List[int]
        """Return all selected ids whose category digits match *category*."""
        result = []
        for i in self._data:
            _category = int(i / 100000000)
            if _category == category:
                result.append(i)
        return result

    def __contains__(self, data):
        return data in self._data
|
import subprocess
from PIL import Image
from pathlib import Path
from time import sleep
from View import View
from utils import Vector2, TouchVector2, console
class EVABot(object):
    """Android UI automation bot driven through adb.

    Captures the device screen, matches it against configured View
    templates, and injects the matching view's touch sequence.
    """
    def __init__(self, screenSize : tuple = (1920, 1080), ip_address : str = None, runFromDevice : bool = False):
        """Optionally connect to a networked device at "x.x.x.x:port".

        Raises ValueError when ip_address does not match that format.
        """
        self.runFromDevice = runFromDevice
        if not ip_address is None:
            if not isinstance(ip_address, str) or (
                    len(ip_address.split('.')) != 4 or len(ip_address.split('.')[-1].split(':')) != 2):
                raise ValueError('EVABot : ip_address variable should respect the "x.x.x.x:y" format, "y" being the '
                                 f'port. Received "{ip_address}" instead.')
            self.runCMD(('adb', 'connect', f'{ip_address}'))
            # NOTE(review): ip_address is only stored when provided
            self.ip_address = ip_address
        self.screenSize = screenSize
        self._tmpImageFileName = 'screen.dump'
        self._tmpImage = None
        self.viewList = []
    def loadConfFile(self, fileName : str):
        """Load View definitions from a JSON config file into viewList."""
        import json
        jsonData = Path(fileName).read_text()
        viewData = json.loads(jsonData)[View.VIEW_ID]
        self.viewList = []
        for viewID in viewData:
            self.viewList.append(View.loadFromDict(viewData[viewID]))
        self.viewList = tuple(self.viewList)
    def getScreen(self):
        """Capture the device screen into self._tmpImage (RGBA).

        NOTE(review): calls adb directly instead of self.runCMD, so the
        runFromDevice stripping is not applied here -- confirm intended.
        """
        screenData = subprocess.check_output(('adb', 'shell', 'screencap'))
        self._tmpImage = Image.frombytes('RGBA', self.screenSize, screenData)
        return True
    def checkForView(self):
        """Capture the screen and fire the first matching view's touches.

        Returns True when a view matched (touches sent), False otherwise.
        """
        try:
            console.print('Capturing screen', 0)
            self.getScreen()
        except ValueError:
            console.print(f'{console.icho.bold}{console.icho.red}'
                          'Error capturing screen'
                          f'{console.icho.normal}', 0)
            return False
        idx = len(self.viewList)
        for view in self.viewList:
            # NOTE(review): integer division makes this 0 for every view
            # except the last -- probably meant a float fraction; confirm.
            position = (len(self.viewList) - idx) // len(self.viewList)
            console.print(f'View: {view.name}', position)
            idx -= 1
            if view.isView(self._tmpImage):
                console.print(f'View: {view.name} '
                              f'{console.icho.bold}'
                              f'{console.icho.cyan}'
                              f'OK{console.icho.normal}', 1)
                for touch in view.touchArray:
                    self.touchScreen(touch)
                    sleep(view.touchDelay / 1000)
                return True
        console.print('No view found', 1)
        return False
    def touchScreen(self, position : Vector2):
        """Send a single tap at *position*."""
        self.runCMD(('adb', 'shell', 'input', 'touchscreen', 'tap', f'{position.x}', f'{position.y}'))
    def longTouchScreen(self, position : TouchVector2):
        """Send a long press at *position* (zero-length swipe of the given duration)."""
        self.runCMD(('adb', 'shell', 'input', 'touchscreen', 'swipe',
                     f'{position.x}', f'{position.y}', f'{position.x}', f'{position.y}',
                     f'{position.duration}'))
    def runCMD(self, command : 'str or list'):
        """Run an adb command, stripping the 'adb shell' prefix when
        executing directly on the device.

        NOTE(review): command_array is unbound (NameError) when *command*
        is neither a str nor a tuple/list -- confirm callers never do that.
        """
        if isinstance(command, str):
            command_array = command.split(' ')
        elif isinstance(command, (tuple, list)):
            command_array = command
        if self.runFromDevice:
            # Drop the leading ('adb', 'shell') pair when already on-device
            subprocess.call(command_array[2:])
        else:
            subprocess.call(command_array)
    @staticmethod
    def run(ip_address : str = None, fileName : str = 'views.json', sleepTime : int = 100):
        """Main loop: poll for views forever, backing off while idle."""
        tmpDaemon = EVABot(ip_address=ip_address)
        tmpDaemon.loadConfFile(fileName)
        idx = 0
        while True:
            if not tmpDaemon.checkForView():
                # Sleep longer the more consecutive misses accumulate
                sleep((sleepTime / 1000) + (idx // 10))
                idx += 1
            else: idx = 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-28 07:50
from __future__ import unicode_literals
from django.db import migrations
def tidy_progress_range(apps, schema_editor):
    """
    Tidies progress ranges because a bug had caused them to go out of range
    """
    # Use the historical model state (required in data migrations), not the
    # live model classes.
    ContentSessionLog = apps.get_model("logger", "ContentSessionLog")
    ContentSummaryLog = apps.get_model("logger", "ContentSummaryLog")
    # Not knowing how floating points will behave in the local database,
    # 1.0 might become bigger than 1.0!!
    # Clamp negative progress up to 0.0 on both log types.
    ContentSessionLog.objects.filter(progress__lt=0).update(progress=0.0)
    ContentSummaryLog.objects.filter(progress__lt=0).update(progress=0.0)
    # Clamp progress above 1 down to 1.0 — summary logs only.
    # NOTE(review): there is no matching upper clamp for ContentSessionLog;
    # confirm whether session-log progress may legitimately exceed 1.
    ContentSummaryLog.objects.filter(progress__gt=1).update(progress=1.0)
def reverse(apps, schema_editor):
    """No-op reverse migration: the clamped values cannot be restored."""
    pass
class Migration(migrations.Migration):
    """Data migration: clamp out-of-range progress values (no-op reverse)."""

    dependencies = [
        ("logger", "0003_auto_20170531_1140"),
    ]

    operations = [
        migrations.RunPython(tidy_progress_range, reverse_code=reverse),
    ]
|
from typing import Any, Dict, List, Type, TypeVar
import attr
T = TypeVar("T", bound="IndyEQProofM")


@attr.s(auto_attribs=True)
class IndyEQProofM:
    """Schema-less container for the ``m`` values of an Indy equality proof.

    No fields are declared up front: every key/value pair lives in
    ``additional_properties`` and is exposed through the mapping dunders.
    """

    additional_properties: Dict[str, str] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize as a plain dict (a shallow copy of the stored properties)."""
        return dict(self.additional_properties)

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Create an instance whose properties are a shallow copy of *src_dict*."""
        instance = cls()
        instance.additional_properties = src_dict.copy()
        return instance

    @property
    def additional_keys(self) -> List[str]:
        """Names of all stored properties."""
        return [*self.additional_properties]

    def __getitem__(self, key: str) -> str:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: str) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'HaiFeng'
__mtime__ = '2017/1/17'
"""
import os, sys
if __name__ == "__main__":
    # Switch into the directory containing this script (the generate dir) so
    # the relative src_dir paths below resolve; the caller's cwd is restored
    # at the end.
    pre_dir = os.getcwd()
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # src_dir = '../ctp_20180109_x86'
    # src_dir = '../v6.3.15_20190220'
    src_dir = '../v6.3.16_T1_20190508'  # CTP "penetration supervision" (看穿式监管) API drop
    # NOTE(review): this prints the usage text even when an argument IS
    # supplied — presumably just an echo of the expected usage; confirm.
    if len(sys.argv) == 2:
        print('run.py 版本目录')
        src_dir = f'../{sys.argv[1]}'
    data_type_file_name = 'ThostFtdcUserApiDataType'
    # Generate the enum definitions from the data-type header.
    import g_enum
    g_enum.src_dir = src_dir
    g_enum.data_type_file_name = 'ThostFtdcUserApiDataType'
    g_enum.run()
    # Generate the struct definitions.
    import g_struct
    g_struct.src_dir = src_dir
    g_struct.struct_file_name = 'ThostFtdcUserApiStruct'
    g_struct.run()
    # Generate the C wrapper + Python bindings for the trader API.
    import g_c_py
    g_c_py.src_dir = src_dir
    g_c_py.spi_class_name = 'trade'
    g_c_py.file_src = 'ThostFtdcTraderApi'
    # g_c_py.lib_name = 'thosttraderapi'
    g_c_py.lib_name = 'thosttraderapi_se'  # _se = penetration-supervision edition
    g_c_py.api_class_name = 'CThostFtdcTraderApi'
    g_c_py.info_struct_name = 'CThostFtdcRspInfoField'
    g_c_py.create_api = 'CThostFtdcTraderApi::CreateFtdcTraderApi'
    g_c_py.run(True, True)  # generate both the C wrapper and the Python side
    # g_c_py.run(False, True)
    # Generate the C wrapper + Python bindings for the market-data API.
    g_c_py.spi_class_name = 'quote'
    g_c_py.file_src = 'ThostFtdcMdApi'
    # g_c_py.lib_name = 'thostmduserapi'
    g_c_py.lib_name = 'thostmduserapi_se'  # _se = penetration-supervision edition
    g_c_py.api_class_name = 'CThostFtdcMdApi'
    g_c_py.info_struct_name = 'CThostFtdcRspInfoField'
    g_c_py.create_api = 'CThostFtdcMdApi::CreateFtdcMdApi'
    g_c_py.run(True, True)
    # g_c_py.run(False, True)
    print('finish.')
    os.chdir(pre_dir)
|
#===============================================================================
# Copyright 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from onedal.svm import SVC
from sklearn.utils.estimator_checks import check_estimator
import sklearn.utils.estimator_checks
from sklearn import datasets
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from onedal.tests.utils._device_selection import (get_queues,
pass_if_not_implemented_for_gpu)
def _replace_and_save(md, fns, replacing_fn):
saved = dict()
for check_f in fns:
try:
fn = getattr(md, check_f)
setattr(md, check_f, replacing_fn)
saved[check_f] = fn
except RuntimeError:
pass
return saved
def _restore_from_saved(md, saved_dict):
for check_f in saved_dict:
setattr(md, check_f, saved_dict[check_f])
def test_estimator():
    """Run sklearn's check_estimator on SVC with known-failing checks stubbed out."""
    def dummy(*args, **kwargs):
        # No-op replacement for the skipped checks.
        pass
    md = sklearn.utils.estimator_checks
    saved = _replace_and_save(md, [
        'check_sample_weights_invariance',  # Max absolute difference: 0.0008
        'check_estimators_fit_returns_self',  # ValueError: empty metadata
        'check_classifiers_train',  # assert y_pred.shape == (n_samples,)
        'check_estimators_unfitted',  # Call 'fit' with appropriate arguments
    ], dummy)
    try:
        check_estimator(SVC())
    finally:
        # BUG FIX: always restore the patched module-level checks, even when
        # check_estimator raises — otherwise later tests see the stubs.
        _restore_from_saved(md, saved)
def _test_libsvm_parameters(queue, array_constr, dtype):
    """Fit a linear SVC on a 6-point toy set and verify the libsvm attributes."""
    points = array_constr([[-2, -1], [-1, -1], [-1, -2],
                           [1, 1], [1, 2], [2, 1]], dtype=dtype)
    labels = array_constr([1, 1, 1, 2, 2, 2], dtype=dtype)
    model = SVC(kernel='linear').fit(points, labels, queue=queue)
    assert_array_equal(model.dual_coef_, [[-0.25, .25]])
    assert_array_equal(model.support_, [1, 3])
    assert_array_equal(model.support_vectors_, (points[1], points[3]))
    assert_array_equal(model.intercept_, [0.])
    assert_array_equal(model.predict(points), labels)
# TODO: investigate sporadic failures on GPU
@pytest.mark.parametrize('queue', get_queues('host,cpu'))
@pytest.mark.parametrize('array_constr', [np.array])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_libsvm_parameters(queue, array_constr, dtype):
    """Parametrized wrapper: run the libsvm attribute checks on host/cpu queues."""
    _test_libsvm_parameters(queue, array_constr, dtype)
@pytest.mark.parametrize('queue', get_queues('cpu') + [
    pytest.param(get_queues('gpu'),
                 marks=pytest.mark.xfail(
                     reason="class weights are not implemented "
                            "but the error is not raised"))])
def test_class_weight(queue):
    """Down-weighting class 1 heavily should flip every prediction to class 2."""
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    y = np.array([1, 1, 1, 2, 2, 2])
    # Weight 0.1 on class 1 makes misclassifying it almost free.
    clf = SVC(class_weight={1: 0.1})
    clf.fit(X, y, queue=queue)
    assert_array_almost_equal(clf.predict(X, queue=queue), [2] * 6)
# TODO: investigate sporadic failures on GPU
@pytest.mark.parametrize('queue', get_queues('host,cpu'))
def test_sample_weight(queue):
    """Uniform sample weights must not shift the symmetric decision boundary."""
    X = np.array([[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    clf = SVC(kernel='linear')
    clf.fit(X, y, sample_weight=[1] * 6, queue=queue)
    # The point set is symmetric about the origin, so the intercept stays 0.
    assert_array_almost_equal(clf.intercept_, [0.0])
@pytest.mark.parametrize('queue', get_queues())
def test_decision_function(queue):
    """decision_function of a binary RBF SVC must equal the kernel expansion."""
    samples = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype=np.float32)
    labels = np.array([1, 1, 1, 2, 2, 2], dtype=np.float32)
    clf = SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(samples, labels, queue=queue)
    # Reconstruct the decision values by hand from the support vectors.
    kernel_values = rbf_kernel(samples, clf.support_vectors_, gamma=clf.gamma)
    expected = np.dot(kernel_values, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(expected.ravel(), clf.decision_function(samples, queue=queue))
@pass_if_not_implemented_for_gpu(reason="multiclass svm is not implemented")
@pytest.mark.parametrize('queue', get_queues())
def test_iris(queue):
    """A linear SVC should fit iris accurately and expose sorted classes_."""
    dataset = datasets.load_iris()
    model = SVC(kernel='linear').fit(dataset.data, dataset.target, queue=queue)
    assert model.score(dataset.data, dataset.target, queue=queue) > 0.9
    assert_array_equal(model.classes_, np.sort(model.classes_))
@pass_if_not_implemented_for_gpu(reason="multiclass svm is not implemented")
@pytest.mark.parametrize('queue', get_queues())
def test_decision_function_shape(queue):
    """'ovo' yields n*(n-1)/2 pairwise columns; an invalid shape name raises."""
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    model = SVC(kernel='linear',
                decision_function_shape='ovo').fit(X_train, y_train, queue=queue)
    # 5 classes -> 10 one-vs-one columns.
    scores = model.decision_function(X_train, queue=queue)
    assert scores.shape == (len(X_train), 10)
    with pytest.raises(ValueError, match="must be either 'ovr' or 'ovo'"):
        SVC(decision_function_shape='bad').fit(X_train, y_train, queue=queue)
@pass_if_not_implemented_for_gpu(reason="multiclass svm is not implemented")
@pytest.mark.parametrize('queue', get_queues())
def test_pickle(queue):
    """A pickle round-trip must preserve the exact decision_function output."""
    import pickle
    dataset = datasets.load_iris()
    model = SVC(kernel='linear').fit(dataset.data, dataset.target, queue=queue)
    before = model.decision_function(dataset.data, queue=queue)
    restored = pickle.loads(pickle.dumps(model))
    assert type(restored) == model.__class__
    after = restored.decision_function(dataset.data, queue=queue)
    assert_array_equal(before, after)
@pass_if_not_implemented_for_gpu(reason="sigmoid kernel is not implemented")
@pytest.mark.parametrize('queue', get_queues('cpu') + [
    pytest.param(get_queues('gpu'),
                 marks=pytest.mark.xfail(reason="raises Unimplemented error "
                                                "with inconsistent error message"))])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_svc_sigmoid(queue, dtype):
    """Sigmoid-kernel SVC: every point is a support vector on this toy set."""
    points_train = np.array([[-1, 2], [0, 0], [2, -1],
                             [+1, +1], [+1, +2], [+2, +1]], dtype=dtype)
    points_test = np.array([[0, 2], [0.5, 0.5],
                            [0.3, 0.1], [2, 0], [-1, -1]], dtype=dtype)
    labels_train = np.array([1, 1, 1, 2, 2, 2], dtype=dtype)
    model = SVC(kernel='sigmoid').fit(points_train, labels_train, queue=queue)
    assert_array_equal(model.dual_coef_, [[-1, -1, -1, 1, 1, 1]])
    assert_array_equal(model.support_, [0, 1, 2, 3, 4, 5])
    assert_array_equal(model.predict(points_test, queue=queue), [2, 2, 1, 2, 1])
|
# Copyright 2012-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess, os.path
import tempfile
from .import mesonlib
from . import mlog
from .mesonlib import MesonException
from . import coredata
"""This file contains the data files of all compilers Meson knows
about. To support a new compiler, add its information below.
Also add corresponding autodetection code in environment.py."""
# File-suffix classification tables used by the is_* predicates below.
header_suffixes = ('h', 'hh', 'hpp', 'hxx', 'H', 'ipp', 'moc', 'vapi', 'di')
obj_suffixes = ('o', 'obj', 'res')
lib_suffixes = ('a', 'lib', 'dll', 'dylib', 'so')
# Mapping of language to suffixes of files that should always be in that language
# This means we can't include .h headers here since they could be C, C++, ObjC, etc.
lang_suffixes = {
    'c': ('c',),
    'cpp': ('cpp', 'cc', 'cxx', 'c++', 'hh', 'hpp', 'ipp', 'hxx'),
    'fortran': ('f', 'f90', 'f95'),
    'd': ('d', 'di'),
    'objc': ('m',),
    'objcpp': ('mm',),
    'rust': ('rs',),
    'vala': ('vala', 'vapi'),
    'cs': ('cs',),
    'swift': ('swift',),
    'java': ('java',),
}
cpp_suffixes = lang_suffixes['cpp'] + ('h',)
c_suffixes = lang_suffixes['c'] + ('h',)
clike_suffixes = lang_suffixes['c'] + lang_suffixes['cpp'] + ('h',)

def _file_suffix(fname):
    """Return the suffix of *fname*, unwrapping File-like objects with a .fname attribute.

    Extracted helper: the four predicates below previously repeated this
    identical unwrap-and-split logic.
    """
    if hasattr(fname, 'fname'):
        fname = fname.fname
    return fname.split('.')[-1]

def is_header(fname):
    """True if *fname* has a header suffix."""
    return _file_suffix(fname) in header_suffixes

def is_source(fname):
    """True if *fname* has a C/C++-like source (or header) suffix."""
    return _file_suffix(fname) in clike_suffixes

def is_object(fname):
    """True if *fname* has an object-file suffix."""
    return _file_suffix(fname) in obj_suffixes

def is_library(fname):
    """True if *fname* has a static/shared library suffix."""
    return _file_suffix(fname) in lib_suffixes
# Per-buildtype compile arguments for GCC-like (gcc/clang) compilers.
gnulike_buildtype_args = {'plain' : [],
                          # -O0 is passed for improved debugging information with gcc
                          # See https://github.com/mesonbuild/meson/pull/509
                          'debug' : ['-O0', '-g'],
                          'debugoptimized' : ['-O2', '-g'],
                          'release' : ['-O3'],
                          'minsize' : ['-Os', '-g']}
# Per-buildtype compile arguments for MSVC (/MD[d] selects the dynamic CRT).
msvc_buildtype_args = {'plain' : [],
                       'debug' : ["/MDd", "/ZI", "/Ob0", "/Od", "/RTC1"],
                       'debugoptimized' : ["/MD", "/Zi", "/O2", "/Ob1"],
                       'release' : ["/MD", "/O2", "/Ob2"],
                       'minsize' : ["/MD", "/Zi", "/Os", "/Ob1"],
                       }
# Per-buildtype linker arguments for GCC-like toolchains.
# NOTE(review): on OSX every entry is empty — presumably because Apple's ld
# does not accept -Wl,-O1; confirm.
gnulike_buildtype_linker_args = {}
if mesonlib.is_osx():
    gnulike_buildtype_linker_args.update({'plain' : [],
                                          'debug' : [],
                                          'debugoptimized' : [],
                                          'release' : [],
                                          'minsize' : [],
                                          })
else:
    gnulike_buildtype_linker_args.update({'plain' : [],
                                          'debug' : [],
                                          'debugoptimized' : [],
                                          'release' : ['-Wl,-O1'],
                                          'minsize' : [],
                                          })
# Per-buildtype linker arguments for MSVC's link.exe.
msvc_buildtype_linker_args = {'plain' : [],
                              'debug' : [],
                              'debugoptimized' : [],
                              'release' : [],
                              'minsize' : ['/INCREMENTAL:NO'],
                              }
# Per-buildtype arguments for javac.
java_buildtype_args = {'plain' : [],
                       'debug' : ['-g'],
                       'debugoptimized' : ['-g'],
                       'release' : [],
                       'minsize' : [],
                       }
# Per-buildtype arguments for rustc.
rust_buildtype_args = {'plain' : [],
                       'debug' : ['-g'],
                       'debugoptimized' : ['-g', '--opt-level', '2'],
                       'release' : ['--opt-level', '3'],
                       'minsize' : [],
                       }
# Per-buildtype arguments for the three D compilers (gdc, ldc, dmd).
d_gdc_buildtype_args = {'plain' : [],
                        'debug' : ['-g', '-O0'],
                        'debugoptimized' : ['-g', '-O'],
                        'release' : ['-O3', '-frelease'],
                        'minsize' : [],
                        }
d_ldc_buildtype_args = {'plain' : [],
                        'debug' : ['-g', '-O0'],
                        'debugoptimized' : ['-g', '-O'],
                        'release' : ['-O3', '-release'],
                        'minsize' : [],
                        }
d_dmd_buildtype_args = {'plain' : [],
                        'debug' : ['-g'],
                        'debugoptimized' : ['-g', '-O'],
                        'release' : ['-O', '-release'],
                        'minsize' : [],
                        }
# Per-buildtype arguments for the Mono C# compiler.
mono_buildtype_args = {'plain' : [],
                       'debug' : ['-debug'],
                       'debugoptimized': ['-debug', '-optimize+'],
                       'release' : ['-optimize+'],
                       'minsize' : [],
                       }
# Per-buildtype arguments for swiftc.
swift_buildtype_args = {'plain' : [],
                        'debug' : ['-g'],
                        'debugoptimized': ['-g', '-O'],
                        'release' : ['-O'],
                        'minsize' : [],
                        }
# Default Windows system libraries, in GNU (-l) and MSVC (.lib) spellings.
gnu_winlibs = ['-lkernel32', '-luser32', '-lgdi32', '-lwinspool', '-lshell32',
               '-lole32', '-loleaut32', '-luuid', '-lcomdlg32', '-ladvapi32']
msvc_winlibs = ['kernel32.lib', 'user32.lib', 'gdi32.lib',
                'winspool.lib', 'shell32.lib', 'ole32.lib', 'oleaut32.lib',
                'uuid.lib', 'comdlg32.lib', 'advapi32.lib']
# GCC colored-diagnostics flag per b_colorout value.
gnu_color_args = {'auto' : ['-fdiagnostics-color=auto'],
                  'always': ['-fdiagnostics-color=always'],
                  'never' : ['-fdiagnostics-color=never'],
                  }
# Compiler-agnostic per-project "base" options (b_*).  Each compiler exposes
# the subset it supports via its base_options attribute; get_base_compile_args
# and get_base_link_args translate the values into command-line flags.
base_options = {
    'b_pch': coredata.UserBooleanOption('b_pch', 'Use precompiled headers', True),
    'b_lto': coredata.UserBooleanOption('b_lto', 'Use link time optimization', False),
    'b_sanitize': coredata.UserComboOption('b_sanitize',
                                           'Code sanitizer to use',
                                           ['none', 'address', 'thread', 'undefined', 'memory'],
                                           'none'),
    'b_lundef': coredata.UserBooleanOption('b_lundef', 'Use -Wl,--no-undefined when linking', True),
    'b_asneeded': coredata.UserBooleanOption('b_asneeded', 'Use -Wl,--as-needed when linking', True),
    'b_pgo': coredata.UserComboOption('b_pgo', 'Use profile guide optimization',
                                      ['off', 'generate', 'use'],
                                      'off'),
    'b_coverage': coredata.UserBooleanOption('b_coverage',
                                             'Enable coverage tracking.',
                                             False),
    'b_colorout' : coredata.UserComboOption('b_colorout', 'Use colored output',
                                            ['auto', 'always', 'never'],
                                            'always'),
    'b_ndebug' : coredata.UserBooleanOption('b_ndebug',
                                            'Disable asserts',
                                            False)
}
def sanitizer_compile_args(value):
    """Compile flags for the chosen sanitizer; 'none' yields no flags."""
    if value == 'none':
        return []
    result = ['-fsanitize=' + value]
    if value == 'address':
        # ASan needs frame pointers for usable stack traces.
        result.append('-fno-omit-frame-pointer')
    return result
def sanitizer_link_args(value):
    """Link flags for the chosen sanitizer; 'none' yields no flags."""
    return [] if value == 'none' else ['-fsanitize=' + value]
def get_base_compile_args(options, compiler):
    """Translate the base ('b_*') options into compile arguments.

    Options absent from *options* (i.e. not supported by this compiler)
    are silently skipped.
    """
    # FIXME, gcc/clang specific.
    args = []
    if 'b_lto' in options and options['b_lto'].value:
        args.append('-flto')
    if 'b_colorout' in options:
        args += compiler.get_colorout_args(options['b_colorout'].value)
    if 'b_sanitize' in options:
        args += sanitizer_compile_args(options['b_sanitize'].value)
    if 'b_pgo' in options:
        pgo_val = options['b_pgo'].value
        if pgo_val == 'generate':
            args.append('-fprofile-generate')
        elif pgo_val == 'use':
            args.append('-fprofile-use')
    if 'b_coverage' in options and options['b_coverage'].value:
        args += compiler.get_coverage_args()
    if 'b_ndebug' in options and options['b_ndebug'].value:
        args += ['-DNDEBUG']
    return args
def get_base_link_args(options, linker):
    """Translate the base ('b_*') options into link arguments.

    Options absent from *options* (i.e. not supported by this linker)
    are silently skipped.
    """
    # FIXME, gcc/clang specific.
    args = []
    if 'b_lto' in options and options['b_lto'].value:
        args.append('-flto')
    if 'b_sanitize' in options:
        args += sanitizer_link_args(options['b_sanitize'].value)
    if 'b_pgo' in options:
        pgo_val = options['b_pgo'].value
        if pgo_val == 'generate':
            args.append('-fprofile-generate')
        elif pgo_val == 'use':
            args.append('-fprofile-use')
    if 'b_lundef' in options and options['b_lundef'].value:
        args.append('-Wl,--no-undefined')
    if 'b_asneeded' in options and options['b_asneeded'].value:
        args.append('-Wl,--as-needed')
    if 'b_coverage' in options and options['b_coverage'].value:
        args += linker.get_coverage_link_args()
    return args
def build_unix_rpath_args(build_dir, rpath_paths, install_rpath):
    """Build the -Wl,-rpath argument for the given build-time rpaths.

    The build rpath is padded with 'X' characters up to the length of the
    install rpath, so the install step can patch the string in place.
    """
    if not rpath_paths and not install_rpath:
        return []
    joined = ':'.join(os.path.join(build_dir, p) for p in rpath_paths)
    shortfall = len(install_rpath) - len(joined)
    if shortfall > 0:
        padding = 'X' * shortfall
        joined = padding if not joined else joined + ':' + padding
    return ['-Wl,-rpath,' + joined]
class EnvironmentException(MesonException):
    """Raised when the build environment or a toolchain is unusable.

    BUG FIX: the original defined a method named `__init` (missing the
    trailing underscores), which Python treats as an ordinary, never-called
    method — not a constructor.  It was dead code; the inherited
    MesonException constructor is what actually ran, so the bogus method is
    removed here without changing behavior.
    """
class CrossNoRunException(MesonException):
    """Raised when a cross-built test binary cannot be executed.

    BUG FIX: the original defined a method named `__init` (missing the
    trailing underscores) — a never-called ordinary method, not a
    constructor.  Removed as dead code; behavior is unchanged because the
    inherited MesonException constructor was always the one used.
    """
class RunResult():
    """Outcome of compiling and running a test program (see Compiler.run)."""
    def __init__(self, compiled, returncode=999, stdout='UNDEFINED', stderr='UNDEFINED'):
        # compiled: whether compilation itself succeeded; the remaining
        # fields keep their sentinel defaults when it did not.
        self.compiled = compiled
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
class Compiler:
    """Abstract base for every compiler Meson knows about.

    Stores the compiler command line and version, and defines the interface
    that language-specific subclasses override.  The default implementation
    of most feature checks raises EnvironmentException.
    """
    def __init__(self, exelist, version):
        # Normalize the compiler command into an argv-style list.
        # (Idiom fix: isinstance() instead of comparing type() objects.)
        if isinstance(exelist, str):
            self.exelist = [exelist]
        elif isinstance(exelist, list):
            self.exelist = exelist
        else:
            raise TypeError('Unknown argument to Compiler')
        # In case it's been overriden by a child class already
        if not hasattr(self, 'file_suffixes'):
            self.file_suffixes = lang_suffixes[self.language]
        if not hasattr(self, 'can_compile_suffixes'):
            self.can_compile_suffixes = set(self.file_suffixes)
        self.default_suffix = self.file_suffixes[0]
        self.version = version
        self.base_options = []
    def can_compile(self, src):
        """Return True if this compiler handles *src*, judged by its suffix."""
        if hasattr(src, 'fname'):
            src = src.fname
        suffix = os.path.splitext(src)[1].lower()
        if suffix and suffix[1:] in self.can_compile_suffixes:
            return True
        return False
    def get_id(self):
        return self.id
    def get_language(self):
        return self.language
    def get_exelist(self):
        # Return a copy so callers cannot mutate our command line.
        return self.exelist[:]
    def get_define(self, *args, **kwargs):
        raise EnvironmentException('%s does not support get_define.' % self.id)
    def has_define(self, *args, **kwargs):
        raise EnvironmentException('%s does not support has_define.' % self.id)
    def get_always_args(self):
        return []
    def get_linker_always_args(self):
        return []
    def gen_import_library_args(self, implibname):
        """
        Used only on Windows for libraries that need an import library.
        This currently means C, C++, Fortran.
        """
        return []
    def get_options(self):
        return {} # build afresh every time
    def get_option_compile_args(self, options):
        return []
    def get_option_link_args(self, options):
        return []
    # Feature checks: subclasses override the checks their language
    # supports; the defaults all raise EnvironmentException.
    def has_header(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support header checks.' % self.language)
    def has_header_symbol(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support header symbol checks.' % self.language)
    def compiles(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support compile checks.' % self.language)
    def links(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support link checks.' % self.language)
    def run(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support run checks.' % self.language)
    def sizeof(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support sizeof checks.' % self.language)
    def alignment(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support alignment checks.' % self.language)
    def has_function(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support function checks.' % self.language)
    def unix_link_flags_to_native(self, args):
        "Always returns a copy that can be independently mutated"
        return args[:]
    def unix_compile_flags_to_native(self, args):
        "Always returns a copy that can be independently mutated"
        return args[:]
    def find_library(self, *args, **kwargs):
        raise EnvironmentException('Language {} does not support library finding.'.format(self.language))
    def get_library_dirs(self):
        return []
    def has_argument(self, arg):
        raise EnvironmentException('Language {} does not support has_arg.'.format(self.language))
    def get_cross_extra_flags(self, environment, *, compile, link):
        """Collect per-language [properties] args/link_args from the cross file."""
        extra_flags = []
        if self.is_cross and environment:
            if 'properties' in environment.cross_info.config:
                lang_args_key = self.language + '_args'
                if compile:
                    extra_flags += environment.cross_info.config['properties'].get(lang_args_key, [])
                lang_link_args_key = self.language + '_link_args'
                if link:
                    extra_flags += environment.cross_info.config['properties'].get(lang_link_args_key, [])
        return extra_flags
    def get_colorout_args(self, colortype):
        return []
    # Some compilers (msvc) write debug info to a separate file.
    # These args specify where it should be written.
    def get_compile_debugfile_args(self, rel_obj):
        return []
    def get_link_debugfile_args(self, rel_obj):
        return []
class CCompiler(Compiler):
def __init__(self, exelist, version, is_cross, exe_wrapper=None):
# If a child ObjC or CPP class has already set it, don't set it ourselves
if not hasattr(self, 'language'):
self.language = 'c'
super().__init__(exelist, version)
self.id = 'unknown'
self.is_cross = is_cross
self.can_compile_suffixes.add('h')
if isinstance(exe_wrapper, str):
self.exe_wrapper = [exe_wrapper]
else:
self.exe_wrapper = exe_wrapper
    def needs_static_linker(self):
        # C-family targets can produce static libraries, which need ar.
        return True # When compiling static libraries, so yes.
    def get_always_args(self):
        return []
    def get_linker_debug_crt_args(self):
        """
        Arguments needed to select a debug crt for the linker
        This is only needed for MSVC
        """
        return []
    def get_no_stdinc_args(self):
        # Disable the standard include search path.
        return ['-nostdinc']
    def get_no_stdlib_link_args(self):
        # Do not link the standard library / startup files.
        return ['-nostdlib']
    def get_warn_args(self, level):
        # self.warn_args is supplied by concrete subclasses, keyed by level.
        return self.warn_args[level]
    def get_soname_args(self, shlib_name, path, soversion):
        return []
    def split_shlib_to_parts(self, fname):
        # (prefix, remainder) of a shared library name; no split by default.
        return (None, fname)
    # The default behaviour is this, override in
    # OSX and MSVC.
    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        return build_unix_rpath_args(build_dir, rpath_paths, install_rpath)
    def get_dependency_gen_args(self, outtarget, outfile):
        # GCC-style dependency-file (.d) generation flags.
        return ['-MMD', '-MQ', outtarget, '-MF', outfile]
    def depfile_for_object(self, objfile):
        return objfile + '.' + self.get_depfile_suffix()
    def get_depfile_suffix(self):
        return 'd'
    def get_default_suffix(self):
        return self.default_suffix
    def get_exelist(self):
        # Copies protect internal state from caller mutation.
        return self.exelist[:]
    def get_linker_exelist(self):
        # Linking is driven through the compiler binary itself.
        return self.exelist[:]
    def get_compile_only_args(self):
        return ['-c']
    def get_no_optimization_args(self):
        return ['-O0']
    def get_output_args(self, target):
        return ['-o', target]
    def get_linker_output_args(self, outputname):
        return ['-o', outputname]
    def get_coverage_args(self):
        return ['--coverage']
    def get_coverage_link_args(self):
        return ['-lgcov']
    def get_werror_args(self):
        return ['-Werror']
    def get_std_exe_link_args(self):
        return []
def get_include_args(self, path, is_system):
if path == '':
path = '.'
if is_system:
return ['-isystem', path]
return ['-I' + path]
    def get_std_shared_lib_link_args(self):
        # Build a shared library the GCC-like way.
        return ['-shared']
    def get_library_dirs(self):
        """Parse the compiler's --print-search-dirs output for library paths."""
        output = subprocess.Popen(self.exelist + ['--print-search-dirs'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        (stdo, _) = output.communicate()
        stdo = stdo.decode('utf-8')
        for line in stdo.split('\n'):
            if line.startswith('libraries:'):
                # Everything after '=' is a colon-separated path list.
                libstr = line.split('=', 1)[1]
                return libstr.split(':')
        return []
    def get_pic_args(self):
        return ['-fPIC']
    def name_string(self):
        # Human-readable compiler command for error messages.
        return ' '.join(self.exelist)
    def get_pch_use_args(self, pch_dir, header):
        # Force-include the precompiled header by basename.
        return ['-include', os.path.split(header)[-1]]
    def get_pch_name(self, header_name):
        # get_pch_suffix is provided by concrete subclasses.
        return os.path.split(header_name)[-1] + '.' + self.get_pch_suffix()
    def get_linker_search_args(self, dirname):
        return ['-L'+dirname]
    def gen_import_library_args(self, implibname):
        """
        The name of the outputted import library
        This implementation is used only on Windows by compilers that use GNU ld
        """
        return ['-Wl,--out-implib=' + implibname]
    def sanity_check_impl(self, work_dir, environment, sname, code):
        """Compile *code* into *work_dir* and (where possible) run the result.

        Raises EnvironmentException when compilation fails or when the
        produced binary is not runnable.  For cross builds without an exe
        wrapper, only compilation is attempted.
        """
        mlog.debug('Sanity testing ' + self.language + ' compiler:', ' '.join(self.exelist))
        mlog.debug('Is cross compiler: %s.' % str(self.is_cross))
        extra_flags = []
        source_name = os.path.join(work_dir, sname)
        binname = sname.rsplit('.', 1)[0]
        if self.is_cross:
            binname += '_cross'
            if self.exe_wrapper is None:
                # Linking cross built apps is painful. You can't really
                # tell if you should use -nostdlib or not and for example
                # on OSX the compiler binary is the same but you need
                # a ton of compiler flags to differentiate between
                # arm and x86_64. So just compile.
                extra_flags += self.get_cross_extra_flags(environment, compile=True, link=False)
                extra_flags += self.get_compile_only_args()
        else:
            extra_flags += self.get_cross_extra_flags(environment, compile=True, link=True)
        # Is a valid executable output for all toolchains and platforms
        binname += '.exe'
        # Write binary check source
        binary_name = os.path.join(work_dir, binname)
        with open(source_name, 'w') as ofile:
            ofile.write(code)
        # Compile sanity check
        cmdlist = self.exelist + extra_flags + [source_name] + self.get_output_args(binary_name)
        pc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=work_dir)
        (stdo, stde) = pc.communicate()
        stdo = stdo.decode()
        stde = stde.decode()
        mlog.debug('Sanity check compiler command line:', ' '.join(cmdlist))
        mlog.debug('Sanity check compile stdout:')
        mlog.debug(stdo)
        mlog.debug('-----\nSanity check compile stderr:')
        mlog.debug(stde)
        mlog.debug('-----')
        if pc.returncode != 0:
            raise EnvironmentException('Compiler {0} can not compile programs.'.format(self.name_string()))
        # Run sanity check
        if self.is_cross:
            if self.exe_wrapper is None:
                # Can't check if the binaries run so we have to assume they do
                return
            cmdlist = self.exe_wrapper + [binary_name]
        else:
            cmdlist = [binary_name]
        mlog.debug('Running test binary command: ' + ' '.join(cmdlist))
        pe = subprocess.Popen(cmdlist)
        pe.wait()
        if pe.returncode != 0:
            raise EnvironmentException('Executables created by {0} compiler {1} are not runnable.'.format(self.language, self.name_string()))
    def sanity_check(self, work_dir, environment):
        """Verify that the C compiler can build and run a trivial program."""
        # NOTE(review): `class` is a valid identifier in C but a keyword in
        # C++, so this source presumably also guards against the compiler
        # silently being a C++ compiler — confirm.
        code = 'int main(int argc, char **argv) { int class=0; return class; }\n'
        return self.sanity_check_impl(work_dir, environment, 'sanitycheckc.c', code)
    def has_header(self, hname, env, extra_args=None, dependencies=None):
        """Return True if a program including header *hname* compiles."""
        if extra_args is None:
            extra_args = []
        templ = '''#include<%s>
int someSymbolHereJustForFun;
'''
        return self.compiles(templ % hname, env, extra_args, dependencies)
    def has_header_symbol(self, hname, symbol, prefix, env, extra_args=None, dependencies=None):
        """Return True if *symbol* is usable after *prefix* + #include of *hname*."""
        if extra_args is None:
            extra_args = []
        templ = '''{2}
#include <{0}>
int main () {{ {1}; }}'''
        # Pass -O0 to ensure that the symbol isn't optimized away
        args = extra_args + self.get_no_optimization_args()
        return self.compiles(templ.format(hname, symbol, prefix), env, args, dependencies)
def compile(self, code, srcname, extra_args=None):
if extra_args is None:
extra_args = []
commands = self.get_exelist()
commands.append(srcname)
commands += extra_args
mlog.debug('Running compile:')
mlog.debug('Command line: ', ' '.join(commands), '\n')
mlog.debug('Code:\n', code)
p = subprocess.Popen(commands, cwd=os.path.split(srcname)[0], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stde, stdo) = p.communicate()
stde = stde.decode()
stdo = stdo.decode()
mlog.debug('Compiler stdout:\n', stdo)
mlog.debug('Compiler stderr:\n', stde)
os.remove(srcname)
return p
    def compiles(self, code, env, extra_args=None, dependencies=None):
        """Return True if *code* compiles (no link) with the given args/deps."""
        if extra_args is None:
            extra_args = []
        if isinstance(extra_args, str):
            extra_args = [extra_args]
        if dependencies is None:
            dependencies = []
        elif not isinstance(dependencies, list):
            dependencies = [dependencies]
        suflen = len(self.default_suffix)
        # Write the probe source to a unique temp file.
        (fd, srcname) = tempfile.mkstemp(suffix='.'+self.default_suffix)
        os.close(fd)
        with open(srcname, 'w') as ofile:
            ofile.write(code)
        cargs = [a for d in dependencies for a in d.get_compile_args()]
        # Convert flags to the native type of the selected compiler
        # NOTE(review): this uses unix_link_flags_to_native for compile
        # flags, although unix_compile_flags_to_native exists — confirm
        # whether that is intentional.
        args = self.unix_link_flags_to_native(cargs + extra_args)
        # Read c_args/cpp_args/etc from the cross-info file (if needed)
        args += self.get_cross_extra_flags(env, compile=True, link=False)
        # We only want to compile; not link
        args += self.get_compile_only_args()
        p = self.compile(code, srcname, args)
        # Best-effort cleanup of the object file the compiler may have
        # produced (gcc-style .o or msvc-style .obj).
        try:
            trial = srcname[:-suflen] + 'o'
            os.remove(trial)
        except FileNotFoundError:
            pass
        try:
            os.remove(srcname[:-suflen] + 'obj')
        except FileNotFoundError:
            pass
        return p.returncode == 0
    def links(self, code, env, extra_args=None, dependencies=None):
        """Return True if *code* both compiles and links with the given args/deps."""
        if extra_args is None:
            extra_args = []
        elif isinstance(extra_args, str):
            extra_args = [extra_args]
        if dependencies is None:
            dependencies = []
        elif not isinstance(dependencies, list):
            dependencies = [dependencies]
        # Temp files for the probe source and the linked output.
        (fd, srcname) = tempfile.mkstemp(suffix='.'+self.default_suffix)
        os.close(fd)
        (fd, dstname) = tempfile.mkstemp()
        os.close(fd)
        with open(srcname, 'w') as ofile:
            ofile.write(code)
        cargs = [a for d in dependencies for a in d.get_compile_args()]
        link_args = [a for d in dependencies for a in d.get_link_args()]
        # Convert flags to the native type of the selected compiler
        args = self.unix_link_flags_to_native(cargs + link_args + extra_args)
        # Select a CRT if needed since we're linking
        args += self.get_linker_debug_crt_args()
        # Read c_args/c_link_args/cpp_args/cpp_link_args/etc from the cross-info file (if needed)
        args += self.get_cross_extra_flags(env, compile=True, link=True)
        # Arguments specifying the output filename
        args += self.get_output_args(dstname)
        p = self.compile(code, srcname, args)
        # Best-effort cleanup of the linked output.
        try:
            os.remove(dstname)
        except FileNotFoundError:
            pass
        return p.returncode == 0
def run(self, code, env, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
if dependencies is None:
dependencies = []
elif not isinstance(dependencies, list):
dependencies = [dependencies]
if self.is_cross and self.exe_wrapper is None:
raise CrossNoRunException('Can not run test applications in this cross environment.')
(fd, srcname) = tempfile.mkstemp(suffix='.'+self.default_suffix)
os.close(fd)
with open(srcname, 'w') as ofile:
ofile.write(code)
cargs = [a for d in dependencies for a in d.get_compile_args()]
link_args = [a for d in dependencies for a in d.get_link_args()]
# Convert flags to the native type of the selected compiler
args = self.unix_link_flags_to_native(cargs + link_args + extra_args)
# Select a CRT if needed since we're linking
args += self.get_linker_debug_crt_args()
# Read c_link_args/cpp_link_args/etc from the cross-info file
args += self.get_cross_extra_flags(env, compile=True, link=True)
# Create command list
exename = srcname + '.exe' # Is guaranteed to be executable on every platform.
commands = self.get_exelist() + args
commands.append(srcname)
commands += self.get_output_args(exename)
mlog.debug('Running code:\n\n', code)
mlog.debug('Command line:', ' '.join(commands))
p = subprocess.Popen(commands, cwd=os.path.split(srcname)[0], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdo, stde) = p.communicate()
stde = stde.decode()
stdo = stdo.decode()
mlog.debug('Compiler stdout:\n')
mlog.debug(stdo)
mlog.debug('Compiler stderr:\n')
mlog.debug(stde)
os.remove(srcname)
if p.returncode != 0:
return RunResult(False)
if self.is_cross:
cmdlist = self.exe_wrapper + [exename]
else:
cmdlist = exename
try:
pe = subprocess.Popen(cmdlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
mlog.debug('Could not run: %s (error: %s)\n' % (cmdlist, e))
return RunResult(False)
(so, se) = pe.communicate()
so = so.decode()
se = se.decode()
mlog.debug('Program stdout:\n')
mlog.debug(so)
mlog.debug('Program stderr:\n')
mlog.debug(se)
try:
os.remove(exename)
except PermissionError:
# On Windows antivirus programs and the like hold
# on to files so they can't be deleted. There's not
# much to do in this case.
pass
return RunResult(True, pe.returncode, so, se)
    def cross_sizeof(self, element, prefix, env, extra_args=None, dependencies=None):
        """Determine sizeof(element) without running a binary (for cross builds).

        Compiles `int temparray[i - sizeof(element)];` for increasing i;
        the first i for which the declaration is legal reveals the size.
        Returns -1 if `element` does not exist at all.
        """
        if extra_args is None:
            extra_args = []
        element_exists_templ = '''#include <stdio.h>
        {0}
        int main(int argc, char **argv) {{
            {1} something;
        }}
        '''
        templ = '''#include <stdio.h>
        %s
        int temparray[%d-sizeof(%s)];
        '''
        # -O0 so the probe code is not optimized away or folded.
        args = extra_args + self.get_no_optimization_args()
        # Check the element exists first; otherwise every probe below would
        # fail and the search loop would overflow.
        if not self.compiles(element_exists_templ.format(prefix, element), env, args, dependencies):
            return -1
        for i in range(1, 1024):
            code = templ % (prefix, i, element)
            if self.compiles(code, env, args, dependencies):
                if self.id == 'msvc':
                    # MSVC refuses to construct an array of zero size, so
                    # the test only succeeds when i is sizeof(element) + 1
                    return i - 1
                return i
        raise EnvironmentException('Cross checking sizeof overflowed.')
def sizeof(self, element, prefix, env, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
if self.is_cross:
return self.cross_sizeof(element, prefix, env, extra_args, dependencies)
templ = '''#include<stdio.h>
%s
int main(int argc, char **argv) {
printf("%%ld\\n", (long)(sizeof(%s)));
return 0;
};
'''
res = self.run(templ % (prefix, element), env, extra_args, dependencies)
if not res.compiled:
return -1
if res.returncode != 0:
raise EnvironmentException('Could not run sizeof test binary.')
return int(res.stdout)
    def cross_alignment(self, typename, env, extra_args=None, dependencies=None):
        """Determine alignof(typename) without running a binary (cross builds).

        Uses offsetof(struct {char c; typename target;}, target) as the
        alignment, probed via the same negative-array-size compile trick as
        cross_sizeof().  Returns -1 if the type does not exist.
        """
        if extra_args is None:
            extra_args = []
        type_exists_templ = '''#include <stdio.h>
        int main(int argc, char **argv) {{
            {0} something;
        }}
        '''
        templ = '''#include<stddef.h>
        struct tmp {
            char c;
            %s target;
        };
        int testarray[%d-offsetof(struct tmp, target)];
        '''
        # -O0 so the probe code is not optimized away or folded.
        args = extra_args + self.get_no_optimization_args()
        if not self.compiles(type_exists_templ.format(typename), env, args, dependencies):
            return -1
        for i in range(1, 1024):
            code = templ % (typename, i)
            if self.compiles(code, env, args, dependencies):
                if self.id == 'msvc':
                    # MSVC refuses to construct an array of zero size, so
                    # the test only succeeds when i is sizeof(element) + 1
                    return i - 1
                return i
        raise EnvironmentException('Cross checking offsetof overflowed.')
def alignment(self, typename, env, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
if self.is_cross:
return self.cross_alignment(typename, env, extra_args, dependencies)
templ = '''#include<stdio.h>
#include<stddef.h>
struct tmp {
char c;
%s target;
};
int main(int argc, char **argv) {
printf("%%d", (int)offsetof(struct tmp, target));
return 0;
}
'''
res = self.run(templ % typename, env, extra_args, dependencies)
if not res.compiled:
raise EnvironmentException('Could not compile alignment test.')
if res.returncode != 0:
raise EnvironmentException('Could not run alignment test binary.')
align = int(res.stdout)
if align == 0:
raise EnvironmentException('Could not determine alignment of %s. Sorry. You might want to file a bug.' % typename)
return align
    def has_function(self, funcname, prefix, env, extra_args=None, dependencies=None):
        """
        First, this function looks for the symbol in the default libraries
        provided by the compiler (stdlib + a few others usually). If that
        fails, it checks if any of the headers specified in the prefix provide
        an implementation of the function, and if that fails, it checks if it's
        implemented as a compiler-builtin.
        """
        if extra_args is None:
            extra_args = []
        # Define the symbol to something else in case it is defined by the
        # includes or defines listed by the user `{0}` or by the compiler.
        # Then, undef the symbol to get rid of it completely.
        templ = '''
        #define {1} meson_disable_define_of_{1}
        #include <limits.h>
        {0}
        #undef {1}
        '''
        # Override any GCC internal prototype and declare our own definition for
        # the symbol. Use char because that's unlikely to be an actual return
        # value for a function which ensures that we override the definition.
        templ += '''
        #ifdef __cplusplus
        extern "C"
        #endif
        char {1} ();
        '''
        # glibc defines functions that are not available on Linux as stubs that
        # fail with ENOSYS (such as e.g. lchmod). In this case we want to fail
        # instead of detecting the stub as a valid symbol.
        # We always include limits.h above to ensure that these are defined for
        # stub functions.
        stubs_fail = '''
        #if defined __stub_{1} || defined __stub___{1}
        fail fail fail this function is not going to work
        #endif
        '''
        templ += stubs_fail
        # And finally the actual function call
        templ += '''
        int
        main ()
        {{
          return {1} ();
        }}'''
        # e.g. 'has_function_printf'; used as a cross-file override key.
        varname = 'has function ' + funcname
        varname = varname.replace(' ', '_')
        if self.is_cross:
            # Cross builds may pre-answer this check in the cross file's
            # [properties] section instead of compiling anything.
            val = env.cross_info.config['properties'].get(varname, None)
            if val is not None:
                if isinstance(val, bool):
                    return val
                raise EnvironmentException('Cross variable {0} is not a boolean.'.format(varname))
        if self.links(templ.format(prefix, funcname), env, extra_args, dependencies):
            return True
        # Add -O0 to ensure that the symbol isn't optimized away by the compiler
        args = extra_args + self.get_no_optimization_args()
        # Sometimes the implementation is provided by the header, or the header
        # redefines the symbol to be something else. In that case, we want to
        # still detect the function. We still want to fail if __stub_foo or
        # _stub_foo are defined, of course.
        header_templ = '#include <limits.h>\n{0}\n' + stubs_fail + '\nint main() {{ {1}; }}'
        if self.links(header_templ.format(prefix, funcname), env, args, dependencies):
            return True
        # Some functions like alloca() are defined as compiler built-ins which
        # are inlined by the compiler, so test for that instead. Built-ins are
        # special functions that ignore all includes and defines, so we just
        # directly try to link via main().
        return self.links('int main() {{ {0}; }}'.format('__builtin_' + funcname), env, args, dependencies)
def has_members(self, typename, membernames, prefix, env, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
templ = '''{0}
void bar() {{
{1} {2};
{3}
}};
'''
# Create code that accesses all members
members = ''
for m in membernames:
members += 'foo.{};\n'.format(m)
code = templ.format(prefix, typename, 'foo', members)
return self.compiles(code, env, extra_args, dependencies)
def has_type(self, typename, prefix, env, extra_args, dependencies=None):
templ = '''%s
void bar() {
sizeof(%s);
};
'''
return self.compiles(templ % (prefix, typename), env, extra_args, dependencies)
def find_library(self, libname, env, extra_dirs):
# First try if we can just add the library as -l.
code = '''int main(int argc, char **argv) {
return 0;
}
'''
if extra_dirs and isinstance(extra_dirs, str):
extra_dirs = [extra_dirs]
# Gcc + co seem to prefer builtin lib dirs to -L dirs.
# Only try to find std libs if no extra dirs specified.
if len(extra_dirs) == 0:
args = ['-l' + libname]
if self.links(code, env, extra_args=args):
return args
# Not found? Try to find the library file itself.
extra_dirs += self.get_library_dirs()
suffixes = ['so', 'dylib', 'lib', 'dll', 'a']
for d in extra_dirs:
for suffix in suffixes:
trial = os.path.join(d, 'lib' + libname + '.' + suffix)
if os.path.isfile(trial):
return trial
trial2 = os.path.join(d, libname + '.' + suffix)
if os.path.isfile(trial2):
return trial2
return None
def thread_flags(self):
return ['-pthread']
def thread_link_flags(self):
return ['-pthread']
def has_argument(self, arg, env):
return self.compiles('int i;\n', env, extra_args=arg)
class CPPCompiler(CCompiler):
    """C++ compiler; shares nearly all behaviour with CCompiler."""
    def __init__(self, exelist, version, is_cross, exe_wrap):
        # If a child ObjCPP class has already set it, don't set it ourselves
        if not hasattr(self, 'language'):
            self.language = 'cpp'
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
    def sanity_check(self, work_dir, environment):
        # The class declaration is invalid C, so this verifies we are
        # really driving a C++ compiler and not a plain C one.
        code = 'class breakCCompiler;int main(int argc, char **argv) { return 0; }\n'
        return self.sanity_check_impl(work_dir, environment, 'sanitycheckcpp.cc', code)
class ObjCCompiler(CCompiler):
    """Objective-C compiler."""
    def __init__(self, exelist, version, is_cross, exe_wrap):
        self.language = 'objc'
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
    def sanity_check(self, work_dir, environment):
        # TODO try to use sanity_check_impl instead of duplicated code
        source_name = os.path.join(work_dir, 'sanitycheckobjc.m')
        binary_name = os.path.join(work_dir, 'sanitycheckobjc')
        extra_flags = self.get_cross_extra_flags(environment, compile=True, link=False)
        if self.is_cross:
            # Cross binaries can't be executed here, so only compile (-c).
            extra_flags += self.get_compile_only_args()
        with open(source_name, 'w') as ofile:
            ofile.write('#import<stdio.h>\n'
                        'int main(int argc, char **argv) { return 0; }\n')
        pc = subprocess.Popen(self.exelist + extra_flags + [source_name, '-o', binary_name])
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('ObjC compiler %s can not compile programs.' % self.name_string())
        if self.is_cross:
            # Can't check if the binaries run so we have to assume they do
            return
        pe = subprocess.Popen(binary_name)
        pe.wait()
        if pe.returncode != 0:
            raise EnvironmentException('Executables created by ObjC compiler %s are not runnable.' % self.name_string())
class ObjCPPCompiler(CPPCompiler):
    """Objective-C++ compiler."""
    def __init__(self, exelist, version, is_cross, exe_wrap):
        self.language = 'objcpp'
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
    def sanity_check(self, work_dir, environment):
        # TODO try to use sanity_check_impl instead of duplicated code
        source_name = os.path.join(work_dir, 'sanitycheckobjcpp.mm')
        binary_name = os.path.join(work_dir, 'sanitycheckobjcpp')
        extra_flags = self.get_cross_extra_flags(environment, compile=True, link=False)
        if self.is_cross:
            # Cross binaries can't be executed here, so only compile (-c).
            extra_flags += self.get_compile_only_args()
        with open(source_name, 'w') as ofile:
            # The class declaration ensures a C++-capable frontend.
            ofile.write('#import<stdio.h>\n'
                        'class MyClass;'
                        'int main(int argc, char **argv) { return 0; }\n')
        pc = subprocess.Popen(self.exelist + extra_flags + [source_name, '-o', binary_name])
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('ObjC++ compiler %s can not compile programs.' % self.name_string())
        if self.is_cross:
            # Can't check if the binaries run so we have to assume they do
            return
        pe = subprocess.Popen(binary_name)
        pe.wait()
        if pe.returncode != 0:
            raise EnvironmentException('Executables created by ObjC++ compiler %s are not runnable.' % self.name_string())
class MonoCompiler(Compiler):
    """C# compiler driven through the Mono toolchain.

    Produces .exe assemblies; the sanity check executes them with the
    ``mono`` runtime.  Most native-linker hooks return empty lists because
    they do not apply to this compiler.
    """
    def __init__(self, exelist, version):
        self.language = 'cs'
        super().__init__(exelist, version)
        self.id = 'mono'
        # Runtime used to execute the produced assemblies.
        self.monorunner = 'mono'
    def get_output_args(self, fname):
        return ['-out:' + fname]
    def get_link_args(self, fname):
        # Reference another assembly.
        return ['-r:' + fname]
    def get_soname_args(self, shlib_name, path, soversion):
        return []
    def get_werror_args(self):
        return ['-warnaserror']
    def split_shlib_to_parts(self, fname):
        return (None, fname)
    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        return []
    def get_dependency_gen_args(self, outtarget, outfile):
        return []
    def get_default_suffix(self):
        return self.default_suffix
    def get_linker_exelist(self):
        return self.exelist[:]
    def get_compile_only_args(self):
        return []
    def get_linker_output_args(self, outputname):
        return []
    def get_coverage_args(self):
        return []
    def get_coverage_link_args(self):
        return []
    def get_std_exe_link_args(self):
        return []
    def get_include_args(self, path):
        return []
    def get_std_shared_lib_link_args(self):
        return []
    def get_pic_args(self):
        return []
    def name_string(self):
        return ' '.join(self.exelist)
    def get_pch_use_args(self, pch_dir, header):
        return []
    def get_pch_name(self, header_name):
        return ''
    def sanity_check(self, work_dir, environment):
        # Compile a trivial class and run the resulting assembly with mono.
        src = 'sanity.cs'
        obj = 'sanity.exe'
        source_name = os.path.join(work_dir, src)
        with open(source_name, 'w') as ofile:
            ofile.write('''public class Sanity {
    static public void Main () {
    }
}
''')
        pc = subprocess.Popen(self.exelist + [src], cwd=work_dir)
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('Mono compiler %s can not compile programs.' % self.name_string())
        cmdlist = [self.monorunner, obj]
        pe = subprocess.Popen(cmdlist, cwd=work_dir)
        pe.wait()
        if pe.returncode != 0:
            raise EnvironmentException('Executables created by Mono compiler %s are not runnable.' % self.name_string())
    def needs_static_linker(self):
        return False
    def get_buildtype_args(self, buildtype):
        return mono_buildtype_args[buildtype]
class JavaCompiler(Compiler):
    """Java compiler wrapper; the sanity check runs the result with ``java``.

    Most native-linker hooks return empty lists because they do not apply
    to Java class files.
    """
    def __init__(self, exelist, version):
        self.language = 'java'
        super().__init__(exelist, version)
        self.id = 'unknown'
        # Runtime used to execute compiled classes in sanity_check().
        self.javarunner = 'java'
    def get_soname_args(self, shlib_name, path, soversion):
        return []
    def get_werror_args(self):
        return ['-Werror']
    def split_shlib_to_parts(self, fname):
        return (None, fname)
    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        return []
    def get_dependency_gen_args(self, outtarget, outfile):
        return []
    def get_default_suffix(self):
        return self.default_suffix
    def get_linker_exelist(self):
        return self.exelist[:]
    def get_compile_only_args(self):
        return []
    def get_output_args(self, subdir):
        # -d: destination for class files, -s: destination for generated sources.
        if subdir == '':
            subdir = './'
        return ['-d', subdir, '-s', subdir]
    def get_linker_output_args(self, outputname):
        return []
    def get_coverage_args(self):
        return []
    def get_coverage_link_args(self):
        return []
    def get_std_exe_link_args(self):
        return []
    def get_include_args(self, path):
        return []
    def get_std_shared_lib_link_args(self):
        return []
    def get_pic_args(self):
        return []
    def name_string(self):
        return ' '.join(self.exelist)
    def get_pch_use_args(self, pch_dir, header):
        return []
    def get_pch_name(self, header_name):
        return ''
    def get_buildtype_args(self, buildtype):
        return java_buildtype_args[buildtype]
    def sanity_check(self, work_dir, environment):
        # Compile a trivial class and execute it with the java runtime.
        src = 'SanityCheck.java'
        obj = 'SanityCheck'
        source_name = os.path.join(work_dir, src)
        with open(source_name, 'w') as ofile:
            ofile.write('''class SanityCheck {
  public static void main(String[] args) {
    int i;
  }
}
''')
        pc = subprocess.Popen(self.exelist + [src], cwd=work_dir)
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('Java compiler %s can not compile programs.' % self.name_string())
        cmdlist = [self.javarunner, obj]
        pe = subprocess.Popen(cmdlist, cwd=work_dir)
        pe.wait()
        if pe.returncode != 0:
            raise EnvironmentException('Executables created by Java compiler %s are not runnable.' % self.name_string())
    def needs_static_linker(self):
        return False
class ValaCompiler(Compiler):
    """Compiler object for valac, which transpiles Vala into C source."""
    def __init__(self, exelist, version):
        self.language = 'vala'
        super().__init__(exelist, version)
        self.version = version
        self.id = 'valac'
        self.is_cross = False
    def name_string(self):
        return ' '.join(self.exelist)
    def needs_static_linker(self):
        # valac emits C; the C toolchain does the actual linking.
        return False
    def get_werror_args(self):
        return ['--fatal-warnings']
    def sanity_check(self, work_dir, environment):
        """Verify valac can turn a trivial class into C (-C -c, no link)."""
        src = 'valatest.vala'
        source_name = os.path.join(work_dir, src)
        with open(source_name, 'w') as ofile:
            ofile.write('''class SanityCheck : Object {
}
''')
        extra_flags = self.get_cross_extra_flags(environment, compile=True, link=False)
        pc = subprocess.Popen(self.exelist + extra_flags + ['-C', '-c', src], cwd=work_dir)
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('Vala compiler %s can not compile programs.' % self.name_string())
    def get_buildtype_args(self, buildtype):
        # All debug-flavoured build types get debug info.
        if buildtype in ('debug', 'debugoptimized', 'minsize'):
            return ['--debug']
        return []
class RustCompiler(Compiler):
    """Compiler object for rustc."""
    def __init__(self, exelist, version):
        self.language = 'rust'
        super().__init__(exelist, version)
        self.id = 'rustc'
    def needs_static_linker(self):
        return False
    def name_string(self):
        return ' '.join(self.exelist)
    def sanity_check(self, work_dir, environment):
        # Build and run a trivial program.
        source_name = os.path.join(work_dir, 'sanity.rs')
        output_name = os.path.join(work_dir, 'rusttest')
        with open(source_name, 'w') as ofile:
            ofile.write('''fn main() {
}
''')
        pc = subprocess.Popen(self.exelist + ['-o', output_name, source_name], cwd=work_dir)
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('Rust compiler %s can not compile programs.' % self.name_string())
        if subprocess.call(output_name) != 0:
            raise EnvironmentException('Executables created by Rust compiler %s are not runnable.' % self.name_string())
    # NOTE(review): the other compiler classes take (outtarget, outfile)
    # here; this one-argument form only works if callers special-case
    # Rust — verify against the backends.
    def get_dependency_gen_args(self, outfile):
        return ['--dep-info', outfile]
    def get_buildtype_args(self, buildtype):
        return rust_buildtype_args[buildtype]
class SwiftCompiler(Compiler):
    """Compiler object for the Swift compiler."""
    def __init__(self, exelist, version):
        self.language = 'swift'
        super().__init__(exelist, version)
        self.version = version
        self.id = 'llvm'
        self.is_cross = False
    def get_linker_exelist(self):
        return self.exelist[:]
    def name_string(self):
        return ' '.join(self.exelist)
    def needs_static_linker(self):
        return True
    def get_werror_args(self):
        return ['--fatal-warnings']
    def get_dependency_gen_args(self, outtarget, outfile):
        # The output file name is implied by the compiler; both arguments
        # are ignored here.
        return ['-emit-dependencies']
    def depfile_for_object(self, objfile):
        return os.path.splitext(objfile)[0] + '.' + self.get_depfile_suffix()
    def get_depfile_suffix(self):
        return 'd'
    def get_output_args(self, target):
        return ['-o', target]
    def get_linker_output_args(self, target):
        return ['-o', target]
    def get_header_import_args(self, headername):
        # Bridging header for calling Objective-C from Swift.
        return ['-import-objc-header', headername]
    def get_warn_args(self, level):
        return []
    def get_buildtype_args(self, buildtype):
        return swift_buildtype_args[buildtype]
    def get_buildtype_linker_args(self, buildtype):
        return []
    def get_std_exe_link_args(self):
        return ['-emit-executable']
    def get_module_args(self, modname):
        return ['-module-name', modname]
    def get_mod_gen_args(self):
        return ['-emit-module']
    def build_rpath_args(self, *args):
        return [] # FIXME
    def get_include_args(self, dirname):
        return ['-I' + dirname]
    def get_compile_only_args(self):
        return ['-c']
    def sanity_check(self, work_dir, environment):
        # A bare top-level expression is a valid Swift program.
        src = 'swifttest.swift'
        source_name = os.path.join(work_dir, src)
        output_name = os.path.join(work_dir, 'swifttest')
        with open(source_name, 'w') as ofile:
            ofile.write('''1 + 2
''')
        extra_flags = self.get_cross_extra_flags(environment, compile=True, link=True)
        pc = subprocess.Popen(self.exelist + extra_flags + ['-emit-executable', '-o', output_name, src], cwd=work_dir)
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('Swift compiler %s can not compile programs.' % self.name_string())
        if subprocess.call(output_name) != 0:
            raise EnvironmentException('Executables created by Swift compiler %s are not runnable.' % self.name_string())
class DCompiler(Compiler):
    """Base class for D language compilers (GDC, LDC, DMD)."""
    def __init__(self, exelist, version, is_cross):
        self.language = 'd'
        super().__init__(exelist, version)
        self.id = 'unknown'
        self.is_cross = is_cross
    def sanity_check(self, work_dir, environment):
        # Build and run a trivial program.
        source_name = os.path.join(work_dir, 'sanity.d')
        output_name = os.path.join(work_dir, 'dtest')
        with open(source_name, 'w') as ofile:
            ofile.write('''void main() {
}
''')
        pc = subprocess.Popen(self.exelist + self.get_output_args(output_name) + [source_name], cwd=work_dir)
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('D compiler %s can not compile programs.' % self.name_string())
        if subprocess.call(output_name) != 0:
            raise EnvironmentException('Executables created by D compiler %s are not runnable.' % self.name_string())
    def needs_static_linker(self):
        return True
    def name_string(self):
        return ' '.join(self.exelist)
    def get_linker_exelist(self):
        return self.exelist[:]
    def depfile_for_object(self, objfile):
        return objfile + '.' + self.get_depfile_suffix()
    def get_depfile_suffix(self):
        return 'dep'
    def get_pic_args(self):
        return ['-fPIC']
    def get_std_shared_lib_link_args(self):
        return ['-shared']
    def get_soname_args(self, shlib_name, path, soversion):
        return []
    def get_unittest_args(self):
        return ['-unittest']
    def get_buildtype_linker_args(self, buildtype):
        return []
    def get_std_exe_link_args(self):
        return []
    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        # This method is to be used by LDC and DMD.
        # GDC can deal with the verbatim flags.
        if len(rpath_paths) == 0 and len(install_rpath) == 0:
            return []
        paths = ':'.join([os.path.join(build_dir, p) for p in rpath_paths])
        # Pad the build-time rpath to at least the length of install_rpath —
        # presumably so it can be rewritten in place at install time without
        # growing the binary. TODO(review): confirm.
        if len(paths) < len(install_rpath):
            padding = 'X'*(len(install_rpath) - len(paths))
            if len(paths) == 0:
                paths = padding
            else:
                paths = paths + ':' + padding
        return ['-L-rpath={}'.format(paths)]
    def translate_args_to_nongnu(self, args):
        dcargs = []
        # Translate common arguments to flags the LDC/DMD compilers
        # can understand.
        # The flags might have been added by pkg-config files,
        # and are therefore out of the user's control.
        for arg in args:
            if arg == '-pthread':
                # Dropped: not understood by LDC/DMD.
                continue
            if arg.startswith('-Wl,'):
                # Forward each comma-separated linker flag individually via -L.
                linkargs = arg[arg.index(',')+1:].split(',')
                for la in linkargs:
                    dcargs.append('-L' + la.strip())
                continue
            elif arg.startswith('-l'):
                # translate library link flag
                dcargs.append('-L' + arg)
                continue
            dcargs.append(arg)
        return dcargs
class GnuDCompiler(DCompiler):
    """GDC, the GCC-based D compiler; accepts GCC-style arguments."""
    def __init__(self, exelist, version, is_cross):
        DCompiler.__init__(self, exelist, version, is_cross)
        self.id = 'gcc'
        self.warn_args = {'1': ['-Wall', '-Wdeprecated'],
                          '2': ['-Wall', '-Wextra', '-Wdeprecated'],
                          '3': ['-Wall', '-Wextra', '-Wdeprecated', '-Wpedantic']}
        self.base_options = ['b_colorout', 'b_sanitize']
    def get_colorout_args(self, colortype):
        # Colored diagnostics require GCC >= 4.9.
        if mesonlib.version_compare(self.version, '>=4.9.0'):
            return gnu_color_args[colortype][:]
        return []
    def get_dependency_gen_args(self, outtarget, outfile):
        # FIXME: Passing -fmake-deps results in a file-not-found message.
        # Investigate why.
        return []
    def get_output_args(self, target):
        return ['-o', target]
    def get_compile_only_args(self):
        return ['-c']
    def get_linker_output_args(self, target):
        return ['-o', target]
    def get_include_args(self, path, is_system):
        return ['-I' + path]
    def get_warn_args(self, level):
        return self.warn_args[level]
    def get_werror_args(self):
        return ['-Werror']
    def get_linker_search_args(self, dirname):
        return ['-L'+dirname]
    def get_buildtype_args(self, buildtype):
        return d_gdc_buildtype_args[buildtype]
    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        # GDC understands the standard GNU rpath flags, unlike LDC/DMD.
        return build_unix_rpath_args(build_dir, rpath_paths, install_rpath)
    def get_unittest_args(self):
        return ['-funittest']
class LLVMDCompiler(DCompiler):
    """LDC, the LLVM-based D compiler."""
    def __init__(self, exelist, version, is_cross):
        DCompiler.__init__(self, exelist, version, is_cross)
        self.id = 'llvm'
        self.base_options = ['b_coverage', 'b_colorout']
    def get_colorout_args(self, colortype):
        if colortype == 'always':
            return ['-enable-color']
        return []
    def get_dependency_gen_args(self, outtarget, outfile):
        # LDC using the -deps flag returns a non-Makefile dependency-info file, which
        # the backends can not use. So we disable this feature for now.
        return []
    def get_output_args(self, target):
        return ['-of', target]
    def get_compile_only_args(self):
        return ['-c']
    def get_linker_output_args(self, target):
        return ['-of', target]
    def get_include_args(self, path, is_system):
        return ['-I' + path]
    def get_warn_args(self, level):
        if level == '2':
            return ['-wi']
        else:
            return ['-w']
    def get_coverage_args(self):
        return ['-cov']
    def get_buildtype_args(self, buildtype):
        return d_ldc_buildtype_args[buildtype]
    def get_pic_args(self):
        return ['-relocation-model=pic']
    def get_linker_search_args(self, dirname):
        # -L is recognized as "add this to the search path" by the linker,
        # while the compiler recognizes it as "pass to linker". So, the first
        # -L is for the compiler, telling it to pass the second -L to the linker.
        return ['-L-L'+dirname]
    def unix_link_flags_to_native(self, args):
        return self.translate_args_to_nongnu(args)
    def unix_compile_flags_to_native(self, args):
        return self.translate_args_to_nongnu(args)
class DmdDCompiler(DCompiler):
    """DMD, the reference D compiler."""
    def __init__(self, exelist, version, is_cross):
        DCompiler.__init__(self, exelist, version, is_cross)
        self.id = 'dmd'
        self.base_options = ['b_coverage', 'b_colorout']
    def get_colorout_args(self, colortype):
        if colortype == 'always':
            return ['-color=on']
        return []
    def get_dependency_gen_args(self, outtarget, outfile):
        # LDC using the -deps flag returns a non-Makefile dependency-info file, which
        # the backends can not use. So we disable this feature for now.
        return []
    def get_output_args(self, target):
        # Note: DMD takes -of<target> fused, unlike LDC's separate argument.
        return ['-of' + target]
    def get_werror_args(self):
        return ['-w']
    def get_compile_only_args(self):
        return ['-c']
    def get_linker_output_args(self, target):
        return ['-of' + target]
    def get_include_args(self, path, is_system):
        return ['-I' + path]
    def get_warn_args(self, level):
        return []
    def get_coverage_args(self):
        return ['-cov']
    def get_linker_search_args(self, dirname):
        # -L is recognized as "add this to the search path" by the linker,
        # while the compiler recognizes it as "pass to linker". So, the first
        # -L is for the compiler, telling it to pass the second -L to the linker.
        return ['-L-L'+dirname]
    def get_buildtype_args(self, buildtype):
        return d_dmd_buildtype_args[buildtype]
    def get_std_shared_lib_link_args(self):
        return ['-shared', '-defaultlib=libphobos2.so']
    def unix_link_flags_to_native(self, args):
        return self.translate_args_to_nongnu(args)
    def unix_compile_flags_to_native(self, args):
        return self.translate_args_to_nongnu(args)
class VisualStudioCCompiler(CCompiler):
    """MSVC C compiler (cl.exe); translates GNU-style flags to MSVC syntax."""
    std_warn_args = ['/W3']
    std_opt_args= ['/O2']
    def __init__(self, exelist, version, is_cross, exe_wrap):
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
        self.id = 'msvc'
        # /showIncludes is parsed for dependency tracking in lieu of -MMD.
        self.always_args = ['/nologo', '/showIncludes']
        self.warn_args = {'1': ['/W2'],
                          '2': ['/W3'],
                          '3': ['/W4']}
        self.base_options = ['b_pch'] # FIXME add lto, pgo and the like
    def get_always_args(self):
        return self.always_args
    def get_linker_debug_crt_args(self):
        """
        Arguments needed to select a debug crt for the linker
        Sometimes we need to manually select the CRT (C runtime) to use with
        MSVC. One example is when trying to link with static libraries since
        MSVC won't auto-select a CRT for us in that case and will error out
        asking us to select one.
        """
        return ['/MDd']
    def get_buildtype_args(self, buildtype):
        return msvc_buildtype_args[buildtype]
    def get_buildtype_linker_args(self, buildtype):
        return msvc_buildtype_linker_args[buildtype]
    def get_pch_suffix(self):
        return 'pch'
    def get_pch_name(self, header):
        # Replace the header's extension with .pch, e.g. foo.h -> foo.pch.
        chopped = os.path.split(header)[-1].split('.')[:-1]
        chopped.append(self.get_pch_suffix())
        pchname = '.'.join(chopped)
        return pchname
    def get_pch_use_args(self, pch_dir, header):
        base = os.path.split(header)[-1]
        pchname = self.get_pch_name(header)
        return ['/FI' + base, '/Yu' + base, '/Fp' + os.path.join(pch_dir, pchname)]
    def get_compile_only_args(self):
        return ['/c']
    def get_no_optimization_args(self):
        return ['/Od']
    def get_output_args(self, target):
        # /Fe names an executable, /Fo an object file.
        if target.endswith('.exe'):
            return ['/Fe' + target]
        return ['/Fo' + target]
    def get_dependency_gen_args(self, outtarget, outfile):
        return []
    def get_linker_exelist(self):
        return ['link'] # FIXME, should have same path as compiler.
    def get_linker_always_args(self):
        return ['/nologo']
    def get_linker_output_args(self, outputname):
        return ['/OUT:' + outputname]
    def get_linker_search_args(self, dirname):
        return ['/LIBPATH:' + dirname]
    def get_pic_args(self):
        return [] # PIC is handled by the loader on Windows
    def get_std_shared_lib_link_args(self):
        return ['/DLL']
    def gen_vs_module_defs_args(self, defsfile):
        if not isinstance(defsfile, str):
            raise RuntimeError('Module definitions file should be str')
        # With MSVC, DLLs only export symbols that are explicitly exported,
        # so if a module defs file is specified, we use that to export symbols
        return ['/DEF:' + defsfile]
    def gen_pch_args(self, header, source, pchname):
        objname = os.path.splitext(pchname)[0] + '.obj'
        return (objname, ['/Yc' + header, '/Fp' + pchname, '/Fo' + objname ])
    def gen_import_library_args(self, implibname):
        "The name of the outputted import library"
        return ['/IMPLIB:' + implibname]
    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        # rpath is a Unix concept; nothing to do on Windows.
        return []
    # FIXME, no idea what these should be.
    def thread_flags(self):
        return []
    def thread_link_flags(self):
        return []
    def get_options(self):
        return {'c_winlibs' : coredata.UserStringArrayOption('c_winlibs',
                                                             'Windows libs to link against.',
                                                             msvc_winlibs)
                }
    def get_option_link_args(self, options):
        return options['c_winlibs'].value[:]
    def unix_link_flags_to_native(self, args):
        result = []
        for i in args:
            if i.startswith('-L'):
                i = '/LIBPATH:' + i[2:]
            # Translate GNU-style -lfoo library name to the import library
            elif i.startswith('-l'):
                name = i[2:]
                if name in ('m', 'c'):
                    # With MSVC, these are provided by the C runtime which is
                    # linked in by default
                    continue
                else:
                    i = name + '.lib'
            result.append(i)
        return result
    def unix_compile_flags_to_native(self, args):
        result = []
        for i in args:
            # -mms-bitfields is specific to MinGW-GCC
            if i == '-mms-bitfields':
                continue
            result.append(i)
        return result
    def get_include_args(self, path, is_system):
        if path == '':
            path = '.'
        # msvc does not have a concept of system header dirs.
        return ['-I' + path]
    # Visual Studio is special. It ignores arguments it does not
    # understand and you can't tell it to error out on those.
    # http://stackoverflow.com/questions/15259720/how-can-i-make-the-microsoft-c-compiler-treat-unknown-flags-as-errors-rather-t
    def has_argument(self, arg, env):
        # D9002 is the "ignoring unknown option" warning; if it appears in
        # the output, the flag is not supported.
        warning_text = b'9002'
        code = 'int i;\n'
        (fd, srcname) = tempfile.mkstemp(suffix='.'+self.default_suffix)
        os.close(fd)
        with open(srcname, 'w') as ofile:
            ofile.write(code)
        # Read c_args/cpp_args/etc from the cross-info file (if needed)
        extra_args = self.get_cross_extra_flags(env, compile=True, link=False)
        extra_args += self.get_compile_only_args()
        commands = self.exelist + [arg] + extra_args + [srcname]
        mlog.debug('Running VS compile:')
        mlog.debug('Command line: ', ' '.join(commands))
        mlog.debug('Code:\n', code)
        p = subprocess.Popen(commands, cwd=os.path.split(srcname)[0], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE(review): Popen.communicate() returns (stdout, stderr), so
        # these names are swapped; harmless here because both streams are
        # searched below, but the names are misleading.
        (stde, stdo) = p.communicate()
        if p.returncode != 0:
            raise MesonException('Compiling test app failed.')
        return not(warning_text in stde or warning_text in stdo)
    def get_compile_debugfile_args(self, rel_obj):
        # Emit debug info to foo.pdb next to foo.obj.
        pdbarr = rel_obj.split('.')[:-1]
        pdbarr += ['pdb']
        return ['/Fd' + '.'.join(pdbarr)]
    def get_link_debugfile_args(self, targetfile):
        pdbarr = targetfile.split('.')[:-1]
        pdbarr += ['pdb']
        return ['/DEBUG', '/PDB:' + '.'.join(pdbarr)]
class VisualStudioCPPCompiler(VisualStudioCCompiler):
    """C++ variant of the MSVC compiler wrapper."""
    def __init__(self, exelist, version, is_cross, exe_wrap):
        self.language = 'cpp'
        VisualStudioCCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
        self.default_suffix = 'cpp'
        self.base_options = ['b_pch'] # FIXME add lto, pgo and the like

    def get_options(self):
        eh = coredata.UserComboOption('cpp_eh',
                                      'C++ exception handling type.',
                                      ['none', 'a', 's', 'sc'],
                                      'sc')
        winlibs = coredata.UserStringArrayOption('cpp_winlibs',
                                                 'Windows libs to link against.',
                                                 msvc_winlibs)
        return {'cpp_eh': eh, 'cpp_winlibs': winlibs}

    def get_option_compile_args(self, options):
        # Map the cpp_eh option to the matching /EH compiler switch.
        eh = options['cpp_eh']
        if eh.value == 'none':
            return []
        return ['/EH' + eh.value]

    def get_option_link_args(self, options):
        # Copy so callers cannot mutate the stored option value.
        return list(options['cpp_winlibs'].value)
# Enumeration of GCC-flavour toolchain targets.
GCC_STANDARD = 0
GCC_OSX = 1
GCC_MINGW = 2
# Enumeration of Clang-flavour toolchain targets.
CLANG_STANDARD = 0
CLANG_OSX = 1
CLANG_WIN = 2
# Possibly clang-cl?

def get_gcc_soname_args(gcc_type, shlib_name, path, soversion):
    """Return the linker arguments that embed a shared library's soname."""
    suffix = '' if soversion is None else '.' + soversion
    if gcc_type in (GCC_STANDARD, GCC_MINGW):
        # Might not be correct for mingw but seems to work.
        return ['-Wl,-soname,lib%s.so%s' % (shlib_name, suffix)]
    if gcc_type == GCC_OSX:
        return ['-install_name', os.path.join(path, 'lib' + shlib_name + '.dylib')]
    raise RuntimeError('Not implemented yet.')
class GnuCompiler:
    """Functionality that is common to all GNU family compilers."""
    def __init__(self, gcc_type, defines):
        self.id = 'gcc'
        self.gcc_type = gcc_type
        # Pre-detected preprocessor defines; never None so lookups are safe.
        self.defines = defines or {}
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
                             'b_colorout', 'b_ndebug']
        if self.gcc_type != GCC_OSX:
            self.base_options.append('b_lundef')
            self.base_options.append('b_asneeded')

    def get_colorout_args(self, colortype):
        # Colored diagnostics were added in GCC 4.9.
        if mesonlib.version_compare(self.version, '>=4.9.0'):
            return gnu_color_args[colortype][:]
        return []

    def get_warn_args(self, level):
        args = super().get_warn_args(level)
        if mesonlib.version_compare(self.version, '<4.8.0') and '-Wpedantic' in args:
            # -Wpedantic was added in 4.8.0
            # https://gcc.gnu.org/gcc-4.8/changes.html
            args[args.index('-Wpedantic')] = '-pedantic'
        return args

    def has_define(self, define):
        """Return True if the compiler pre-defines `define`."""
        return define in self.defines

    def get_define(self, define):
        """Return the value of a pre-defined macro, or None if absent."""
        # BUG FIX: previously read the bare name `defines`, which raised
        # NameError at call time; the detected defines live on the instance.
        if define in self.defines:
            return self.defines[define]

    def get_pic_args(self):
        if self.gcc_type == GCC_MINGW:
            return [] # On Window gcc defaults to fpic being always on.
        return ['-fPIC']

    def get_buildtype_args(self, buildtype):
        return gnulike_buildtype_args[buildtype]

    def get_buildtype_linker_args(self, buildtype):
        return gnulike_buildtype_linker_args[buildtype]

    def get_always_args(self):
        return ['-pipe']

    def get_pch_suffix(self):
        return 'gch'

    def split_shlib_to_parts(self, fname):
        return (os.path.split(fname)[0], fname)

    def get_soname_args(self, shlib_name, path, soversion):
        return get_gcc_soname_args(self.gcc_type, shlib_name, path, soversion)
class GnuCCompiler(GnuCompiler, CCompiler):
    """GNU C compiler (gcc)."""
    def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None):
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        GnuCompiler.__init__(self, gcc_type, defines)
        # Gcc can do asm, too.
        self.can_compile_suffixes.add('s')
        self.warn_args = {'1': ['-Wall', '-Winvalid-pch'],
                          '2': ['-Wall', '-Wextra', '-Winvalid-pch'],
                          '3': ['-Wall', '-Wpedantic', '-Wextra', '-Winvalid-pch']}

    def get_options(self):
        std_opt = coredata.UserComboOption('c_std', 'C language standard to use',
                                           ['none', 'c89', 'c99', 'c11', 'gnu89', 'gnu99', 'gnu11'],
                                           'none')
        opts = {'c_std': std_opt}
        if self.gcc_type == GCC_MINGW:
            # Windows targets additionally expose the default link libraries.
            opts['c_winlibs'] = coredata.UserStringArrayOption(
                'c_winlibs', 'Standard Win libraries to link against', gnu_winlibs)
        return opts

    def get_option_compile_args(self, options):
        chosen = options['c_std'].value
        return [] if chosen == 'none' else ['-std=' + chosen]

    def get_option_link_args(self, options):
        if self.gcc_type != GCC_MINGW:
            return []
        return options['c_winlibs'].value
class GnuCPPCompiler(GnuCompiler, CPPCompiler):
    """GNU C++ compiler (g++)."""
    def __init__(self, exelist, version, gcc_type, is_cross, exe_wrap, defines):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
        GnuCompiler.__init__(self, gcc_type, defines)
        self.warn_args = {'1': ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor'],
                          '2': ['-Wall', '-Wextra', '-Winvalid-pch', '-Wnon-virtual-dtor'],
                          '3': ['-Wall', '-Wpedantic', '-Wextra', '-Winvalid-pch', '-Wnon-virtual-dtor']}

    def get_options(self):
        opts = {'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                                    ['none', 'c++03', 'c++11', 'c++14', 'c++1z',
                                                     'gnu++03', 'gnu++11', 'gnu++14', 'gnu++1z'],
                                                    'none'),
                'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',
                                                           'STL debug mode',
                                                           False)}
        if self.gcc_type == GCC_MINGW:
            # BUG FIX: the option object was registered with the name
            # 'c_winlibs' (copy-paste from the C compiler) while being stored
            # under the 'cpp_winlibs' key; the names now match.
            opts.update({
                'cpp_winlibs': coredata.UserStringArrayOption('cpp_winlibs', 'Standard Win libraries to link against',
                                                              gnu_winlibs),
            })
        return opts

    def get_option_compile_args(self, options):
        args = []
        std = options['cpp_std']
        if std.value != 'none':
            args.append('-std=' + std.value)
        if options['cpp_debugstl'].value:
            args.append('-D_GLIBCXX_DEBUG=1')
        return args

    def get_option_link_args(self, options):
        if self.gcc_type == GCC_MINGW:
            return options['cpp_winlibs'].value
        return []
class GnuObjCCompiler(GnuCompiler,ObjCCompiler):
    """GNU Objective-C compiler (gcc driving ObjC sources)."""
    def __init__(self, exelist, version, is_cross, exe_wrapper=None, defines=None):
        ObjCCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        # Not really correct, but GNU objc is only used on non-OSX non-win. File a bug
        # if this breaks your use case.
        GnuCompiler.__init__(self, GCC_STANDARD, defines)
        # Warning flags keyed by warning level '1'..'3'.
        self.warn_args = {'1': ['-Wall', '-Winvalid-pch'],
                          '2': ['-Wall', '-Wextra', '-Winvalid-pch'],
                          '3' : ['-Wall', '-Wpedantic', '-Wextra', '-Winvalid-pch']}
class GnuObjCPPCompiler(GnuCompiler, ObjCPPCompiler):
    """GNU Objective-C++ compiler."""
    def __init__(self, exelist, version, is_cross, exe_wrapper=None, defines=None):
        ObjCPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        # Not really correct, but GNU objc is only used on non-OSX non-win. File a bug
        # if this breaks your use case.
        GnuCompiler.__init__(self, GCC_STANDARD, defines)
        # Same levels as the C++ compiler: ObjC++ also gets -Wnon-virtual-dtor.
        self.warn_args = {'1': ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor'],
                          '2': ['-Wall', '-Wextra', '-Winvalid-pch', '-Wnon-virtual-dtor'],
                          '3' : ['-Wall', '-Wpedantic', '-Wextra', '-Winvalid-pch', '-Wnon-virtual-dtor']}
class ClangCompiler():
    """Functionality common to all Clang compilers."""
    def __init__(self, clang_type):
        self.id = 'clang'
        self.clang_type = clang_type
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
                             'b_ndebug']
        if self.clang_type != CLANG_OSX:
            self.base_options += ['b_lundef', 'b_asneeded']

    def get_buildtype_args(self, buildtype):
        return gnulike_buildtype_args[buildtype]

    def get_buildtype_linker_args(self, buildtype):
        return gnulike_buildtype_linker_args[buildtype]

    def get_pch_suffix(self):
        return 'pch'

    def get_pch_use_args(self, pch_dir, header):
        # Workaround for Clang bug http://llvm.org/bugs/show_bug.cgi?id=15136
        # This flag is internal to Clang (or at least not documented on the man page)
        # so it might change semantics at any time.
        return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]
class ClangCCompiler(ClangCompiler, CCompiler):
    """Clang C compiler."""
    def __init__(self, exelist, version, clang_type, is_cross, exe_wrapper=None):
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        ClangCompiler.__init__(self, clang_type)
        # Clang can do asm, too.
        self.can_compile_suffixes.add('s')
        self.warn_args = {'1': ['-Wall', '-Winvalid-pch'],
                          '2': ['-Wall', '-Wextra', '-Winvalid-pch'],
                          '3': ['-Wall', '-Wpedantic', '-Wextra', '-Winvalid-pch']}

    def get_options(self):
        std_opt = coredata.UserComboOption('c_std', 'C language standard to use',
                                           ['none', 'c89', 'c99', 'c11'],
                                           'none')
        return {'c_std': std_opt}

    def get_option_compile_args(self, options):
        chosen = options['c_std'].value
        return [] if chosen == 'none' else ['-std=' + chosen]

    def get_option_link_args(self, options):
        return []

    def has_argument(self, arg, env):
        # Clang silently accepts unknown warning flags unless told to error.
        return super().has_argument(['-Werror=unknown-warning-option', arg], env)
class ClangCPPCompiler(ClangCompiler, CPPCompiler):
    """Clang C++ compiler."""
    def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):
        CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        ClangCompiler.__init__(self, cltype)
        self.warn_args = {'1': ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor'],
                          '2': ['-Wall', '-Wextra', '-Winvalid-pch', '-Wnon-virtual-dtor'],
                          '3': ['-Wall', '-Wpedantic', '-Wextra', '-Winvalid-pch', '-Wnon-virtual-dtor']}

    def get_options(self):
        std_opt = coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                           ['none', 'c++03', 'c++11', 'c++14', 'c++1z'],
                                           'none')
        return {'cpp_std': std_opt}

    def get_option_compile_args(self, options):
        chosen = options['cpp_std'].value
        return [] if chosen == 'none' else ['-std=' + chosen]

    def get_option_link_args(self, options):
        return []

    def has_argument(self, arg, env):
        # Clang silently accepts unknown warning flags unless told to error.
        return super().has_argument(['-Werror=unknown-warning-option', arg], env)
class ClangObjCCompiler(GnuObjCCompiler):
    """Clang Objective-C compiler; reuses the GNU ObjC warn-arg setup."""
    def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):
        super().__init__(exelist, version, is_cross, exe_wrapper)
        self.id = 'clang'
        # Override the GNU base options with Clang's set.
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage']
        self.clang_type = cltype
        if self.clang_type != CLANG_OSX:
            self.base_options.append('b_lundef')
            self.base_options.append('b_asneeded')
class ClangObjCPPCompiler(GnuObjCPPCompiler):
    """Clang Objective-C++ compiler; reuses the GNU ObjC++ warn-arg setup."""
    def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None):
        super().__init__(exelist, version, is_cross, exe_wrapper)
        self.id = 'clang'
        self.clang_type = cltype
        # Override the GNU base options with Clang's set.
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage']
        if self.clang_type != CLANG_OSX:
            self.base_options.append('b_lundef')
            self.base_options.append('b_asneeded')
class FortranCompiler(Compiler):
    """Common functionality for all Fortran compilers."""
    # Default warning flags; subclasses override with their own list.
    # BUG FIX: this attribute was previously missing entirely, so
    # get_std_warn_args (which reads FortranCompiler.std_warn_args)
    # raised AttributeError whenever it was called.
    std_warn_args = []

    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        self.language = 'fortran'
        super().__init__(exelist, version)
        self.is_cross = is_cross
        self.exe_wrapper = exe_wrapper
        # Not really correct but I don't have Fortran compilers to test with. Sorry.
        self.gcc_type = GCC_STANDARD
        self.id = "IMPLEMENTATION CLASSES MUST SET THIS"

    def name_string(self):
        return ' '.join(self.exelist)

    def get_pic_args(self):
        if self.gcc_type == GCC_MINGW:
            return [] # On Windows gcc defaults to fpic being always on.
        return ['-fPIC']

    def get_std_shared_lib_link_args(self):
        return ['-shared']

    def needs_static_linker(self):
        return True

    def sanity_check(self, work_dir, environment):
        """Compile (and, when possible, run) a trivial program to verify
        the compiler works; raises EnvironmentException on failure."""
        source_name = os.path.join(work_dir, 'sanitycheckf.f90')
        binary_name = os.path.join(work_dir, 'sanitycheckf')
        with open(source_name, 'w') as ofile:
            ofile.write('''program prog
print *, "Fortran compilation is working."
end program prog
''')
        extra_flags = self.get_cross_extra_flags(environment, compile=True, link=True)
        pc = subprocess.Popen(self.exelist + extra_flags + [source_name, '-o', binary_name])
        pc.wait()
        if pc.returncode != 0:
            raise EnvironmentException('Compiler %s can not compile programs.' % self.name_string())
        if self.is_cross:
            if self.exe_wrapper is None:
                # Can't check if the binaries run so we have to assume they do
                return
            cmdlist = self.exe_wrapper + [binary_name]
        else:
            cmdlist = [binary_name]
        pe = subprocess.Popen(cmdlist, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        pe.wait()
        if pe.returncode != 0:
            raise EnvironmentException('Executables created by Fortran compiler %s are not runnable.' % self.name_string())

    def get_std_warn_args(self, level):
        return FortranCompiler.std_warn_args

    def get_buildtype_args(self, buildtype):
        return gnulike_buildtype_args[buildtype]

    def get_buildtype_linker_args(self, buildtype):
        return gnulike_buildtype_linker_args[buildtype]

    def split_shlib_to_parts(self, fname):
        return (os.path.split(fname)[0], fname)

    def get_soname_args(self, shlib_name, path, soversion):
        return get_gcc_soname_args(self.gcc_type, shlib_name, path, soversion)

    def get_dependency_gen_args(self, outtarget, outfile):
        # Disabled until this is fixed:
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=62162
        #return ['-cpp', '-MMD', '-MQ', outtarget]
        return []

    def get_output_args(self, target):
        return ['-o', target]

    def get_compile_only_args(self):
        return ['-c']

    def get_linker_exelist(self):
        return self.exelist[:]

    def get_linker_output_args(self, outputname):
        return ['-o', outputname]

    def get_include_args(self, path, is_system):
        return ['-I' + path]

    def get_module_outdir_args(self, path):
        # Where gfortran-style compilers place .mod files.
        return ['-J' + path]

    def depfile_for_object(self, objfile):
        return objfile + '.' + self.get_depfile_suffix()

    def get_depfile_suffix(self):
        return 'd'

    def get_std_exe_link_args(self):
        return []

    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        return build_unix_rpath_args(build_dir, rpath_paths, install_rpath)

    def module_name_to_filename(self, module_name):
        # Fortran module names are case-insensitive; files are lowercase.
        return module_name.lower() + '.mod'

    def get_warn_args(self, level):
        return ['-Wall']
class GnuFortranCompiler(FortranCompiler):
    """GNU Fortran compiler (gfortran)."""
    def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None):
        # BUG FIX: the exe_wrapper argument was previously discarded
        # (hard-coded to None in the super call), which broke running
        # cross-compiled sanity-check binaries.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.gcc_type = gcc_type
        self.defines = defines or {}
        self.id = 'gcc'

    def has_define(self, define):
        """Return True if the compiler pre-defines `define`."""
        return define in self.defines

    def get_define(self, define):
        """Return the value of a pre-defined macro, or None if absent."""
        # BUG FIX: previously read the bare name `defines` (NameError);
        # the detected defines live on the instance.
        if define in self.defines:
            return self.defines[define]

    def get_always_args(self):
        return ['-pipe']

    def gen_import_library_args(self, implibname):
        """
        The name of the outputted import library
        Used only on Windows
        """
        return ['-Wl,--out-implib=' + implibname]
class G95FortranCompiler(FortranCompiler):
    """G95 Fortran compiler."""
    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        # BUG FIX: pass the caller's exe_wrapper through instead of
        # discarding it with a hard-coded None.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.id = 'g95'

    def get_module_outdir_args(self, path):
        return ['-fmod='+path]

    def get_always_args(self):
        return ['-pipe']

    def gen_import_library_args(self, implibname):
        """
        The name of the outputted import library
        Used only on Windows
        """
        return ['-Wl,--out-implib=' + implibname]
class SunFortranCompiler(FortranCompiler):
    """Oracle/Sun Studio Fortran compiler."""
    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        # BUG FIX: pass the caller's exe_wrapper through instead of
        # discarding it with a hard-coded None.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.id = 'sun'

    def get_dependency_gen_args(self, outtarget, outfile):
        return ['-fpp']

    def get_always_args(self):
        return []

    def get_warn_args(self, level):
        # BUG FIX: the `level` parameter was missing, so calls matching the
        # base-class signature get_warn_args(level) raised TypeError.
        return []

    def get_module_outdir_args(self, path):
        return ['-moddir='+path]
class IntelFortranCompiler(FortranCompiler):
    """Intel Fortran compiler (ifort)."""
    std_warn_args = ['-warn', 'all']

    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        self.file_suffixes = ('f', 'f90')
        # BUG FIX: pass the caller's exe_wrapper through instead of
        # discarding it with a hard-coded None.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.id = 'intel'

    def get_module_outdir_args(self, path):
        return ['-module', path]

    def get_warn_args(self, level):
        return IntelFortranCompiler.std_warn_args
class PathScaleFortranCompiler(FortranCompiler):
    """PathScale Fortran compiler."""
    std_warn_args = ['-fullwarn']

    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        # BUG FIX: pass the caller's exe_wrapper through instead of
        # discarding it with a hard-coded None.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.id = 'pathscale'

    def get_module_outdir_args(self, path):
        return ['-module', path]

    def get_std_warn_args(self, level):
        return PathScaleFortranCompiler.std_warn_args
class PGIFortranCompiler(FortranCompiler):
    """PGI Fortran compiler."""
    std_warn_args = ['-Minform=inform']

    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        # BUG FIX: pass the caller's exe_wrapper through instead of
        # discarding it with a hard-coded None.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.id = 'pgi'

    def get_module_outdir_args(self, path):
        return ['-module', path]

    def get_warn_args(self, level):
        return PGIFortranCompiler.std_warn_args
class Open64FortranCompiler(FortranCompiler):
    """Open64 Fortran compiler."""
    std_warn_args = ['-fullwarn']

    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        # BUG FIX: pass the caller's exe_wrapper through instead of
        # discarding it with a hard-coded None.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.id = 'open64'

    def get_module_outdir_args(self, path):
        return ['-module', path]

    def get_warn_args(self, level):
        return Open64FortranCompiler.std_warn_args
class NAGFortranCompiler(FortranCompiler):
    """NAG Fortran compiler (nagfor)."""
    std_warn_args = []

    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        # BUG FIX: pass the caller's exe_wrapper through instead of
        # discarding it with a hard-coded None.
        super().__init__(exelist, version, is_cross, exe_wrapper=exe_wrapper)
        self.id = 'nagfor'

    def get_module_outdir_args(self, path):
        return ['-mdir', path]

    def get_always_args(self):
        return []

    def get_warn_args(self, level):
        return NAGFortranCompiler.std_warn_args
class VisualStudioLinker():
    """Wrapper around Microsoft's lib.exe static linker."""
    always_args = ['/NOLOGO']

    def __init__(self, exelist):
        self.exelist = exelist

    def get_exelist(self):
        # Copy so callers cannot mutate our stored command.
        return list(self.exelist)

    def get_std_link_args(self):
        return []

    def get_buildtype_linker_args(self, buildtype):
        return []

    def get_output_args(self, target):
        return ['/OUT:' + target]

    def get_coverage_link_args(self):
        return []

    def get_always_args(self):
        return VisualStudioLinker.always_args

    def get_linker_always_args(self):
        return VisualStudioLinker.always_args

    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        # PE binaries have no rpath concept.
        return []

    def thread_link_flags(self):
        return []

    def get_option_link_args(self, options):
        return []

    def unix_link_flags_to_native(self, args):
        return args[:]

    def unix_compile_flags_to_native(self, args):
        return args[:]

    def get_link_debugfile_args(self, targetfile):
        # Name the .pdb after the link target (foo.dll -> foo.pdb).
        pdbname = '.'.join(targetfile.split('.')[:-1] + ['pdb'])
        return ['/DEBUG', '/PDB:' + pdbname]
class ArLinker():
    """Wrapper around the Unix `ar` static archiver."""
    def __init__(self, exelist):
        self.exelist = exelist
        self.id = 'ar'
        pc = subprocess.Popen(self.exelist + ['-h'], stdout=subprocess.PIPE,
                              stderr=subprocess.DEVNULL)
        (stdo, _) = pc.communicate()
        # Enable deterministic builds if they are available.
        self.std_args = ['csrD'] if b'[D]' in stdo else ['csr']

    def build_rpath_args(self, build_dir, rpath_paths, install_rpath):
        return []

    def get_exelist(self):
        return list(self.exelist)

    def get_std_link_args(self):
        return self.std_args

    def get_output_args(self, target):
        return [target]

    def get_buildtype_linker_args(self, buildtype):
        return []

    def get_linker_always_args(self):
        return []

    def get_coverage_link_args(self):
        return []

    def get_always_args(self):
        return []

    def thread_link_flags(self):
        return []

    def get_option_link_args(self, options):
        return []

    def unix_link_flags_to_native(self, args):
        return args[:]

    def unix_compile_flags_to_native(self, args):
        return args[:]

    def get_link_debugfile_args(self, targetfile):
        return []
|
# URL configuration for the waffle test application.
from django.conf.urls import url, include
from django.contrib import admin
from django.http import HttpResponseNotFound, HttpResponseServerError
from test_app import views
# Minimal error handlers so 404/500 responses need no templates.
handler404 = lambda r: HttpResponseNotFound()
handler500 = lambda r: HttpResponseServerError()
admin.autodiscover()
# NOTE: patterns are matched in order; the catch-all waffle include (r'^')
# must stay after the specific test routes.
urlpatterns = [
    url(r'^flag_in_view', views.flag_in_view, name='flag_in_view'),
    url(r'^switch-on', views.switched_view),
    url(r'^switch-off', views.switched_off_view),
    url(r'^flag-on', views.flagged_view),
    url(r'^foo_view', views.foo_view, name='foo_view'),
    url(r'^foo_view_with_args/(?P<some_number>\d+)/', views.foo_view_with_args, name='foo_view_with_args'),
    url(r'^switched_view_with_valid_redirect',
        views.switched_view_with_valid_redirect),
    url(r'^switched_view_with_valid_url_name',
        views.switched_view_with_valid_url_name),
    url(r'^switched_view_with_args_with_valid_redirect/(?P<some_number>\d+)/',
        views.switched_view_with_args_with_valid_redirect),
    url(r'^switched_view_with_args_with_valid_url_name/(?P<some_number>\d+)/',
        views.switched_view_with_args_with_valid_url_name),
    url(r'^switched_view_with_invalid_redirect',
        views.switched_view_with_invalid_redirect),
    url(r'^flagged_view_with_valid_redirect',
        views.flagged_view_with_valid_redirect),
    url(r'^flagged_view_with_valid_url_name',
        views.flagged_view_with_valid_url_name),
    url(r'^flagged_view_with_args_with_valid_redirect/(?P<some_number>\d+)/',
        views.flagged_view_with_args_with_valid_redirect),
    url(r'^flagged_view_with_args_with_valid_url_name/(?P<some_number>\d+)/',
        views.flagged_view_with_args_with_valid_url_name),
    url(r'^flagged_view_with_invalid_redirect',
        views.flagged_view_with_invalid_redirect),
    url(r'^flag-off', views.flagged_off_view),
    url(r'^', include('waffle.urls')),
    url(r'^admin/', include(admin.site.urls)),
]
|
"""
Python 2/3 compatibility.
"""
#noinspection PyUnresolvedReferences
from requests.compat import (
is_windows,
bytes,
str,
is_py3,
is_py26,
)
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 - 2017 Björn Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE coming with the source of python-quilt for details.
import os
import os.path
from quilt.utils import Process, DirectoryParam, _EqBase, File, FileParam, \
SubprocessError
class Patch(_EqBase):
    """ Wrapper around the patch util """

    def __init__(self, patch_name, strip=1, reverse=False):
        self.patch_name = patch_name
        self.strip = strip
        self.reverse = reverse

    @DirectoryParam(["patch_dir", "work_dir"])
    def run(self, cwd, patch_dir=None, backup=False, prefix=None,
            reverse=False, work_dir=None, force=False, dry_run=False,
            no_backup_if_mismatch=False, remove_empty_files=False,
            quiet=False, suppress_output=False):
        """Apply (or revert) the patch by invoking the `patch` utility."""
        cmd = ["patch", "-p" + str(self.strip)]
        if backup:
            cmd.append("--backup")
        if prefix:
            cmd.append("--prefix")
            # patch expects the backup prefix to end with a path separator.
            if not prefix.endswith(os.sep):
                prefix += os.sep
            cmd.append(prefix)
        # The caller's reverse flag toggles the patch's own direction.
        if reverse != self.reverse:
            cmd.append("-R")
        if work_dir:
            cmd.append("-d")
            cmd.append(work_dir.get_name())
        if no_backup_if_mismatch:
            cmd.append("--no-backup-if-mismatch")
        if remove_empty_files:
            cmd.append("--remove-empty-files")
        if force:
            cmd.append("-f")
        cmd.append("-i")
        if patch_dir:
            name = (patch_dir + self.get_name()).get_name()
        else:
            name = self.get_name()
        cmd.append(name)
        if quiet:
            cmd.append("-s")
        if dry_run:
            cmd.append("--dry-run")
        Process(cmd).run(cwd=cwd, suppress_output=suppress_output)

    def get_name(self):
        """Return the patch file name."""
        return self.patch_name

    @DirectoryParam(["patch_dir"])
    def get_header(self, patch_dir=None):
        """ Returns bytes """
        if patch_dir:
            name = (patch_dir + File(self.get_name())).get_name()
        else:
            name = self.get_name()
        header = []
        with open(name, "rb") as f:
            for line in f:
                # The header ends where the first hunk or index marker begins.
                if line.startswith(b"---") or line.startswith(b"Index:"):
                    break
                header.append(line)
        return b"".join(header)

    def __eq__(self, other):
        return isinstance(other, Patch) and self.get_name() == other.get_name()

    def __hash__(self):
        return hash(self.get_name())

    def __str__(self):
        return self.get_name()

    def __repr__(self):
        return "<Patch(%r, %r, %r) id=0x%0x>" % (self.get_name(), self.strip,
                                                 self.reverse, id(self))
class RollbackPatch(object):
    """Restore files from a backup directory into a working directory."""

    @DirectoryParam(["cwd", "backup_dir"])
    def __init__(self, cwd, backup_dir):
        self.cwd = cwd
        self.backup_dir = backup_dir

    def rollback(self, keep=False):
        """Copy every backed-up file back into cwd.

        Unless keep is True, existing files are deleted first; empty backup
        files are not copied back.
        """
        (dirs, files) = self.backup_dir.content()
        for subdir in dirs:
            target_dir = self.cwd + subdir
            if not target_dir.exists():
                target_dir.create()
        for entry in files:
            entry = File(entry)
            backup_file = self.backup_dir + entry
            rollback_file = self.cwd + entry
            if not keep:
                rollback_file.delete_if_exists()
            if not backup_file.is_empty():
                backup_file.copy(rollback_file)

    def delete_backup(self):
        """Remove the whole backup directory."""
        self.backup_dir.delete()
class Diff(object):
    """ Wrapper around the diff util
    """
    @FileParam(["left", "right"])
    def __init__(self, left, right):
        """ left points to the first file and right to the second file
        """
        # Missing files are diffed against /dev/null so creations and
        # deletions still produce a usable diff.
        self.left = left
        if not self.left.exists():
            self.left = File("/dev/null")
        self.right = right
        if not self.right.exists():
            self.right = File("/dev/null")

    def run(self, cwd, left_label=None, right_label=None, unified=True,
            fd=None):
        # Run `diff` between left and right, optionally writing to fd.
        # Option order affects the produced diff header, so it is kept as-is.
        cmd = ["diff"]
        if unified:
            cmd.append("-u")
        if left_label:
            cmd.append("--label")
            cmd.append(left_label)
        if right_label:
            # NOTE(review): when only right_label is given, the first
            # --label is filled with the *right* file's name; a left-file
            # name would be expected here — confirm intent before changing.
            if not left_label:
                cmd.append("--label")
                cmd.append(self.right.get_name())
            cmd.append("--label")
            cmd.append(right_label)
        cmd.append(self.left.get_name())
        cmd.append(self.right.get_name())
        try:
            Process(cmd).run(cwd=cwd, stdout=fd)
        except SubprocessError as e:
            # diff exits 1 when the files differ; only >1 is a real error.
            if e.get_returncode() > 1:
                raise e

    def equal(self, cwd):
        """ Returns True if left and right are equal
        """
        cmd = ["diff"]
        cmd.append("-q")
        cmd.append(self.left.get_name())
        cmd.append(self.right.get_name())
        try:
            Process(cmd).run(cwd=cwd, suppress_output=True)
        except SubprocessError as e:
            # Exit status 1 means "files differ"; anything else is an error.
            if e.get_returncode() == 1:
                return False
            else:
                raise e
        return True
|
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The Agon developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """P2P listen port for node n, offset by PID so parallel runs don't clash."""
    base = 11000
    return base + n + (os.getpid() % 999)
def rpc_port(n):
    """RPC listen port for node n, offset by PID so parallel runs don't clash."""
    base = 12000
    return base + n + (os.getpid() % 999)
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Wait until everybody has the same block count
    """
    while True:
        counts = [conn.getblockcount() for conn in rpc_connections]
        # All connections agree when there is at most one distinct count.
        if len(set(counts)) <= 1:
            break
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        reference = set(rpc_connections[0].getrawmempool())
        matching = sum(1 for conn in rpc_connections[1:]
                       if set(conn.getrawmempool()) == reference)
        # +1 accounts for the reference connection itself.
        if matching + 1 == len(rpc_connections):
            break
        time.sleep(1)
# Map from node index to the subprocess.Popen handle of its agond daemon;
# filled by start_node/initialize_chain, drained by stop_node/wait_bitcoinds.
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create <dirname>/node<n> with a regtest agon.conf; return its path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1",
        "rpcuser=rt",
        "rpcpassword=rt",
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
    ]
    with open(os.path.join(datadir, "agon.conf"), 'w') as f:
        f.write("\n".join(conf_lines) + "\n")
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    agond and agon-cli must be in search path.
    """
    if not os.path.isdir(os.path.join("cache", "node0")):
        # FIX: use os.devnull instead of the non-portable "/dev/null".
        devnull = open(os.devnull, "w+")
        # Create cache directories, run agond:
        for i in range(4):
            datadir = initialize_datadir("cache", i)
            args = [os.getenv("BITCOIND", "agond"), "-keypool=1", "-datadir=" + datadir, "-discover=0"]
            if i > 0:
                args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            # -rpcwait blocks until the daemon's RPC interface is ready.
            subprocess.check_call([os.getenv("BITCOINCLI", "agon-cli"), "-datadir=" + datadir,
                                   "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            url = "http://rt:rt@127.0.0.1:%d" % (rpc_port(i),)
            try:
                rpcs.append(AuthServiceProxy(url))
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; url is assigned before the
                # try so the message can never hit an unbound name.
                sys.stderr.write("Error connecting to " + url + "\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(4):
        from_dir = os.path.join("cache", "node" + str(i))
        to_dir = os.path.join(test_dir, "node" + str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in agon.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    # Only datadirs and config files are created here; no daemons are run.
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
    """
    Start a agond and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node" + str(i))
    args = [os.getenv("BITCOIND", "agond"), "-datadir=" + datadir, "-keypool=1", "-discover=0", "-rest"]
    if extra_args is not None:
        args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    # FIX: use os.devnull instead of the non-portable "/dev/null", and make
    # sure the handle is closed even if the wait below fails.
    devnull = open(os.devnull, "w+")
    try:
        # -rpcwait blocks until the daemon's RPC interface is ready.
        subprocess.check_call([os.getenv("BITCOINCLI", "agon-cli"), "-datadir=" + datadir] +
                              _rpchost_to_args(rpchost) +
                              ["-rpcwait", "getblockcount"], stdout=devnull)
    finally:
        devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
    """
    Start multiple agonds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost)
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path to a log file inside node n_node's regtest datadir."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    """Stop node i via RPC and wait for its agond process to exit."""
    proc = bitcoind_processes[i]
    node.stop()
    proc.wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Issue a stop RPC to every node and drop the connections."""
    for conn in nodes:
        conn.stop()
    del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
    """Set the mock time of every node to t."""
    for conn in nodes:
        conn.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit, then forget their handles.
    for proc in bitcoind_processes.values():
        proc.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Ask from_connection to connect to local node node_num and wait for it."""
    ip_port = "127.0.0.1:%d" % p2p_port(node_num)
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    # Connect nodes a and b in both directions.
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for idx, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return idx
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert confirmations_required >= 0
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    # Pop utxos until the target amount is covered or we run out.
    while total_in < amount_needed and utxo:
        txout = utxo.pop()
        total_in += txout["amount"]
        inputs.append({"txid": txout["txid"], "vout": txout["vout"], "address": txout["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > 2 * spent:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        half = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[extra_address] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create and broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata).
    Zero priority is guaranteed by first sending to self with confirmed
    inputs, then immediately spending that fresh (unconfirmed) output.
    """
    # Step 1: a send-to-self using confirmed inputs.
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount + fee * 2)
    outputs = make_change(from_node, total_in, amount + fee, fee)
    outputs[self_address] = float(amount + fee)
    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
    vout = find_output(from_node, self_txid, amount + fee)
    # Step 2: immediately spend that output in a 1-input, 1-output
    # transaction, which therefore has zero priority.
    spend_inputs = [{"txid": self_txid, "vout": vout}]
    spend_outputs = {to_node.getnewaddress(): float(amount)}
    rawtx = from_node.createrawtransaction(spend_inputs, spend_outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a zero-priority transaction between two randomly chosen nodes,
    with a randomized fee.  Returns (txid, hex-encoded-transaction-data, fee).
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(sender, receiver, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a transaction between two randomly chosen nodes with a
    randomized fee.  Returns (txid, hex-encoded-transaction-data, fee).
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    rawtx = sender.createrawtransaction(inputs, outputs)
    signresult = sender.signrawtransaction(rawtx)
    txid = sender.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless thing1 == thing2."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises an exception of type exc."""
    try:
        fun(*args, **kwds)
    except exc:
        # Expected exception type: success.
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    raise AssertionError("No exception raised")
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.layout_tests.models.test_failures import *
class TestFailuresTest(unittest.TestCase):
    """Round-trip, equality and virtual-method tests for TestFailure classes."""
    def assert_loads(self, cls):
        """Check that an instance of cls survives a dumps/loads round trip."""
        original = cls()
        serialized = original.dumps()
        restored = TestFailure.loads(serialized)
        self.assertIsInstance(restored, cls)
        self.assertEqual(original, restored)
        # Also check that != is implemented consistently with ==.
        self.assertFalse(original != restored)
    def test_unknown_failure_type(self):
        class UnknownFailure(TestFailure):
            def message(self):
                return ''
        self.assertRaises(ValueError, determine_result_type, [UnknownFailure()])
    def test_message_is_virtual(self):
        failure_obj = TestFailure()
        self.assertRaises(NotImplementedError, failure_obj.message)
    def test_loads(self):
        for failure_class in ALL_FAILURE_CLASSES:
            self.assert_loads(failure_class)
    def test_equals(self):
        self.assertEqual(FailureCrash(), FailureCrash())
        self.assertNotEqual(FailureCrash(), FailureTimeout())
        self.assertEqual(len(set([FailureCrash(), FailureCrash()])), 1)
        # The hash happens to be the name of the class, but sets still work:
        self.assertEqual(len(set([FailureCrash(), "FailureCrash"])), 2)
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._assets import (
Asset,
Equity,
Future,
make_asset_array,
)
from .assets import (
AssetFinder,
AssetConvertible,
ContinuousFuture,
PricingDataAssociable,
)
from .asset_db_schema import ASSET_DB_VERSION
from .asset_writer import AssetDBWriter
from .exchange_info import ExchangeInfo
# Public API of the assets package, re-exported from the submodules above.
__all__ = [
    'ASSET_DB_VERSION',
    'Asset',
    'AssetDBWriter',
    'ContinuousFuture',
    'Equity',
    'Future',
    'AssetFinder',
    'AssetConvertible',
    'ExchangeInfo',
    'PricingDataAssociable',
    'make_asset_array',
]
|
#Mostly Notes for now
#Start of main program loop
#Run every 0.25 seconds
#Get ORP (Odometry Robot Position) as (float,float) tuple
#Get ORH (Odometry Robot Heading) as 0-360 range, float
#Function - Check if ORP and ORH make sense
            #This is probably quite complicated, so do be mindful
#Function - Take photo with both cameras
#Likely activate GPIO pin, and prep for inputs over HDMI or USB
#Input into file
#Figure out how to do this!
#Function - Load each photo by accessing previous file into image_left and image_right
#Need to make sure generated file and requested file have same name, but if
            #...it is possible to generate new filenames live, then this should be simple
#Function - Use ROI to select only the 180 degrees that the target could be in according to the ORH
#Somewhat annoying mathematics, but relatively easy
#Denoise image_left & image_right
#Learn how to do this effectively
#Contourize image, but only contours that are similar in size to the contours in the list
#Find corners in image
    #Match each corner with the closest virtual corner on the sent-over list, as long as it is within a certain distance
#Compare each corner in each image, and apply localization algorithms
#Output Localization Results
#Test results for feasibility against Odometry
#Give results to other services
#End main program loop
#On a second core?
#Loop
#Check ORP and ORH for feasibility
#Plug ORP and ORH into game engine, teleport camera to that position
#Contourize image
#Find length of relevant target contours
#Find pixel locations of corners
#Place length and ID and pixel locations into a list
#Send that list over to Core One
#End Loop
|
""" Unit tests for pipelines
"""
import logging
import sys
import unittest
import numpy
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import pixel_to_skycoord
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.calibration.operations import qa_gaintable, create_gaintable_from_blockvisibility, apply_gaintable
from rascil.processing_components.image.operations import export_image_to_fits
from rascil.processing_components.image.operations import copy_image, create_empty_image_like
from rascil.processing_components.imaging import dft_skycomponent_visibility, \
create_image_from_visibility
from rascil.processing_components.skycomponent.operations import create_skycomponent, insert_skycomponent
from rascil.processing_components.simulation import simulate_gaintable
from rascil.processing_components.simulation import create_named_configuration
from rascil.processing_components.visibility.base import create_blockvisibility, create_visibility
from rascil.processing_components.calibration.rcal import rcal
log = logging.getLogger('logger')
log.setLevel(logging.WARNING)
# NOTE(review): handlers are attached for both stdout and stderr, so every
# record at WARNING or above is emitted twice — confirm this is intentional.
log.addHandler(logging.StreamHandler(sys.stdout))
log.addHandler(logging.StreamHandler(sys.stderr))
class TestPipelinesFunctions(unittest.TestCase):
    """Unit tests for the calibration pipeline functions (RCAL)."""

    def setUp(self):
        """Create the default (error-free) block visibility test set."""
        from rascil.data_models.parameters import rascil_path
        self.dir = rascil_path('test_results')
        self.setupVis(add_errors=False, block=True)

    def setupVis(self, add_errors=False, block=True, freqwin=7, bandpass=False):
        """Configure test parameters and ingest the visibility data.

        :param add_errors: apply simulated gain errors to the visibilities
        :param block: create BlockVisibility rather than Visibility
        :param freqwin: number of frequency channels
        :param bandpass: additionally apply simulated bandpass errors
        """
        self.npixel = 256
        self.freqwin = freqwin
        self.ntimes = 5
        self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0
        self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)
        if freqwin > 1:
            self.channel_bandwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])
        else:
            self.channel_bandwidth = numpy.array([4e7])
        self.vis = self.ingest_visibility(self.frequency, chan_width=self.channel_bandwidth,
                                          times=self.times, add_errors=add_errors, block=block,
                                          bandpass=bandpass)

    def ingest_visibility(self, freq=None, chan_width=None, times=None, add_errors=False,
                          block=True, bandpass=False):
        """Create visibilities for a 4x4 grid of point sources, optionally with gain errors.

        Stores the components, model image and an empty model image on self,
        and returns the (block) visibility predicted from the components.
        """
        if freq is None:
            freq = [1e8]
        if chan_width is None:
            chan_width = [1e6]
        if times is None:
            times = (numpy.pi / 12.0) * numpy.linspace(-3.0, 3.0, 5)
        lowcore = create_named_configuration('LOWBD2', rmax=750.0)
        frequency = numpy.array(freq)
        channel_bandwidth = numpy.array(chan_width)
        phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
        if block:
            vt = create_blockvisibility(lowcore, times, frequency, channel_bandwidth=channel_bandwidth,
                                        weight=1.0, phasecentre=phasecentre,
                                        polarisation_frame=PolarisationFrame("stokesI"))
        else:
            vt = create_visibility(lowcore, times, frequency, channel_bandwidth=channel_bandwidth,
                                   weight=1.0, phasecentre=phasecentre,
                                   polarisation_frame=PolarisationFrame("stokesI"))
        cellsize = 0.001
        model = create_image_from_visibility(vt, npixel=self.npixel, cellsize=cellsize, npol=1,
                                             frequency=frequency, phasecentre=phasecentre,
                                             polarisation_frame=PolarisationFrame("stokesI"))
        nchan = len(self.frequency)
        flux = numpy.array(nchan * [[100.0]])
        # Place a regular 4x4 grid of point sources across the image.
        facets = 4
        rpix = model.wcs.wcs.crpix - 1.0
        spacing_pixels = self.npixel // facets
        centers = [-1.5, -0.5, 0.5, 1.5]
        comps = list()
        for iy in centers:
            for ix in centers:
                p = int(round(rpix[0] + ix * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[0]))), \
                    int(round(rpix[1] + iy * spacing_pixels * numpy.sign(model.wcs.wcs.cdelt[1])))
                sc = pixel_to_skycoord(p[0], p[1], model.wcs, origin=1)
                comp = create_skycomponent(direction=sc, flux=flux, frequency=frequency,
                                           polarisation_frame=PolarisationFrame("stokesI"))
                comps.append(comp)
        # Both branches of the original ``if block:`` conditional called
        # dft_skycomponent_visibility identically, so the branching is removed.
        dft_skycomponent_visibility(vt, comps)
        insert_skycomponent(model, comps)
        self.comps = comps
        self.model = copy_image(model)
        self.empty_model = create_empty_image_like(model)
        export_image_to_fits(model, '%s/test_pipeline_functions_model.fits' % (self.dir))
        if add_errors:
            # These will be the same for all calls
            numpy.random.seed(180555)
            gt = create_gaintable_from_blockvisibility(vt)
            gt = simulate_gaintable(gt, phase_error=1.0, amplitude_error=0.0)
            vt = apply_gaintable(vt, gt)
            if bandpass:
                bgt = create_gaintable_from_blockvisibility(vt, timeslice=1e5)
                bgt = simulate_gaintable(bgt, phase_error=0.01, amplitude_error=0.01, smooth_channels=4)
                vt = apply_gaintable(vt, bgt)
        return vt

    def test_time_setup(self):
        """setUp alone exercises the ingest path; nothing further to check."""
        pass

    def test_RCAL(self):
        """RCAL should recover the simulated gains to high accuracy."""
        self.setupVis(add_errors=True, block=True, freqwin=5)
        for igt, gt in enumerate(rcal(vis=self.vis, components=self.comps)):
            assert numpy.max(gt.residual) < 4e-5
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from celery.datastructures import LRUCache
from celery.exceptions import ImproperlyConfigured
from celery.utils import cached_property
from .base import KeyValueStoreBackend
# Module-level one-slot cache for (is_pylibmc, memcache_module), filled
# lazily by import_best_memcache().
_imp = [None]
def import_best_memcache():
    """Import and cache the best available memcache client module.

    Prefers ``pylibmc``, falling back to ``memcache``.  The result is
    cached in ``_imp`` and returned as ``(is_pylibmc, memcache_module)``.
    """
    if _imp[0] is not None:
        return _imp[0]
    is_pylibmc = False
    try:
        import pylibmc as memcache
        is_pylibmc = True
    except ImportError:
        try:
            import memcache  # noqa
        except ImportError:
            raise ImproperlyConfigured(
                "Memcached backend requires either the 'pylibmc' "
                "or 'memcache' library")
    _imp[0] = (is_pylibmc, memcache)
    return _imp[0]
def get_best_memcache(*args, **kwargs):
    """Return a memcache client, applying pylibmc behaviors when supported."""
    behaviors = kwargs.pop("behaviors", None)
    is_pylibmc, memcache_mod = import_best_memcache()
    client = memcache_mod.Client(*args, **kwargs)
    if behaviors is not None and is_pylibmc:
        # Only pylibmc clients understand the ``behaviors`` mapping.
        client.behaviors = behaviors
    return client
class DummyClient(object):
    """In-memory stand-in for a memcache client, backed by an LRU cache."""
    def __init__(self, *args, **kwargs):
        self.cache = LRUCache(limit=5000)
    def get(self, key, *args, **kwargs):
        return self.cache.get(key)
    def get_multi(self, keys):
        data = self.cache
        return dict((key, data[key]) for key in keys if key in data)
    def set(self, key, value, *args, **kwargs):
        self.cache[key] = value
    def delete(self, key, *args, **kwargs):
        self.cache.pop(key, None)
    def incr(self, key, delta=1):
        return self.cache.incr(key, delta)
# Maps the scheme part of CELERY_CACHE_BACKEND (e.g. "memcache://...") to a
# factory returning the client constructor for that backend.
backends = {"memcache": lambda: get_best_memcache,
            "memcached": lambda: get_best_memcache,
            "pylibmc": lambda: get_best_memcache,
            "memory": lambda: DummyClient}
class CacheBackend(KeyValueStoreBackend):
    """Cache result backend using memcached, pylibmc or an in-memory client.

    The backend name and server list are parsed from the
    ``CELERY_CACHE_BACKEND`` setting (e.g. ``"memcache://host1;host2/"``)
    unless given explicitly.
    """
    servers = None
    supports_native_join = True
    implements_incr = True

    def __init__(self, expires=None, backend=None, options=None, **kwargs):
        """Initialize the backend.

        :param expires: result expiry time, in seconds.
        :param backend: backend URL; defaults to ``CELERY_CACHE_BACKEND``.
        :param options: extra options merged over
            ``CELERY_CACHE_BACKEND_OPTIONS``.  Defaults to ``None`` instead
            of a mutable ``{}`` default so instances never share state.
        """
        # NOTE(review): ``self`` is forwarded as an extra positional argument
        # to the parent initializer, mirroring the original code — confirm
        # the base class really expects this argument.
        super(CacheBackend, self).__init__(self, **kwargs)
        self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
                            **(options or {}))
        self.backend = backend or self.app.conf.CELERY_CACHE_BACKEND
        if self.backend:
            # Split "scheme://server1;server2/" into scheme and server list.
            self.backend, _, servers = self.backend.partition("://")
            self.servers = servers.rstrip('/').split(";")
        self.expires = self.prepare_expires(expires, type=int)
        try:
            self.Client = backends[self.backend]()
        except KeyError:
            raise ImproperlyConfigured(
                "Unknown cache backend: %s. Please use one of the "
                "following backends: %s" % (self.backend,
                                            ", ".join(backends.keys())))

    def get(self, key):
        """Return the value stored at ``key`` (or None if missing)."""
        return self.client.get(key)

    def mget(self, keys):
        """Return a mapping of the given keys to their stored values."""
        return self.client.get_multi(keys)

    def set(self, key, value):
        """Store ``value`` at ``key`` with the configured expiry."""
        return self.client.set(key, value, self.expires)

    def delete(self, key):
        """Remove ``key`` from the cache."""
        return self.client.delete(key)

    def on_chord_apply(self, setid, body, result=None, **kwargs):
        """Initialize the chord counter for ``setid`` (expires in 24 hours)."""
        self.client.set(self.get_key_for_chord(setid), '0', time=86400)

    def incr(self, key):
        """Atomically increment the counter stored at ``key``."""
        return self.client.incr(key)

    @cached_property
    def client(self):
        """Lazily created client instance connected to ``self.servers``."""
        return self.Client(self.servers, **self.options)

    def __reduce__(self, args=(), kwargs=None):
        """Support pickling by reconstructing the backend URL and options."""
        # Previously ``kwargs={}`` was a mutable default that ``update``
        # below mutated in place, leaking state across calls; copy instead.
        kwargs = dict(kwargs) if kwargs else {}
        servers = ";".join(self.servers)
        backend = "%s://%s/" % (self.backend, servers)
        kwargs.update(
            dict(backend=backend,
                 expires=self.expires,
                 options=self.options))
        return super(CacheBackend, self).__reduce__(args, kwargs)
|
# Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Hafnian Python interface
"""
from functools import lru_cache
from collections import Counter
from itertools import chain, combinations
import numba
import numpy as np
@numba.jit(nopython=True, cache=True)
def nb_binom(n, k):  # pragma: no cover
    """Numba version of binomial coefficient function.
    Args:
        n (int): how many options
        k (int): how many are chosen
    Returns:
        int: how many ways of choosing
    """
    if k < 0 or k > n:
        return 0
    if k in (0, n):
        return 1
    result = 1
    # Multiply/divide incrementally; using min(k, n - k) keeps the loop short
    # and the intermediate values exact.
    for step in range(min(k, n - k)):
        result *= n - step
        result //= step + 1
    return result
@numba.jit(nopython=True, cache=True)
def precompute_binoms(max_binom):  # pragma: no cover
    """Precompute binomial coefficients, return as a 2d array.
    Args:
        max_binom (int): max value of n in the binomial
    Returns:
        array: ``max_binom + 1 * max_binom + 1`` array of binomial coefficients
    """
    size = max_binom + 1
    binoms = np.zeros((size, size), dtype=type(max_binom))
    for row in range(size):
        for col in range(size):
            binoms[row, col] = nb_binom(row, col)
    return binoms
@numba.jit(nopython=True, cache=True)
def nb_ix(arr, rows, cols):  # pragma: no cover
    """Numba implementation of ``np.ix_``.
    Args:
        arr (2d array): matrix to take submatrix of
        rows (array): rows to be selected in submatrix
        cols (array): columns to be selected in submatrix
    Return:
        array: selected submatrix of ``arr`` with dimension ``len(rows) * len(cols)``
    """
    # Row selection first, then column selection on the result.
    selected_rows = arr[rows]
    return selected_rows[:, cols]
def matched_reps(reps):  # pylint: disable = too-many-branches
    """Takes the repeated rows and find a way to pair them up to create a perfect
    matching with many repeated edges.
    Args:
        reps (list): list of repeated rows/cols
    Returns:
        tuple[array, array, int]: tuple with vertex pairs (length ``2N`` for ``N`` edges; index
        ``i`` is matched with ``i + N``), length ``N`` array for how many times each edge is
        repeated, and index of odd mode (``None`` if even number of vertices)
    """
    n = len(reps)
    # Trivial case: nothing is repeated, so there are no edges at all.
    if sum(reps) == 0:
        return np.array([], dtype=int), np.array([], dtype=int), None
    # need to pair off the indices with high numbers of repetitions...
    x = range(n)  # the starting set of indices
    edgesA = []  # contains part A of each pair
    edgesB = []  # part B of each pair
    edgereps = []  # number of repetitions of a pair
    reps, x = zip(
        *sorted(zip(reps, x), reverse=True)
    )  # sort according to reps, in descending order
    reps = list(reps)
    x = list(x)
    # remove zeros
    nonzero_reps = []
    nonzero_x = []
    for i, r in zip(x, reps):
        if r > 0:
            nonzero_reps.append(r)
            nonzero_x.append(i)
    reps = nonzero_reps
    x = nonzero_x
    # Greedily pair off vertices until at most one single repetition remains.
    while len(reps) > 1 or (len(reps) == 1 and reps[0] > 1):
        # Re-sort each iteration, since the previous step changed the counts.
        reps, x = zip(*sorted(zip(reps, x), reverse=True))  # sort
        reps = list(reps)
        x = list(x)
        if len(reps) == 1 or reps[0] > reps[1] * 2:
            # if largest number of reps is more than double the 2nd largest, pair it with itself
            edgesA += [x[0]]
            edgesB += [x[0]]
            edgereps += [reps[0] // 2]
            if reps[0] % 2 == 0:
                # Fully consumed: drop this vertex from further pairing.
                x = x[1:]
                reps = reps[1:]
            else:
                # One repetition left over; keep the vertex with a single rep.
                reps[0] = 1
        else:
            # otherwise, form pairs between largest reps and 2nd largest reps
            edgesA += [x[0]]
            edgesB += [x[1]]
            edgereps += [reps[1]]
            if reps[0] > reps[1]:
                # The largest vertex still has repetitions left after pairing.
                if len(x) > 2:
                    x = [x[0]] + x[2:]
                    reps = [reps[0] - reps[1]] + reps[2:]
                else:
                    x = [x[0]]
                    reps = [reps[0] - reps[1]]
            else:
                # Both vertices are fully consumed by this pairing.
                x = x[2:]
                reps = reps[2:]
    if len(x) == 1:
        oddmode = x[0]  # if there is an unpaired mode, store it
    else:
        oddmode = None
    # the adjacency matrix of red edges connects 1 to N/2+1, 2 to N/2+2, etc.
    # Reorder the indices (from x2 back to x) so that the paired indices get
    # connected by red edges
    x = np.asarray(edgesA + edgesB, dtype=np.int64)  # reordered list of indices
    edgereps = np.asarray(edgereps, dtype=np.int64)
    return x, edgereps, oddmode
@numba.jit(nopython=True, cache=True)
def find_kept_edges(j, reps):  # pragma: no cover
    """Write ``j`` as a string where the ith digit is in base ``reps[i]+1``
    decides which edges are included given index of the inclusion/exclusion sum.
    Args:
        j (int): index of sum
        reps (list): number of repetitions of each edge
    Returns:
        array: number of repetitions kept for the current inclusion/exclusion step
    """
    remainder = j
    digits = []
    bases = np.asarray(reps) + 1
    # Interpret j as a mixed-radix number: digit i runs over 0..reps[i].
    for base in bases[::-1]:
        digits.append(remainder % base)
        remainder //= base
    return np.array(digits[::-1], dtype=reps.dtype)
@numba.jit(nopython=True, cache=True)
def f(E, n):  # pragma: no cover
    """Evaluate the polynomial coefficients of the function in the eigenvalue-trace formula.
    Args:
        E (array): eigenvalues of ``AX``
        n (int): number of polynomial coefficients to compute
    Returns:
        array: polynomial coefficients
    """
    # E_k accumulates successive element-wise powers of E, so E_k.sum()
    # below is the k-th power trace.
    E_k = E.copy()
    # Compute combinations in O(n^2log n) time
    # code translated from thewalrus matlab script
    count = 0
    # Two rows form a double buffer; ``count`` flips between them each pass.
    comb = np.zeros((2, n // 2 + 1), dtype=np.complex128)
    comb[0, 0] = 1
    for i in range(1, n // 2 + 1):
        factor = E_k.sum() / (2 * i)
        E_k *= E
        powfactor = 1
        count = 1 - count
        comb[count, :] = comb[1 - count, :]
        for j in range(1, n // (2 * i) + 1):
            # powfactor builds up factor**j / j! incrementally.
            powfactor *= factor / j
            for k in range(i * j + 1, n // 2 + 2):
                comb[count, k - 1] += comb[1 - count, k - i * j - 1] * powfactor
    return comb[count, :]
@numba.jit(nopython=True, cache=True)
def f_loop(E, AX_S, XD_S, D_S, n):  # pragma: no cover
    """Evaluate the polynomial coefficients of the function in the eigenvalue-trace formula.
    Args:
        E (array): eigenvalues of ``AX``
        AX_S (array): ``AX_S`` with weights given by repetitions and excluded rows removed
        XD_S (array): diagonal multiplied by ``X``
        D_S (array): diagonal
        n (int): number of polynomial coefficients to compute
    Returns:
        array: polynomial coefficients
    """
    # E_k accumulates element-wise powers of E for the power-trace terms.
    E_k = E.copy()
    # Compute combinations in O(n^2log n) time
    # code translated from thewalrus matlab script
    count = 0
    # Two rows form a double buffer; ``count`` flips between them each pass.
    comb = np.zeros((2, n // 2 + 1), dtype=np.complex128)
    comb[0, 0] = 1
    for i in range(1, n // 2 + 1):
        # Power-trace term plus the loop (diagonal) contribution.
        factor = E_k.sum() / (2 * i) + (XD_S @ D_S) / 2
        E_k *= E
        XD_S = XD_S @ AX_S
        powfactor = 1
        count = 1 - count
        comb[count, :] = comb[1 - count, :]
        for j in range(1, n // (2 * i) + 1):
            # powfactor builds up factor**j / j! incrementally.
            powfactor *= factor / j
            for k in range(i * j + 1, n // 2 + 2):
                comb[count, k - 1] += comb[1 - count, k - i * j - 1] * powfactor
    return comb[count, :]
# pylint: disable = too-many-arguments
@numba.jit(nopython=True, cache=True)
def f_loop_odd(E, AX_S, XD_S, D_S, n, oddloop, oddVX_S):  # pragma: no cover
    """Evaluate the polynomial coefficients of the function in the eigenvalue-trace formula
    when there is a self-edge in the fixed perfect matching.
    Args:
        E (array): eigenvalues of ``AX``
        AX_S (array): ``AX_S`` with weights given by repetitions and excluded rows removed
        XD_S (array): diagonal multiplied by ``X``
        D_S (array): diagonal
        n (int): number of polynomial coefficients to compute
        oddloop (float): weight of self-edge
        oddVX_S (array): vector corresponding to matrix at the index of the self-edge
    Returns:
        array: polynomial coefficients
    """
    # E_k accumulates element-wise powers of E for the power-trace terms.
    E_k = E.copy()
    count = 0
    # Two rows form a double buffer; ``count`` flips between them each pass.
    comb = np.zeros((2, n + 1), dtype=np.complex128)
    comb[0, 0] = 1
    for i in range(1, n + 1):
        if i == 1:
            # First-order term comes from the self-edge weight alone.
            factor = oddloop
        elif i % 2 == 0:
            # Even orders: power-trace term plus the diagonal contribution.
            factor = E_k.sum() / i + (XD_S @ D_S) / 2
            E_k *= E
        else:
            # Odd orders: coupling between the self-edge vector and diagonal.
            factor = oddVX_S @ D_S
            D_S = AX_S @ D_S
        powfactor = 1
        count = 1 - count
        comb[count, :] = comb[1 - count, :]
        for j in range(1, n // i + 1):
            # powfactor builds up factor**j / j! incrementally.
            powfactor *= factor / j
            for k in range(i * j + 1, n + 2):
                comb[count, k - 1] += comb[1 - count, k - i * j - 1] * powfactor
    return comb[count, :]
@numba.jit(nopython=True, cache=True)
def get_AX_S(kept_edges, A):  # pragma: no cover
    """Given the kept edges, return the appropriate scaled submatrices to compute ``f``.
    Args:
        kept_edges (array): number of repetitions of each edge
        A (array): matrix before repetitions applied
    Returns:
        array: scaled ``A @ X``, where ``X = ((0, I), (I, 0))``
    """
    # Each edge covers two rows/cols (i and i + n_edges), so duplicate the
    # edge weights before selecting the surviving rows.
    doubled = np.concatenate((kept_edges, kept_edges))
    keep_rows = np.where(doubled != 0)[0]
    half = len(keep_rows) // 2
    weights = kept_edges[np.where(kept_edges != 0)]
    A_sub = nb_ix(A, keep_rows, keep_rows)
    AX_sub = np.empty_like(A_sub, dtype=np.complex128)
    # Multiplying by X = ((0, I), (I, 0)) swaps the two column halves;
    # the surviving edge weights scale the columns.
    AX_sub[:, :half] = weights * A_sub[:, half:]
    AX_sub[:, half:] = weights * A_sub[:, :half]
    return AX_sub
@numba.jit(nopython=True, cache=True)
def get_submatrices(kept_edges, A, D, oddV):  # pragma: no cover
    """Given the kept edges, return the appropriate scaled submatrices to compute ``f``.
    Args:
        kept_edges (array): number of repetitions of each edge
        A (array): matrix before repetitions applied
        D (array): diagonal before repetitions applied
        oddV (array): Row of matrix at index of self-edge. ``None`` is no self-edge.
    Returns:
        tuple[array, array, array, array]: scaled ``A @ X `` (where ``X = ((0, I), (I, 0))``),
        scaled ``X @ D``, scaled ``D``, and scaled ``oddV @ X``
    """
    # Each edge covers two rows/cols (i and i + n_edges), so the edge
    # weights are duplicated before selecting the surviving rows.
    z = np.concatenate((kept_edges, kept_edges))
    nonzero_rows = np.where(z != 0)[0]
    n_nonzero_edges = len(nonzero_rows) // 2
    kept_edges_nonzero = kept_edges[np.where(kept_edges != 0)]
    A_nonzero = nb_ix(A, nonzero_rows, nonzero_rows)
    AX_nonzero = np.empty_like(A_nonzero, dtype=np.complex128)
    # Multiplying by X = ((0, I), (I, 0)) swaps the two column halves;
    # the surviving edge weights scale each column.
    AX_nonzero[:, :n_nonzero_edges] = kept_edges_nonzero * A_nonzero[:, n_nonzero_edges:]
    AX_nonzero[:, n_nonzero_edges:] = kept_edges_nonzero * A_nonzero[:, :n_nonzero_edges]
    D_nonzero = D[nonzero_rows]
    XD_nonzero = np.empty_like(D_nonzero, dtype=np.complex128)
    # Same half-swap applied to the diagonal vector.
    XD_nonzero[:n_nonzero_edges] = kept_edges_nonzero * D_nonzero[n_nonzero_edges:]
    XD_nonzero[n_nonzero_edges:] = kept_edges_nonzero * D_nonzero[:n_nonzero_edges]
    if oddV is not None:
        oddV_nonzero = oddV[nonzero_rows]
        oddVX_nonzero = np.empty_like(oddV_nonzero, dtype=np.complex128)
        oddVX_nonzero[:n_nonzero_edges] = kept_edges_nonzero * oddV_nonzero[n_nonzero_edges:]
        oddVX_nonzero[n_nonzero_edges:] = kept_edges_nonzero * oddV_nonzero[:n_nonzero_edges]
    else:
        oddVX_nonzero = None
    return AX_nonzero, XD_nonzero, D_nonzero, oddVX_nonzero
@numba.jit(nopython=True, cache=True)
def get_submatrix_batch_odd0(kept_edges, oddV0):  # pragma: no cover
    """Find ``oddVX_nonzero0`` for batching (sometimes different vertices are
    identified as self edges).
    Args:
        kept_edges (array): number of repetitions of each edge
        oddV0 (array): Row of matrix at index of self-edge. ``None`` is no self-edge.
    Returns:
        array: scaled ``oddV0 @ X``
    """
    # Duplicate edge weights: each edge covers two rows (i and i + n_edges).
    doubled = np.concatenate((kept_edges, kept_edges))
    keep_rows = np.where(doubled != 0)[0]
    half = len(keep_rows) // 2
    weights = kept_edges[np.where(kept_edges != 0)]
    oddV_sub = oddV0[keep_rows]
    result = np.empty_like(oddV_sub, dtype=np.complex128)
    # X swaps the two halves of the vector; the edge weights scale it.
    result[:half] = weights * oddV_sub[half:]
    result[half:] = weights * oddV_sub[:half]
    return result
@numba.jit(nopython=True, cache=True)
def get_Dsubmatrices(kept_edges, D):  # pragma: no cover
    """Find submatrices for batch gamma functions."""
    # Duplicate edge weights: each edge covers two rows (i and i + n_edges).
    doubled = np.concatenate((kept_edges, kept_edges))
    keep_rows = np.where(doubled != 0)[0]
    half = len(keep_rows) // 2
    weights = kept_edges[np.where(kept_edges != 0)]
    D_sub = D[keep_rows]
    XD_sub = np.empty_like(D_sub, dtype=np.complex128)
    # X swaps the two halves of the vector; the edge weights scale it.
    XD_sub[:half] = weights * D_sub[half:]
    XD_sub[half:] = weights * D_sub[:half]
    return XD_sub, D_sub
@numba.jit(nopython=True, cache=True)
def eigvals(M):  # pragma: no cover
    """Computes the eigenvalues of a matrix.
    Args:
        M (array): square matrix
    Returns:
        array: eigenvalues of the matrix ``M``
    """
    # Thin jitted wrapper so the call composes with other nopython code.
    return np.linalg.eigvals(M)
# pylint: disable=W0612, E1133
@numba.jit(nopython=True, parallel=True, cache=True)
def _calc_hafnian(A, edge_reps, glynn=True):  # pragma: no cover
    r"""Compute hafnian, using inputs as prepared by frontend hafnian function compiled with Numba.
    Args:
        A (array): matrix ordered according to the chosen perfect matching
        edge_reps (array): how many times each edge in the perfect matching is repeated
        glynn (bool): whether to use finite difference sieve
    Returns:
        complex: value of hafnian
    """
    n = A.shape[0]
    N = 2 * edge_reps.sum()  # number of photons
    if glynn:
        # Symmetry of the first digit halves the number of terms in the sum.
        steps = ((edge_reps[0] + 2) // 2) * np.prod(edge_reps[1:] + 1)
    else:
        steps = np.prod(edge_reps + 1)
    # precompute binomial coefficients
    max_binom = edge_reps.max() + 1
    binoms = precompute_binoms(max_binom)
    H = np.complex128(0)  # start running total for the hafnian
    # Each j indexes one term of the inclusion/exclusion (or Glynn) sum;
    # numba.prange parallelizes the reduction over the terms.
    for j in numba.prange(steps):
        kept_edges = find_kept_edges(j, edge_reps)
        edge_sum = kept_edges.sum()
        binom_prod = 1.0
        for i in range(n // 2):
            binom_prod *= binoms[edge_reps[i], kept_edges[i]]
        if glynn:
            # Map counts 0..reps to Glynn-style weights -reps..+reps (step 2).
            kept_edges = 2 * kept_edges - edge_reps
        AX_S = get_AX_S(kept_edges, A)
        E = eigvals(AX_S)  # O(n^3) step
        prefac = (-1.0) ** (N // 2 - edge_sum) * binom_prod
        if glynn and kept_edges[0] == 0:
            # Boundary term of the halved first digit gets weight 1/2.
            prefac *= 0.5
        Hnew = prefac * f(E, N)[N // 2]
        H += Hnew
    if glynn:
        H = H * 0.5 ** (N // 2 - 1)
    return H
def _haf(A, reps=None, glynn=True):
    r"""Calculate hafnian with (optional) repeated rows and columns.
    Code contributed by `Jake F.F. Bulmer <https://github.com/jakeffbulmer/gbs>`_ based on
    `arXiv:2108.01622 <https://arxiv.org/abs/2108.01622>`_.
    Args:
        A (array): N x N matrix.
        reps (list): Length-N list of repetitions of each row/col (optional). If not provided,
            each row/column assumed to be repeated once.
        glynn (bool): If ``True``, use Glynn-style finite difference sieve formula. If ``False``,
            use Ryser style inclusion/exclusion principle.
    Returns
        complex: result of hafnian calculation
    """
    size = A.shape[0]
    reps = [1] * size if reps is None else reps
    total = sum(reps)
    # Empty matching: hafnian is 1 by convention; odd total: hafnian is 0.
    if total == 0:
        return 1.0
    if total % 2 == 1:
        return 0.0
    assert size == len(reps)
    x, edge_reps, _ = matched_reps(reps)
    # Reorder A to match the chosen perfect matching before the computation.
    ordered = A[np.ix_(x, x)].astype(np.complex128)
    return _calc_hafnian(ordered, edge_reps, glynn)
# pylint: disable=too-many-arguments, redefined-outer-name, not-an-iterable
@numba.jit(nopython=True, parallel=True, cache=True)
def _calc_loop_hafnian(A, D, edge_reps, oddloop=None, oddV=None, glynn=True):  # pragma: no cover
    """Compute loop hafnian, using inputs as prepared by frontend loop_hafnian function
    compiled with Numba.
    Code contributed by `Jake F.F. Bulmer <https://github.com/jakeffbulmer/gbs>`_ based on
    `arXiv:2108.01622 <https://arxiv.org/abs/2108.01622>`_.
    Args:
        A (array): matrix ordered according to the chosen perfect matching.
        D (array): diagonals ordered according to the chosen perfect matching
        edge_reps (array): how many times each edge in the perfect matching is repeated
        oddloop (float): weight of self-loop in perfect matching, None if no self-loops
        oddV (array): row of matrix corresponding to the odd loop in the perfect matching
        glynn (bool): whether to use finite difference sieve
    Returns:
        complex: value of loop hafnian
    """
    n = A.shape[0]
    N = 2 * edge_reps.sum()  # Number of photons
    if oddloop is not None:
        # A self-loop contributes one extra photon.
        N += 1
    if glynn and (oddloop is None):
        # Glynn symmetry halves the first digit's range (even case only).
        steps = ((edge_reps[0] + 2) // 2) * np.prod(edge_reps[1:] + 1)
    else:
        steps = np.prod(edge_reps + 1)
    # Precompute binomial coefficients
    max_binom = edge_reps.max() + 1
    binoms = precompute_binoms(max_binom)
    H = np.complex128(0)  # Start running total for the hafnian
    # Each j indexes one term of the inclusion/exclusion (or Glynn) sum;
    # numba.prange parallelizes the reduction over the terms.
    for j in numba.prange(steps):
        kept_edges = find_kept_edges(j, edge_reps)
        edge_sum = kept_edges.sum()
        binom_prod = 1.0
        for i in range(n // 2):
            binom_prod *= binoms[edge_reps[i], kept_edges[i]]
        if glynn:
            # Map counts 0..reps to Glynn-style weights -reps..+reps (step 2).
            kept_edges = 2 * kept_edges - edge_reps
        AX_S, XD_S, D_S, oddVX_S = get_submatrices(kept_edges, A, D, oddV)
        E = eigvals(AX_S)  # O(n^3) step
        prefac = (-1.0) ** (N // 2 - edge_sum) * binom_prod
        if oddloop is not None:
            Hnew = prefac * f_loop_odd(E, AX_S, XD_S, D_S, N, oddloop, oddVX_S)[N]
        else:
            if glynn and kept_edges[0] == 0:
                # Boundary term of the halved first digit gets weight 1/2.
                prefac *= 0.5
            Hnew = prefac * f_loop(E, AX_S, XD_S, D_S, N)[N // 2]
        H += Hnew
    if glynn:
        if oddloop is None:
            H = H * 0.5 ** (N // 2 - 1)
        else:
            H = H * 0.5 ** (N // 2)
    return H
# pylint: disable=redefined-outer-name
def loop_hafnian(A, D=None, reps=None, glynn=True):
    """Calculate loop hafnian with (optional) repeated rows and columns.

    Code contributed by `Jake F.F. Bulmer <https://github.com/jakeffbulmer/gbs>`_ based on
    `arXiv:2108.01622 <https://arxiv.org/abs/2108.01622>`_.

    Args:
        A (array): N x N matrix.
        D (array): Diagonal entries of matrix (optional). If not provided, ``D`` is the diagonal of ``A``.
            If repetitions are provided, ``D`` should be provided explicitly.
        reps (list): Length-N list of repetitions of each row/col (optional), if not provided, each
            row/column assumed to be repeated once.
        glynn (bool): If ``True``, use Glynn-style finite difference sieve formula, if ``False``,
            use Ryser style inclusion/exclusion principle.

    Returns:
        complex: result of loop hafnian calculation
    """
    n = A.shape[0]
    if reps is None:
        reps = [1] * n
    if D is None:
        D = A.diagonal()
    N = sum(reps)  # total matrix dimension after repetition
    if N == 0:
        # Hafnian of the empty matrix is 1 by convention.
        return 1.0
    if N == 1:
        # Single remaining mode: the loop hafnian is its diagonal entry.
        return D[np.where(np.array(reps) == 1)[0][0]]
    assert n == len(reps)
    assert D.shape[0] == n
    # matched_reps presumably chooses a perfect matching and returns the
    # reordering x, per-edge repetitions, and the leftover mode when N is
    # odd (handled below as a self-loop) — contract defined elsewhere.
    x, edge_reps, oddmode = matched_reps(reps)
    # Make new A matrix and D vector using the ordering from above
    if oddmode is not None:
        oddloop = D[oddmode].astype(np.complex128)
        oddV = A[oddmode, x].astype(np.complex128)
    else:
        oddloop = None
        oddV = None
    Ax = A[np.ix_(x, x)].astype(np.complex128)
    Dx = D[x].astype(np.complex128)
    H = _calc_loop_hafnian(Ax, Dx, edge_reps, oddloop, oddV, glynn)
    return H
def input_validation(A, rtol=1e-05, atol=1e-08):
    """Validate that ``A`` is acceptable for hafnian computation.

    The matrix must be a square, symmetric NumPy array containing no NaNs.

    Args:
        A (array): candidate matrix.
        rtol (float): relative tolerance forwarded to ``np.allclose``.
        atol (float): absolute tolerance forwarded to ``np.allclose``.

    Returns:
        bool: ``True`` when every requirement is satisfied.

    Raises:
        TypeError: if ``A`` is not a NumPy array.
        ValueError: if ``A`` is not square, contains NaNs, or is not symmetric.
    """
    if not isinstance(A, np.ndarray):
        raise TypeError("Input matrix must be a NumPy array.")
    shape = A.shape
    if shape[0] != shape[1]:
        raise ValueError("Input matrix must be square.")
    if np.isnan(A).any():
        raise ValueError("Input matrix must not contain NaNs.")
    is_symmetric = np.allclose(A, A.T, rtol=rtol, atol=atol)
    if not is_symmetric:
        raise ValueError("Input matrix must be symmetric.")
    return True
def bandwidth(A):
    """Calculate the upper bandwidth of the matrix ``A``.

    Scans super-diagonals from the outermost inward and reports the offset
    of the first one containing a nonzero entry.

    Args:
        A (array): input matrix

    Returns:
        int: bandwidth of matrix (0 for a diagonal matrix)
    """
    n, _ = A.shape
    for offset in reversed(range(1, n)):
        superdiag = np.diag(A, offset)
        if not np.allclose(superdiag, 0):
            return offset
    return 0
def powerset(iterable):
    """Calculate the powerset of a sized collection.

    Args:
        iterable (iterable): input list

    Returns:
        chain: iterator over all subsets (as tuples), from smallest to largest
    """
    total = len(iterable)
    subsets_by_size = (combinations(iterable, size) for size in range(total + 1))
    return chain.from_iterable(subsets_by_size)
def reduction(A, rpt):
    r"""Calculate the reduction of an array by a vector of indices.

    This is equivalent to repeating the ith row/column of :math:`A`,
    :math:`rpt_i` times.

    Args:
        A (array): matrix of size ``[N, N]`` (or vector of length ``N``)
        rpt (Sequence): sequence of N positive integers indicating the corresponding rows/columns
            of ``A`` to be repeated.

    Returns:
        array: the reduction of ``A`` by the index vector ``rpt``
    """
    expanded = []
    for index, multiplicity in enumerate(rpt):
        expanded.extend([index] * multiplicity)
    if A.ndim == 1:
        return A[expanded]
    return A[:, expanded][expanded]
# pylint: disable=too-many-arguments
def hafnian(
    A,
    loop=False,
    rtol=1e-05,
    atol=1e-08,
    approx=False,
    num_samples=1000,
    glynn=True,
): # pylint: disable=too-many-arguments
    """Returns the hafnian of a matrix.

    Code contributed by `Jake F.F. Bulmer <https://github.com/jakeffbulmer/gbs>`_ based on
    `arXiv:2108.01622 <https://arxiv.org/abs/2108.01622>`_.

    Args:
        A (array): a square, symmetric array of even dimensions
        loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.
        rtol (float): the relative tolerance parameter used in ``np.allclose``
        atol (float): the absolute tolerance parameter used in ``np.allclose``
        approx (bool): If ``True``, an approximation algorithm is used to estimate the hafnian. Note
            that the approximation algorithm can only be applied to matrices ``A`` that only have
            non-negative entries.
        num_samples (int): if ``approx=True``, the approximation algorithm performs ``num_samples``
            iterations for estimation of the hafnian of the non-negative matrix ``A``
        glynn (bool): whether to use finite difference sieve

    Returns:
        int or float or complex: the hafnian of matrix ``A``
    """
    # pylint: disable=too-many-return-statements,too-many-branches
    input_validation(A, rtol=rtol, atol=atol)
    matshape = A.shape
    if matshape == (0, 0):
        # Hafnian of the empty matrix is 1 by convention.
        return 1
    if matshape[0] % 2 != 0 and not loop:
        # Odd-dimensional matrices admit no perfect matching.
        return 0.0
    if np.allclose(np.diag(np.diag(A)), A, rtol=rtol, atol=atol):
        # Diagonal matrix: only self-loop terms can contribute.
        if loop:
            return np.prod(np.diag(A))
        return 0
    if matshape[0] % 2 != 0 and loop:
        # Pad with an extra mode whose self-loop weight is 1 so the loop
        # hafnian of the odd-dimensional matrix is preserved.
        A = np.pad(A, pad_width=((0, 1), (0, 1)), mode="constant")
        A[-1, -1] = 1.0
    matshape = A.shape
    if matshape[0] == 2:
        # Closed-form 2x2 case.
        if loop:
            return A[0, 1] + A[0, 0] * A[1, 1]
        return A[0][1]
    if matshape[0] == 4:
        # Closed-form 4x4 case: explicit sum over all (loop) perfect matchings.
        if loop:
            result = (
                A[0, 1] * A[2, 3]
                + A[0, 2] * A[1, 3]
                + A[0, 3] * A[1, 2]
                + A[0, 0] * A[1, 1] * A[2, 3]
                + A[0, 1] * A[2, 2] * A[3, 3]
                + A[0, 2] * A[1, 1] * A[3, 3]
                + A[0, 0] * A[2, 2] * A[1, 3]
                + A[0, 0] * A[3, 3] * A[1, 2]
                + A[0, 3] * A[1, 1] * A[2, 2]
                + A[0, 0] * A[1, 1] * A[2, 2] * A[3, 3]
            )
            return result
        return A[0, 1] * A[2, 3] + A[0, 2] * A[1, 3] + A[0, 3] * A[1, 2]
    if approx:
        # Barvinok-style stochastic estimator; valid only for real,
        # entrywise non-negative matrices.
        if np.any(np.iscomplex(A)):
            raise ValueError("Input matrix must be real")
        if np.any(A < 0):
            raise ValueError("Input matrix must not have negative entries")
        return hafnian_approx(A, num_samples=num_samples)
    if loop:
        return loop_hafnian(A, D=None, reps=None, glynn=glynn)
    return _haf(A, reps=None, glynn=glynn)
def hafnian_sparse(A, D=None, loop=False):
    r"""Returns the hafnian of a sparse symmetric matrix.

    This pure python implementation is very slow on full matrices, but faster the sparser a matrix is.
    As a rule of thumb, the crossover in runtime with respect to :func:`~.hafnian` happens around 50% sparsity.

    Args:
        A (array): the symmetric matrix of which we want to compute the hafnian
        D (set): Set of indices that identify a submatrix. If ``None`` (default) it computes
            the hafnian of the whole matrix.
        loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.

    Returns:
        float: hafnian of ``A`` or of the submatrix of ``A`` defined by the set of indices ``D``
    """
    active = frozenset(range(len(A))) if D is None else frozenset(D)
    if not loop:
        # Plain hafnian: zero the diagonal so self-loop terms vanish.
        A = A - np.diag(np.diag(A))
    if np.allclose(A, 0):
        return 0.0
    nonzero_rows, _ = np.nonzero(A)
    # Maximum number of nonzero entries in any row bounds the useful cache size.
    max_per_row = max(Counter(nonzero_rows).values())

    @lru_cache(maxsize=2 ** max_per_row)
    def _neighbours(subset, k):
        # Indices in `subset` that row k of A connects to with nonzero weight.
        return subset.intersection(set(np.nonzero(A[k])[0]))

    @lru_cache(maxsize=2 ** max_per_row)
    def _lhaf(subset: frozenset) -> float:
        # Recursive expansion: pick any index k and sum over its partners.
        if not subset:
            return 1
        remaining = set(subset)
        k = remaining.pop()
        return sum(
            A[i, k] * _lhaf(frozenset(remaining).difference({i}))
            for i in _neighbours(subset, k)
        )

    return _lhaf(active)
def hafnian_repeated(A, rpt, mu=None, loop=False, rtol=1e-05, atol=1e-08, glynn=True):
    r"""Returns the hafnian of matrix with repeated rows/columns.

    Code contributed by `Jake F.F. Bulmer <https://github.com/jakeffbulmer/gbs>`_ based on
    `arXiv:2108.01622 <https://arxiv.org/abs/2108.01622>`_.

    The :func:`reduction` function may be used to show the resulting matrix
    with repeated rows and columns as per ``rpt``.

    As a result, the following are identical:

    .. code:

        >>> hafnian_repeated(A, rpt)
        >>> hafnian(reduction(A, rpt))

    However, using ``hafnian_repeated`` in the case where there are a large number
    of repeated rows and columns (:math:`\sum_{i}rpt_i \gg N`) can be
    significantly faster.

    .. note::

        If :math:`rpt=(1, 1, \dots, 1)`, then

        >>> hafnian_repeated(A, rpt) == hafnian(A)

    Args:
        A (array): a square, symmetric :math:`N\times N` array
        rpt (Sequence): a length-:math:`N` positive integer sequence, corresponding
            to the number of times each row/column of matrix :math:`A` is repeated
        mu (array): A vector of length :math:`N` representing the vector of means/displacement.
            If not provided, ``mu`` is set to the diagonal of matrix ``A``. Note that this
            only affects the loop hafnian.
        loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.
        rtol (float): the relative tolerance parameter used in ``np.allclose``
        atol (float): the absolute tolerance parameter used in ``np.allclose``
        glynn (bool): whether to use finite difference sieve

    Returns:
        int or float or complex: the hafnian of matrix A
    """
    # pylint: disable=too-many-return-statements,too-many-branches
    input_validation(A, atol=atol, rtol=rtol)
    if len(rpt) != len(A):
        raise ValueError("the rpt argument must be 1-dimensional sequence of length len(A).")
    nud = np.array(rpt, dtype=np.int32)
    if not np.all(np.mod(rpt, 1) == 0) or np.any(nud < 0):
        raise ValueError("the rpt argument must contain non-negative integers.")
    if np.all(nud == 0):
        # All rows/columns dropped: hafnian of the empty matrix is 1.
        return 1.0
    if np.sum(nud) % 2 != 0 and not loop:
        # Odd total dimension admits no perfect matching.
        return 0.0
    if mu is None:
        mu = A.diagonal().copy()
    if np.allclose(A, 0, rtol=rtol, atol=atol):
        # Zero matrix: only self-loop (displacement) terms can contribute.
        if loop:
            return np.prod(mu ** rpt)
        return 0
    if len(mu) != len(A):
        raise ValueError("Length of means vector must be the same length as the matrix A.")
    if loop:
        return loop_hafnian(A, D=mu, reps=rpt, glynn=glynn)
    return _haf(A, reps=rpt, glynn=glynn)
def hafnian_banded(A, loop=False, rtol=1e-05, atol=1e-08):
    """Returns the loop hafnian of a banded matrix.

    For the derivation see Section V of `'Efficient sampling from shallow Gaussian quantum-optical
    circuits with local interactions', Qi et al. <https://arxiv.org/abs/2009.11824>`_.

    Args:
        A (array): a square, symmetric array of even dimensions
        loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.
        rtol (float): the relative tolerance parameter used in ``np.allclose``
        atol (float): the absolute tolerance parameter used in ``np.allclose``

    Returns:
        int or float or complex: the loop hafnian of matrix ``A``
    """
    input_validation(A, atol=atol, rtol=rtol)
    (n, _) = A.shape
    w = bandwidth(A)
    if not loop:
        # Plain hafnian: drop the diagonal so self-loop terms vanish.
        A = A - np.diag(np.diag(A))
    # Dynamic programming over subsets of (1-based) indices, memoized in a
    # dict keyed by tuples of indices.
    loop_haf = {(): 1, (1,): A[0, 0]}
    for t in range(1, n + 1):
        # NOTE(review): indices more than 2*w below t appear to be always
        # retained (outside the band they cannot pair with later indices);
        # grounded only in the bandwidth paper cited above — confirm there.
        if t - 2 * w - 1 > 0:
            lower_end = set(range(1, t - 2 * w))
        else:
            lower_end = set()
        upper_end = set(range(1, t + 1))
        diff = [item for item in upper_end if item not in lower_end]
        # Makes sure set ordering is preserved when the difference of two sets is taken
        # This is also used in the if statement below
        ps = powerset(diff)
        lower_end = tuple(lower_end)
        for D in ps:
            if lower_end + D not in loop_haf:
                # pylint: disable=consider-using-generator
                # Expansion over the partner i of the largest index t:
                # each term removes the pair {i, t} from the active set.
                loop_haf[lower_end + D] = sum(
                    [
                        A[i - 1, t - 1]
                        * loop_haf[
                            tuple([item for item in lower_end + D if item not in set((i, t))])
                        ]
                        for i in D
                    ]
                )
    return loop_haf[tuple(range(1, n + 1))]
@numba.jit(nopython=True)
def _one_det(B): # pragma: no cover
    """Calculates the determinant of an antisymmetric matrix with entries distributed
    according to a normal distribution, with scale equal to the entries of the symmetric matrix
    given as input.

    Args:
        B (array[float]): symmetric matrix of scales

    Returns:
        float: determinant of the sampled antisymmetric matrix
    """
    mat = np.empty_like(B, dtype=np.float64)
    n, m = B.shape
    for i in range(n):
        for j in range(m):
            # Scale an independent standard normal sample by B[i, j] and
            # mirror it with opposite sign to enforce antisymmetry.
            mat[i, j] = B[i, j] * np.random.normal()
            # NOTE(review): when i == j this second assignment negates the
            # diagonal entry instead of zeroing it, so the sampled matrix is
            # not strictly antisymmetric unless diag(B) == 0. The estimator's
            # mean is presumably unaffected because E[normal] = 0 — confirm.
            mat[j, i] = -mat[i, j]
    return np.linalg.det(mat)
@numba.jit(nopython=True)
def hafnian_approx(A, num_samples=1000): # pragma: no cover
    """Returns the approximation to the hafnian of a matrix with non-negative entries.

    The approximation follows the stochastic Barvinok's approximation allowing the
    hafnian to be approximated as the mean of determinants of sampled matrices.
    The accuracy of the approximation increases with increasing number of iterations.

    Args:
        A (array[float]): a symmetric matrix with non-negative entries
        num_samples (int): number of determinant samples averaged over

    Returns:
        float: approximate hafnian of the input
    """
    # Element-wise square root gives the scales for the random antisymmetric
    # samples whose determinants average to the hafnian.
    sqrtA = np.sqrt(A)
    return np.array([_one_det(sqrtA) for _ in range(num_samples)]).mean()
|
from __future__ import print_function
import numpy as np
import time
"""
init:
state = 0
c = random(0,T)
step:
if(a neighbor flashed)
c = c + k * c
else
c = c + 1
if(c >= T)
state = 1
c = 0
else
state = 0
"""
class Oscillator(object):
    """A single pulse-coupled integrate-and-fire oscillator.

    The counter ``c`` starts at a random value in ``[0, T)``. Each step it
    either increments by one, or — when a neighbour fired — jumps ahead
    multiplicatively by factor ``(1 + k)``. Reaching the threshold ``T``
    fires the oscillator (``state = 1``) and resets the counter to zero.
    """

    def __init__(self, T=100, k=0.5):
        """Initialise with threshold ``T`` and coupling strength ``k``."""
        self.state = 0
        self.c = np.random.randint(0, T)
        self.T = T
        self.k = k

    def step(self, is_neighbour_triggered):
        """Advance the oscillator by one time step.

        Args:
            is_neighbour_triggered (bool): whether a neighbour fired last step.
        """
        if is_neighbour_triggered:
            # Coupled advance: the counter jumps by a fraction k of itself.
            self.c += self.k * self.c
        else:
            self.c += 1
        if self.c >= self.T:
            # Fire and reset.
            self.state = 1
            self.c = 0
        else:
            self.state = 0

    def is_triggered(self):
        """Return True if the oscillator fired on its most recent step."""
        # The counter is zero exactly after a reset (or a zero random start).
        return self.c == 0
class CoupledOscillator(object):
    """A 2-D lattice of pulse-coupled ``Oscillator`` instances.

    Each step, an oscillator whose von Neumann neighbour fired on the
    previous step advances its counter multiplicatively; otherwise it
    advances by one. ``run`` repeatedly steps the lattice and prints the
    grid of firing states.
    """

    def __init__(self, grid=(10, 10), T=100):
        """Create the oscillator grid.

        Args:
            grid (tuple): (width, height) of the lattice.
            T (int): firing threshold forwarded to every oscillator.
        """
        self.width = grid[0]
        self.height = grid[1]
        self.grid = np.zeros(grid)
        self.T = T
        self.plot_now = True
        self.initialize()

    def initialize(self):
        """Allocate a ``width x height`` array of oscillators.

        Bug fix: the original built a ``height x width`` nested list but
        every other method indexes it ``[i][j]`` with ``i < width`` and
        ``j < height``, raising ``IndexError`` for non-square grids.
        The constructor's ``T`` parameter is now also forwarded to each
        oscillator (it was previously accepted but ignored).
        """
        self.array = [
            [Oscillator(T=self.T) for _ in range(self.height)]
            for _ in range(self.width)
        ]

    def step_all(self):
        """Advance every oscillator in the lattice by one time step."""
        for i in range(self.width):
            for j in range(self.height):
                trigger = self.check_neighbours_trigger(i, j)
                self.array[i][j].step(trigger)

    def check_neighbours_trigger(self, i, j):
        """Return True if any in-bounds von Neumann neighbour of (i, j) fired."""
        neighbours = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]
        for _i, _j in neighbours:
            if (0 <= _i < self.width) and (0 <= _j < self.height):
                if self.array[_i][_j].is_triggered():
                    return True
        return False

    def run(self):
        """Run the simulation for 50000 steps, printing the grid each step."""
        for _ in range(50000):
            self.step_all()
            if self.plot_now:
                self.plot()
                time.sleep(0.5)

    def plot(self):
        """Copy oscillator firing states into ``self.grid`` and print it."""
        for i in range(self.width):
            for j in range(self.height):
                self.grid[i, j] = self.array[i][j].state
        print('-' * 80)
        print(self.grid, '\r')
if __name__ == '__main__':
    # Build a default 10x10 lattice and run the long-running simulation
    # (50000 steps with a 0.5 s pause after each printed frame).
    co = CoupledOscillator()
    co.run()
|
# ------------------------------------------------------------------------------
# Copyright 2020 Graz University of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
|
"""
GCP provides a set of services for Google Cloud Platform provider.
"""
from src.diagrams import Node
class _ICONS(Node):
    """Base diagram node whose images come from the bundled icons directory."""

    # NOTE(review): these class attributes are presumably consumed by the
    # diagrams ``Node`` machinery to locate icon files and style labels —
    # confirm against ``src.diagrams.Node``.
    # Provider identifier for this icon family.
    _provider = "icons"
    # Directory (relative path) holding the icon image files.
    _icon_dir = "../resources/icons"
    # Label colour used when rendering nodes.
    fontcolor = "#2d3436"
|
import os
import sys
import numpy as np
import networkx as nx
import pyomo.environ as en
from pyomo.opt import SolverFactory
from pyomo.opt import TerminationCondition, SolverStatus
# Make the directory three levels above this file importable, so the
# project's top-level packages resolve when this module runs as a script.
path = os.path.dirname(os.path.dirname(os.path.
abspath(__file__))))
if path not in sys.path:
    sys.path.append(path)
def optimize_dist(threshold, cost_matrix, pow_range_matrix, distance_matrix,
demand_coherent_area, dist_cost_coherent_area, mip_gap,
obj_case=1, full_load_hours=3000, max_pipe_length=3000,
logFile_path=None):
# st = time.time()
'''
distance_matrix = np.round(distance_matrix[:7,:7])
demand_coherent_area = np.round(demand_coherent_area[:7])
dist_cost_coherent_area = dist_cost_coherent_area[:7]
Number of coherent areas: n
demand_coherent_area (n x 1) [MWh]: demand in each coherent area
dist_cost_coherent_area (n x 1) [EUR]: distribution cost of each coherent
area
transmision_line_cost (n x n) [EUR/m]: constant value for the cost of
transmission line
'''
max_cap = 1000 # range of x variables
BigM = 10 ** 6
if len(demand_coherent_area) == 0:
term_cond = False
dh = np.zeros(6)
edge_list = []
return term_cond, dh, np.array(edge_list)
if len(demand_coherent_area) == 1:
term_cond = True
dh = np.zeros(7)
dh[0] = 1
covered_demand = demand_coherent_area
dist_inv = demand_coherent_area * dist_cost_coherent_area
dist_spec_cost = dist_inv/covered_demand
trans_inv = 0
trans_spec_cost = 0
trans_line_length = 0
dh[1: 7] = covered_demand, dist_inv, dist_spec_cost, trans_inv, \
trans_spec_cost, trans_line_length
edge_list = []
return term_cond, dh, np.array(edge_list)
G = np.argmax(demand_coherent_area)
G2 = np.argsort(demand_coherent_area)[-2]
n = demand_coherent_area.shape[0]
'''
# cut to more distant areas
# Keep only connections to the x closest Areas
x = min(7.0, n-1)
distance_matrix_orig = distance_matrix.copy()
max_pipe_length2 = np.percentile(distance_matrix, (1 + x) / n
* 100.0, axis=0)
mask = (np.ones((n, 1), dtype="f4") * max_pipe_length2)
NotCloseRegions = (distance_matrix > mask)
# if distance_matrix[a, b] > mask and therefore, should be filtered.
# accordingly, distance_matrix[b, a] should be filtered as well.
Filter_NotCloseRegions = np.logical_and(NotCloseRegions, NotCloseRegions.T)
# Assess which regions belong to Category Large Areas ( x Regions)
x = min(10.0, n)
min_energy_large_area = np.percentile(demand_coherent_area, (1 - x / n)
* 100, axis=0)
is_large_area_vct = demand_coherent_area >= min_energy_large_area
#Matrix: mxm Larger areas are true, others are false
maskLA= (np.ones((n, 1), dtype="f4") * is_large_area_vct)
# returns nxn matrix showing each coherent area is smaller than which ones
maskIsSmallerA = ((np.ones((n, 1), dtype="f4") * demand_coherent_area)
< (np.ones((n, 1), dtype="f4") * demand_coherent_area).T)
maskLA[maskIsSmallerA] = False
# Store the distance of an Area (rows) to all larger Large Areas (in Columns)
distance_matrix2 = distance_matrix_orig.copy()
# assign a large distance (BigM) to those coherent areas that are not
# LA (large area).
distance_matrix2[maskLA == False] = BigM
# assign a large distance (BigM) to distance from itself with an exception
# of greatest coherent area (G).
EYE = np.eye(distance_matrix.shape[0], dtype="f4") * BigM
EYE[G, G] = 0
EYE[G2, G2] = 0
distance_matrix2 += EYE
# Distance of Areas (in rows) to the second closest larger Large Area
# but only if
distance2closest_LargeArea = np.zeros(n, dtype="f4")
idx = np.argsort(distance_matrix2, axis=1)[:, :2]
count__ = 0
for i in range(n):
dist2closest = distance_matrix_orig[i, idx[i, 0]]
dist2_2ndclosest = distance_matrix_orig[i, idx[i, 1]]
dist_1to2nd = distance_matrix_orig[idx[i, 1], idx[i, 0]]
# if dist2closest=0, it means it is the largest area and the next
# largest area is the closest large area!
if dist2closest == 0:
distance2closest_LargeArea[i] = dist2_2ndclosest
count__ += 1
continue
if dist2_2ndclosest == 0:
distance2closest_LargeArea[i] = dist2closest
count__ += 1
continue
cos_ = ((dist2closest ** 2 + dist2_2ndclosest ** 2 - dist_1to2nd ** 2)/(2 * dist2closest * dist2_2ndclosest))
if dist2_2ndclosest > dist_1to2nd:
# add_2nd_largest = False
distance2closest_LargeArea[i] = dist2closest
continue
elif cos_ <= -1 or cos_ >= 1:
distance2closest_LargeArea[i] = dist2_2ndclosest
count__ += 1
continue
else:
arc_between = np.arccos(cos_) * 180 / np.pi
if (arc_between < 20):
# add_2nd_largest = False
distance2closest_LargeArea[i] = dist2closest
elif arc_between < (20 + 35 * (dist2_2ndclosest / dist2closest / 1.2 - 1)):
# add_2nd_largest = False
distance2closest_LargeArea[i] = dist2closest
else:
distance2closest_LargeArea[i] = dist2_2ndclosest
count__ += 1
# print("Allowed connection to 2nd most distant large area: %s" % count__)
# Look for closest Large Areas
m2 = (distance_matrix2 <= (np.ones((n, 1), dtype="f4") * distance2closest_LargeArea).T)
# Build Filter Matrix: True if connection to close Larger Area
# Therefore logical_or
Filter_CloseLargeAreas = np.logical_or(m2, m2.T)
FilterNoConnection = np.logical_and(Filter_NotCloseRegions
, Filter_CloseLargeAreas == False)
distance_matrix[FilterNoConnection] = BigM
# print(np.sum(np.logical_and(distance_matrix <= BigM, distance_matrix > 0)))
fix_to_zero_index = np.argwhere(distance_matrix >= max_pipe_length)
# print("Connections : %i" %(distance_matrix.size - fix_to_zero_index.shape[0] - n))
# np.savetxt('fix_to_zero_index.csv', fix_to_zero_index, delimiter=",")
# distance_matrix3: Distance to a Large Area
distance_matrix3 = distance_matrix.copy()
distance_matrix3[maskLA==False] = BigM
HasConnection2LargerArea_Vctr = np.zeros(n, dtype="int8")
# HasConnection2LargerArea_Vctr == 1, if a short distance is available
HasConnection2LargerArea_Vctr[np.min(distance_matrix3, axis=1) < max_pipe_length] = 1
for i in range(10):
# Built Matrix: For each Area (in Rows), specify if connected Area is
# is Connected to a Larger Large Area
# Loop to check if it can handle it forward to such a region
HasConnection2LargerArea_Vctr_prev = HasConnection2LargerArea_Vctr.copy()
HasConnection2LargArea_Mtrx = np.ones((n, 1), dtype="int8") * HasConnection2LargerArea_Vctr
D = distance_matrix.copy()
D[HasConnection2LargArea_Mtrx==False] = BigM
HasConnection2LargerArea_Vctr[np.min(D, axis=1) < max_pipe_length] = 1
if (np.sum(HasConnection2LargerArea_Vctr_prev) == np.sum(HasConnection2LargerArea_Vctr)):
break
###########################################################################
###########################################################################
AddConection = np.zeros((n, n)).astype(bool)
###########################################################################
###########################################################################
if min(HasConnection2LargerArea_Vctr) == 0:
# Some regions have no connection to a larger Large Region
# print(HasConnection2LargerArea_Vctr)
HasConnection2LargArea_Mtrx = np.ones((n, 1), dtype="int8") * HasConnection2LargerArea_Vctr
distance_matrix3 = distance_matrix_orig.copy()
distance_matrix3[HasConnection2LargArea_Mtrx==0] = BigM
ClosestDistance2ConnectedArea = np.min(distance_matrix3, axis=1)
AddConection = (distance_matrix_orig < (np.ones((n, 1), dtype="f4") * ClosestDistance2ConnectedArea * 1.15))
# Remove Regions which are already connected
AddConection[HasConnection2LargerArea_Vctr == 1, :] = False
AddConection = np.maximum(AddConection, AddConection.T)
AddConection[distance_matrix <= max_pipe_length] = False
distance_matrix[AddConection] = distance_matrix_orig[AddConection]
fix_to_zero_index = np.argwhere(np.logical_and(
distance_matrix >= max_pipe_length
, AddConection==False))
# np.savetxt('AddConection.csv', AddConection, delimiter=",")
# np.savetxt('distance_matrix_final.csv', distance_matrix, delimiter=",")
# print("Connections after additional Connections: %i" %(distance_matrix.size - fix_to_zero_index.shape[0] - n))
'''
m = en.ConcreteModel()
solver = SolverFactory('gurobi', solver_io='python')
# the gap between the lower and upper objective bound
solver.options["MIPGap"] = mip_gap
# the relative difference between the primal and dual objective value
solver.options["BarConvTol"] = 1e-1
# set to 1 if you are interested in feasible solutions
# set to 2 if no problem with finding good quality solution exist and you want to focus on optimality
# set to 3 if the best objective bound is moving very slowly (or not at all), to focus on bound
solver.options["MIPFocus"] = 3
# memory used. the rest will be written in the hard drive.
# solver.options["NodefileStart"] = 0.5
# number of threads used by the solver
# solver.options["Threads"] = 2
solver.options["TimeLimit"] = 300
# ##########################################################################
# ########## Sets:
# ##########################################################################
m.index_row = en.RangeSet(0, n-1)
m.index_col = en.RangeSet(0, n-1)
# ##########################################################################
# ########## Parameters:
# ##########################################################################
m.th = en.Param(m.index_row, initialize=threshold)
m.cap_up = en.Param(initialize=np.sum(demand_coherent_area) -
demand_coherent_area[G])
def demand(m, i):
return demand_coherent_area[i]
m.q = en.Param(m.index_row, initialize=demand)
def distribution_cost(m, i):
return dist_cost_coherent_area[i]
m.dist_cost = en.Param(m.index_row, initialize=distribution_cost)
def l_length(m, i, j):
return distance_matrix[i, j]
m.line_length = en.Param(m.index_row, m.index_col, initialize=l_length)
# ##########################################################################
# ########## Variables:
# ##########################################################################
m.q_bool = en.Var(m.index_row, domain=en.Binary, initialize=1)
m.l_bool = en.Var(m.index_row, m.index_col, domain=en.Binary, initialize=0)
m.line_capacity = en.Var(m.index_row, m.index_col,
domain=en.NonNegativeReals, bounds=(0, max_cap),
initialize=0)
m.line_cost = en.Var(m.index_row, m.index_col, domain=en.NonNegativeReals,
initialize=0)
# set the largest demand zone to be part of the result
m.q_bool[G].fix(1)
for i in m.index_row:
m.l_bool[i, G].fix(0)
m.l_bool[i, i].fix(0)
m.line_capacity[i, G].fix(0)
m.line_capacity[i, i].fix(0)
for i in m.index_row:
for j in m.index_col:
if distance_matrix[i, j] > max_pipe_length:
m.l_bool[i, j].fix(0)
m.line_capacity[i, j].fix(0)
'''
for i in range(len(demand_coherent_area)):
# Lowest transmission line capacity is 0.2 Mw. Intercept of the cost function
# in the piecewise linear expresion is 242.87. It should be set to zero if the
# capacity is less than 0.2 MW
if demand_coherent_area[i]/full_load_hours < 0.2:
for j in range(len(demand_coherent_area)):
m.l_bool[i, j].fix(0)
m.line_capacity[i, j].fix(0)
m.l_bool[j, i].fix(0)
m.line_capacity[j, i].fix(0)
'''
# ##########################################################################
# ########## Constraints:
# ##########################################################################
def overall_cost_rule(m):
return sum(m.line_cost[i, j] * m.line_length[i, j]
for i in m.index_row for j in m.index_col) <= \
sum(m.q_bool[i]*m.q[i]*(m.th[i]-m.dist_cost[i])
for i in m.index_row)
m.overall_cost = en.Constraint(rule=overall_cost_rule)
def max_edge_number_rule(m):
return sum(m.l_bool[i, j] for i in m.index_row
for j in m.index_col) == sum(m.q_bool[i]
for i in m.index_row) - 1
m.max_edge_number = en.Constraint(rule=max_edge_number_rule)
def edge_connectivity_rule(m, i):
if i == G:
# l_bool[x, G] are already fixed to zero
return en.Constraint.Skip
else:
return m.q_bool[i] <= sum(m.l_bool[j, i] for j in m.index_row)
m.edge_connectivity = en.Constraint(m.index_row,
rule=edge_connectivity_rule)
# m.edge_connectivity_0 = en.Constraint(expr=1 <= sum(m.l_bool[G, j]
# for j in m.index_col))
def edge_connectivity_2_rule(m, i, j):
if i == G:
return en.Constraint.Skip
else:
return m.l_bool[i, j] <= sum(m.l_bool[h, i]
for h in m.index_row if h != j)
m.edge_connectivity_2 = en.Constraint(m.index_row, m.index_col,
rule=edge_connectivity_2_rule)
def edge_connectivity_3_rule(m, i):
if i == G:
return en.Constraint.Skip
else:
return sum(m.l_bool[h, i] for h in m.index_row) <= 1
m.edge_connectivity_3 = en.Constraint(m.index_row, rule=edge_connectivity_3_rule)
def edge_active_rule(m, i, j):
return 2*(m.l_bool[i, j] + m.l_bool[j, i]) <= m.q_bool[i] + m.q_bool[j]
m.edge_active = en.Constraint(m.index_row, m.index_col,
rule=edge_active_rule)
def capacity_lower_bound_rule(m, i, j):
return m.line_capacity[i, j] >= (m.l_bool[i, j]*m.q[j])/full_load_hours
m.capacity_lower_bound = en.Constraint(m.index_row, m.index_col,
rule=capacity_lower_bound_rule)
def capacity_upper_bound_rule(m, i, j):
return m.line_capacity[i, j] <= \
(sum(m.q[h] for h in m.index_row if (h != G and h != i)) -
sum(m.l_bool[h, i] * m.q[h]
for h in m.index_row if h != G))/full_load_hours
m.capacity_upper_bound = en.Constraint(m.index_row, m.index_col,
rule=capacity_upper_bound_rule)
def force_cap_to_zero_rule(m, i, j):
return m.line_capacity[i, j] - m.l_bool[i, j]*m.cap_up <= 0
m.force_cap_to_zero = en.Constraint(m.index_row, m.index_col,
rule=force_cap_to_zero_rule)
'''
def force_cap_to_zero_2_rule(m, i, j):
return m.line_capacity[i, j] + 0.99 >= m.l_bool[i, j]
m.force_cap_to_zero_2 = en.Constraint(m.index_row, m.index_col,
rule=force_cap_to_zero_2_rule)
'''
def capacity_flow_rule(m, i):
if i == G:
return sum(m.line_capacity[G, h] for h in m.index_col) == \
sum(m.q_bool[h]*m.q[h]
for h in m.index_row if h != G)/full_load_hours
else:
return sum(m.line_capacity[h, i] for h in m.index_row) - \
sum(m.line_capacity[i, h] for h in m.index_col) == \
m.q_bool[i]*m.q[i]/full_load_hours
m.capacity_flow = en.Constraint(m.index_row, rule=capacity_flow_rule)
def f(m, i, j, x):
if x >= pow_range_matrix[0] and x < pow_range_matrix[1]:
return x * (cost_matrix[1] - cost_matrix[0])/(pow_range_matrix[1] - pow_range_matrix[0])
elif x >= pow_range_matrix[1] and x < pow_range_matrix[2]:
return (x - pow_range_matrix[1]) * (cost_matrix[2] - cost_matrix[1])/(pow_range_matrix[2] - pow_range_matrix[1]) + cost_matrix[1]
elif x >= pow_range_matrix[2] and x < pow_range_matrix[3]:
return (x - pow_range_matrix[2]) * (cost_matrix[3] - cost_matrix[2])/(pow_range_matrix[3] - pow_range_matrix[2]) + cost_matrix[2]
elif x >= pow_range_matrix[3] and x < pow_range_matrix[4]:
return (x - pow_range_matrix[3]) * (cost_matrix[4] - cost_matrix[3])/(pow_range_matrix[4] - pow_range_matrix[3]) + cost_matrix[3]
elif x >= pow_range_matrix[4] and x < pow_range_matrix[5]:
return (x - pow_range_matrix[4]) * (cost_matrix[5] - cost_matrix[4])/(pow_range_matrix[5] - pow_range_matrix[4]) + cost_matrix[4]
elif x >= pow_range_matrix[5] and x < pow_range_matrix[6]:
return (x - pow_range_matrix[5]) * (cost_matrix[6] - cost_matrix[5])/(pow_range_matrix[6] - pow_range_matrix[5]) + cost_matrix[5]
elif x >= pow_range_matrix[6] and x < pow_range_matrix[7]:
return (x - pow_range_matrix[6]) * (cost_matrix[7] - cost_matrix[6])/(pow_range_matrix[7] - pow_range_matrix[6]) + cost_matrix[6]
elif x >= pow_range_matrix[7] and x < pow_range_matrix[8]:
return (x - pow_range_matrix[7]) * (cost_matrix[8] - cost_matrix[7])/(pow_range_matrix[8] - pow_range_matrix[7]) + cost_matrix[7]
elif x >= pow_range_matrix[8] and x < pow_range_matrix[9]:
return (x - pow_range_matrix[8]) * (cost_matrix[9] - cost_matrix[8])/(pow_range_matrix[9] - pow_range_matrix[8]) + cost_matrix[8]
elif x >= pow_range_matrix[9] and x < pow_range_matrix[10]:
return (x - pow_range_matrix[9]) * (cost_matrix[10] - cost_matrix[9])/(pow_range_matrix[10] - pow_range_matrix[9]) + cost_matrix[9]
elif x >= pow_range_matrix[10] and x < pow_range_matrix[11]:
return (x - pow_range_matrix[10]) * (cost_matrix[11] - cost_matrix[10])/(pow_range_matrix[11] - pow_range_matrix[10]) + cost_matrix[10]
elif x >= pow_range_matrix[11] and x < pow_range_matrix[12]:
return (x - pow_range_matrix[11]) * (cost_matrix[12] - cost_matrix[11])/(pow_range_matrix[12] - pow_range_matrix[11]) + cost_matrix[11]
elif x >= pow_range_matrix[12] and x < pow_range_matrix[13]:
return (x - pow_range_matrix[12]) * (cost_matrix[13] - cost_matrix[12])/(pow_range_matrix[13] - pow_range_matrix[12]) + cost_matrix[12]
elif x >= pow_range_matrix[13] and x < pow_range_matrix[14]:
return (x - pow_range_matrix[13]) * (cost_matrix[14] - cost_matrix[13])/(pow_range_matrix[14] - pow_range_matrix[13]) + cost_matrix[13]
elif x >= pow_range_matrix[14] and x < pow_range_matrix[15]:
return (x - pow_range_matrix[14]) * (cost_matrix[15] - cost_matrix[14])/(pow_range_matrix[15] - pow_range_matrix[14]) + cost_matrix[14]
else:
return (x - pow_range_matrix[15]) * (cost_matrix[16] - cost_matrix[15])/(pow_range_matrix[16] - pow_range_matrix[15]) + cost_matrix[15]
m.pw_con = en.Piecewise(m.index_row, m.index_col, m.line_cost, m.line_capacity,
pw_pts=list(pow_range_matrix), pw_constr_type='EQ', f_rule=f)
def obj_rule_1(m):
# OBJ1: Revenue-Oriented Prize Collecting
return sum(m.th[i]*m.q_bool[i]*m.q[i] for i in m.index_row) - \
sum(m.line_cost[i, j] * m.line_length[i, j]
for i in m.index_row for j in m.index_col)
def obj_rule_2(m):
# OBJ2: Profit-Oriented Prize Collectiing
return sum((m.th[i]-m.dist_cost[i])*m.q_bool[i]*m.q[i]
for i in m.index_row) - \
sum(m.line_cost[i, j] * m.line_length[i, j]
for i in m.index_row for j in m.index_col)
if obj_case == 1:
m.obj = en.Objective(rule=obj_rule_1, sense=en.maximize)
elif obj_case == 2:
m.obj = en.Objective(rule=obj_rule_2, sense=en.maximize)
else:
raise ValueError('Objective method is selected wrongly! Please enter '
'"1" for Revenue-Oriented Prize Collection or "2" '
'for Profit-Oriented Prize Collection')
results = solver.solve(m, report_timing=False, tee=False, logfile="gurobi.log")
print("Solver Termination: ", results.solver.termination_condition)
print("Solver Status: ", results.solver.status)
term_cond = results.solver.termination_condition == TerminationCondition.optimal
if results.solver.status == SolverStatus.aborted:
term_cond = "aborted"
'''
##################
# save variable values to a csv file
var_names = [name(v) for v in m.component_objects(Var)]
list_of_vars = [value(v[index]) for v in m.component_objects(Var) for index in v]
df = pd.DataFrame()
result_series = pd.Series(list_of_vars, index=var_names)
result_series.to_csv(outCSV)
if done:
results.write()
print('obejctive = ', value(m.obj))
###################
'''
edge_list = []
dist_inv = 0
trans_inv = 0
covered_demand = 0
trans_line_length = 0
dh = np.zeros(n+6)
for i in range(n):
dh[i] = en.value(m.q_bool[i])
covered_demand += demand_coherent_area[i] * en.value(m.q_bool[i])
dist_inv += dh[i]*demand_coherent_area[i] * dist_cost_coherent_area[i]
for i in range(n):
for j in range(n):
if en.value(m.l_bool[i, j]) > 0:
trans_inv += en.value(m.line_cost[i, j]) * \
en.value(m.line_length[i, j])
trans_line_length += en.value(m.line_length[i, j])
edge_list.append([i, j, en.value(m.line_capacity[i, j])])
'''
for i in range(n):
for j in range(n):
if en.value(m.l_bool[i, j]) > 0:
print(i, " , ", j, "\t capacity: ", en.value(m.line_capacity[i, j]), "\t line: ", en.value(m.l_bool[i, j]))
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
for i in range(n):
for j in range(n):
if en.value(m.line_capacity[i, j]) > 0:
print(i, " , ", j, "\t capacity: ", en.value(m.line_capacity[i, j]), "\t line: ", en.value(m.l_bool[i, j]))
'''
# meter to km
trans_line_length /= 1000
dist_spec_cost = dist_inv/covered_demand
trans_spec_cost = trans_inv/covered_demand
dh[n: n+6] = covered_demand, dist_inv, dist_spec_cost, trans_inv, \
trans_spec_cost, trans_line_length
return term_cond, dh, np.array(edge_list)
|
# Mount-option flag names (see mount(8)).
NOEXEC = 'noexec'  # disallow direct execution of binaries on the mount
NOSUID = 'nosuid'  # ignore set-user-ID / set-group-ID bits
NODEV = 'nodev'  # disallow access to device special files
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import redirect
from django.views.generic import TemplateView
from django.shortcuts import render
from .forms import CommentForm
# Create your views here.
class CommentView(TemplateView):
    """Accept a comment via POST and send the visitor back to the page it
    belongs to, or re-render a failure page when the form is invalid."""

    # Only POST is allowed; other verbs are rejected by Django's dispatch.
    http_method_names = ['post']
    template_name = 'comment/result.html'

    def post(self, request, *args, **kwargs):
        """Validate and save the submitted comment.

        On success, redirect to ``target`` (the page being commented on).
        On failure, render the result template with ``succeed=False``.
        """
        comment_form = CommentForm(request.POST)
        target = request.POST.get('target')
        if comment_form.is_valid():
            instance = comment_form.save(commit=False)
            instance.target = target
            instance.save()
            # Fix: the original assigned `succeed = True` here and then
            # immediately returned, so the assignment was dead code.
            return redirect(target)
        context = {
            'succeed': False,
            # 'form': comment_form,
            'target': target,
        }
        return self.render_to_response(context)
|
##############################################################################
#
# Copyright (c) 2000-2009 Jens Vagelpohl and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" LDAP-based user object
"""
import time
from AccessControl import ClassSecurityInfo
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import access_contents_information
from AccessControl.User import BasicUser
from DateTime import DateTime
from .utils import _verifyUnicode
from .utils import encoding
class NonexistingUser:
    """Fake user we can use in our negative cache."""

    def __init__(self):
        # Record when this fake user was created.
        self.birth = DateTime()

    def getCreationTime(self):
        """ Return the timestamp this cache entry was created at. """
        return self.birth

    def _getPassword(self):
        """ Fake users never have a password. """
        return None
class LDAPUser(BasicUser):
    """ A user object for LDAP users """

    security = ClassSecurityInfo()

    # Per-instance map of LDAP attribute name -> value; filled in __init__.
    _properties = None

    def __init__(self, uid, name, password, roles, domains, user_dn,
                 user_attrs, mapped_attrs, multivalued_attrs=(),
                 binary_attrs=(), ldap_groups=()):
        """ Instantiate a new LDAPUser object

        uid, name and user_dn are coerced to unicode.  Values from
        ``user_attrs`` are copied into ``_properties``: the full list is
        kept for attributes named in ``multivalued_attrs``, otherwise only
        the first element.  ``mapped_attrs`` pairs then publish aliases.
        """
        self._properties = {}
        self.id = _verifyUnicode(uid)
        self.name = _verifyUnicode(name)
        # Name-mangled password slot (becomes self._LDAPUser__);
        # read back via _getPassword below.
        self.__ = password
        self._dn = _verifyUnicode(user_dn)
        self.roles = roles
        # NOTE(review): the `domains` argument is accepted but ignored and
        # self.domains always starts empty -- confirm this is intentional.
        self.domains = []
        self._ldap_groups = ldap_groups
        self.RID = ''
        self.groups = ''
        now = time.time()
        self._created = now
        for key in user_attrs.keys():
            if key in multivalued_attrs:
                # Multi-valued attribute: keep the whole value list.
                prop = user_attrs.get(key, [None])
            else:
                # Single-valued attribute: keep the first value only.
                prop = user_attrs.get(key, [None])[0]
            # objectGUID and declared binary attributes must not be
            # unicode-coerced.
            if isinstance(prop, str) and key != 'objectGUID' and \
               key not in binary_attrs:
                prop = _verifyUnicode(prop)
            self._properties[key] = prop
        # Publish configured aliases (attribute name -> mapped name).
        for att_name, map_name in mapped_attrs:
            self._properties[map_name] = self._properties.get(att_name)
        self._properties['dn'] = user_dn

    ######################################################
    # Distinguish between user id and name
    #######################################################

    @security.public
    def getId(self):
        """ Return the immutable user id. """
        return self.id

    ######################################################
    # User interface not implemented in class BasicUser
    #######################################################

    @security.private
    def _getPassword(self):
        """ Retrieve the password """
        return self.__

    @security.public
    def getUserName(self):
        """ Get the name associated with this user """
        return self.name

    @security.public
    def getRoles(self):
        """ Return the user's roles """
        # Every non-anonymous user implicitly carries 'Authenticated'.
        if self.name == 'Anonymous User':
            return tuple(self.roles)
        else:
            return tuple(self.roles) + ('Authenticated',)

    @security.public
    def getDomains(self):
        """ The user's domains """
        return self.domains

    #######################################################
    # Interface unique to the LDAPUser class of user objects
    #######################################################

    @security.protected(access_contents_information)
    def __getattr__(self, name):
        """ Look into the _properties as well... """
        # Called only when normal attribute lookup fails; fall back to the
        # cached LDAP attribute map.
        my_props = self._properties
        if name in my_props:
            prop = my_props.get(name)
            return prop
        else:
            raise AttributeError(name)

    @security.protected(access_contents_information)
    def getProperty(self, prop_name, default=''):
        """ Return the user property referred to by prop_name,
        if the attribute is indeed public.
        """
        return self._properties.get(prop_name, default)

    @security.protected(access_contents_information)
    def getUserDN(self):
        """ Return the user's full Distinguished Name """
        return self._dn

    @security.protected(access_contents_information)
    def getCreationTime(self):
        """ When was this user object created? """
        return DateTime(self._created)

    def _getLDAPGroups(self):
        """ What groups in LDAP does this user belong to? """
        return tuple(self._ldap_groups)
# Apply the ClassSecurityInfo declarations made on LDAPUser.
InitializeClass(LDAPUser)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, HttpResponse
from django.core.urlresolvers import reverse
from django.contrib import messages
from .models import User
# Create your views here.
def users(request):
    """Render the index page listing every stored User."""
    all_users = User.objects.all()
    return render(request, "main/users.html", {'user_list': all_users})
def new_user(request):
    """Display the blank new-user form."""
    return render(request, "main/new.html")
def edit_user(request, id):
    """Render the edit form for the User with the given primary key."""
    user = User.objects.get(id=id)
    return render(request, "main/edit.html", {'user': user})
def show_user(request, id):
    """Render the detail page for the User with the given primary key."""
    user = User.objects.get(id=id)
    return render(request, "main/show.html", {'user': user})
def create(request):
    """Validate POSTed user data; create the User, or flash the validation
    errors and bounce back to the new-user form."""
    errors = User.objects.validator(request.POST)
    if errors:
        # Fix: .iteritems() is Python-2-only; .items() works on both 2 and 3.
        for tag, error in errors.items():
            messages.error(request, error, extra_tags=tag)
        return redirect('new_user')
    User.objects.create(
        first_name=request.POST.get('first_name'),
        last_name=request.POST.get('last_name'),
        email=request.POST.get('email'),
    )
    return redirect('users')
def destroy(request, id):
    """Delete the User with the given id and return to the index page."""
    User.objects.get(id=id).delete()
    return redirect('users')
def update(request):
    """Validate POSTed data and update the existing User, or flash the
    validation errors and bounce back to the edit form."""
    errors = User.objects.validator(request.POST)
    if errors:
        # Fix: .iteritems() is Python-2-only; .items() works on both 2 and 3.
        for tag, error in errors.items():
            messages.error(request, error, extra_tags=tag)
        return redirect('edit_user', id=request.POST.get('user_id'))
    user = User.objects.get(id=request.POST.get('user_id'))
    user.first_name = request.POST.get('first_name')
    user.last_name = request.POST.get('last_name')
    user.email = request.POST.get('email')
    user.save()
    return redirect('users')
|
import re, os, sys, shutil
from pathlib import Path
import pandas as pd
import numpy as np
def split_wnd(df):
    """Expand the comma-packed ``wnd`` field into typed wind columns.

    Sentinel values ('9999' speed, '999' direction) become NaN; direction
    is additionally encoded as sine/cosine components.
    """
    col_names = [
        'wnd_direction',       # Angle, clockwise from true north, the wind blows from.
        'wnd_direction_code',  # If type code (below) = V, 999 indicates variable direction.
        'wnd_type_code',       # A value of 9 with speed 0000 indicates calm winds.
        'wnd_speed',           # Meters per second. 9999 = missing.
        'wnd_speed_code',
    ]
    parts = df['wnd'].str.split(',')
    out = pd.DataFrame.from_dict(
        dict(zip(df.index, parts)), orient='index', columns=col_names
    )
    # Replace the missing-value sentinels before numeric conversion.
    out['wnd_speed'] = pd.to_numeric(
        out['wnd_speed'].replace('9999', np.nan)
    ).astype('float32')
    out['wnd_direction'] = pd.to_numeric(
        out['wnd_direction'].replace('999', np.nan)
    ).astype('float32')
    # Circular encoding keeps 0 deg and 360 deg adjacent for models.
    radians = np.deg2rad(out['wnd_direction'])
    out['wnd_direction_sin'] = np.sin(radians)
    out['wnd_direction_cos'] = np.cos(radians)
    return out
def split_ceil(df):
    """Expand the comma-packed ``cig`` field into cloud-ceiling columns."""
    col_names = [
        'ceil_height',              # Lowest clouds in meters. Unlimited = 22000.
        'ceil_code',                # Quality status of the reported ceiling height.
        'ceil_determination_code',  # Method used to determine the ceiling.
        'ceil_cavok',               # Whether 'Ceiling and Visibility Okay' was reported.
    ]
    parts = df['cig'].str.split(',')
    out = pd.DataFrame.from_dict(
        dict(zip(df.index, parts)), orient='index', columns=col_names
    )
    # A ceiling exists unless the height is the 'unlimited' sentinel (22000);
    # missing observations ('99999') get NaN instead of a boolean.
    out['ceil'] = out['ceil_height'] != '22000'
    out.loc[out['ceil_height'] == '99999', 'ceil'] = np.nan
    out['ceil_height'] = (
        out['ceil_height'].replace(['99999', '22000'], np.nan).astype('float32')
    )
    # '9' marks a missing code value.
    out['ceil_code'] = out['ceil_code'].replace('9', np.nan)
    out['ceil_determination_code'] = out['ceil_determination_code'].replace('9', np.nan)
    return out
def split_vis(df):
    """Expand the comma-packed ``vis`` field into visibility columns."""
    col_names = [
        'vis_distance',         # Visibility in meters. 999999 = missing; capped at 160000.
        'vis_code',             # Quality status.
        'vis_variability',      # 'V' when the reported visibility is variable. 9 = missing.
        'vis_variability_code',
    ]
    parts = df['vis'].str.split(',')
    out = pd.DataFrame.from_dict(
        dict(zip(df.index, parts)), orient='index', columns=col_names
    )
    out['vis_distance'] = pd.to_numeric(
        out['vis_distance'].replace('999999', np.nan)
    ).astype('float32')
    # Boolean-as-float flag: 1.0 when the visibility was flagged variable.
    out['vis_variability'] = (out['vis_variability'] == 'V').astype('float32')
    return out
def split_tmp(df):
    """Expand the comma-packed ``tmp`` field into a signed Celsius column.

    Raw values are Celsius scaled by 10 with an explicit sign character;
    '+9999' marks a missing reading and becomes NaN.
    """
    parts = df['tmp'].str.split(',')
    out = pd.DataFrame.from_dict(
        dict(zip(df.index, parts)),
        orient='index',
        columns=['tmp', 'tmp_code'],
    )
    raw = out['tmp']
    # NaN mask: multiplying by it forces missing readings to NaN.
    missing_mask = raw.apply(lambda t: np.nan if t == '+9999' else 1.0)
    signs = raw.apply(lambda t: 1.0 if t[0] == '+' else -1.0)
    magnitudes = raw.apply(lambda t: t[1:]).astype(float) / 10
    out['tmp'] = (missing_mask * signs * magnitudes).astype('float32')
    return out
def split_liquid_precip(df):
    """Expand the optional comma-packed ``aa1`` field into
    liquid-precipitation columns.

    When ``aa1`` is absent, zero-filled columns are returned.
    Fix: the original mutated the caller's ``df`` by adding columns in
    that branch; this version never modifies the input frame.
    """
    zero_cols = ['liquid_precip_period_quantity_hours',
                 'liquid_precip_depth_dimension']
    if 'aa1' not in df.keys():
        # Synthesize an all-zero result aligned with df's index.
        return pd.DataFrame(0, index=df.index, columns=zero_cols)
    unsplit = df['aa1'].fillna(',,,').str.split(',').copy()
    liquid_precip = pd.DataFrame.from_dict(
        dict(zip(df.index, unsplit)),
        orient='index',
        columns=[
            'liquid_precip_period_quantity_hours',  # Hours over which precipitation was measured. 99 = missing.
            'liquid_precip_depth_dimension',        # Measured precipitation depth at observation time.
            'liquid_precip_condition_code',         # Flags trace values. 9 = missing.
            'liquid_precip_quality_code',
        ]
    )
    # NOTE(review): the hours column remains strings/NaN after this replace;
    # confirm downstream expects that before converting to numeric.
    liquid_precip['liquid_precip_period_quantity_hours'] = \
        liquid_precip['liquid_precip_period_quantity_hours'].replace('99', np.nan)
    # '9999' depth sentinel collapses to 0, as do genuinely-missing rows.
    liquid_precip['liquid_precip_depth_dimension'] = pd.to_numeric(
        liquid_precip['liquid_precip_depth_dimension'].replace('9999', 0.0)
    ).astype('float32')
    liquid_precip['liquid_precip_depth_dimension'] = \
        liquid_precip['liquid_precip_depth_dimension'].fillna(0)
    return liquid_precip
def split_snow(df):
    """Expand the optional comma-packed ``aj1`` field into snow columns.

    When ``aj1`` is absent, zero-filled columns are returned.
    Fix: the original mutated the caller's ``df`` by adding columns in
    that branch; this version never modifies the input frame.
    """
    zero_cols = ['snow_depth_dimension',
                 'snow_equivalent_water_depth_dimension']
    if 'aj1' not in df.keys():
        # Synthesize an all-zero result aligned with df's index.
        return pd.DataFrame(0, index=df.index, columns=zero_cols)
    unsplit = df['aj1'].fillna(',,,,,').str.split(',').copy()
    snow = pd.DataFrame.from_dict(
        dict(zip(df.index, unsplit)),
        orient='index',
        columns=[
            'snow_identifier',
            'snow_depth_dimension',   # Snow depth measured at observation time. 9999 = missing.
            'snow_condition_code',    # Flags trace values. 9 = missing.
            'snow_quality_code',
            'snow_equivalent_water_depth_dimension',
            'snow_equivalent_water_condition_code'
        ]
    )
    snow['snow_equivalent_water_condition_code'] = \
        snow['snow_equivalent_water_condition_code'].replace('9', np.nan)
    # '9999' sentinel means missing; collapse to 0 before numeric conversion.
    snow['snow_depth_dimension'] = pd.to_numeric(
        snow['snow_depth_dimension'].replace('9999', 0.0)
    ).astype('float32')
    water_eq = snow['snow_equivalent_water_depth_dimension'].replace('', 0)
    water_eq = water_eq.replace('9999', 0.0)
    snow['snow_equivalent_water_depth_dimension'] = \
        pd.to_numeric(water_eq).astype('float32')
    return snow
|
# 10 Quiz
# What are the primitive types in Python?
# Strings
# Integer numbers
# Float numbers
# Booleans - True and False
# What will we see in the terminal?
fruit = "Apple"
print(fruit[1])
# p
# What will we see in the terminal?
fruit = "Apple"
# When slicing a string, the character at the stop index is not included.
print(fruit[1:-1])
# ppl
# What is the result?
print(10 % 3)
# 1
# What's the result?
print(bool("False"))
# True (any non-empty string is truthy, even the text "False")
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
import requests
from zope.interface import implementer
from warehouse import tasks
from warehouse.cache.origin.interfaces import IOriginCache
class UnsuccessfulPurgeError(Exception):
    """Raised when the purge API responds but reports a non-"ok" status."""

    pass
@tasks.task(bind=True, ignore_result=True, acks_late=True)
def purge_key(task, request, key):
    """Task: purge one surrogate key via the registered origin cache.

    Network failures and unsuccessful purges are logged and retried.
    """
    cacher = request.find_service(IOriginCache)
    request.log.info("Purging %s", key)
    try:
        cacher.purge_key(key)
    except (
        requests.ConnectionError,
        requests.HTTPError,
        requests.Timeout,
        UnsuccessfulPurgeError,
    ) as exc:
        request.log.error("Error purging %s: %s", key, str(exc))
        # task.retry raises; re-raising lets the task framework schedule it.
        raise task.retry(exc=exc)
@implementer(IOriginCache)
class FastlyCache:
    """Origin-cache implementation backed by the Fastly purge API."""

    _api_domain = "https://api.fastly.com"

    def __init__(self, *, api_key, service_id, purger):
        self.api_key = api_key
        self.service_id = service_id
        self._purger = purger

    @classmethod
    def create_service(cls, context, request):
        """Build the cache service from registry settings; purges run as tasks."""
        settings = request.registry.settings
        return cls(
            api_key=settings["origin_cache.api_key"],
            service_id=settings["origin_cache.service_id"],
            purger=request.task(purge_key).delay,
        )

    def cache(
        self,
        keys,
        request,
        response,
        *,
        seconds=None,
        stale_while_revalidate=None,
        stale_if_error=None
    ):
        """Attach Surrogate-Key and Surrogate-Control headers to the response."""
        # Merge the new keys with whatever keys the response already carries.
        existing_keys = set(response.headers.get("Surrogate-Key", "").split())
        merged = sorted(set(keys) | existing_keys)
        response.headers["Surrogate-Key"] = " ".join(merged)

        directives = [
            template.format(value)
            for template, value in (
                ("max-age={}", seconds),
                ("stale-while-revalidate={}", stale_while_revalidate),
                ("stale-if-error={}", stale_if_error),
            )
            if value is not None
        ]
        if directives:
            response.headers["Surrogate-Control"] = ", ".join(directives)

    def purge(self, keys):
        """Schedule a purge for each surrogate key."""
        for surrogate_key in keys:
            self._purger(surrogate_key)

    def purge_key(self, key):
        """Soft-purge a single surrogate key through the Fastly API."""
        path = "/service/{service_id}/purge/{key}".format(
            service_id=self.service_id, key=key
        )
        url = urllib.parse.urljoin(self._api_domain, path)
        resp = requests.post(
            url,
            headers={
                "Accept": "application/json",
                "Fastly-Key": self.api_key,
                "Fastly-Soft-Purge": "1",
            },
        )
        resp.raise_for_status()
        if resp.json().get("status") != "ok":
            raise UnsuccessfulPurgeError("Could not purge {!r}".format(key))
|
##############################################################################
# Copyright 2017 Parker Berberian and Others #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
##############################################################################
import json
from lib.vpn import VPNAction
class Del_VPN_User(VPNAction):
    """StackStorm action that removes VPN users and their datastore keys."""

    def __init__(self, config=None):
        super(Del_VPN_User, self).__init__(config=config)

    def run(self, dn=None, key=None, keys=None):
        """Delete the user named by ``dn``, or every user referenced by the
        datastore entries in ``key``/``keys``."""
        if dn is not None and dn != "None":
            # A usable dn was given directly: derive the datastore key from
            # the uid attribute, then remove both key and directory entry.
            st2key = 'vpn_'
            for attr in dn.split(','):
                if 'uid' in attr:
                    st2key += attr.split('=')[-1]
            self.action_service.delete_value(name=st2key, local=False)
            self.deleteUser(dn)
            return

        # Otherwise gather every datastore key we were handed.
        vpn_keys = list(keys) if keys is not None else []
        if key is not None and key != "None":
            vpn_keys.append(key)

        for st2key in vpn_keys:
            stored = self.action_service.get_value(
                name=st2key,
                local=False,
                decrypt=True
            )
            vpn_info = json.loads(stored)
            self.action_service.delete_value(name=st2key, local=False)
            self.deleteUser(vpn_info['dn'])
|
"""
Given a list of integers nums, return the sum of a non-empty contiguous sublist with the largest sum.
Constraints
1 ≤ n ≤ 100,000 where n is the length of nums
https://binarysearch.com/problems/Largest-Sublist-Sum
"""
class Solution:
    """Largest contiguous sublist sum (Kadane's algorithm) plus reference solvers."""

    def brute(self, nums):
        """O(n^2) reference: try every (start, end) pair."""
        best = float("-inf")
        for start in range(len(nums)):
            running = 0
            for end in range(start, len(nums)):
                running += nums[end]
                best = max(best, running)
        return best

    def solve(self, nums):
        """Recursive formulation of Kadane's algorithm."""
        best = float("-inf")

        def walk(idx, ending_here):
            # ending_here: best sum of a sublist ending at idx - 1.
            nonlocal best
            if idx < len(nums):
                ending_here = max(nums[idx], ending_here + nums[idx])
                best = max(best, ending_here)
                walk(idx + 1, ending_here)

        walk(0, 0)
        return best

    def solve2(self, nums):
        """Iterative Kadane: track the best sublist sum ending at each index."""
        best, ending_here = float("-inf"), 0
        for value in nums:
            ending_here = max(value, ending_here + value)
            best = max(best, ending_here)
        return best
|
# Generated by Django 2.0.2 on 2018-05-03 06:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the backgammon app: Game, Player, PlayerSeason, Season.

    NOTE: auto-generated by Django; prefer adding new migrations over
    editing this one by hand.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('home_score', models.PositiveIntegerField(default=0)),
                ('opponent_score', models.PositiveIntegerField(default=0)),
                ('total_games', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, unique=True)),
                ('games_played', models.IntegerField(default=0)),
                ('games_won', models.IntegerField(default=0)),
                ('win_ratio', models.DecimalField(decimal_places=2, default=0, max_digits=50)),
            ],
        ),
        migrations.CreateModel(
            name='PlayerSeason',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('games_played', models.PositiveIntegerField(default=0)),
                ('games_won', models.PositiveIntegerField(default=0)),
                ('win_ratio', models.DecimalField(decimal_places=2, default=0, max_digits=50)),
                ('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backgammon.Player')),
            ],
        ),
        migrations.CreateModel(
            name='Season',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, unique=True)),
                ('active', models.BooleanField(default=True)),
                ('minimum_games', models.IntegerField(default=30)),
                ('date_start', models.DateField()),
                ('date_end', models.DateField()),
                ('winner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='backgammon.Player')),
            ],
        ),
        migrations.AddField(
            model_name='playerseason',
            name='season',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='backgammon.Season'),
        ),
        migrations.AddField(
            model_name='game',
            name='home',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='home_player', to='backgammon.PlayerSeason'),
        ),
        migrations.AddField(
            model_name='game',
            name='opponent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='opponent', to='backgammon.PlayerSeason'),
        ),
        migrations.AddField(
            model_name='game',
            name='season',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='backgammon.Season'),
        ),
    ]
|
#!/usr/bin/env python3
import sys, os.path
from modules.Engine import Engine
def print_usage():
    """Print CLI usage instructions and terminate the process."""
    usage_lines = (
        "\nUSAGE: ./run.py [app_name] [guest_port] [host_port]",
        "EXAMPLE: ./run.py xssstored 8888 80\n",
    )
    for line in usage_lines:
        print(line)
    sys.exit(0)
if __name__ == "__main__":
    # Require exactly three CLI arguments: app name, guest port, host port.
    if len(sys.argv[1:]) != 3:
        print_usage()
    app_name = sys.argv[1]
    guest_port = sys.argv[2]
    host_port = sys.argv[3]
    # Build and run the engine; surface any failure as a single error line.
    engine = Engine(app_name, guest_port, host_port)
    try:
        engine.run()
    except Exception as e:
        print("[ERROR]: {}".format(e))
|
from typing import List, Dict
import os
import gc
import time
import shutil
from pathlib import Path
from datetime import datetime
import numpy as np
import logging
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from catalyst.utils.seed import set_global_seed, Seeder
from catalyst import utils
from .db import DBSpec
from .environment import EnvironmentSpec
from .algorithm import AlgorithmSpec
logger = logging.getLogger(__name__)
try:
import wandb
WANDB_ENABLED = True
except ImportError:
logger.warning(
"wandb not available, to install wandb, run `pip install wandb`."
)
WANDB_ENABLED = False
class TrainerSpec:
    """Base trainer loop for off-policy RL.

    Runs ``algorithm.train`` over mini-batches, periodically checkpoints,
    pushes updated weights to ``db_server`` for the samplers, and logs to
    the console, TensorBoard and (optionally) Weights & Biases.
    Subclasses must implement ``_run_epoch``.
    """

    def __init__(
        self,
        algorithm: AlgorithmSpec,
        env_spec: EnvironmentSpec,
        db_server: DBSpec,
        logdir: str,
        num_workers: int = 1,
        batch_size: int = 64,
        min_num_transitions: int = int(1e4),
        online_update_period: int = 1,
        weights_sync_period: int = 1,
        save_period: int = 10,
        gc_period: int = 10,
        seed: int = 42,
        epoch_limit: int = None,
        monitoring_params: Dict = None,
        **kwargs,
    ):
        """Store configuration, set up logging/seeding, then delegate any
        remaining kwargs to ``_init`` (subclass extension hook)."""
        # algorithm & environment
        self.algorithm = algorithm
        self.env_spec = env_spec
        # logging
        self.logdir = logdir
        self._prepare_logger(logdir)
        self._seeder = Seeder(init_seed=seed)
        # updates & counters
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.epoch = 0
        self.update_step = 0
        self.num_updates = 0
        self._num_trajectories = 0
        self._num_transitions = 0
        # updates configuration
        # (actor_period, critic_period)
        self.actor_grad_period, self.critic_grad_period = \
            utils.make_tuple(online_update_period)
        # synchronization configuration
        self.db_server = db_server
        self.min_num_transitions = min_num_transitions
        self.save_period = save_period
        self.weights_sync_period = weights_sync_period
        self._gc_period = gc_period
        self.replay_buffer = None
        self.replay_sampler = None
        self.loader = None
        self._epoch_limit = epoch_limit
        # special
        self.monitoring_params = monitoring_params
        self._prepare_seed()
        self._init(**kwargs)

    def _init(self, **kwargs):
        """Set up optional wandb monitoring, mirroring configs/code into the
        wandb run directory; disables wandb if no monitoring params given."""
        global WANDB_ENABLED
        # Subclasses must have consumed their own kwargs before calling here.
        assert len(kwargs) == 0
        if WANDB_ENABLED:
            if self.monitoring_params is not None:
                self.checkpoints_glob: List[str] = \
                    self.monitoring_params.pop(
                        "checkpoints_glob", ["best.pth", "last.pth"])
                wandb.init(**self.monitoring_params)
                # Mirror the experiment configs into the wandb run dir.
                logdir_src = Path(self.logdir)
                logdir_dst = Path(wandb.run.dir)
                configs_src = logdir_src.joinpath("configs")
                os.makedirs(f"{logdir_dst}/{configs_src.name}", exist_ok=True)
                shutil.rmtree(f"{logdir_dst}/{configs_src.name}")
                shutil.copytree(
                    f"{str(configs_src.absolute())}",
                    f"{logdir_dst}/{configs_src.name}")
                # Mirror the code snapshot too, when one exists.
                code_src = logdir_src.joinpath("code")
                if code_src.exists():
                    os.makedirs(f"{logdir_dst}/{code_src.name}", exist_ok=True)
                    shutil.rmtree(f"{logdir_dst}/{code_src.name}")
                    shutil.copytree(
                        f"{str(code_src.absolute())}",
                        f"{logdir_dst}/{code_src.name}")
            else:
                WANDB_ENABLED = False
        self.wandb_mode = "trainer"

    def _prepare_logger(self, logdir):
        """Create a timestamped TensorBoard SummaryWriter under ``logdir``."""
        timestamp = datetime.utcnow().strftime("%y%m%d.%H%M%S")
        logpath = f"{logdir}/trainer.{timestamp}"
        os.makedirs(logpath, exist_ok=True)
        self.logger = SummaryWriter(logpath)

    def _prepare_seed(self):
        """Draw the next seed from the seeder and set it globally."""
        seed = self._seeder()[0]
        set_global_seed(seed)

    def _log_to_console(
        self, fps: float, updates_per_sample: float, num_trajectories: int,
        num_transitions: int, buffer_size: int, **kwargs
    ):
        """Print a single-line progress summary for the current epoch."""
        prefix = f"--- Epoch {self.epoch:09d}/{self._epoch_limit:09d}" \
            if self._epoch_limit is not None \
            else f"--- Epoch {self.epoch:09d}"
        metrics = [
            prefix,
            f"fps: {fps:7.1f}",
            f"updates per sample: {updates_per_sample:7.1f}",
            f"trajectories: {num_trajectories:09d}",
            f"transitions: {num_transitions:09d}",
            f"buffer size: {buffer_size:09d}",
        ]
        metrics = " | ".join(metrics)
        print(metrics)

    def _log_to_tensorboard(
        self, fps: float, updates_per_sample: float, num_trajectories: int,
        num_transitions: int, buffer_size: int, **kwargs
    ):
        """Write the per-epoch scalars to TensorBoard."""
        self.logger.add_scalar("fps", fps, self.epoch)
        self.logger.add_scalar(
            "updates_per_sample", updates_per_sample, self.epoch
        )
        self.logger.add_scalar(
            "num_trajectories", num_trajectories, self.epoch
        )
        self.logger.add_scalar("num_transitions", num_transitions, self.epoch)
        self.logger.add_scalar("buffer_size", buffer_size, self.epoch)
        self.logger.flush()

    @staticmethod
    def _log_wandb_metrics(
        metrics: Dict,
        step: int,
        mode: str,
        suffix: str = ""
    ):
        """Send ``metrics`` to wandb, namespaced as ``{mode}/{key}{suffix}``."""
        metrics = {
            f"{mode}/{key}{suffix}": value
            for key, value in metrics.items()
        }
        step = None  # @TODO: fix, wandb issue
        wandb.log(metrics, step=step)

    def _log_to_wandb(self, *, step, suffix="", **metrics):
        """Forward metrics to wandb when monitoring is enabled."""
        if WANDB_ENABLED:
            self._log_wandb_metrics(
                metrics, step=step, mode=self.wandb_mode, suffix=suffix)

    def _save_wandb(self):
        """Copy the TensorBoard event file into the wandb run directory."""
        if WANDB_ENABLED:
            logdir_src = Path(self.logdir)
            logdir_dst = Path(wandb.run.dir)
            events_src = list(logdir_src.glob("events.out.tfevents*"))
            if len(events_src) > 0:
                events_src = events_src[0]
                os.makedirs(f"{logdir_dst}/{logdir_src.name}", exist_ok=True)
                shutil.copy2(
                    f"{str(events_src.absolute())}",
                    f"{logdir_dst}/{logdir_src.name}/{events_src.name}")

    def _save_checkpoint(self):
        """Persist the algorithm state every ``save_period`` epochs."""
        if self.epoch % self.save_period == 0:
            checkpoint = self.algorithm.pack_checkpoint()
            checkpoint["epoch"] = self.epoch
            filename = utils.save_checkpoint(
                logdir=self.logdir,
                checkpoint=checkpoint,
                suffix=str(self.epoch)
            )
            # NOTE(review): `filename` is unused and the printed message lacks
            # the actual path -- confirm it should interpolate `filename`.
            print(f"Checkpoint saved to: (unknown)")

    def _update_sampler_weights(self):
        """Every ``weights_sync_period`` epochs, push detached numpy weights
        to the db server so samplers can pick them up."""
        if self.epoch % self.weights_sync_period == 0:
            checkpoint = self.algorithm.pack_checkpoint(with_optimizer=False)
            for key in checkpoint:
                checkpoint[key] = {
                    k: v.detach().cpu().numpy()
                    for k, v in checkpoint[key].items()
                }
            self.db_server.put_checkpoint(
                checkpoint=checkpoint, epoch=self.epoch
            )

    def _update_target_weights(self, update_step) -> Dict:
        """Hook for target-network updates; subclasses may override."""
        pass

    def _run_loader(self, loader: DataLoader) -> Dict:
        """Train over one pass of ``loader``; returns elapsed time and fps.

        Actor/critic gradients are applied only on steps aligned with their
        configured periods; per-batch scalar metrics go to TensorBoard/wandb.
        """
        start_time = time.time()
        # @TODO: add average meters
        for batch in loader:
            metrics: Dict = self.algorithm.train(
                batch,
                actor_update=(self.update_step % self.actor_grad_period == 0),
                critic_update=(
                    self.update_step % self.critic_grad_period == 0
                )
            ) or {}
            self.update_step += 1

            metrics_ = self._update_target_weights(self.update_step) or {}
            metrics.update(**metrics_)

            # Only plain numeric metrics are loggable as scalars.
            metrics = dict(
                (key, value)
                for key, value in metrics.items()
                if isinstance(value, (float, int))
            )
            for key, value in metrics.items():
                self.logger.add_scalar(key, value, self.update_step)
            self._log_to_wandb(
                step=self.update_step, suffix="_batch", **metrics)

        elapsed_time = time.time() - start_time
        elapsed_num_updates = len(loader) * loader.batch_size
        self.num_updates += elapsed_num_updates
        fps = elapsed_num_updates / elapsed_time

        output = {"elapsed_time": elapsed_time, "fps": fps}
        return output

    def _run_epoch(self) -> Dict:
        """Run one training epoch; must be implemented by subclasses."""
        raise NotImplementedError()

    def _run_epoch_loop(self):
        """One full epoch: reseed, train, log, checkpoint, sync, GC."""
        self._prepare_seed()
        metrics: Dict = self._run_epoch()
        self.epoch += 1

        self._log_to_console(**metrics)
        self._log_to_tensorboard(**metrics)
        self._log_to_wandb(step=self.epoch, suffix="_epoch", **metrics)

        self._save_checkpoint()
        self._save_wandb()
        self._update_sampler_weights()

        if self.epoch % self._gc_period == 0:
            gc.collect()

    def _run_train_loop(self):
        """Loop epochs until the limit; always signal samplers to stop."""
        self.db_server.push_message(self.db_server.Message.ENABLE_TRAINING)

        epoch_limit = self._epoch_limit or np.iinfo(np.int32).max
        while self.epoch < epoch_limit:
            try:
                self._run_epoch_loop()
            except Exception as ex:
                # Make sure samplers stop before propagating the failure.
                self.db_server.push_message(
                    self.db_server.Message.DISABLE_TRAINING)
                raise ex

        self.db_server.push_message(self.db_server.Message.DISABLE_TRAINING)

    def _start_train_loop(self):
        """Entry hook for the train loop; subclasses may wrap it."""
        self._run_train_loop()

    def run(self):
        """Publish initial weights, run training, then close the logger."""
        self._update_sampler_weights()
        self._start_train_loop()
        self.logger.close()
|
#!/usr/bin/python3
import os
import re
import sys
import time
import json
import pytz
import utils
import yaml
import datetime
import argparse
import textwrap
import random
from mysql import connector
cwd = os.path.dirname(__file__)
os.chdir(cwd)
sys.path.append("../utils")
from pathlib import Path
from ConfigUtils import *
from constants import FILE_MAP_PATH, ENV_CNF_YAML, STATUS_YAML
from params import default_config_parameters
from cloud_init_deploy import load_node_list_by_role_from_config
from cloud_init_deploy import update_service_path
from cloud_init_deploy import get_kubectl_binary
from cloud_init_deploy import load_config as load_deploy_config
from cloud_init_deploy import render_restfulapi, render_dashboard, render_storagemanager, render_repairmanager
from cloud_init_deploy import check_buildable_images, push_docker_images
def load_config_4_ctl(args, command):
    """Load the configuration that *command* needs, defaulting config files."""
    full_config_commands = ("svc", "render_template", "download", "docker", "db")
    if command in full_config_commands:
        # These commands need the complete deployment config.
        if not args.config:
            args.config = [ENV_CNF_YAML, STATUS_YAML]
        return load_deploy_config(args)
    # Lightweight path: start from defaults and layer the given files on top.
    if not args.config and command != "restorefromdir":
        args.config = [STATUS_YAML]
    config = add_configs_in_order(args.config, init_config(default_config_parameters))
    config["ssh_cert"] = config.get("ssh_cert", "./deploy/sshkey/id_rsa")
    return config
def connect_to_machine(config, args):
    """Open an interactive SSH session to one cluster machine.

    args.nargs[0] is either a role name (then args.nargs[1] is an optional
    index into that role's node list, defaulting to 0) or a machine name.
    """
    if args.nargs[0] in config['allroles']:
        target_role = args.nargs[0]
        index = int(args.nargs[1]) if len(args.nargs) > 1 else 0
        nodes, _ = load_node_list_by_role_from_config(config, [target_role])
        node = nodes[index]
    else:
        node = args.nargs[0]
        assert node in config["machines"]
    utils.SSH_connect(config["ssh_cert"], config["machines"][node]
                      ["admin_username"], config["machines"][node]["fqdns"])
def run_kubectl(config, args, commands, need_output=False, dump_to_file=''):
    """Run a kubectl command against the cluster through a random infra node.

    Requires ./deploy/bin/kubectl and the SSL client certs under ./deploy/ssl.
    Returns the command output when need_output is True; otherwise streams the
    command through os.system and returns None.
    """
    if not os.path.exists("./deploy/bin/kubectl"):
        print("please make sure ./deploy/bin/kubectl exists. One way is to use ./ctl.py download")
        exit(-1)
    one_command = " ".join(commands)
    nodes, _ = load_node_list_by_role_from_config(config, ["infra"], False)
    # Any infra node can serve as the API-server endpoint; pick one at random.
    master_node = random.choice(nodes)
    kube_command = "./deploy/bin/kubectl --server=https://{}:{} --certificate-authority={} --client-key={} --client-certificate={} {}".format(
        config["machines"][master_node]["fqdns"], config["k8sAPIport"], "./deploy/ssl/ca/ca.pem", "./deploy/ssl/kubelet/apiserver-key.pem", "./deploy/ssl/kubelet/apiserver.pem", one_command)
    if need_output:
        # we may want to dump command to another file instead of args.output, when we don't want to mix k8s commands with others
        output = utils.execute_or_dump_locally(kube_command, args.verbose, args.dryrun, dump_to_file)
        if not args.verbose:
            print(output)
        return output
    else:
        os.system(kube_command)
def run_script(node, ssh_cert, adm_usr, nargs, sudo=False, noSupressWarning=True):
    """Ship a local script (nargs[0]) to *node* and execute it there via SSH."""
    # Pick the remote interpreter from the script's extension.
    interpreter = "/opt/bin/python" if ".py" in nargs[0] else "bash"
    prefix = "sudo " + interpreter if sudo else interpreter
    # The script is referenced by basename because its directory is shipped
    # alongside and becomes the remote working directory.
    remote_args = [os.path.basename(nargs[0])] + list(nargs[1:])
    fullcmd = prefix + " " + " ".join(remote_args)
    srcdir = os.path.dirname(nargs[0])
    utils.SSH_exec_cmd_with_directory(
        ssh_cert, adm_usr, node, srcdir, fullcmd, noSupressWarning)
def run_cmd(node, ssh_cert, adm_usr, nargs, sudo=False, noSupressWarning=True):
    """Run one shell command (joined from *nargs*) on *node* via SSH.

    NOTE(review): the *sudo* flag is accepted but never applied here —
    confirm whether remote commands are expected to honor it.
    """
    fullcmd = " ".join(nargs)
    utils.SSH_exec_cmd(
        ssh_cert, adm_usr, node, fullcmd, noSupressWarning)
def run_script_wrapper(arg_tuple):
    """Multiprocessing adapter: unpack one packed 6-tuple into run_script."""
    run_script(*arg_tuple)
def run_cmd_wrapper(arg_tuple):
    """Multiprocessing adapter: unpack one packed 6-tuple into run_cmd."""
    run_cmd(*arg_tuple)
def copy2_wrapper(arg_tuple):
    """Multiprocessing adapter: scp nargs[0] to nargs[1] on the given node."""
    node, ssh_cert, adm_usr, nargs, sudo, noSupressWarning = arg_tuple
    source, target = nargs[0], nargs[1]
    # sudo_scp handles root-owned destinations; plain scp otherwise.
    copy_fn = utils.sudo_scp if sudo else utils.scp
    copy_fn(ssh_cert, source, target, adm_usr, node, verbose=noSupressWarning)
def execute_in_parallel(config, nodes, nargs, sudo, func, noSupressWarning=True):
    """Apply *func* to every node concurrently via utils.multiprocess_exec.

    Each worker receives (fqdn, ssh_cert, admin_username, nargs, sudo, warn),
    the tuple layout expected by the *_wrapper functions above.
    """
    args_list = [(config["machines"][node]["fqdns"], config["ssh_cert"],
                  config["admin_username"], nargs, sudo, noSupressWarning) for node in nodes]
    utils.multiprocess_exec(func, args_list, len(nodes))
def get_multiple_machines(config, args):
    """Resolve args.roles_or_machine into a flat list of node names.

    Entries matching a role expand to all nodes of that role; entries matching
    a machine name are kept as-is; anything else produces a warning.
    """
    requested = set(args.roles_or_machine)
    valid_roles = set(config['allroles']) & requested
    valid_machine_names = set(config['machines']) & requested
    invalid_rom = requested - valid_roles - valid_machine_names
    if invalid_rom:
        # Bug fix: the old message used a `\\\`-continuation inside the string
        # literal, so it printed a stray backslash and a run of indentation.
        print("Warning: invalid roles/machine names detected, the following "
              "names are neither valid role names nor machines in our "
              "cluster: " + ",".join(sorted(invalid_rom)))
    nodes, _ = load_node_list_by_role_from_config(config, list(valid_roles), False)
    return nodes + list(valid_machine_names)
def parallel_action_by_role(config, args, func):
    """Run *func* concurrently on every machine selected by args.roles_or_machine."""
    target_nodes = get_multiple_machines(config, args)
    execute_in_parallel(
        config, target_nodes, args.nargs, args.sudo, func,
        noSupressWarning=args.verbose)
def verify_all_nodes_ready(config, args):
    """Exit with status 1 if any configured machine is not Ready in kubernetes.

    Used for continuous integration (CI). (The old docstring claimed it
    returned the unready nodes and misspelled "continuous".)
    """
    nodes_info_raw = run_kubectl(config, args, ["get nodes"], True)
    # Each data line of `kubectl get nodes` looks like "<name>   Ready ...";
    # take the part before the status word.
    ready_machines = {entry.split("Ready")[0].strip()
                      for entry in nodes_info_raw.split('\n')[1:]}
    expected_nodes = set(config["machines"].keys())
    nodes_expected_but_not_ready = expected_nodes - ready_machines
    if nodes_expected_but_not_ready:
        print("following nodes not ready:\n{}".format(
            ','.join(nodes_expected_but_not_ready)))
        exit(1)
def change_kube_service(config, args, operation, service_list):
    """Start or stop kubernetes services by name.

    "start" renders the service templates and pushes updated config first.
    Each service file is applied with `kubectl create -f` / `delete -f`; when a
    `launch_order` file exists next to the service, files are applied in that
    order ("stop" reverses it) and "SLEEP <seconds>" tags pause between them.
    """
    # Bug fix: `assert cond and "msg"` silently drops the message;
    # `assert cond, "msg"` is the correct form.
    assert operation in ("start", "stop"), "you can only start or stop a service"
    kubectl_action = "create" if operation == "start" else "delete"
    if operation == "start":
        render_services(config, service_list)
        remote_config_update(config, args)
    elif not os.path.exists("./deploy/services"):
        utils.render_template_directory("./services/", "./deploy/services/", config)
    service2path = update_service_path()
    for service_name in service_list:
        fname = service2path[service_name]
        dirname = os.path.dirname(fname)
        if os.path.exists(os.path.join(dirname, "launch_order")) and "/" not in service_name:
            with open(os.path.join(dirname, "launch_order"), 'r') as f:
                allservices = f.readlines()
                if operation == "stop":
                    # Tear services down in reverse launch order.
                    allservices = reversed(allservices)
                for filename in allservices:
                    # A sleep tag (e.g. "SLEEP 10") waits for the previous
                    # service to come up; it is not itself a service file.
                    if filename.startswith("SLEEP"):
                        if operation == "start":
                            time.sleep(int(filename.split(" ")[1]))
                        # Bug fix: the SLEEP line previously fell through on
                        # "start" and was passed to kubectl as a file name.
                        continue
                    filename = filename.strip('\n')
                    run_kubectl(config, args, [
                        "{} -f {}".format(kubectl_action, os.path.join(dirname, filename))])
        else:
            run_kubectl(config, args, [
                "{} -f {}".format(kubectl_action, fname)])
def render_services(config, service_list):
    '''render services, ./ctl.py svc render <service name, e.g. monitor>'''
    for svc in service_list:
        if not os.path.exists("./services/{}".format(svc)):
            # Bug fix: the warning previously printed a literal "{}" because
            # .format(svc) was missing.
            print("Warning: folder of service {} not found under ./services directory".format(svc))
            continue
        utils.render_template_directory(
            "./services/{}".format(svc), "./deploy/services/{}".format(svc), config)
def remote_config_update(config, args, check_module=False):
    '''
    client end(infra/NFS node) config file update
    ./ctl.py -s svc configupdate restfulapi
    ./ctl.py [-r storage_machine1 [-r storage_machine2]] -s svc configupdate storage_manager
    by default sudo
    '''
    if check_module:
        assert set(args.nargs[1:]) - set(["restfulapi", "storagemanager", "repairmanager", "dashboard"]) == set(), "not supported"
    # need to get node list for this subcommand of svc, so load status.yaml
    if not os.path.exists(FILE_MAP_PATH):
        utils.render_template("template/cloud-config/file_map.yaml", FILE_MAP_PATH, config)
    with open(FILE_MAP_PATH) as f:
        # Fix: yaml.load without an explicit Loader is deprecated and allows
        # arbitrary object construction; safe_load matches the safe_load /
        # safe_dump already used elsewhere in this script.
        file_map = yaml.safe_load(f)
    # Explicit dispatch table instead of eval("render_" + module).
    render_funcs = {
        "restfulapi": render_restfulapi,
        "dashboard": render_dashboard,
        "repairmanager": render_repairmanager,
    }
    for module in args.nargs[1:]:
        if module in render_funcs:
            render_funcs[module](config)
            infra_nodes, _ = load_node_list_by_role_from_config(config, ["infra"], False)
            for file_pair in file_map[module]:
                src_dst_list = [file_pair["src"], file_pair["dst"]]
                execute_in_parallel(config, infra_nodes, src_dst_list,
                                    True, copy2_wrapper, noSupressWarning=args.verbose)
        elif module == "storagemanager":
            nfs_nodes, _ = load_node_list_by_role_from_config(config, ["nfs"], False)
            for node in nfs_nodes:
                # Each NFS node gets its own rendered storage-manager config.
                config["storage_manager"] = config["machines"][node]["storage_manager"]
                render_storagemanager(config, node)
                src_dst_list = ["./deploy/StorageManager/{}_storage_manager.yaml".format(
                    node), "/etc/StorageManager/config.yaml"]
                args_list = (config["machines"][node]["fqdns"], config["ssh_cert"],
                             config["admin_username"], src_dst_list, True, args.verbose)
                copy2_wrapper(args_list)
def render_template_or_dir(config, args):
    """Render a template file or directory; default destination mirrors ./deploy."""
    src = args.nargs[0]
    if len(args.nargs) >= 2:
        dst = args.nargs[1]
    else:
        # No destination given: mirror the template path under ./deploy.
        dst = os.path.join("deploy", src.split("template/")[1])
    render = utils.render_template_directory if os.path.isdir(src) else utils.render_template
    render(src, dst, config)
def maintain_db(config, args):
    """
    push/pull a table to/from DB

    Subcommands (args.nargs[0]):
      connect - open an interactive mysql shell
      pull    - dump a whitelisted table ("vc" or "acl") to a YAML file
      push    - replace that table's rows from a YAML file
    """
    subcommand = args.nargs[0]
    assert subcommand in ["pull", "push", "connect"], "invalid action."
    host = config["mysql_node"]
    user = config["mysql_username"]
    password = config["mysql_password"]
    if subcommand == "connect":
        os.system("mysql -h {} -u {} -p{}".format(host, user, password))
    else:
        database = "DLWSCluster-{}".format(config["clusterId"])
        table_name = args.nargs[1]
        # table_name is interpolated into SQL below; this whitelist is the
        # only thing preventing injection, so keep it strict.
        assert table_name in ["vc", "acl"], "invalid table."
        if args.verbose:
            print("connecting to {}@{}, DB {}".format(user, host, database))
        conn = connector.connect(user=user, password=password,
                                 host=host, database=database)
        if subcommand == "pull":
            sql = "SELECT * from {}".format(table_name)
            cursor = conn.cursor()
            cursor.execute(sql)
            col_names = [col[0] for col in cursor.description]
            serialized_rows = []
            rows = cursor.fetchall()
            for row in rows:
                serialized_row = {}
                for i, v in enumerate(row):
                    try:
                        serialized_row[col_names[i]] = json.loads(v)
                    except (TypeError, ValueError):
                        # Fix: was a bare `except:`. json.loads raises
                        # JSONDecodeError (a ValueError) for malformed JSON and
                        # TypeError for non-string values - keep those raw.
                        serialized_row[col_names[i]] = v
                serialized_rows.append(serialized_row)
            table_config = {"col_names": col_names, "rows": serialized_rows}
            output_file = args.output if args.output else "{}.yaml".format(table_name)
            with open(output_file, "w") as wf:
                yaml.safe_dump(table_config, wf)
        elif subcommand == "push":
            # Push is a wholesale replace: clear the table, then re-insert.
            sql = "DELETE from {}".format(table_name)
            cursor = conn.cursor()
            cursor.execute(sql)
            input_file_list = args.input if args.input else ["{}.yaml".format(table_name)]
            table_config = add_configs_in_order(input_file_list, {})
            col_names = table_config["col_names"]
            cols_2_ignore = table_config.get("columns_to_ignore", ["time"])
            cols_filtered = [col for col in col_names if col not in cols_2_ignore]
            cols_str = ", ".join(cols_filtered)
            for row in table_config["rows"]:
                vals = ", ".join(["'{}'".format(json.dumps(row[col])) for col in cols_filtered])
                vals = vals.replace("'null'", "NULL")
                sql = "INSERT INTO `{}` ({}) VALUES ({})".format(table_name, cols_str, vals)
                if args.verbose:
                    print(sql)
                cursor.execute(sql)
            conn.commit()
            cursor.close()
def cordon(config, args):
    """Cordon a kubernetes node and annotate it with who/when/why.

    The admin name comes from ~/.dlts-admin.yaml (key: admin_name) when that
    file exists, otherwise from --admin.
    """
    home_dir = str(Path.home())
    dlts_admin_config_path = os.path.join(home_dir, ".dlts-admin.yaml")
    # An explicit path in config overrides the home-directory default.
    dlts_admin_config_path = config.get(
        "dlts_admin_config_path", dlts_admin_config_path)
    if os.path.exists(dlts_admin_config_path):
        with open(dlts_admin_config_path) as f:
            admin_name = yaml.safe_load(f)["admin_name"]
    else:
        admin_name = args.admin
    assert admin_name is not None and admin_name, "specify admin_name by"\
        "--admin or in ~/.dlts-admin.yaml"
    now = datetime.datetime.now(pytz.timezone("UTC"))
    timestr = now.strftime("%Y/%m/%d %H:%M:%S %Z")
    node = args.nargs[0]
    # Everything after the node name is treated as a free-form note.
    note = " ".join(args.nargs[1:])
    annotation = "cordoned by {} at {}, {}".format(admin_name, timestr, note)
    k8s_cmd = "annotate node {} --overwrite cordon-note='{}'".format(
        node, annotation)
    run_kubectl(config, args, [k8s_cmd])
    run_kubectl(config, args, ["cordon {}".format(node)])
def uncordon(config, args):
    """Uncordon a kubernetes node, unless it carries a cordon-note annotation.

    An annotated node was cordoned deliberately (see cordon()); require
    --force to override in that case.
    """
    node = args.nargs[0]
    query_cmd = "get nodes {} -o=jsonpath=\'{{.metadata.annotations.cordon-note}}\'".format(node)
    output = run_kubectl(config, args, [query_cmd], need_output=True)
    if output and not args.force:
        print("node annotated, if you are sure that you want to uncordon it, "\
            "please specify --force or use `{} kubectl cordon <node>` to"\
            " cordon".format(__file__))
    else:
        run_kubectl(config, args, ["uncordon {}".format(node)])
def run_command(args, command):
    """Dispatch a ctl command name to its implementation."""
    config = load_config_4_ctl(args, command)
    if command == "restorefromdir":
        utils.restore_keys_from_dir(args.nargs)
    elif command == "connect":
        connect_to_machine(config, args)
    elif command == "kubectl":
        run_kubectl(config, args, args.nargs[0:])
    elif command == "runscript":
        parallel_action_by_role(config, args, run_script_wrapper)
    elif command == "runcmd":
        parallel_action_by_role(config, args, run_cmd_wrapper)
    elif command == "copy2":
        parallel_action_by_role(config, args, copy2_wrapper)
    elif command == "backuptodir":
        utils.backup_keys_to_dir(args.nargs)
    # Dead duplicate `elif command == "restorefromdir"` branch removed: it was
    # unreachable because the first `if` above already handles that command.
    elif command == "verifyallnodes":
        verify_all_nodes_ready(config, args)
    elif command == "svc":
        # Bug fix: `assert cond and "msg"` drops the message; use `assert cond, "msg"`.
        assert len(args.nargs) > 1, \
            "at least 1 action and 1 kubernetes service name should be provided"
        if args.nargs[0] == "start":
            change_kube_service(config, args, "start", args.nargs[1:])
        elif args.nargs[0] == "stop":
            change_kube_service(config, args, "stop", args.nargs[1:])
        elif args.nargs[0] == "render":
            render_services(config, args.nargs[1:])
        elif args.nargs[0] == "configupdate":
            remote_config_update(config, args, True)
    elif command == "render_template":
        render_template_or_dir(config, args)
    elif command == "download":
        if not os.path.exists('deploy/bin/kubectl') or args.force:
            get_kubectl_binary(config)
    elif command == "docker":
        nargs = args.nargs
        if nargs[0] == "push":
            check_buildable_images(args.nargs[1], config)
            push_docker_images(args, config)
    elif command == "db":
        maintain_db(config, args)
    elif command == "cordon":
        cordon(config, args)
    elif command == "uncordon":
        uncordon(config, args)
    else:
        print("invalid command, please read the doc")
if __name__ == '__main__':
    # the program always run at the current directory.
    # ssh -q -o "StrictHostKeyChecking no" -o "UserKnownHostsFile=/dev/null" -i deploy/sshkey/id_rsa core@
    dirpath = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
    os.chdir(dirpath)
    # Build the CLI: global flags first, then the positional command and its
    # remaining (REMAINDER) arguments, which each subcommand interprets itself.
    parser = argparse.ArgumentParser(prog='maintain.py',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=textwrap.dedent('''
Maintain the status of the cluster.
Prerequest:
* Have the accumulated config file ready.
Command:
    connect  connect to a machine in the deployed cluster
'''))
    parser.add_argument('-cnf', '--config', action='append', default=[], help='Specify the config files you want to load, later ones \
would overwrite former ones, e.g., -cnf config.yaml -cnf status.yaml')
    parser.add_argument('-i', '--input', action='append',
                        default=[], help='Files to take as input')
    parser.add_argument('-o', '--output', default='', help='File to dump to as output')
    parser.add_argument("-v", "--verbose",
                        help="verbose print", action="store_true")
    parser.add_argument('-r', '--roles_or_machine', action='append', default=[], help='Specify the roles of machines that you want to copy file \
to or execute command on')
    parser.add_argument("-s", "--sudo", action="store_true",
                        help='Execute scripts in sudo')
    parser.add_argument("-f", "--force", action="store_true",
                        help='Force execution')
    parser.add_argument("--nocache",
                        help="Build docker without cache",
                        action="store_true")
    parser.add_argument("--admin",
                        help="Name of admin that execute this script")
    parser.add_argument("command",
                        help="See above for the list of valid command")
    parser.add_argument('nargs', nargs=argparse.REMAINDER,
                        help="Additional command argument")
    parser.add_argument(
        '-d', '--dryrun', help='Dry run -- no actual execution', action="store_true")
    args = parser.parse_args()
    command = args.command
    nargs = args.nargs  # NOTE(review): unused; run_command reads args.nargs directly
    run_command(args, command)
|
''' Tests ability to read plausible values from software examples.
'''
import unittest
import test.verify as verify
from test.base import Base
class Sample00(verify.WithinRangeTest, Base):
    ''' Sample00 containing software file examples from:
    https://www.c3d.org/sampledata.html
    '''
    # Archive within the sample data set exercised by this test case.
    ZIP = 'sample00.zip'
    # Value range asserted by WithinRangeTest — presumably the plausible
    # bounds for sample data; confirm in test.verify.
    DATA_RANGE = (-1e6, 1e6)
    # (vendor folder, [c3d files]) pairs inside the archive to be checked.
    zip_files = \
        [
            ('Advanced Realtime Tracking GmbH', ['arthuman-sample.c3d', 'arthuman-sample-fingers.c3d']),
            ('Codamotion', ['codamotion_gaitwands_19970212.c3d', 'codamotion_gaitwands_20150204.c3d']),
            ('Cometa Systems', ['EMG Data Cometa.c3d']),
            ('Innovative Sports Training', ['Gait with EMG.c3d', 'Static Pose.c3d']),
            ('Motion Analysis Corporation', ['Sample_Jump2.c3d', 'Walk1.c3d']),
            ('NexGen Ergonomics', ['test1.c3d']),
            # Vicon files are weird, uses non-standard encodings. Walking01.c3d contain nan values.
            ('Vicon Motion Systems', ['pyCGM2 lower limb CGM24 Walking01.c3d', 'TableTennis.c3d']),
        ]
if __name__ == '__main__':
    unittest.main()
|
from typing import List, Dict, Callable, Tuple, Generator, Set, Sequence
import functools
import operator
from collections import defaultdict
import random
from tython import Program, TastNode, _RULES_BY_KIND, Rule, nt
from models.model import CandidateGenerator, reachable_rules_by_kind
from models import RegisterModel
from challenges import extract_constants
def prod(iterable):  # like sum but product
    """Multiply all elements together, left to right; the empty product is 1."""
    result = 1
    for item in iterable:
        result = result * item
    return result
@RegisterModel("uniform")
class UniformModel(CandidateGenerator):
    '''
    Uniformly samples from all rules.
    '''
    def __init__(self, copy_prob=0.5) -> None:
        # copy_prob: probability of copying an existing node of the right kind
        # (when one is available) instead of expanding a rule.
        super().__init__()
        self.copy_prob = copy_prob
        # self.intended_depth = intended_depth
        #self._random_cache = defaultdict(lambda: defaultdict(dict))
        self._random_cache = {} # defaultdict is not pickable (for multiproc)
    def random(self, kind, max_depth=5, nodes_by_kind=None):
        """Sample a random Program of *kind* with derivation depth <= max_depth.

        nodes_by_kind supplies existing nodes that may be copied in with
        probability copy_prob.
        """
        if nodes_by_kind is None:
            nodes_by_kind = {}
        # Cache key summarizes which node-kinds are available for copying.
        key = sum(hash(n) for n in nodes_by_kind)
        if key not in self._random_cache:
            self._random_cache[key] = {}
        cache = self._random_cache[key]
        # cache[kind][depth] is a list of available rules
        def available_rules(kind, depth):
            # Rules whose every child kind is satisfiable within depth-1
            # (either by a copyable node or by further expansion).
            if depth <= 0:
                return []
            if kind in cache and depth in cache[kind]:
                return cache[kind][depth]
            rules = [r for r in _RULES_BY_KIND[kind] if
                     all([nodes_by_kind.get(k) or available_rules(k, depth - 1) for k in r.kids])]
            if kind not in cache:
                cache[kind] = {}
            cache[kind][depth] = rules
            return rules
        def helper(kind, depth):
            # Recursively build a TastNode tree, choosing uniformly among
            # feasible rules or copying an available node.
            assert depth >= 0
            rules = available_rules(kind, depth)
            assert rules or nodes_by_kind.get(kind), f"Cannot generate random {kind} of depth <= {depth}"
            if nodes_by_kind.get(kind) and (not rules or random.random() < self.copy_prob):
                return random.choice(nodes_by_kind[kind])
            rule = random.choice(rules)
            return TastNode(rule, [helper(k, depth - 1) for k in rule.kids])
        return Program(helper(kind, max_depth))
    def get_candidates_by_nodes(self, kind, nodes_by_kind):
        """Build the per-rule candidate table: rule -> per-child-slot pairs of
        (weighted copyable nodes, weighted rules); key None holds the root *kind*."""
        rules_by_kind = _RULES_BY_KIND # zzz reachable_rules_by_kind(kind, nodes_by_kind)
        if kind not in rules_by_kind:
            return {}
        # p_kind_rules = {k: (1 - self.copy_prob if nodes_by_kind.get(k) else 1) / max(1, len(rules_by_kind[k]))
        #                for k in rules_by_kind}
        by_kind = {}
        for parent_kind in rules_by_kind:
            rules = rules_by_kind[parent_kind]
            has_copy = sum(r.name == "COPY" for r in rules)
            if has_copy:
                assert has_copy == 1
                # Reserve copy_prob for the COPY rule; split the rest uniformly.
                by_kind[parent_kind] = [], [
                    (self.copy_prob if r.name == "COPY" else (1 - self.copy_prob) / len(rules), r)
                    for r in rules]
            else:
                by_kind[parent_kind] = [], [(1 / len(rules), r) for r in rules]
        ans = {r: [by_kind[k] for k in r.kids]
               for rules in rules_by_kind.values() for r in rules if r.name != "COPY"}
        # COPY rules draw uniformly from the supplied nodes of their kind.
        ans.update({r: [([(1. / len(nodes_by_kind[r.nt]), n) for n in nodes_by_kind.get(r.nt, [])], [])]
                    for rules in rules_by_kind.values() for r in rules if r.name == "COPY"})
        ans[None] = [by_kind[kind]]
        return ans
    def get_candidates(self, q: Program) -> Dict[Rule,
                                                 List[Tuple[List[Tuple[float, TastNode]], List[Tuple[float, Rule]]]]]:
        """Candidate distributions for solving *q*, seeded with q's constants."""
        consts = extract_constants(q)
        # The solution's type annotation sits at this fixed position in the tree.
        sol_type_annotation = q.tree.children[0].children[1].children[1]
        # NOTE(review): eval() of the annotation source assumes trusted programs.
        sol_kind = nt.type2nt(eval(sol_type_annotation.src()))
        return self.get_candidates_by_nodes(sol_kind, consts)
if __name__ == "__main__":
    # Smoke test: sample 100 random list-of-int programs and try evaluating them.
    u = UniformModel()
    random.seed(0)
    for _ in range(100):
        p = u.random((List, int), 5)
        print(p.src(safe=False))
        try:
            p.val(max_ticks=1000)
        except Program.EvalException:
            # Randomly generated programs may legitimately fail to evaluate.
            pass
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Pattern, Tuple, TYPE_CHECKING
from urllib import parse
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from flask_babel import gettext as __
from marshmallow import fields, Schema
from sqlalchemy.engine.url import make_url, URL
from typing_extensions import TypedDict
from superset.db_engine_specs.postgres import PostgresBaseEngineSpec
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.models.sql_lab import Query
from superset.utils import core as utils
if TYPE_CHECKING:
from superset.models.core import Database
# Regular expressions to catch custom errors
OBJECT_DOES_NOT_EXIST_REGEX = re.compile(
r"Object (?P<object>.*?) does not exist or not authorized."
)
SYNTAX_ERROR_REGEX = re.compile(
"syntax error line (?P<line>.+?) at position (?P<position>.+?) "
"unexpected '(?P<syntax_error>.+?)'."
)
class SnowflakeParametersSchema(Schema):
    """Marshmallow schema validating Snowflake connection parameters."""
    username = fields.Str(required=True)
    password = fields.Str(required=True)
    account = fields.Str(required=True)
    database = fields.Str(required=True)
    role = fields.Str(required=True)
    warehouse = fields.Str(required=True)
class SnowflakeParametersType(TypedDict):
    """Typed view of the Snowflake connection parameters validated by
    SnowflakeParametersSchema."""
    username: str
    password: str
    account: str
    database: str
    role: str
    warehouse: str
class SnowflakeEngineSpec(PostgresBaseEngineSpec):
    """Superset database engine spec for Snowflake (Postgres-like dialect)."""
    engine = "snowflake"
    engine_name = "Snowflake"
    force_column_alias_quotes = True
    max_column_name_length = 256
    parameters_schema = SnowflakeParametersSchema()
    default_driver = "snowflake"
    sqlalchemy_uri_placeholder = "snowflake://"
    # ISO-8601 duration -> Snowflake SQL expression truncating {col} to that grain.
    _time_grain_expressions = {
        None: "{col}",
        "PT1S": "DATE_TRUNC('SECOND', {col})",
        "PT1M": "DATE_TRUNC('MINUTE', {col})",
        "PT5M": "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 5) * 5, \
            DATE_TRUNC('HOUR', {col}))",
        "PT10M": "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 10) * 10, \
            DATE_TRUNC('HOUR', {col}))",
        "PT15M": "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 15) * 15, \
            DATE_TRUNC('HOUR', {col}))",
        "PT30M": "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 30) * 30, \
            DATE_TRUNC('HOUR', {col}))",
        "PT1H": "DATE_TRUNC('HOUR', {col})",
        "P1D": "DATE_TRUNC('DAY', {col})",
        "P1W": "DATE_TRUNC('WEEK', {col})",
        "P1M": "DATE_TRUNC('MONTH', {col})",
        "P3M": "DATE_TRUNC('QUARTER', {col})",
        "P1Y": "DATE_TRUNC('YEAR', {col})",
    }
    # Regex -> (user-facing message, error type, extra) for custom error mapping.
    custom_errors: Dict[Pattern[str], Tuple[str, SupersetErrorType, Dict[str, Any]]] = {
        OBJECT_DOES_NOT_EXIST_REGEX: (
            __("%(object)s does not exist in this database."),
            SupersetErrorType.OBJECT_DOES_NOT_EXIST_ERROR,
            {},
        ),
        SYNTAX_ERROR_REGEX: (
            __(
                "Please check your query for syntax errors at or "
                'near "%(syntax_error)s". Then, try running your query again.'
            ),
            SupersetErrorType.SYNTAX_ERROR,
            {},
        ),
    }
    @classmethod
    def adjust_database_uri(
        cls, uri: URL, selected_schema: Optional[str] = None
    ) -> None:
        """Point *uri* at the selected schema (Snowflake uses "database/schema").

        NOTE(review): mutates the URL in place; SQLAlchemy >= 1.4 URLs are
        immutable, so confirm the pinned SQLAlchemy version supports this.
        """
        database = uri.database
        if "/" in uri.database:
            database = uri.database.split("/")[0]
        if selected_schema:
            # Quote so schema names with special characters survive the URL.
            selected_schema = parse.quote(selected_schema, safe="")
            uri.database = database + "/" + selected_schema
    @classmethod
    def epoch_to_dttm(cls) -> str:
        """SQL template converting epoch seconds in {col} to a datetime."""
        return "DATEADD(S, {col}, '1970-01-01')"
    @classmethod
    def epoch_ms_to_dttm(cls) -> str:
        """SQL template converting epoch milliseconds in {col} to a datetime."""
        return "DATEADD(MS, {col}, '1970-01-01')"
    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        """Render *dttm* as a Snowflake literal for *target_type*, or None if
        the type is not a supported temporal type."""
        tt = target_type.upper()
        if tt == utils.TemporalType.DATE:
            return f"TO_DATE('{dttm.date().isoformat()}')"
        if tt == utils.TemporalType.DATETIME:
            return f"""CAST('{dttm.isoformat(timespec="microseconds")}' AS DATETIME)"""
        if tt == utils.TemporalType.TIMESTAMP:
            return f"""TO_TIMESTAMP('{dttm.isoformat(timespec="microseconds")}')"""
        return None
    @staticmethod
    def mutate_db_for_connection_test(database: "Database") -> None:
        """
        By default, snowflake doesn't validate if the user/role has access to the chosen
        database.

        :param database: instance to be mutated
        """
        extra = json.loads(database.extra or "{}")
        engine_params = extra.get("engine_params", {})
        connect_args = engine_params.get("connect_args", {})
        connect_args["validate_default_parameters"] = True
        engine_params["connect_args"] = connect_args
        extra["engine_params"] = engine_params
        database.extra = json.dumps(extra)
    @classmethod
    def get_cancel_query_id(cls, cursor: Any, query: Query) -> Optional[str]:
        """
        Get Snowflake session ID that will be used to cancel all other running
        queries in the same session.

        :param cursor: Cursor instance in which the query will be executed
        :param query: Query instance
        :return: Snowflake Session ID
        """
        cursor.execute("SELECT CURRENT_SESSION()")
        row = cursor.fetchone()
        return row[0]
    @classmethod
    def cancel_query(cls, cursor: Any, query: Query, cancel_query_id: str) -> bool:
        """
        Cancel query in the underlying database.

        :param cursor: New cursor instance to the db of the query
        :param query: Query instance
        :param cancel_query_id: Snowflake Session ID
        :return: True if query cancelled successfully, False otherwise
        """
        try:
            cursor.execute(f"SELECT SYSTEM$CANCEL_ALL_QUERIES({cancel_query_id})")
        except Exception:  # pylint: disable=broad-except
            return False
        return True
    @classmethod
    def build_sqlalchemy_uri(
        cls,
        parameters: SnowflakeParametersType,
        encrypted_extra: Optional[  # pylint: disable=unused-argument
            Dict[str, Any]
        ] = None,
    ) -> str:
        """Assemble a snowflake:// SQLAlchemy URI from connection parameters."""
        return str(
            URL(
                "snowflake",
                username=parameters.get("username"),
                password=parameters.get("password"),
                host=parameters.get("account"),
                database=parameters.get("database"),
                query={
                    "role": parameters.get("role"),
                    "warehouse": parameters.get("warehouse"),
                },
            )
        )
    @classmethod
    def get_parameters_from_uri(
        cls,
        uri: str,
        encrypted_extra: Optional[  # pylint: disable=unused-argument
            Dict[str, str]
        ] = None,
    ) -> Any:
        """Inverse of build_sqlalchemy_uri: split a URI back into parameters."""
        url = make_url(uri)
        query = dict(url.query.items())
        return {
            "username": url.username,
            "password": url.password,
            "account": url.host,
            "database": url.database,
            "role": query.get("role"),
            "warehouse": query.get("warehouse"),
        }
    @classmethod
    def validate_parameters(
        cls, parameters: SnowflakeParametersType
    ) -> List[SupersetError]:
        """Return a (possibly empty) list of errors for missing parameters."""
        errors: List[SupersetError] = []
        required = {
            "warehouse",
            "username",
            "database",
            "account",
            "role",
            "password",
        }
        # A parameter counts as present only if it has a truthy value.
        present = {key for key in parameters if parameters.get(key, ())}
        missing = sorted(required - present)
        if missing:
            errors.append(
                SupersetError(
                    message=f'One or more parameters are missing: {", ".join(missing)}',
                    error_type=SupersetErrorType.CONNECTION_MISSING_PARAMETERS_ERROR,
                    level=ErrorLevel.WARNING,
                    extra={"missing": missing},
                ),
            )
        return errors
    @classmethod
    def parameters_json_schema(cls) -> Any:
        """
        Return configuration parameters as OpenAPI.
        """
        if not cls.parameters_schema:
            return None
        ma_plugin = MarshmallowPlugin()
        spec = APISpec(
            title="Database Parameters",
            version="1.0.0",
            openapi_version="3.0.0",
            plugins=[ma_plugin],
        )
        spec.components.schema(cls.__name__, schema=cls.parameters_schema)
        return spec.to_dict()["components"]["schemas"][cls.__name__]
|
# Copyright (c) 2016, Aaron Christianson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Monkey patch setuptools to write faster console_scripts with this format:
import sys
from mymodule import entry_function
sys.exit(entry_function())
This is better.
(c) 2016, Aaron Christianson
http://github.com/ninjaaron/fast-entry_points
"""
import re
from setuptools.command import easy_install
TEMPLATE = r"""\
# -*- coding: utf-8 -*-
# EASY-INSTALL-ENTRY-SCRIPT: '{3}','{4}','{5}'
__requires__ = '{3}'
import re
import sys
from {0} import {1}
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit({2}())"""
# Decorated @classmethod at module level on purpose: the function is assigned
# onto easy_install.ScriptWriter below, where it behaves as a classmethod.
@classmethod
def get_args(cls, dist, header=None):
    """
    Yield write_script() argument tuples for a distribution's
    console_scripts and gui_scripts entry points.
    """
    if header is None:
        header = cls.get_header()
    spec = str(dist.as_requirement())
    for type_ in "console", "gui":
        group = type_ + "_scripts"
        for name, ep in dist.get_entry_map(group).items():
            # ensure_safe_name
            if re.search(r"[\\/]", name):
                raise ValueError("Path separators not allowed in script names")
            script_text = TEMPLATE.format(ep.module_name, ep.attrs[0],
                                          ".".join(ep.attrs), spec, group,
                                          name)
            args = cls._get_script_args(type_, name, header, script_text)
            for res in args:
                yield res
# Monkey-patch setuptools' script writer to emit the fast template above.
easy_install.ScriptWriter.get_args = get_args
def main():
    """Install fastentrypoints into one or more project directories.

    Copies this file into each destination and wires it up by appending an
    `include` line to MANIFEST.in and an import to the top of setup.py
    (both only when not already present).
    """
    import os
    import re
    import shutil
    import sys
    dests = sys.argv[1:] or ["."]
    # Copy the .py source even when invoked from a compiled .pyc.
    filename = re.sub(r"\.pyc$", ".py", __file__)
    for dst in dests:
        shutil.copy(filename, dst)
        manifest_path = os.path.join(dst, "MANIFEST.in")
        setup_path = os.path.join(dst, "setup.py")
        # Insert the include statement to MANIFEST.in if not present
        with open(manifest_path, "a+") as manifest:
            manifest.seek(0)
            manifest_content = manifest.read()
            if not "include fastentrypoints.py" in manifest_content:
                manifest.write(("\n" if manifest_content else "") +
                               "include fastentrypoints.py")
        # Insert the import statement to setup.py if not present
        with open(setup_path, "a+") as setup:
            setup.seek(0)
            setup_content = setup.read()
            if not "import fastentrypoints" in setup_content:
                # Rewrite the whole file so the import lands at the top.
                setup.seek(0)
                setup.truncate()
                setup.write("import fastentrypoints\n" + setup_content)
    print(__name__)
|
"""
Script to pull the latest changes from the SerGIS Server git repository and
put them in the web directory.
The defaults here assume:
- IIS with iisnode
- SerGIS Socket Server service set up through NSSM
But it can be easily modified for a different environment.
Before running this, make sure to set the configuration variables.
Usage:
python2 update_sergis-server.py
Fully update the server web directory by:
1. Pull the latest changes in the git repo.
2. Pull the latest changes in the git submodules.
3. Run `npm install` in the git repo directory.
4. Run `grunt dist` in the git repo directory.
5. Stop the SerGIS Socket Server service.
6. Set IIS permissions on the web directory.
7. Clear out the web directory.
8. Copy from the git repo to the web directory.
9. Copy config files to the web directory.
10. Creating the `uploads` directory.
11. Start the SerGIS Socket Server service
python2 update_sergis-server.py lite
Update the server web directory with the latest changes from the git repo
without reinstalling all dependencies.
1. Pull the latest changes in the git repo.
2. Pull the latest changes in the git submodules.
3. Run `grunt dist` in the git repo directory.
4. Stop the SerGIS Socket Server service.
5. Copy from the git repo to the web directory.
6. Copy config files to the web directory.
7. Start the SerGIS Socket Server service.
"""
import os, os.path, subprocess, shutil, sys, time
import win32com.shell.shell as shell
################################################################################
## Configuration
################################################################################
# Whether we are running the "lite" version (see above)
LITE = False
if len(sys.argv) > 1 and sys.argv[1] == "lite":
    LITE = True
# The directory where the sergis-server git repo is
GIT_REPO = "C:\\sergis_files\\git\\sergis-server"
# The submodules in the sergis-server git repo
GIT_SUBMODULES = [
    os.path.join(GIT_REPO, "sergis-client"),
    os.path.join(GIT_REPO, "sergis-author")
]
# The web directory (where we're "publishing" to)
WEB_DIR = "C:\\sergis-server"
# When we're clearing out the web directory, which files/folders to ignore (i.e. keep)
WEB_DIR_CLEAR_IGNORE = ["iisnode"]
# The config.js file for SerGIS Server (to overwrite the repo default)
CONFIG_JS = "C:\\sergis_files\\sergis-server-config.js"
# Config files to copy to the web directory: [(from, to), ...]
CONFIG_FILES = [
    # The node.js server config file
    (CONFIG_JS, os.path.join(WEB_DIR, "config.js")),
    # The iisnode config file (here, it's relative to the script location)
    (os.path.join(os.path.dirname(os.path.realpath(__file__)), "sergis-server-web.config"), os.path.join(WEB_DIR, "web.config"))
]
# Config files requires to run grunt: [(from, to), ...]
GRUNT_CONFIG_FILES = [
    # The node.js server config file
    (CONFIG_JS, os.path.join(GIT_REPO, "config.js"))
]
# Ignored files (to not copy from the repo to the web directory)
GIT_REPO_IGNORE = [".git", "config.js", ".gitignore", ".gitmodules"]
# Additional ignored files if we're running in "lite mode"
GIT_REPO_IGNORE_LITE = ["node_modules"]
# The directory to the nodejs installation
NODE_DIR = "C:\\Program Files\\nodejs"
# The location of the grunt command
#GRUNT_PATH = os.path.expanduser("~\\AppData\\Roaming\\npm\\grunt.cmd")
GRUNT_PATH = "C:\\ProgramData\\npm\\grunt.cmd"
# The location of NSSM (see http://nssm.co/)
NSSM_PATH = "C:\\nssm\\win64\\nssm.exe"
# The NSSM service name
NSSM_SERVICE_NAME = "sergis-server-service"
# The location of the git executable (this tries to find GitHub's git if no other is specified)
GIT_PATH = "C:\\Program Files (x86)\\Git\\bin\\git.exe"
# NOTE(review): GIT_PATH is hard-coded to a non-empty path above, so this
# GitHub-portable-git fallback is currently dead code; it only runs when
# GIT_PATH is manually set to "".
if not GIT_PATH:
    GITHUB_DIR = os.path.expanduser("~\\AppData\\Local\\GitHub")
    if os.path.exists(GITHUB_DIR):
        for f in os.listdir(GITHUB_DIR):
            # GitHub's portable git lives in a versioned PortableGit_* folder.
            if f[:12] == "PortableGit_":
                GIT_PATH = os.path.join(GITHUB_DIR, f, "bin", "git.exe")
                break
################################################################################
## Functions for doing different tasks to update the web directory
################################################################################
def updateGitRepos():
"""Update the git repository and any submodules."""
print "Running", GIT_PATH
print ""
print subprocess.check_output([GIT_PATH, "pull"], cwd=GIT_REPO)
print ""
for submod in GIT_SUBMODULES:
print subprocess.check_output([GIT_PATH, "pull"], cwd=submod)
print ""
def runNPM():
"""Run npm install"""
print "Running npm install"
subprocess.check_call([
os.path.join(NODE_DIR, "node.exe"),
os.path.join(NODE_DIR, "node_modules", "npm", "bin", "npm-cli.js"),
"install"
], cwd=GIT_REPO)
print ""
def runGrunt():
"""Run grunt dist"""
print "Running grunt dist"
# Firstly, copy in any config files
for src, dst in GRUNT_CONFIG_FILES:
if os.path.exists(dst):
os.rename(dst, dst + ".BAK")
shutil.copy(src, dst)
# Run grunt
subprocess.check_call([
GRUNT_PATH,
"dist"
], cwd=GIT_REPO)
# Reset any config files that we copied in
for src, dst in GRUNT_CONFIG_FILES:
if os.path.exists(dst) and os.path.exists(dst + ".BAK"):
os.unlink(dst)
os.rename(dst + ".BAK", dst)
print ""
def setIISPermissions():
"""Set proper permissions (only needs to be done once)"""
print "Setting permissions on", WEB_DIR
subprocess.check_call(["C:\\windows\\system32\\icacls.exe", WEB_DIR, "/grant", "IIS_IUSRS:(OI)(CI)F"])
print ""
def clearWebDirectory():
"""Clear out the directory"""
print "Clearing out", WEB_DIR
for item in os.listdir(WEB_DIR):
if not item in WEB_DIR_CLEAR_IGNORE:
file_path = os.path.join(WEB_DIR, item)
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
print ""
def copyToWebDirectory():
"""Copy the latest files to the web directory"""
for item in os.listdir(GIT_REPO):
if not (item in GIT_REPO_IGNORE or (LITE and item in GIT_REPO_IGNORE_LITE)):
print "Copying", item
if os.path.isdir(os.path.join(GIT_REPO, item)):
if os.path.exists(os.path.join(WEB_DIR, item)):
shutil.rmtree(os.path.join(WEB_DIR, item))
shutil.copytree(os.path.join(GIT_REPO, item), os.path.join(WEB_DIR, item), ignore=shutil.ignore_patterns(*GIT_REPO_IGNORE))
else:
if os.path.exists(os.path.join(WEB_DIR, item)):
os.remove(os.path.join(WEB_DIR, item))
shutil.copy(os.path.join(GIT_REPO, item), WEB_DIR)
print ""
def copyConfigFiles():
"""Copy config files"""
for src, dst in CONFIG_FILES:
print "Copying", src, "to", dst
shutil.copy(src, dst)
print ""
def createUploadsDirectory():
    """Create WEB_DIR/uploads if it does not already exist."""
    uploads_dir = os.path.join(WEB_DIR, "uploads")
    if not os.path.exists(uploads_dir):
        os.mkdir(uploads_dir)
def _runNSSMCommand(verb):
    """Run `nssm <verb> <service>` elevated via a UAC prompt, then give the
    service time to settle. Factored out of stopService/startService, which
    were copy/paste duplicates differing only in the verb."""
    shell.ShellExecuteEx(lpVerb="runas",
                         lpFile="cmd",
                         lpParameters="/K " + NSSM_PATH + " " + verb + " " + NSSM_SERVICE_NAME,
                         nShow=5,
                         fMask=256)  # SEE_MASK_NOASYNC
    # Let it recuperate
    time.sleep(5)
def stopService():
    """Stop the NSSM service for the SerGIS Server socket server."""
    _runNSSMCommand("stop")
def startService():
    """Start the NSSM service for the SerGIS Server socket server."""
    _runNSSMCommand("start")
################################################################################
## Actually run the tasks to update the web directory
################################################################################
def check():
"""Make sure that all required files/directories exist."""
if not os.path.exists(GIT_REPO):
print "Couldn't find GIT_REPO at", GIT_REPO
return False
if not os.path.exists(WEB_DIR):
print "Couldn't find WEB_DIR at", WEB_DIR
return False
if not os.path.exists(CONFIG_JS):
print "Couldn't find CONFIG_JS at", CONFIG_JS
return False
if not os.path.exists(NODE_DIR):
print "Couldn't find NODE_DIR at", NODE_DIR
return False
if not os.path.exists(GRUNT_PATH):
print "Couldn't find GRUNT_PATH at", GRUNT_PATH
return False
if not os.path.exists(NSSM_PATH):
print "Couldn't find NSSM_PATH at", NSSM_PATH
return False
if not os.path.exists(GIT_PATH):
print "Couldn't find GIT_PATH at", GIT_PATH
return False
# All seems good
return True
# Alrighty, let's get started!
if __name__ == "__main__":
    # GIT_PATH can only be empty if the auto-detection fallback found nothing.
    if not GIT_PATH:
        print "Couldn't find git!"
    elif check():
        # Full sequence; "lite" mode skips the expensive reinstall steps
        # (npm install, permissions, full clear, uploads dir).
        updateGitRepos()
        if not LITE:
            runNPM()
        runGrunt()
        stopService()
        if not LITE:
            setIISPermissions()
            clearWebDirectory()
        copyToWebDirectory()
        copyConfigFiles()
        if not LITE:
            createUploadsDirectory()
        startService()
|
import os
import subprocess
import sys
from ..base import BaseTestCase
def inject_sitecustomize(path):
    """Creates a new environment, injecting a ``sitecustomize.py`` module in
    the current PYTHONPATH.

    :param path: package path containing ``sitecustomize.py`` module, starting
                 from the ddtrace root folder
    :returns: a cloned environment that includes an altered PYTHONPATH with
              the given `sitecustomize.py`
    """
    from ddtrace import __file__ as root_file
    ddtrace_root = os.path.dirname(root_file)
    bootstrap_dir = os.path.join(ddtrace_root, '..', path)
    # Clone the current environment; otherwise `ddtrace` scripts are not
    # found when the `env` kwarg is passed to subprocess.
    cloned_env = os.environ.copy()
    # Prepend the bootstrap directory to PYTHONPATH so that an
    # `import sitecustomize` resolves to the one we specify.
    cloned_env['PYTHONPATH'] = ':'.join([bootstrap_dir] + list(sys.path))
    return cloned_env
class DdtraceRunTest(BaseTestCase):
    """Integration tests for the `ddtrace-run` wrapper.

    Each test shells out to a fixture script under tests/commands/; the
    fixture performs its own assertions and prints 'Test success' when they
    hold, so every test here just checks the subprocess output prefix.
    """
    def test_service_name_passthrough(self):
        """
        $DATADOG_SERVICE_NAME gets passed through to the program
        """
        with self.override_env(dict(DATADOG_SERVICE_NAME='my_test_service')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_service.py']
            )
            assert out.startswith(b'Test success')
    def test_env_name_passthrough(self):
        """
        $DATADOG_ENV gets passed through to the global tracer as an 'env' tag
        """
        with self.override_env(dict(DATADOG_ENV='test')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_env.py']
            )
            assert out.startswith(b'Test success')
    def test_env_enabling(self):
        """
        DATADOG_TRACE_ENABLED=false allows disabling of the global tracer
        """
        with self.override_env(dict(DATADOG_TRACE_ENABLED='false')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_disabled.py']
            )
            assert out.startswith(b'Test success')
        with self.override_env(dict(DATADOG_TRACE_ENABLED='true')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_enabled.py']
            )
            assert out.startswith(b'Test success')
    def test_patched_modules(self):
        """
        Using `ddtrace-run` registers some generic patched modules
        """
        out = subprocess.check_output(
            ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_patched_modules.py']
        )
        assert out.startswith(b'Test success')
    def test_integration(self):
        # Runs the fixture as a module (-m) rather than as a script path.
        out = subprocess.check_output(
            ['ddtrace-run', 'python', '-m', 'tests.commands.ddtrace_run_integration']
        )
        assert out.startswith(b'Test success')
    def test_debug_enabling(self):
        """
        DATADOG_TRACE_DEBUG=true allows setting debug logging of the global tracer
        """
        with self.override_env(dict(DATADOG_TRACE_DEBUG='false')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_no_debug.py']
            )
            assert out.startswith(b'Test success')
        with self.override_env(dict(DATADOG_TRACE_DEBUG='true')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_debug.py']
            )
            assert out.startswith(b'Test success')
    def test_host_port_from_env(self):
        """
        DATADOG_TRACE_AGENT_HOSTNAME|PORT point to the tracer
        to the correct host/port for submission
        """
        with self.override_env(dict(DATADOG_TRACE_AGENT_HOSTNAME='172.10.0.1',
                                    DATADOG_TRACE_AGENT_PORT='8120')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_hostname.py']
            )
            assert out.startswith(b'Test success')
    def test_host_port_from_env_dd(self):
        """
        DD_AGENT_HOST|DD_TRACE_AGENT_PORT point to the tracer
        to the correct host/port for submission
        """
        with self.override_env(dict(DD_AGENT_HOST='172.10.0.1',
                                    DD_TRACE_AGENT_PORT='8120')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_hostname.py']
            )
            assert out.startswith(b'Test success')
            # Do we get the same results without `ddtrace-run`?
            out = subprocess.check_output(
                ['python', 'tests/commands/ddtrace_run_hostname.py']
            )
            assert out.startswith(b'Test success')
    def test_dogstatsd_client_env_host_and_port(self):
        """
        DD_AGENT_HOST and DD_DOGSTATSD_PORT used to configure dogstatsd with udp in tracer
        """
        with self.override_env(dict(DD_AGENT_HOST='172.10.0.1',
                                    DD_DOGSTATSD_PORT='8120')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py']
            )
            assert out.startswith(b'Test success')
    def test_dogstatsd_client_env_url_host_and_port(self):
        """
        DD_DOGSTATSD_URL=<host>:<port> used to configure dogstatsd with udp in tracer
        """
        with self.override_env(dict(DD_DOGSTATSD_URL='172.10.0.1:8120')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py']
            )
            assert out.startswith(b'Test success')
    def test_dogstatsd_client_env_url_udp(self):
        """
        DD_DOGSTATSD_URL=udp://<host>:<port> used to configure dogstatsd with udp in tracer
        """
        with self.override_env(dict(DD_DOGSTATSD_URL='udp://172.10.0.1:8120')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py']
            )
            assert out.startswith(b'Test success')
    def test_dogstatsd_client_env_url_unix(self):
        """
        DD_DOGSTATSD_URL=unix://<path> used to configure dogstatsd with socket path in tracer
        """
        with self.override_env(dict(DD_DOGSTATSD_URL='unix:///dogstatsd.sock')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py']
            )
            assert out.startswith(b'Test success')
    def test_dogstatsd_client_env_url_path(self):
        """
        DD_DOGSTATSD_URL=<path> used to configure dogstatsd with socket path in tracer
        """
        with self.override_env(dict(DD_DOGSTATSD_URL='/dogstatsd.sock')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py']
            )
            assert out.startswith(b'Test success')
    def test_priority_sampling_from_env(self):
        """
        DATADOG_PRIORITY_SAMPLING enables Distributed Sampling
        """
        with self.override_env(dict(DATADOG_PRIORITY_SAMPLING='True')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_priority_sampling.py']
            )
            assert out.startswith(b'Test success')
    def test_patch_modules_from_env(self):
        """
        DATADOG_PATCH_MODULES overrides the defaults for patch_all()
        """
        from ddtrace.bootstrap.sitecustomize import EXTRA_PATCHED_MODULES, update_patched_modules
        orig = EXTRA_PATCHED_MODULES.copy()
        # empty / malformed strings are no-ops
        with self.override_env(dict(DATADOG_PATCH_MODULES='')):
            update_patched_modules()
            assert orig == EXTRA_PATCHED_MODULES
        with self.override_env(dict(DATADOG_PATCH_MODULES=':')):
            update_patched_modules()
            assert orig == EXTRA_PATCHED_MODULES
        with self.override_env(dict(DATADOG_PATCH_MODULES=',')):
            update_patched_modules()
            assert orig == EXTRA_PATCHED_MODULES
        with self.override_env(dict(DATADOG_PATCH_MODULES=',:')):
            update_patched_modules()
            assert orig == EXTRA_PATCHED_MODULES
        # overrides work in either direction
        with self.override_env(dict(DATADOG_PATCH_MODULES='django:false')):
            update_patched_modules()
            assert EXTRA_PATCHED_MODULES['django'] is False
        with self.override_env(dict(DATADOG_PATCH_MODULES='boto:true')):
            update_patched_modules()
            assert EXTRA_PATCHED_MODULES['boto'] is True
        with self.override_env(dict(DATADOG_PATCH_MODULES='django:true,boto:false')):
            update_patched_modules()
            assert EXTRA_PATCHED_MODULES['boto'] is False
            assert EXTRA_PATCHED_MODULES['django'] is True
        with self.override_env(dict(DATADOG_PATCH_MODULES='django:false,boto:true')):
            update_patched_modules()
            assert EXTRA_PATCHED_MODULES['boto'] is True
            assert EXTRA_PATCHED_MODULES['django'] is False
    def test_sitecustomize_without_ddtrace_run_command(self):
        # [Regression test]: ensure `sitecustomize` path is removed only if it's
        # present otherwise it will cause:
        #     ValueError: list.remove(x): x not in list
        # as mentioned here: https://github.com/DataDog/dd-trace-py/pull/516
        env = inject_sitecustomize('')
        out = subprocess.check_output(
            ['python', 'tests/commands/ddtrace_minimal.py'],
            env=env,
        )
        # `out` contains the `loaded` status of the module
        result = out[:-1] == b'True'
        self.assertTrue(result)
    def test_sitecustomize_run(self):
        # [Regression test]: ensure users `sitecustomize.py` is properly loaded,
        # so that our `bootstrap/sitecustomize.py` doesn't override the one
        # defined in users' PYTHONPATH.
        env = inject_sitecustomize('tests/commands/bootstrap')
        out = subprocess.check_output(
            ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_sitecustomize.py'],
            env=env,
        )
        assert out.startswith(b'Test success')
    def test_sitecustomize_run_suppressed(self):
        # ensure `sitecustomize.py` is not loaded if `-S` is used
        # (the trailing `-S` argument tells the fixture to expect that)
        env = inject_sitecustomize('tests/commands/bootstrap')
        out = subprocess.check_output(
            ['ddtrace-run', 'python', '-S', 'tests/commands/ddtrace_run_sitecustomize.py', '-S'],
            env=env,
        )
        assert out.startswith(b'Test success')
    def test_argv_passed(self):
        # Extra command-line arguments must reach the wrapped program intact.
        out = subprocess.check_output(
            ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_argv.py', 'foo', 'bar']
        )
        assert out.startswith(b'Test success')
    def test_got_app_name(self):
        """
        apps run with ddtrace-run have a proper app name
        """
        out = subprocess.check_output(
            ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_app_name.py']
        )
        assert out.startswith(b'ddtrace_run_app_name.py')
    def test_global_trace_tags(self):
        """ Ensure global tags are passed in from environment
        """
        with self.override_env(dict(DD_TRACE_GLOBAL_TAGS='a:True,b:0,c:C')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_global_tags.py']
            )
            assert out.startswith(b'Test success')
    def test_logs_injection(self):
        """ Ensure logs injection works
        """
        with self.override_env(dict(DD_LOGS_INJECTION='true')):
            out = subprocess.check_output(
                ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_logs_injection.py']
            )
            assert out.startswith(b'Test success')
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class SoftwareVersions(object):
    """
    Swagger-generated model for a FlashArray software version entry.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    swagger_types = {
        'details': 'str',
        'name': 'str',
        'release_family': 'str',
        'upgrade_hops': 'list[str]',
        'version': 'str'
    }
    attribute_map = {
        'details': 'details',
        'name': 'name',
        'release_family': 'release_family',
        'upgrade_hops': 'upgrade_hops',
        'version': 'version'
    }
    required_args = {
    }
    def __init__(
        self,
        details=None,  # type: str
        name=None,  # type: str
        release_family=None,  # type: str
        upgrade_hops=None,  # type: List[str]
        version=None,  # type: str
    ):
        """
        Keyword args:
            details (str): The description of the version.
            name (str): The name of the software.
            release_family (str): The major and minor release number of the version.
            upgrade_hops (list[str]): The list of software versions the upgrade will go through.
            version (str): The version of the software.
        """
        # Only set attributes that were explicitly provided; unset ones stay
        # absent so to_dict() can skip them via hasattr().
        provided = (('details', details), ('name', name),
                    ('release_family', release_family),
                    ('upgrade_hops', upgrade_hops), ('version', version))
        for attr_name, attr_value in provided:
            if attr_value is not None:
                setattr(self, attr_name, attr_value)
    def __setattr__(self, key, value):
        # Reject attributes that are not part of the model definition.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SoftwareVersions`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Unresolved lazy Property placeholders read as "attribute not set".
        if isinstance(value, Property):
            raise AttributeError
        return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        def _serialize(val):
            # Order of checks mirrors the generated code: list, then nested
            # model, then dict, then plain value.
            if isinstance(val, list):
                return [x.to_dict() if hasattr(x, "to_dict") else x for x in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in val.items()}
            return val
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                result[attr] = _serialize(getattr(self, attr))
        if issubclass(SoftwareVersions, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SoftwareVersions):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
def findRightInterval(intervals):
    """
    For each interval, find the index of the interval whose start point is
    the smallest start >= this interval's end ("right interval"), or -1 if
    none exists (LeetCode 436).

    :type intervals: List[List[int]]
    :rtype: List[int]

    Bug fix: the original printed the answer but never returned it, despite
    the documented :rtype:. The answer is now returned; the print is kept so
    the script's console output is unchanged.
    """
    from bisect import bisect_left
    # Attach each interval's original index, then sort by start point, e.g.
    # [[3,4,0],[2,3,1],[1,2,2]] -> [[1,2,2],[2,3,1],[3,4,0]]
    indexed = sorted(intervals[i] + [i] for i in range(len(intervals)))
    starts = [iv[0] for iv in indexed]
    ans = [-1] * len(intervals)
    for iv in indexed:
        # First position whose start >= this interval's end.
        pos = bisect_left(starts, iv[1])
        if pos < len(indexed):
            ans[iv[2]] = indexed[pos][2]
    print(ans)
    return ans
def binarySearch(array, target):
    """Return the index of the first entry whose start point (element 0) is
    >= target, or -1 when every start point is smaller.
    `array` must be sorted by start point."""
    lo, hi = 0, len(array) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if array[mid][0] >= target:
            hi = mid - 1
        else:
            lo = mid + 1
    return lo if lo < len(array) else -1
# Smoke-test against the LeetCode 436 examples; expected printed output:
# [-1, 0, 1], [-1], [-1, 2, -1]
findRightInterval([[3,4],[2,3],[1,2]])
findRightInterval([[1,2]])
findRightInterval([[1,4],[2,3],[3,4]])
|
"""
Tools model definitions
"""
from cuid import cuid
import time
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
from django.conf.global_settings import LANGUAGES as DJANGO_LANG
from django.utils.translation import gettext_lazy as _
from django.contrib import messages
# import all of the publishers.
from .publisher import (publish_XdBoolean, publish_XdLink, publish_XdString, publish_XdFile, publish_XdInterval,
publish_ReferenceRange, publish_SimpleReferenceRange, publish_XdOrdinal, publish_XdCount, publish_XdQuantity, publish_XdFloat,
publish_XdRatio, publish_XdTemporal, publish_Party, publish_Participation, publish_Audit, publish_Attestation,
publish_Cluster, publish_DM)
from S3ModelTools.settings import AUTH_USER_MODEL
# Start from two curated defaults, then append every language Django ships
# so the model choices cover all of them.
LANGUAGES = [('en-US', 'US English'), ('pt-BR', 'Brazilian Portuguese')]
for n in DJANGO_LANG:
    LANGUAGES.append(n)
def get_cuid():
    """Return a fresh cuid; sleep ~1ms first as insurance that back-to-back
    calls cannot collide."""
    # insure there are no collisions
    time.sleep(.001)
    return cuid()
# no one should be able to delete a published object
@receiver(pre_delete)
def no_delete_test(sender, instance, **kwargs):
    """Signal guard: refuse deletion of any published model component.

    Fix: the original membership list repeated XdString three times; the
    deduplicated tuple below has identical `in` semantics.
    """
    protected = (Cluster, XdBoolean, XdString, XdCount, XdInterval, XdFile,
                 XdOrdinal, XdQuantity, XdFloat, XdRatio, XdTemporal, XdLink,
                 Participation, Party, ReferenceRange, Units)
    if sender in protected and instance.published:
        raise PermissionDenied
def dm_folder(instance, filename):
    """Build the upload path '<sanitized title>/<filename>' for a DM file.
    The folder name keeps only the ASCII alphanumeric characters of the
    instance's title."""
    safe_chars = [c for c in instance.title if c.isalnum() and ord(c) <= 127]
    return '/'.join([''.join(safe_chars), filename])
def get_rcode(ctid):
    """Find the model component with the given ct_id across all datatype
    models and return its (label, r_code) tuple, or None if no model
    matches.

    Fix: the original repeated the try/except lookup block once per model
    and, through copy/paste, queried XdString three times and XdFile twice.
    The loop below preserves the original search order with duplicates
    removed; the return contract (tuple or None) is unchanged.
    """
    # Search order matches the original chain, deduplicated.
    model_classes = (XdBoolean, XdLink, XdString, XdFile, XdOrdinal,
                     XdCount, XdQuantity, XdFloat, XdRatio, XdTemporal)
    for model in model_classes:
        try:
            obj = model.objects.get(ct_id=ctid)
        except ObjectDoesNotExist:
            continue
        return (obj.label, obj.r_code)
    return None
def get_sentinel_user():
    """Return (creating it if needed) the placeholder 'deleted' user that
    replaces removed accounts (see Modeler.user on_delete)."""
    # Bug fix: get_user_model is not imported at module level in this file,
    # so the original raised NameError the first time a user was deleted.
    from django.contrib.auth import get_user_model
    return get_user_model().objects.get_or_create(username='deleted')[0]
class Project(models.Model):
    """
    Every item created in tools must be assigned to a Project when created. All items (except DM) may be
    reused in multiple DMs. However, this does not change the original Project.
    The Allowed Groups field contains each of the User Groups allowed to see each item with this Project name.
    The User Group, Open, is assigned to every user. So if you assign the Open group as one of the allowed groups,
    all tools users will see this item.
    """
    # Unique, indexed human-readable name; other models in this module FK to
    # it via to_field="prj_name".
    prj_name = models.CharField(_("project name"), max_length=110, unique=True, db_index=True, help_text=_('Enter the name of your project.'))
    description = models.TextField(_("project description"), blank=True, help_text=_('Enter a description or explaination of an acronym of the project.'))
    def __str__(self):
        return self.prj_name
    class Meta:
        verbose_name = _("Project")
        verbose_name_plural = _("Projects")
        ordering = ['prj_name']
class Modeler(models.Model):
    """
    Provides names and email addresses for the author and contributor sections of the DM Metadata.
    Also contains the default project for the user.
    """
    # When the auth user is deleted, this FK is repointed at the sentinel
    # 'deleted' user (see get_sentinel_user) instead of cascading.
    user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.SET(get_sentinel_user),)
    name = models.CharField(_("Name"), max_length=110, help_text=_('Enter the author name as it should appear in DM metadata.'))
    email = models.EmailField(_("Email"), max_length=110, help_text=_('Enter the email address as it should appear in DM metadata as an author and/or contributor.'))
    project = models.ForeignKey(Project, verbose_name=_("Default Project"), to_field="prj_name", help_text=_('Choose your default Project.'), blank=True, null=True, on_delete=models.CASCADE,)
    prj_filter = models.BooleanField(_('Filter by Project'), default=True, help_text=_('Uncheck this box if want to see choices from all projects. Note that this will very likely have a negative impact on performance.'))
    class Meta:
        verbose_name = "Modeler"
        verbose_name_plural = "Modelers"
        ordering = ['name', 'email']
    def __str__(self):
        return self.name.strip()
class NS(models.Model):
    """
    Provides a set of namespaces & abbreviations that are guaranteed to be referenced in a DM.
    Along with valid classes from those vocabularies. ex. rdfs and http://www.w3.org/2000/01/rdf-schema#
    """
    # NOTE(review): "namesspace" typo in the user-facing help_text below is
    # deliberately left as-is here (fixing it would change runtime strings
    # and trigger a Django migration).
    abbrev = models.CharField(_("NS Abbreviation"), max_length=15, help_text=_('Enter a valid namesspace abbreviation.'))
    uri = models.CharField(_("NS URI"), max_length=1024, help_text=_('Enter a valid namesspace URI.'))
    class Meta:
        verbose_name = "Namespace"
        verbose_name_plural = "Namespaces"
    def __str__(self):
        return self.abbrev.strip()
class Predicate(models.Model):
    """
    Provides a set of pre-defined namespace abbreviations that are guaranteed to be referenced in a DM.
    Along with valid classes from those vocabularies. ex. rdfs:isDefinedBy
    """
    ns_abbrev = models.ForeignKey(NS, verbose_name=_("NS Abbreviation"), help_text=_('Select a valid namesspace abbreviation.'), on_delete=models.CASCADE,)
    class_name = models.CharField(_("Classname"), max_length=30, help_text=_('Enter a valid classname from the vocabulary.'))
    def __str__(self):
        # Rendered as "abbrev:ClassName", e.g. "rdfs:isDefinedBy".
        return self.ns_abbrev.abbrev + ":" + self.class_name.strip()
    class Meta:
        ordering = ['ns_abbrev', 'class_name']
        verbose_name = "Predicate"
        verbose_name_plural = "Predicates"
class PredObj(models.Model):
    """
    Predicate - Object references.
    """
    # po_name is display-only metadata to aid selection; not part of the
    # MC semantics.
    po_name = models.CharField(_("Name"), max_length=100, help_text=_("Enter a human readable name for this Predicate/URI combination. This is only used to aide selection, it is not part of the MC semantics."), blank=True, default='')
    predicate = models.ForeignKey(Predicate, verbose_name=_("Predicate"), help_text=_("Select a predicate to define the RDF triple."), blank=True, null=True, on_delete=models.SET_NULL,)
    object_uri = models.CharField(_("Object URI"), max_length=2000, help_text="Enter an IRI for the object of the RDF triple.", blank=True, default='')
    project = models.ForeignKey(Project, verbose_name=_("Project Name"), to_field="prj_name", help_text=_('Choose the name of the Project.'), on_delete=models.CASCADE,)
    def __str__(self):
        # NOTE(review): raises if predicate is NULL? No -- None.__str__()
        # returns 'None', so a SET_NULL'd predicate renders as "None".
        return (self.project.prj_name + ' { ' + self.po_name.strip() + ' } ' + self.predicate.__str__() + " --> " + self.object_uri.strip())
    class Meta:
        ordering = ['project', 'po_name']
        verbose_name = "RDF Object"
        verbose_name_plural = "RDF Objects"
class Common(models.Model):
    """
    Columns common to all entries except DM.

    Abstract base: every concrete model-component table inherits these
    bookkeeping columns (project, audit timestamps, publication flag,
    generated schema/app code, etc.).
    """
    project = models.ForeignKey(Project, verbose_name=_("Project Name"), to_field="prj_name", help_text=_('Choose the name of your Project.'), on_delete=models.CASCADE,)
    public = models.BooleanField(_("Is Public?"), default=True, help_text=_("Public components are available to every other modeller. If not public then the component is only visible within the project it was defined in."))
    label = models.CharField(_('label'), max_length=110, help_text=_("A human readable label used to identify this model in tools. This will also be use in generated app UI."))
    # ct_id is the stable identity of the MC; generated once via get_cuid
    # and never editable.
    ct_id = models.CharField(_("CUID"), max_length=40, default=get_cuid, editable=False, unique=True, help_text=_('The unique identifier for the MC.'))
    created = models.DateTimeField(_('created'), auto_now_add=True, help_text=_('The dateTime that the MC was created.'))
    updated = models.DateTimeField(_('last updated'), auto_now=True, help_text=_("Last update."))
    # Managed by the publication workflow, not user-editable; pre_delete
    # guard (no_delete_test) blocks deletion once this is True.
    published = models.BooleanField(_("published"), default=False, help_text=_("Published must be a green check icon in order to use this in a DM. This is not user editable. It is managed by the publication process."))
    description = models.TextField(_('description'), help_text=_("Enter a free text description for this complexType. Include a usage statement and any possible misuses. This is used as the annotation for the MC and as help text in the UI."), null=True)
    pred_obj = models.ManyToManyField(PredObj, verbose_name=_("RDF Object"), blank=True, help_text=_("Select or create a new set of RDF Objects as semantic links to define this item."))
    schema_code = models.TextField(_("Schema Code"), help_text="This is only writable from the tools, not via user input. It contains the code required for each component to create an entry in a DM.", blank=True, null=True, default='')
    lang = models.CharField(_("language"), max_length=40, choices=LANGUAGES, default='en-US', help_text=_('Choose the language of this MC.'))
    # NOTE(review): default=1 assumes a Modeler row with pk 1 always exists
    # (SET_DEFAULT falls back to it) -- confirm fixture/bootstrap data.
    creator = models.ForeignKey(Modeler, verbose_name="Creator", blank=True, related_name='%(class)s_related_creator', default=1, on_delete=models.SET_DEFAULT,)
    edited_by = models.ForeignKey(Modeler, verbose_name="Last Edited By", blank=True, related_name='%(class)s_related_edited_by', default=1, on_delete=models.SET_DEFAULT,)
    seq = models.CharField(_("Sequence Number"), max_length=4, default='0000', help_text=_('Enter the sequence number (aka. TabIndex) for this component in the UI. Components nested in a Cluster are grouped together. When two or more components have the same number they will be sorted by label.'))
    validate = models.BooleanField(_("Hard Validate?"), default=False, help_text=_("Hard validation is when the UI enforces criteria on input fields. Soft validation allows users to input any value, warns them when it is not within parameters of the component. Then offers to allow correction or select an Exceptional Value as explaination."))
    app_code = models.TextField(_("App Code"), help_text="This is only writable from the tools, not via user input. It contains the code required for each component to create the User App.", blank=True, null=True, default='')
    def __str__(self):
        return self.project.prj_name + ' : ' + self.label
    class Meta:
        abstract = True
        ordering = ['project', 'label']
        indexes = [models.Index(fields=['project', 'label']),]
class XdAny(Common):
    """
    Abstract root of all datatypes.

    Adds the per-instance adapter CUID, the element-presence flags
    (require_*/allow_*) used by the schema/UI generators, and the preferred
    UI widget type.
    """
    UI_TYPES = (('choose', 'Choose Type:'), ('input', 'Input Box'), ('dropdown', 'Dropdown'), ('radiogroup', 'Radio Button Group'), ('radio', 'Radio Button'), ('checkbox', 'Checkbox(es)') )
    adapter_ctid = models.CharField(_("CUID"), max_length=40, default=get_cuid, editable=False, unique=True, help_text=_('This UUID is generated for datatype that can be included in a Cluster. It is used to create a specific XdAdapter complexType.'))
    # require_* flags force the corresponding element into the generated schema.
    require_act = models.BooleanField(_('Access Control Tag'), default=False, help_text=_('Check this box to require an Access Control Tag element.'))
    require_vtb = models.BooleanField(_('Valid Time Begin'), default=False, help_text=_('Check this box to require a Valid Time Begin element.'))
    require_vte = models.BooleanField(_('Valid Time End'), default=False, help_text=_('Check this box to require a Valid Time End element.'))
    require_tr = models.BooleanField(_('Time Recorded'), default=False, help_text=_('Check this box to require a Date & Time Recorded element.'))
    require_mod = models.BooleanField(_('Time Modified'), default=False, help_text=_('Check this box to require a Date & Time for last modified element.'))
    require_location = models.BooleanField(_('Location'), default=False, help_text=_('Check this box to require a decimal longitude and latitude location.'))
    # BUGFIX: the default must be a valid UI_TYPES key ('choose'), not display
    # text. 'Choose UI Type:' is not among the choices, so full_clean() on an
    # unmodified instance failed choice validation.
    ui_type = models.CharField(_("Preferred UI Type"), max_length=40, choices=UI_TYPES, default='choose', help_text=_('Choose the preferred UI Type for this component. Use UX best practices. Be aware that the generator may override your selection.'))
    # allow_* flags permit the optional element in the UI even when not required.
    allow_vtb = models.BooleanField(_('Allow Valid Time Begin?'), default=False, help_text=_('Check this box to allow a Valid Time Begin element even when not required.'))
    allow_vte = models.BooleanField(_('Allow Valid Time End?'), default=False, help_text=_('Check this box to allow a Valid Time End element even when not required.'))
    allow_tr = models.BooleanField(_('Allow Time Recorded?'), default=False, help_text=_('Check this box to allow a Date & Time Recorded element even when not required.'))
    allow_mod = models.BooleanField(_('Allow Time Modified?'), default=False, help_text=_('Check this box to allow a Date & Time for last modified element even when not required.'))
    allow_location = models.BooleanField(_('Allow Location?'), default=False, help_text=_('Check this box to allow a decimal longitude and latitude location even when not required.'))
    class Meta:
        abstract = True
        ordering = ['label']
class XdBoolean(XdAny):
    """
    Models items that represent boolean decisions, such as true/false or
    yes/no answers. Devise the meanings (usually questions in subjective
    data) carefully so the only allowed results really are true or false.

    Potential MisUse: not a replacement for naively modelled enumerated
    types such as male/female etc. Such values should be coded, and the
    enumeration often has more than two values.
    """
    trues = models.TextField(_('true values'), help_text=_("Enter the set of values that are Boolean TRUEs. For instance, if this is a 'Yes/No' type of concept, usually the 'Yes' is a Boolean TRUE. Enter one per line."))
    falses = models.TextField(_('false values'), help_text=_("Enter the set of values that are Boolean FALSEs. For instance, if this is a 'Yes/No' type of concept, usually the 'No' is a Boolean FALSE. Enter one per line."))

    def publish(self, request):
        """Generate the schema code; return a (message, level) tuple."""
        # Never overwrite previously generated code.
        if self.schema_code != '':
            return (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        msg = publish_XdBoolean(self)
        # A plausible generated schema exceeds 100 chars; anything shorter is
        # treated as a failed generation and rolled back.
        if len(self.schema_code) > 100:
            self.published = True
        else:
            self.published = False
            self.schema_code = ''
        self.save()
        return msg

    class Meta:
        verbose_name = "Boolean"
        verbose_name_plural = "Booleans"
        ordering = ['project', 'label']
class XdLink(XdAny):
    """
    Used to specify a link to another resource such as another DM.
    """
    # Target URI of the linked item.
    link = models.CharField(_('Link URI'), max_length=255, help_text=_("The Link URI that points to the linked item."))
    # The semantic relationship plus a URI pointing at its defining vocabulary.
    relation = models.CharField(_('Relationship'), max_length=110, help_text=_("The relationship describing the link. Usually constrained by an ontology such as <a href='https://github.com/oborel/obo-relations'>OBO RO</a>."))
    relation_uri = models.CharField(_('Relationship URI'), max_length=255, help_text=_("The relationship URI. Points to the vocabulary, ontology, etc that provides the relation."))
    def publish(self, request):
        """Generate schema code for this link.

        Returns a (message, level) tuple suitable for the Django messages
        framework. Never overwrites existing generated code.
        """
        if self.schema_code == '':
            msg = publish_XdLink(self)
            # A generated schema shorter than ~100 chars is treated as a
            # failed generation and rolled back.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "Link"
        verbose_name_plural = "Links"
        ordering = ['project', 'label']
class XdString(XdAny):
    """
    The string data type may contain characters, line feeds, carriage returns, and tab characters.
    Used to constrain strings to an enumerated set or may be used for free text entry.
    When defining constraints the publisher has this sequence of priorities: Enumeration, Exact Length, Max & Min Lengths, Min Length, Max Length, Default value.
    """
    min_length = models.IntegerField(_('minimum length'), help_text=_("Enter the minimum number of characters that are required for this string. If the character is optional, leave it blank."), null=True, blank=True)
    max_length = models.IntegerField(_('maximum length'), help_text=_("Enter the maximum number of characters that are required for this string. If the character is optional, leave it blank."), null=True, blank=True)
    exact_length = models.IntegerField(_('exact length'), help_text=_("Enter the exact length of the string. It should be defined only when the number of characters is always fixed (e.g. codes and identifiers)."), null=True, blank=True)
    enums = models.TextField(_('enumerations'), blank=True, help_text=_("Enter the set of values of the string (e.g.Male,Female). One per line."))
    # BUGFIX: corrected user-facing typo 'prefereable' -> 'preferable'.
    definitions = models.TextField(_('enumeration definitions'), blank=True, help_text=_("Enter a URI (preferable a URL) defining each enumeration. One per line. If the URI is the same for each enumeration then just put it on the first line."))
    def_val = models.CharField(_('default value'), max_length=255, blank=True, help_text=_("Enter a default value (up to 255 characters) for the string if desired. Cannot contain 'http://' nor 'https://'. Notice that if other restrictions are used, this default is ignored."))
    str_fmt = models.CharField(_('String Format'), max_length=60, blank=True, help_text=_("Enter a regular expression used to constrain the string to a specific format. See the XML reduced regex set here: https://www.regular-expressions.info/xml.html "))
    def publish(self, request):
        """Generate schema code for this string; return a (message, level) tuple.

        Never overwrites existing generated code; a generated schema shorter
        than ~100 chars is treated as a failed generation and rolled back.
        """
        if self.schema_code == '':
            msg = publish_XdString(self)
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "String"
        verbose_name_plural = "Strings"
        ordering = ['project', 'label']
class Units(XdAny):
    """
    An XdString data type used to define Units for Quantified types.
    """
    min_length = models.IntegerField(_('minimum length'), help_text=_("Enter the minimum number of characters that are required for this concept. If the character is optional, leave it blank."), null=True, blank=True)
    max_length = models.IntegerField(_('maximum length'), help_text=_("Enter the maximum number of characters that are required for this concept. If the character is optional, leave it blank."), null=True, blank=True)
    exact_length = models.IntegerField(_('exact length'), help_text=_("Enter the exact length of the concept. It should be defined only when the number of characters is always fixed (e.g. codes and identifiers)."), null=True, blank=True)
    # One unit symbol per line; 'definitions' supplies a matching URI per line.
    enums = models.TextField(_('Symbols'), help_text=_("Enter the abbreviations or symbols for allowed units designations. One per line."))
    def_val = models.CharField(_('default value'), max_length=255, blank=True, help_text=_("Enter a default value (up to 255 characters) for the string if desired. Cannot contain 'http://' nor 'https://'"))
    definitions = models.TextField(_('Symbol definitions'), help_text=_("Enter a URI for each symbol. One per line. These are used as rdf:isDefinedBy in the semantics. If the same URI is to be used for all symbols then enter it on the first line only."))
    str_fmt = models.CharField(_('String Format'), max_length=60, blank=True, help_text=_("Enter a regular expression used to constrain the string to a specific format. See the XML reduced regex set here: https://www.regular-expressions.info/xml.html "))
    def publish(self, request):
        """Generate schema code; return a (message, level) tuple.

        Units is published with the XdString publisher since it is an
        XdString restriction.
        """
        if self.schema_code == '':
            msg = publish_XdString(self)
            # Generated schemas shorter than ~100 chars are treated as failed
            # generations and rolled back.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "Units"
        verbose_name_plural = "Units"
        ordering = ['project', 'label']
class XdFile(XdAny):
    """
    Used to define external files that may be included directly or by reference.
    """
    MODE_TYPES = (('select', 'Select Mode:'), ('url', 'Link via a URL'), ('embed', 'Embed a file'))
    media_type = models.TextField(_("Media Type"), help_text=_("The allowed Media Types (formerly known as MIME Types) of the included data, one per line; i.e. text/html"), blank=True)
    encoding = models.CharField(_("encoding"), max_length=10, default='utf-8', help_text=_("<a href='http://www.iana.org/assignments/character-sets/character-sets.txt'>List of encoding types at IANA.</a>"))
    language = models.CharField(_("default language"), max_length=40, choices=LANGUAGES, default='en-US', help_text=_('Choose the DEFAULT language of the content.'))
    alt_txt = models.CharField(_("Alt. Text"), max_length=110, blank=True, help_text=_('Default alternative text label to display when the content cannot be displayed.'))
    # BUGFIX: the default must be the MODE_TYPES key 'select', not the display
    # text 'Select Mode:' which is 12 chars -- longer than max_length=6 -- and
    # not a valid choice, so saving/validating a fresh instance failed.
    content_mode = models.CharField(_("Content Mode"), default='select', help_text=_("Select how the content will referenced, either via a URL or included in the data instance."), choices=MODE_TYPES, max_length=6)
    def publish(self, request):
        """Generate schema code for this file type; return a (message, level) tuple.

        Never overwrites existing generated code; a generated schema shorter
        than ~100 chars is treated as a failed generation and rolled back.
        """
        if self.schema_code == '':
            msg = publish_XdFile(self)
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "File"
        ordering = ['project', 'label']
class XdInterval(XdAny):
    """
    Generic class defining an interval (i.e. range) of a comparable type. An interval is a contiguous
    subrange of a comparable base type. Used to define intervals of dates, times, quantities Whose units of measure match
    and datatypes are the same and are ordered. See http://docstore.mik.ua/orelly/xml/schema/ch04_04.htm for value limits on numerics.
    """
    INTERVAL_TYPES = (('None', 'Select Type:'), ('int', 'Count data (xs:int)'), ('decimal', 'Real set numbers (xs:decimal)'), ('float', 'Floating Point (xs:float)'),
                      ('dateTime', 'Date/Time (YYYY-MM-DDTHH:mm:ss)'), ('date', 'Date (YYYY-MM-DD)'), ('time', 'Time (HH:mm:ss)'), ('duration', 'Duration (xs:duration)'))
    lower = models.CharField(_("Lower Value"), max_length=110, blank=True, null=True, help_text=_('Enter the lower value of the interval. This will be used to set the minInclusive facet.'))
    upper = models.CharField(_("Upper Value"), max_length=110, blank=True, null=True, help_text=_('Enter the upper value of the interval. This will be used to set the maxInclusive facet.'))
    # BUGFIX: the default must be a valid INTERVAL_TYPES key; 'Select Type:' is
    # display text and 'None' is the placeholder key, so the old default failed
    # choice validation on unmodified instances.
    interval_type = models.CharField(_("Interval Type"), default='None', help_text=_("The XML Schema datatype of the upper and lower values."), choices=INTERVAL_TYPES, max_length=20)
    lower_included = models.BooleanField(_('Lower Included?'), default=True, help_text=_('Uncheck this box if the lower value is excluded in the interval'))
    upper_included = models.BooleanField(_('Upper Included?'), default=True, help_text=_('Uncheck this box if the upper value is excluded in the interval'))
    lower_bounded = models.BooleanField(_('Lower Bounded?'), default=True, help_text=_("Uncheck this box if the lower value is unbounded. If unchecked, instances must be set to xsi:nil='true'"))
    # BUGFIX: help text said 'lower value' (copy-paste from lower_bounded).
    upper_bounded = models.BooleanField(_('Upper Bounded?'), default=True, help_text=_("Uncheck this box if the upper value is unbounded. If unchecked, instances must be set to xsi:nil='true'"))
    units_name = models.CharField(_("Units Name"), max_length=60, blank=True, null=True, help_text=_('OPTIONAL: Enter the common name or abbreviation for these units.'))
    units_uri = models.URLField(_("Units URI"), max_length=2000, blank=True, null=True, help_text=_('Enter the URL pointing to the definition for these units. This is mandatory if you entered a Units Name.'))
    def publish(self, request):
        """Generate schema code for this interval; return a (message, level) tuple.

        Never overwrites existing generated code; a generated schema shorter
        than ~100 chars is treated as a failed generation and rolled back.
        """
        if self.schema_code == '':
            msg = publish_XdInterval(self)
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "Interval"
        ordering = ['project', 'label']
class ReferenceRange(XdAny):
    """
    Defines a named range to be associated with any ORDERED datum.
    """
    definition = models.CharField(_("Definition"), max_length=110, help_text=_("Enter the term that indicates the status of this range, e.g. 'normal', 'critical', 'therapeutic' etc."))
    # The actual data range is delegated to an XdInterval instance.
    interval = models.ForeignKey(XdInterval, verbose_name=_('interval'), help_text=_("The data range for this meaning. Select the appropriate XdInterval."), on_delete=models.CASCADE,)
    is_normal = models.BooleanField(_('Is Normal?'), default=False, help_text=_("Is this considered the normal range?"))
    def publish(self, request):
        """Generate schema code for this range; return a (message, level) tuple.

        Never overwrites existing generated code.
        """
        if self.schema_code == '':
            msg = publish_ReferenceRange(self)
            # Generated schemas shorter than ~100 chars are treated as failed
            # generations and rolled back.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "ReferenceRange"
        ordering = ['project', 'label']
class SimpleReferenceRange(XdAny):
    """
    Defines a ReferenceRange with one and only one Interval included.
    """
    definition = models.CharField(_("Definition"), max_length=110, help_text=_("Enter the term that indicates the status of this range, e.g. 'normal', 'critical', 'therapeutic' etc."))
    INTERVAL_TYPES = (('None', 'Select Type:'), ('int', 'Count data (xs:int)'), ('decimal', 'Real set numbers (xs:decimal)'), ('float', 'Floating Point (xs:float)'), ('dateTime', 'Date/Time (YYYY-MM-DDTHH:mm:ss)'), ('date', 'Date (YYYY-MM-DD)'), ('time', 'Time (HH:mm:ss)'), ('duration', 'Duration (xs:duration)'))
    lower = models.CharField(_("Lower Value"), max_length=110, blank=True, null=True, help_text=_('Enter the lower value of the interval. This will be used to set the minInclusive facet.'))
    upper = models.CharField(_("Upper Value"), max_length=110, blank=True, null=True, help_text=_('Enter the upper value of the interval. This will be used to set the maxInclusive facet.'))
    # BUGFIX: the default must be a valid INTERVAL_TYPES key; 'Select Type:' is
    # display text and 'None' is the placeholder key, so the old default failed
    # choice validation on unmodified instances (same fix as XdInterval).
    interval_type = models.CharField(_("Interval Type"), default='None', help_text=_("The XML Schema datatype of the upper and lower values."), choices=INTERVAL_TYPES, max_length=20)
    lower_included = models.BooleanField(_('Lower Included?'), default=True, help_text=_('Uncheck this box if the lower value is excluded in the interval'))
    upper_included = models.BooleanField(_('Upper Included?'), default=True, help_text=_('Uncheck this box if the upper value is excluded in the interval'))
    lower_bounded = models.BooleanField(_('Lower Bounded?'), default=True, help_text=_("Uncheck this box if the lower value is unbounded. If unchecked, instances must be set to xsi:nil='true'"))
    # BUGFIX: help text said 'lower value' (copy-paste from lower_bounded).
    upper_bounded = models.BooleanField(_('Upper Bounded?'), default=True, help_text=_("Uncheck this box if the upper value is unbounded. If unchecked, instances must be set to xsi:nil='true'"))
    units_name = models.CharField(_("Units Name"), max_length=60, blank=True, null=True, help_text=_('OPTIONAL: Enter the common name or abbreviation for these units.'))
    units_uri = models.URLField(_("Units URI"), max_length=2000, blank=True, null=True, help_text=_('Enter the URL pointing to the definition for these units. This is mandatory if you entered a Units Name.'))
    is_normal = models.BooleanField(_('Is Normal?'), default=False, help_text=_("Is this considered the normal range?"))
    def publish(self, request):
        """Generate schema code for this range; return a (message, level) tuple.

        Never overwrites existing generated code; a generated schema shorter
        than ~100 chars is treated as a failed generation and rolled back.
        """
        if self.schema_code == '':
            msg = publish_SimpleReferenceRange(self)
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "ReferenceRange (Simple)"
        verbose_name_plural = "ReferenceRanges (Simple)"
        ordering = ['project', 'label']
class XdOrdered(XdAny):
    """
    Abstract base for ordered values, covering ordinals as well as true
    quantities. Implementations require the functions '<', '>' and
    is_strictly_comparable_to ('==').
    """
    reference_ranges = models.ManyToManyField(ReferenceRange, blank=True, verbose_name=_('reference ranges'), help_text=_('Select the appropriate ReferenceRange that defines each ordered value. The listing is by Project: Reference Range Name.'))
    normal_status = models.CharField(_('normal status'), max_length=110, blank=True, null=True, help_text=_("Enter text that indicates a normal status. Example: This should be a Symbol in a XdOrdinal, a date range in a XdTemporal, a value range in a XdCount, etc."))
    simple_rr = models.ForeignKey(SimpleReferenceRange, null=True, blank=True, verbose_name=_("Reference Range (Simple)"), on_delete=models.SET_NULL)

    def publish(self):
        """Abstract hook: concrete ordered subtypes supply their own publisher."""
        pass

    class Meta:
        abstract = True
class XdOrdinal(XdOrdered):
    """
    Models rankings and scores, e.g. pain, Apgar values, etc, where there is:
    a) implied ordering,
    b) no implication that the distance between each value is constant, and
    c) the total number of values is finite.
    Note that although the term ‘ordinal’ in mathematics means natural numbers only, here any decimal is allowed, since negative and
    zero values are often used by medical and other professionals for values around a neutral point. Also, decimal values are
    sometimes used such as 0.5 or .25
    Examples of sets of ordinal values:
    -3, -2, -1, 0, 1, 2, 3 -- reflex response values
    0, 1, 2 -- Apgar values
    Used for recording any clinical datum which is customarily recorded using symbolic values.
    Example: the results on a urinalysis strip, e.g. {neg, trace, +, ++, +++} are used for leucocytes, protein, nitrites
    etc; for non-haemolysed blood {neg, trace, moderate}; for haemolysed blood {neg, trace, small, moderate, large}.
    """
    # ordinals, symbols and annotations are parallel, one-entry-per-line lists.
    ordinals = models.TextField(_('ordinals'), help_text=_("Enter the ordered enumeration of values. The base integer is zero with any number of integer values used to order the symbols. Example A: 0 = Trace, 1 = +, 2 = ++, 3 = +++, etc. Example B: 0 = Mild, 1 = Moderate, 2 = Severe. One per line."))
    symbols = models.TextField(_('symbols'), help_text=_("Enter the symbols or the text that represent the ordinal values, which may be strings made from '+' symbols, or other enumerations of terms such as 'mild', 'moderate', 'severe', or even the same number series used for the ordinal values, e.g. '1', '2', '3'.. One per line."))
    annotations = models.TextField(_('Symbols Definitions'), blank=True, help_text=_("Enter a URI for as a definition for each symbol. One per line. If the URI is the same for each symbol then just put it on the first line."))
    def publish(self, request):
        """Generate schema code for this ordinal; return a (message, level) tuple.

        Never overwrites existing generated code.
        """
        if self.schema_code == '':
            msg = publish_XdOrdinal(self)
            # Generated schemas shorter than ~100 chars are treated as failed
            # generations and rolled back.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "Ordinal"
        ordering = ['project', 'label']
class XdQuantified(XdOrdered):
    """
    Abstract class defining the concept of true quantified values, i.e. values which are not only ordered,
    but which have a precise magnitude.
    """
    # Magnitude bounds and XML Schema numeric facets for the generated type.
    min_magnitude = models.DecimalField(_('minimum magnitude'), blank=True, null=True, max_digits=10, decimal_places=5, help_text=_("The minimum allowed value for a magnitude. If there isn't a min. then leave blank."))
    max_magnitude = models.DecimalField(_('maximum magnitude'), blank=True, null=True, max_digits=10, decimal_places=5, help_text=_("Any maximum allowed value. If there isn't a max. then leave blank."))
    min_inclusive = models.DecimalField(_('minimum inclusive'), max_digits=10, decimal_places=5, help_text=_("Enter the minimum (inclusive) value for this concept."), null=True, blank=True)
    max_inclusive = models.DecimalField(_('maximum inclusive'), max_digits=10, decimal_places=5, help_text=_("Enter the maximum (inclusive) value for this concept."), null=True, blank=True)
    min_exclusive = models.DecimalField(_('minimum exclusive'), max_digits=10, decimal_places=5, help_text=_("Enter the minimum (exclusive) value for this concept."), null=True, blank=True)
    max_exclusive = models.DecimalField(_('maximum exclusive'), max_digits=10, decimal_places=5, help_text=_("Enter the maximum (exclusive) value for this concept."), null=True, blank=True)
    total_digits = models.IntegerField(_('total digits'), help_text=_("Enter the maximum number of digits for this concept, excluding the decimal separator and the decimal places."), null=True, blank=True)
    # require_* force the element into the schema; allow_* merely permit it in the UI.
    require_ms = models.BooleanField(_('Require Magnitude Status?'), default=False, help_text=_('MagnitudeStatus provides a general indication of the accuracy of the magnitude expressed in the XdQuantified subtypes. Should be used to inform users and not for decision support uses.'))
    require_error = models.BooleanField(_('Require Error Value?'), default=False, help_text=_('Error margin of measurement, indicating error in the recording method or instrument (+/- %). A logical value of 0 indicates 100% accuracy, i.e. no error.'))
    require_accuracy = models.BooleanField(_('Require Accuracy Value?'), default=False, help_text=_('Accuracy of the value in the magnitude attribute in the range 0% to (+/-)100%. A value of 0 means that the accuracy is unknown.'))
    allow_ms = models.BooleanField(_('Allow Magnitude Status?'), default=False, help_text=_('Allow a UI component for this element even if it is not required.'))
    allow_error = models.BooleanField(_('Allow Error Value?'), default=False, help_text=_('Allow a UI component for this element even if it is not required.'))
    allow_accuracy = models.BooleanField(_('Allow Accuracy Value?'), default=False, help_text=_('Allow a UI component for this element even if it is not required.'))
    def publish(self):
        # Abstract: concrete quantified subtypes supply their own publisher.
        pass
    class Meta:
        abstract = True
class XdCount(XdQuantified):
    """
    Countable quantities, e.g. pregnancies, steps taken by a physiotherapy
    patient, cigarettes smoked in a day, etc.

    Misuse: not for amounts of physical entities (which all have
    standardized units).
    """
    units = models.ForeignKey(Units, verbose_name=_('units'), related_name='%(class)s_related_units', null=True, help_text=_("Choose a units of measurement of this concept."), on_delete=models.SET_NULL,)

    def publish(self, request):
        """Generate the schema code; return a (message, level) tuple."""
        # Existing generated code is never overwritten.
        if self.schema_code != '':
            return (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        msg = publish_XdCount(self)
        # Generated schemas shorter than ~100 chars count as failures and
        # are rolled back.
        if len(self.schema_code) > 100:
            self.published = True
        else:
            self.published = False
            self.schema_code = ''
        self.save()
        return msg

    class Meta:
        verbose_name = "Count"
        ordering = ['project', 'label']
class XdQuantity(XdQuantified):
    """
    Quantitified type representing “scientific” quantities, i.e. quantities expressed as a magnitude (decimal) and
    units. Can also be used for time durations, where it is more convenient to treat these as simply a number of individual
    seconds, minutes, hours, days, months, years, etc. when no temporal calculation is to be performed.
    """
    fraction_digits = models.IntegerField(_('fraction digits'), help_text=_("Enter the maximum number of decimal places."), null=True, blank=True)
    units = models.ForeignKey(Units, verbose_name=_('units'), related_name='%(class)s_related_units', null=True, help_text=_("Choose a units of measurement of this concept."), on_delete=models.SET_NULL,)
    def publish(self, request):
        """Generate schema code for this quantity; return a (message, level) tuple.

        Never overwrites existing generated code.
        """
        if self.schema_code == '':
            msg = publish_XdQuantity(self)
            # Generated schemas shorter than ~100 chars are treated as failed
            # generations and rolled back.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "Quantity"
        verbose_name_plural = "Quantities"
        ordering = ['project', 'label']
class XdFloat(XdQuantified):
    """
    A quantified type for “scientific” quantities expressed as a magnitude
    (float) plus optional units.
    """
    units = models.ForeignKey(Units, verbose_name=_('units'), related_name='%(class)s_related_units', null=True, help_text=_("Choose a units of measurement of this concept."), on_delete=models.SET_NULL,)

    def publish(self, request):
        """Generate the schema code; return a (message, level) tuple."""
        # Existing generated code is never overwritten.
        if self.schema_code != '':
            return (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        msg = publish_XdFloat(self)
        # Generated schemas shorter than ~100 chars count as failures and
        # are rolled back.
        if len(self.schema_code) > 100:
            self.published = True
        else:
            self.published = False
            self.schema_code = ''
        self.save()
        return msg

    class Meta:
        verbose_name = "Float"
        verbose_name_plural = "Floats"
        ordering = ['project', 'label']
# TODO: Remove XdRatio and update the database structure. It is no longer in the RM.
class XdRatio(XdQuantified):
    """
    Models a ratio of values, i.e. where the numerator and denominator are both pure numbers. Should not
    be used to represent things like blood pressure which are often written using a ‘/’ character, giving the misleading
    impression that the item is a ratio, when in fact it is a structured value. Similarly, visual acuity, often written as
    (e.g.) “6/24” in clinical notes is not a ratio but an ordinal (which includes non-numeric symbols like CF = count
    fingers etc). Should not be used for formulations.
    """
    RATIO_CHOICES = (('ratio', _('Ratio')), ('proportion',_('Proportion')), ('rate', _('Rate')))
    ratio_type = models.CharField(_('ratio type'), max_length=10, choices=RATIO_CHOICES)
    # Numeric facets for the numerator.
    num_min_inclusive = models.IntegerField(_('numerator minimum inclusive'), help_text=_("Enter the minimum (inclusive) value for the numerator."), null=True, blank=True)
    num_max_inclusive = models.IntegerField(_('numerator maximum inclusive'), help_text=_("Enter the maximum (inclusive) value for the numerator."), null=True, blank=True)
    num_min_exclusive = models.IntegerField(_('numerator minimum exclusive'), help_text=_("Enter the minimum (exclusive) value for the numerator."), null=True, blank=True)
    num_max_exclusive = models.IntegerField(_('numerator maximum exclusive'), help_text=_("Enter the maximum (exclusive) value for the numerator."), null=True, blank=True)
    # Numeric facets for the denominator.
    den_min_inclusive = models.IntegerField(_('denominator minimum inclusive'), help_text=_("Enter the minimum (inclusive) value for the denominator."), null=True, blank=True)
    den_max_inclusive = models.IntegerField(_('denominator maximum inclusive'), help_text=_("Enter the maximum (inclusive) value for the denominator."), null=True, blank=True)
    den_min_exclusive = models.IntegerField(_('denominator minimum exclusive'), help_text=_("Enter the minimum (exclusive) value for the denominator."), null=True, blank=True)
    den_max_exclusive = models.IntegerField(_('denominator maximum exclusive'), help_text=_("Enter the maximum (exclusive) value for the denominator."), null=True, blank=True)
    # Optional units for the numerator, denominator and the ratio itself.
    num_units = models.ForeignKey(Units, verbose_name=_('units'), related_name='%(class)s_related_num_units', null=True, blank=True, help_text=_("Choose a units of measurement of this concept."), on_delete=models.SET_NULL,)
    den_units = models.ForeignKey(Units, verbose_name=_('units'), related_name='%(class)s_related_den_units', null=True, blank=True, help_text=_("Choose a units of measurement of this concept."), on_delete=models.SET_NULL,)
    ratio_units = models.ForeignKey(Units, verbose_name=_('units'), related_name='%(class)s_related_ratio_units', null=True, blank=True, help_text=_("Choose a units of measurement of this concept."), on_delete=models.SET_NULL,)
    def publish(self, request):
        """Generate schema code for this ratio; return a (message, level) tuple.

        Never overwrites existing generated code.
        """
        if self.schema_code == '':
            msg = publish_XdRatio(self)
            # Generated schemas shorter than ~100 chars are treated as failed
            # generations and rolled back.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "Ratio"
        ordering = ['project', 'label']
class XdTemporal(XdOrdered):
    """
    Class defining the concept of date and time types.
    Must be constrained in DMs to be one or more of the allowed types.
    This gives the modeller the ability to optionally allow partial dates at run time.
    If one of the duration types is selected then no other type is allowed.
    All types are considered optional (minOccurs='0') by default.
    If you need to make on mandatory then an assert statement is required and only one type should be allowed.
    """
    # Each flag permits one XML Schema temporal representation in the output.
    allow_duration = models.BooleanField(_('allow duration'), default=False, help_text=_("If Duration is allowed, no other types will be permitted."))
    allow_date = models.BooleanField(_('allow date'), default=False, help_text=_('Check this box if complete date entry is allowed.'))
    allow_time = models.BooleanField(_('allow time'), default=False, help_text=_('Check this box if time only entry is allowed.'))
    allow_datetime = models.BooleanField(_('allow datetime'), default=False, help_text=_('Check this box if complete dates and times are allowed.'))
    allow_day = models.BooleanField(_('allow day'), default=False, help_text=_('Check this box if day only is allowed.'))
    allow_month = models.BooleanField(_('allow month'), default=False, help_text=_('Check this box if month only is allowed.'))
    allow_year = models.BooleanField(_('allow year'), default=False, help_text=_('Check this box if year only entry is allowed.'))
    allow_year_month = models.BooleanField(_('allow year month'), default=False, help_text=_('Check this box if combination of years and months are allowed.'))
    allow_month_day = models.BooleanField(_('allow month day'), default=False, help_text=_('Check this box if combination of months and days are allowed.'))
    def publish(self, request):
        """Generate schema code for this temporal type; return a (message, level) tuple.

        Never overwrites existing generated code.
        """
        if self.schema_code == '':
            msg = publish_XdTemporal(self)
            # Generated schemas shorter than ~100 chars are treated as failed
            # generations and rolled back.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg
    class Meta:
        verbose_name = "Temporal"
        ordering = ['project', 'label']
class Party(Common):
    """
    A proxy description of a party, including an optional link to data for
    this party held in a demographic or other identity management system.
    """
    details = models.ForeignKey('Cluster', verbose_name=_('details'), related_name='%(class)s_related', null=True, blank=True, help_text=_('A Cluster structure that defines the details of this Party.'), on_delete=models.SET_NULL,)
    external_ref = models.ManyToManyField(XdLink, verbose_name=_('external reference'), help_text=_("Optional XdLink(s) that point to a description of this Party in other services."), blank=True, related_name='%(class)s_related')

    class Meta:
        verbose_name = "Party"
        ordering = ['project', 'label']

    def publish(self, request):
        """Generate the schema code; return a (message, level) tuple."""
        # Existing generated code is never overwritten.
        if self.schema_code != '':
            return (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        msg = publish_Party(self)
        # Generated schemas shorter than ~100 chars count as failures and
        # are rolled back.
        if len(self.schema_code) > 100:
            self.published = True
        else:
            self.published = False
            self.schema_code = ''
        self.save()
        return msg
class Audit(Common):
    """
    Audit provides a mechanism to identifiy the who/where/when tracking of instances as they move from system to system.
    """
    # Which system handled the item.
    system_id = models.ForeignKey(XdString, verbose_name=_('system id'), null=True, blank=True, related_name='%(class)s_related', help_text=_('Identifier of the system which handled the information item.'), on_delete=models.SET_NULL,)
    # Who handled the item (created/committed/forwarded/...).
    system_user = models.ForeignKey(Party, verbose_name=_('system user'), null=True, blank=True, related_name='%(class)s_related', help_text=_('A model for user(s) who created, committed, forwarded or otherwise handled the item.'), on_delete=models.SET_NULL,)
    # Where the item was handled.
    location = models.ForeignKey('Cluster', verbose_name=_('location'), related_name='%(class)s_related', null=True, blank=True, help_text=_('A Cluster for location information.'), on_delete=models.SET_NULL,)

    def publish(self, request):
        """Generate and persist schema code for this Audit via publish_Audit().

        Returns an outcome message; a (text, messages.ERROR) tuple when the
        component already has code.
        """
        if self.schema_code == '':
            msg = publish_Audit(self)
            # A schema_code length > 100 is the heuristic for success.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg

    class Meta:
        verbose_name = "Audit"
        verbose_name_plural = "Audits"
        ordering = ['project', 'label']
class Attestation(Common):
    """
    Record an attestation by a party of item(s) of record content.
    The type of attestation is recorded by the reason attribute, which my be coded from a vocabulary.
    """
    # The recorded view being attested.
    view = models.ForeignKey(XdFile, verbose_name=_('attested view'), related_name='attested_view', null=True, blank=True, help_text=_('Select a model for the recorded view that is being attested.'), on_delete=models.SET_NULL,)
    # Proof of attestation (e.g. a GPG signature).
    proof = models.ForeignKey(XdFile, verbose_name=_('proof'), related_name='proof', null=True, blank=True, help_text=_('Select a model for the proof of attestation such as an GPG signature.'), on_delete=models.SET_NULL,)
    # Why the attestation was made; may come from a vocabulary.
    reason = models.ForeignKey(XdString, verbose_name=_('reason'), related_name='%(class)s_related', null=True, blank=True, help_text=_('Select a model for the reason.'), on_delete=models.SET_NULL,)
    # Who committed the attestation.
    committer = models.ForeignKey(Party, verbose_name=_('committer'), related_name='%(class)s_related_committer', null=True, blank=True, help_text=_('The Party that commited the Attestation.'), on_delete=models.SET_NULL,)

    def publish(self, request):
        """Generate and persist schema code via publish_Attestation().

        Returns an outcome message; a (text, messages.ERROR) tuple when the
        component already has code.
        """
        if self.schema_code == '':
            msg = publish_Attestation(self)
            # A schema_code length > 100 is the heuristic for success.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg

    class Meta:
        verbose_name = "Attestation"
        ordering = ['project', 'label']
class Participation(Common):
    """
    Model of a participation of a Party (any Actor or Role) in an activity. Used to represent any participation of a Party in some activity,
    which is not explicitly in the model, e.g. assisting nurse, ambulance service, etc. Can be used to record past or future participations.
    """
    # The participating Party (with optional demographic-system link).
    performer = models.ForeignKey(Party, verbose_name='Performer', related_name='%(class)s_related_performer', null=True, help_text=_('The Party instance and possibly demographic system link of the party participating in the activity.'), on_delete=models.SET_NULL,)
    # The function/role of the Party within this participation.
    function = models.ForeignKey(XdString, related_name='%(class)s_related', null=True, help_text=_('The function of the Party in this participation (note that a given party might participate in more than one way in a particular activity). In some applications this might be called a Role.'), on_delete=models.SET_NULL,)
    # How the performer interacted (present, by telephone, by email, ...).
    mode = models.ForeignKey(XdString, related_name='%(class)s_related_mode', null=True, help_text=_('The mode of the performer / activity interaction, e.g. present, by telephone, by email etc. If the participation is by device or software it may contain a protocol standard or interface definition.'), on_delete=models.SET_NULL,)

    def publish(self, request):
        """Generate and persist schema code via publish_Participation().

        Returns an outcome message; a (text, messages.ERROR) tuple when the
        component already has code.
        """
        if self.schema_code == '':
            msg = publish_Participation(self)
            # A schema_code length > 100 is the heuristic for success.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg

    class Meta:
        verbose_name = "Participation"
        ordering = ['project', 'label']
class Cluster(Common):
    """
    The grouping structure of Item, which may contain further instances of Item subclasses, in an ordered list. This
    provides the root Item for potentially very complex structures.
    """
    # Nested Clusters.  Self-containment is disallowed: per the help text a
    # Cluster selecting itself is ignored (presumably at publish time --
    # TODO(review): confirm where the self-reference is filtered out).
    clusters = models.ManyToManyField('Cluster', help_text="Select zero or more Clusters to include in this Cluster. You cannot put a Cluster inside itself, it will be ignored if you select itself.", blank=True)
    # One ManyToMany per extended-datatype (Xd*) component that may appear
    # inside this Cluster.
    xdboolean = models.ManyToManyField(XdBoolean, verbose_name='Boolean', related_name='%(class)s_related', help_text="Select zero or more booleans to include in this Cluster.", blank=True)
    xdlink = models.ManyToManyField(XdLink, verbose_name='Link', related_name='%(class)s_related', help_text="Select zero or more uris to include in this Cluster.", blank=True)
    xdstring = models.ManyToManyField(XdString, verbose_name='String', related_name='%(class)s_related', help_text="Select zero or more strings to include in this Cluster.", blank=True)
    xdfile = models.ManyToManyField(XdFile, verbose_name='File', related_name='%(class)s_related', help_text="Select zero or more media items to include in this Cluster.", blank=True)
    xdordinal = models.ManyToManyField(XdOrdinal, verbose_name='Ordinal', related_name='%(class)s_related', help_text="Select zero or more ordinals to include in this Cluster.", blank=True)
    xdcount = models.ManyToManyField(XdCount, verbose_name='Count', related_name='%(class)s_related', help_text="Select zero or more counts to include in this Cluster.", blank=True)
    xdquantity = models.ManyToManyField(XdQuantity, verbose_name='Quantity', related_name='%(class)s_related', help_text="Select zero or more quantity items to include in this Cluster.", blank=True)
    xdfloat = models.ManyToManyField(XdFloat, verbose_name='Float', related_name='%(class)s_related', help_text="Select zero or more floats to include in this Cluster.", blank=True)
    xdratio = models.ManyToManyField(XdRatio, verbose_name='Ratio', related_name='%(class)s_related', help_text="Select zero or more ratios to include in this Cluster.", blank=True)
    xdtemporal = models.ManyToManyField(XdTemporal, verbose_name='Temporal', related_name='%(class)s_related', help_text="Select zero or more temporal items to include in this Cluster.", blank=True)

    def publish(self, request):
        """Generate and persist schema code via publish_Cluster().

        Returns an outcome message; a (text, messages.ERROR) tuple when the
        component already has code.
        """
        if self.schema_code == '':
            msg = publish_Cluster(self)
            # A schema_code length > 100 is the heuristic for success.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.label.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg

    class Meta:
        verbose_name = "Cluster"
        ordering = ['label']
class DM(models.Model):
    """
    This is the root node of a Data Model.

    Holds the Dublin Core metadata, the structural Cluster and the generated
    artefacts (XSD/XML/JSON/HTML/SHA1/zip files) for one Data Model.
    """
    # --- Identity and bookkeeping ---
    ct_id = models.CharField(_("CUID"), max_length=40, default=get_cuid, editable=False, unique=True, help_text=_('The unique identifier for the DM.'))
    created = models.DateTimeField(_('Created'), auto_now_add=True, help_text=_('The dateTime that the MC was created.'))
    updated = models.DateTimeField(_('Last updated'), auto_now=True, help_text=_("Last update."))
    creator = models.ForeignKey(Modeler, verbose_name='Creator', related_name='%(class)s_related_creator', on_delete=models.CASCADE,)
    edited_by = models.ForeignKey(Modeler, verbose_name='Last Edited By', related_name='%(class)s_related_edited_by', on_delete=models.CASCADE,)
    published = models.BooleanField(_("Generated"), default=False, help_text=_("Once this <em>Generated</em> box has been checked, DM generation has been completed and no further edits are allowed."))
    project = models.ForeignKey(Project, verbose_name=_("Project name"), to_field="prj_name", help_text=_('Choose a Project for this Data Model (DM)'), on_delete=models.CASCADE,)
    about = models.CharField(_('About'), default="http://tools.s3model.com/dmlib/", max_length=255, help_text=_("The URL to the DM after publication. The DM ID will be added after the trailing slash in the format of 'dm-{dm_id}.xsd' This provides a full path and filename for the dm as a unique identifier."), blank=True)
    title = models.CharField(_('Title'), unique=True, max_length=255, help_text=_("Enter the name of this Data Model (DM)."))
    author = models.ForeignKey(Modeler, verbose_name=_("Author"), help_text=_("Select the author of the DM"), related_name='%(class)s_related_author', blank=True, on_delete=models.CASCADE,)
    contrib = models.ManyToManyField(Modeler, verbose_name=_("Contributors"), help_text=_("Select the contributors (if any) to this DM"), related_name='%(class)s_related_contrib', blank=True)
    # --- Dublin Core metadata ---
    dc_subject = models.CharField(_('DC Subject'), max_length=255, default='', help_text=_("Enter a semi-colon separated list of keywords. Usually MeSH terms"), blank=True)
    source = models.CharField(_('DC Source'), max_length=255, default='', help_text=_("Enter the name of a document or a URL to a supporting source."), blank=True)
    rights = models.CharField(_('DC Rights'), max_length=255, help_text=_("Enter the rights or license statement."), default="CC-BY http://creativecommons.org/licenses/by/3.0/", blank=True)
    relation = models.CharField(_('DC Relation'), max_length=255, help_text=_("Enter the relationship to another Data Model (DM), if applicable."), default="None", blank=True)
    coverage = models.CharField(_('DC Coverage'), max_length=255, help_text=_("Enter the demographic, geographical or political coverage."), default="Universal", blank=True)
    dc_type = models.CharField(_('DC Type'), max_length=110, editable=False, default="S3Model Data Model (DM)")
    identifier = models.CharField(_('DC Identifier'), max_length=110, editable=False, default="dm-")
    description = models.TextField(_('Description'), help_text=_("Enter a general description of the purpose of this DM."), blank=True)
    publisher = models.CharField(_('DC Publisher'), max_length=255, help_text=_("Enter the name of the publisher/copyright holder."), blank=True)
    pub_date = models.DateTimeField(verbose_name=_("date of publication"), auto_now=True, help_text=_("Date of publication."), blank=True)
    dc_format = models.CharField(_('DC Format'), max_length=8, editable=False, default="text/xml", help_text=_('The format of the data. Default is text/xml for DMs.'))
    dc_language = models.CharField(_("DC Language"), max_length=10, default="en-US", choices=LANGUAGES, help_text=_('The written language of the DM.'), blank=True)
    language = models.CharField(_("Language"), max_length=40, choices=LANGUAGES, blank=True, default='en-US', help_text=_('Choose the language of this Data Model.'))
    encoding = models.CharField(_("Encoding"), max_length=10, default='utf-8', help_text="<a href='http://www.iana.org/assignments/character-sets/character-sets.txt'>List of encoding types at IANA.</a>")
    state = models.CharField(_('Current State'), max_length=110, blank=True, help_text=_('The current state according to the state machine / workflow engine identified in workflow_id. You may enter a default/start state here.'))
    # --- Model structure and participants ---
    data = models.ForeignKey(Cluster, verbose_name=_('Model Data'), related_name='%(class)s_related', help_text=_("You must select the Cluster that is the structure for this model."), null=True, on_delete=models.CASCADE,)
    subject = models.ForeignKey(Party, verbose_name=_('Model Subject'), related_name='%(class)s_related_subject', null=True, blank=True, help_text=_('Refers to the subject component of the record for anonymous or identified reference.'), on_delete=models.SET_NULL,)
    provider = models.ForeignKey(Party, verbose_name=_('Model Provider'), related_name='%(class)s_related_provider', null=True, blank=True, help_text=_('Select a Party componet that models the provider of the activity in this model.'), on_delete=models.SET_NULL,)
    participations = models.ManyToManyField(Participation, verbose_name=_('Other Participations'), blank=True, help_text=_('Select any Participations components that describe additional entities involved in this model.'))
    protocol = models.ForeignKey(XdString, null=True, verbose_name=_('Protocol ID'), blank=True, help_text=_('Optional external identifier of protocol used to create this Entry. This could be a clinical guideline, an operations protocol,etc.'), on_delete=models.SET_NULL,)
    workflow = models.ForeignKey(XdLink, null=True, verbose_name=_('Workflow ID'), blank=True, help_text=_('Identifier of externally held workflow engine (state machine) data for this workflow execution.'), on_delete=models.SET_NULL,)
    acs = models.ForeignKey(XdLink, null=True, verbose_name=_('ACS ID'), blank=True, related_name='%(class)s_access', help_text=_('Identifier of externally held access control system. This URI can be an ontology, vocabulary or descriptive document; URI link.'), on_delete=models.SET_NULL,)
    audit = models.ManyToManyField('Audit', verbose_name=_('Audit'), blank=True, help_text=_('Audit structure to provide audit trail tracking of information.'))
    attestation = models.ForeignKey(Attestation, verbose_name=_('Attestation'), null=True, blank=True, help_text=_('A model that allows an attestation that the data is correct.'), on_delete=models.SET_NULL,)
    links = models.ManyToManyField(XdLink, verbose_name=_('Links'), blank=True, related_name='%(class)s_related_links', default=None, help_text=_('Can be used to establish ad-hoc links between concepts.'))
    asserts = models.TextField(_("asserts"), help_text="XPath assert statements. See the documentation for details. One per line.", blank=True)
    pred_obj = models.ManyToManyField(PredObj, verbose_name=_("RDF Object"), help_text=_("Select or create a new set of Predicate Object combinations as semantic links."), blank=True)
    # --- Generated artefacts (written by the tools, not by users) ---
    schema_code = models.TextField(_("Schema Code"), help_text="This is only writable from the tools, not via user input. It contains the code required for each component to create an entry in a DM.", default='', blank=True, null=True, editable=True)
    doc_code = models.TextField(_("Documentation Code"), help_text="This is only writable from the tools, not via user input. It contains the HTML code to document the DM.", null=True, blank=True)
    app_code = models.TextField(_("App Code"), help_text="This is only writable from the tools, not via user input. It contains the code required to create the User App.", blank=True, null=True, default='')
    xsd_file = models.FileField("DM XSD Schema", upload_to=dm_folder, max_length=2048, blank=True, null=True)
    xml_file = models.FileField("DM XML Instance", upload_to=dm_folder, max_length=2048, blank=True, null=True)
    json_file = models.FileField("DM JSON Instance", upload_to=dm_folder, max_length=2048, blank=True, null=True)
    html_file = models.FileField("DM HTML Form", upload_to=dm_folder, max_length=2048, blank=True, null=True)
    sha1_file = models.FileField("DM SHA1", upload_to=dm_folder, max_length=2048, blank=True, null=True)
    zip_file = models.FileField("DM Zip", upload_to='zips/', max_length=2048, blank=True, null=True)

    def __init__(self, *args, **kwargs):
        """Derive the DC identifier ('dm-<CUID>') whenever an instance is built."""
        super(DM, self).__init__(*args, **kwargs)
        if self.ct_id:
            self.identifier = "dm-" + str(self.ct_id)

    def __str__(self):
        return self.project.prj_name + ' : ' + self.title

    def publish(self, request):
        """Generate the DM artefacts via publish_DM() unless code already exists.

        Copies (titles containing '(***COPY***)') are refused.  Returns a
        message describing the outcome; error paths return a
        (text, messages.ERROR) tuple.
        """
        if "(***COPY***)" in self.title:  # skip publishing a copy.
            msg = (self.title + " --Cannot publish a copy until it is edited.", messages.ERROR)
            return msg
        # schema_code is nullable here (unlike the component models), so test
        # both the empty string and None.  Fixed '== None' to the idiomatic
        # identity test 'is None'.
        if self.schema_code == '' or self.schema_code is None:
            msg = publish_DM(self)
            # publish_DM() is expected to fill in schema_code; a length > 100
            # is used as the heuristic that real code was generated.
            if len(self.schema_code) > 100:
                self.published = True
                self.save()
            else:
                self.published = False
                self.schema_code = ''
                self.save()
        else:
            msg = (self.title.strip() + ' was not published because code already exists.', messages.ERROR)
        return msg

    class Meta:
        verbose_name = "DM"
        ordering = ['project', 'title']
        indexes = [models.Index(fields=['project', 'title']),]
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
    def test_Valid(self):
        """Test that sorting works on a valid graph with one possible order."""
        dependencies = {
            "a": ["b", "c"],
            "b": [],
            "c": ["d"],
            "d": ["b"],
        }
        result = gyp.common.TopologicallySorted(
            dependencies.keys(), lambda node: tuple(dependencies[node])
        )
        self.assertEqual(result, ["a", "c", "d", "b"])

    def test_Cycle(self):
        """Test that an exception is thrown on a cyclic graph."""
        dependencies = {
            "a": ["b"],
            "b": ["c"],
            "c": ["d"],
            "d": ["a"],
        }
        self.assertRaises(
            gyp.common.CycleError,
            gyp.common.TopologicallySorted,
            dependencies.keys(),
            lambda node: tuple(dependencies[node]),
        )
class TestGetFlavor(unittest.TestCase):
    """Test that gyp.common.GetFlavor works as intended"""

    original_platform = ""

    def setUp(self):
        # Remember the real platform so each test can fake its own.
        self.original_platform = sys.platform

    def tearDown(self):
        sys.platform = self.original_platform

    def assertFlavor(self, expected, argument, param):
        # Fake sys.platform and check the flavor GetFlavor derives from it.
        sys.platform = argument
        self.assertEqual(expected, gyp.common.GetFlavor(param))

    def test_platform_default(self):
        cases = [
            ("freebsd", "freebsd9"),
            ("freebsd", "freebsd10"),
            ("openbsd", "openbsd5"),
            ("solaris", "sunos5"),
            ("solaris", "sunos"),
            ("linux", "linux2"),
            ("linux", "linux3"),
            ("linux", "linux"),
        ]
        for expected, platform in cases:
            self.assertFlavor(expected, platform, {})

    def test_param(self):
        self.assertFlavor("foobar", "linux2", {"flavor": "foobar"})
# Allow running this test module directly: python common_test.py
if __name__ == "__main__":
    unittest.main()
|
# slimDNS
# Simple, Lightweight Implementation of Multicast DNS
# Copyright 2018 Nicko van Someren
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# SPDX-License-Identifier: Apache-2.0
__version__ = "0.1.0"
__author__ = "Nicko van Someren"
__license__ = "Apache-2.0"
import sys
# CPython has no micropython const() optimisation hook, so define const()
# as a no-op to keep the declarations below portable.
if sys.implementation.name != "micropython":
    const = lambda x:x
import time
from select import select
# Prefer micropython's 'ustruct'; fall back to CPython's 'struct'.
# NOTE(review): the bare 'except:' is broader than needed; ImportError
# should suffice -- kept as-is for micropython compatibility.
try:
    from ustruct import pack_into, unpack_from
except:
    from struct import pack_into, unpack_from
import socket
# The biggest packet we will process
MAX_PACKET_SIZE = const(1024)
# Upper bound on the number of "-N" suffixed hostnames tried when the
# requested name is already in use.
MAX_NAME_SEARCH = const(20)
# DNS constants
_MDNS_ADDR = '224.0.0.251'
_MDNS_PORT = const(5353);
_DNS_TTL = const(2 * 60) # two minute default TTL
# DNS header flag bits (RFC 1035 section 4.1.1)
_FLAGS_QR_MASK = const(0x8000) # query response mask
_FLAGS_QR_QUERY = const(0x0000) # query
_FLAGS_QR_RESPONSE = const(0x8000) # response
_FLAGS_AA = const(0x0400) # Authoritative answer
# DNS record classes
_CLASS_IN = const(1)
_CLASS_ANY = const(255)
_CLASS_MASK = const(0x7FFF)
_CLASS_UNIQUE = const(0x8000)
# DNS record types
_TYPE_A = const(1)
_TYPE_PTR = const(12)
_TYPE_TXT = const(16)
_TYPE_AAAA = const(28)
_TYPE_SRV = const(33)
_TYPE_ANY = const(255)
def dotted_ip_to_bytes(ip):
    """Convert a dotted IPv4 address string into four bytes.

    Raises ValueError (with a useful message, unlike the original bare
    raise) if the string does not contain exactly four octets in 0-255.
    """
    octets = [int(part) for part in ip.split('.')]
    if len(octets) != 4 or any(not 0 <= o <= 255 for o in octets):
        raise ValueError("invalid dotted IPv4 address: %r" % (ip,))
    return bytes(octets)
def bytes_to_dotted_ip(a):
    """Render an iterable of four byte values as a dotted IPv4 string.

    No sanity checks are performed on the input.
    """
    parts = [str(octet) for octet in a]
    return ".".join(parts)
def check_name(n):
    """Normalize a name into a list of UTF-8 encoded label byte strings.

    Accepts either a dotted string (a trailing dot, i.e. a fully qualified
    name, is tolerated) or a list whose elements may be str or bytes.
    """
    if isinstance(n, str):
        labels = n.split(".")
        if labels[-1] == '':
            # Drop the empty label produced by a trailing dot.
            labels = labels[:-1]
        n = labels
    return [lbl.encode("UTF8") if isinstance(lbl, str) else lbl for lbl in n]
def skip_name_at(buf, o):
    """Return the offset just past the DNS name starting at offset o.

    A name is a sequence of length-prefixed labels terminated either by a
    zero length byte or by a two-byte compression pointer (top two bits set).
    """
    while True:
        length = buf[o]
        if length == 0:
            # Root label: one terminating byte.
            return o + 1
        if (length & 0xc0) == 0xc0:
            # Compression pointer: two bytes, and the name ends here.
            return o + 2
        # Ordinary label: length byte plus the label itself.
        o += length + 1
# Test if two possibly compressed names are equal
def compare_packed_names(buf, o, packed_name, po=0):
    """Return True if the (possibly compressed) DNS name in buf at offset o
    equals the one in packed_name at offset po."""
    while packed_name[po] != 0:
        # Follow any chain of compression pointers (top two bits of the
        # length byte set) in buf; the pointer target is the low 14 bits.
        while buf[o] & 0xc0:
            (o,) = unpack_from("!H", buf, o)
            o &= 0x3fff
        # ... and likewise in packed_name.
        while packed_name[po] & 0xc0:
            (po,) = unpack_from("!H", packed_name, po)
            po &= 0x3fff
        # Compare one label including its length byte.
        l1 = buf[o] +1
        l2 = packed_name[po] +1
        if l1 != l2 or buf[o:o+l1] != packed_name[po:po+l2]:
            return False
        o += l1
        po += l2
    # packed_name is exhausted; the names match only if buf's name also
    # ends here with a zero-length root label.
    return buf[o] == 0
def name_packed_len(name):
    """Size in bytes of *name* packed without compression.

    One length byte per label plus the label bytes, plus the final
    zero-length root label.
    """
    total = 1  # the terminating root label
    for label in name:
        total += len(label) + 1
    return total
def pack_name(buf, name):
    """Write *name* (a list of byte-string labels) at the start of buf.

    The name is written uncompressed and terminated with a zero-length
    root label; buf must be at least name_packed_len(name) bytes.
    """
    offset = 0
    for label in name:
        size = len(label)
        buf[offset] = size
        buf[offset + 1:offset + 1 + size] = label
        offset += size + 1
    buf[offset] = 0
def pack_question(name, qtype, qclass):
    """Return a freshly packed DNS question section as a memoryview.

    Layout: packed name followed by 16-bit type and class fields.
    """
    labels = check_name(name)
    header_len = name_packed_len(labels)
    out = bytearray(header_len + 4)
    pack_name(out, labels)
    pack_into("!HH", out, header_len, qtype, qclass)
    return memoryview(out)
def pack_answer(name, rtype, rclass, ttl, rdata):
    """Return a freshly packed DNS resource record as a memoryview.

    Layout: packed name, then 16-bit type and class, 32-bit TTL,
    16-bit rdata length, and finally the rdata bytes.
    """
    labels = check_name(name)
    header_len = name_packed_len(labels)
    out = bytearray(header_len + 10 + len(rdata))
    pack_name(out, labels)
    pack_into("!HHIH", out, header_len, rtype, rclass, ttl, len(rdata))
    out[header_len + 10:] = rdata
    return memoryview(out)
def skip_question(buf, o):
    """Return the offset just past the question at offset o.

    A question is a name followed by fixed 2-byte type and class fields.
    """
    return skip_name_at(buf, o) + 4
def skip_answer(buf, o):
    """Return the offset just past the resource record at offset o.

    A record is a name, a 10-byte fixed part (type, class, TTL, rdlength)
    and rdlength bytes of rdata; rdlength sits 8 bytes into the fixed part.
    """
    fixed = skip_name_at(buf, o)
    (rdlen,) = unpack_from("!H", buf, fixed + 8)
    return fixed + 10 + rdlen
def compare_q_and_a(q_buf, q_offset, a_buf, a_offset=0):
    """Test whether a question matches an answer.

    This also works for comparing a "known answer" in a packet to a local
    answer.  The comparison is asymmetric to the extent that the question
    may have a type or class of ANY.
    """
    if not compare_packed_names(q_buf, q_offset, a_buf, a_offset):
        return False
    (q_type, q_class) = unpack_from("!HH", q_buf, skip_name_at(q_buf, q_offset))
    (r_type, r_class) = unpack_from("!HH", a_buf, skip_name_at(a_buf, a_offset))
    if not (q_type == r_type or q_type == _TYPE_ANY):
        return False
    q_class &= _CLASS_MASK
    r_class &= _CLASS_MASK
    # BUG FIX: the class wildcard is _CLASS_ANY; the original compared the
    # class against _TYPE_ANY, which only worked because both are 255.
    return (q_class == r_class or q_class == _CLASS_ANY)
# The main SlimDNSServer class
class SlimDNSServer:
    """A minimal multicast-DNS responder and resolver.

    Answers mDNS queries matching the records in self.adverts and can
    issue its own one-shot queries (used for hostname resolution and for
    probing that a hostname is vacant before advertising it).
    """

    def __init__(self, local_addr, hostname=None):
        # If a hostname is given we try to register it immediately
        self.local_addr = local_addr
        self.sock = self._make_socket()
        self.sock.bind(('', _MDNS_PORT))
        self.adverts = []
        self.hostname = None
        self._reply_buffer = None
        self._pending_question = None
        self.answered = False
        if hostname:
            self.advertise_hostname(hostname)

    def _make_socket(self):
        # Note that on devices with a more complete UDP/IP stack it
        # might be necessary to set more options on the socket,
        # including things like setting the multicast TTL and enabling
        # multicast on the interface.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        member_info = dotted_ip_to_bytes(_MDNS_ADDR) + dotted_ip_to_bytes(self.local_addr)
        s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, member_info)
        return s

    def advertise_hostname(self, hostname, find_vacant=True):
        """Advertise our own IP address under the given hostname.

        If the hostname is taken and find_vacant is true, numeric suffixes
        ("name-1", "name-2", ...) are tried up to MAX_NAME_SEARCH times.
        """
        hostname = check_name(hostname)
        n = len(hostname)
        if n == 1:
            hostname.append(b"local")
        elif n == 0 or n > 2 or hostname[1] != b'local':
            raise ValueError("hostname should be a single name component")
        ip_bytes = dotted_ip_to_bytes(self.local_addr)
        basename = hostname[0]
        for i in range(MAX_NAME_SEARCH):
            if i != 0:
                # BUG FIX: the suffix must be encoded; the original
                # concatenated bytes + str, which raises TypeError.
                hostname[0] = basename + b"-" + str(i).encode("UTF8")
            addr = self.resolve_mdns_address(hostname, True)
            # Some helpful machine might know us and send us our own address
            if not addr or addr == ip_bytes:
                break
            # Even if searching we have to give up eventually
            if not find_vacant or i == MAX_NAME_SEARCH - 1:
                raise ValueError("Name in use")
        A_record = pack_answer(hostname, _TYPE_A, _CLASS_IN, _DNS_TTL, ip_bytes)
        self.adverts.append(A_record)
        self.hostname = hostname
        # We could add a reverse PTR record here.
        # We don't, BIWIOMS

    def process_packet(self, buf, addr):
        """Process a single multicast DNS packet received from addr."""
        (pkt_id, flags, qst_count, ans_count, _, _) = unpack_from("!HHHHHH", buf, 0)
        o = 12
        matches = []
        reply_len = 12
        # Collect every advertised record that matches a question.
        for i in range(qst_count):
            for a in self.adverts:
                if compare_q_and_a(buf, o, a):
                    matches.append(a)
                    reply_len += len(a)
            o = skip_question(buf, o)
        # In theory we could do known answer suppression here
        # We don't, BIWIOMS
        # Feed any answers to an outstanding question of our own.
        if self._pending_question:
            for i in range(ans_count):
                if compare_q_and_a(self._pending_question, 0, buf, o):
                    if self._answer_callback(buf[o:skip_answer(buf, o)]):
                        self.answered = True
                o = skip_answer(buf, o)
        if not matches:
            return
        # We could check for duplicates in the answers (which is
        # possible) but we don't, BIWIOMS
        # Since Micropython sockets don't currently support
        # recvfrom_into() we need to have our own buffer for the
        # reply, even though we are now done with the receiving buffer
        if not self._reply_buffer or len(self._reply_buffer) < reply_len:
            self._reply_buffer = memoryview(bytearray(reply_len))
        buf = self._reply_buffer
        pack_into("!HHHHHH", buf, 0,
                  pkt_id, _FLAGS_QR_RESPONSE | _FLAGS_AA,
                  0, len(matches), 0, 0)
        o = 12
        for a in matches:
            l = len(a)
            buf[o:o+l] = a
            o += l
        # We fake the handling of unicast replies.  If the packet came
        # from the mDNS port we multicast the reply, otherwise we
        # unicast it back to the sender.
        # BUG FIX: the source *port* is addr[1]; the original compared
        # addr[0] (the IP string) against _MDNS_PORT, so this condition
        # was never true and every reply was unicast.
        dest = (_MDNS_ADDR, _MDNS_PORT) if addr[1] == _MDNS_PORT else addr
        self.sock.sendto(buf[:o], dest)

    def process_waiting_packets(self):
        # Handle all the packets that can be read immediately and
        # return as soon as none are waiting
        while True:
            readers, _, _ = select([self.sock], [], [], 0)
            if not readers:
                break
            buf, addr = self.sock.recvfrom(MAX_PACKET_SIZE)
            # Ignore our own transmissions.
            if buf and addr[0] != self.local_addr:
                try:
                    self.process_packet(memoryview(buf), addr)
                except IndexError:
                    print("Index error processing packet; probably malformed data")
                except Exception as e:
                    print("Error processing packet: {}".format(e))

    def run_forever(self):
        # Only really useful once we have stable thread support
        while True:
            readers, _, _ = select([self.sock], [], [], None)
            self.process_waiting_packets()

    def handle_question(self, q, answer_callback, fast=False, retry_count=3):
        """Send out a (packed) question and route matching replies.

        Matching answers are passed to answer_callback; we stop after
        retry_count send/timeout rounds, or sooner if the callback
        returns True.
        """
        p = bytearray(len(q) + 12)
        # Header: id=1, flags=0 (query), 1 question, 0 answer/authority/
        # additional records.
        # BUG FIX: the original passed seven values for the six "H" slots
        # of "!HHHHHH", which raises struct.error on CPython.
        pack_into("!HHHHHH", p, 0, 1, 0, 1, 0, 0, 0)
        p[12:] = q
        self._pending_question = q
        self._answer_callback = answer_callback
        self.answered = False
        try:
            for i in range(retry_count):
                if self.answered:
                    break
                self.sock.sendto(p, (_MDNS_ADDR, _MDNS_PORT))
                timeout = time.ticks_ms() + (250 if fast else 1000)
                while not self.answered:
                    sel_time = time.ticks_diff(timeout, time.ticks_ms())
                    if sel_time <= 0:
                        break
                    (rr, _, _) = select([self.sock], [], [], sel_time / 1000.0)
                    if rr:
                        self.process_waiting_packets()
        finally:
            # Always clear the pending state, even if a callback raised.
            self._pending_question = None
            self._answer_callback = None

    def resolve_mdns_address(self, hostname, fast=False):
        """Look up an IPv4 address for a hostname using mDNS.

        Returns the four address bytes, or None if no answer arrived.
        """
        q = pack_question(hostname, _TYPE_A, _CLASS_IN)
        answer = []

        def _answer_handler(a):
            # The A record's 4-byte address follows the name and the
            # 10-byte fixed record header.
            addr_offset = skip_name_at(a, 0) + 10
            answer.append(a[addr_offset:addr_offset + 4])
            return True

        self.handle_question(q, _answer_handler, fast)
        return bytes(answer[0]) if answer else None
def test():
    """Smoke test: advertise 'micropython.local' on the station interface.

    Micropython-only (imports the 'network' module); runs until interrupted.
    """
    import network
    sta_if = network.WLAN(network.STA_IF)
    local_addr = sta_if.ifconfig()[0]
    server = SlimDNSServer(local_addr, "micropython")
    server.run_forever()
|
import bm
import utils
@bm.register
class FlaskSimple(bm.Scenario):
    """Benchmark scenario timing plain GET requests against a Flask app."""

    # Whether the tracer / profiler are enabled in the server under test.
    tracer_enabled = bm.var(type=bool)
    profiler_enabled = bm.var(type=bool)

    def run(self):
        # utils.server() starts the app under test and yields a callable
        # that performs one request; the yielded closure is the timed body.
        with utils.server(self) as get_response:
            def _(loops):
                for _ in range(loops):
                    get_response()
            yield _
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 常量与宏
Case Name : SESSION_USER有效性测试
Description :
1.查看SESSION_USER
2.创建普通用户
3.切换用户
4.查看SESSION_USER
5.删除用户
Expect :
1.显示当前gsql连接的用户
2.创建普通用户成功
3.切换用户成功
4.显示pili
5.删除用户成功
History :
"""
import sys
import unittest
from yat.test import macro
from yat.test import Node
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
# Shared helpers for this test case: a logger, a shell wrapper bound to the
# primary database user, and the expected-message constants.
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
class ConstantsMacros(unittest.TestCase):
    """Validity test for SESSION_USER (Opengauss_Function_DDL_Constants_Macros_Case0011).

    Steps: show SESSION_USER, create an ordinary user 'pili', reconnect as
    that user, check SESSION_USER reports 'pili', then drop the user.
    """

    def setUp(self):
        # Log case start and prepare a handle on the primary database node.
        logger.info(
            '------------------------Opengauss_Function_DDL_Constants_Macros_Case0011开始执行-----------------------------')
        self.userNode = Node('dbuser')
        self.DB_ENV_PATH = macro.DB_ENV_PATH

    def test_session_user(self):
        # Steps 1-2: show SESSION_USER and (re)create the test user 'pili'.
        sql_cmd1 = commonsh.execut_db_sql(f'''select SESSION_USER;
        drop user if exists pili;
        create user pili password '{macro.COMMON_PASSWD}';''')
        logger.info(sql_cmd1)
        self.res = sql_cmd1.splitlines()[-2].strip()
        self.assertIn(constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd1)
        # Steps 3-4: reconnect with gsql as 'pili' and check SESSION_USER.
        sql_cmd2 = '''select SESSION_USER;'''
        excute_cmd1 = f'''
        source {self.DB_ENV_PATH};
        gsql -d {self.userNode.db_name} -p {self.userNode.db_port} -U pili -W '{macro.COMMON_PASSWD}' -c "{sql_cmd2}"
        '''
        logger.info(excute_cmd1)
        msg1 = self.userNode.sh(excute_cmd1).result()
        logger.info(msg1)
        self.assertIn('pili', msg1)

    # Clean up the environment
    def tearDown(self):
        logger.info('----------this is teardown-------')
        # Step 5: drop the test user.
        sql_cmd3 = commonsh.execut_db_sql('''drop user if exists pili;''')
        logger.info(sql_cmd3)
        logger.info(
            '------------------------Opengauss_Function_DDL_Constants_Macros_Case0011执行结束--------------------------')
|
import datetime
from sqlalchemy import *
from migrate import *
from sqlalchemy.databases import mysql
# sqlalchemy-migrate injects 'migrate_engine' into the module namespace when
# it runs this versioned script.
# NOTE(review): newer sqlalchemy-migrate passes the engine as an argument to
# upgrade(migrate_engine)/downgrade(migrate_engine) instead -- confirm the
# installed version supports the implicit-global style used here.
metadata = MetaData(migrate_engine)
# New tables
tag_table = Table('tag', metadata,
    Column('id', mysql.MSBigInteger(unsigned=True), autoincrement=True, primary_key=True, nullable=False),
    Column('entry_id', mysql.MSBigInteger(unsigned=True), nullable=False),
    Column('keyword', VARCHAR(64), server_default="", nullable=False),
    # The following column is the lowercased version of the tag
    Column('lower', VARCHAR(64), server_default="", nullable=False)
    # Note: SQLAlchemy doesn't seem to have a way to create a current_timestamp col
    # So see upgrade script 8 where we do it with raw sql.
    #Column('created', TIMESTAMP, default='current_timestamp')
)
def upgrade():
    """Create the 'tag' table (schema version upgrade)."""
    tag_table.create()
def downgrade():
    """Drop the 'tag' table (schema version rollback)."""
    tag_table.drop()
|
import os
import pytest
import yaml
import numpy as np
import pandas as pd
from collections import namedtuple
from datetime import datetime, timedelta, date
from unittest import mock
from prophet import Prophet
import mlflow
import mlflow.prophet
import mlflow.utils
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.models.utils import _read_example
from mlflow.models import infer_signature, Model
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import
from tests.helper_functions import (
_compare_conda_env_requirements,
_assert_pip_requirements,
pyfunc_serve_and_score_model,
)
class DataGeneration:
    """Synthetic time-series generator for the Prophet flavor tests.

    Produces a two-column DataFrame (date column + target column) combining a
    linear trend, lognormal noise, a polynomial drift, and a sinusoidal
    seasonal component; optionally injects a level shift partway through.
    """

    def __init__(self, **kwargs):
        self.shift = kwargs["shift"]
        # FIX: parse the month with %m. The original format "%Y-%M-%d" used %M
        # (minutes), so e.g. "2011-07-25" silently parsed as Jan 25 00:07.
        self.start = datetime.strptime(kwargs["start"], "%Y-%m-%d")
        self.size = kwargs["size"]
        self.date_field = kwargs["date_field"]
        self.target_field = kwargs["target_field"]
        self.seasonal_period = kwargs["seasonal_period"]
        self.seasonal_freq = kwargs["seasonal_freq"]
        # Fixed seed so the generated series is reproducible across runs.
        np.random.seed(42)

    def _period_gen(self):
        # One seasonal cycle, tiled to cover the whole series and trimmed to size.
        period = np.sin(np.arange(0, self.seasonal_period, self.seasonal_freq)) * 50 + 50
        return np.tile(
            period, int(np.ceil(self.size / (self.seasonal_period / self.seasonal_freq)))
        )[: self.size]

    def _generate_raw(self):
        # Lognormal noise + mild polynomial drift + linear trend + seasonality.
        base = np.random.lognormal(mean=2.0, sigma=0.92, size=self.size)
        seasonal = [
            np.polyval([-5.0, -1.0], x) for x in np.linspace(start=0, stop=2, num=self.size)
        ]
        series = (
            np.linspace(start=45.0, stop=90.0, num=self.size) + base + seasonal + self._period_gen()
        )
        return series

    def _generate_linear_data(self):
        """Series with no structural break, one value per day from `start`."""
        DataStruct = namedtuple("DataStruct", "dates, series")
        series = self._generate_raw()
        date_ranges = np.arange(
            self.start, self.start + timedelta(days=self.size), timedelta(days=1)
        ).astype(date)
        return DataStruct(date_ranges, series)

    def _generate_shift_data(self):
        """Series with a level shift: first 60% raw, then the same values halved."""
        DataStruct = namedtuple("DataStruct", "dates, series")
        raw = self._generate_raw()[: int(self.size * 0.6)]
        temperature = np.concatenate((raw, raw / 2.0)).ravel()[: self.size]
        date_ranges = np.arange(
            self.start, self.start + timedelta(days=self.size), timedelta(days=1)
        ).astype(date)
        return DataStruct(date_ranges, temperature)

    def _gen_series(self):
        if self.shift:
            return self._generate_shift_data()
        else:
            return self._generate_linear_data()

    def create_series_df(self):
        """Return a DataFrame with `date_field` and `target_field` columns."""
        gen_data = self._gen_series()
        temporal_df = pd.DataFrame.from_records(gen_data).T
        temporal_df.columns = [self.date_field, self.target_field]
        return temporal_df
# Deterministic configuration fed into DataGeneration by the model fixture.
TEST_CONFIG = {
    "shift": False,
    "start": "2011-07-25",
    "size": 365 * 4,
    "seasonal_period": 7,
    "seasonal_freq": 0.1,
    "date_field": "ds",
    "target_field": "y",
}
# Number of periods forecast in the round-trip comparisons below.
FORECAST_HORIZON = 60
SEED = 98765
HORIZON_FIELD_NAME = "horizon"
# Name of Prophet's prediction column.
TARGET_FIELD_NAME = "yhat"
# Datetime string formats: saved input examples vs. scoring-server payloads.
DS_FORMAT = "%Y-%m-%dT%H:%M:%S"
INFER_FORMAT = "%Y-%m-%d %H:%M:%S"
# Bundles a fitted model together with the data it was trained on.
ModelWithSource = namedtuple("ModelWithSource", ["model", "data"])
pytestmark = pytest.mark.large
@pytest.fixture(scope="session")
def prophet_model():
    """Session-scoped fixture: fit one Prophet model on the synthetic series."""
    np.random.seed(SEED)
    data = DataGeneration(**TEST_CONFIG).create_series_df()
    model = Prophet().fit(data)
    return ModelWithSource(model, data)
@pytest.fixture
def model_path(tmpdir):
    """Per-test destination directory for saved models."""
    return os.path.join(str(tmpdir), "model")
@pytest.fixture
def prophet_custom_env(tmpdir):
    """Per-test custom conda environment file with prophet's dependencies."""
    conda_env = os.path.join(str(tmpdir), "conda_env.yml")
    _mlflow_conda_env(conda_env, additional_pip_deps=["pystan", "prophet", "pytest"])
    return conda_env
def future_horizon_df(model, horizon):
    """Return the model's future dataframe covering `horizon` periods."""
    future_frame = model.make_future_dataframe(periods=horizon)
    return future_frame
def generate_forecast(model, horizon):
    """Predict `horizon` periods ahead and return just the yhat series."""
    future_frame = model.make_future_dataframe(periods=horizon)
    predictions = model.predict(future_frame)
    return predictions[TARGET_FIELD_NAME]
def test_model_native_save_load(prophet_model, model_path):
    """Save with the native prophet flavor, reload, and compare forecasts."""
    model = prophet_model.model
    mlflow.prophet.save_model(pr_model=model, path=model_path)
    loaded_model = mlflow.prophet.load_model(model_uri=model_path)
    np.testing.assert_array_equal(
        generate_forecast(model, FORECAST_HORIZON),
        loaded_model.predict(future_horizon_df(loaded_model, FORECAST_HORIZON))[TARGET_FIELD_NAME],
    )
def test_model_pyfunc_save_load(prophet_model, model_path):
    """Round-trip the model through the pyfunc flavor and compare forecasts."""
    model = prophet_model.model
    mlflow.prophet.save_model(pr_model=model, path=model_path)
    # FIX: pyfunc.load_pyfunc is the deprecated alias of pyfunc.load_model;
    # both return the same PyFuncModel wrapper.
    loaded_pyfunc = pyfunc.load_model(model_uri=model_path)
    horizon_df = future_horizon_df(model, FORECAST_HORIZON)
    np.testing.assert_array_equal(
        generate_forecast(model, FORECAST_HORIZON),
        loaded_pyfunc.predict(horizon_df)[TARGET_FIELD_NAME],
    )
def test_signature_and_examples_saved_correctly(prophet_model):
    """Every (signature, input_example) combination is persisted faithfully."""
    data = prophet_model.data
    model = prophet_model.model
    horizon_df = future_horizon_df(model, FORECAST_HORIZON)
    signature_ = infer_signature(data, model.predict(horizon_df))
    example_ = data[0:5].copy(deep=False)
    example_["y"] = pd.to_numeric(example_["y"])  # cast to appropriate precision
    for signature in (None, signature_):
        for example in (None, example_):
            with TempDir() as tmp:
                path = tmp.path("model")
                mlflow.prophet.save_model(
                    model, path=path, signature=signature, input_example=example
                )
                mlflow_model = Model.load(path)
                assert signature == mlflow_model.signature
                if example is None:
                    assert mlflow_model.saved_input_example_info is None
                else:
                    # The example round-trips through JSON, so the ds column
                    # comes back as strings and must be re-parsed before comparing.
                    r_example = _read_example(mlflow_model, path).copy(deep=False)
                    r_example["ds"] = pd.to_datetime(r_example["ds"], format=DS_FORMAT)
                    np.testing.assert_array_equal(r_example, example)
def test_model_load_from_remote_uri_succeeds(prophet_model, model_path, mock_s3_bucket):
    """Upload the saved model to a mocked S3 store and reload it from the remote URI."""
    mlflow.prophet.save_model(pr_model=prophet_model.model, path=model_path)
    artifact_root = "s3://{bucket_name}".format(bucket_name=mock_s3_bucket)
    artifact_path = "model"
    artifact_repo = S3ArtifactRepository(artifact_root)
    artifact_repo.log_artifacts(model_path, artifact_path=artifact_path)
    model_uri = os.path.join(artifact_root, artifact_path)
    reloaded_prophet_model = mlflow.prophet.load_model(model_uri=model_uri)
    np.testing.assert_array_equal(
        generate_forecast(prophet_model.model, FORECAST_HORIZON),
        generate_forecast(reloaded_prophet_model, FORECAST_HORIZON),
    )
def test_model_log(prophet_model):
    """log_model works with and without an active run; the logged artifact
    reloads to an equivalent model and records its conda environment."""
    old_uri = mlflow.get_tracking_uri()
    with TempDir(chdr=True, remove_on_exit=True) as tmp:
        for should_start_run in [False, True]:
            try:
                mlflow.set_tracking_uri("test")
                if should_start_run:
                    mlflow.start_run()
                artifact_path = "prophet"
                conda_env = os.path.join(tmp.path(), "conda_env.yaml")
                _mlflow_conda_env(conda_env, additional_pip_deps=["pystan", "prophet"])
                mlflow.prophet.log_model(
                    pr_model=prophet_model.model, artifact_path=artifact_path, conda_env=conda_env
                )
                model_uri = f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
                reloaded_prophet_model = mlflow.prophet.load_model(model_uri=model_uri)
                np.testing.assert_array_equal(
                    generate_forecast(prophet_model.model, FORECAST_HORIZON),
                    generate_forecast(reloaded_prophet_model, FORECAST_HORIZON),
                )
                # The MLmodel metadata must reference an env file that exists.
                model_path = _download_artifact_from_uri(artifact_uri=model_uri)
                model_config = Model.load(os.path.join(model_path, "MLmodel"))
                assert pyfunc.FLAVOR_NAME in model_config.flavors
                assert pyfunc.ENV in model_config.flavors[pyfunc.FLAVOR_NAME]
                env_path = model_config.flavors[pyfunc.FLAVOR_NAME][pyfunc.ENV]
                assert os.path.exists(os.path.join(model_path, env_path))
            finally:
                # Always restore tracking state so iterations stay independent.
                mlflow.end_run()
                mlflow.set_tracking_uri(old_uri)
def test_log_model_calls_register_model(prophet_model):
    """Passing registered_model_name triggers exactly one register_model call."""
    artifact_path = "prophet"
    register_model_patch = mock.patch("mlflow.register_model")
    with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
        conda_env = os.path.join(tmp.path(), "conda_env.yaml")
        _mlflow_conda_env(conda_env, additional_pip_deps=["pystan", "prophet"])
        mlflow.prophet.log_model(
            pr_model=prophet_model.model,
            artifact_path=artifact_path,
            conda_env=conda_env,
            registered_model_name="ProphetModel1",
        )
        model_uri = f"runs:/{mlflow.active_run().info.run_id}/{artifact_path}"
        mlflow.register_model.assert_called_once_with(
            model_uri, "ProphetModel1", await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS
        )
def test_log_model_no_registered_model_name(prophet_model):
    """Without registered_model_name, register_model must never be called."""
    artifact_path = "prophet"
    register_model_patch = mock.patch("mlflow.register_model")
    with mlflow.start_run(), register_model_patch, TempDir(chdr=True, remove_on_exit=True) as tmp:
        conda_env = os.path.join(tmp.path(), "conda_env.yaml")
        _mlflow_conda_env(conda_env, additional_pip_deps=["pystan", "prophet"])
        mlflow.prophet.log_model(
            pr_model=prophet_model.model, artifact_path=artifact_path, conda_env=conda_env,
        )
        mlflow.register_model.assert_not_called()
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
    prophet_model, model_path, prophet_custom_env
):
    """A user-supplied conda env file is copied (not referenced) into the model dir."""
    mlflow.prophet.save_model(
        pr_model=prophet_model.model, path=model_path, conda_env=prophet_custom_env
    )
    pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
    saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
    assert os.path.exists(saved_conda_env_path)
    # Must be a copy inside the model directory, not the original file path.
    assert saved_conda_env_path != prophet_custom_env
    with open(prophet_custom_env, "r") as f:
        prophet_custom_env_parsed = yaml.safe_load(f)
    with open(saved_conda_env_path, "r") as f:
        saved_conda_env_parsed = yaml.safe_load(f)
    assert saved_conda_env_parsed == prophet_custom_env_parsed
def test_model_save_persists_requirements_in_mlflow_model_directory(
    prophet_model, model_path, prophet_custom_env
):
    """The pip section of the conda env is mirrored into requirements.txt."""
    mlflow.prophet.save_model(
        pr_model=prophet_model.model, path=model_path, conda_env=prophet_custom_env
    )
    saved_pip_req_path = os.path.join(model_path, "requirements.txt")
    _compare_conda_env_requirements(prophet_custom_env, saved_pip_req_path)
def test_log_model_with_pip_requirements(prophet_model, tmpdir):
    """pip_requirements accepts a file path, a list with -r includes, and -c constraints."""
    # Path to a requirements file
    req_file = tmpdir.join("requirements.txt")
    req_file.write("a")
    with mlflow.start_run():
        mlflow.prophet.log_model(prophet_model.model, "model", pip_requirements=req_file.strpath)
        _assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", "a"], strict=True)
    # List of requirements
    with mlflow.start_run():
        mlflow.prophet.log_model(
            prophet_model.model, "model", pip_requirements=[f"-r {req_file.strpath}", "b"]
        )
        _assert_pip_requirements(
            mlflow.get_artifact_uri("model"), ["mlflow", "a", "b"], strict=True
        )
    # Constraints file
    with mlflow.start_run():
        mlflow.prophet.log_model(
            prophet_model.model, "model", pip_requirements=[f"-c {req_file.strpath}", "b"]
        )
        _assert_pip_requirements(
            mlflow.get_artifact_uri("model"),
            ["mlflow", "b", "-c constraints.txt"],
            ["a"],
            strict=True,
        )
def test_log_model_with_extra_pip_requirements(prophet_model, tmpdir):
    """extra_pip_requirements is appended on top of the flavor's default requirements."""
    default_reqs = mlflow.prophet.get_default_pip_requirements()
    # Path to a requirements file
    req_file = tmpdir.join("requirements.txt")
    req_file.write("a")
    with mlflow.start_run():
        mlflow.prophet.log_model(
            prophet_model.model, "model", extra_pip_requirements=req_file.strpath
        )
        _assert_pip_requirements(mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a"])
    # List of requirements
    with mlflow.start_run():
        mlflow.prophet.log_model(
            prophet_model.model, "model", extra_pip_requirements=[f"-r {req_file.strpath}", "b"]
        )
        _assert_pip_requirements(
            mlflow.get_artifact_uri("model"), ["mlflow", *default_reqs, "a", "b"]
        )
    # Constraints file
    with mlflow.start_run():
        mlflow.prophet.log_model(
            prophet_model.model, "model", extra_pip_requirements=[f"-c {req_file.strpath}", "b"]
        )
        _assert_pip_requirements(
            model_uri=mlflow.get_artifact_uri("model"),
            requirements=["mlflow", *default_reqs, "b", "-c constraints.txt"],
            constraints=["a"],
            strict=False,
        )
def test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(
    prophet_model, model_path
):
    """Saving without conda_env falls back to the flavor's default requirements."""
    mlflow.prophet.save_model(prophet_model.model, model_path)
    _assert_pip_requirements(model_path, mlflow.prophet.get_default_pip_requirements())
def test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(
    prophet_model,
):
    """Logging without conda_env falls back to the flavor's default requirements."""
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.prophet.log_model(prophet_model.model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)
    _assert_pip_requirements(model_uri, mlflow.prophet.get_default_pip_requirements())
def test_pyfunc_serve_and_score(prophet_model):
    """Serve the logged model via the local scoring server and compare yhat
    against an in-process prediction."""
    artifact_path = "model"
    with mlflow.start_run():
        mlflow.prophet.log_model(prophet_model.model, artifact_path)
        model_uri = mlflow.get_artifact_uri(artifact_path)
    local_predict = prophet_model.model.predict(
        prophet_model.model.make_future_dataframe(FORECAST_HORIZON)
    )
    # cast to string representation of datetime series, otherwise will default cast to Unix time
    # which Prophet does not support for encoding
    inference_data = (
        prophet_model.model.make_future_dataframe(FORECAST_HORIZON)["ds"]
        .dt.strftime(INFER_FORMAT)
        .to_frame(name="ds")
    )
    resp = pyfunc_serve_and_score_model(
        model_uri,
        data=inference_data,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED,
    )
    scores = pd.read_json(resp.content, orient="records")
    # predictions are deterministic, but yhat_lower, yhat_upper are non-deterministic based on
    # stan build underlying environment. Seed value only works for reproducibility of yhat.
    # see: https://github.com/facebook/prophet/issues/1124
    pd.testing.assert_series_equal(
        left=local_predict["yhat"], right=scores["yhat"], check_dtype=True
    )
|
from django.db import models
from django.conf import settings
from django.dispatch import receiver
from django.db.models.signals import pre_delete, post_save, m2m_changed, post_delete
from django.utils.text import get_valid_filename
from sortedm2m.fields import SortedManyToManyField
from datetime import date, datetime, timedelta
import os
import os.path
import logging
from .person import Person
from .project_umbrella import Project_umbrella
from .keyword import Keyword
from .video import Video
from .talk import Talk
from .poster import Poster
# This retrieves a Python logging instance (or creates it)
_logger = logging.getLogger(__name__)
class Publication(models.Model):
    """A single academic publication (paper, thesis, poster, etc.) with its
    authors, venue metadata, uploaded PDF, and citation/BibTeX helpers."""
    UPLOAD_DIR = 'publications/'  # relative path
    THUMBNAIL_DIR = os.path.join(UPLOAD_DIR, 'images/')  # relative path
    title = models.CharField(max_length=255)
    authors = SortedManyToManyField(Person)
    # authorsOrdered = models.ManyToManyField(Person, through='PublicationAuthorThroughModel')
    # The PDF is required
    pdf_file = models.FileField(upload_to=UPLOAD_DIR, null=False, default=None, max_length=255)
    book_title = models.CharField(max_length=255, null=True)
    book_title.help_text = "This is the long-form proceedings title. For example, for UIST, this would be 'Proceedings of the 27th Annual ACM Symposium on User " \
                           "Interface Software and Technology.' For CHI, 'Proceedings of the 2017 CHI Conference on " \
                           "Human Factors in Computing Systems' "
    book_title_short = models.CharField(max_length=255, null=True)
    book_title_short.help_text = "This is a shorter version of book title. For UIST, 'Proceedings of UIST 2014' " \
                                 "For CHI, 'Proceedings of CHI 2017'"
    # The thumbnail should have null=True because it is added automatically later by a post_save signal
    # TODO: decide if we should have this be editable=True and if user doesn't add one him/herself, then
    # auto-generate thumbnail
    thumbnail = models.ImageField(upload_to=THUMBNAIL_DIR, editable=False, null=True, max_length=255)
    date = models.DateField(null=True)
    date.help_text = "This is the publication date (e.g., first day of the conference in which the paper appears or the journal publication date)"
    num_pages = models.IntegerField(null=True)
    num_pages.help_text = "The total number of pages in this publication (including references)"
    # A publication can be about more than one project
    projects = SortedManyToManyField('Project', blank=True)
    project_umbrellas = SortedManyToManyField('Project_umbrella', blank=True)
    keywords = SortedManyToManyField('Keyword', blank=True)
    # TODO, see if there is an IntegerRangeField or something like that for page_num_start and end
    page_num_start = models.IntegerField(blank=True, null=True)
    page_num_end = models.IntegerField(blank=True, null=True)
    official_url = models.URLField(blank=True, null=True)
    official_url.help_text = "The official url link to the paper, often a DOI url like https://doi.org/10.1145/3441852.3476542"
    geo_location = models.CharField(max_length=255, blank=True, null=True)
    geo_location.help_text = "The physical location of the conference, if any. For example, CHI 2017 was in 'Denver, Colorado'"
    # Publications can have corresponding videos, talks, posters, etc.
    video = models.OneToOneField(Video, on_delete=models.DO_NOTHING, null=True, blank=True)
    talk = models.ForeignKey(Talk, blank=True, null=True, on_delete=models.DO_NOTHING)
    poster = models.ForeignKey(Poster, blank=True, null=True, on_delete=models.DO_NOTHING)
    code_repo_url = models.URLField(blank=True, null=True)
    code_repo_url.help_text = "URL to github or gitlab"
    # BibTeX-oriented venue metadata
    series = models.CharField(max_length=255, blank=True, null=True)
    isbn = models.CharField(max_length=255, blank=True, null=True)
    doi = models.CharField(max_length=255, blank=True, null=True)
    publisher = models.CharField(max_length=255, blank=True, null=True)
    publisher_address = models.CharField(max_length=255, blank=True, null=True)
    acmid = models.CharField(max_length=255, blank=True, null=True)
    # Venue type constants (stored as strings in pub_venue_type)
    CONFERENCE = "Conference"
    ARTICLE = "Article"
    JOURNAL = "Journal"
    BOOK_CHAPTER = "Book Chapter"
    BOOK = "Book"
    DOCTORAL_CONSORTIUM = "Doctoral Consortium"
    MS_THESIS = "MS Thesis"
    PHD_DISSERTATION = "PhD Dissertation"
    WORKSHOP = "Workshop"
    POSTER = "Poster"
    DEMO = "Demo"
    WIP = "Work in Progress"
    LATE_BREAKING = "Late Breaking Result"
    PANEL = "Panel"
    OTHER = "Other"
    PUB_VENUE_TYPE_CHOICES = (
        (CONFERENCE, CONFERENCE),
        (ARTICLE, ARTICLE),
        (JOURNAL, JOURNAL),
        (BOOK_CHAPTER, BOOK_CHAPTER),
        (BOOK, BOOK),
        (DOCTORAL_CONSORTIUM, DOCTORAL_CONSORTIUM),
        (MS_THESIS, MS_THESIS),
        (PHD_DISSERTATION, PHD_DISSERTATION),
        (WORKSHOP, WORKSHOP),
        (POSTER, POSTER),
        (DEMO, DEMO),
        (WIP, WIP),
        (LATE_BREAKING, LATE_BREAKING),
        (PANEL, PANEL),
        (OTHER, OTHER)
    )
    # TODO: remove null=True from the following three
    pub_venue_url = models.URLField(blank=True, null=True)
    pub_venue_url.help_text = "The url to the publication venue (e.g., https://chi2021.acm.org/ or https://cscw.acm.org/2022/)"
    pub_venue_type = models.CharField(max_length=50, choices=PUB_VENUE_TYPE_CHOICES, null=True)
    extended_abstract = models.BooleanField(null=True)
    extended_abstract.help_text = "If the paper is not a *full* paper, it's likely an extended abstract (like a poster, demo, etc.)"
    peer_reviewed = models.BooleanField(null=True)
    total_papers_accepted = models.IntegerField(blank=True, null=True)
    total_papers_accepted.help_text = "The total number of papers accepted to the venue (if known)"
    total_papers_submitted = models.IntegerField(blank=True, null=True)
    total_papers_submitted.help_text = "The total number of papers submitted to the venue (if known)"
    # Award constants (stored as strings in award)
    BEST_ARTIFACT_AWARD = "Best Artifact Award"
    BEST_ARTIFACT_RUNNERUP_AWARD = "Best Artifact Runner-up Award"
    BEST_PAPER_AWARD = "Best Paper Award"
    HONORABLE_MENTION = "Honorable Mention"
    BEST_PAPER_NOMINATION = "Best Paper Nominee"
    TEN_YEAR_IMPACT_AWARD = "10-Year Impact Award"
    AWARD_CHOICES = (
        (BEST_PAPER_AWARD, BEST_PAPER_AWARD),
        (HONORABLE_MENTION, HONORABLE_MENTION),
        (BEST_PAPER_NOMINATION, BEST_PAPER_NOMINATION),
        (TEN_YEAR_IMPACT_AWARD, TEN_YEAR_IMPACT_AWARD),
        (BEST_ARTIFACT_AWARD, BEST_ARTIFACT_AWARD),
        (BEST_ARTIFACT_RUNNERUP_AWARD, BEST_ARTIFACT_RUNNERUP_AWARD)
    )
    award = models.CharField(max_length=50, choices=AWARD_CHOICES, blank=True, null=True)

    def get_person(self):
        """Returns the first author (raises IndexError if there are no authors)"""
        return self.authors.all()[0]

    def is_extended_abstract(self):
        """Returns True if this publication is an extended abstract"""
        return (self.extended_abstract or
                self.pub_venue_type == self.POSTER or
                self.pub_venue_type == self.DEMO or
                self.pub_venue_type == self.WIP or
                self.pub_venue_type == self.DOCTORAL_CONSORTIUM)

    def get_acceptance_rate(self):
        """Returns the acceptance rate as a percentage, or -1 if unknown"""
        if self.total_papers_accepted and self.total_papers_submitted:
            return 100 * (self.total_papers_accepted / self.total_papers_submitted)
        else:
            return -1

    def is_best_paper(self):
        """Returns true if earned best paper, best artifact, or test of time award"""
        return self.award == self.BEST_PAPER_AWARD or \
               self.award == self.BEST_ARTIFACT_AWARD or \
               self.award == self.TEN_YEAR_IMPACT_AWARD

    def is_honorable_mention(self):
        """Returns true if earned honorable mention or best paper nomination"""
        return self.award == self.HONORABLE_MENTION or \
               self.award == self.BEST_ARTIFACT_RUNNERUP_AWARD or \
               self.award == self.BEST_PAPER_NOMINATION

    def to_appear(self):
        """Returns true if the publication date happens in the future (e.g., tomorrow or later)"""
        return self.date and self.date > date.today()

    def get_citation_as_html(self):
        """Returns a human readable citation as html"""
        citation = ""
        author_idx = 0
        num_authors = self.authors.count()
        for author in self.authors.all():
            citation += author.get_citation_name(full_name=False)
            # Comma-separate authors; a plain space after the final author.
            if (author_idx + 1) < num_authors:
                citation += ", "
            else:
                citation += " "
            author_idx += 1
        citation += "({}). ".format(self.date.year)
        citation += self.title + ". "
        citation += "<i>{}</i>. ".format(self.book_title_short)
        if self.official_url:
            citation += "<a href={}>{}</a>".format(self.official_url, self.official_url)
        return citation

    def get_bibtex_id(self):
        """Generates and returns the bibtex id for this paper"""
        bibtex_id = self.get_person().last_name
        # Venue acronym: strip "proceedings of", drop spaces, append the year
        # if the short title doesn't already end with one.
        forum = self.book_title_short.lower()
        if "proceedings of" in forum:
            forum = forum.replace('proceedings of', '')
        forum = forum.upper().replace(" ", "")
        if not forum[-1].isdigit():
            forum = forum + str(self.date.year)
        bibtex_id += ":" + forum
        # code to make acronym from: https://stackoverflow.com/a/4355337
        title_acronym = ''.join(w[0] for w in self.title.split() if w[0].isupper())
        bibtex_id += ":" + title_acronym[:3]
        if self.doi:
            doi = self.doi.rsplit('/', 1)[-1]
            bibtex_id += doi
        # The trailing comma terminates the key line of the BibTeX entry.
        bibtex_id += ","
        return bibtex_id

    def get_citation_as_bibtex(self, newline="<br/>", use_hyperlinks=True):
        """Returns bibtex citation as a string"""
        bibtex = ""
        # FIX: compare with == rather than `is`. pub_venue_type is a string
        # loaded from the database, so identity comparison against the class
        # constants is not reliable.
        if self.pub_venue_type == self.JOURNAL or \
                self.pub_venue_type == self.ARTICLE:
            bibtex += "@article{"
        else:
            bibtex += "@inproceedings{"
        bibtex += self.get_bibtex_id() + newline
        # start author block
        bibtex += " author = {"
        author_idx = 0
        num_authors = self.authors.count()
        for author in self.authors.all():
            citation_name = author.get_citation_name(full_name=True)
            bibtex += citation_name
            if (author_idx + 1) < num_authors:
                bibtex += " and "
            author_idx += 1
        # FIX: close the author field with a trailing comma so the generated
        # BibTeX stays valid when further fields follow (was just "}").
        bibtex += "}," + newline
        # end author block
        bibtex += " title={{{}}},{}".format(self.title, newline)
        bibtex += " booktitle={{{}}},{}".format(self.book_title, newline)
        bibtex += " booktitleshort={{{}}},{}".format(self.book_title_short, newline)
        if self.series:
            # FIX: append the newline separator like every other field
            bibtex += " series = {" + self.series + "}," + newline
        bibtex += " year={{{}}},{}".format(self.date.year, newline)
        if self.isbn:
            bibtex += " isbn={{{}}},{}".format(self.isbn, newline)
        if self.geo_location:
            bibtex += " location={{{}}},{}".format(self.geo_location, newline)
        if self.page_num_start and self.page_num_end:
            bibtex += " pages={{{}--{}}},{}".format(self.page_num_start, self.page_num_end, newline)
        if self.num_pages:
            bibtex += " numpages={{{}}},{}".format(self.num_pages, newline)
        if self.doi:
            if use_hyperlinks:
                bibtex += " doi={{<a href='{}'>{}</a>}},{}".format(self.doi, self.doi, newline)
            else:
                bibtex += " doi={{{}}},{}".format(self.doi, newline)
        if self.official_url:
            if use_hyperlinks:
                bibtex += " url={{<a href='{}'>{}</a>}},{}".format(self.official_url, self.official_url, newline)
            else:
                bibtex += " url={{{}}},{}".format(self.official_url, newline)
        if self.acmid:
            bibtex += " acmid={{{}}},{}".format(self.acmid, newline)
        if self.publisher:
            bibtex += " publisher={{{}}},{}".format(self.publisher, newline)
        bibtex += "}"
        return bibtex

    def __str__(self):
        return self.title
def update_file_name_publication(sender, instance, action, reverse, **kwargs):
    """m2m_changed handler: once authors are attached, rename the uploaded PDF
    to a canonical <LastName>_<Title>_<FORUM>.pdf filename on disk."""
    # Reverse: Indicates which side of the relation is updated (i.e., if it is the forward or reverse relation that is being modified)
    # Action: A string indicating the type of update that is done on the relation.
    # post_add: Sent after one or more objects are added to the relation
    if action == 'post_add' and not reverse:
        initial_path = instance.pdf_file.path
        person = instance.get_person()
        last_name = person.last_name
        year = instance.date.year
        # Remove spaces non alphanumeric characters
        pub_title = ''.join(x for x in instance.title.title() if not x.isspace())
        pub_title = ''.join(e for e in pub_title if e.isalnum())
        # Get the publication venue but remove proceedings from it (if it exists)
        forum = instance.book_title_short.lower()
        if "proceedings of" in forum.lower():
            forum = forum.replace('proceedings of', '')
        forum = forum.strip().upper()
        forum = ''.join(x for x in forum if not x.isspace())
        if not forum[-1].isdigit():
            forum = forum + str(year)
        # Convert metadata into a filename
        new_filename = last_name + '_' + pub_title + '_' + forum + '.pdf'
        # Use Django helper function to ensure a clean filename
        new_filename = get_valid_filename(new_filename)
        # Change the path of the pdf file to point to the new file name
        instance.pdf_file.name = os.path.join(Publication.UPLOAD_DIR, new_filename)
        new_path = os.path.join(settings.MEDIA_ROOT, instance.pdf_file.name)
        # Actually rename the existing file (aka initial_path) but only if it exists (it should!)
        if os.path.exists(initial_path):
            os.rename(initial_path, new_path)
            # Persist the new FileField name; this save does not re-trigger
            # this handler because it is connected to m2m_changed, not post_save.
            instance.save()
        else:
            _logger.error(f'The file {initial_path} does not exist and cannot be renamed to {new_path}')
m2m_changed.connect(update_file_name_publication , sender=Publication.authors.through)
@receiver(pre_delete, sender=Publication)
def publication_delete(sender, instance, **kwargs):
    """Delete the files on disk when a Publication row is deleted.

    Django's FileField/ImageField do not remove the underlying files when the
    model instance is deleted, so clean up the thumbnail and PDF explicitly.
    """
    # FIX: renamed the **kwards typo to **kwargs (signal receivers are invoked
    # with keyword arguments, so the catch-all name is interface-safe), and
    # removed a duplicated second `instance.thumbnail.delete(True)` block that
    # was a no-op after the first delete cleared the field.
    if instance.thumbnail:
        instance.thumbnail.delete(True)
    if instance.pdf_file:
        instance.pdf_file.delete(True)
|
from yaetos.etl_utils import ETL_Base, Commandliner
class Job(ETL_Base):
    """Sample yaetos ETL job: a single SQL transform over `some_events`."""
    def transform(self, some_events):
        # Derives doubled_length from session_length via the framework's
        # SQL query helper; `some_events` is registered by the base class.
        df = self.query("""
SELECT se.session_id, session_length, session_length*2 as doubled_length
FROM some_events se
""")
        return df
if __name__ == "__main__":
    # Run the job through yaetos' command-line wrapper, pulling job
    # parameters from the shared metadata file.
    args = {'job_param_file': 'conf/jobs_metadata.yml'}
    Commandliner(Job, **args)
|
# Based on Sphinx
# Copyright (c) 2007-2020 by the Sphinx team.
# | All rights reserved.
# |
# | Redistribution and use in source and binary forms, with or without
# | modification, are permitted provided that the following conditions are
# | met:
# |
# | * Redistributions of source code must retain the above copyright
# | notice, this list of conditions and the following disclaimer.
# |
# | * Redistributions in binary form must reproduce the above copyright
# | notice, this list of conditions and the following disclaimer in the
# | documentation and/or other materials provided with the distribution.
# |
# | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# stdlib
from typing import Any, Dict, Sequence, Tuple
# 3rd party
import pytest
from bs4 import BeautifulSoup # type: ignore
from domdf_python_tools.paths import PathPlus
from sphinx.testing.fixtures import app as gh_src_app
from sphinx.testing.fixtures import make_app, shared_result, sphinx_test_tempdir, test_params
from sphinx.testing.path import path
# this package
from tests.common import AppParams
# Keep references to the re-exported Sphinx testing fixtures so pytest
# discovers them in this module.
fixtures = [make_app, shared_result, sphinx_test_tempdir, test_params, gh_src_app]
@pytest.fixture(scope="session")
def rootdir():
    """Session-scoped root directory holding the test documentation trees."""
    rdir = PathPlus(__file__).parent.absolute() / "github-doc-test"
    # Ensure the test root exists before Sphinx tries to copy it.
    (rdir / "test-github-root").maybe_make(parents=True)
    return path(rdir)
@pytest.fixture()
def app_params(
    request: Any,
    test_params: Dict,
    sphinx_test_tempdir: path,
    rootdir: path,
) -> Tuple[Sequence, Dict]:
    """
    Collect the parameters specified via ``pytest.mark.sphinx`` for
    ``sphinx.application.Sphinx`` initialization, copying the test source
    tree into the temp dir on first use.
    """
    # ##### process pytest.mark.sphinx
    markers = request.node.iter_markers("sphinx")
    pargs = {}
    kwargs: Dict[str, Any] = {}
    if markers is not None:
        # to avoid stacking positional args
        for info in reversed(list(markers)):
            for i, a in enumerate(info.args):
                pargs[i] = a
            kwargs.update(info.kwargs)
    args = [pargs[i] for i in sorted(pargs.keys())]
    # ##### prepare Application params
    testroot = "github-root"
    kwargs["srcdir"] = srcdir = sphinx_test_tempdir / kwargs.get("srcdir", testroot)
    # special support for sphinx/tests: copy the source tree on first use only
    if rootdir and not srcdir.exists():
        testroot_path = rootdir / ("test-" + testroot)
        testroot_path.copytree(srcdir)
    return AppParams(args, kwargs)
@pytest.fixture()
def github_source_page(gh_src_app, request) -> BeautifulSoup:
    """Build the docs and yield the parsed HTML of the page named by request.param."""
    # NOTE(review): this is a generator fixture, so the `-> BeautifulSoup`
    # annotation describes the yielded value rather than the return type.
    gh_src_app.build(force_all=True)
    pagename = request.param
    c = (gh_src_app.outdir / pagename).read_text()
    yield BeautifulSoup(c, "html5lib")
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/sampling'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/grouping'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/3d_interpolation'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/new_grouping'))#####
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/ops_square'))
from tf_sampling import farthest_point_sample, gather_point
from tf_grouping import query_ball_point, group_point
import tensorflow as tf
import numpy as np
import tf_util
def inv_q(q):
    """Invert a batch of quaternions.

    Args:
        q: tensor of shape [B, 1, 4]; component 0 is treated as the scalar
           part and components 1..3 as the vector part (matching the Hamilton
           product helpers below).
    Returns:
        [B, 4] tensor holding q* / |q|^2 for each quaternion.
    """
    q = tf.squeeze(q, axis = 1)
    # Squared norm with a small epsilon to avoid division by zero.
    q_2 = tf.reduce_sum(q*q, axis = -1, keep_dims = True) + 1e-10
    # Conjugate: keep the scalar component, negate the vector components.
    q_ = tf.concat([tf.slice(q, [0, 0], [-1, 1]), -tf.slice(q, [0, 1], [-1, 3])], axis = -1)
    q_inv = q_/q_2
    return q_inv
def mul_point_q(q_a, q_b, batch_size):
    """Hamilton product q_a * q_b with q_b broadcast over the point dimension.

    Args:
        q_a: per-point quaternions, shape [B, N, 4] (component 0 = scalar part).
        q_b: one quaternion per batch element; reshaped to [B, 1, 4].
        batch_size: B, needed for the static reshapes.
    Returns:
        [B, N, 4] tensor of products.
    """
    q_b = tf.reshape(q_b, [batch_size, 1, 4])
    # Standard quaternion multiplication expanded per component.
    q_result_0 = tf.multiply(q_a[ :, :, 0], q_b[ :, :, 0])-tf.multiply(q_a[ :, :, 1], q_b[ :, :, 1])-tf.multiply(q_a[ :, :, 2], q_b[ :, :, 2])-tf.multiply(q_a[ :, :, 3], q_b[ :, :, 3])
    q_result_0 = tf.reshape(q_result_0, [batch_size, -1, 1])
    q_result_1 = tf.multiply(q_a[ :, :, 0], q_b[ :, :, 1])+tf.multiply(q_a[ :, :, 1], q_b[ :, :, 0])+tf.multiply(q_a[ :, :, 2], q_b[ :, :, 3])-tf.multiply(q_a[ :, :, 3], q_b[ :, :, 2])
    q_result_1 = tf.reshape(q_result_1, [batch_size, -1, 1])
    q_result_2 = tf.multiply(q_a[ :, :, 0], q_b[ :, :, 2])-tf.multiply(q_a[ :, :, 1], q_b[ :, :, 3])+tf.multiply(q_a[ :, :, 2], q_b[ :, :, 0])+tf.multiply(q_a[ :, :, 3], q_b[ :, :, 1])
    q_result_2 = tf.reshape(q_result_2, [batch_size, -1, 1])
    q_result_3 = tf.multiply(q_a[ :, :, 0], q_b[ :, :, 3])+tf.multiply(q_a[ :, :, 1], q_b[ :, :, 2])-tf.multiply(q_a[ :, :, 2], q_b[ :, :, 1])+tf.multiply(q_a[ :, :, 3], q_b[ :, :, 0])
    q_result_3 = tf.reshape(q_result_3, [batch_size, -1, 1])
    q_result = tf.concat([q_result_0, q_result_1, q_result_2, q_result_3], axis = -1)
    return q_result ## B N 4
def mul_q_point(q_a, q_b, batch_size):
    """Hamilton product of one broadcast quaternion with per-point quaternions.

    q_a: quaternion reshaped/broadcast to (B, 1, 4); q_b: (B, N, 4) quaternions.
    Returns (B, N, 4) holding q_a * q_b for every point.
    """
    q_a = tf.reshape(q_a, [batch_size, 1, 4])
    aw, ax, ay, az = q_a[:, :, 0], q_a[:, :, 1], q_a[:, :, 2], q_a[:, :, 3]
    bw, bx, by, bz = q_b[:, :, 0], q_b[:, :, 1], q_b[:, :, 2], q_b[:, :, 3]
    # standard quaternion multiplication, one output channel at a time
    w = aw * bw - ax * bx - ay * by - az * bz
    x = aw * bx + ax * bw + ay * bz - az * by
    y = aw * by - ax * bz + ay * bw + az * bx
    z = aw * bz + ax * by - ay * bx + az * bw
    channels = [tf.reshape(c, [batch_size, -1, 1]) for c in (w, x, y, z)]
    return tf.concat(channels, axis=-1)  # B N 4
def square_distance(src, dst):
    """Compute pairwise squared Euclidean distances between two point sets.

    Uses the expansion ||s - d||^2 = ||s||^2 - 2 s.d + ||d||^2.

    @param src: (B, N, C) source points
    @param dst: (B, M, C) destination points
    Returns (B, N, M) tensor with out[b, i, j] = ||src[b, i] - dst[b, j]||^2.
    """
    B = src.get_shape()[0].value
    N = src.get_shape()[1].value
    M = dst.get_shape()[1].value
    # One batched matmul instead of the original Python loop over the batch,
    # which unrolled into B separate matmul/concat ops in the graph.
    dist = -2 * tf.matmul(src, dst, transpose_b=True)
    dist = dist + tf.reshape(tf.reduce_sum(src ** 2, axis = -1), [B, N, 1])
    dist = dist + tf.reshape(tf.reduce_sum(dst ** 2, axis = -1), [B, 1, M])
    return dist
def knn_point(nsample, xyz, new_xyz):
    """
    Input:
        nsample: max sample number in local region
        xyz: all points, [B, N, C]
        new_xyz: query points, [B, S, C]
    Return:
        group_dist: squared distances to the selected neighbours, [B, S, nsample]
        group_idx: grouped points index, [B, S, nsample]
    """
    sqrdists = square_distance(new_xyz, xyz)
    # top_k picks the LARGEST values, so negate the distances to get the nearest
    neg_dist, group_idx = tf.nn.top_k(-sqrdists, nsample)
    return -neg_dist, group_idx
def warping_layers(xyz1, upsampled_flow):
    """Warp the frame-1 coordinates by the (upsampled) scene flow.

    Translates every point in xyz1 by its predicted flow vector.
    """
    warped = xyz1 + upsampled_flow
    return warped
def cost_volume(warped_xyz, warped_points, f2_xyz, f2_points, nsample, nsample_q, mlp1, mlp2, is_training, bn_decay, scope, bn=True, pooling='max', knn=True, corr_func='elementwise_product' ):
    """Learnable cost volume between warped frame-1 points and frame 2.

    Two aggregation stages:
      1. for every warped point, gather its nsample_q nearest neighbours in
         frame 2, encode xyz/feature differences with mlp1, and collapse the
         neighbourhood with a softmax attention weighting produced by mlp2;
      2. re-aggregate those per-point results over each point's nsample nearest
         neighbours within the warped frame itself, again softmax-weighted.

    @param warped_xyz: (B, N, 3) warped frame-1 coordinates
    @param warped_points: (B, N, C) frame-1 point features
    @param f2_xyz: (B, M, 3) frame-2 coordinates
    @param f2_points: (B, M, C) frame-2 point features
    @param nsample: neighbourhood size for the second (intra-frame) aggregation
    @param nsample_q: neighbourhood size for the first (cross-frame) aggregation
    @param mlp1, mlp2: channel lists of the two shared MLPs (1x1 convs)
    NOTE(review): bn, pooling, knn and corr_func are accepted but never used —
    batch norm is hard-coded on and pooling is always softmax-weighted sum.
    Returns (B, N, mlp2[-1]) cost-volume features.
    """
    with tf.variable_scope(scope) as sc:
        ### FIRST AGGREGATE: cross-frame neighbourhoods of each warped point
        _, idx_q = knn_point(nsample_q, f2_xyz, warped_xyz)
        qi_xyz_grouped = group_point(f2_xyz, idx_q)
        qi_points_grouped = group_point(f2_points, idx_q)
        pi_xyz_expanded = tf.tile(tf.expand_dims(warped_xyz, 2), [1,1,nsample_q,1]) # batch_size, npoints, nsample, 3
        pi_points_expanded = tf.tile(tf.expand_dims(warped_points, 2), [1,1,nsample_q,1]) # batch_size, npoints, nsample, 3
        pi_xyz_diff = qi_xyz_grouped - pi_xyz_expanded
        # Euclidean distance to each neighbour; epsilon keeps the sqrt gradient finite
        pi_euc_diff = tf.sqrt(tf.reduce_sum(tf.square(pi_xyz_diff), axis=[-1] , keep_dims=True) + 1e-20 )
        pi_xyz_diff_concat = tf.concat([pi_xyz_expanded, qi_xyz_grouped, pi_xyz_diff, pi_euc_diff], axis=3)
        pi_feat_diff = tf.concat(axis=-1, values=[pi_points_expanded, qi_points_grouped])
        pi_feat1_new = tf.concat([pi_xyz_diff_concat, pi_feat_diff], axis=3) # batch_size, npoint*m, nsample, [channel or 1] + 3
        for j, num_out_channel in enumerate(mlp1):
            pi_feat1_new = tf_util.conv2d(pi_feat1_new, num_out_channel, [1,1],
                                          padding='VALID', stride=[1,1],
                                          bn=True, is_training=is_training,
                                          scope='CV_%d'%(j), bn_decay=bn_decay)
        # separate positional encoding of the pure geometric differences
        pi_xyz_encoding = tf_util.conv2d(pi_xyz_diff_concat, mlp1[-1], [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=True, is_training=is_training,
                                         scope='CV_xyz', bn_decay=bn_decay)
        pi_concat = tf.concat([pi_xyz_encoding, pi_feat1_new], axis = 3)
        for j, num_out_channel in enumerate(mlp2):
            pi_concat = tf_util.conv2d(pi_concat, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='sum_CV_%d'%(j), bn_decay=bn_decay)
        # attention weights over the nsample_q neighbours (softmax along axis 2)
        WQ = tf.nn.softmax(pi_concat,dim=2)
        pi_feat1_new = WQ * pi_feat1_new
        pi_feat1_new = tf.reduce_sum(pi_feat1_new, axis=[2], keep_dims=False, name='avgpool_diff')#b, n, mlp1[-1]
        ##### SECOND AGGREGATE: neighbourhoods within the warped frame itself
        _, idx = knn_point(nsample, warped_xyz, warped_xyz)
        pc_xyz_grouped = group_point(warped_xyz, idx)
        pc_points_grouped = group_point(pi_feat1_new, idx)
        pc_xyz_new = tf.tile( tf.expand_dims (warped_xyz, axis = 2), [1,1,nsample,1] )
        pc_points_new = tf.tile( tf.expand_dims (warped_points, axis = 2), [1,1,nsample,1] )
        pc_xyz_diff = pc_xyz_grouped - pc_xyz_new####b , n ,m ,3
        pc_euc_diff = tf.sqrt(tf.reduce_sum(tf.square(pc_xyz_diff), axis=3, keep_dims=True) + 1e-20)
        pc_xyz_diff_concat = tf.concat([pc_xyz_new, pc_xyz_grouped, pc_xyz_diff, pc_euc_diff], axis=3)
        pc_xyz_encoding = tf_util.conv2d(pc_xyz_diff_concat, mlp1[-1], [1,1],
                                         padding='VALID', stride=[1,1],
                                         bn=True, is_training=is_training,
                                         scope='sum_xyz_encoding', bn_decay=bn_decay)
        pc_concat = tf.concat([pc_xyz_encoding, pc_points_new, pc_points_grouped], axis = -1)
        for j, num_out_channel in enumerate(mlp2):
            pc_concat = tf_util.conv2d(pc_concat, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='sum_cost_volume_%d'%(j), bn_decay=bn_decay)
        # attention weights for the intra-frame aggregation
        WP = tf.nn.softmax(pc_concat,dim=2) ##### b, npoints, nsample, mlp[-1]
        pc_feat1_new = WP * pc_points_grouped
        pc_feat1_new = tf.reduce_sum(pc_feat1_new, axis=[2], keep_dims=False, name='sumpool_diff')#b*n*mlp2[-1]
        return pc_feat1_new
def flow_predictor( points_f1, upsampled_feat, cost_volume, mlp, is_training, bn_decay, scope, bn=True ):
    """Predict per-point flow features from the cost volume.

    Concatenates whichever of points_f1 / upsampled_feat are provided with the
    cost volume and runs the result through a shared MLP (1x1 convolutions).

    @param points_f1: (B, N, C1) frame-1 features, or None
    @param upsampled_feat: (B, N, C2) features upsampled from a coarser level, or None
    @param cost_volume: (B, N, C3) cost-volume features
    @param mlp: list of output channel counts for the MLP layers
    Returns (B, N, mlp[-1]) predicted features.
    """
    with tf.variable_scope(scope) as sc:
        # Use identity checks: TF tensors must never be compared to None
        # with ==/!= (the original relied on the default __eq__ fallback).
        if points_f1 is None:
            points_concat = cost_volume
        elif upsampled_feat is not None:
            points_concat = tf.concat(axis=-1, values=[ points_f1, cost_volume, upsampled_feat]) # B,ndataset1,nchannel1+nchannel2
        else:
            points_concat = tf.concat(axis=-1, values=[ points_f1, cost_volume]) # B,ndataset1,nchannel1+nchannel2
        points_concat = tf.expand_dims(points_concat, 2)
        for i, num_out_channel in enumerate(mlp):
            points_concat = tf_util.conv2d(points_concat, num_out_channel, [1,1],
                                           padding='VALID', stride=[1,1],
                                           bn=bn, is_training=is_training,
                                           scope='conv_predictor%d'%(i), bn_decay=bn_decay)
        points_concat = tf.squeeze(points_concat,[2])
        return points_concat
def sample_and_group(npoint, nsample, xyz, points, use_xyz=True):
    """Sample npoint centroids with FPS and group nsample kNN neighbours around each.

    @param npoint: number of centroids sampled by farthest point sampling
    @param nsample: number of neighbours grouped per centroid
    @param xyz: (B, N, 3) point coordinates
    @param points: (B, N, C) extra point features, or None
    @param use_xyz: kept for interface compatibility (relative xyz is always concatenated)
    Returns:
        new_xyz: (B, npoint, 3) sampled centroids
        new_points: (B, npoint, nsample, 3+C) relative xyz (+ grouped features)
    """
    sample_idx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, sample_idx) # (batch_size, npoint, 3)
    # BUG FIX: the original condition was inverted — the `if points is None`
    # branch called group_point(points, ...) on None (which crashes), while the
    # else branch ignored the provided features entirely.
    if points is not None:
        _, idx_q = knn_point(nsample, xyz, new_xyz)
        grouped_xyz = group_point(xyz, idx_q)
        grouped_points = group_point(points, idx_q)
        xyz_diff = grouped_xyz - tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
        new_points = tf.concat([xyz_diff, grouped_points] , axis=-1)
    else:
        _, idx_q = knn_point(nsample, xyz, new_xyz)
        grouped_xyz = group_point(xyz, idx_q)
        xyz_diff = grouped_xyz - tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
        new_points = tf.concat([xyz_diff, grouped_xyz], axis=-1) # (batch_size, npoint, nample, 3+channel)
    return new_xyz, new_points
def pointnet_sa_module(xyz, points, npoint, nsample, mlp, mlp2, is_training, bn_decay, scope, bn=True, pooling='max', use_xyz=True, use_nchw=False):
    """PointNet++ set-abstraction layer: sample, group, embed, pool.

    @param xyz: (B, N, 3) input point coordinates
    @param points: (B, N, C) input point features, or None
    @param npoint: number of centroids sampled by FPS
    @param nsample: neighbourhood size around each centroid
    @param mlp: channel sizes of the per-point embedding MLP
    @param mlp2: optional channel sizes of the post-pooling MLP, or None
    @param pooling: 'max' or 'avg' pooling over each local region
    @param use_nchw: run the convolutions in NCHW layout (transposes around the MLPs)
    Returns:
        new_xyz: (B, npoint, 3) sampled centroids
        new_points: (B, npoint, mlp[-1] or mlp2[-1]) pooled region features
    """
    data_format = 'NCHW' if use_nchw else 'NHWC'
    with tf.variable_scope(scope) as sc:
        # Sample and Grouping
        new_xyz, new_points = sample_and_group(npoint, nsample, xyz, points, use_xyz)
        # Point Feature Embedding
        if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2])
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, is_training=is_training,
                                        scope='conv%d'%(i), bn_decay=bn_decay,
                                        data_format=data_format)
        if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1])
        # Pooling in Local Regions
        if pooling=='max':
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
        elif pooling=='avg':
            new_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
        # optional extra MLP applied after pooling
        if mlp2 is not None:
            if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2])
            for i, num_out_channel in enumerate(mlp2):
                new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
                                            padding='VALID', stride=[1,1],
                                            bn=bn, is_training=is_training,
                                            scope='conv_post_%d'%(i), bn_decay=bn_decay,
                                            data_format=data_format)
            if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1])
        new_points = tf.squeeze(new_points, [2]) # (batch_size, npoints, mlp2[-1])
        return new_xyz, new_points
def set_upconv_module(xyz1, xyz2, feat1, feat2, nsample, mlp, mlp2, is_training, scope, bn_decay=None, bn=True, pooling='max', knn=True):
    """Propagate features from a coarse level (xyz2/feat2) up to a dense level (xyz1).

    For every dense point, pools features from its nsample nearest coarse points
    (after an MLP on [coarse feature, relative xyz]), optionally concatenates the
    skip features feat1, and refines the result with mlp2.

    @param xyz1: (B, npoint1, 3) dense-level coordinates
    @param xyz2: (B, npoint2, 3) coarse-level coordinates
    @param feat1: (B, npoint1, C1) dense-level skip features, or None
    @param feat2: (B, npoint2, C2) coarse-level features
    NOTE(review): bn and knn are accepted but unused — the convolutions
    hard-code bn=True and neighbours are always found via kNN; confirm intent.
    Returns (B, npoint1, mlp2[-1]) upsampled features (or the pre-mlp2 width
    when mlp2 is None/empty).
    """
    with tf.variable_scope(scope) as sc:
        _, idx_q = knn_point(nsample, xyz2, xyz1)
        xyz2_grouped = group_point(xyz2, idx_q)
        feat2_grouped = group_point(feat2, idx_q)
        xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint1, 1, 3
        xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint1, nsample, 3
        net = tf.concat([feat2_grouped, xyz_diff], axis=3) # batch_size, npoint1, nsample, channel2+3
        if mlp is None: mlp=[]
        for i, num_out_channel in enumerate(mlp):
            net = tf_util.conv2d(net, num_out_channel, [1,1],
                                 padding='VALID', stride=[1,1],
                                 bn=True, is_training=is_training,
                                 scope='conv%d'%(i), bn_decay=bn_decay)
        # collapse the coarse neighbourhood into one feature per dense point
        if pooling=='max':
            feat1_new = tf.reduce_max(net, axis=[2], keep_dims=False, name='maxpool') # batch_size, npoint1, mlp[-1]
        elif pooling=='avg':
            feat1_new = tf.reduce_mean(net, axis=[2], keep_dims=False, name='avgpool') # batch_size, npoint1, mlp[-1]
        # skip connection from the dense level, when available
        if feat1 is not None:
            feat1_new = tf.concat([feat1_new, feat1], axis=2) # batch_size, npoint1, mlp[-1]+channel1
        feat1_new = tf.expand_dims(feat1_new, 2)   # batch_size, npoint1, 1, mlp[-1]+channel2
        if mlp2 is None: mlp2=[]
        for i, num_out_channel in enumerate(mlp2):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='post-conv%d'%(i), bn_decay=bn_decay)
        feat1_new = tf.squeeze(feat1_new, [2]) # batch_size, npoint1, mlp2[-1]
        return feat1_new
|
import pytest
from .base import TestBase
pytestmark = pytest.mark.asyncio
class TestReadOnly(TestBase):
    """Tests for mailboxes selected in read-only mode.

    The server must reject every mutating command (APPEND, COPY, EXPUNGE,
    STORE) with a ``NO [READ-ONLY]`` response and must not implicitly update
    message flags on FETCH.
    """

    async def test_select(self, imap_server):
        """Repeated SELECT of Trash reports it read-only both times."""
        transport = self.new_transport(imap_server)
        transport.push_login()
        transport.push_select(b'Trash', 1, 1, readonly=True)
        transport.push_select(b'Trash', 1, 1, readonly=True)
        transport.push_logout()
        await self.run(transport)

    async def test_examine(self, imap_server):
        """EXAMINE always opens the mailbox read-only, even a writable one."""
        transport = self.new_transport(imap_server)
        transport.push_login()
        transport.push_select(b'INBOX', 4, 1, examine=True)
        transport.push_select(b'INBOX', 4, 1, examine=True)
        transport.push_logout()
        await self.run(transport)

    async def test_append(self, imap_server):
        """APPEND to a read-only mailbox is rejected; message count unchanged."""
        transport = self.new_transport(imap_server)
        message = b'test message\r\n'
        transport.push_login()
        transport.push_readline(
            b'append1 APPEND Trash (\\Seen) {%i}\r\n' % len(message))
        transport.push_write(
            b'+ Literal string\r\n')
        # the literal is still consumed before the command is refused
        transport.push_readexactly(message)
        transport.push_readline(
            b'\r\n')
        transport.push_write(
            b'append1 NO [READ-ONLY] Mailbox is read-only.\r\n')
        transport.push_select(b'Trash', 1, readonly=True)
        transport.push_logout()
        await self.run(transport)

    async def test_copy(self, imap_server):
        """COPY into a read-only destination mailbox is rejected."""
        transport = self.new_transport(imap_server)
        transport.push_login()
        transport.push_select(b'INBOX')
        transport.push_readline(
            b'copy1 COPY 1 Trash\r\n')
        transport.push_write(
            b'copy1 NO [READ-ONLY] Mailbox is read-only.\r\n')
        transport.push_select(b'Trash', 1, readonly=True)
        transport.push_logout()
        await self.run(transport)

    async def test_expunge(self, imap_server):
        """EXPUNGE in a read-only session is rejected."""
        transport = self.new_transport(imap_server)
        transport.push_login()
        transport.push_select(b'Trash', 1, readonly=True)
        transport.push_readline(
            b'expunge1 EXPUNGE\r\n')
        transport.push_write(
            b'expunge1 NO [READ-ONLY] Mailbox is read-only.\r\n')
        transport.push_select(b'Trash', 1, readonly=True)
        transport.push_logout()
        await self.run(transport)

    async def test_store(self, imap_server):
        """STORE of flags in a read-only session is rejected."""
        transport = self.new_transport(imap_server)
        transport.push_login()
        transport.push_select(b'Trash', 1, readonly=True)
        # NOTE(review): the odd '+FlAGS' casing looks like a typo; IMAP
        # keywords are case-insensitive so the command is still valid.
        transport.push_readline(
            b'store1 STORE 1 +FlAGS (\\Seen)\r\n')
        transport.push_write(
            b'store1 NO [READ-ONLY] Mailbox is read-only.\r\n')
        transport.push_logout()
        await self.run(transport)

    async def test_fetch_not_seen(self, imap_server):
        """FETCH of body text must not implicitly add \\Seen when read-only."""
        transport = self.new_transport(imap_server)
        transport.push_login()
        transport.push_select(b'Trash', 1, readonly=True)
        transport.push_readline(
            b'fetch1 FETCH 1 (FLAGS)\r\n')
        transport.push_write(
            b'* 1 FETCH (FLAGS (\\Deleted))\r\n'
            b'fetch1 OK FETCH completed.\r\n')
        transport.push_readline(
            b'fetch2 FETCH 1 (RFC822.TEXT FLAGS)\r\n')
        # flags are unchanged after fetching the text — no implicit \Seen
        transport.push_write(
            b'* 1 FETCH (RFC822.TEXT {53}\r\n'
            b'It just works.  Only five easy payments of $19.99.\r\n\r\n'
            b' FLAGS (\\Deleted))\r\n'
            b'fetch2 OK FETCH completed.\r\n')
        transport.push_logout()
        await self.run(transport)
|
# -*- coding: utf-8 -*-
import sys
sys.path.append("./voc")
from rnaudio import *
# Recording smoke test: capture audio from the microphone, play the capture
# back, then remove the temporary file.
rna = Rnaudio()
# Record() presumably returns the path of the recorded WAV file — verify in rnaudio
filename = rna.Record()
rna.Play_WAV(filename)
rna.Delete(filename)
|
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
# Toy PCA demo: 4 samples of 2-dimensional data.
# NOTE(review): the original comments described "stock prices (3x per day)
# [morning, midday, evening]", but the array below holds 4 rows of 2 features.
APPLE = np.array(
    [[1,5],[3,-2],[-1,-4],[-2,1]])
# per-feature (column) mean over the 4 samples
print(APPLE.mean(axis=0))
# 2x2 covariance matrix; rowvar=0 means the columns are the variables
cov = np.cov(APPLE,rowvar=0)
print(cov)
# eigen-decomposition of the covariance: w eigenvalues, v eigenvectors (columns)
w, v = LA.eig(cov)
print(w)
print(v)
origin = [0, 0]
# principal directions are the columns of v
eig_vec1 = v[:,0]
eig_vec2 = v[:,1]
print(eig_vec1)
print(eig_vec2)
# This line below plots the 2d points
#plt.scatter(np_array[:,0], np_array[:,1])
# draw the two eigenvectors as arrows from the origin
plt.quiver(*origin, *eig_vec1, color=['r'], scale=21)
plt.quiver(*origin, *eig_vec2, color=['b'], scale=21)
plt.show()
|
"""
main class responsible for obtaining results from the Event Registry
"""
import six, os, sys, traceback, json, re, requests, time, logging, threading
from eventregistry.Base import *
from eventregistry.ReturnInfo import *
from eventregistry.Logger import logger
class EventRegistry(object):
"""
the core object that is used to access any data in Event Registry
it is used to send all the requests and queries
"""
def __init__(self,
apiKey = None,
host = None,
hostAnalytics = None,
minDelayBetweenRequests = 0.5,
repeatFailedRequestCount = -1,
allowUseOfArchive = True,
verboseOutput = False,
settingsFName = None):
"""
@param apiKey: API key that should be used to make the requests to the Event Registry. API key is assigned to each user account and can be obtained on
this page: https://newsapi.ai/dashboard
@param host: host to use to access the Event Registry backend. Use None to use the default host.
@param hostAnalytics: the host address to use to perform the analytics api calls
@param minDelayBetweenRequests: the minimum number of seconds between individual api calls
@param repeatFailedRequestCount: if a request fails (for example, because ER is down), what is the max number of times the request
should be repeated (-1 for indefinitely)
@param allowUseOfArchive: default is True. Determines if the queries made should potentially be executed on the archive data.
If False, all queries (regardless how the date conditions are set) will be executed on data from the last 31 days.
Queries executed on the archive are more expensive so set it to False if you are just interested in recent data
@param verboseOutput: if True, additional info about errors etc will be printed to console
@param settingsFName: If provided it should be a full path to 'settings.json' file where apiKey an/or host can be loaded from.
If None, we will look for the settings file in the eventregistry module folder
"""
self._host = host
self._hostAnalytics = hostAnalytics
self._lastException = None
self._logRequests = False
self._minDelayBetweenRequests = minDelayBetweenRequests
self._repeatFailedRequestCount = repeatFailedRequestCount
self._allowUseOfArchive = allowUseOfArchive
self._verboseOutput = verboseOutput
self._lastQueryTime = time.time()
self._headers = {}
self._dailyAvailableRequests = -1
self._remainingAvailableRequests = -1
# lock for making sure we make one request at a time - requests module otherwise sometimes returns incomplete json objects
self._lock = threading.Lock()
self._reqSession = requests.Session()
self._apiKey = apiKey
self._extraParams = None
# if there is a settings.json file in the directory then try using it to load the API key from it
# and to read the host name from it (if custom host is not specified)
currPath = os.path.split(os.path.realpath(__file__))[0]
settFName = settingsFName or os.path.join(currPath, "settings.json")
if apiKey:
logger.debug("using user provided API key for making requests")
if os.path.exists(settFName):
settings = json.load(open(settFName))
self._host = host or settings.get("host", "http://eventregistry.org")
self._hostAnalytics = hostAnalytics or settings.get("hostAnalytics", "http://analytics.eventregistry.org")
# if api key is set, then use it when making the requests
if "apiKey" in settings and not apiKey:
logger.debug("found apiKey in settings file which will be used for making requests")
self._apiKey = settings["apiKey"]
else:
self._host = host or "http://eventregistry.org"
self._hostAnalytics = hostAnalytics or "http://analytics.eventregistry.org"
if self._apiKey == None:
print("No API key was provided. You will be allowed to perform only a very limited number of requests per day.")
self._requestLogFName = os.path.join(currPath, "requests_log.txt")
logger.debug("Event Registry host: %s" % (self._host))
logger.debug("Text analytics host: %s" % (self._hostAnalytics))
# list of status codes - when we get them as a response from the call, we don't want to repeat the query as the response will likely always be the same
self._stopStatusCodes = set([
204, # Information not available. Request succeeded, but the requested information is not available.
400, # Bad request. The request was unacceptable, most likely due to invalid or missing parameter.
401, # User's limit reached. The user reached the limit of the tokens in his account. The requests are rejected.
403, # Invalid account. The user's IP or account is disabled, potentially due to misuse.
])
def checkVersion(self):
"""
check what is the latest version of the python sdk and report in case there is a newer version
"""
try:
respInfo = self._reqSession.get(self._host + "/static/pythonSDKVersion.txt")
if respInfo.status_code != 200 or len(respInfo.text) > 20:
return
latestVersion = respInfo.text.strip()
import eventregistry._version as _version
currentVersion = _version.__version__
for (latest, current) in zip(latestVersion.split("."), currentVersion.split(".")):
if int(latest) > int(current):
logger.info("==============\nYour version of the module is outdated, please update to the latest version")
logger.info("Your version is %s while the latest is %s" % (currentVersion, latestVersion))
logger.info("Update by calling: pip install --upgrade eventregistry\n==============")
return
# in case the server mistakenly has a lower version that the user has, don't report an error
elif int(latest) < int(current):
return
except:
pass
    def setLogging(self, val):
        """should all requests be logged to a file or not?

        @param val: truthy to append each request's params and url to the request log file
        """
        self._logRequests = val
def setExtraParams(self, params):
if params != None:
assert(isinstance(params, dict))
self._extraParams = params
    def getHost(self):
        """Return the base url of the Event Registry host used for requests."""
        return self._host
    def getLastException(self):
        """return the last exception raised while executing a request (or None if none occurred)"""
        return self._lastException
    def printLastException(self):
        """log the string form of the last stored exception at error level"""
        logger.error(str(self._lastException))
def format(self, obj):
"""return a string containing the object in a pretty formated version"""
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    def getRemainingAvailableRequests(self):
        """get the number of requests that are still available for the user today. Information is only accessible after you make some query (-1 before that)."""
        return self._remainingAvailableRequests
    def getDailyAvailableRequests(self):
        """get the total number of requests that the user can make in a day. Information is only accessible after you make some query (-1 before that)."""
        return self._dailyAvailableRequests
    def getUsageInfo(self):
        """return the number of used and total available tokens. Can be used at any time (also before making queries). Performs an actual API call."""
        return self.jsonRequest("/api/v1/usage", { "apiKey": self._apiKey })
    def getServiceStatus(self):
        """return the status of various services used in Event Registry pipeline. Performs an actual API call."""
        return self.jsonRequest("/api/v1/getServiceStatus", {"apiKey": self._apiKey})
def getUrl(self, query):
"""
return the url that can be used to get the content that matches the query
@param query: instance of Query class
"""
assert isinstance(query, QueryParamsBase), "query parameter should be an instance of a class that has Query as a base class, such as QueryArticles or QueryEvents"
import urllib
# don't modify original query params
allParams = query._getQueryParams()
# make the url
try:
url = self._host + query._getPath() + "?" + urllib.urlencode(allParams, doseq=True)
except:
url = self._host + query._getPath() + "?" + urllib.parse.urlencode(allParams, doseq=True)
return url
    def getLastHeaders(self):
        """
        return the headers returned in the response object of the last executed request
        (reset to an empty dict at the start of each request)
        """
        return self._headers
    def getLastHeader(self, headerName, default = None):
        """
        get a value of the header headerName that was set in the headers in the last response object
        @param default: value returned when the header is not present
        """
        return self._headers.get(headerName, default)
    def printLastReqStats(self):
        """
        print some statistics about the last executed request

        NOTE(review): assumes the req-* headers are present; getLastHeader
        returns None otherwise and the string concatenation would raise
        TypeError — confirm this is only called after a successful request.
        """
        print("Tokens used by the request: " + self.getLastHeader("req-tokens"))
        print("Performed action: " + self.getLastHeader("req-action"))
        print("Was archive used for the query: " + (self.getLastHeader("req-archive") == "1" and "Yes" or "No"))
    def getLastReqArchiveUse(self):
        """
        return True or False depending on whether the last request used the archive or not
        (the backend reports this in the "req-archive" header as "1"/"0")
        """
        return self.getLastHeader("req-archive", "0") == "1"
    def execQuery(self, query, allowUseOfArchive = None):
        """
        main method for executing the search queries.
        @param query: instance of Query class
        @param allowUseOfArchive: potentially override the value set when constructing EventRegistry class.
            If not None set it to boolean to determine if the request can be executed on the archive data or not
            If left to None then the value set in the EventRegistry constructor will be used
        @returns: the parsed json response of the query
        """
        assert isinstance(query, QueryParamsBase), "query parameter should be an instance of a class that has Query as a base class, such as QueryArticles or QueryEvents"
        # don't modify original query params
        allParams = query._getQueryParams()
        # make the request
        respInfo = self.jsonRequest(query._getPath(), allParams, allowUseOfArchive = allowUseOfArchive)
        return respInfo
def jsonRequest(self, methodUrl, paramDict, customLogFName = None, allowUseOfArchive = None):
"""
make a request for json data. repeat it _repeatFailedRequestCount times, if they fail (indefinitely if _repeatFailedRequestCount = -1)
@param methodUrl: url on er (e.g. "/api/v1/article")
@param paramDict: optional object containing the parameters to include in the request (e.g. { "articleUri": "123412342" }).
@param customLogFName: potentially a file name where the request information can be logged into
@param allowUseOfArchive: potentially override the value set when constructing EventRegistry class.
If not None set it to boolean to determine if the request can be executed on the archive data or not
If left to None then the value set in the EventRegistry constructor will be used
"""
self._sleepIfNecessary()
self._lastException = None
self._lock.acquire()
if self._logRequests:
try:
with open(customLogFName or self._requestLogFName, "a") as log:
if paramDict != None:
log.write("# " + json.dumps(paramDict) + "\n")
log.write(methodUrl + "\n\n")
except Exception as ex:
self._lastException = ex
if paramDict == None:
paramDict = {}
# if we have api key then add it to the paramDict
if self._apiKey:
paramDict["apiKey"] = self._apiKey
# if we want to ignore the archive, set the flag
if allowUseOfArchive != None:
if not allowUseOfArchive:
paramDict["forceMaxDataTimeWindow"] = 31
# if we didn't override the parameter then check what we've set when constructing the EventRegistry class
elif self._allowUseOfArchive == False:
paramDict["forceMaxDataTimeWindow"] = 31
# if we also have some extra parameters, then set those too
if self._extraParams:
paramDict.update(self._extraParams)
tryCount = 0
self._headers = {} # reset any past data
returnData = None
respInfo = None
url = self._host + methodUrl
while self._repeatFailedRequestCount < 0 or tryCount < self._repeatFailedRequestCount:
tryCount += 1
try:
# make the request
respInfo = self._reqSession.post(url, json = paramDict, timeout=60)
# remember the returned headers
self._headers = respInfo.headers
# if we got some error codes print the error and repeat the request after a short time period
if respInfo.status_code != 200:
raise Exception(respInfo.text)
# did we get a warning. if yes, print it
if self.getLastHeader("warning"):
logger.warning("=========== WARNING ===========\n%s\n===============================" % (self.getLastHeader("warning")))
# remember the available requests
self._dailyAvailableRequests = tryParseInt(self.getLastHeader("x-ratelimit-limit", ""), val = -1)
self._remainingAvailableRequests = tryParseInt(self.getLastHeader("x-ratelimit-remaining", ""), val = -1)
returnData = respInfo.json()
break
except Exception as ex:
self._lastException = ex
if self._verboseOutput:
logger.error("Event Registry exception while executing the request:")
logger.error("endpoint: %s\nParams: %s" % (url, json.dumps(paramDict, indent=4)))
self.printLastException()
# in case of invalid input parameters, don't try to repeat the search but we simply raise the same exception again
if respInfo != None and respInfo.status_code in self._stopStatusCodes:
break
# in case of the other exceptions (maybe the service is temporarily unavailable) we try to repeat the query
logger.info("The request will be automatically repeated in 3 seconds...")
time.sleep(5) # sleep for X seconds on error
self._lock.release()
if returnData == None:
raise self._lastException or Exception("No valid return data provided")
return returnData
def jsonRequestAnalytics(self, methodUrl, paramDict):
"""
call the analytics service to execute a method like annotation, categorization, etc.
@param methodUrl: api endpoint url to call
@param paramDict: a dictionary with values to send to the api endpoint
"""
if self._apiKey:
paramDict["apiKey"] = self._apiKey
self._lock.acquire()
returnData = None
respInfo = None
self._lastException = None
self._headers = {} # reset any past data
tryCount = 0
while self._repeatFailedRequestCount < 0 or tryCount < self._repeatFailedRequestCount:
tryCount += 1
try:
url = self._hostAnalytics + methodUrl
# make the request
respInfo = self._reqSession.post(url, json = paramDict, timeout=60)
# remember the returned headers
self._headers = respInfo.headers
# if we got some error codes print the error and repeat the request after a short time period
if respInfo.status_code != 200:
raise Exception(respInfo.text)
returnData = respInfo.json()
break
except Exception as ex:
self._lastException = ex
if self._verboseOutput:
logger.error("Event Registry Analytics exception while executing the request:")
logger.error("endpoint: %s\nParams: %s" % (url, json.dumps(paramDict, indent=4)))
self.printLastException()
# in case of invalid input parameters, don't try to repeat the search but we simply raise the same exception again
if respInfo != None and respInfo.status_code in self._stopStatusCodes:
break
logger.info("The request will be automatically repeated in 3 seconds...")
time.sleep(5) # sleep for X seconds on error
self._lock.release()
if returnData == None:
raise self._lastException or Exception("No valid return data provided")
return returnData
#
# suggestion methods - return type is a list of matching items
def suggestConcepts(self, prefix, sources = ["concepts"], lang = "eng", conceptLang = "eng", page = 1, count = 20, returnInfo = ReturnInfo(), **kwargs):
"""
return a list of concepts that contain the given prefix. returned matching concepts are sorted based on their
frequency of occurence in news (from most to least frequent)
@param prefix: input text that should be contained in the concept
@param sources: what types of concepts should be returned. valid values are person, loc, org, wiki, entities (== person + loc + org), concepts (== entities + wiki)
@param lang: language in which the prefix is specified
@param conceptLang: languages in which the label(s) for the concepts are to be returned
@param page: page of the results (1, 2, ...)
@param count: number of returned suggestions per page
@param returnInfo: what details about concepts should be included in the returned information
"""
assert page > 0, "page parameter should be above 0"
params = { "prefix": prefix, "source": sources, "lang": lang, "conceptLang": conceptLang, "page": page, "count": count}
params.update(returnInfo.getParams())
params.update(kwargs)
return self.jsonRequest("/api/v1/suggestConceptsFast", params)
    def suggestCategories(self, prefix, page = 1, count = 20, returnInfo = ReturnInfo(), **kwargs):
        """
        return a list of dmoz categories that contain the prefix
        @param prefix: input text that should be contained in the category name
        @param page: page of the results (1, 2, ...)
        @param count: number of returned suggestions
        @param returnInfo: what details about categories should be included in the returned information

        NOTE(review): the returnInfo default is a single ReturnInfo() instance
        created at import time and shared across calls — fine as long as
        getParams() does not mutate it; confirm.
        """
        assert page > 0, "page parameter should be above 0"
        params = { "prefix": prefix, "page": page, "count": count }
        params.update(returnInfo.getParams())
        params.update(kwargs)
        return self.jsonRequest("/api/v1/suggestCategoriesFast", params)
def suggestNewsSources(self, prefix, dataType = ["news", "pr", "blog"], page = 1, count = 20, **kwargs):
"""
return a list of news sources that match the prefix
@param prefix: input text that should be contained in the source name or uri
@param dataType: suggest sources that provide content in these data types ("news", "pr", "blog" or a list of any of those)
@param page: page of results
@param count: number of returned suggestions
"""
assert page > 0, "page parameter should be above 0"
params = {"prefix": prefix, "dataType": dataType, "page": page, "count": count}
params.update(kwargs)
return self.jsonRequest("/api/v1/suggestSourcesFast", params)
    def suggestSourceGroups(self, prefix, page = 1, count = 20, **kwargs):
        """
        return a list of news source groups that match the prefix
        @param prefix: input text that should be contained in the source group name or uri
        @param page: page of the results (1, 2, ...)
        @param count: number of returned suggestions
        @param kwargs: any additional parameters to pass to the api call
        """
        assert page > 0, "page parameter should be above 0"
        params = { "prefix": prefix, "page": page, "count": count }
        params.update(kwargs)
        return self.jsonRequest("/api/v1/suggestSourceGroups", params)
def suggestLocations(self, prefix, sources = ["place", "country"], lang = "eng", count = 20, countryUri = None, sortByDistanceTo = None, returnInfo = ReturnInfo(), **kwargs):
"""
return a list of geo locations (cities or countries) that contain the prefix
@param prefix: input text that should be contained in the location name
@param source: what types of locations are we interested in. Possible options are "place" and "country"
@param lang: language in which the prefix is specified
@param count: number of returned suggestions
@param countryUri: if provided, then return only those locations that are inside the specified country
@param sortByDistanceTo: if provided, then return the locations sorted by the distance to the (lat, long) provided in the tuple
@param returnInfo: what details about locations should be included in the returned information
"""
params = { "prefix": prefix, "count": count, "source": sources, "lang": lang, "countryUri": countryUri or "" }
params.update(returnInfo.getParams())
params.update(kwargs)
if sortByDistanceTo:
assert isinstance(sortByDistanceTo, (tuple, list)), "sortByDistanceTo has to contain a tuple with latitude and longitude of the location"
assert len(sortByDistanceTo) == 2, "The sortByDistanceTo should contain two float numbers"
params["closeToLat"] = sortByDistanceTo[0]
params["closeToLon"] = sortByDistanceTo[1]
return self.jsonRequest("/api/v1/suggestLocationsFast", params)
def suggestLocationsAtCoordinate(self, latitude, longitude, radiusKm, limitToCities = False, lang = "eng", count = 20, ignoreNonWiki = True, returnInfo = ReturnInfo(), **kwargs):
"""
return a list of geo locations (cities or places) that are close to the provided (lat, long) values
@param latitude: latitude part of the coordinate
@param longitude: longitude part of the coordinate
@param radiusKm: radius in kilometres around the coordinates inside which the locations should be returned
@param limitToCities: limit the set of results only to cities (True) or also to general places (False)
@param lang: language in which the location label should be returned
@param count: number of returned suggestions
@param ignoreNonWiki: ignore locations that don't have a wiki page and can not be used for concept search
@param returnInfo: what details about locations should be included in the returned information
"""
assert isinstance(latitude, (int, float)), "The 'latitude' should be a number"
assert isinstance(longitude, (int, float)), "The 'longitude' should be a number"
params = { "action": "getLocationsAtCoordinate", "lat": latitude, "lon": longitude, "radius": radiusKm, "limitToCities": limitToCities, "count": count, "lang": lang }
params.update(returnInfo.getParams())
params.update(kwargs)
return self.jsonRequest("/api/v1/suggestLocationsFast", params)
def suggestSourcesAtCoordinate(self, latitude, longitude, radiusKm, count = 20, **kwargs):
"""
return a list of news sources that are close to the provided (lat, long) values
@param latitude: latitude part of the coordinate
@param longitude: longitude part of the coordinate
@param radiusKm: radius in kilometres around the coordinates inside which the news sources should be located
@param count: number of returned suggestions
"""
assert isinstance(latitude, (int, float)), "The 'latitude' should be a number"
assert isinstance(longitude, (int, float)), "The 'longitude' should be a number"
params = {"action": "getSourcesAtCoordinate", "lat": latitude, "lon": longitude, "radius": radiusKm, "count": count}
params.update(kwargs)
return self.jsonRequest("/api/v1/suggestSourcesFast", params)
def suggestSourcesAtPlace(self, conceptUri, dataType = "news", page = 1, count = 20, **kwargs):
"""
return a list of news sources that are close to the provided (lat, long) values
@param conceptUri: concept that represents a geographic location for which we would like to obtain a list of sources located at the place
@param dataType: type of the news source ("news", "pr", "blog" or a list of any of those)
@param page: page of the results (1, 2, ...)
@param count: number of returned sources
"""
params = {"action": "getSourcesAtPlace", "conceptUri": conceptUri, "page": page, "count": count, "dataType": dataType}
params.update(kwargs)
return self.jsonRequest("/api/v1/suggestSourcesFast", params)
def suggestAuthors(self, prefix, page = 1, count = 20, **kwargs):
"""
return a list of news sources that match the prefix
@param prefix: input text that should be contained in the author name and source url
@param page: page of results
@param count: number of returned suggestions
"""
assert page > 0, "page parameter should be above 0"
params = {"prefix": prefix, "page": page, "count": count}
params.update(kwargs)
return self.jsonRequest("/api/v1/suggestAuthorsFast", params)
def suggestConceptClasses(self, prefix, lang = "eng", conceptLang = "eng", source = ["dbpedia", "custom"], page = 1, count = 20, returnInfo = ReturnInfo(), **kwargs):
"""
return a list of concept classes that match the given prefix
@param prefix: input text that should be contained in the category name
@param lang: language in which the prefix is specified
@param conceptLang: languages in which the label(s) for the concepts are to be returned
@param source: what types of concepts classes should be returned. valid values are 'dbpedia' or 'custom'
@param page: page of the results (1, 2, ...)
@param count: number of returned suggestions
@param returnInfo: what details about categories should be included in the returned information
"""
assert page > 0, "page parameter should be above 0"
params = { "prefix": prefix, "lang": lang, "conceptLang": conceptLang, "source": source, "page": page, "count": count }
params.update(returnInfo.getParams())
params.update(kwargs)
return self.jsonRequest("/api/v1/suggestConceptClasses", params)
#
# get info methods - return type is a single item that is the best match to the given input
def getConceptUri(self, conceptLabel, lang = "eng", sources = ["concepts"]):
"""
return a concept uri that is the best match for the given concept label
if there are multiple matches for the given conceptLabel, they are sorted based on their frequency of occurence in news (most to least frequent)
@param conceptLabel: partial or full name of the concept for which to return the concept uri
@param sources: what types of concepts should be returned. valid values are person, loc, org, wiki, entities (== person + loc + org), concepts (== entities + wiki)
"""
matches = self.suggestConcepts(conceptLabel, lang = lang, sources = sources)
if matches != None and isinstance(matches, list) and len(matches) > 0 and "uri" in matches[0]:
return matches[0]["uri"]
return None
def getLocationUri(self, locationLabel, lang = "eng", sources = ["place", "country"], countryUri = None, sortByDistanceTo = None):
"""
return a location uri that is the best match for the given location label
@param locationLabel: partial or full location name for which to return the location uri
@param sources: what types of locations are we interested in. Possible options are "place" and "country"
@param countryUri: if set, then filter the possible locatiosn to the locations from that country
@param sortByDistanceTo: sort candidates by distance to the given (lat, long) pair
"""
matches = self.suggestLocations(locationLabel, sources = sources, lang = lang, countryUri = countryUri, sortByDistanceTo = sortByDistanceTo)
if matches != None and isinstance(matches, list) and len(matches) > 0 and "wikiUri" in matches[0]:
return matches[0]["wikiUri"]
return None
def getCategoryUri(self, categoryLabel):
"""
return a category uri that is the best match for the given label
@param categoryLabel: partial or full name of the category for which to return category uri
"""
matches = self.suggestCategories(categoryLabel)
if matches != None and isinstance(matches, list) and len(matches) > 0 and "uri" in matches[0]:
return matches[0]["uri"]
return None
def getNewsSourceUri(self, sourceName, dataType = ["news", "pr", "blog"]):
"""
return the news source that best matches the source name
@param sourceName: partial or full name of the source or source uri for which to return source uri
@param dataType: return the source uri that provides content of these data types ("news", "pr", "blog" or a list of any of those)
"""
matches = self.suggestNewsSources(sourceName, dataType = dataType)
if matches != None and isinstance(matches, list) and len(matches) > 0 and "uri" in matches[0]:
return matches[0]["uri"]
return None
def getSourceUri(self, sourceName, dataType=["news", "pr", "blog"]):
"""
alternative (shorter) name for the method getNewsSourceUri()
"""
return self.getNewsSourceUri(sourceName, dataType)
def getSourceGroupUri(self, sourceGroupName):
"""
return the URI of the source group that best matches the name
@param sourceGroupName: partial or full name of the source group
"""
matches = self.suggestSourceGroups(sourceGroupName)
if matches != None and isinstance(matches, list) and len(matches) > 0 and "uri" in matches[0]:
return matches[0]["uri"]
return None
def getConceptClassUri(self, classLabel, lang = "eng"):
"""
return a uri of the concept class that is the best match for the given label
@param classLabel: partial or full name of the concept class for which to return class uri
"""
matches = self.suggestConceptClasses(classLabel, lang = lang)
if matches != None and isinstance(matches, list) and len(matches) > 0 and "uri" in matches[0]:
return matches[0]["uri"]
return None
def getConceptInfo(self, conceptUri,
returnInfo = ReturnInfo(conceptInfo = ConceptInfoFlags(
synonyms = True, image = True, description = True))):
"""
return detailed information about a particular concept
@param conceptUri: uri of the concept
@param returnInfo: what details about the concept should be included in the returned information
"""
params = returnInfo.getParams()
params.update({"uri": conceptUri })
return self.jsonRequest("/api/v1/concept/getInfo", params)
def getAuthorUri(self, authorName):
"""
return author uri that is the best match for the given author name (and potentially source url)
if there are multiple matches for the given author name, they are sorted based on the number of articles they have written (from most to least frequent)
@param authorName: partial or full name of the author, potentially also containing the source url (e.g. "george brown nytimes")
"""
matches = self.suggestAuthors(authorName)
if matches != None and isinstance(matches, list) and len(matches) > 0 and "uri" in matches[0]:
return matches[0]["uri"]
return None
@staticmethod
def getUriFromUriWgt(uriWgtList):
"""
convert an array of items that contain uri:wgt to a list of items with uri only. Used for QueryArticle and QueryEvent classes
"""
assert isinstance(uriWgtList, list), "uriWgtList has to be a list of strings that represent article uris"
uriList = [uriWgt.split(":")[0] for uriWgt in uriWgtList]
return uriList
#
# additional utility methods
def getArticleUris(self, articleUrls):
"""
if you have article urls and you want to query them in ER you first have to obtain their uris in the ER.
@param articleUrls a single article url or a list of article urls
@returns returns dict where key is article url and value is either None if no match found or a string with article URI.
"""
assert isinstance(articleUrls, (six.string_types, list)), "Expected a single article url or a list of urls"
return self.jsonRequest("/api/v1/articleMapper", { "articleUrl": articleUrls })
def getSourceGroups(self):
"""return the list of URIs of all known source groups"""
ret = self.jsonRequest("/api/v1/sourceGroup/getSourceGroups", {})
return ret
def getSourceGroup(self, sourceGroupUri):
"""return info about the source group"""
ret = self.jsonRequest("/api/v1/sourceGroup/getSourceGroupInfo", { "uri": sourceGroupUri })
return ret
#
# internal methods
def _sleepIfNecessary(self):
"""ensure that queries are not made too fast"""
t = time.time()
if t - self._lastQueryTime < self._minDelayBetweenRequests:
time.sleep(self._minDelayBetweenRequests - (t - self._lastQueryTime))
self._lastQueryTime = t
class ArticleMapper:
    """Maps article urls to ER article uris, optionally caching the results."""

    def __init__(self, er, rememberMappings = True):
        """
        @param er: EventRegistry instance used to perform the lookups
        @param rememberMappings: when True, resolved mappings are cached so the
            same url is never looked up twice
        """
        self._er = er
        self._articleUrlToUri = {}
        self._rememberMappings = rememberMappings

    def getArticleUri(self, articleUrl):
        """
        Given an article url, return the matching article uri value (which may
        represent zero, one or more uris; returned uris are not guaranteed to
        still be valid since duplicates of low-importance sources get pruned).
        @param articleUrl: string containing the article url
        @returns the mapped value for the url, or None if no match was found
        """
        # serve from the cache when possible
        if articleUrl in self._articleUrlToUri:
            return self._articleUrlToUri[articleUrl]
        response = self._er.getArticleUris(articleUrl)
        if not response or articleUrl not in response:
            return None
        uri = response[articleUrl]
        if self._rememberMappings:
            self._articleUrlToUri[articleUrl] = uri
        return uri
|
'''
Script to sort out the tags imported from ca.ckan.net to thedatahub.org and
got mangled in the process.
'''
import re
from optparse import OptionParser
import copy
import ckanclient
from status import Status
def sort_out_tags(source_ckan_uri,
                  dest_ckan_uri, dest_api_key,
                  ):
    '''Repair tags on the destination CKAN that got mangled (had '-', '.' and
    '_' stripped) when packages were imported from the source CKAN.

    source_ckan_uri - API URI of the CKAN instance holding the original tags
    dest_ckan_uri   - API URI of the CKAN instance whose tags got mangled
    dest_api_key    - API key authorised to edit packages on the destination
    '''
    ckan1 = ckanclient.CkanClient(base_location=source_ckan_uri)
    ckan2 = ckanclient.CkanClient(base_location=dest_ckan_uri,
                                  api_key=dest_api_key)
    # ensure group exists
    group = 'country-ca'
    assert group in set(ckan2.group_register_get())
    group_to_change = 'canadagov'
    # work out tag mappings: reproduce the mangling for every original tag so
    # each mangled tag can be mapped back to a cleaned-up replacement
    tag_status = Status('tag mapping')
    tag_replace_map = {}
    source_tags = ckan1.tag_register_get()
    for tag in source_tags:
        # this is what the import did to the tag
        mangled_tag = re.sub('[-._]', '', tag)
        replacement_tag = tag
        # Change underscores to hyphens
        replacement_tag = replacement_tag.replace('_', '-')
        # Remove trailing punctuation
        if replacement_tag[-1] in '_-.':
            replacement_tag = replacement_tag[:-1]
        # likewise remove leading punctuation
        if replacement_tag[0] in '_-.':
            replacement_tag = replacement_tag[1:]
        if mangled_tag == replacement_tag:
            # mangling did not change this tag - no mapping needed
            tag_status.record('Unchanged', mangled_tag, do_print=False)
            continue
        if mangled_tag in tag_replace_map and tag_replace_map[mangled_tag] != replacement_tag:
            # two different source tags collapse to the same mangled form;
            # the later one silently wins below
            print 'Warning - can\'t differentiate %s : %s / %s' % \
                  (mangled_tag, tag_replace_map[mangled_tag], replacement_tag)
        tag_status.record('Mapping added', '%s:%s' % (mangled_tag, replacement_tag), do_print=False)
        tag_replace_map[mangled_tag] = replacement_tag
    # NOTE(review): example_map is never used afterwards - kept for debugging?
    example_map = tag_replace_map.items()[0]
    print tag_status
    # Custom mappings
    tag_replace_map['metaimportedfromcackannet'] = 'meta.imported-from-ca-ckan-net'
    # edit packages: map every tag of every package in the group through
    # tag_replace_map and push the changed packages back
    pkg_status = Status('Packages')
    pkgs = ckan2.group_entity_get(group)['packages']
    print 'Packages in the group: %i' % len(pkgs)
    for pkg_name in pkgs:
        pkg = ckan2.package_entity_get(pkg_name)
        # deep copy so we can detect whether anything actually changed
        original_pkg = copy.deepcopy(pkg)
        # Change tags
        edited_tags = [tag_replace_map.get(tag, tag) for tag in pkg['tags']]
        if 'canada' in edited_tags:
            edited_tags.remove('canada')
        if group_to_change in pkg['groups']:
            # move the package out of the old group and tag it instead
            pkg['groups'].remove(group_to_change)
            edited_tags.append('canada-gov')
        if set(pkg['tags']) != set(edited_tags):
            pkg['tags'] = edited_tags
            print '%s: %r -> %r' % (pkg_name, sorted(original_pkg['tags']), sorted(edited_tags))
        if pkg == original_pkg:
            pkg_status.record('Unchanged', pkg_name)
            continue
        try:
            ckan2.package_entity_put(pkg)
        except ckanclient.CkanApiError, e:
            pkg_status.record('Error: %r' % e.args, pkg_name)
            continue
        pkg_status.record('Successfully changed', pkg_name)
    print pkg_status
# ---- Command-line entry point -------------------------------------------
# NOTE(review): this runs at import time; there is no `if __name__ == '__main__'`
# guard in this script.
usage = '''%prog [OPTIONS] <source_ckan_api_uri> <destination_ckan_api_uri>
Recopy tags that got mangled in Canadian copy.'''
parser = OptionParser(usage=usage)
parser.add_option("-k", "--destination-ckan-api-key", dest="destination_ckan_api_key",
                  help="Destination CKAN's API key", metavar="API-KEY")
(options, args) = parser.parse_args()
# exactly two positional arguments: source and destination API URIs
assert len(args) == 2, 'The source and destination CKAN API URIs are the only two arguments. Found: %r' % args
source_ckan_uri, destination_ckan_uri = args
print 'Key: ', options.destination_ckan_api_key
sort_out_tags(source_ckan_uri,
              destination_ckan_uri,
              options.destination_ckan_api_key,
              )
|
from src.cli.commands import execute
def run():
    """Entry point: hand control to the CLI command dispatcher."""
    execute()


if __name__ == "__main__":
    run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Worked examples of numpy.loadtxt / numpy.savetxt usage."""
import numpy as np
from io import StringIO

# StringIO behaves like a file object
c = StringIO("0 1\n2 3")
print(np.loadtxt(c))

# structured dtype: one record per line with named, typed fields
d = StringIO("M 21 72\nF 35 58")
np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
                     'formats': ('S1', 'i4', 'f4')})

# pick specific columns and unpack them into separate arrays
c = StringIO("1,0,2\n3,0,4")
x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
print(x)
print(y)

# custom converter: a trailing '-' marks a negative value
s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')


def conv(fld):
    """Parse a number where a trailing '-' means the value is negative.

    Fix: depending on the numpy version the raw field is delivered as bytes
    or as str (numpy >= 1.14 decodes fields before calling converters), so
    normalize to str first instead of assuming bytes.
    """
    if isinstance(fld, bytes):
        fld = fld.decode('latin1')
    return -float(fld[:-1]) if fld.endswith('-') else float(fld)


print(np.loadtxt(s, converters={0: conv, 1: conv}))

# savetxt examples
x = y = z = np.arange(0.0, 5.0, 1.0)
np.savetxt('test.out', x, delimiter=',')    # x is an array
np.savetxt('test1.out', (x, y, z))          # x, y, z equal sized 1D arrays
np.savetxt('test2.out', x, fmt='%1.4e')     # use exponential notation
|
# -*- coding: UTF-8 -*-#
from builtins import object
from builtins import str
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import APIException
from action.serializers import ActionSerializer
from dataops import ops, pandas_db
from table.serializers import DataFramePandasField, ViewSerializer
from workflow.column_serializers import ColumnSerializer
from .models import Workflow
class WorkflowListSerializer(serializers.ModelSerializer):
    """List/create serializer exposing only the workflow's basic fields."""

    def create(self, validated_data, **kwargs):
        """Create an empty workflow owned by the requesting user.

        Validates that 'attributes' is a flat str -> str dictionary; any
        creation failure is cleaned up and reported as an APIException.
        """
        attributes = validated_data.get('attributes', {})
        if not isinstance(attributes, dict):
            raise APIException(
                _('Attributes must be a dictionary of (string, string) pairs.')
            )

        has_bad_pair = any(not isinstance(key, str) or not isinstance(value, str)
                           for key, value in attributes.items())
        if has_bad_pair:
            raise APIException(_('Attributes must be a dictionary (str, str)'))

        workflow_obj = None
        try:
            workflow_obj = Workflow(
                user=self.context['request'].user,
                name=validated_data['name'],
                description_text=validated_data.get('description_text', ''),
                nrows=0,
                ncols=0,
                attributes=attributes
            )
            workflow_obj.save()
        except Exception:
            # remove a partially created object before reporting the failure
            if workflow_obj and workflow_obj.id:
                workflow_obj.delete()
            raise APIException(_('Workflow could not be created.'))

        return workflow_obj

    class Meta(object):
        model = Workflow
        fields = ('id', 'name', 'description_text', 'attributes')
class WorkflowExportSerializer(serializers.ModelSerializer):
    """
    This serializer is used to export Workflows selecting a subset of
    actions. Since the SerializerMethodField used for the selection is a
    read_only field, the import is managed by a different serializer that
    uses a regular one for the action field (see WorkflowImportSerializer)
    """

    actions = serializers.SerializerMethodField('get_filtered_actions')

    data_frame = DataFramePandasField(
        required=False,
        help_text=_('This field must be the Base64 encoded '
                    'result of pandas.to_pickle() function')
    )

    columns = ColumnSerializer(many=True, required=False)

    views = ViewSerializer(many=True, required=False)

    version = serializers.CharField(read_only=True,
                                    default='NO VERSION',
                                    allow_blank=True,
                                    label="OnTask Version",
                                    help_text=_("To guarantee compability"))

    def get_filtered_actions(self, workflow):
        """Serialize only the actions listed in context['selected_actions']."""
        # Get the subset of actions specified in the context
        action_list = self.context.get('selected_actions', [])
        if not action_list:
            # No action needs to be included, no need to call the action
            # serializer
            return []

        # Execute the query set
        query_set = workflow.actions.filter(id__in=action_list)

        # Serialize the content and return data
        serializer = ActionSerializer(
            instance=query_set,
            many=True,
            required=False)

        return serializer.data

    def create(self, validated_data, **kwargs):
        """Create a workflow together with its columns, data frame, actions
        and views. Any failure rolls back everything created so far
        (including the data frame table) and re-raises the exception."""
        # Initial values
        workflow_obj = None
        try:
            workflow_obj = Workflow(
                user=self.context['user'],
                name=self.context['name'],
                description_text=validated_data['description_text'],
                nrows=0,
                ncols=0,
                attributes=validated_data['attributes'],
                query_builder_ops=validated_data.get('query_builder_ops', {})
            )
            workflow_obj.save()

            # Create the columns
            column_data = ColumnSerializer(
                data=validated_data.get('columns', []),
                many=True,
                context={'workflow': workflow_obj})
            # And save its content
            if column_data.is_valid():
                column_data.save()
            else:
                raise Exception(_('Unable to save column information'))

            # If there is any column with position = 0, recompute (this is to
            # guarantee backward compatibility.
            if workflow_obj.columns.filter(position=0).exists():
                for idx, c in enumerate(workflow_obj.columns.all()):
                    c.position = idx + 1
                    c.save()

            # Load the data frame
            data_frame = validated_data.get('data_frame', None)
            if data_frame is not None:
                ops.store_dataframe_in_db(data_frame,
                                          workflow_obj.id,
                                          reset_keys=False)

            # Reconcile now the information in workflow and columns with the
            # one loaded
            workflow_obj.data_frame_table_name = \
                pandas_db.create_table_name(workflow_obj.pk)

            workflow_obj.ncols = validated_data['ncols']
            workflow_obj.nrows = validated_data['nrows']

            workflow_obj.save()

            # Create the actions pointing to the workflow
            action_data = ActionSerializer(
                data=validated_data.get('actions', []),
                many=True,
                context={'workflow': workflow_obj}
            )

            if action_data.is_valid():
                action_data.save()
            else:
                # fixed copy/paste bug: message used to say "column information"
                raise Exception(_('Unable to save action information'))

            # Create the views pointing to the workflow
            view_data = ViewSerializer(
                data=validated_data.get('views', []),
                many=True,
                context={'workflow': workflow_obj}
            )

            if view_data.is_valid():
                view_data.save()
            else:
                # fixed copy/paste bug: message used to say "column information"
                raise Exception(_('Unable to save view information'))
        except Exception:
            # Get rid of the objects created
            if workflow_obj:
                if workflow_obj.has_data_frame():
                    pandas_db.delete_table(workflow_obj.id)
                if workflow_obj.id:
                    workflow_obj.delete()
            raise

        return workflow_obj

    class Meta(object):
        model = Workflow
        # fields = ('description_text', 'nrows', 'ncols', 'attributes',
        #           'query_builder_ops', 'columns', 'data_frame', 'actions')
        exclude = ('id', 'user', 'created', 'modified', 'data_frame_table_name',
                   'session_key', 'shared')
class WorkflowImportSerializer(WorkflowExportSerializer):
    """
    This serializer simply overwrites the actions field to make it writeable.
    The rest of the functionality is identical to the WorkflowExportSerializer
    """

    # Writable counterpart of the parent's read-only SerializerMethodField
    actions = ActionSerializer(many=True, required=False)
class WorkflowLockSerializer(serializers.Serializer):
    """
    Serializer to transmit the boolean value of the lock in a workflow
    """

    # True when the workflow is currently locked
    lock = serializers.BooleanField()
|
"""
Created: 26 April 2018
Last Updated: 26 April 2018
Dan Marley
daniel.edison.marley@cernSPAMNOT.ch
Texas A&M University
-----
Base class for performing unfolding
"""
import json
import datetime
import uproot
import numpy as np
import pandas as pd
import util
from unfoldingPlotter import UnfoldingPlotter
# fix random seed for reproducibility
# NOTE: module-level side effect -- seeds numpy's global RNG on import
np.random.seed(2018)
class Unfolding(object):
    """Base class for performing an unfolding measurement.

    Subclasses/callers are expected to set the public attributes declared in
    __init__ and implement execute() / load_hep_data(); this base class only
    wires up the message service and the plotter.
    """
    def __init__(self):
        # date string used to tag outputs, e.g. '26Apr2018'
        self.date = datetime.date.today().strftime('%d%b%Y')

        ## Handling unfolding objects and data -- set in the class
        self.output_dir = ""         # directory for storing outputs
        self.plotter = UnfoldingPlotter()  # class for plotting relevant unfolding information
        self.variables = []          # variables to load from the dataframe ('deltay','mtt',etc.)
        self.backgrounds = []        # names of background samples (e.g., w+jets, z+jets, etc.)
        self.stat_only = False       # only consider the statistical uncertainty
        self.xsections = None        # normalization uncertainties, e.g., util.read_config('xsection.txt')
        self.objsysts = None         # systematic uncertainties for each detector-related object
        self.regularization = None   # regularization to use

    def initialize(self): #,config):
        """Initialize a few parameters after they've been set by user"""
        # NOTE(review): relies on self.verbose_level being set by the caller
        # before initialize() is invoked -- it is not defined in __init__.
        self.msg_svc = util.VERBOSE()
        self.msg_svc.level = self.verbose_level
        self.msg_svc.initialize()

        ## -- Plotting framework
        self.plotter.output_dir = self.output_dir
        self.plotter.image_format = 'pdf' # must use .pdf at the LPC

        return

    def execute(self):
        """
        Perform unfolding (to be implemented by a subclass)

        Normalization uncertainties -- keep stored in a text file for easy, global access
        """
        pass

    def load_hep_data(self):
        """
        Load the physics data (histograms) for unfolding (to be implemented by a subclass)

        ROOT I/O
        """
        pass

    def diagnostics(self,pre=False,post=False):
        """Diagnostic tests of the Unfolding (to be implemented by a subclass)"""
        pass
## THE END ##
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
import numpy as np
import torch
from reagent.core import types as rlt
from reagent.core.types import PreprocessedRankingInput
from reagent.training.reward_network_trainer import RewardNetTrainer
logger = logging.getLogger(__name__)
class RewardNetEvaluator:
    """Evaluate reward networks.

    Accumulates per-batch losses/rewards across evaluate() calls and
    summarizes them (and snapshots the best model) in
    evaluate_post_training().
    """

    def __init__(self, trainer: RewardNetTrainer) -> None:
        self.trainer = trainer
        # per-batch artifacts accumulated across evaluate() calls
        self.loss = []
        self.rewards = []
        self.pred_rewards = []
        # snapshot of the model with the lowest mean loss seen so far
        self.best_model = None
        self.best_model_loss = 1e9

    # pyre-fixme[56]: Decorator `torch.no_grad(...)` could not be called, because
    # its type `no_grad` is not callable.
    @torch.no_grad()
    def evaluate(self, eval_batch: PreprocessedRankingInput):
        """Run the reward net on one batch and accumulate loss/reward stats."""
        reward_net = self.trainer.reward_net
        # remember train/eval mode so it can be restored afterwards
        reward_net_prev_mode = reward_net.training
        reward_net.eval()

        # ranking inputs carry the reward as slate_reward; others as reward
        if isinstance(eval_batch, rlt.PreprocessedRankingInput):
            reward = eval_batch.slate_reward
        else:
            reward = eval_batch.reward
        assert reward is not None

        pred_reward = reward_net(eval_batch).predicted_reward
        # pyre-fixme[58]: `/` is not supported for operand types `float` and
        # `Optional[torch.Tensor]`.
        # inverse-propensity weight of the target output sequence
        weight = 1.0 / eval_batch.tgt_out_probs
        loss = self.trainer.loss_fn(pred_reward, reward, weight)

        self.loss.append(loss.flatten().detach().cpu())
        self.rewards.append(reward.flatten().detach().cpu())
        self.pred_rewards.append(pred_reward.flatten().detach().cpu())

        # restore whatever mode the network was in before evaluation
        reward_net.train(reward_net_prev_mode)

    @torch.no_grad()
    def evaluate_post_training(self):
        """Summarize accumulated batches, reset the buffers and keep a copy
        of the model if it achieved the best (lowest) mean loss so far."""
        mean_loss = np.mean(self.loss)
        logger.info(f"Evaluation {self.trainer.loss_type}={mean_loss}")
        eval_res = {
            "loss": mean_loss,
            "rewards": torch.cat(self.rewards),
            "pred_rewards": torch.cat(self.pred_rewards),
        }
        # clear the accumulators for the next evaluation round
        self.loss = []
        self.rewards = []
        self.pred_rewards = []

        if mean_loss < self.best_model_loss:
            self.best_model_loss = mean_loss
            self.best_model = copy.deepcopy(self.trainer.reward_net)

        return eval_res
|
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from itertools import combinations
from typing import List
from builders.ameba import AmebaApp, AmebaBoard, AmebaBuilder
from builders.android import AndroidApp, AndroidBoard, AndroidBuilder
from builders.cc13x2x7_26x2x7 import cc13x2x7_26x2x7App, cc13x2x7_26x2x7Builder
from builders.cyw30739 import Cyw30739App, Cyw30739Board, Cyw30739Builder
from builders.efr32 import Efr32App, Efr32Board, Efr32Builder
from builders.esp32 import Esp32App, Esp32Board, Esp32Builder
from builders.host import HostApp, HostBoard, HostBuilder
from builders.infineon import InfineonApp, InfineonBoard, InfineonBuilder
from builders.k32w import K32WApp, K32WBuilder
from builders.mbed import MbedApp, MbedBoard, MbedBuilder, MbedProfile
from builders.nrf import NrfApp, NrfBoard, NrfConnectBuilder
from builders.qpg import QpgApp, QpgBoard, QpgBuilder
from builders.telink import TelinkApp, TelinkBoard, TelinkBuilder
from builders.tizen import TizenApp, TizenBoard, TizenBuilder
from builders.bl602 import Bl602App, Bl602Board, Bl602Builder
class Target:
    """Represents a named build target.

    Bundles the builder class to use together with the keyword arguments
    required to produce the specified build. Instances are treated as
    immutable: Extend()/GlobBlacklist() return modified clones.
    """

    def __init__(self, name, builder_class, **kwargs):
        self.name = name
        self.builder_class = builder_class
        self.glob_blacklist_reason = None
        self.create_kw_args = kwargs

    def Clone(self):
        """Creates a clone of self."""
        duplicate = Target(self.name, self.builder_class,
                           **self.create_kw_args.copy())
        duplicate.glob_blacklist_reason = self.glob_blacklist_reason
        return duplicate

    def Extend(self, suffix, **kargs):
        """Return a clone named '<name>-<suffix>' carrying extra build args."""
        extended = self.Clone()
        extended.name = extended.name + "-" + suffix
        extended.create_kw_args.update(kargs)
        return extended

    def Create(self, runner, repository_path: str, output_prefix: str,
               enable_flashbundle: bool):
        """Instantiate and configure the builder for this target."""
        builder = self.builder_class(
            repository_path, runner=runner, **self.create_kw_args)
        builder.target = self
        builder.identifier = self.name
        builder.output_dir = os.path.join(output_prefix, self.name)
        builder.enable_flashbundle(enable_flashbundle)
        return builder

    def GlobBlacklist(self, reason):
        """Return a clone excluded from glob/'build all' runs, recording why."""
        excluded = self.Clone()
        if excluded.glob_blacklist_reason:
            # append to any previously recorded reason
            excluded.glob_blacklist_reason = (
                excluded.glob_blacklist_reason + ", " + reason)
        else:
            excluded.glob_blacklist_reason = reason
        return excluded

    @property
    def IsGlobBlacklisted(self):
        return self.glob_blacklist_reason is not None

    @property
    def GlobBlacklistReason(self):
        return self.glob_blacklist_reason
class AcceptAnyName:
    """Name filter that accepts every target name."""

    def Accept(self, name: str):
        return True
class AcceptNameWithSubstrings:
    """Name filter accepting names that contain any of the given substrings."""

    def __init__(self, substr: List[str]):
        self.substr = substr

    def Accept(self, name: str):
        return any(fragment in name for fragment in self.substr)
class BuildVariant:
    """A named build option applied on top of a base target.

    Attributes:
      name: suffix identifying the variant
      validator: object with Accept(name) deciding if the variant applies
      conflicts: variant names that cannot be combined with this one
      requires: variant names that must also be selected
      buildargs: extra builder arguments this variant contributes
    """

    def __init__(self, name: str, validator=AcceptAnyName(),
                 conflicts: List[str] = None, requires: List[str] = None,
                 **buildargs):
        self.name = name
        self.validator = validator
        # fixed mutable-default-argument bug: the old `conflicts=[]` /
        # `requires=[]` defaults were shared across all instances, so mutating
        # one instance's list would leak into every other instance
        self.conflicts = list(conflicts) if conflicts else []
        self.requires = list(requires) if requires else []
        self.buildargs = buildargs
def HasConflicts(items: List[BuildVariant]) -> bool:
    """Return True if any pair of items lists the other as a conflict."""
    return any((first.name in second.conflicts) or (second.name in first.conflicts)
               for first, second in combinations(items, 2))
def AllRequirementsMet(items: List[BuildVariant]) -> bool:
    """
    Check that item.requires is satisfied for all items in the given list
    """
    selected = {item.name for item in items}
    return all(requirement in selected
               for item in items
               for requirement in item.requires)
class VariantBuilder:
    """Expands a set of base targets into all acceptable build-variant targets.
    """

    def __init__(self, targets: List[Target] = []):
        # note the clone in case the default arg is used
        self.targets = targets[:]
        self.variants = []
        self.glob_whitelist = []

    def WhitelistVariantNameForGlob(self, name):
        """
        Allow the named variant combination to be selected by glob/'build all'
        runs. Variants are glob-blacklisted by default so that 'build all'
        does not explode into every possible combination.
        """
        self.glob_whitelist.append(name)

    def AppendVariant(self, **args):
        """
        Register another accepted variant; the arguments are passed straight
        to the BuildVariant constructor.

        Example usage:
           builder.AppendVariant(name="ipv6only", enable_ipv4=False)
        """
        self.variants.append(BuildVariant(**args))

    def AllVariants(self):
        """
        Yield each base target followed by every conflict-free,
        requirement-satisfying combination of its applicable variants.
        Combinations not on the glob whitelist are glob-blacklisted.
        """
        for base in self.targets:
            yield base

            # variants whose validator rejects this target do not apply
            applicable = [variant for variant in self.variants
                          if variant.validator.Accept(base.name)]

            # try every combination size, smallest first
            for combo_size in range(1, len(applicable) + 1):
                for combo in combinations(applicable, combo_size):
                    if HasConflicts(combo):
                        continue
                    if not AllRequirementsMet(combo):
                        continue

                    # extend the base target with every option in the combo
                    candidate = base.Clone()
                    for option in combo:
                        candidate = candidate.Extend(
                            option.name, **option.buildargs)

                    # only whitelisted combinations stay glob-visible
                    combo_name = '-'.join(option.name for option in combo)
                    if combo_name not in self.glob_whitelist:
                        if not candidate.IsGlobBlacklisted:
                            candidate = candidate.GlobBlacklist(
                                'Reduce default build variants')

                    yield candidate
def HostTargets():
    """Yield all host build targets (native plus linux arm64 cross-compile)
    together with their accepted build variants.

    Fix: the AppendVariant calls below previously ended with stray trailing
    commas, turning each statement into a discarded one-element tuple.
    """
    target = Target(HostBoard.NATIVE.PlatformName(), HostBuilder)
    target_native = target.Extend(HostBoard.NATIVE.BoardName(), board=HostBoard.NATIVE)

    targets = [target_native]

    # x64 linux supports cross compile
    cross_compile = (HostBoard.NATIVE.PlatformName() == 'linux') and (HostBoard.NATIVE.BoardName() != HostBoard.ARM64.BoardName())
    if cross_compile:
        targets.append(target.Extend('arm64', board=HostBoard.ARM64))

    app_targets = []

    # Don't cross compile some builds
    app_targets.append(
        target_native.Extend('rpc-console', app=HostApp.RPC_CONSOLE))
    app_targets.append(
        target_native.Extend('tv-app', app=HostApp.TV_APP))
    app_targets.append(
        target_native.Extend('nl-test-runner', app=HostApp.NL_TEST_RUNNER))

    for target in targets:
        app_targets.append(target.Extend(
            'all-clusters', app=HostApp.ALL_CLUSTERS))
        app_targets.append(target.Extend('chip-tool', app=HostApp.CHIP_TOOL))
        app_targets.append(target.Extend('thermostat', app=HostApp.THERMOSTAT))
        app_targets.append(target.Extend('minmdns', app=HostApp.MIN_MDNS))
        app_targets.append(target.Extend('lock', app=HostApp.LOCK))
        app_targets.append(target.Extend('shell', app=HostApp.SHELL))
        app_targets.append(target.Extend(
            'ota-provider', app=HostApp.OTA_PROVIDER, enable_ble=False))
        app_targets.append(target.Extend(
            'ota-requestor', app=HostApp.OTA_REQUESTOR, enable_ble=False))
        app_targets.append(target.Extend('python-bindings', app=HostApp.PYTHON_BINDINGS))

    builder = VariantBuilder()

    # Possible build variants. Note that number of potential
    # builds is exponential here
    builder.AppendVariant(name="test-group", validator=AcceptNameWithSubstrings(
        ['-all-clusters', '-chip-tool']), test_group=True)
    builder.AppendVariant(name="same-event-loop", validator=AcceptNameWithSubstrings(
        ['-chip-tool']), separate_event_loop=False)
    builder.AppendVariant(name="no-interactive", validator=AcceptNameWithSubstrings(
        ['-chip-tool']), interactive_mode=False)
    builder.AppendVariant(name="ipv6only", enable_ipv4=False)
    builder.AppendVariant(name="no-ble", enable_ble=False)
    builder.AppendVariant(name="no-wifi", enable_wifi=False)
    builder.AppendVariant(name="tsan", conflicts=['asan'], use_tsan=True)
    builder.AppendVariant(name="asan", conflicts=['tsan'], use_asan=True)
    builder.AppendVariant(name="libfuzzer", requires=[
        "clang"], use_libfuzzer=True)
    builder.AppendVariant(name="clang", use_clang=True)

    builder.WhitelistVariantNameForGlob('no-interactive-ipv6only')
    builder.WhitelistVariantNameForGlob('ipv6only')

    for target in app_targets:
        if ('-rpc-console' in target.name) or ('-python-bindings' in target.name) or ('nl-test-runner' in target.name):
            # Single-variant builds
            yield target
        else:
            builder.targets.append(target)

    for target in builder.AllVariants():
        if cross_compile and 'chip-tool' in target.name and 'arm64' in target.name and '-no-interactive' not in target.name:
            # Interactive builds will not compile by default on arm cross compiles
            # because libreadline is not part of the default sysroot
            yield target.GlobBlacklist('Arm crosscompile does not support libreadline-dev')
        else:
            yield target

    # Without extra build variants
    yield target_native.Extend('chip-cert', app=HostApp.CERT_TOOL)
    yield target_native.Extend('address-resolve-tool', app=HostApp.ADDRESS_RESOLVE)
    yield target_native.Extend('address-resolve-tool-clang', app=HostApp.ADDRESS_RESOLVE,
                               use_clang=True).GlobBlacklist("Reduce default build variants")
    yield target_native.Extend('address-resolve-tool-platform-mdns', app=HostApp.ADDRESS_RESOLVE,
                               use_platform_mdns=True).GlobBlacklist("Reduce default build variants")
    yield target_native.Extend('address-resolve-tool-platform-mdns-ipv6only', app=HostApp.ADDRESS_RESOLVE,
                               use_platform_mdns=True, enable_ipv4=False).GlobBlacklist("Reduce default build variants")

    test_target = Target(HostBoard.NATIVE.PlatformName(), HostBuilder)
    for board in [HostBoard.NATIVE, HostBoard.FAKE]:
        yield test_target.Extend(board.BoardName() + '-tests', board=board, app=HostApp.TESTS)
def Esp32Targets():
    """Yield every supported esp32 build target (M5Stack, C3DevKit, DevKitC, QEMU)."""
    base = Target('esp32', Esp32Builder)

    # M5Stack / C3 devkit all-clusters variants.
    yield from [
        base.Extend('m5stack-all-clusters', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS),
        base.Extend('m5stack-all-clusters-ipv6only', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
                    enable_ipv4=False),
        base.Extend('m5stack-all-clusters-rpc', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
                    enable_rpcs=True),
        base.Extend('m5stack-all-clusters-rpc-ipv6only', board=Esp32Board.M5Stack, app=Esp32App.ALL_CLUSTERS,
                    enable_rpcs=True, enable_ipv4=False),
        base.Extend('c3devkit-all-clusters', board=Esp32Board.C3DevKit, app=Esp32App.ALL_CLUSTERS),
    ]

    # DevKitC carries most of the sample applications.
    devkit = base.Extend('devkitc', board=Esp32Board.DevKitC)
    for suffix, kwargs in (
            ('all-clusters', dict(app=Esp32App.ALL_CLUSTERS)),
            ('all-clusters-ipv6only', dict(app=Esp32App.ALL_CLUSTERS, enable_ipv4=False)),
            ('shell', dict(app=Esp32App.SHELL)),
            ('light', dict(app=Esp32App.LIGHT)),
            ('lock', dict(app=Esp32App.LOCK)),
            ('bridge', dict(app=Esp32App.BRIDGE)),
            ('temperature-measurement', dict(app=Esp32App.TEMPERATURE_MEASUREMENT)),
            ('temperature-measurement-rpc', dict(app=Esp32App.TEMPERATURE_MEASUREMENT, enable_rpcs=True)),
    ):
        yield devkit.Extend(suffix, **kwargs)

    # Unit tests run under QEMU.
    yield base.Extend('qemu-tests', board=Esp32Board.QEMU, app=Esp32App.TESTS)
def Efr32Targets():
    """Yield EFR32 targets: every supported board crossed with the app set,
    plus rpc / ota-requestor build variants."""
    base = Target('efr32', Efr32Builder)

    # brd4161a is the default board; every other board is built only when
    # explicitly requested by a user.
    board_targets = [base.Extend('brd4161a', board=Efr32Board.BRD4161A)]
    for suffix, board in (('brd4163a', Efr32Board.BRD4163A),
                          ('brd4164a', Efr32Board.BRD4164A),
                          ('brd4166a', Efr32Board.BRD4166A),
                          ('brd4170a', Efr32Board.BRD4170A),
                          ('brd4186a', Efr32Board.BRD4186A),
                          ('brd4187a', Efr32Board.BRD4187A),
                          ('brd4304a', Efr32Board.BRD4304A)):
        board_targets.append(
            base.Extend(suffix, board=board).GlobBlacklist('only user requested'))

    builder = VariantBuilder()

    for board_target in board_targets:
        builder.targets.append(board_target.Extend(
            'window-covering', app=Efr32App.WINDOW_COVERING))
        builder.targets.append(board_target.Extend(
            'switch', app=Efr32App.SWITCH))
        builder.targets.append(board_target.Extend(
            'unit-test', app=Efr32App.UNIT_TEST))
        builder.targets.append(
            board_target.Extend('light', app=Efr32App.LIGHT))
        builder.targets.append(board_target.Extend('lock', app=Efr32App.LOCK))

    # Possible build variants. Note that number of potential
    # builds is exponential here
    builder.AppendVariant(name="rpc", validator=AcceptNameWithSubstrings(
        ['-light', '-lock']), enable_rpcs=True)
    builder.AppendVariant(name="with-ota-requestor", enable_ota_requestor=True)

    builder.WhitelistVariantNameForGlob('rpc')

    yield from builder.AllVariants()
def NrfTargets():
    """Yield nRF Connect targets: host unit tests, nRF52840 dongle apps and
    the per-devkit application set (with a light-rpc variant)."""
    base = Target('nrf', NrfConnectBuilder)

    yield base.Extend('native-posix-64-tests', board=NrfBoard.NATIVE_POSIX_64, app=NrfApp.UNIT_TESTS)

    dev_kits = [
        base.Extend('nrf5340dk', board=NrfBoard.NRF5340DK),
        base.Extend('nrf52840dk', board=NrfBoard.NRF52840DK),
    ]

    # Enable nrf52840dongle for all-clusters and lighting app only
    yield base.Extend('nrf52840dongle-all-clusters', board=NrfBoard.NRF52840DONGLE, app=NrfApp.ALL_CLUSTERS)
    yield base.Extend('nrf52840dongle-light', board=NrfBoard.NRF52840DONGLE, app=NrfApp.LIGHT)

    for kit in dev_kits:
        for suffix, app in (('all-clusters', NrfApp.ALL_CLUSTERS),
                            ('lock', NrfApp.LOCK),
                            ('light', NrfApp.LIGHT),
                            ('shell', NrfApp.SHELL),
                            ('pump', NrfApp.PUMP),
                            ('pump-controller', NrfApp.PUMP_CONTROLLER)):
            yield kit.Extend(suffix, app=app)

        rpc = kit.Extend('light-rpc', app=NrfApp.LIGHT, enable_rpcs=True)

        if '-nrf5340dk-' in rpc.name:
            rpc = rpc.GlobBlacklist(
                'Compile failure due to pw_build args not forwarded to proto compiler. '
                'https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/66760')

        yield rpc
def AndroidTargets():
    """Yield Android build targets: each supported ABI / AndroidStudio flavor
    paired with the applications that build for it."""
    base = Target('android', AndroidBuilder)

    # (name suffix, board, application) in the exact order targets are produced.
    builds = [
        ('arm-chip-tool', AndroidBoard.ARM, AndroidApp.CHIP_TOOL),
        ('arm64-chip-tool', AndroidBoard.ARM64, AndroidApp.CHIP_TOOL),
        ('x64-chip-tool', AndroidBoard.X64, AndroidApp.CHIP_TOOL),
        ('x86-chip-tool', AndroidBoard.X86, AndroidApp.CHIP_TOOL),
        ('arm64-chip-test', AndroidBoard.ARM64, AndroidApp.CHIP_TEST),
        ('androidstudio-arm-chip-tool', AndroidBoard.AndroidStudio_ARM, AndroidApp.CHIP_TOOL),
        ('androidstudio-arm64-chip-tool', AndroidBoard.AndroidStudio_ARM64, AndroidApp.CHIP_TOOL),
        ('androidstudio-x86-chip-tool', AndroidBoard.AndroidStudio_X86, AndroidApp.CHIP_TOOL),
        ('androidstudio-x64-chip-tool', AndroidBoard.AndroidStudio_X64, AndroidApp.CHIP_TOOL),
        ('arm64-chip-tvserver', AndroidBoard.ARM64, AndroidApp.CHIP_TVServer),
        ('arm-chip-tvserver', AndroidBoard.ARM, AndroidApp.CHIP_TVServer),
        ('x86-chip-tvserver', AndroidBoard.X86, AndroidApp.CHIP_TVServer),
        ('x64-chip-tvserver', AndroidBoard.X64, AndroidApp.CHIP_TVServer),
        ('arm64-chip-tv-casting-app', AndroidBoard.ARM64, AndroidApp.CHIP_TV_CASTING_APP),
        ('arm-chip-tv-casting-app', AndroidBoard.ARM, AndroidApp.CHIP_TV_CASTING_APP),
    ]

    for suffix, board, app in builds:
        yield base.Extend(suffix, board=board, app=app)
def MbedTargets():
    """Yield mbed targets: each application on the CY8CPROTO_062_4343W board in
    release, develop and debug profiles (the latter two glob-blacklisted)."""
    base = Target('mbed', MbedBuilder)

    board_targets = [
        base.Extend('CY8CPROTO_062_4343W',
                    board=MbedBoard.CY8CPROTO_062_4343W),
    ]

    app_targets = []
    for board_target in board_targets:
        for suffix, app in (('lock', MbedApp.LOCK),
                            ('light', MbedApp.LIGHT),
                            ('all-clusters', MbedApp.ALL_CLUSTERS),
                            ('pigweed', MbedApp.PIGWEED),
                            ('shell', MbedApp.SHELL)):
            app_targets.append(board_target.Extend(suffix, app=app))

    # develop/debug profiles are built only for debugging purposes.
    debug_only_reason = (
        'Compile only for debugging purpose - '
        'https://os.mbed.com/docs/mbed-os/latest/program-setup/build-profiles-and-rules.html')

    for app_target in app_targets:
        yield app_target.Extend('release', profile=MbedProfile.RELEASE)
        yield app_target.Extend('develop', profile=MbedProfile.DEVELOP).GlobBlacklist(debug_only_reason)
        yield app_target.Extend('debug', profile=MbedProfile.DEBUG).GlobBlacklist(debug_only_reason)
def InfineonTargets():
    """Yield Infineon P6 board targets (lock, all-clusters, light)."""
    base = Target('infineon', InfineonBuilder)

    for suffix, app in (('p6-lock', InfineonApp.LOCK),
                        ('p6-all-clusters', InfineonApp.ALL_CLUSTERS),
                        ('p6-light', InfineonApp.LIGHT)):
        yield base.Extend(suffix, board=InfineonBoard.P6BOARD, app=app)
def AmebaTargets():
ameba_target = Target('ameba', AmebaBuilder)
yield ameba_target.Extend('amebad-all-clusters', board=AmebaBoard.AMEBAD, app=AmebaApp.ALL_CLUSTERS)
yield ameba_target.Extend('amebad-light', board=AmebaBoard.AMEBAD, app=AmebaApp.LIGHT)
yield ameba_target.Extend('amebad-pigweed', board=AmebaBoard.AMEBAD, app=AmebaApp.PIGWEED)
def K32WTargets():
    """Yield K32W targets; SE05x and low-power builds are on-demand only."""
    base = Target('k32w', K32WBuilder)

    # Built only on explicit request.
    yield base.Extend('light-ota-se', app=K32WApp.LIGHT, release=True, disable_ble=True, se05x=True).GlobBlacklist("Only on demand build")

    # Regular release builds.
    yield base.Extend('light-release-no-ota', app=K32WApp.LIGHT, tokenizer=True, disable_ota=True, release=True)
    yield base.Extend('shell-release', app=K32WApp.SHELL, release=True)
    yield base.Extend('lock-release', app=K32WApp.LOCK, release=True)

    # Low power lock: on-demand only.
    yield base.Extend('lock-low-power-release', app=K32WApp.LOCK,
                      low_power=True, release=True).GlobBlacklist("Only on demand build")
def cc13x2x7_26x2x7Targets():
    """Yield TI cc13x2x7_26x2x7 targets (lock FTD/MTD, pump apps, all-clusters)."""
    base = Target('cc13x2x7_26x2x7', cc13x2x7_26x2x7Builder)

    # Lock builds as both a full thread device and a minimal thread device.
    yield base.Extend('lock-ftd', app=cc13x2x7_26x2x7App.LOCK, openthread_ftd=True)
    yield base.Extend('lock-mtd', app=cc13x2x7_26x2x7App.LOCK, openthread_ftd=False)

    for suffix, app in (('pump', cc13x2x7_26x2x7App.PUMP),
                        ('pump-controller', cc13x2x7_26x2x7App.PUMP_CONTROLLER),
                        ('all-clusters', cc13x2x7_26x2x7App.ALL_CLUSTERS)):
        yield base.Extend(suffix, app=app)
def Cyw30739Targets():
    """Yield CYW30739 targets, all on the CYW930739M2EVB-01 evaluation board."""
    board = Cyw30739Board.CYW930739M2EVB_01

    yield Target('cyw30739-cyw930739m2evb_01-light', Cyw30739Builder,
                 board=board, app=Cyw30739App.LIGHT)
    yield Target('cyw30739-cyw930739m2evb_01-lock', Cyw30739Builder,
                 board=board, app=Cyw30739App.LOCK)

    # The plain OTA requestor build no longer fits in flash; keep it out of globs.
    yield Target('cyw30739-cyw930739m2evb_01-ota-requestor', Cyw30739Builder,
                 board=board, app=Cyw30739App.OTA_REQUESTOR).GlobBlacklist(
        "Running out of XIP flash space")
    yield Target('cyw30739-cyw930739m2evb_01-ota-requestor-no-progress-logging', Cyw30739Builder,
                 board=board, app=Cyw30739App.OTA_REQUESTOR, progress_logging=False)
def QorvoTargets():
    """Yield Qorvo QPG6105 targets for each supported application."""
    base = Target('qpg', QpgBuilder)
    board = QpgBoard.QPG6105

    for suffix, app in (('lock', QpgApp.LOCK),
                        ('light', QpgApp.LIGHT),
                        ('shell', QpgApp.SHELL),
                        ('persistent-storage', QpgApp.PERSISTENT_STORAGE)):
        yield base.Extend(suffix, board=board, app=app)
def TizenTargets():
    """Yield Tizen ARM light-app targets with no-ble / no-wifi / asan variants."""
    # Possible build variants.
    # NOTE: The number of potential builds is exponential here.
    builder = VariantBuilder()
    for variant_kwargs in (dict(name="no-ble", enable_ble=False),
                           dict(name="no-wifi", enable_wifi=False),
                           dict(name="asan", use_asan=True)):
        builder.AppendVariant(**variant_kwargs)

    base = Target('tizen-arm', TizenBuilder, board=TizenBoard.ARM)
    builder.targets.append(base.Extend('light', app=TizenApp.LIGHT))

    yield from builder.AllVariants()
def Bl602Targets():
    """Yield the single BL602 light-app target."""
    yield Target('bl602', Bl602Builder).Extend(
        'light', board=Bl602Board.BL602BOARD, app=Bl602App.LIGHT)
# Flat list of every known build target, collected from all of the
# per-platform generators above plus a few one-off targets.
ALL = []

target_generators = [
    HostTargets(),
    Esp32Targets(),
    Efr32Targets(),
    NrfTargets(),
    AndroidTargets(),
    MbedTargets(),
    InfineonTargets(),
    AmebaTargets(),
    K32WTargets(),
    cc13x2x7_26x2x7Targets(),
    Cyw30739Targets(),
    QorvoTargets(),
    TizenTargets(),
    Bl602Targets(),
]

for generator in target_generators:
    ALL.extend(generator)

# Simple targets added one by one
ALL.append(Target('telink-tlsr9518adk80d-light', TelinkBuilder,
                  board=TelinkBoard.TLSR9518ADK80D, app=TelinkApp.LIGHT))

# have a consistent order overall
ALL.sort(key=lambda t: t.name)
|
#!/usr/bin/python
# Helper resources => {
# https://pillow.readthedocs.io/en/3.0.x/handbook/tutorial.html
# https://pythonexamples.org/python-pillow-get-image-size/
# https://realpython.com/working-with-files-in-python/#getting-file-attributes
# }
import os
import sys
import shutil
from PIL import Image
#print('Argument List:', str(sys.argv))
# Command-line interface: <script> prefixName newFolderName [pathToSrcFolder]
# The source folder defaults to the current working directory.
if len(sys.argv) < 3:
    raise ValueError('Please provide the following args in this order [ prefixName, newFolderName, pathToSrcFolder] to create a new destination folder with new renamed files...')
#folderPath = r'C:\Users\mdaka\Downloads\icons\demo_icon'
# Prefix used for every renamed output file.
prefixName = sys.argv[1]
# Name of the destination folder created under folderPath.
newFolderName = sys.argv[2]
if len(sys.argv) > 3:
    folderPath = sys.argv[3]
else:
    folderPath = os.getcwd() # We can use os.getcwdb() or os.path.dirname(os.path.abspath(__file__))
def getAllowedImageTypes():
    """Return the lowercase image extensions that get renamed with their pixel size.

    Bug fix: 'jpeg' was previously listed without its leading dot, so '.jpeg'
    files (as produced by os.path.splitext) could never match.
    """
    return ['.png', '.jpg', '.jpeg']
def createDirectoryIfNotExists(directoryPath):
    """Create directoryPath (and any missing parents) if it does not exist.

    Returns:
        True if the path already existed, False if it was just created
        (same semantics as before).

    Uses EAFP (try/except FileExistsError) instead of an exists()+makedirs()
    pair, which was racy if the directory appeared between check and call.
    """
    try:
        os.makedirs(directoryPath)
    except FileExistsError:
        return True  # Directory (or a file at that path) already exists
    return False
def getNewImageName(path, prefixName, extension):
    """Return '<prefixName>_<width>x<height><extension>' for the image at *path*.

    Fix: the image is now opened with a context manager so the underlying
    file handle is closed promptly (PIL's Image.open is lazy and previously
    the handle was left open).
    """
    with Image.open(path) as im:
        return prefixName + '_' + str(im.width) + 'x' + str(im.height) + extension
def getFileName(path, prefixName, extension):
    """Build the destination file name for a non-image asset.

    Plain documents ('.txt'/'.pdf') keep just the prefix; any other type gets
    a lowercased type tag embedded in the name (e.g. 'icon_svg.svg').
    *path* is accepted for signature parity with getNewImageName but unused.
    """
    if extension in ['.txt', '.pdf']:
        return prefixName + extension
    # Strip the leading dot and lowercase the rest to form the type tag.
    fileType = extension[1:].lower()
    return prefixName + '_' + fileType + extension
def copyFile(srcFile, destDirectory, newFileName):
    """Copy *srcFile* into *destDirectory* under *newFileName*, logging the name."""
    destination = os.path.join(destDirectory, newFileName)  # portable path join
    shutil.copy(srcFile, destination, follow_symlinks=False)
    print(newFileName)
    print("-------------------")
def main():
    """Copy every supported file from folderPath into a new destination folder,
    renaming each copy with the configured prefix (images additionally get
    their WxH pixel size embedded in the name).

    Reads the module-level globals folderPath, newFolderName and prefixName
    that were parsed from sys.argv above.
    """
    destFolderPath = os.path.join(folderPath, newFolderName)
    # os.path.join(os.getcwd() , newFolderName) # This will get the path where the script file is running
    isExist = createDirectoryIfNotExists(destFolderPath) # createDirectoryIfNotExists('C:\\Users\\mdaka\\Downloads\\icons\\2 - Copy\\test\\')
    if isExist:
        print("The directory has not been created because it already existed: ", destFolderPath)
    print("***********************")
    allowedImageTypes = getAllowedImageTypes()
    for fileOrFolder in os.scandir(folderPath):
        # NOTE(review): os.path.join accepts the DirEntry directly because
        # DirEntry implements os.PathLike (Python 3.6+).
        srcPath = os.path.join(folderPath, fileOrFolder)
        if os.path.isfile(srcPath):
            fileName = fileOrFolder.name
            # Split once for the (lowercased) extension, once for the stem.
            extension = (os.path.splitext(fileName)[1]).lower()
            fileName = os.path.splitext(fileName)[0]
            if extension in allowedImageTypes:
                # Images: embed the pixel dimensions in the new name.
                newFileName = getNewImageName(srcPath,prefixName,extension)
                copyFile(srcPath, destFolderPath, newFileName)
            elif extension in ['.psd', '.eps', '.svg']:
                # Design assets: embed the file type tag in the new name.
                newFileName = getFileName(srcPath,prefixName,extension)
                copyFile(srcPath, destFolderPath, newFileName)
            elif extension in ['.txt', '.pdf']:
                # Documents: keep the original stem after the prefix.
                tempFileName = prefixName + '_' + fileName
                newFileName = getFileName(srcPath,tempFileName ,extension)
                copyFile(srcPath, destFolderPath, newFileName)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
from .Regex import Regex
from typing import Union
class Quantifier(Regex):
    """Regex subclass that wraps its pattern in a quantifier ({n}, {n,m} or {n,})."""

    def __init__(self, regex: Union[str, int, Regex] = "", n: int = 0, m: int = 0, without_maximum: bool = False):
        """Initialise the base Regex, then replace the stored pattern with its quantified form."""
        super().__init__(regex)
        quantified = self.quantifier(n, m, without_maximum)
        self._set_regex(quantified)
|
import numpy as np
import justice.simulate as sim
def test_make_gauss():
    """A unit-scale Gaussian sampled on [0, 1) matches precomputed values."""
    model = sim.make_gauss([1.0, ])
    cadence = sim.make_cadence([np.arange(0.0, 1.0, 0.1), ], [0.])
    values = model(cadence)
    expected = np.array([
        1.,
        0.99501248,
        0.98019867,
        0.95599748,
        0.92311635,
        0.8824969,
        0.83527021,
        0.78270454,
        0.72614904,
        0.66697681,
    ])
    assert np.sum(np.abs(expected - values[:, 0])) < 1e-6
def test_make_gauss_multiband():
    """A two-band Gaussian with per-band loc/amp/const matches reference values."""
    model = sim.make_gauss([1.0, 2.0], locs=[0, 1], amps=[1, 2], consts=[2, 3])
    cadence = sim.make_cadence([np.arange(0.0, 1.0, 0.1), np.arange(.5, 2.5, 0.3)], [0., 0.])
    values = model(cadence)
    expected = np.array([np.array([3., 2.99501248, 2.98019867, 2.95599748, 2.92311635,
                                   2.8824969, 2.83527021, 2.78270454, 2.72614904, 2.66697681]),
                         np.array([4.93846647, 4.99002496, 4.99750156, 4.96039735, 4.88117613,
                                   4.76499381, 4.6191433])], dtype=object)
    for got, want in zip(values, expected):
        np.testing.assert_allclose(got, want)
def test_make_dataset():
    """Smoke test: make_dataset runs end-to-end for a gauss+sine class mix.

    Fixes: the cls_wts variable was assigned but a duplicated literal None
    was passed instead of it, and a redundant bare `return` ended the test.
    """
    num_obj = 10
    cls_models = [sim.make_gauss, sim.make_sine]
    cls_params = [{'scales': [10.], 'locs': [100.], 'amps': [50.], 'consts': [1.]},
                  {'periods': [20.], 'phases': [0.], 'amps': [5.], 'consts': [5.]}]
    cls_wts = None  # even split for now
    def_cadence = [np.arange(0., 200., 5.), ]
    sim.make_dataset(num_obj, def_cadence, cls_models, cls_params, cls_wts=cls_wts)
|
import os
import tempfile
from pathlib import Path
from unittest import mock
from unittest import TestCase
import numpy as np
import pytest
import yaml
from bigbang.analysis.listserv import ListservArchive
from bigbang.analysis.listserv import ListservList
from config.config import CONFIG
# Scratch location used for a temporary mbox download during tests.
dir_temp = tempfile.gettempdir()
file_temp_mbox = dir_temp + "/listserv.mbox"
# Path to the credentials file; auth_key_mock stands in for real credentials.
file_auth = CONFIG.config_path + "authentication.yaml"
auth_key_mock = {"username": "bla", "password": "bla"}
@pytest.fixture(name="march", scope="module")
def get_mailingarchive():
    """Module-scoped fixture: the 3GPP test archive loaded from mbox files."""
    return ListservArchive.from_mbox(
        name="3GPP",
        directorypath=CONFIG.test_data_path + "3GPP_mbox/",
        filedsc="3GPP_TSG_*",
    )
@pytest.fixture(name="mlist", scope="module")
def get_mailinglist():
    """Module-scoped fixture: a single 3GPP mailing list loaded from its mbox."""
    return ListservList.from_mbox(
        name="3GPP_TSG_SA_WG4_EVS",
        filepath=CONFIG.test_data_path + "3GPP_mbox/3GPP_TSG_SA_WG4_EVS.mbox",
    )
class TestListservList:
    """Unit tests for ListservList, run against the 3GPP mbox test fixtures.

    The numeric expectations (message counts, domain lists, thread tables)
    are tied to the contents of the 3GPP_TSG_SA_WG4_EVS.mbox fixture.
    """
    def test__to_percentage(self):
        """Absolute counts are normalised to fractions of their total."""
        abso = np.array([1, 3])
        perc = ListservList.to_percentage(abso)
        np.testing.assert_array_equal(perc, np.array([0.25, 0.75]))
    def test__get_name_localpart_domain(self):
        """An RFC-style address splits into display name, localpart, domain."""
        addr = '"gabin frederic" <frederic.gabin@dolby.com>'
        name, localpart, domain = ListservList.get_name_localpart_domain(addr)
        assert name == "gabin frederic"
        assert localpart == "frederic.gabin"
        assert domain == "dolby.com"
    def test__period_of_activity(self, mlist):
        """The fixture list spans 2020 through 2021."""
        datetimes = mlist.period_of_activity()
        years = [dt.year for dt in datetimes]
        assert years == [2020, 2021]
    def test__crop_by_year(self, mlist):
        """Cropping by year keeps only that year's 25 messages."""
        _mlist = mlist.crop_by_year(2020)
        assert len(_mlist.df.index.values) == 25
        datetimes = _mlist.period_of_activity()
        years = [dt.year for dt in datetimes]
        assert years == [2020, 2020]
    def test__crop_by_address(self, mlist):
        """Cropping by sender domain keeps the single samsung.com message."""
        _mlist = mlist.crop_by_address(
            header_field="from",
            per_address_field={"domain": ["samsung.com"]},
        )
        assert len(_mlist.df.index.values) == 1
    def test__crop_by_subject(self, mlist):
        """Cropping by subject prefix keeps the matching thread's 3 messages."""
        _mlist = mlist.crop_by_subject(match="EVS SWG Sessions", place=0)
        assert len(_mlist.df.index.values) == 3
    def test__get_domains(self, mlist):
        """Domains extracted from 'comments-to' and 'from' headers match the fixture."""
        domains = mlist.get_domains(
            header_fields=["comments-to"], return_msg_counts=True
        )
        domains_comp = [
            "ericsson.com",
            "qti.qualcomm.com",
            "list.etsi.org",
            "usherbrooke.ca",
            "philips.com",
        ]
        for domain in domains["comments-to"]:
            assert domain[0] in domains_comp
            if domain[0] == "qti.qualcomm.com":
                assert domain[1] == 8
        domains = mlist.get_domains(
            header_fields=["from"], return_msg_counts=False
        )
        domains_comp = [
            "samsung.com",
            "qti.qualcomm.com",
            "philips.com",
            "iis.fraunhofer.de",
            "ericsson.com",
            "usherbrooke.ca",
            "3gpp.org",
            "dolby.com",
            "qosound.com",
        ]
        assert set(domains["from"]) == set(domains_comp)
    def test__get_domainscount(self, mlist):
        """Domain counts per year and in total match the fixture."""
        domains = mlist.get_domainscount(
            header_fields=["comments-to"],
            per_year=True,
        )
        assert domains["comments-to"][2020] == 2
        assert domains["comments-to"][2021] == 4
        domains = mlist.get_domainscount(
            header_fields=["from"],
            per_year=False,
        )
        assert domains["from"] == 9
    def test__get_localparts(self, mlist):
        """Localparts grouped by domain (and flat with counts) match the fixture."""
        localparts = mlist.get_localparts(
            header_fields=["comments-to"],
            per_domain=True,
            return_msg_counts=False,
        )
        assert localparts["comments-to"]["ericsson.com"] == ["tomas.toftgard"]
        assert set(localparts["comments-to"]["qti.qualcomm.com"]) == set(
            ["nleung", "ivarga"]
        )
        localparts = mlist.get_localparts(
            header_fields=["comments-to"],
            per_domain=False,
            return_msg_counts=True,
        )
        # Transpose [(localpart, count), ...] into [localparts, counts].
        localparts = list(map(list, zip(*localparts["comments-to"])))
        assert "3gpp_tsg_sa_wg4_video" in localparts[0]
        assert "ivarga" in localparts[0]
        assert "milan.jelinek" in localparts[0]
        assert set(localparts[1]) == {1, 2, 3, 4, 6, 7}
    def test__get_localpartscount(self, mlist):
        """Unique localpart counts per domain and per year match the fixture."""
        localparts = mlist.get_localpartscount(
            header_fields=["comments-to"],
            per_domain=True,
            per_year=False,
        )
        assert localparts["comments-to"]["list.etsi.org"] == 5
        assert localparts["comments-to"]["usherbrooke.ca"] == 1
        assert localparts["comments-to"]["qti.qualcomm.com"] == 2
        localparts = mlist.get_localpartscount(
            header_fields=["from"],
            per_domain=False,
            per_year=True,
        )
        assert localparts["from"][2020] == 6
        assert localparts["from"][2021] == 9
    def test__get_threadsroot(self, mlist):
        """Thread-root subjects map to the expected message indices."""
        subjects = mlist.get_threadsroot()
        subjects_true = {
            "Draft EVS-8a": 6,
            "IVAS-1 v0.4.0 available in the Inbox": 7,
            "Updated CRs to 26.442/443/444/452 in Inbox": 8,
            "Draft IVAS-8a in Draft folder": 9,
            "Revised IVAS-1 in Draft folder": 10,
            "Draft LS reply to SG12 on P.SUPPL800 & draft IVAS call for labs": 11,
            "Information related to EVS SWG Sessions during SA4#115e meeting": 13,
            "Draft IVAS-8a (IVAS test plan skeleton with Appendix with example test designs)": 14,
            "Information related to EVS SWG Sessions during SA4#114e meeting": 15,
            "IVAS-1 is in the Inbox now": 16,
            "Information for #113e EVS SWG participants": 19,
            "IVAS-7a_v.0.2.0 available in S4-210315": 20,
            "Final IVAS-1 is available in the Inbox": 21,
            "Rev1 of S4-210133 (IVAS-1) is available in the draft folder": 22,
            "FW: [11.5, S4-210129, Block A, 3 Feb 16:00 CET] Update to: Audio mixing of multiple streaming in ITT4RT": 23,
            "Draft revised agenda and report template": 34,
            "FW: [11.5; 1451; 18 Nov 1600 CET] Audio mixing of multiple streaming in ITT4RT - for agreement": 27,
            "Information related to EVS SWG sessions": 28,
            "3GPP SA4#110-e SQ SWG": 29,
            "Update on the Tohru Hand raising Tool": 30,
            "Wednesday meeting": 31,
            "FW: Updated Draft Schedule of MTSI SWG Telco sessions at SA4#110-e": 33,
            "3GPP SA4#110-e EVS SWG": 41,
            "EVS SWG on 28th May: cancelled": 42,
            "GTM Links A/B/C: SA4#109-e SWG Sessions": 44,
            "subscribe": 45,
            "SQA4 Breakout Sessions: =?utf-8?q?Today=E2=80=99s?= link for the online sessions": 46,
            "Hosted: Agenda for SA4#108-e meeting": 47,
            "test mail -": 49,
        }
        for sl in subjects.keys():
            assert subjects[sl] == subjects_true[sl]
    def test__get_threadsrootcount(self, mlist):
        """The fixture contains 29 thread roots (the rest are replies)."""
        count = mlist.get_threadsrootcount()
        assert count == 29  # as they are all replies
    def test__get_messagescount(self, mlist):
        """Message totals, per-domain and per-localpart counts match the fixture."""
        msgcount = mlist.get_messagescount()
        assert msgcount == 50
        msgcount = mlist.get_messagescount(
            header_fields=["comments-to"],
            per_address_field="domain",
            per_year=False,
        )
        assert msgcount["comments-to"]["list.etsi.org"] == 17
        assert msgcount["comments-to"]["usherbrooke.ca"] == 3
        assert msgcount["comments-to"]["qti.qualcomm.com"] == 8
        msgcount = mlist.get_messagescount(
            header_fields=["from"],
            per_address_field="localpart",
            per_year=True,
        )
        assert msgcount["from"][2020]["milan.jelinek"] == 1
        assert msgcount["from"][2021]["milan.jelinek"] == 3
        assert msgcount["from"][2021]["markus.multrus"] == 2
    def test__get_messagescount_per_timezone(self, mlist):
        """Messages are distributed over sender UTC offsets as expected."""
        msgcount = mlist.get_messagescount_per_timezone()
        assert msgcount["+00:00"] == 38
        assert msgcount["+08:00"] == 6
        assert msgcount["-04:00"] == 3
        assert msgcount["-05:00"] == 1
    def test__get_sender_receiver_dict(self, mlist):
        """The sender->receiver domain adjacency counts match the fixture."""
        dic = mlist.get_sender_receiver_dict()
        dic_true = {
            "ericsson.com": {"usherbrooke.ca": 1, "qti.qualcomm.com": 1},
            "usherbrooke.ca": {"ericsson.com": 1, "qti.qualcomm.com": 2},
            "qti.qualcomm.com": {"usherbrooke.ca": 2},
            "philips.com": {"qti.qualcomm.com": 1, "philips.com": 1},
            "iis.fraunhofer.de": {"qti.qualcomm.com": 2},
            "3gpp.org": {"list.etsi.org": 15, "qti.qualcomm.com": 1},
            "samsung.com": {"list.etsi.org": 2},
            "qosound.com": {"qti.qualcomm.com": 1},
            "dolby.com": {},
            "list.etsi.org": {},
        }
        for key1, value1 in dic.items():
            for key2, value2 in value1.items():
                assert dic_true[key1][key2] == value2
|
from WebScrapy import AlonhadatSpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import logging
from selenium.webdriver.remote.remote_connection import LOGGER
from urllib3.connectionpool import log
# Silence the very chatty DEBUG output from urllib3 and the Selenium
# remote-driver loggers so only warnings and above are shown.
log.setLevel(logging.WARNING)
LOGGER.setLevel(logging.WARNING)
if __name__ == '__main__':
    # Load the Scrapy project settings once and reuse them; previously they
    # were fetched twice and the first result ('setting') was left unused.
    settings = get_project_settings()
    process = CrawlerProcess(settings)
    process.crawl(AlonhadatSpider)
    # Blocks until the crawl finishes.
    process.start()
|
#!/usr/bin/python2.7
input_sequence = '1113222113'
for i in range(0,50):
same_char_count = 1
previous_character = input_sequence[0]
input_sequence = input_sequence[1:]
next_sequence = ''
for character in input_sequence:
if character == previous_character:
same_char_count += 1
else:
next_sequence += str(same_char_count) + previous_character
same_char_count = 1
previous_character = character
next_sequence += str(same_char_count) + previous_character
input_sequence = next_sequence
print len(input_sequence)
|
import logging
import os
from dxtbx.serialize import xds
from iotbx.xds import spot_xds
from scitbx import matrix
logger = logging.getLogger(__name__)
def dump(experiments, reflections, directory):
    """Dump the experiments/reflections in XDS format.

    One sub-directory per experiment is written containing XDS.INP, plus
    XPARM.XDS when a crystal model is present and SPOT.XDS when reflections
    are supplied. With no experiments, only SPOT.XDS is written into
    *directory*.
    """
    if len(experiments) > 0:
        for i, experiment in enumerate(experiments):
            suffix = ""
            if len(experiments) > 1:
                suffix = "_%i" % (i + 1)

            sub_dir = f"{directory}{suffix}"
            if not os.path.isdir(sub_dir):
                os.makedirs(sub_dir)
            # XXX imageset is getting the experimental geometry from the image files
            # rather than the input models.expt file
            imageset = experiment.imageset
            imageset.set_detector(experiment.detector)
            imageset.set_beam(experiment.beam)
            imageset.set_goniometer(experiment.goniometer)
            imageset.set_scan(experiment.scan)

            if experiment.crystal is None:
                space_group_number = None
                real_space_a = None
                real_space_b = None
                real_space_c = None
                job_card = "XYCORR INIT COLSPOT IDXREF DEFPIX INTEGRATE CORRECT"
            else:
                crystal_model = experiment.crystal
                # Work in the reference setting of the space group.
                crystal_model = crystal_model.change_basis(
                    crystal_model.get_space_group()
                    .info()
                    .change_of_basis_op_to_reference_setting()
                )
                space_group_number = crystal_model.get_space_group().type().number()
                # Real-space cell vectors are the rows of the inverse of A.
                A = matrix.sqr(crystal_model.get_A())
                A_inv = A.inverse()
                real_space_a = A_inv.elems[:3]
                real_space_b = A_inv.elems[3:6]
                real_space_c = A_inv.elems[6:9]
                # BUG FIX: a stray trailing comma previously made this a
                # one-element tuple instead of the plain string XDS_INP expects
                # (compare the crystal-less branch above).
                job_card = "XYCORR INIT DEFPIX INTEGRATE CORRECT"

            to_xds = xds.to_xds(imageset)
            xds_inp = os.path.join(sub_dir, "XDS.INP")
            xparm_xds = os.path.join(sub_dir, "XPARM.XDS")
            logger.info("Exporting experiment to %s", xds_inp)

            with open(xds_inp, "w") as f:
                f.write(
                    to_xds.XDS_INP(
                        space_group_number=space_group_number,
                        real_space_a=real_space_a,
                        real_space_b=real_space_b,
                        real_space_c=real_space_c,
                        job_card=job_card,
                    )
                )

            if space_group_number:
                logger.info("Exporting crystal model to %s", xparm_xds)
                with open(xparm_xds, "w") as f:
                    f.write(
                        to_xds.xparm_xds(
                            real_space_a, real_space_b, real_space_c, space_group_number
                        )
                    )

            if reflections is not None and len(reflections) > 0:
                # Only the reflections belonging to this experiment.
                ref_cryst = reflections.select(reflections["id"] == i)
                export_spot_xds(ref_cryst, os.path.join(sub_dir, "SPOT.XDS"))
    else:
        if not os.path.isdir(directory):
            os.makedirs(directory)
        export_spot_xds(reflections, os.path.join(directory, "SPOT.XDS"))
def export_spot_xds(reflections, filename):
    """Write reflection centroids/intensities to *filename* in SPOT.XDS format."""
    if reflections is None or len(reflections) == 0:
        # Nothing to export.
        return

    centroids = reflections["xyzobs.px.value"]
    intensities = reflections["intensity.sum.value"]

    miller_indices = None
    if "miller_index" in reflections:
        miller_indices = reflections["miller_index"]
        # Keep only indexed reflections (hkl != (0, 0, 0)).
        selection = miller_indices != (0, 0, 0)
        miller_indices = miller_indices.select(selection)
        if len(miller_indices) == 0:
            miller_indices = None
        else:
            centroids = centroids.select(selection)
            intensities = intensities.select(selection)

    xds_writer = spot_xds.writer(
        centroids=centroids, intensities=intensities, miller_indices=miller_indices
    )
    logger.info("Exporting spot list as %s", filename)
    xds_writer.write_file(filename=filename)
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: drop the trek SQL view, then make Trek.published (db column
        'public') a non-null BooleanField.

        The view must be dropped first because PostgreSQL refuses to alter a
        column that a view depends on.
        """
        db.execute("DROP VIEW IF EXISTS o_v_itineraire;")
        # Changing field 'Trek.published'
        db.alter_column('o_t_itineraire', 'public', self.gf('django.db.models.fields.BooleanField')(db_column='public'))
def backwards(self, orm):
db.execute("DROP VIEW IF EXISTS o_v_itineraire;")
# Changing field 'Trek.published'
db.alter_column('o_t_itineraire', 'public', self.gf('django.db.models.fields.NullBooleanField')(null=True, db_column='public'))
models = {
u'authent.structure': {
'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'core.comfort': {
'Meta': {'ordering': "['comfort']", 'object_name': 'Comfort', 'db_table': "'l_b_confort'"},
'comfort': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'confort'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.datasource': {
'Meta': {'ordering': "['source']", 'object_name': 'Datasource', 'db_table': "'l_b_source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.network': {
'Meta': {'ordering': "['network']", 'object_name': 'Network', 'db_table': "'l_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'reseau'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.path': {
'Meta': {'object_name': 'Path', 'db_table': "'l_t_troncon'"},
'arrival': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'arrivee'", 'blank': 'True'}),
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'comfort': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'confort'", 'to': u"orm['core.Comfort']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'remarques'", 'blank': 'True'}),
'datasource': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'source'", 'to': u"orm['core.Datasource']"}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'depart'", 'blank': 'True'}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
'geom_cadastre': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'db_column': "'nom'", 'blank': 'True'}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Network']", 'db_table': "'l_r_troncon_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'}),
'stake': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'enjeu'", 'to': u"orm['core.Stake']"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Usage']", 'db_table': "'l_r_troncon_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_column': "'valide'"})
},
u'core.pathaggregation': {
'Meta': {'ordering': "['id']", 'object_name': 'PathAggregation', 'db_table': "'e_r_evenement_troncon'"},
'end_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_fin'", 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'ordre'", 'blank': 'True'}),
'path': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'troncon'", 'to': u"orm['core.Path']"}),
'start_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_debut'", 'db_index': 'True'}),
'topo_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'db_column': "'evenement'", 'to': u"orm['core.Topology']"})
},
u'core.stake': {
'Meta': {'ordering': "['id']", 'object_name': 'Stake', 'db_table': "'l_b_enjeu'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stake': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'enjeu'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.topology': {
'Meta': {'object_name': 'Topology', 'db_table': "'e_t_evenement'"},
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'offset': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'decallage'"}),
'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Path']", 'through': u"orm['core.PathAggregation']", 'db_column': "'troncons'", 'symmetrical': 'False'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'})
},
u'core.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'l_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'usage'"})
},
u'trekking.difficultylevel': {
'Meta': {'ordering': "['id']", 'object_name': 'DifficultyLevel', 'db_table': "'o_b_difficulte'"},
'difficulty': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'difficulte'"}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'", 'blank': 'True'})
},
u'trekking.informationdesk': {
'Meta': {'ordering': "['name']", 'object_name': 'InformationDesk', 'db_table': "'o_b_renseignement'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'commune'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nom'"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'telephone'", 'blank': 'True'}),
'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'photo'", 'blank': 'True'}),
'postal_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'code'", 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'rue'", 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
},
u'trekking.poi': {
'Meta': {'object_name': 'POI', 'db_table': "'o_t_poi'", '_ormbases': [u'core.Topology']},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pois'", 'db_column': "'type'", 'to': u"orm['trekking.POIType']"})
},
u'trekking.poitype': {
'Meta': {'ordering': "['label']", 'object_name': 'POIType', 'db_table': "'o_b_poi'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
},
u'trekking.route': {
'Meta': {'ordering': "['route']", 'object_name': 'Route', 'db_table': "'o_b_parcours'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'route': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parcours'"})
},
u'trekking.theme': {
'Meta': {'ordering': "['label']", 'object_name': 'Theme', 'db_table': "'o_b_theme'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'theme'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
},
u'trekking.trek': {
'Meta': {'ordering': "['name']", 'object_name': 'Trek', 'db_table': "'o_t_itineraire'", '_ormbases': [u'core.Topology']},
'access': ('django.db.models.fields.TextField', [], {'db_column': "'acces'", 'blank': 'True'}),
'advice': ('django.db.models.fields.TextField', [], {'db_column': "'recommandation'", 'blank': 'True'}),
'advised_parking': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parking'", 'blank': 'True'}),
'ambiance': ('django.db.models.fields.TextField', [], {'db_column': "'ambiance'", 'blank': 'True'}),
'arrival': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'arrivee'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'depart'", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
'difficulty': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'difficulte'", 'to': u"orm['trekking.DifficultyLevel']"}),
'disabled_infrastructure': ('django.db.models.fields.TextField', [], {'db_column': "'handicap'", 'blank': 'True'}),
'duration': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_column': "'duree'", 'blank': 'True'}),
'information_desks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['trekking.InformationDesk']", 'null': 'True', 'db_table': "'o_r_itineraire_renseignement'", 'blank': 'True'}),
'is_park_centered': ('django.db.models.fields.BooleanField', [], {'db_column': "'coeur'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.TrekNetwork']", 'db_table': "'o_r_itineraire_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'parking_location': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom_parking'", 'blank': 'True'}),
'public_transport': ('django.db.models.fields.TextField', [], {'db_column': "'transport'", 'blank': 'True'}),
'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
'related_treks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_treks+'", 'symmetrical': 'False', 'through': u"orm['trekking.TrekRelationship']", 'to': u"orm['trekking.Trek']"}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'parcours'", 'to': u"orm['trekking.Route']"}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.Theme']", 'db_table': "'o_r_itineraire_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.Usage']", 'db_table': "'o_r_itineraire_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'web_links': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.WebLink']", 'db_table': "'o_r_itineraire_web'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'})
},
u'trekking.treknetwork': {
'Meta': {'ordering': "['network']", 'object_name': 'TrekNetwork', 'db_table': "'o_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'reseau'"})
},
u'trekking.trekrelationship': {
'Meta': {'unique_together': "(('trek_a', 'trek_b'),)", 'object_name': 'TrekRelationship', 'db_table': "'o_r_itineraire_itineraire'"},
'has_common_departure': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'depart_commun'"}),
'has_common_edge': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'troncons_communs'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_circuit_step': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'etape_circuit'"}),
'trek_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_a'", 'db_column': "'itineraire_a'", 'to': u"orm['trekking.Trek']"}),
'trek_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_b'", 'db_column': "'itineraire_b'", 'to': u"orm['trekking.Trek']"})
},
u'trekking.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'o_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'usage'"})
},
u'trekking.weblink': {
'Meta': {'ordering': "['name']", 'object_name': 'WebLink', 'db_table': "'o_t_web'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'db_column': "'categorie'", 'to': u"orm['trekking.WebLinkCategory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '128', 'db_column': "'url'"})
},
u'trekking.weblinkcategory': {
'Meta': {'ordering': "['label']", 'object_name': 'WebLinkCategory', 'db_table': "'o_b_web_category'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
}
}
complete_apps = ['trekking']
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
# py2app build inputs: main script, bundled data files, and bundle options.
APP = ['CiscoBuild.py']
DATA_FILES = []
OPTIONS = {
    'argv_emulation': True,
    'iconfile': 'DDIcon.icns',
}

setup(
    name="CiscoBuild",
    version="0.1",
    author="Shane Carnahan",
    author_email="Shane.carnahan1@gmail.com",
    description="Copyright 2018",
    long_description="Program to create configuration files from templates.",
    platforms='MAC and Windows',
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
|
# Copyright 2013 IBM Corp
#
# Author: Tong Li <litong01@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import json
import time
from monasca.openstack.common import log
# Module-level logger obtained via the openstack.common logging helper.
LOG = log.getLogger(__name__)
class MetricsFixer(object):
    """Normalizes metric messages for ElasticSearch bulk indexing.

    Each message is completed with a ``timestamp`` and, when dimensions are
    present, a ``dimensions_hash``, then serialized into bulk-API format:
    an ``{"index":{}}`` action line followed by the document line.
    """

    def __init__(self):
        LOG.debug('initializing MetricsFixer!')
        super(MetricsFixer, self).__init__()

    @staticmethod
    def _add_hash(message):
        """Return *message* serialized to compact JSON.

        Ensures the dict carries a ``timestamp`` (current time if missing)
        and a ``dimensions_hash`` (md5 of the canonical JSON form of the
        dimensions).  Mutates *message* in place.
        """
        # If there is no timestamp, we need to fix that up
        if not message.get('timestamp'):
            message['timestamp'] = time.time()
        # fixup the dimensions_hash
        if not message.get('dimensions_hash') and message.get('dimensions'):
            # Canonical form: sorted keys, no whitespace, so equal dimension
            # sets always hash to the same value.
            key_str = json.dumps(message['dimensions'],
                                 sort_keys=True, indent=None,
                                 separators=(',', ':'))
            # BUG FIX: hashlib requires bytes on Python 3; encoding is also
            # safe on Python 2 for the JSON text produced above.
            message['dimensions_hash'] = hashlib.md5(
                key_str.encode('utf-8')).hexdigest()
        return json.dumps(message, sort_keys=False, indent=None,
                          separators=(',', ':'))

    def process_msg(self, msg):
        """Convert a JSON message (one object or a list of objects) into an
        ElasticSearch bulk-indexing payload.

        Returns the payload string, or '' if the message cannot be processed
        (the error is logged, never raised).
        """
        try:
            data = json.loads(msg)
            if not isinstance(data, list):
                data = [data]
            # Accumulate parts in a list and join once: linear instead of
            # the quadratic repeated string concatenation.
            parts = []
            for item in data:
                parts.append('{"index":{}}\n' + MetricsFixer._add_hash(item))
                parts.append('\n')
            return ''.join(parts)
        except Exception:
            LOG.exception('')
            return ''
|
# Copyright (c) 2009, 2010, 2011, 2012, 2013, 2016 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import uuid
import ovs.db.data as data
import ovs.db.parser
import ovs.db.schema
import ovs.jsonrpc
import ovs.ovsuuid
import ovs.poller
import ovs.vlog
from ovs.db import custom_index
from ovs.db import error
import six
# Module logger for the IDL.
vlog = ovs.vlog.Vlog("idl")

__pychecker__ = 'no-classattr no-objattrs'

# String tags naming the kind of change applied to a row.
ROW_CREATE = "create"
ROW_UPDATE = "update"
ROW_DELETE = "delete"

# Markers distinguishing the two OVSDB update wire formats handled in
# Idl.run(): the "update" notification vs. the newer "update2" one.
OVSDB_UPDATE = 0
OVSDB_UPDATE2 = 1
class Idl(object):
"""Open vSwitch Database Interface Definition Language (OVSDB IDL).
The OVSDB IDL maintains an in-memory replica of a database. It issues RPC
requests to an OVSDB database server and parses the responses, converting
raw JSON into data structures that are easier for clients to digest.
The IDL also assists with issuing database transactions. The client
creates a transaction, manipulates the IDL data structures, and commits or
aborts the transaction. The IDL then composes and issues the necessary
JSON-RPC requests and reports to the client whether the transaction
completed successfully.
The client is allowed to access the following attributes directly, in a
read-only fashion:
- 'tables': This is the 'tables' map in the ovs.db.schema.DbSchema provided
to the Idl constructor. Each ovs.db.schema.TableSchema in the map is
annotated with a new attribute 'rows', which is a dict from a uuid.UUID
to a Row object.
The client may directly read and write the Row objects referenced by the
'rows' map values. Refer to Row for more details.
- 'change_seqno': A number that represents the IDL's state. When the IDL
is updated (by Idl.run()), its value changes. The sequence number can
occasionally change even if the database does not. This happens if the
connection to the database drops and reconnects, which causes the
database contents to be reloaded even if they didn't change. (It could
also happen if the database server sends out a "change" that reflects
what the IDL already thought was in the database. The database server is
not supposed to do that, but bugs could in theory cause it to do so.)
- 'lock_name': The name of the lock configured with Idl.set_lock(), or None
if no lock is configured.
- 'has_lock': True, if the IDL is configured to obtain a lock and owns that
lock, and False otherwise.
Locking and unlocking happens asynchronously from the database client's
point of view, so the information is only useful for optimization
(e.g. if the client doesn't have the lock then there's no point in trying
to write to the database).
- 'is_lock_contended': True, if the IDL is configured to obtain a lock but
the database server has indicated that some other client already owns the
requested lock, and False otherwise.
- 'txn': The ovs.db.idl.Transaction object for the database transaction
currently being constructed, if there is one, or None otherwise.
"""
IDL_S_INITIAL = 0
IDL_S_MONITOR_REQUESTED = 1
IDL_S_MONITOR_COND_REQUESTED = 2
    def __init__(self, remote, schema_helper, probe_interval=None):
        """Creates and returns a connection to the database named 'db_name' on
        'remote', which should be in a form acceptable to
        ovs.jsonrpc.session.open().  The connection will maintain an in-memory
        replica of the remote database.

        'remote' can be comma separated multiple remotes and each remote
        should be in a form acceptable to ovs.jsonrpc.session.open().

        'schema_helper' should be an instance of the SchemaHelper class which
        generates schema for the remote database.  The caller may have cut it
        down by removing tables or columns that are not of interest.  The IDL
        will only replicate the tables and columns that remain.  The caller
        may also add an attribute named 'alert' to selected remaining columns,
        setting its value to False; if so, then changes to those columns will
        not be considered changes to the database for the purpose of the
        return value of Idl.run() and Idl.change_seqno.  This is useful for
        columns that the IDL's client will write but not read.

        As a convenience to users, 'schema' may also be an instance of the
        SchemaHelper class.

        The IDL uses and modifies 'schema' directly.

        If "probe_interval" is zero it disables the connection keepalive
        feature.  If non-zero the value will be forced to at least 1000
        milliseconds.  If None it will just use the default value in OVS.
        """
        assert isinstance(schema_helper, SchemaHelper)
        schema = schema_helper.get_idl_schema()

        self.tables = schema.tables
        self.readonly = schema.readonly
        self._db = schema
        # A single session multiplexed over one or more remotes.
        remotes = self._parse_remotes(remote)
        self._session = ovs.jsonrpc.Session.open_multiple(remotes,
            probe_interval=probe_interval)
        self._monitor_request_id = None
        self._last_seqno = None
        self.change_seqno = 0
        self.uuid = uuid.uuid1()
        # Protocol state machine; starts before any monitor request is sent.
        self.state = self.IDL_S_INITIAL

        # Database locking.
        self.lock_name = None          # Name of lock we need, None if none.
        self.has_lock = False          # Has db server said we have the lock?
        self.is_lock_contended = False  # Has db server said we can't get lock?
        self._lock_request_id = None   # JSON-RPC ID of in-flight lock request.

        # Transaction support.
        self.txn = None
        self._outstanding_txns = {}

        # Attach per-table IDL bookkeeping to the schema objects.
        for table in six.itervalues(schema.tables):
            for column in six.itervalues(table.columns):
                if not hasattr(column, 'alert'):
                    column.alert = True
            table.need_table = False
            table.rows = custom_index.IndexedRows(table)
            table.idl = self
            # Default condition replicates every row.
            table.condition = [True]
            table.cond_changed = False
def _parse_remotes(self, remote):
# If remote is -
# "tcp:10.0.0.1:6641,unix:/tmp/db.sock,t,s,tcp:10.0.0.2:6642"
# this function returns
# ["tcp:10.0.0.1:6641", "unix:/tmp/db.sock,t,s", tcp:10.0.0.2:6642"]
remotes = []
for r in remote.split(','):
if remotes and r.find(":") == -1:
remotes[-1] += "," + r
else:
remotes.append(r)
return remotes
def index_create(self, table, name):
"""Create a named multi-column index on a table"""
return self.tables[table].rows.index_create(name)
def index_irange(self, table, name, start, end):
"""Return items in a named index between start/end inclusive"""
return self.tables[table].rows.indexes[name].irange(start, end)
def index_equal(self, table, name, value):
"""Return items in a named index matching a value"""
return self.tables[table].rows.indexes[name].irange(value, value)
def close(self):
"""Closes the connection to the database. The IDL will no longer
update."""
self._session.close()
    def run(self):
        """Processes a batch of messages from the database server.  Returns
        True if the database as seen through the IDL changed, False if it did
        not change.  The initial fetch of the entire contents of the remote
        database is considered to be one kind of change.  If the IDL has been
        configured to acquire a database lock (with Idl.set_lock()), then
        successfully acquiring the lock is also considered to be a change.

        This function can return occasional false positives, that is, report
        that the database changed even though it didn't.  This happens if the
        connection to the database drops and reconnects, which causes the
        database contents to be reloaded even if they didn't change.  (It
        could also happen if the database server sends out a "change" that
        reflects what we already thought was in the database, but the
        database server is not supposed to do that.)

        As an alternative to checking the return value, the client may check
        for changes in self.change_seqno."""
        assert not self.txn
        initial_change_seqno = self.change_seqno
        self.send_cond_change()
        self._session.run()
        # Handle at most 50 messages per call, presumably to bound the work
        # done in a single invocation.
        i = 0
        while i < 50:
            i += 1
            if not self._session.is_connected():
                break

            seqno = self._session.get_seqno()
            if seqno != self._last_seqno:
                # The session (re)connected: abort in-flight transactions
                # and request a fresh monitor of the database.
                self._last_seqno = seqno
                self.__txn_abort_all()
                self.__send_monitor_request()
                if self.lock_name:
                    self.__send_lock_request()
                break

            msg = self._session.recv()
            if msg is None:
                break

            if (msg.type == ovs.jsonrpc.Message.T_NOTIFY
                    and msg.method == "update2"
                    and len(msg.params) == 2):
                # Database contents changed.
                self.__parse_update(msg.params[1], OVSDB_UPDATE2)
            elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
                    and msg.method == "update"
                    and len(msg.params) == 2):
                # Database contents changed.
                self.__parse_update(msg.params[1], OVSDB_UPDATE)
            elif (msg.type == ovs.jsonrpc.Message.T_REPLY
                    and self._monitor_request_id is not None
                    and self._monitor_request_id == msg.id):
                # Reply to our "monitor" request.
                try:
                    self.change_seqno += 1
                    self._monitor_request_id = None
                    self.__clear()
                    # The state tells us which monitor variant we sent and
                    # therefore which reply format to expect.
                    if self.state == self.IDL_S_MONITOR_COND_REQUESTED:
                        self.__parse_update(msg.result, OVSDB_UPDATE2)
                    else:
                        assert self.state == self.IDL_S_MONITOR_REQUESTED
                        self.__parse_update(msg.result, OVSDB_UPDATE)
                except error.Error as e:
                    vlog.err("%s: parse error in received schema: %s"
                             % (self._session.get_name(), e))
                    self.__error()
            elif (msg.type == ovs.jsonrpc.Message.T_REPLY
                    and self._lock_request_id is not None
                    and self._lock_request_id == msg.id):
                # Reply to our "lock" request.
                self.__parse_lock_reply(msg.result)
            elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
                    and msg.method == "locked"):
                # We got our lock.
                self.__parse_lock_notify(msg.params, True)
            elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
                    and msg.method == "stolen"):
                # Someone else stole our lock.
                self.__parse_lock_notify(msg.params, False)
            elif msg.type == ovs.jsonrpc.Message.T_NOTIFY and msg.id == "echo":
                # Reply to our echo request.  Ignore it.
                pass
            elif (msg.type == ovs.jsonrpc.Message.T_ERROR and
                  self.state == self.IDL_S_MONITOR_COND_REQUESTED and
                  self._monitor_request_id == msg.id):
                # The monitor_cond request failed; on "unknown method" fall
                # back to the plain "monitor" request.
                if msg.error == "unknown method":
                    self.__send_monitor_request()
            elif (msg.type in (ovs.jsonrpc.Message.T_ERROR,
                               ovs.jsonrpc.Message.T_REPLY)
                    and self.__txn_process_reply(msg)):
                # __txn_process_reply() did everything needed.
                pass
            else:
                # This can happen if a transaction is destroyed before we
                # receive the reply, so keep the log level low.
                vlog.dbg("%s: received unexpected %s message"
                         % (self._session.get_name(),
                            ovs.jsonrpc.Message.type_to_string(msg.type)))

        return initial_change_seqno != self.change_seqno
def send_cond_change(self):
if not self._session.is_connected():
return
for table in six.itervalues(self.tables):
if table.cond_changed:
self.__send_cond_change(table, table.condition)
table.cond_changed = False
def cond_change(self, table_name, cond):
    """Set the monitor condition for 'table_name' to 'cond'.

    'cond' is a conditional expression in OVSDB wire format, except
    that the empty condition [] matches no rows (i.e. [] is treated
    as [False], not as [True]).  Raises error.Error if 'table_name'
    is not a known table.
    """
    tbl = self.tables.get(table_name)
    if tbl is None:
        raise error.Error('Unknown table "%s"' % table_name)

    effective = [False] if cond == [] else cond
    if tbl.condition != effective:
        tbl.condition = effective
        tbl.cond_changed = True
def wait(self, poller):
    """Arrange for poller.block() to wake up when self.run() has
    something to do or when activity occurs on a transaction on
    'self'."""
    for register in (self._session.wait, self._session.recv_wait):
        register(poller)
def has_ever_connected(self):
    """Return True if the IDL successfully connected to the remote
    database and retrieved its contents at least once (even if the
    connection subsequently dropped and is reconnecting); the IDL then
    holds an atomic - possibly stale - snapshot of the database.

    Return False if the IDL has never retrieved the database contents;
    the IDL is then empty."""
    return bool(self.change_seqno)
def force_reconnect(self):
    """Drop the connection to the database and reconnect.  The IDL's
    contents do not change in the meantime."""
    self._session.force_reconnect()
def set_lock(self, lock_name):
    """Configure database locking.

    If 'lock_name' is not None, the IDL obtains the named lock from the
    database server and avoids modifying the database while another
    client holds that lock.  Passing None drops the requirement and
    releases any held lock.  Must not be called while a transaction is
    in flight.
    """
    assert not self.txn
    assert not self._outstanding_txns

    # Release the old lock first if it differs from the requested one.
    if self.lock_name and lock_name != self.lock_name:
        self.__send_unlock_request()
        self.lock_name = None
        self.is_lock_contended = False

    # Then request the new lock, if any.
    if lock_name and not self.lock_name:
        self.lock_name = lock_name
        self.__send_lock_request()
def notify(self, event, row, updates=None):
    """Hook invoked on row create/update/delete; the default is a no-op
    that subclasses may override.

    :param event: the event that was triggered
    :type event: ROW_CREATE, ROW_UPDATE, or ROW_DELETE
    :param row: the row as it is after the operation has occurred
    :type row: Row
    :param updates: for updates, a row holding only the old values of
                    the changed columns
    :type updates: Row
    """
def __send_cond_change(self, table, cond):
    """Send a monitor_cond_change request replacing 'table's condition
    with 'cond'.

    A fresh monitor UUID is generated; the request carries the old and
    new identifiers as the protocol requires.
    """
    previous = str(self.uuid)
    self.uuid = uuid.uuid1()
    change = {table.name: [{"where": cond}]}
    request = ovs.jsonrpc.Message.create_request(
        "monitor_cond_change", [previous, str(self.uuid), change])
    self._session.send(request)
def __clear(self):
    """Discard every table's rows, bumping change_seqno if anything was
    actually dropped."""
    dropped_any = False
    for tbl in six.itervalues(self.tables):
        if not tbl.rows:
            continue
        dropped_any = True
        tbl.rows = custom_index.IndexedRows(tbl)

    if dropped_any:
        self.change_seqno += 1
def __update_has_lock(self, new_has_lock):
    """Record a change in lock ownership.

    Newly acquiring the lock counts as a database change (change_seqno
    bump) unless a monitor reply is outstanding, since that reply will
    increment change_seqno anyway.
    """
    if new_has_lock and not self.has_lock:
        # Only signal a change when no monitor reply is pending.
        if self._monitor_request_id is None:
            self.change_seqno += 1
        self.is_lock_contended = False
    self.has_lock = new_has_lock
def __do_send_lock_request(self, method):
    """Send a "lock" or "unlock" request ('method') for self.lock_name.

    Returns the outgoing request id, or None when there is no active
    connection (in which case nothing is sent).
    """
    self.__update_has_lock(False)
    self._lock_request_id = None
    if not self._session.is_connected():
        return None
    msg = ovs.jsonrpc.Message.create_request(method, [self.lock_name])
    request_id = msg.id
    self._session.send(msg)
    return request_id
def __send_lock_request(self):
    """Issue a "lock" request, remembering its id to match the reply."""
    self._lock_request_id = self.__do_send_lock_request("lock")
def __send_unlock_request(self):
    """Issue an "unlock" request; its reply needs no tracking."""
    self.__do_send_lock_request("unlock")
def __parse_lock_reply(self, result):
    """Handle the server's reply to our "lock" request."""
    self._lock_request_id = None
    acquired = isinstance(result, dict) and result.get("locked") is True
    self.__update_has_lock(acquired)
    if not acquired:
        # Someone else holds the lock.
        self.is_lock_contended = True
def __parse_lock_notify(self, params, new_has_lock):
    """Handle a "locked" (new_has_lock=True) or "stolen"
    (new_has_lock=False) notification, ignoring it unless it names the
    lock we are interested in."""
    if self.lock_name is None:
        return
    if not isinstance(params, (list, tuple)) or not params:
        return
    if params[0] != self.lock_name:
        return
    self.__update_has_lock(new_has_lock)
    if not new_has_lock:
        self.is_lock_contended = True
def __send_monitor_request(self):
    """Send the initial monitor request covering all tables.

    The first attempt uses "monitor_cond" (which supports per-table
    conditions); if the server rejects that method as unknown, run()
    calls back here and we fall back to the plain "monitor" method.
    """
    if self.state == self.IDL_S_INITIAL:
        self.state = self.IDL_S_MONITOR_COND_REQUESTED
        method = "monitor_cond"
    else:
        self.state = self.IDL_S_MONITOR_REQUESTED
        method = "monitor"

    monitor_requests = {}
    for table in six.itervalues(self.tables):
        columns = []
        for column in six.iterkeys(table.columns):
            # Skip columns registered as read-only for this table.
            if ((table.name not in self.readonly) or
                    (table.name in self.readonly) and
                    (column not in self.readonly[table.name])):
                columns.append(column)
        monitor_request = {"columns": columns}
        if method == "monitor_cond" and table.condition != [True]:
            monitor_request["where"] = table.condition
            # Bug fix: the condition travels with this request, so clear
            # the 'cond_changed' flag that send_cond_change() checks.
            # The original wrote to a nonexistent 'cond_change'
            # attribute, leaving 'cond_changed' set and causing a
            # redundant monitor_cond_change to be sent later.
            table.cond_changed = False
        monitor_requests[table.name] = [monitor_request]
    msg = ovs.jsonrpc.Message.create_request(
        method, [self._db.name, str(self.uuid), monitor_requests])
    self._monitor_request_id = msg.id
    self._session.send(msg)
def __parse_update(self, update, version):
    """Apply a <table-updates> message, logging (rather than raising)
    parse errors."""
    try:
        self.__do_parse_update(update, version)
    except error.Error as e:
        vlog.err("%s: error parsing update: %s"
                 % (self._session.get_name(), e))
def __do_parse_update(self, table_updates, version):
    """Apply a <table-updates> JSON object to the in-memory tables.

    'version' selects the wire format: OVSDB_UPDATE (plain "monitor"
    replies, with "old"/"new" members) or OVSDB_UPDATE2 ("monitor_cond"
    replies, with "insert"/"initial"/"modify"/"delete" members).
    Bumps change_seqno for each row update that visibly changed the
    database.  Raises error.Error on malformed input.
    """
    if not isinstance(table_updates, dict):
        raise error.Error("<table-updates> is not an object",
                          table_updates)

    for table_name, table_update in six.iteritems(table_updates):
        table = self.tables.get(table_name)
        if not table:
            raise error.Error('<table-updates> includes unknown '
                              'table "%s"' % table_name)

        if not isinstance(table_update, dict):
            raise error.Error('<table-update> for table "%s" is not '
                              'an object' % table_name, table_update)

        for uuid_string, row_update in six.iteritems(table_update):
            if not ovs.ovsuuid.is_valid_string(uuid_string):
                raise error.Error('<table-update> for table "%s" '
                                  'contains bad UUID "%s" as member '
                                  'name' % (table_name, uuid_string),
                                  table_update)
            uuid = ovs.ovsuuid.from_string(uuid_string)

            if not isinstance(row_update, dict):
                raise error.Error('<table-update> for table "%s" '
                                  'contains <row-update> for %s that '
                                  'is not an object'
                                  % (table_name, uuid_string))

            if version == OVSDB_UPDATE2:
                # New-style update: one of insert/initial/modify/delete.
                if self.__process_update2(table, uuid, row_update):
                    self.change_seqno += 1
                continue

            # Old-style update: "old" and/or "new" row contents.
            parser = ovs.db.parser.Parser(row_update, "row-update")
            old = parser.get_optional("old", [dict])
            new = parser.get_optional("new", [dict])
            parser.finish()

            if not old and not new:
                raise error.Error('<row-update> missing "old" and '
                                  '"new" members', row_update)

            if self.__process_update(table, uuid, old, new):
                self.change_seqno += 1
def __process_update2(self, table, uuid, row_update):
    """Apply one <row-update2> (monitor_cond format) to 'table'.

    'row_update' contains exactly one of "delete", "insert", "initial"
    or "modify".  Returns True if the database contents visibly
    changed.  Raises error.Error on an unknown operation or a modify of
    a nonexistent row.
    """
    row = table.rows.get(uuid)
    changed = False
    if "delete" in row_update:
        if row:
            del table.rows[uuid]
            self.notify(ROW_DELETE, row)
            changed = True
        else:
            # XXX rate-limit
            vlog.warn("cannot delete missing row %s from table"
                      "%s" % (uuid, table.name))
    elif "insert" in row_update or "initial" in row_update:
        if row:
            # Unexpected: discard the stale copy and rebuild the row.
            vlog.warn("cannot add existing row %s from table"
                      " %s" % (uuid, table.name))
            del table.rows[uuid]
        row = self.__create_row(table, uuid)
        if "insert" in row_update:
            row_update = row_update['insert']
        else:
            row_update = row_update['initial']
        # Fill in defaults for required columns the server omitted.
        self.__add_default(table, row_update)
        changed = self.__row_update(table, row, row_update)
        table.rows[uuid] = row
        if changed:
            self.notify(ROW_CREATE, row)
    elif "modify" in row_update:
        if not row:
            raise error.Error('Modify non-existing row')

        # __apply_diff mutates 'row' and returns the previous values of
        # the touched columns for the notification.
        old_row = self.__apply_diff(table, row, row_update['modify'])
        self.notify(ROW_UPDATE, row, Row(self, table, uuid, old_row))
        changed = True
    else:
        raise error.Error('<row-update> unknown operation',
                          row_update)
    return changed
def __process_update(self, table, uuid, old, new):
    """Apply one old/new <row-update> (plain "monitor" format).

    Returns True if a column changed, False otherwise."""
    row = table.rows.get(uuid)
    changed = False
    if not new:
        # Delete row.
        if row:
            del table.rows[uuid]
            changed = True
            self.notify(ROW_DELETE, row)
        else:
            # XXX rate-limit
            vlog.warn("cannot delete missing row %s from table %s"
                      % (uuid, table.name))
    elif not old:
        # Insert row.
        op = ROW_CREATE
        if not row:
            row = self.__create_row(table, uuid)
            changed = True
        else:
            # XXX rate-limit
            op = ROW_UPDATE
            vlog.warn("cannot add existing row %s to table %s"
                      % (uuid, table.name))
        changed |= self.__row_update(table, row, new)
        if op == ROW_CREATE:
            table.rows[uuid] = row
        if changed:
            # NOTE(review): this notifies ROW_CREATE even on the
            # existing-row path (op == ROW_UPDATE above) — confirm that
            # is intended.
            self.notify(ROW_CREATE, row)
    else:
        # Modify row.
        op = ROW_UPDATE
        if not row:
            # Treat a modify of a missing row as a create.
            row = self.__create_row(table, uuid)
            changed = True
            op = ROW_CREATE
            # XXX rate-limit
            vlog.warn("cannot modify missing row %s in table %s"
                      % (uuid, table.name))
        changed |= self.__row_update(table, row, new)
        if op == ROW_CREATE:
            table.rows[uuid] = row
        if changed:
            self.notify(op, row, Row.from_json(self, table, uuid, old))
    return changed
def __column_name(self, column):
    """Return the default key value for 'column', with UUID defaults
    encoded in OVSDB JSON form."""
    key_type = column.type.key.type
    default = key_type.default
    if key_type == ovs.db.types.UuidType:
        return ovs.ovsuuid.to_json(default)
    return default
def __add_default(self, table, row_update):
    """Fill 'row_update' in place with default values for writable
    columns that are required (n_min != 0) but absent from the
    update."""
    for column in six.itervalues(table.columns):
        if column.name not in row_update:
            # Skip columns registered as read-only for this table.
            # (Precedence: A or (B and C) — any column of a table with
            # no read-only entries qualifies.)
            if ((table.name not in self.readonly) or
                    (table.name in self.readonly) and
                    (column.name not in self.readonly[table.name])):
                # Maps may legitimately be empty; other required
                # columns receive their type's default.
                if column.type.n_min != 0 and not column.type.is_map():
                    row_update[column.name] = self.__column_name(column)
def __apply_diff(self, table, row, row_diff):
    """Apply a "modify" diff to 'row' in place.

    Returns a dict mapping each touched column name to a copy of its
    previous Datum, suitable for building the "old values" Row passed
    to notify().  Unknown columns and unparsable data are logged and
    skipped.
    """
    old_row = {}
    for column_name, datum_diff_json in six.iteritems(row_diff):
        column = table.columns.get(column_name)
        if not column:
            # XXX rate-limit
            vlog.warn("unknown column %s updating table %s"
                      % (column_name, table.name))
            continue

        try:
            datum_diff = data.Datum.from_json(column.type, datum_diff_json)
        except error.Error as e:
            # XXX rate-limit
            vlog.warn("error parsing column %s in table %s: %s"
                      % (column_name, table.name, e))
            continue

        # Preserve the old value, then merge the diff into the row.
        old_row[column_name] = row._data[column_name].copy()
        datum = row._data[column_name].diff(datum_diff)
        if datum != row._data[column_name]:
            row._data[column_name] = datum

    return old_row
def __row_update(self, table, row, row_json):
    """Overwrite columns of 'row' from the JSON object 'row_json'.

    Returns True if any column with alerting enabled actually changed
    value.  Unknown columns and unparsable data are logged and
    skipped.
    """
    changed = False
    for column_name, datum_json in six.iteritems(row_json):
        column = table.columns.get(column_name)
        if not column:
            # XXX rate-limit
            vlog.warn("unknown column %s updating table %s"
                      % (column_name, table.name))
            continue

        try:
            datum = data.Datum.from_json(column.type, datum_json)
        except error.Error as e:
            # XXX rate-limit
            vlog.warn("error parsing column %s in table %s: %s"
                      % (column_name, table.name, e))
            continue

        if datum != row._data[column_name]:
            row._data[column_name] = datum
            if column.alert:
                changed = True
        else:
            # Didn't really change but the OVSDB monitor protocol always
            # includes every value in a row.
            pass

    return changed
def __create_row(self, table, uuid):
    """Build a Row for 'uuid' with every column set to its type's
    default Datum."""
    defaults = {col.name: ovs.db.data.Datum.default(col.type)
                for col in six.itervalues(table.columns)}
    return Row(self, table, uuid, defaults)
def __error(self):
    """React to a protocol error by forcing a reconnect."""
    self._session.force_reconnect()
def __txn_abort_all(self):
    """Mark every outstanding transaction TRY_AGAIN and forget it."""
    while self._outstanding_txns:
        _, txn = self._outstanding_txns.popitem()
        txn._status = Transaction.TRY_AGAIN
def __txn_process_reply(self, msg):
    """Route 'msg' to the outstanding transaction it answers.

    Returns True when a matching transaction was found and given the
    reply; otherwise returns a false value.
    """
    txn = self._outstanding_txns.pop(msg.id, None)
    if txn is None:
        return None
    txn._process_reply(msg)
    return True
def _uuid_to_row(atom, base):
    """Map a UUID atom to the referenced Row when 'base' is a reference
    type; otherwise return the atom unchanged."""
    if not base.ref_table:
        return atom
    return base.ref_table.rows.get(atom)
def _row_to_uuid(value):
    """Convert a Row to its database UUID; pass any other value
    through unchanged."""
    return value.uuid if isinstance(value, Row) else value
@functools.total_ordering
class Row(object):
    """A row within an IDL.

    The client may access the following attributes directly:

    - 'uuid': a uuid.UUID object whose value is the row's database UUID.

    - An attribute for each column in the Row's table, named for the column,
      whose values are as returned by Datum.to_python() for the column's
      type.

      If some error occurs (e.g. the database server's idea of the column is
      different from the IDL's idea), then the attribute value is the
      "default" value returned by Datum.default() for the column's type.  (It
      is important to know this because the default value may violate
      constraints for the column's type, e.g. the default integer value is 0
      even if column constraints require the column's value to be positive.)

    When a transaction is active, column attributes may also be assigned new
    values.  Committing the transaction will then cause the new value to be
    stored into the database.

    *NOTE*: In the current implementation, the value of a column is a *copy*
    of the value in the database.  This means that modifying its value
    directly will have no useful effect.  For example, the following:
        row.mycolumn["a"] = "b"              # don't do this
    will not change anything in the database, even after commit.  To modify
    the column, instead assign the modified column value back to the column:
        d = row.mycolumn
        d["a"] = "b"
        row.mycolumn = d
    """

    def __init__(self, idl, table, uuid, data):
        # All of the explicit references to self.__dict__ below are required
        # to set real attributes without invoking self.__getattr__().
        self.__dict__["uuid"] = uuid

        self.__dict__["_idl"] = idl
        self.__dict__["_table"] = table

        # _data is the committed data.  It takes the following values:
        #
        #   - A dictionary that maps every column name to a Datum, if the
        #     row exists in the committed form of the database.
        #
        #   - None, if this row is newly inserted within the active
        #     transaction and thus has no committed form.
        self.__dict__["_data"] = data

        # _changes describes changes to this row within the active
        # transaction.  It takes the following values:
        #
        #   - {}, the empty dictionary, if no transaction is active or if
        #     the row has not yet been changed within this transaction.
        #
        #   - A dictionary that maps a column name to its new Datum, if an
        #     active transaction changes those columns' values.
        #
        #   - A dictionary that maps every column name to a Datum, if the
        #     row is newly inserted within the active transaction.
        #
        #   - None, if this transaction deletes this row.
        self.__dict__["_changes"] = {}

        # _mutations describes changes to this row to be handled via a
        # mutate operation on the wire.  It takes the following values:
        #
        #   - {}, the empty dictionary, if no transaction is active or if
        #     the row has not yet been mutated within this transaction.
        #
        #   - A dictionary that contains two keys:
        #
        #     - "_inserts" contains a dictionary that maps column names to
        #       new keys/key-value pairs that should be inserted into the
        #       column
        #     - "_removes" contains a dictionary that maps column names to
        #       the keys/key-value pairs that should be removed from the
        #       column
        #
        #   - None, if this transaction deletes this row.
        self.__dict__["_mutations"] = {}

        # A dictionary whose keys are the names of columns that must be
        # verified as prerequisites when the transaction commits.  The
        # values in the dictionary are all None.
        self.__dict__["_prereqs"] = {}

    def __lt__(self, other):
        # Rows order by their database UUID; non-Rows are not comparable.
        if not isinstance(other, Row):
            return NotImplemented
        return bool(self.__dict__['uuid'] < other.__dict__['uuid'])

    def __eq__(self, other):
        # Two Rows are equal when they carry the same database UUID.
        if not isinstance(other, Row):
            return NotImplemented
        return bool(self.__dict__['uuid'] == other.__dict__['uuid'])

    def __hash__(self):
        return int(self.__dict__['uuid'])

    def __getattr__(self, column_name):
        # Invoked only when normal attribute lookup fails, i.e. for column
        # names.  Combines the committed data with transaction writes
        # (_changes) and pending mutations (_inserts/_removes) to yield the
        # column's current value.
        assert self._changes is not None
        assert self._mutations is not None

        try:
            column = self._table.columns[column_name]
        except KeyError:
            raise AttributeError("%s instance has no attribute '%s'" %
                                 (self.__class__.__name__, column_name))
        datum = self._changes.get(column_name)
        inserts = None
        if '_inserts' in self._mutations.keys():
            inserts = self._mutations['_inserts'].get(column_name)
        removes = None
        if '_removes' in self._mutations.keys():
            removes = self._mutations['_removes'].get(column_name)
        if datum is None:
            if self._data is None:
                # Newly inserted row: only pending inserts can supply a
                # value.
                if inserts is None:
                    raise AttributeError("%s instance has no attribute '%s'" %
                                         (self.__class__.__name__,
                                          column_name))
                else:
                    datum = data.Datum.from_python(column.type,
                                                   inserts,
                                                   _row_to_uuid)
            elif column_name in self._data:
                datum = self._data[column_name]
                if column.type.is_set():
                    # Apply set mutations: add inserts, drop removes.
                    dlist = datum.as_list()
                    if inserts is not None:
                        dlist.extend(list(inserts))
                    if removes is not None:
                        removes_datum = data.Datum.from_python(column.type,
                                                               removes,
                                                               _row_to_uuid)
                        removes_list = removes_datum.as_list()
                        dlist = [x for x in dlist if x not in removes_list]
                    datum = data.Datum.from_python(column.type, dlist,
                                                   _row_to_uuid)
                elif column.type.is_map():
                    # Apply map mutations; a key both inserted and removed
                    # keeps its inserted value.
                    dmap = datum.to_python(_uuid_to_row)
                    if inserts is not None:
                        dmap.update(inserts)
                    if removes is not None:
                        for key in removes:
                            if key not in (inserts or {}):
                                del dmap[key]
                    datum = data.Datum.from_python(column.type, dmap,
                                                   _row_to_uuid)
            else:
                if inserts is None:
                    raise AttributeError("%s instance has no attribute '%s'" %
                                         (self.__class__.__name__,
                                          column_name))
                else:
                    datum = inserts

        return datum.to_python(_uuid_to_row)

    def __setattr__(self, column_name, value):
        # Stage a write of 'value' to 'column_name' in the active
        # transaction (a transaction must be in progress).
        assert self._changes is not None
        assert self._idl.txn

        if ((self._table.name in self._idl.readonly) and
                (column_name in self._idl.readonly[self._table.name])):
            vlog.warn("attempting to write to readonly column %s"
                      % column_name)
            return

        column = self._table.columns[column_name]
        try:
            datum = data.Datum.from_python(column.type, value, _row_to_uuid)
        except error.Error as e:
            # XXX rate-limit
            vlog.err("attempting to write bad value to column %s (%s)"
                     % (column_name, e))
            return
        # Remove prior version of the Row from the index if it has the
        # indexed column set, and the column changing is an indexed column.
        if hasattr(self, column_name):
            for idx in self._table.rows.indexes.values():
                if column_name in (c.column for c in idx.columns):
                    idx.remove(self)
        self._idl.txn._write(self, column, datum)
        for idx in self._table.rows.indexes.values():
            # Only update the index if indexed columns change.
            if column_name in (c.column for c in idx.columns):
                idx.add(self)

    def addvalue(self, column_name, key):
        # Stage a mutate-insert of 'key' into the set column
        # 'column_name'; bad values are logged and dropped.
        self._idl.txn._txn_rows[self.uuid] = self
        column = self._table.columns[column_name]
        try:
            data.Datum.from_python(column.type, key, _row_to_uuid)
        except error.Error as e:
            # XXX rate-limit
            vlog.err("attempting to write bad value to column %s (%s)"
                     % (column_name, e))
            return
        inserts = self._mutations.setdefault('_inserts', {})
        column_value = inserts.setdefault(column_name, set())
        column_value.add(key)

    def delvalue(self, column_name, key):
        # Stage a mutate-remove of 'key' from the set column
        # 'column_name'; bad values are logged and dropped.
        self._idl.txn._txn_rows[self.uuid] = self
        column = self._table.columns[column_name]
        try:
            data.Datum.from_python(column.type, key, _row_to_uuid)
        except error.Error as e:
            # XXX rate-limit
            vlog.err("attempting to delete bad value from column %s (%s)"
                     % (column_name, e))
            return
        removes = self._mutations.setdefault('_removes', {})
        column_value = removes.setdefault(column_name, set())
        column_value.add(key)

    def setkey(self, column_name, key, value):
        # Stage setting map column 'column_name'[key] = value.
        self._idl.txn._txn_rows[self.uuid] = self
        column = self._table.columns[column_name]
        try:
            data.Datum.from_python(column.type, {key: value}, _row_to_uuid)
        except error.Error as e:
            # XXX rate-limit
            vlog.err("attempting to write bad value to column %s (%s)"
                     % (column_name, e))
            return
        if self._data and column_name in self._data:
            # Remove existing key/value before updating.
            removes = self._mutations.setdefault('_removes', {})
            column_value = removes.setdefault(column_name, set())
            column_value.add(key)
        inserts = self._mutations.setdefault('_inserts', {})
        column_value = inserts.setdefault(column_name, {})
        column_value[key] = value

    def delkey(self, column_name, key, value=None):
        # Stage removal of 'key' from map column 'column_name'.  When
        # 'value' is given, the removal is skipped unless the committed
        # data maps 'key' to exactly that value.
        self._idl.txn._txn_rows[self.uuid] = self
        if value:
            try:
                old_value = data.Datum.to_python(self._data[column_name],
                                                 _uuid_to_row)
            except error.Error:
                return
            if key not in old_value:
                return
            if old_value[key] != value:
                return

        removes = self._mutations.setdefault('_removes', {})
        column_value = removes.setdefault(column_name, set())
        column_value.add(key)
        return

    @classmethod
    def from_json(cls, idl, table, uuid, row_json):
        """Build a Row from an OVSDB JSON row object, logging and
        skipping unknown columns and unparsable data."""
        data = {}
        for column_name, datum_json in six.iteritems(row_json):
            column = table.columns.get(column_name)
            if not column:
                # XXX rate-limit
                vlog.warn("unknown column %s in table %s"
                          % (column_name, table.name))
                continue
            try:
                datum = ovs.db.data.Datum.from_json(column.type, datum_json)
            except error.Error as e:
                # XXX rate-limit
                vlog.warn("error parsing column %s in table %s: %s"
                          % (column_name, table.name, e))
                continue
            data[column_name] = datum
        return cls(idl, table, uuid, data)

    def verify(self, column_name):
        """Causes the original contents of column 'column_name' in this row
        to be verified as a prerequisite to completing the transaction.
        That is, if 'column_name' changed in this row (or if this row was
        deleted) between the time that the IDL originally read its contents
        and the time that the transaction commits, then the transaction
        aborts and Transaction.commit() returns Transaction.TRY_AGAIN.

        The intention is that, to ensure that no transaction commits based
        on dirty reads, an application should call Row.verify() on each data
        item read as part of a read-modify-write operation.

        In some cases Row.verify() reduces to a no-op, because the current
        value of the column is already known:

          - If this row is a row created by the current transaction
            (returned by Transaction.insert()).

          - If the column has already been modified within the current
            transaction.

        Because of the latter property, always call Row.verify() *before*
        modifying the column, for a given read-modify-write.

        A transaction must be in progress."""
        assert self._idl.txn
        assert self._changes is not None
        # No-op when the row is locally inserted or the column already
        # written this transaction: its value is locally known.
        if not self._data or column_name in self._changes:
            return

        self._prereqs[column_name] = None

    def delete(self):
        """Deletes this row from its table.

        A transaction must be in progress."""
        assert self._idl.txn
        assert self._changes is not None
        if self._data is None:
            # Row was inserted by this same transaction: just forget it.
            del self._idl.txn._txn_rows[self.uuid]
        else:
            self._idl.txn._txn_rows[self.uuid] = self
        del self._table.rows[self.uuid]
        # _changes == None marks the row as deleted by this transaction.
        self.__dict__["_changes"] = None

    def fetch(self, column_name):
        # Ask the transaction to fetch the server-side value of
        # 'column_name' when it commits.
        self._idl.txn._fetch(self, column_name)

    def increment(self, column_name):
        """Causes the transaction, when committed, to increment the value
        of 'column_name' within this row by 1.  'column_name' must have an
        integer type.  After the transaction commits successfully, the
        client may retrieve the final (incremented) value of 'column_name'
        with Transaction.get_increment_new_value().

        The client could accomplish something similar by reading and
        writing and verify()ing columns.  However, increment() will never
        (by itself) cause a transaction to fail because of a verify error.

        The intended use is for incrementing the "next_cfg" column in
        the Open_vSwitch table."""
        self._idl.txn._increment(self, column_name)
def _uuid_name_from_uuid(uuid):
    """Derive an OVSDB "named-uuid" identifier for 'uuid' (dashes are
    not valid in <id> atoms, so they become underscores)."""
    return "row" + str(uuid).replace("-", "_")
def _where_uuid_equals(uuid):
    """Build an OVSDB "where" clause matching the row whose _uuid is
    'uuid'."""
    clause = ["_uuid", "==", ["uuid", str(uuid)]]
    return [clause]
class _InsertedRow(object):
    """Bookkeeping for a row inserted by a transaction: remembers the
    index of its "insert" operation in the request so the reply can be
    matched up, plus the real UUID once the server assigns one."""

    def __init__(self, op_index):
        self.op_index = op_index  # Index of the "insert" op in the request.
        self.real = None          # Real UUID, filled in from the reply.
class Transaction(object):
    """A transaction may modify the contents of a database by modifying the
    values of columns, deleting rows, inserting rows, or adding checks that
    columns in the database have not changed ("verify" operations), through
    Row methods.

    Reading and writing columns and inserting and deleting rows are all
    straightforward.  The reasons to verify columns are less obvious.
    Verification is the key to maintaining transactional integrity.  Because
    OVSDB handles multiple clients, it can happen that between the time that
    OVSDB client A reads a column and writes a new value, OVSDB client B has
    written that column.  Client A's write should not ordinarily overwrite
    client B's, especially if the column in question is a "map" column that
    contains several more or less independent data items.  If client A adds
    a "verify" operation before it writes the column, then the transaction
    fails in case client B modifies it first.  Client A will then see the
    new value of the column and compose a new transaction based on the new
    contents written by client B.

    When a transaction is complete, which must be before the next call to
    Idl.run(), call Transaction.commit() or Transaction.abort().

    The life-cycle of a transaction looks like this:

    1. Create the transaction and record the initial sequence number:

        seqno = idl.change_seqno
        txn = Transaction(idl)

    2. Modify the database with Row and Transaction methods.

    3. Commit the transaction by calling Transaction.commit().  The first
       call to this function probably returns Transaction.INCOMPLETE.  The
       client must keep calling again as long as this remains true, calling
       Idl.run() in between to let the IDL do protocol processing.  (If the
       client doesn't have anything else to do in the meantime, it can use
       Transaction.commit_block() to avoid having to loop itself.)

    4. If the final status is Transaction.TRY_AGAIN, wait for
       Idl.change_seqno to change from the saved 'seqno' (it's possible that
       it's already changed, in which case the client should not wait at
       all), then start over from step 1.  Only a call to Idl.run() will
       change the return value of Idl.change_seqno.
       (Transaction.commit_block() calls Idl.run().)"""

    # Status values that Transaction.commit() can return.

    # Not yet committed or aborted.
    UNCOMMITTED = "uncommitted"

    # Transaction didn't include any changes.
    UNCHANGED = "unchanged"

    # Commit in progress, please wait.
    INCOMPLETE = "incomplete"

    # ovsdb_idl_txn_abort() called.
    ABORTED = "aborted"

    # Commit successful.
    SUCCESS = "success"

    # Commit failed because a "verify" operation
    # reported an inconsistency, due to a network
    # problem, or other transient failure.  Wait
    # for a change, then try again.
    TRY_AGAIN = "try again"

    # Server hasn't given us the lock yet.
    NOT_LOCKED = "not locked"

    # Commit failed due to a hard error.
    ERROR = "error"
@staticmethod
def status_to_string(status):
"""Converts one of the status values that Transaction.commit() can
return into a human-readable string.
(The status values are in fact such strings already, so
there's nothing to do.)"""
return status
def __init__(self, idl):
    """Start a new transaction on 'idl' (an instance of
    ovs.db.idl.Idl).

    A given Idl may only have a single active transaction at a time.

    A Transaction may modify the contents of a database by assigning
    new values to columns (attributes of Row), deleting rows (with
    Row.delete()), or inserting rows (with Transaction.insert()).  It
    may also check that columns in the database have not changed with
    Row.verify().

    When a transaction is complete (which must be before the next call
    to Idl.run()), call Transaction.commit() or Transaction.abort()."""
    assert idl.txn is None
    idl.txn = self

    self.idl = idl
    self._request_id = None
    self.dry_run = False
    self._status = Transaction.UNCOMMITTED
    self._error = None
    self._comments = []

    # Rows touched by this transaction, keyed by UUID.
    self._txn_rows = {}
    # State for Row.increment() / Row.fetch().
    self._inc_row = None
    self._inc_column = None
    self._fetch_requests = []
    # Maps inserted-row UUID -> _InsertedRow until the commit reply
    # supplies the real UUID.
    self._inserted_rows = {}
def add_comment(self, comment):
    """Append 'comment' to those passed to the OVSDB server when this
    transaction is committed.

    (Comments are written to the OVSDB log, which "ovsdb-tool
    show-log" can print in a relatively human-readable form.)"""
    self._comments.append(comment)
def wait(self, poller):
    """Cause poll_block() to wake up if this transaction has completed
    committing."""
    done = self._status not in (Transaction.UNCOMMITTED,
                                Transaction.INCOMPLETE)
    if done:
        poller.immediate_wake()
def _substitute_uuids(self, json):
    """Recursively replace ["uuid", <s>] pairs that name rows inserted
    by this transaction with ["named-uuid", ...] references; everything
    else is returned unchanged."""
    if not isinstance(json, (list, tuple)):
        return json

    is_uuid_pair = (len(json) == 2
                    and json[0] == 'uuid'
                    and ovs.ovsuuid.is_valid_string(json[1]))
    if not is_uuid_pair:
        # Ordinary list: substitute each element.
        return [self._substitute_uuids(elem) for elem in json]

    uuid = ovs.ovsuuid.from_string(json[1])
    row = self._txn_rows.get(uuid, None)
    if row and row._data is None:
        # Row inserted by this transaction: refer to it by name.
        return ["named-uuid", _uuid_name_from_uuid(uuid)]
    # A valid uuid pair that is not one of our inserts stays as-is.
    return json
def __disassemble(self):
    """Undo this transaction's local effects on the IDL contents.

    Re-inserts rows the transaction deleted, removes rows it inserted,
    and clears all per-row transaction state, leaving the IDL matching
    the committed database contents again.
    """
    self.idl.txn = None

    for row in six.itervalues(self._txn_rows):
        if row._changes is None:
            # Deleted row: put it back.  Restore _changes/_mutations
            # first — if we add the deleted row back to rows with
            # _changes == None then __getattr__ will not work for the
            # indexes.
            row.__dict__["_changes"] = {}
            row.__dict__["_mutations"] = {}
            row._table.rows[row.uuid] = row
        elif row._data is None:
            # Row inserted by this transaction has no committed form,
            # so drop it.
            del row._table.rows[row.uuid]
        row.__dict__["_changes"] = {}
        row.__dict__["_mutations"] = {}
        row.__dict__["_prereqs"] = {}
    self._txn_rows = {}
    def commit(self):
        """Attempts to commit 'txn'. Returns the status of the commit
        operation, one of the following constants:
        Transaction.INCOMPLETE:
            The transaction is in progress, but not yet complete. The caller
            should call again later, after calling Idl.run() to let the
            IDL do OVSDB protocol processing.
        Transaction.UNCHANGED:
            The transaction is complete. (It didn't actually change the
            database, so the IDL didn't send any request to the database
            server.)
        Transaction.ABORTED:
            The caller previously called Transaction.abort().
        Transaction.SUCCESS:
            The transaction was successful. The update made by the
            transaction (and possibly other changes made by other database
            clients) should already be visible in the IDL.
        Transaction.TRY_AGAIN:
            The transaction failed for some transient reason, e.g. because a
            "verify" operation reported an inconsistency or due to a network
            problem. The caller should wait for a change to the database,
            then compose a new transaction, and commit the new transaction.
            Use Idl.change_seqno to wait for a change in the database. It is
            important to use its value *before* the initial call to
            Transaction.commit() as the baseline for this purpose, because
            the change that one should wait for can happen after the initial
            call but before the call that returns Transaction.TRY_AGAIN, and
            using some other baseline value in that situation could cause an
            indefinite wait if the database rarely changes.
        Transaction.NOT_LOCKED:
            The transaction failed because the IDL has been configured to
            require a database lock (with Idl.set_lock()) but didn't
            get it yet or has already lost it.
        Committing a transaction rolls back all of the changes that it made to
        the IDL's copy of the database. If the transaction commits
        successfully, then the database server will send an update and, thus,
        the IDL will be updated with the committed changes."""
        # The status can only change if we're the active transaction.
        # (Otherwise, our status will change only in Idl.run().)
        if self != self.idl.txn:
            return self._status
        # If we need a lock but don't have it, give up quickly.
        if self.idl.lock_name and not self.idl.has_lock:
            self._status = Transaction.NOT_LOCKED
            self.__disassemble()
            return self._status
        # 'operations' becomes the params of a "transact" request:
        # operations[0] is the database name, followed by one JSON object
        # per OVSDB operation.  The server's reply array has one element
        # per operation but NO slot for the database name, which is why
        # the reply indexes recorded below are computed as
        # "len(operations) - 1" *before* appending the operation.
        operations = [self.idl._db.name]
        # Assert that we have the required lock (avoiding a race).
        if self.idl.lock_name:
            operations.append({"op": "assert",
                               "lock": self.idl.lock_name})
        # Add prerequisites and declarations of new rows.
        for row in six.itervalues(self._txn_rows):
            if row._prereqs:
                # Emit a zero-timeout "wait" so the commit fails (softly)
                # if any verified column changed since we read it.
                rows = {}
                columns = []
                for column_name in row._prereqs:
                    columns.append(column_name)
                    rows[column_name] = row._data[column_name].to_json()
                operations.append({"op": "wait",
                                   "table": row._table.name,
                                   "timeout": 0,
                                   "where": _where_uuid_equals(row.uuid),
                                   "until": "==",
                                   "columns": columns,
                                   "rows": [rows]})
        # Add updates.
        any_updates = False
        for row in six.itervalues(self._txn_rows):
            if row._changes is None:
                # _changes == None marks a deleted row.
                if row._table.is_root:
                    operations.append({"op": "delete",
                                       "table": row._table.name,
                                       "where": _where_uuid_equals(row.uuid)})
                    any_updates = True
                else:
                    # Let ovsdb-server decide whether to really delete it.
                    pass
            elif row._changes:
                op = {"table": row._table.name}
                if row._data is None:
                    # _data == None marks a row inserted by this txn.
                    op["op"] = "insert"
                    op["uuid-name"] = _uuid_name_from_uuid(row.uuid)
                    any_updates = True
                    # Index of this insert's result in the reply array
                    # (pre-append; see the note on 'operations' above).
                    op_index = len(operations) - 1
                    self._inserted_rows[row.uuid] = _InsertedRow(op_index)
                else:
                    op["op"] = "update"
                    op["where"] = _where_uuid_equals(row.uuid)
                row_json = {}
                op["row"] = row_json
                for column_name, datum in six.iteritems(row._changes):
                    if row._data is not None or not datum.is_default():
                        row_json[column_name] = (
                            self._substitute_uuids(datum.to_json()))
                        # If anything really changed, consider it an update.
                        # We can't suppress not-really-changed values earlier
                        # or transactions would become nonatomic (see the big
                        # comment inside Transaction._write()).
                        if (not any_updates and row._data is not None and
                                row._data[column_name] != datum):
                            any_updates = True
                if row._data is None or row_json:
                    operations.append(op)
            if row._mutations:
                # Partial map/set updates become a "mutate" operation.
                addop = False
                op = {"table": row._table.name}
                op["op"] = "mutate"
                if row._data is None:
                    # New row
                    op["where"] = self._substitute_uuids(
                        _where_uuid_equals(row.uuid))
                else:
                    # Existing row
                    op["where"] = _where_uuid_equals(row.uuid)
                op["mutations"] = []
                if '_removes' in row._mutations.keys():
                    for col, dat in six.iteritems(row._mutations['_removes']):
                        column = row._table.columns[col]
                        if column.type.is_map():
                            opdat = ["set"]
                            opdat.append(list(dat))
                        else:
                            opdat = ["set"]
                            inner_opdat = []
                            for ele in dat:
                                try:
                                    datum = data.Datum.from_python(column.type,
                                                                   ele, _row_to_uuid)
                                except error.Error:
                                    # NOTE(review): bare return (None, not a
                                    # status) without __disassemble(); confirm
                                    # callers tolerate this.
                                    return
                                inner_opdat.append(
                                    self._substitute_uuids(datum.to_json()))
                            opdat.append(inner_opdat)
                        mutation = [col, "delete", opdat]
                        op["mutations"].append(mutation)
                        addop = True
                if '_inserts' in row._mutations.keys():
                    for col, val in six.iteritems(row._mutations['_inserts']):
                        column = row._table.columns[col]
                        if column.type.is_map():
                            opdat = ["map"]
                            datum = data.Datum.from_python(column.type, val,
                                                           _row_to_uuid)
                            opdat.append(datum.as_list())
                        else:
                            opdat = ["set"]
                            inner_opdat = []
                            for ele in val:
                                try:
                                    datum = data.Datum.from_python(column.type,
                                                                   ele, _row_to_uuid)
                                except error.Error:
                                    # NOTE(review): same bare return as above.
                                    return
                                inner_opdat.append(
                                    self._substitute_uuids(datum.to_json()))
                            opdat.append(inner_opdat)
                        mutation = [col, "insert", opdat]
                        op["mutations"].append(mutation)
                        addop = True
                if addop:
                    operations.append(op)
                    any_updates = True
        if self._fetch_requests:
            for fetch in self._fetch_requests:
                # Reply-array index of the "select" appended just below
                # (pre-append offset; consumed by __process_fetch_reply()).
                fetch["index"] = len(operations) - 1
                operations.append({"op": "select",
                                   "table": fetch["row"]._table.name,
                                   "where": self._substitute_uuids(
                                       _where_uuid_equals(fetch["row"].uuid)),
                                   "columns": [fetch["column_name"]]})
            any_updates = True
        # Add increment.
        if self._inc_row and any_updates:
            # Reply-array index of the "mutate" below; the matching
            # "select" lands at _inc_index + 1 (see __process_inc_reply()).
            self._inc_index = len(operations) - 1
            operations.append({"op": "mutate",
                               "table": self._inc_row._table.name,
                               "where": self._substitute_uuids(
                                   _where_uuid_equals(self._inc_row.uuid)),
                               "mutations": [[self._inc_column, "+=", 1]]})
            operations.append({"op": "select",
                               "table": self._inc_row._table.name,
                               "where": self._substitute_uuids(
                                   _where_uuid_equals(self._inc_row.uuid)),
                               "columns": [self._inc_column]})
        # Add comment.
        if self._comments:
            operations.append({"op": "comment",
                               "comment": "\n".join(self._comments)})
        # Dry run?
        if self.dry_run:
            operations.append({"op": "abort"})
        if not any_updates:
            # Nothing to send; the transaction trivially "succeeds".
            self._status = Transaction.UNCHANGED
        else:
            msg = ovs.jsonrpc.Message.create_request("transact", operations)
            self._request_id = msg.id
            if not self.idl._session.send(msg):
                # send() returning falsy indicates success here; the reply
                # is matched up later in Idl.run() via _outstanding_txns.
                self.idl._outstanding_txns[self._request_id] = self
                self._status = Transaction.INCOMPLETE
            else:
                self._status = Transaction.TRY_AGAIN
        # Locally roll back regardless of outcome; on SUCCESS the server's
        # subsequent update notification re-applies the committed changes.
        self.__disassemble()
        return self._status
def commit_block(self):
"""Attempts to commit this transaction, blocking until the commit
either succeeds or fails. Returns the final commit status, which may
be any Transaction.* value other than Transaction.INCOMPLETE.
This function calls Idl.run() on this transaction'ss IDL, so it may
cause Idl.change_seqno to change."""
while True:
status = self.commit()
if status != Transaction.INCOMPLETE:
return status
self.idl.run()
poller = ovs.poller.Poller()
self.idl.wait(poller)
self.wait(poller)
poller.block()
def get_increment_new_value(self):
"""Returns the final (incremented) value of the column in this
transaction that was set to be incremented by Row.increment. This
transaction must have committed successfully."""
assert self._status == Transaction.SUCCESS
return self._inc_new_value
def abort(self):
"""Aborts this transaction. If Transaction.commit() has already been
called then the transaction might get committed anyhow."""
self.__disassemble()
if self._status in (Transaction.UNCOMMITTED,
Transaction.INCOMPLETE):
self._status = Transaction.ABORTED
def get_error(self):
"""Returns a string representing this transaction's current status,
suitable for use in log messages."""
if self._status != Transaction.ERROR:
return Transaction.status_to_string(self._status)
elif self._error:
return self._error
else:
return "no error details available"
def __set_error_json(self, json):
if self._error is None:
self._error = ovs.json.to_string(json)
def get_insert_uuid(self, uuid):
"""Finds and returns the permanent UUID that the database assigned to a
newly inserted row, given the UUID that Transaction.insert() assigned
locally to that row.
Returns None if 'uuid' is not a UUID assigned by Transaction.insert()
or if it was assigned by that function and then deleted by Row.delete()
within the same transaction. (Rows that are inserted and then deleted
within a single transaction are never sent to the database server, so
it never assigns them a permanent UUID.)
This transaction must have completed successfully."""
assert self._status in (Transaction.SUCCESS,
Transaction.UNCHANGED)
inserted_row = self._inserted_rows.get(uuid)
if inserted_row:
return inserted_row.real
return None
def _increment(self, row, column):
assert not self._inc_row
self._inc_row = row
self._inc_column = column
def _fetch(self, row, column_name):
self._fetch_requests.append({"row": row, "column_name": column_name})
    def _write(self, row, column, datum):
        """Records, in this transaction, that 'column' of 'row' should be
        set to 'datum', superseding any pending partial mutations on the
        same column."""
        assert row._changes is not None
        assert row._mutations is not None
        # The transaction that currently owns the row's IDL.
        txn = row._idl.txn
        # If this is a write-only column and the datum being written is the
        # same as the one already there, just skip the update entirely. This
        # is worth optimizing because we have a lot of columns that get
        # periodically refreshed into the database but don't actually change
        # that often.
        #
        # We don't do this for read/write columns because that would break
        # atomicity of transactions--some other client might have written a
        # different value in that column since we read it. (But if a whole
        # transaction only does writes of existing values, without making any
        # real changes, we will drop the whole transaction later in
        # ovsdb_idl_txn_commit().)
        if (not column.alert and row._data and
                row._data.get(column.name) == datum):
            # Only skip when there is no conflicting pending change.
            new_value = row._changes.get(column.name)
            if new_value is None or new_value == datum:
                return
        txn._txn_rows[row.uuid] = row
        # A whole-column write cancels any queued map/set mutations on it.
        if '_inserts' in row._mutations:
            row._mutations['_inserts'].pop(column.name, None)
        if '_removes' in row._mutations:
            row._mutations['_removes'].pop(column.name, None)
        # Copy so later caller-side mutation of 'datum' can't leak in.
        row._changes[column.name] = datum.copy()
def insert(self, table, new_uuid=None):
"""Inserts and returns a new row in 'table', which must be one of the
ovs.db.schema.TableSchema objects in the Idl's 'tables' dict.
The new row is assigned a provisional UUID. If 'uuid' is None then one
is randomly generated; otherwise 'uuid' should specify a randomly
generated uuid.UUID not otherwise in use. ovsdb-server will assign a
different UUID when 'txn' is committed, but the IDL will replace any
uses of the provisional UUID in the data to be to be committed by the
UUID assigned by ovsdb-server."""
assert self._status == Transaction.UNCOMMITTED
if new_uuid is None:
new_uuid = uuid.uuid4()
row = Row(self.idl, table, new_uuid, None)
table.rows[row.uuid] = row
self._txn_rows[row.uuid] = row
return row
    def _process_reply(self, msg):
        """Translates the server's "transact" reply 'msg' into a final
        transaction status, classifying per-operation failures as hard
        (permanent), soft (retryable), or lock-related."""
        if msg.type == ovs.jsonrpc.Message.T_ERROR:
            self._status = Transaction.ERROR
        elif not isinstance(msg.result, (list, tuple)):
            # XXX rate-limit
            vlog.warn('reply to "transact" is not JSON array')
        else:
            hard_errors = False
            soft_errors = False
            lock_errors = False
            ops = msg.result
            for op in ops:
                if op is None:
                    # This isn't an error in itself but indicates that some
                    # prior operation failed, so make sure that we know about
                    # it.
                    soft_errors = True
                elif isinstance(op, dict):
                    # NOTE: 'error' here is the reply field, shadowing the
                    # module-level 'error' import within this loop body.
                    error = op.get("error")
                    if error is not None:
                        if error == "timed out":
                            # A "wait" prerequisite failed: retryable.
                            soft_errors = True
                        elif error == "not owner":
                            lock_errors = True
                        elif error == "aborted":
                            # Deliberate (e.g. dry_run); not an error class.
                            pass
                        else:
                            hard_errors = True
                            self.__set_error_json(op)
                else:
                    hard_errors = True
                    self.__set_error_json(op)
                    # XXX rate-limit
                    vlog.warn("operation reply is not JSON null or object")
            if not soft_errors and not hard_errors and not lock_errors:
                # Every operation succeeded: harvest the results of the
                # increment, fetch, and insert operations commit() added.
                if self._inc_row and not self.__process_inc_reply(ops):
                    hard_errors = True
                if self._fetch_requests:
                    if self.__process_fetch_reply(ops):
                        self.idl.change_seqno += 1
                    else:
                        hard_errors = True
                for insert in six.itervalues(self._inserted_rows):
                    if not self.__process_insert_reply(insert, ops):
                        hard_errors = True
            # Severity order: hard beats lock beats soft.
            if hard_errors:
                self._status = Transaction.ERROR
            elif lock_errors:
                self._status = Transaction.NOT_LOCKED
            elif soft_errors:
                self._status = Transaction.TRY_AGAIN
            else:
                self._status = Transaction.SUCCESS
@staticmethod
def __check_json_type(json, types, name):
if not json:
# XXX rate-limit
vlog.warn("%s is missing" % name)
return False
elif not isinstance(json, tuple(types)):
# XXX rate-limit
vlog.warn("%s has unexpected type %s" % (name, type(json)))
return False
else:
return True
    def __process_fetch_reply(self, ops):
        """Copies the values returned by the "select" operations that
        commit() appended for Row.fetch() requests into the local row
        cache.  Returns True if any row data was updated (the caller then
        bumps change_seqno), False on a structurally invalid reply."""
        update = False
        for fetch_request in self._fetch_requests:
            row = fetch_request["row"]
            column_name = fetch_request["column_name"]
            # Index into 'ops' recorded by commit() for this request.
            index = fetch_request["index"]
            table = row._table
            select = ops[index]
            fetched_rows = select.get("rows")
            if not Transaction.__check_json_type(fetched_rows, (list, tuple),
                                                 '"select" reply "rows"'):
                # Malformed reply shape: give up on the whole batch.
                return False
            if len(fetched_rows) != 1:
                # XXX rate-limit
                vlog.warn('"select" reply "rows" has %d elements '
                          'instead of 1' % len(fetched_rows))
                # Wrong cardinality: skip just this request.
                continue
            fetched_row = fetched_rows[0]
            if not Transaction.__check_json_type(fetched_row, (dict,),
                                                 '"select" reply row'):
                continue
            # NOTE(review): columns.get() may return None for an unknown
            # column, which would make from_json fail -- presumably fetch()
            # only queues known columns; confirm against Row.fetch().
            column = table.columns.get(column_name)
            datum_json = fetched_row.get(column_name)
            datum = data.Datum.from_json(column.type, datum_json)
            row._data[column_name] = datum
            update = True
        return update
def __process_inc_reply(self, ops):
if self._inc_index + 2 > len(ops):
# XXX rate-limit
vlog.warn("reply does not contain enough operations for "
"increment (has %d, needs %d)" %
(len(ops), self._inc_index + 2))
# We know that this is a JSON object because the loop in
# __process_reply() already checked.
mutate = ops[self._inc_index]
count = mutate.get("count")
if not Transaction.__check_json_type(count, six.integer_types,
'"mutate" reply "count"'):
return False
if count != 1:
# XXX rate-limit
vlog.warn('"mutate" reply "count" is %d instead of 1' % count)
return False
select = ops[self._inc_index + 1]
rows = select.get("rows")
if not Transaction.__check_json_type(rows, (list, tuple),
'"select" reply "rows"'):
return False
if len(rows) != 1:
# XXX rate-limit
vlog.warn('"select" reply "rows" has %d elements '
'instead of 1' % len(rows))
return False
row = rows[0]
if not Transaction.__check_json_type(row, (dict,),
'"select" reply row'):
return False
column = row.get(self._inc_column)
if not Transaction.__check_json_type(column, six.integer_types,
'"select" reply inc column'):
return False
self._inc_new_value = column
return True
def __process_insert_reply(self, insert, ops):
if insert.op_index >= len(ops):
# XXX rate-limit
vlog.warn("reply does not contain enough operations "
"for insert (has %d, needs %d)"
% (len(ops), insert.op_index))
return False
# We know that this is a JSON object because the loop in
# __process_reply() already checked.
reply = ops[insert.op_index]
json_uuid = reply.get("uuid")
if not Transaction.__check_json_type(json_uuid, (tuple, list),
'"insert" reply "uuid"'):
return False
try:
uuid_ = ovs.ovsuuid.from_json(json_uuid)
except error.Error:
# XXX rate-limit
vlog.warn('"insert" reply "uuid" is not a JSON UUID')
return False
insert.real = uuid_
return True
class SchemaHelper(object):
    """IDL Schema helper.

    This class encapsulates the logic required to generate schemas suitable
    for creating 'ovs.db.idl.Idl' objects.  Clients should register columns
    they are interested in using register_columns().  When finished, the
    get_idl_schema() function may be called.

    The location on disk of the schema used may be found in the
    'schema_location' variable."""

    def __init__(self, location=None, schema_json=None):
        """Creates a new Schema object.

        'location' file path to ovs schema. None means default location
        'schema_json' schema in json representation in memory
        """
        if location and schema_json:
            raise ValueError("both location and schema_json can't be "
                             "specified. it's ambiguous.")
        if schema_json is None:
            if location is None:
                location = "%s/vswitch.ovsschema" % ovs.dirs.PKGDATADIR
            schema_json = ovs.json.from_file(location)

        self.schema_json = schema_json
        self._tables = {}    # table name -> set of column names
        self._readonly = {}  # table name -> list of readonly column names
        self._all = False    # True => replicate every table and column

    def register_columns(self, table, columns, readonly=None):
        """Registers interest in the given 'columns' of 'table'.  Future calls
        to get_idl_schema() will include 'table':column for each column in
        'columns'. This function automatically avoids adding duplicate entries
        to the schema.

        A subset of 'columns' can be specified as 'readonly'. The readonly
        columns are not replicated but can be fetched on-demand by the user
        with Row.fetch().

        'table' must be a string.
        'columns' must be a list of strings.
        'readonly' must be a list of strings (defaults to no readonly
        columns).
        """
        assert isinstance(table, six.string_types)
        assert isinstance(columns, list)

        # Bug fix: the default used to be the mutable literal 'readonly=[]',
        # a single shared list stored into self._readonly for every caller
        # that omitted the argument; use a None sentinel instead.
        if readonly is None:
            readonly = []

        columns = set(columns) | self._tables.get(table, set())
        self._tables[table] = columns
        # Note: this intentionally replaces (not merges) any previously
        # registered readonly list for 'table'.
        self._readonly[table] = readonly

    def register_table(self, table):
        """Registers interest in the given all columns of 'table'. Future calls
        to get_idl_schema() will include all columns of 'table'.

        'table' must be a string
        """
        assert isinstance(table, six.string_types)
        self._tables[table] = set()  # empty set means all columns in the table

    def register_all(self):
        """Registers interest in every column of every table."""
        self._all = True

    def get_idl_schema(self):
        """Gets a schema appropriate for the creation of an 'ovs.db.id.IDL'
        object based on columns registered using the register_columns()
        function."""
        # The raw JSON is consumed here; this method is one-shot per helper.
        schema = ovs.db.schema.DbSchema.from_json(self.schema_json)
        self.schema_json = None

        if not self._all:
            schema_tables = {}
            for table, columns in six.iteritems(self._tables):
                schema_tables[table] = (
                    self._keep_table_columns(schema, table, columns))

            schema.tables = schema_tables
        schema.readonly = self._readonly
        return schema

    def _keep_table_columns(self, schema, table_name, columns):
        # Prunes 'schema's table 'table_name' down to just 'columns';
        # an empty 'columns' set means "keep every column".
        assert table_name in schema.tables
        table = schema.tables[table_name]

        if not columns:
            # empty set means all columns in the table
            return table

        new_columns = {}
        for column_name in columns:
            assert isinstance(column_name, six.string_types)
            assert column_name in table.columns
            new_columns[column_name] = table.columns[column_name]

        table.columns = new_columns
        return table
# (Removed trailing non-source text: dataset-viewer residue accidentally
# appended to the file -- "Subsets and Splits ..." -- which is not valid
# Python and was never part of this module.)